diff --git "a/2546.jsonl" "b/2546.jsonl" new file mode 100644--- /dev/null +++ "b/2546.jsonl" @@ -0,0 +1,721 @@ +{"seq_id":"393017875","text":"from Logic.LogicAPI import LogicAPI\nfrom Presentation.Menu import Menu, format_function_name\nfrom Presentation.Operations import *\nimport os, re\n\n# Import all operations\nfrom Presentation.Operations.Generic import *\nfrom Presentation.Operations.Contract import *\nfrom Presentation.Operations.Customer import *\nfrom Presentation.Operations.Destination import *\nfrom Presentation.Operations.Employee import *\nfrom Presentation.Operations.Financial import *\nfrom Presentation.Operations.Invoice import *\nfrom Presentation.Operations.Vehicle import *\nfrom Presentation.Operations.Vehicle_type import *\n\nclass UserInterface:\n def __init__(self):\n self.logic = LogicAPI(self)\n self.operation = Operations(self.logic, self)\n\n # Clear window\n os.system('cls')\n\n self.access = self.get_user_login()\n\n # Create main categories\n main_menu = Menu(\"Main Menu\", None, None, self.logic, self, 0)\n office_menu = Menu(\"Office Menu\", None, main_menu, self.logic, self, 'office')\n airport_menu = Menu(\"Airport menu\", None, main_menu, self.logic, self, 'airport')\n admin_menu = Menu(\"Adminstration menu\", None, main_menu,self.logic, self, 'admin')\n papa_chuck_menu = Menu(\"Chuck Norris menu\", None, main_menu,self.logic,self,'chuck')\n\n # Add four menu nodes to the main menu\n main_menu.selectable_options.append(admin_menu)\n main_menu.selectable_options.append(office_menu)\n main_menu.selectable_options.append(airport_menu)\n main_menu.selectable_options.append(papa_chuck_menu)\n #region ADMIN MENU SYSTEM =======\n\n\n #Chuck submenus \n report_menu_papa_chuck = Menu(\"Report Menu For Papa Chuck\", None, papa_chuck_menu, self.logic, self, 'chuck')\n\n\n #adding submenus to chucks menu node\n papa_chuck_menu.selectable_options += [\n report_menu_papa_chuck\n ]\n\n #region Report menu\n report_menu_papa_chuck.selectable_options += [\n get_financial_report,\n get_vehicle_report,\n get_invoice_report_by_state,\n get_invoice_report_by_customer,\n ]\n\n\n #region ADMIN MENU SYSTEM ====\n\n #admins submenus\n employee_menu_admin = Menu(\"Employee Menu\", None, admin_menu, self.logic, self, 'admin')\n vehicle_menu_admin = Menu(\"Vehicle Menu\", None, admin_menu, self.logic, self, 'admin')\n contract_menu_admin = Menu(\"Contract Menu\", None, admin_menu, self.logic, self, 'admin')\n report_menu_admin = Menu(\"Report Menu\", None, admin_menu, self.logic, self, 'admin')\n customer_menu_admin = Menu(\"Customer Menu\", None, admin_menu, self.logic, self, 'admin')\n destination_menu_admin = Menu(\"Destination Menu\", None, admin_menu, self.logic, self, 'admin')\n vehicle_type_menu_admin = Menu(\"Vehicle Type Menu\", None, admin_menu, self.logic, self, 'admin')\n \n\n #Add submenus to office menu node\n admin_menu.selectable_options += [\n employee_menu_admin, vehicle_menu_admin,\n contract_menu_admin, report_menu_admin,\n customer_menu_admin, destination_menu_admin,\n vehicle_type_menu_admin,\n ]\n\n #region admin Employee menu -----\n\n # Submenu for specfic employee display options\n display_employee_menu_admin = Menu(\"Display Employee\", None, employee_menu_admin, self.logic, self, 'admin')\n\n # Employee functions with employee display menu\n employee_menu_admin.selectable_options += [\n register_employee, edit_employee,\n display_employee_menu_admin\n ]\n\n # Employee display functions\n display_employee_menu_admin.selectable_options += [\n 
get_all_employees,\n get_employee,\n get_employee_after_location,\n ]\n\n #endregion -----\n\n\n\n #region Admin Vehicle menu -----\n\n # Submenu for specific vehicle display options\n display_vehicle_menu_admin = Menu(\"Display Vehicle\", None, vehicle_menu_admin, self.logic, self, 'admin')\n\n # Vehicle functions with vehicle display menu\n vehicle_menu_admin.selectable_options += [\n register_vehicle, edit_vehicle,\n display_vehicle_menu_admin,\n handover_vehicle, handin_vehicle\n \n ]\n\n # Vehicle display functions\n display_vehicle_menu_admin.selectable_options += [\n get_all_vehicles,\n get_vehicle,\n get_vehicle_after_location,\n get_all_vehicle_types,\n get_vehicle_after_condition,\n get_vehicle_fit_for_rental,\n \n ]\n\n #endregion -----\n\n #region Office Contract menu -----\n\n # Submenu for specific contract display options\n display_contract_menu_admin = Menu (\"Display Contract\", None, contract_menu_admin, self.logic, self, 'admin')\n\n # Contract functions with contract display menu\n contract_menu_admin.selectable_options += [\n register_contract, edit_contract,\n display_contract_menu_admin,\n ]\n\n # Contract display functions\n display_contract_menu_admin.selectable_options += [\n get_all_contracts,\n get_contract,\n get_printable_contract\n ]\n\n #endregion -----\n\n #region Report menu\n\n report_menu_admin.selectable_options += [\n get_financial_report,\n get_vehicle_report,\n get_invoice_report_by_state,\n get_invoice_report_by_customer,\n get_invoice,\n pay_invoice\n ]\n #endregion\n\n\n #region Customer Contract menu -----\n\n # Submenu for specific customer display options\n display_customer_menu_admin = Menu (\"Display Customer\", None, customer_menu_admin, self.logic, self, 'admin')\n\n # Customer functions with customer display menu\n customer_menu_admin.selectable_options += [\n register_customer, edit_customer,\n display_customer_menu_admin\n ]\n\n # Customer display functions\n display_customer_menu_admin.selectable_options += [\n get_customer,\n get_all_customers,\n ]\n\n #endregion -----\n\n #region Vehicle Type Contract menu -----\n\n # Submenu for specific vehicle type display options\n display_vehicle_type_menu_admin = Menu (\"Display Vehicle Type\", None, vehicle_type_menu_admin, self.logic, self, 'admin')\n\n # Vehicle type functions with vehicle type display menu\n vehicle_type_menu_admin.selectable_options += [\n register_vehicle_type, edit_vehicle_type,\n display_vehicle_type_menu_admin\n ]\n\n # vehicle_type display functions\n display_vehicle_type_menu_admin.selectable_options += [\n get_vehicle_type,\n get_vehicle_type_rates\n ]\n\n #endregion -----\n\n #region destination Contract menu -----\n\n # Submenu for specific destination display options\n display_destination_menu_admin = Menu (\"Display destination\", None, destination_menu_admin, self.logic, self, 'admin')\n\n # destination functions with destination display menu\n destination_menu_admin.selectable_options += [\n register_destination, edit_destination,\n display_destination_menu_admin\n ]\n\n # destination display functions\n display_destination_menu_admin.selectable_options += [\n get_destination,\n get_all_destinations,\n ]\n\n #endregion -----\n #endregion =====\n\n\n #region OFFICE MENU SYSTEM =====\n \"\"\"OFFICE MENU\"\"\"\n\n # Office submenus\n employee_menu_office = Menu(\"Employee Menu\", None, office_menu, self.logic, self, 'office')\n vehicle_menu_office = Menu(\"Vehicle Menu\", None, office_menu, self.logic, self, 'office')\n contract_menu_office = Menu(\"Contract 
Menu\", None, office_menu, self.logic, self, 'office')\n report_menu_office = Menu(\"Report Menu\", None, office_menu, self.logic, self, 'office')\n customer_menu_office = Menu(\"Customer Menu\", None, office_menu, self.logic, self, 'office')\n destination_menu_office = Menu(\"Destination Menu\", None, office_menu, self.logic, self, 'office')\n vehicle_type_menu_office = Menu(\"Vehicle Type Menu\", None,office_menu, self.logic, self, 'office')\n\n # Add submenus to office menu node\n office_menu.selectable_options += [\n employee_menu_office,vehicle_menu_office,\n contract_menu_office, report_menu_office,\n customer_menu_office,destination_menu_office,\n vehicle_type_menu_office,\n ]\n\n #endregion -----\n #region Office Employee menu -----\n\n # Submenu for specfic employee display options\n display_employee_menu_office= Menu(\"Display Employee\", None, employee_menu_office, self.logic, self, 'office')\n\n # Employee functions with employee display menu\n employee_menu_office.selectable_options += [\n register_employee, edit_employee,\n display_employee_menu_office,\n ]\n\n # Employee display functions\n display_employee_menu_office.selectable_options += [\n get_all_employees,\n get_employee,\n get_employee_after_location,\n ]\n\n #endregion -----\n display_vehicle_menu_office= Menu(\"Display Vehicle\", None, vehicle_menu_office, self.logic, self, 'office')\n\n vehicle_menu_office.selectable_options += [\n display_vehicle_menu_office,\n ]\n\n display_vehicle_menu_office.selectable_options += [\n get_all_vehicles,\n get_vehicle,\n get_vehicle_after_location,\n get_vehicle_after_condition,\n get_vehicle_fit_for_rental,\n ]\n\n #region Office Contract menu -----\n\n # Submenu for specific contract display options\n display_contract_menu_office = Menu (\"Display Contract\", None, contract_menu_office, self.logic, self, 'office')\n\n # Contract functions with contract display menu\n contract_menu_office.selectable_options += [\n register_contract, edit_contract,\n display_contract_menu_office,\n ]\n\n # Contract display functions\n display_contract_menu_office.selectable_options += [\n get_all_contracts,\n get_contract,\n get_printable_contract\n ]\n\n #endregion -----\n #region Report menu\n report_menu_office.selectable_options += [\n get_financial_report,\n get_vehicle_report,\n get_invoice_report_by_state,\n get_invoice_report_by_customer,\n ]\n\n #endregion\n\n\n #region Customer Contract menu -----\n\n # Submenu for specific customer display options\n display_customer_menu_office = Menu (\"Display Customer\", None, customer_menu_office, self.logic, self, 'office')\n\n # Customer functions with customer display menu\n customer_menu_office.selectable_options += [\n register_customer, edit_customer,\n display_customer_menu_office\n ]\n\n # Customer display functions\n display_customer_menu_office.selectable_options += [\n get_customer,\n get_all_customers,\n ]\n\n #endregion -----\n\n #region Vehicle Type Contract menu -----\n\n # Submenu for specific vehicle type display options\n display_vehicle_type_menu_office = Menu (\"Display Vehicle Type\", None, vehicle_type_menu_office, self.logic, self, 'office')\n\n # Vehicle type functions with vehicle type display menu\n vehicle_type_menu_office.selectable_options += [\n register_vehicle_type, edit_vehicle_type,\n display_vehicle_type_menu_office\n ]\n\n # vehicle_type display functions\n display_vehicle_type_menu_office.selectable_options += [\n get_vehicle_type,\n get_vehicle_type_rates\n ]\n\n #endregion -----\n\n #region destination 
Contract menu -----\n\n # Submenu for specific destination display options\n display_destination_menu_office = Menu (\"Display destination\", None, destination_menu_office, self.logic, self, 'office')\n\n # destination functions with destination display menu\n destination_menu_office.selectable_options += [\n register_destination, edit_destination,\n display_destination_menu_office\n ]\n\n # destination display functions\n display_destination_menu_office.selectable_options += [\n get_destination,\n get_all_destinations,\n ]\n\n #endregion -----\n #endregion =====\n\n #region AIRPORT MENU SYSTEM =====\n \"\"\"AIRPORT MENU\"\"\"\n\n # Airport submenus\n employee_menu_airport = Menu(\"Employee Menu\", None, airport_menu, self.logic, self, 'airport')\n vehicle_menu_airport = Menu(\"Vehicle Menu\", None, airport_menu, self.logic, self, 'airport')\n report_menu_airport = Menu(\"Report Menu\", None,airport_menu,self.logic,self,'airport')\n customer_menu_airport = Menu(\"Customer Menu\", None, airport_menu, self.logic, self, 'airport')\n destination_menu_airport = Menu(\"Destination Menu\", None, airport_menu, self.logic, self, 'airport')\n vehicle_type_menu_airport = Menu(\"Vehicle Type Menu\", None, airport_menu, self.logic, self, 'airport')\n # Add submenus to airport menu node\n airport_menu.selectable_options += [\n employee_menu_airport,\n vehicle_menu_airport,report_menu_airport,\n customer_menu_airport,destination_menu_airport,\n vehicle_type_menu_airport\n ]\n\n #region airport Employee menu -----\n\n # Submenu for specfic employee display options\n display_employee_menu_airport = Menu(\"Display Employee\", None, employee_menu_airport, self.logic, self, 'airport')\n\n # Employee functions with employee display menu\n employee_menu_airport.selectable_options += [\n display_employee_menu_airport\n ]\n\n # Employee display functions\n display_employee_menu_airport.selectable_options += [\n get_all_employees,\n get_employee,\n get_employee_after_location,\n ]\n\n #endregion -----\n\n\n #region Airport vehicle menu -----\n\n # Submenu for specific vehicle display options\n display_vehicle_menu_airport = Menu(\"Display Vehicle\", None, vehicle_menu_airport, self.logic, self, 'airport')\n\n # Vehicle functions with vehicle display menu\n vehicle_menu_airport.selectable_options += [\n register_vehicle, edit_vehicle,\n display_vehicle_menu_airport,\n handover_vehicle, handin_vehicle \n ]\n\n\n # Vehicle display functions\n display_vehicle_menu_airport.selectable_options += [\n get_all_vehicles,\n get_vehicle,\n get_vehicle_after_location,\n get_vehicle_after_condition,\n get_vehicle_fit_for_rental,\n ]\n \n\n report_menu_airport.selectable_options += [\n get_invoice,\n pay_invoice\n ]\n\n\n \n #region Customer Contract menu -----\n\n # Submenu for specific customer display options\n display_customer_menu_airport = Menu (\"Display Customer\", None, customer_menu_airport, self.logic, self, 'airport')\n\n # Customer functions with customer display menu\n customer_menu_airport.selectable_options += [\n register_customer, edit_customer,\n display_customer_menu_airport\n ]\n\n # Customer display functions\n display_customer_menu_airport.selectable_options += [\n get_customer,\n get_all_customers,\n ]\n\n #endregion -----\n\n #region Vehicle Type Contract menu -----\n\n # Submenu for specific vehicle type display options\n display_vehicle_type_menu_airport = Menu (\"Display Vehicle Type\", None, vehicle_type_menu_airport, self.logic, self, 'airport')\n\n # Vehicle type functions with vehicle type display 
menu\n vehicle_type_menu_airport.selectable_options += [\n register_vehicle_type, edit_vehicle_type,\n display_vehicle_type_menu_airport\n ]\n\n # vehicle_type display functions\n display_vehicle_type_menu_airport.selectable_options += [\n get_vehicle_type,\n get_vehicle_type_rates\n ]\n\n #endregion -----\n\n #region destination Contract menu -----\n\n # Submenu for specific destination display options\n display_destination_menu_airport = Menu (\"Display destination\", None, destination_menu_airport, self.logic, self, 'airport')\n\n # destination functions with destination display menu\n destination_menu_airport.selectable_options += [\n register_destination, edit_destination,\n display_destination_menu_airport\n ]\n\n # destination display functions\n display_destination_menu_airport.selectable_options += [\n get_destination,\n get_all_destinations,\n ]\n\n #endregion -----\n #endregion =====\n\n\n\n self.current_menu = main_menu\n\n def get_user_login(self):\n ''' Get user login credentials and assign access values '''\n\n # Enumerate registered employees, relevant information to login\n employees = {employee.ssn:employee.title for employee in self.logic.employee.get_all()}\n \n # Custom login credentials for administrative purposes\n employees['admin'] = 'admin'\n\n employees['chuck norris can divide by zero'] = 'chuck'\n\n login = False\n\n print('{:-^45}\\n'.format('Login'))\n\n while not login:\n user_ssid = self.get_user_input('Enter SSID: ')\n\n if user_ssid.lower() == 'q' or user_ssid.lower() == 'b':\n exit()\n\n if user_ssid not in employees:\n self.display_error('Invalid login, please try again')\n else:\n return employees[user_ssid]\n\n def get_user_input(self, message):\n ''' Get a single user input '''\n return input('{:>20}'.format(message))\n\n def get_user_form(self, fields):\n ''' Collect user inputs in a form to process with regex validation \n Returns false if the user cancels the operation\n Fields = {Field name : [Regex, Validation instructions]} '''\n print(\"Press b to go back\")\n form = []\n\n for field in fields:\n # Disable the ability to change id's\n if field == 'id': continue\n\n \n \n # If there is no specific regex validation to the input\n if fields[field] is None:\n answer = input(format_function_name(field) + ': ')\n\n if answer.lower() == 'b':\n return False\n\n else:\n match = False\n # Check for input message\n msg = None\n if len(fields[field]) == 4: \n msg = fields[field][3]\n else:\n msg = field\n\n while not match:\n answer = input(format_function_name(msg) + ': ')\n\n if answer.lower() == 'b':\n return False\n\n match = re.search(fields[field][0], answer)\n\n if not match:\n print(fields[field][1])\n\n # Calls validation function, if one is provided\n if len(fields[field]) == 3 and match:\n if callable(fields[field][2]):\n res = fields[field][2](form, answer)\n match = res[0]\n if res[0] == False:\n print(res[1])\n \n \n form.append(answer)\n\n return form\n\n def exit_prompt(self):\n ''' Prompt the user to exit the program '''\n\n prompt_answer = input(\"\\n\\nAre you sure you want to exit? 
(Y/N) \")\n if prompt_answer.lower() == 'y':\n exit()\n\n def get_user_option(self, options):\n ''' Prompt the user to select an option from a list \n Returns the selected item and supports a list of methods ''' \n\n invalid = True\n\n # Check if options are methods\n if callable(options[0]):\n temp_options = [format_function_name(option.__name__) for option in options]\n else:\n temp_options = options\n\n for index, option in enumerate(temp_options):\n print(' ' + str(index + 1) + '.', option)\n\n # Newline\n print('')\n\n while invalid:\n opt = self.get_user_input('Select an option: ')\n if opt.isnumeric():\n opt = int(opt)\n if opt < 0 or opt > len(temp_options):\n print(\"Invalid input, please input a range between %d and %d.\" % (1, len(temp_options)))\n else:\n invalid = False\n elif opt == 'b':\n return False\n\n return options[opt - 1]\n\n def display_error(self, errorMsg):\n print()\n print('\\t' + errorMsg)\n print()\n\n def interface_loop(self):\n while True:\n options = self.current_menu.display()\n\n choice = self.get_user_input(\"Enter a choice: \")\n\n # Check if the option is a command\n if not str.isnumeric(choice):\n if choice == 'q' or choice == 'Q':\n exit()\n elif choice == 'b' or choice == 'B':\n # If the parent menu is None, the current menu is the main menu\n if self.current_menu.parent is None:\n self.exit_prompt()\n else:\n self.current_menu = self.current_menu.parent\n else:\n print(\"Invalid command: %s\" % (choice))\n\n else:\n\n choice = int(choice) - 1\n\n # Check if the input is invalid, below 1 or above the range of options\n if choice < 0 or choice > len(options) - 1:\n print(\"Invalid input, please input a range between %d and %d.\" % (1, len(options)))\n else:\n self.current_menu = self.current_menu.select_option(choice, options)\n\n print('\\n\\n')","sub_path":"src/Presentation/UserInterface.py","file_name":"UserInterface.py","file_ext":"py","file_size_in_byte":23271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"638969409","text":"#!/cygdrive/c/ProgramData/Miniconda3/python\r\nimport discord\r\nimport sqlite3\r\nimport DB\r\n\r\nprint(\"LAUNCHING BOT\")\r\n\r\nTOKEN = 'changeme'\r\n\r\nclient = discord.Client()\r\n\r\n@client.event\r\nasync def on_message(message):\r\n # we do not want the bot to reply to itself\r\n\tif message.author == client.user:\r\n\t\treturn\r\n\r\n\tif message.content.startswith('!event new'):\r\n\t\tprint(type(message.channel))\r\n\t#\tserver = client.get_server(message.server.id)\r\n\t#\tchan = server.get_channel(message.channel.id)\r\n\t\ttitle = message.content[len('!event new'):]\r\n\t\tDB.newActivity(title)\r\n\t\tawait client.send_message(chan, 'New activity set')\r\n\t\t\r\n\telif message.content == '!event all':\r\n\t\tsend = '\\n'.join([x[0] for x in DB.getAllActivities()])\r\n\t\tprint(send)\r\n\t\tawait client.send_message(message.channel, send)\r\n\t\t\r\n\r\n@client.event\r\nasync def on_ready():\r\n print('Logged in as')\r\n print(client.user.name)\r\n print(client.user.id)\r\n print('------')\r\n\r\nclient.run(TOKEN)\r\n","sub_path":"EventBot.py","file_name":"EventBot.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"268951431","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
/home/thomas/software/codegra.fs/env/lib/python3.7/site-packages/codegra_fs/utils.py\n# Compiled at: 2019-02-14 08:40:52\n# Size of source mod 2**32: 2560 bytes\nimport sys, typing as t\nfrom collections import defaultdict\nimport requests, codegra_fs\nT = t.TypeVar('T')\nY = t.TypeVar('Y')\n\ndef _get_fuse_version_info() -> t.Tuple[(int, int)]:\n if not sys.platform.startswith('win32'):\n return (-1, -1)\n import winfspy, cffi\n ffi = cffi.FFI()\n res = ffi.new('unsigned int *')\n if winfspy.lib.FspVersion(res) != 0:\n return (0, 0)\n return (\n res[0] >> 16 & 65535, res[0] & 65535)\n\n\ndef get_fuse_install_message() -> t.Optional[t.Tuple[(str, t.Optional[str])]]:\n try:\n import fuse\n except:\n pass\n else:\n if sys.platform.startswith('win32'):\n winfsp_version = _get_fuse_version_info()\n if winfsp_version < (1, 4):\n return ('You need at least WinFsp version 1.4 (currently in beta).',\n 'https://github.com/billziss-gh/winfsp/releases')\n return\n if sys.platform.startswith('darwin'):\n return ('Fuse is not installed, this can be done by installing OSXFuse',\n 'https://osxfuse.github.io/')\n if sys.platform.startswith('linux'):\n return ('Fuse is not installed, this can be done by doing `sudo apt install fuse` on ubuntu',\n None)\n if sys.platform.startswith('win32'):\n return ('WinFsp not installed, please download version 1.4 (currently in beta) or later.',\n 'https://github.com/billziss-gh/winfsp/releases')\n return ('Unsupported platform, only GNU/Linux, Mac and Windows are supported',\n None)\n\n\ndef newer_version_available() -> bool:\n req = requests.get('https://codegra.de/.cgfs.version', timeout=2)\n return req.status_code < 300 and tuple((int(p) for p in req.content.decode('utf8').strip().split('.'))) > codegra_fs.__version__\n\n\ndef find_all_dups(seq: t.Sequence[T], key: t.Callable[([T], Y)]) -> t.List[t.Tuple[(T, ...)]]:\n dct = defaultdict(list)\n for el in seq:\n dct[key(el)].append(el)\n\n return [tuple(v) for v in dct.values() if len(v) > 1]\n\n\ndef name_of_user(user: t.Dict[(str, t.Any)]) -> str:\n if user.get('group') is not None:\n return 'Group \"{}\"'.format(user['group']['name'])\n return user['name']\n\n\ndef format_datestring(datestring: str) -> str:\n return datestring.replace('T', ' ').split('.')[0]","sub_path":"pycfiles/CodeGra.fs-0.5.0.linux-x86_64.tar/utils.cpython-37.py","file_name":"utils.cpython-37.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"507482483","text":"import argparse\nimport base64\nimport json\nimport os\nimport sys\n\nimport common_functions\nimport entity_detector\n\n#Initial Query:\n# Confidence Greater Than or Equal To Zero\ninitial_query = json.loads('''{\n \"condition\": \"AND\",\n \"rules\": [\n {\n \"field\": \"table.tags\",\n \"operator\": \"contains\",\n \"type\": \"string\",\n \"value\": []\n }\n ],\n \"valid\": true\n}''')\n\n\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description = 'Print Hostnames and Tag Details for hosts with the provided Tag')\n\n required = parser.add_argument_group('required arguments')\n\n required.add_argument(\"-t\", \"--tag\", required=True, \n help=\"Tag for which to search. 
If tag includes spaces, enclose the string in quotes\")\n\n args = parser.parse_args()\n\n initial_query['rules'][0]['value'].append(args.tag)\n\n for host in common_functions.get_hostnames(initial_query):\n print(host)\n \n","sub_path":"print/tagged_hosts.py","file_name":"tagged_hosts.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"76775239","text":"# -*- coding: utf-8 -*-\n#\nfrom lib.commands import Base_Command\nfrom lib.modules.database import Database\n\nclass Delete(Base_Command.Base_Command):\n\n def admin(self, _nick):\n db = Database()\n try:\n db.delete('admins', 'admin', _nick.strip())\n except:\n return False\n return True\n\n def delete(self):\n if len(self.args) > 1:\n content = ' '.join(self.args[1:])\n result = None\n if self.args[0] == 'admin' and self.check_admin():\n result = self.admin(content)\n if result:\n self.parent.conn.privmsg(self.channel, \"%s, %s removido.\" % (self.nick, self.args[0]))\n\n def run(self):\n self.delete()\n","sub_path":"lib/commands/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"357754213","text":"# -*- coding=utf-8 -*-\nimport itertools\nimport re\n\nSCALE_MANIFEST_FILE = \"/data/manifest.json\"\nSCALE_UPDATE_SERVER = \"https://update.freenas.org/scale\"\n\nUPLOAD_LOCATION = \"/var/tmp/firmware\"\n\nSEP = re.compile(r\"[-.]\")\n\n\ndef can_update(old_version, new_version):\n for x, y in itertools.zip_longest(SEP.split(old_version), SEP.split(new_version), fillvalue=''):\n if x < y:\n return True\n if x > y:\n return False\n\n return False\n","sub_path":"src/middlewared/middlewared/plugins/update_/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"458028277","text":"typo_full_names = {}\n\ndef audit_street_name(street_types, street_name):\n m = street_type_re.search(street_name)\n if m:\n street_type = m.group()\n if (all_types[street_type] < 20) and (street_type not in expected) and (street_type not in abbr_mapping):\n if street_type in typo_full_names:\n typo_full_names[street_type].append(street_name)\n else:\n typo_full_names.update({ street_type:[street_name] })\n\ndef audit_name(filename):\n for event, elem in ET.iterparse(filename):\n if is_street_name(elem):\n audit_street_name(street_types, elem.attrib['v']) \n # print_sorted_dict(street_types)\n return typo_full_names\n\naudit_name(dataset)","sub_path":"modules/07audit_names.py","file_name":"07audit_names.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"383446758","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django_dynamic_fixture import get\n\n\nclass ProfileViewsTest(TestCase):\n\n def setUp(self):\n self.user = get(User)\n self.user.set_password('test')\n self.user.save()\n self.client.login(username=self.user.username, password='test')\n\n def test_edit_profile(self):\n resp = self.client.get(\n reverse('profiles_profile_edit'),\n )\n self.assertTrue(resp.status_code, 200)\n resp = self.client.post(\n reverse('profiles_profile_edit'),\n data={\n 'first_name': 'Read',\n 'last_name': 'Docs',\n 'homepage': 'readthedocs.org',\n 
},\n )\n self.assertTrue(resp.status_code, 200)\n\n self.user.refresh_from_db()\n self.user.profile.refresh_from_db()\n self.assertEqual(self.user.first_name, 'Read')\n self.assertEqual(self.user.last_name, 'Docs')\n self.assertEqual(self.user.profile.homepage, 'readthedocs.org')\n\n def test_edit_profile_with_invalid_values(self):\n resp = self.client.get(\n reverse('profiles_profile_edit'),\n )\n self.assertTrue(resp.status_code, 200)\n\n resp = self.client.post(\n reverse('profiles_profile_edit'),\n data={\n 'first_name': 'a' * 31,\n 'last_name': 'b' * 31,\n 'homepage': 'c' * 101,\n },\n )\n\n FORM_ERROR_FORMAT = 'Ensure this value has at most {} characters (it has {}).'\n\n self.assertFormError(resp, form='form', field='first_name', errors=FORM_ERROR_FORMAT.format(30, 31))\n self.assertFormError(resp, form='form', field='last_name', errors=FORM_ERROR_FORMAT.format(30, 31))\n self.assertFormError(resp, form='form', field='homepage', errors=FORM_ERROR_FORMAT.format(100, 101))\n\n def test_delete_account(self):\n resp = self.client.get(\n reverse('delete_account'),\n )\n self.assertEqual(resp.status_code, 200)\n resp = self.client.post(\n reverse('delete_account'),\n data={\n 'username': self.user.username,\n },\n )\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp['Location'], reverse('homepage'))\n\n self.assertFalse(\n User.objects.filter(username=self.user.username).exists(),\n )\n\n def test_profile_detail(self):\n resp = self.client.get(\n reverse('profiles_profile_detail', args=(self.user.username,)),\n )\n self.assertTrue(resp.status_code, 200)\n\n def test_profile_detail_logout(self):\n self.client.logout()\n resp = self.client.get(\n reverse('profiles_profile_detail', args=(self.user.username,)),\n )\n self.assertTrue(resp.status_code, 200)\n\n def test_profile_detail_not_found(self):\n resp = self.client.get(\n reverse('profiles_profile_detail', args=('not-found',)),\n )\n self.assertTrue(resp.status_code, 404)\n\n def test_account_advertising(self):\n resp = self.client.get(\n reverse('account_advertising'),\n )\n self.assertEqual(resp.status_code, 200)\n self.assertTrue(self.user.profile.allow_ads)\n resp = self.client.post(\n reverse('account_advertising'),\n data={'allow_ads': False},\n )\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp['Location'], reverse('account_advertising'))\n self.user.profile.refresh_from_db()\n self.assertFalse(self.user.profile.allow_ads)\n","sub_path":"readthedocs/rtd_tests/tests/test_profile_views.py","file_name":"test_profile_views.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"24637676","text":"import unittest\n\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.core.periodic_table import Element\n\nfrom matminer_design_module.matminer.design.composition import CompositionGenerator\n\n\nclass TestCompositionGenerators(unittest.TestCase):\n\n def test_simple_generator(self):\n gen = CompositionGenerator(['Al', Element('Fe'), 'Zr'],\n min_elements=1, max_elements=2, spacing=4)\n\n # Test the stoichiometry generator\n stoichs = list(gen._generate_stoichiometries(1, 4))\n self.assertEquals([(4,)], stoichs)\n stoichs = set(gen._generate_stoichiometries(2, 4))\n self.assertEquals({(3, 1), (2, 2), (1, 3)}, stoichs)\n\n # Test the composition generator\n comps = list(gen.generate_entries())\n self.assertEquals(len(comps), len(set(comps))) # No duplicates\n self.assertEquals(3 + 3 * 3, len(comps)) # 3 
elements, 3 entries/binary * 3 binaries\n self.assertIn(Composition('Al4'), comps)\n self.assertIn(Composition('Al2Fe2'), comps)\n\n # Test the dataframe generator (from the base class)\n data = gen.generate_dataframe('composition')\n self.assertEquals(comps, data['composition'].tolist())\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_gen_comp.py","file_name":"test_gen_comp.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"314615030","text":"import json\nfrom math import floor\n\n\nclass Renderer():\n\n\n\tdef __init__(self, app, pygame):\n\t\tself.pygame = pygame\n\t\tself.spritemap = self.pygame.image.load(\"data/textures/32x32.png\")\n\t\tself.show_fps = True\n\t\tself.no_ghost_px = int(app.tile/4)\n\t\twith open(\"data/src.json\", \"r\") as src_file:\n\t\t\tself.map_src = json.load(src_file)\n\t\twith open(\"data/obst.json\", \"r\") as obst_file:\n\t\t\tself.obstacles = json.load(obst_file)\n\n\n\tdef render(self, app, player):\n\t\tif app.need_new_map:\n\t\t\tsrc = self.map_src[player.current_map]\n\t\t\tself.map = self.pygame.image.load(src).convert()\n\t\t\t# self.map = self.pygame.image.load(\"data/textures/maps/C3.png\")\n\n\t\t# app.display_surf.fill(app.color[\"black\"])\n\t\tapp.display_surf.blit(self.map, (0,0))\n\t\tif app.need_new_map:\n\t\t\tself.pygame.display.update((0,0,app.width, app.height))\n\t\tapp.display_surf.blit(\n\t\t\tself.spritemap, (player.xpos*app.tile, player.ypos*app.tile),\n\t\t\t(0*32, 53*32, 32, 32)\n\t\t)\n\n\t\tif self.show_fps:\n\t\t\tfps = app.clock.get_fps()\n\t\t\tfps = floor(fps+1) # rounds to the nearest int\n\t\t\tfps_pos = (5, 5)\n\t\t\tfps_surf = app.font.render(str(fps), False, app.color[\"white\"])\n\t\t\tfps_surf_rect = (\n\t\t\t\tfps_pos[0], fps_pos[1],\n\t\t\t\tfps_surf.get_width(), fps_surf.get_height()\n\t\t\t)\n\t\t\tapp.display_surf.blit(fps_surf, fps_pos)\n\n\t\tupdate_rect = [(\n\t\t\tplayer.xpos*app.tile, player.ypos*app.tile,\n\t\t\tapp.tile, app.tile\n\t\t)]\n\t\tupdate_rect.append(fps_surf_rect)\n\n\t\tself.pygame.display.update(update_rect)\n\n\t\tapp.need_new_map = False\n\t\t# self.pygame.display.flip() # sets the changes into effect\n\n\n\tdef move_anim(self, app, player, axis, direction):\n\t\tspeed = 2\n\t\tif player.sprint:\n\t\t\tspeed *= 2\n\n\t\tif axis == 0:\n\t\t\tmodifiers = [1,0]\n\t\telif axis == 1:\n\t\t\tmodifiers = [0,1]\n\n\t\tfor i in range(int(app.tile)):\n\t\t\tadderx = i * modifiers[0] * direction\n\t\t\taddery = i * modifiers[1] * direction\n\t\t\tapp.display_surf.fill(app.color[\"black\"])\n\t\t\tapp.display_surf.blit(self.map, (0,0))\n\t\t\tapp.display_surf.blit(\n\t\t\t\tself.spritemap,\n\t\t\t\t(player.xpos*app.tile+adderx, player.ypos*app.tile+addery),\n\t\t\t\t(0*32, 53*32, 32, 32)\n\t\t\t)\n\t\t\tself.pygame.time.wait(speed)\n\t\t\tupdate_rect = (\n\t\t\t\tplayer.xpos*app.tile+adderx-self.no_ghost_px,\n\t\t\t\tplayer.ypos*app.tile+addery-self.no_ghost_px,\n\t\t\t\tapp.tile+2*self.no_ghost_px,\n\t\t\t\tapp.tile+2*self.no_ghost_px\n\t\t\t)\n\t\t\tself.pygame.display.update(update_rect)\n\t\t\t# self.pygame.display.flip()\n\n\t\tif axis == 0:\n\t\t\tplayer.xpos += 1 * direction\n\t\telif axis == 1:\n\t\t\tplayer.ypos += 1 * direction\n\n\n\tdef legal_move(self, player, axis, direction):\n\t\tcurrent_pos = [player.xpos, player.ypos]\n\t\tnew_pos = [current_pos[0], current_pos[1]]\n\t\tnew_pos[axis] += direction\n\t\tif new_pos in self.obstacles[player.current_map]:\n\t\t\treturn 
False\n\t\treturn True\n","sub_path":"lib/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"605872385","text":"### Task 4.2\n# Write a function that check whether a string is a palindrome or not. Usage of\n# any reversing functions is prohibited. To check your implementation you can use\n# strings from [here](https://en.wikipedia.org/wiki/Palindrome#Famous_palindromes).\n\ndef is_this_palindrome (str_to_check):\n reg_str = ''\n rev_str = ''\n len_of_str = 0\n for ch in str_to_check:\n reg_str = reg_str + ch\n len_of_str += 1 # as we shouldn't use any func - let's calculate manually\n for i in range (len_of_str):\n rev_ch = str_to_check[len_of_str - i - 1]\n rev_str = rev_str + rev_ch\n\n if reg_str == rev_str:\n return 'is palindrome'\n else:\n return 'is not palindrome'\n\nmayby_palindrom = \"sator arepo tenet opera rotas\" # please enter only lower case and no spaces in end, line beggin and between words \n # othercase \"а роза упала на лапу азора\" will work unexpectedly )))\nprint(is_this_palindrome(mayby_palindrom))\n","sub_path":"02_Functions/Task 4.2.py","file_name":"Task 4.2.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"522979189","text":"import csv\nimport unittest\nfrom time import sleep\n\nimport requests\n\nfrom automate_driver.automate_driver import AutomateDriver\nfrom model.assert_text import AssertText\nfrom model.connect_sql import ConnectSql\nfrom model.send_mail import request_base_url\nfrom pages.base.base_page import BasePage\nfrom pages.base.lon_in_base import LogInBase\nfrom pages.statistical_form.obd_form_page import ObdFormPage\nfrom pages.statistical_form.search_sql import SearchSql\nfrom pages.statistical_form.statistical_form_page import StatisticalFormPage\nfrom pages.statistical_form.statistical_form_page_read_csv import StatisticalFormPageReadCsv\n\n\nclass TestCase186ObdTroubleForm(unittest.TestCase):\n def setUp(self):\n # 前置条件\n # 实例化对象\n self.driver = AutomateDriver()\n self.base_url = self.driver.base_url\n self.base_page = BasePage(self.driver, self.base_url)\n self.statistical_form_page = StatisticalFormPage(self.driver, self.base_url)\n self.statistical_form_page_read_csv = StatisticalFormPageReadCsv()\n self.log_in_base = LogInBase(self.driver, self.base_url)\n self.connect_sql = ConnectSql()\n self.search_sql = SearchSql(self.driver, self.base_url)\n self.obd_form_page = ObdFormPage(self.driver, self.base_url)\n # 打开页面,填写用户名、密码、点击登录\n self.base_page.open_page()\n self.driver.set_window_max()\n self.driver.implicitly_wait(5)\n self.driver.clear_cookies()\n self.log_in_base.log_in_jimitest()\n self.assert_text = AssertText()\n\n # 登录之后点击控制台,然后点击设置\n self.statistical_form_page.click_control_after_click_statistical_form_page()\n sleep(3)\n\n def tearDown(self):\n # 退出浏览器\n self.driver.quit_browser()\n\n def test_case_obd_trouble_form(self):\n # 断言url\n expect_url_after_click_statistical_form = self.base_url + '/deviceReport/statisticalReport'\n self.assertEqual(expect_url_after_click_statistical_form,\n self.statistical_form_page.actual_url_after_statistical_form())\n\n # 点击obd统计的里程报表\n self.obd_form_page.click_obd_trouble_form_button()\n # 切换到odb里程统计的frame里面\n self.obd_form_page.switch_to_obd_trouble_form_frame()\n\n csv_file = self.statistical_form_page_read_csv.read_csv('obd_milage_report_search_data.csv')\n 
csv_data = csv.reader(csv_file)\n is_header = True\n for row in csv_data:\n if is_header:\n is_header = False\n continue\n search_data = {\n 'user_name': row[0],\n 'choose_date': row[2],\n 'begin_time': row[3],\n 'end_time': row[4]\n }\n self.obd_form_page.add_data_to_search_obd_trouble_form(search_data)\n\n # 获取页面上设备的信息\n dev_total_mile = self.obd_form_page.get_dev_total_mile_obd_vehicle_condition_form()\n dev_avg_oil = self.obd_form_page.get_dev_avg_oil_obd_vehicle_condition_form()\n dev_avg_speed = self.obd_form_page.get_avg_oil_obd_vehicle_condition_form()\n dev_total_oil = self.obd_form_page.get_dev_total_oil_obd_vehicle_condition_form()\n begin_time = self.obd_form_page.get_begin_time()\n end_time = self.obd_form_page.get_end_time()\n\n request_url = request_base_url()\n header = {\n '_method_': 'getObdVehicleCondition',\n 'imeis': self.obd_form_page.search_imei(),\n 'startTime': begin_time,\n 'endTime': end_time,\n 'type': 'carfault'\n }\n sleep(10)\n res_json = requests.post(request_url, data=header).json()\n\n total_page = self.obd_form_page.get_obd_list_total_page_number()\n if total_page == 0:\n self.assertEqual('0', dev_total_mile)\n self.assertEqual('0', dev_avg_oil)\n self.assertEqual('0', dev_avg_speed)\n self.assertEqual('0', dev_total_oil)\n\n elif total_page == 1:\n # 获取页面上的里程和耗油\n mile_and_oil_list = []\n per_page_total_number = self.obd_form_page.get_per_page_total_number()\n for n in range(per_page_total_number):\n mile_and_oil_list.append({\n 'gpsTime': self.obd_form_page.get_gps_time_in_vehicle_condition_form(n),\n 'lng': float(self.obd_form_page.get_lot_in_trouble_form(n)),\n 'lat': float(self.obd_form_page.get_lat_in_trouble_form(n)),\n 'errorCode': self.obd_form_page.get_error_code_in_trouble_form(n)\n })\n\n res_data = res_json['data']\n for data in res_data:\n del data['acc'], data['addr'], data['batteryVoltage'], data['direction'], data[\n 'engineLoad'], data['fuelConsumption1'], data['fuelConsumption2'], data['gpsInfo'], data[\n 'gpsSpeed'], data['heatingTime'], \\\n data['idleTime'], data['imei'], data[\n 'maxSpeed'], data['oilPer'], data['rapidAcceleration'], data['rapidDeceleration'], data[\n 'rotatingSpeed'], data[\n 'speed'], data['throttlePosition'], data['totalMileage'], data['waterTemperature']\n print(mile_and_oil_list)\n print(res_data)\n self.assertEqual(mile_and_oil_list, res_data)\n else:\n mile_and_oil_list = []\n for i in range(total_page):\n # 循环点击每一页\n self.obd_form_page.click_per_page(i)\n # 获取页面上的里程和耗油\n per_page_total_number = self.obd_form_page.get_per_page_total_number()\n for n in range(per_page_total_number):\n mile_and_oil_list.append({\n 'gpsTime': self.obd_form_page.get_gps_time_in_vehicle_condition_form(n),\n 'lng': float(self.obd_form_page.get_lot_in_trouble_form(n)),\n 'lat': float(self.obd_form_page.get_lat_in_trouble_form(n)),\n 'errorCode': self.obd_form_page.get_error_code_in_trouble_form(n)\n })\n\n res_data = res_json['data']\n for data in res_data:\n del data['acc'], data['addr'], data['batteryVoltage'], data['direction'], data[\n 'engineLoad'], data['fuelConsumption1'], data['fuelConsumption2'], data['gpsInfo'], data[\n 'gpsSpeed'], data['heatingTime'], \\\n data['idleTime'], data['imei'], data[\n 'maxSpeed'], data['oilPer'], data['rapidAcceleration'], data['rapidDeceleration'], data[\n 'rotatingSpeed'], data[\n 'speed'], data['throttlePosition'], data['totalMileage'], data['waterTemperature']\n print(mile_and_oil_list)\n print(res_data)\n self.assertEqual(mile_and_oil_list, res_data)\n csv_file.close()\n 
self.driver.default_frame()\n","sub_path":"testcases/statistical_form3/test_case_186_0925_obd_trouble_form.py","file_name":"test_case_186_0925_obd_trouble_form.py","file_ext":"py","file_size_in_byte":7434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"393666915","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profesionales', '0002_auto_20150530_1324'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('turnos', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='turno',\n name='pro_id',\n ),\n migrations.RemoveField(\n model_name='turno',\n name='usu_id',\n ),\n migrations.AddField(\n model_name='turno',\n name='pro',\n field=models.ForeignKey(default=1, to='profesionales.Profesional'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='turno',\n name='usu',\n field=models.ForeignKey(default=2, to=settings.AUTH_USER_MODEL),\n preserve_default=False,\n ),\n ]\n","sub_path":"apps/turnos/migrations/0002_auto_20150601_1724.py","file_name":"0002_auto_20150601_1724.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"636396669","text":"import numpy as np\nfrom matplotlib import pyplot\nimport gym\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, LSTM\nfrom keras.optimizers import Adam\n\nfrom rl.agents.cem import CEMAgent\nfrom rl.memory import EpisodeParameterMemory\n\nENV_NAME = 'CartPole-v0'\n\n# Get the environment and extract the number of actions.\nenv = gym.make(ENV_NAME)\nnp.random.seed(123)\nenv.seed(123)\n\nnb_actions = env.action_space.n\nobs_dim = env.observation_space.shape[0]\n\n\nmodelL = Sequential()\nmodelL.add(LSTM(nb_actions, input_shape=(1,) + env.observation_space.shape))\nmodelL.add(Activation('softmax'))\nmemoryL = EpisodeParameterMemory(limit=1000, window_length=1)\ncemL = CEMAgent(model=modelL, nb_actions=nb_actions, memory=memoryL,\n batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05)\ncemL.compile()\nhistL = cemL.fit(env, nb_steps=50000, visualize=False, verbose=1)\ncemL.save_weights('cemL_{}_params.h5f'.format(ENV_NAME), overwrite=True)\ncemL.test(env, nb_episodes=5, visualize=True)\n\n\nmodelD = Sequential()\nmodelD.add(Flatten(input_shape=(1,) + env.observation_space.shape))\nmodelD.add(Dense(nb_actions))\nmodelD.add(Activation('softmax'))\nmemoryD = EpisodeParameterMemory(limit=1000, window_length=1)\ncemD = CEMAgent(model=modelD, nb_actions=nb_actions, memory=memoryD,\n batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05)\ncemD.compile()\nhistD = cemD.fit(env, nb_steps=50000, visualize=False, verbose=1)\ncemD.save_weights('cemD_{}_params.h5f'.format(ENV_NAME), overwrite=True)\ncemD.test(env, nb_episodes=5, visualize=True)\n\n\npyplot.plot(histL.history['nb_steps'], histL.history['episode_reward'], linewidth=3, label='LSTM')\npyplot.plot(histD.history['nb_steps'], histD.history['episode_reward'], linewidth=3, 
label='Dense')\npyplot.grid()\npyplot.legend()\npyplot.xlabel('steps')\npyplot.ylabel('reward')\n#pyplot.yscale('log')\npyplot.show()\n","sub_path":"examples/_cem_cartpole_compare.py","file_name":"_cem_cartpole_compare.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"227254819","text":"# -*- coding:utf-8 -*-\n\nfrom dbbasic import dbMgr\n\ndef update(id, value):\n\twith dbMgr() as cursor:\n\t\tcursor.execute('update work set value=? where id=?', (value, id))\n\t\t\ndef get(id):\n\twith dbMgr() as cursor:\n\t\tcursor.execute('select * from work where id=?' , (id,))\n\t\tvalue = cursor.fetchone()\n\t\tif value == None:\n\t\t\treturn None\n\t\treturn value[1]","sub_path":"www/transpack/workDao.py","file_name":"workDao.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"297116243","text":"\"\"\"\r\nThis is a test file, that you can use to validate \r\n\"\"\"\r\n\r\n#%% validate that pathpy was installed correct\r\nimport pathpy as pp\r\npaths = pp.Paths()\r\npaths.add_path('a,b,c')\r\nprint(paths)\r\n\r\n#%% validate that kernel was started in correct root directory\r\nt = pp.TemporalNetwork.read_file('data/temporal_clusters.tedges')\r\nprint(t)\r\n\r\n\r\n#%% validate that infomap is installed correctly\r\nimport infomap\r\nprint(\"Infomap version:\", infomap.Infomap().version)\r\nprint(\"Make sure it is at least 1.0.0-beta.14\")\r\n\r\n#%% check that relative read and write works\r\nfrom pathlib import Path\r\nPath('output').mkdir(exist_ok=True)\r\nim = infomap.Infomap(\"\")\r\nim.network().readInputData(\"data/ninetriangles.net\")\r\nim.run()\r\nim.writeClu(\"output/ninetriangles.clu\")\r\nprint(im.maxTreeDepth()) # Should print 3\r\n","sub_path":"live_solutions/0_test_environment.py","file_name":"0_test_environment.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"203834401","text":"\"\"\"\nInput: Start and end date as datetime.date.\nOutput: The quantity of the rest days as an integer.\n\"\"\"\n\nfrom datetime import date, timedelta\n\ndef checkio(from_date, to_date):\n result = 0\n for i in range((to_date - from_date).days + 1):\n if from_date.isoweekday() == 6 or from_date.isoweekday() == 7:\n result += 1\n from_date += timedelta(days=1) # 设置一个时间间隔为1天\n # 遍历过程从开始日期到结束日期(+1)检查weekday,符合周六周日的记入结果\n return result\n\nif __name__ == \"__main__\":\n assert checkio(date(2013, 9, 18), date(2013, 9, 23)) == 2, \"1st example\"\n assert checkio(date(2013, 1, 1), date(2013, 2, 1)) == 8, \"2nd example\"\n assert checkio(date(2013, 2, 2), date(2013, 2, 3)) == 2, \"3rd example\"\n print(\"done\")\n","sub_path":"AlgorithmTraining/Checkio/pycon_tw/p2_weekend_counter.py","file_name":"p2_weekend_counter.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"256753390","text":"from django.conf import settings\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.views import generic\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.core.exceptions import ValidationError\nfrom . import models\nfrom . 
import forms\nfrom account.models import AccessToken\nfrom helpers import MailHelper\n\n\nclass BaseMixin(generic.View):\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if request.user.is_superuser:\n return redirect('/')\n\n return super(BaseMixin, self).dispatch(request, *args, **kwargs)\n\n\nclass ProjectDetail(BaseMixin, generic.View):\n\n def get(self, request, pk):\n\n project = get_object_or_404(models.Project, pk=self.request.user.project.pk)\n\n return render(request, 'views/project/detail.html', {\n 'project': project,\n 'is_onwer': project.owner == request.user})\n\n\nclass ProjectCreate(BaseMixin, generic.View):\n\n def get(self, request, pk):\n\n try:\n project = models.Project.objects.create_project(request.user)\n except Exception as e:\n raise e\n\n return redirect('project_detail')\n\n\nclass ProjectUpdate(BaseMixin, generic.View):\n\n def post(self, request, pk):\n\n project = request.user.project\n form = forms.ProjectUpdateForm(request.POST, request.FILES, instance=project)\n\n try:\n form.save()\n except Exception as e:\n raise e\n\n return redirect('project_detail')\n\n\nclass InviteCreate(BaseMixin, generic.View):\n\n def post(self, request, pk):\n\n invite = models.Invite(\n by=request.user,\n project=request.user.project,\n to=request.POST['to'])\n\n\n try:\n invite.validate_unique()\n except ValidationError as e:\n raise e\n\n token = AccessToken.objects.create_token(\n request.POST['to'],\n 'Empreendedor')\n try:\n invite.save()\n except Exception as e:\n raise e\n\n\n recipients = [{\n 'email': request.POST['to'],\n 'type': 'to'\n }]\n\n subject = \"Você recebeu um convite para participar do projeto %s\" % (request.user.project, )\n MailHelper().sendmail(\n template='new_invite',\n recipients=recipients,\n subject=subject,\n bulk=False,\n user=request.user.get_full_name(),\n token=str(token),\n project=request.user.project.title,\n action_url=settings.BASE_URL + '/registrar')\n\n return redirect('project_detail')\n\n\nclass InviteDelete(BaseMixin, generic.View):\n\n def get(self, request, pk, invite_pk):\n\n invite = models.Invite.objects.get(pk=invite_pk)\n token = AccessToken.objects.delete_token(invite.to)\n invite.delete()\n\n return redirect('project_detail')\n\n\nclass AreasList(BaseMixin, generic.View):\n\n def get(self, request, pk):\n\n project = request.user.project\n areas = models.Area.objects.filter(project=project).order_by('relevance')\n return render(\n request,\n 'views/project/areas/list.html',\n {'areas':areas, 'project':project})\n\n\nclass AreaDetail(BaseMixin, generic.View):\n\n def get(self, request, pk, area_slug):\n\n project = request.user.project\n area = get_object_or_404(models.Area, slug=area_slug, project=project)\n polls = area.polls.all().order_by('relevance')\n\n return render(\n request,\n 'views/project/areas/detail.html',\n {'area':area, 'project':project, 'polls':polls, 'need_backup': True})\n\n\nclass AreaUpdate(BaseMixin, generic.View):\n\n def post(self, request, pk, area_slug):\n\n project = request.user.project\n info = None\n\n if 'info_pk' in request.POST:\n info = models.Info.objects.get(pk=request.POST['info_pk'])\n else:\n poll = get_object_or_404(models.Poll, pk=request.POST['poll_pk'])\n info = models.Info(project=project, question=poll)\n\n info.content = request.POST['content']\n\n try:\n info.save()\n except Exception as e:\n raise e\n\n project_users = list(project.users.values_list('email', flat=True).all())\n\n if request.user.email in project_users:\n 
project_users.remove(request.user.email)\n\n if project_users:\n recipients = []\n for user in project_users:\n recipients.append({\n \"email\": user,\n \"name\": project.users.get(email=user).get_full_name(),\n \"type\": \"to\"\n })\n\n subject = \"%s editou informações do projeto %s!\" % (request.user.get_full_name(), project.title)\n MailHelper().sendmail(\n template='project_edit',\n recipients=recipients,\n subject=subject,\n owner=request.user.get_full_name(),\n action_url=settings.BASE_URL)\n\n return redirect('area_detail', area_slug=area_slug)\n\nclass CommentsList(BaseMixin, generic.View):\n\n def get(self, request, pk):\n\n project = request.user.project\n comments = models.Comment.objects.filter(project=project)\n return render(\n request,\n 'views/project/comments/list.html',\n {'comments':comments, 'project':project})\n\n\nclass CommentAnswerCreate(BaseMixin, generic.View):\n\n def post(self, request, pk, comment_pk):\n\n comment = models.Comment.objects.get(pk=comment_pk)\n new_answer = models.CommentAnswer(\n content=request.POST['content'],\n owner=request.user,\n comment=comment,\n project=comment.project)\n try:\n new_answer.save()\n except Exception as e:\n raise e\n\n project_users = list(comment.project.users.values_list('email', flat=True).all())\n if request.user.email in project_users:\n project_users.remove(request.user.email)\n\n if project_users:\n recipients = []\n for user in project_users:\n recipients.append({\n \"email\": user,\n \"name\": comment.project.users.get(email=user).get_full_name(),\n \"type\": \"to\"\n })\n\n subject = \"%s adicionou uma nova resposta a um comentário em %s!\" % (request.user.get_full_name(), comment.project.title)\n MailHelper().sendmail(\n template='new_answer',\n recipients=recipients,\n subject=subject,\n owner=request.user.get_full_name(),\n action_url=settings.BASE_URL)\n\n return redirect('comments_list')\n\n\nclass ChatList(BaseMixin, generic.View):\n\n def get(self, request, pk):\n\n project = request.user.project\n messages = models.Chat.objects.filter(project=project)\n return render(\n request,\n 'views/project/chat/list.html',\n {'chat':messages, 'project':project})\n\n\nclass ChatCreate(BaseMixin, generic.View):\n\n def post(self, request, pk):\n\n project = request.user.project\n message = models.Chat(\n content=request.POST['content'],\n owner=request.user,\n project=project)\n try:\n message.save()\n except Exception as e:\n raise e\n\n project_users = list(project.users.values_list('email', flat=True).all())\n if request.user.email in project_users:\n project_users.remove(request.user.email)\n\n if project_users:\n recipients = []\n for user in project_users:\n recipients.append({\n \"email\": user,\n \"name\": project.users.get(email=user).get_full_name(),\n \"type\": \"to\"\n })\n\n subject = \"%s enviou uma nova mensagem em %s!\" % (request.user.get_full_name(), project.title)\n MailHelper().sendmail(\n template='new_chat',\n recipients=recipients,\n subject=subject,\n owner=request.user.get_full_name(),\n action_url=settings.BASE_URL)\n\n return redirect('chat_list')\n","sub_path":"api/project/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"422528990","text":"# Main game waar de speler zoekt naar de code\nimport itertools\nfrom random import choice\nfrom Feedback import BlackPointsPlayer\ndef code():\n # random code word ge genereerdt\n global RandomCode\n RandomCode = 
[choice(\"abcdef\") for i in range(4)]\n # print(RandomCode)\n return RandomCode\ncode()\nglobal teller\nteller = 0\ndef guess(RandomCode, teller):\n # hier raad de speler de code\n global a\n while teller <= 9:\n a = list(input(\"raad de 4 letterige code \"))\n BlackPointsPlayer(a, RandomCode)\n if a == RandomCode:\n print(\"Did andwoord klopt helemaal\")\n break\n else:\n # telt de hoeveelheid pogingen die nog over zijn\n teller += 1\n Pogingen = str(10 - teller)\n print(\"u heeft nog \" + Pogingen + \" pogingen\")\n return\nguess(RandomCode, teller)\n\n","sub_path":"MasterMindMain.py","file_name":"MasterMindMain.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"535710242","text":"from source.text_categorization import TextCategorization\nfrom source.content_categorization import ContentCategorization\nfrom source.helpers.content_categorization.transform import articles_v1_to_v2\nfrom source.helpers.content_categorization.transform import representative_v2_to_v1\nfrom source.helpers.content_categorization.transform import unclassified_v2_to_v1\nfrom source.helpers.content_categorization.transform import corpus_v2_to_v1\nimport json\nimport sys\nsys.path.append('./source/')\nimport nltk\nimport operator\nimport random\nimport numpy\nimport math\nfrom tqdm import tqdm\nimport time\nimport pickle\nimport math\n\nwith open('./data/jan2018.json', 'r') as file:\n articles = json.load(file)\n\ncategories = {'seguranca': ['segurança','insegurança','seguranças','polícia','policias','crime', 'bombeiros',\n 'prisão','roubo','crimes','delegacia','suspeitos','suspeito','pm','presos',\n 'rebelião','detentos','tráfico'],\n 'justica' : ['justiça','advogados','tribunal','estado','partido','stf','lei','união','tjgo','ofício',\n 'juiz','decisão','julgamento','audiência','audiências','testemunha','testemunhas',\n 'ministra','ministro','stf','jurisprudência','ordem'],\n 'educacao' : ['educação','aluno','alunos','ensino','escolas','novatos',\n 'matrícula','prématrícula','mec','ufg','concurso'],\n 'cidade' : ['cidade','município','habitantes','região','infraestrutura','prefeitura','reparos',\n 'goiânia','obras','celg'],\n 'transito' : ['carro','transito','veículo','veículos','motorista','motoristas','acidente','km',\n 'rua','motociclista','rodovia','sentido','avenida','pedestres','pedestre','multa',\n 'colisão','detrango','cruzamento','rodovias','quilômetros','trecho','pedágios',\n 'triunfo','sinalização','transporte','frota'],\n 'saude': ['saúde','casos','tratamento','doença','cirurgias','tratamento','doenças','vida','hospital',\n 'pacientes']\n }\n\ncc = ContentCategorization()\nprint('Initialization ...')\n# cc.initialization(articles,categories)\nprint('Extracting representativity ...')\n# cc.extract_representativity()\nprint('Extracting representativity using version 2...')\n[articles_v2, categories_v2, bag_sentences_v2, unique_sentences_v2, bag_words_v2, unique_words_v2, bag_map] = articles_v1_to_v2(articles, categories)\ntc = TextCategorization(articles_v2, categories_v2, threshold=0.7, min_words_per_sentence=1, min_sentences_per_article=1)\n[v2_representative_sentences, v2_representative_words] = representative_v2_to_v1(tc.representative_sentences, bag_map, categories) \n[v2_unclassified_sentences, v2_unclassified_words] = unclassified_v2_to_v1(tc.unclassified_sentences, bag_map)\n# [v2_bag_words, v2_bag_sentences] = corpus_v2_to_v1(tc.corpus_word, tc.corpus_sentences, bag_map)\nprint('Inserting version 2 
representativity...')\ncc.bag_words = bag_words_v2\ncc.bag_sentences = bag_sentences_v2\ncc.unclassified_words = v2_unclassified_words\ncc.representative_words = v2_representative_words\ncc.representative_sentences = v2_representative_sentences\ncc.unclassified_sentences = v2_unclassified_sentences\ncc.unique_sentences = unique_sentences_v2\ncc.unique_words = unique_words_v2\ncc.index_words = {}\ncc.words_index = {}\ncc.index_sentences = {}\ncc.sentences_index = {}\ncc.wsm = {}\ncc.ssm = {}\nprint('Computing global frequency factor ...')\ncc.global_frequency_factor()\nprint('Computing log likelihood factor ...')\ncc.log_likelihood_factor()\nprint('Computing words weight ...')\ncc.words_weight()\n\ncat = sys.argv[1]\n\nprint('Extending '+cat.capitalize()+' representativity ...')\ncc.extend_representativity(cat)\n\nprint('Saving Object File ...')\npickle.dump( cc, open( \"cc_\"+cat.capitalize()+\".object\", \"wb\" ) )\n\n# cc2 = pickle.load( open( \"cc.object\", \"rb\" ) )\n\n","sub_path":"demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"217050864","text":"import importlib\nimport logging\nimport sys\nimport time\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any\n\nimport fire\nimport yaml\nfrom tau.core import HistoricNetworkScheduler\nfrom tau.event import Do\n\nfrom serenity.algo.api import StrategyContext\nfrom serenity.analytics.api import HDF5DataCaptureService, Mode, Snapshot\nfrom serenity.app.base import Application\nfrom serenity.db.api import connect_serenity_db, InstrumentCache, TypeCodeCache\nfrom serenity.marketdata.azure import AzureHistoricMarketdataService\nfrom serenity.pnl.api import MarketdataMarkService\nfrom serenity.position.api import PositionService, NullExchangePositionService\nfrom serenity.trading.oms import OrderManagerService, OrderPlacerService\nfrom serenity.trading.connector.simulator import AutoFillOrderPlacer\nfrom serenity.utils.config import Environment\n\n\nclass BacktestConfig:\n @staticmethod\n def load(config_path: Path, strategy_basedir: Path, start_time_txt: str, end_time_txt):\n timestamp_fmt = '%Y-%m-%dT%H:%M:%S'\n start_time_millis = int(time.mktime(datetime.strptime(start_time_txt, timestamp_fmt).timetuple()) * 1000)\n end_time_millis = int(time.mktime(datetime.strptime(end_time_txt, timestamp_fmt).timetuple()) * 1000)\n\n with open(config_path, 'r') as config_yaml:\n config = yaml.safe_load(config_yaml)\n api_version = config['api-version']\n if api_version != 'v1Beta':\n raise ValueError(f'Unsupported API version: {api_version}')\n\n env = Environment(config['environment'])\n strategy_name = config['strategy']['name']\n strategy_class = config['strategy']['strategy-class']\n strategy_module = config['strategy']['module']\n strategy_env = Environment(config['strategy']['environment'], parent=env)\n return BacktestConfig(env, strategy_name, strategy_class, strategy_module, strategy_basedir, strategy_env,\n start_time_millis, end_time_millis)\n\n def __init__(self, env: Environment, strategy_name: str, strategy_class: str, strategy_module: str,\n strategy_basedir: Path, strategy_env: Environment, start_time_millis: int, end_time_millis: int):\n self.env = env\n self.strategy_name = strategy_name\n self.strategy_class = strategy_class\n self.strategy_module = strategy_module\n self.strategy_basedir = strategy_basedir\n self.strategy_env = strategy_env\n self.start_time_millis = 
start_time_millis\n self.end_time_millis = end_time_millis\n\n def get_env(self) -> Environment:\n return self.env\n\n def get_strategy_name(self) -> str:\n return self.strategy_name\n\n def get_strategy_instance(self) -> Any:\n module = importlib.import_module(self.strategy_module)\n klass = getattr(module, self.strategy_class)\n strategy_instance = klass()\n return strategy_instance\n\n def get_strategy_basedir(self) -> Path:\n return self.strategy_basedir\n\n def get_strategy_env(self) -> Environment:\n return self.strategy_env\n\n def get_start_time_millis(self) -> int:\n return self.start_time_millis\n\n def get_end_time_millis(self) -> int:\n return self.end_time_millis\n\n\nclass BacktestResult:\n def __init__(self, strategy_id: str, snapshot: Snapshot):\n self.strategy_id = strategy_id\n self.snapshot = snapshot\n\n def get_strategy_id(self) -> str:\n return self.strategy_id\n\n def get_snapshot(self) -> Snapshot:\n return self.snapshot\n\n\nclass AlgoBacktester:\n \"\"\"\"\n Algorithmic trading strategy backtester.\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n def __init__(self, config: BacktestConfig):\n logger = logging.getLogger(__name__)\n logger.info('Serenity backtester starting up')\n\n sys.path.append(str(config.get_strategy_basedir()))\n\n bt_env = config.get_env()\n\n exchange_id = bt_env.getenv('EXCHANGE_ID', 'autofill')\n instance_id = bt_env.getenv('EXCHANGE_INSTANCE', 'prod')\n account = bt_env.getenv('EXCHANGE_ACCOUNT', 'Main')\n\n self.logger.info('Connecting to Serenity database')\n conn = connect_serenity_db()\n conn.autocommit = True\n cur = conn.cursor()\n\n self.scheduler = HistoricNetworkScheduler(config.get_start_time_millis(), config.get_end_time_millis())\n instrument_cache = InstrumentCache(cur, TypeCodeCache(cur))\n\n oms = OrderManagerService(self.scheduler)\n\n md_service = AzureHistoricMarketdataService(self.scheduler)\n mark_service = MarketdataMarkService(self.scheduler.get_network(), md_service)\n op_service = OrderPlacerService(self.scheduler, oms)\n op_service.register_order_placer(f'{exchange_id}:{instance_id}',\n AutoFillOrderPlacer(self.scheduler, oms, md_service, account))\n\n xps = NullExchangePositionService(self.scheduler)\n\n extra_outputs_txt = bt_env.getenv('EXTRA_OUTPUTS')\n if extra_outputs_txt is None:\n extra_outputs = []\n else:\n extra_outputs = extra_outputs_txt.split(',')\n self.dcs = HDF5DataCaptureService(Mode.BACKTEST, self.scheduler, extra_outputs)\n\n # wire up orders and fills from OMS\n Do(self.scheduler.get_network(), oms.get_orders(),\n lambda: self.dcs.capture_order(oms.get_orders().get_value()))\n Do(self.scheduler.get_network(), oms.get_order_events(),\n lambda: self.dcs.capture_fill(oms.get_order_events().get_value()))\n\n self.strategy_name = config.get_strategy_name()\n strategy_env = config.get_strategy_env()\n ctx = StrategyContext(self.scheduler, instrument_cache, md_service, mark_service, op_service,\n PositionService(self.scheduler, oms), xps, self.dcs, strategy_env.values)\n strategy_instance = config.get_strategy_instance()\n strategy_instance.init(ctx)\n strategy_instance.start()\n\n def run(self) -> BacktestResult:\n self.scheduler.run()\n\n # store output after historic run completes\n snapshot = self.dcs.store_snapshot(self.strategy_name)\n self.logger.info(f'stored snapshot: {snapshot.get_id()}')\n\n return BacktestResult(self.strategy_name, snapshot)\n\n\ndef main(config_path: str, start_time: str = None, end_time: str = None, strategy_dir: str = '.'):\n Application.init_logging()\n config = 
BacktestConfig.load(Path(config_path), Path(strategy_dir), start_time, end_time)\n engine = AlgoBacktester(config)\n engine.run()\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n","sub_path":"src/serenity/algo/backtester.py","file_name":"backtester.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"331399057","text":"# a single GRU\n# a bidirectional RNN\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport random\n\n\"\"\"\nFor Encoder\n\n\"\"\"\n\nclass Encoder(nn.Module):\n def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout_p):\n super().__init__()\n \n self.embedding = nn.Embedding(input_dim, emb_dim)\n \n self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = False)\n \n self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)\n \n self.dropout = nn.Dropout(dropout_p)\n \n def forward(self, src, src_len):\n \n #src = [src_len, batch size] The value is token tensor. shape[0] = src_len[0]\n #src_len = [batch size] The value means the src length\n \n embedded = self.dropout(self.embedding(src))\n \n #embedded = [src len, batch size, emb dim]\n \n packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, src_len.cpu())\n \n packed_outputs, hidden = self.rnn(packed_embedded)\n \n #packed_outputs is a packed sequence containing all hidden states, not tensor\n #hidden is now from the final non-padded element in the batch\n \n # Unpack the packed_outputs, return outputs and length of each\n outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs) \n \n #outputs is now a non-packed sequence, all hidden states obtained\n # when the input is a pad token are all zeros\n \n #outputs = [src len, batch size, hid dim * num directions]\n #hidden = [n layers * num directions, batch size, hid dim]\n \n return outputs, hidden\n\n\"\"\"\nFor attention\n\nPreviously: it pay attention to padding tokens within the source sentence\n\nNow: Using masking force the attention to only be over non-padding elements\n\"\"\"\nclass Attention(nn.Module):\n def __init__(self, enc_hid_dim, dec_hid_dim):\n super().__init__()\n \n self.attn = nn.Linear(enc_hid_dim + dec_hid_dim, dec_hid_dim)\n self.v = nn.Linear(dec_hid_dim, 1, bias = False)\n \n def forward(self, hidden, encoder_outputs, mask):\n \n #hidden = [1, batch size, dec hid dim]\n #encoder_outputs = [src_len, batch size, enc hid dim * 1]\n # mask.shape[batch_size, src_len]\n batch_size = encoder_outputs.shape[1]\n src_len = encoder_outputs.shape[0]\n \n #repeat decoder hidden state src_len times\n hidden = hidden.repeat(src_len, 1, 1)\n # hidden = [src_len, batch_size, dec_hid_dim]\n \n energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim = 2))) \n \"\"\"\n torch.cat() = [src_len, batchs_size, dec_hid_dim + enc_hid_dim]\n energy = [src_len, batch size, dec hid dim]\n \"\"\"\n\n attention = self.v(energy).squeeze(2).permute(1,0)\n #attention = [batch_size, src_len]\n \n attention = attention.masked_fill(mask == 0, -1e10)\n \"\"\"\n maked_fill: this will handle the tensor that its firt argument(mask == 0) is ture\n It will give the value -1e10 to padding tokens\n When the sentence is passed through the softmax, the padding tokens value will be zero\n \"\"\"\n \n return F.softmax(attention, dim = 1)\n\n\nclass Decoder(nn.Module):\n def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout_p, attention):\n super().__init__()\n\n self.output_dim = output_dim\n self.attention = 
attention\n \n self.embedding = nn.Embedding(output_dim, emb_dim)\n \n self.rnn = nn.GRU(enc_hid_dim + emb_dim, dec_hid_dim)\n \n self.fc_out = nn.Linear(dec_hid_dim, output_dim)\n \n self.dropout = nn.Dropout(dropout_p)\n \n def forward(self, input, hidden, encoder_outputs, mask):\n \"\"\"\n input = [batch_size]\n hidden = [1, batch_size, dec_hid_dim]\n encoder_outputs = [src_len, batch_size, enc_hid_dim]\n mask = [batch size, src len]\n \"\"\"\n \n input = input.unsqueeze(0)\n #input = [1, batch size]\n \n embedded = self.dropout(self.embedding(input))\n #embedded = [1, batch_size, emb_dim]\n \n attn_weights = self.attention(hidden, encoder_outputs, mask).unsqueeze(1) \n #attn_weights = [batch_size, 1, src_len]\n \n encoder_outputs = encoder_outputs.permute(1, 0, 2)\n #encoder_outputs = [batch_size, src_len, enc hid dim ]\n \n attn_applied = torch.bmm(attn_weights, encoder_outputs)\n #attn_applied = [batch size, 1, enc hid dim]\n \n attn_applied = attn_applied.permute(1, 0, 2)\n #attn_applied = [1, batch size, enc hid dim]\n \n rnn_input = self.dropout(torch.cat((embedded, attn_applied), dim = 2))\n #rnn_input = [1, batch size, enc_hid_dim + emb dim]\n \n output, hidden = self.rnn(rnn_input, hidden)\n \n #output = [seq len, batch size, dec hid dim * n directions]\n #hidden = [n layers * n directions, batch size, dec hid dim]\n \n #seq len, n layers and n directions will always be 1 in this decoder, therefore:\n #output = [1, batch size, dec hid dim]\n #hidden = [1, batch size, dec hid dim]\n #this also means that output == hidden\n assert (output == hidden).all()\n \n output = self.dropout(output)\n \n #output = F.log_softmax(self.fc_out(output[0]), dim=1)\n # [batch_size, dec_hid_dim]\n output = self.fc_out(output[0])\n # [batch_size, output_dim]\n return output, hidden, attn_weights.squeeze(1) # [batch_size, enc_hid_dim]\n\n\n\n\n\n\nclass Seq2Seq(nn.Module):\n def __init__(self, encoder, decoder, src_pad_idx, device):\n super().__init__()\n \n self.encoder = encoder\n self.decoder = decoder\n self.src_pad_idx = src_pad_idx\n self.device = device\n \n def create_mask(self, src):\n mask = (src != self.src_pad_idx).permute(1, 0)\n return mask\n \n def forward(self, src, src_len, trg, teacher_forcing_ratio = 0.5):\n \n \"\"\"\n src = [src_len, batch_size]\n src_len = [batch_size]\n trg = [trg_len, batch_size]\n \"\"\"\n batch_size = src.shape[1]\n trg_len = trg.shape[0]\n trg_vocab_size = self.decoder.output_dim\n \n #tensor to store decoder outputs\n outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)\n \n encoder_outputs, hidden = self.encoder(src, src_len)\n \n #first input to the decoder is the tokens\n input = trg[0,:]\n \n mask = self.create_mask(src)\n #mask = [batch size, src len]\n \n for t in range(1, trg_len):\n \n #insert input token embedding, previous hidden state, all encoder hidden states \n # and mask\n #receive output tensor (predictions) and new hidden state\n output, hidden, _ = self.decoder(input, hidden, encoder_outputs, mask)\n \n #place predictions in a tensor holding predictions for each token\n outputs[t] = output\n \n #decide if we are going to use teacher forcing or not\n teacher_force = random.random() < teacher_forcing_ratio\n \n #get the highest predicted token from our predictions\n top1 = output.argmax(1) \n \n #if teacher forcing, use actual next token as next input\n #if not, use predicted token\n input = trg[t] if teacher_force else top1\n \n return 
outputs","sub_path":"Class4/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"34949073","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nusage: server.py [-h] [-l LADDR] [-p PORT]\n\nTartu Night Life by Bruno Produit, Basar Turgut, Abel Mesfin, Saumitra Bagchi,\nversion 1.0\n\noptional arguments:\n -h, --help show this help message and exit\n -l LADDR, --laddr LADDR\n Listen address. Default localhost.\n -p PORT, --port PORT Listen on port.\n\"\"\"\n\n#-------------- Imports -------------------------\n\nimport logging\nfrom constants import *\nfrom facebook_scraper import *\nfrom argparse import ArgumentParser\nimport psycopg2\nfrom eve import Eve\n\n#-------------- Methods -------------------------\n\ndef initialize():\n # Logging\n FORMAT = '%(asctime)-15s %(levelname)s %(threadName)s %(message)s'\n logging.basicConfig(level=logging.DEBUG, format=FORMAT)\n LOG = logging.getLogger()\n\n # Arguments\n parser = ArgumentParser(description=info())\n parser.add_argument('-l', '--laddr', help=\"Listen address. Default localhost.\", default=SERVER_INET_ADDR)\n parser.add_argument('-p', '--port', help=\"Listen on port.\", default=SERVER_PORT, type=int)\n args = parser.parse_args()\n\n # Database\n database = open('.pgpass', 'r').read().split(':')\n #conn = psycopg2.connect(host=database[0], port=int(database[1]), database=database[2], user=database[3], password=database[4])\n #pg = conn.cursor()\n\ndef info(): return '%s by %s, version %s' % (NAME, AUTHOR, VERSION)\n\n\n#-------------- Main -------------------------\n\nif __name__==\"__main__\":\n try:\n # Init\n initialize()\n\n # Facebook Scraping\n fb = FacebookScraper()\n events = fb.get_events()\n print (events)\n app = Eve()\n app.run()\n\n except KeyboardInterrupt as e:\n print ('Ctrl+C issued ...')\n print ('Terminating ...')\n sys.exit(0)\n","sub_path":"backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"13662378","text":"def exam10():\n s=list(input())\n l = len(s)\n if s.count(s[0]) == len(s):\n print(\"\")\n return\n arr = [s[0]]\n s.remove(s[0])\n for i in range(1,l):\n for item in s:\n if item!=arr[i-1]:\n arr.append(item)\n s.remove(item)\n string =''.join(arr)\n if len(string)!=l:\n print(\"\")\n return\n print(string)\nexam10()","sub_path":"Code/CodeRecords/2496/60586/250033.py","file_name":"250033.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"389597229","text":"import cv2\nimport numpy as np\ndef to_bin(data):\n \"\"\"Convert `data` to binary format as string\"\"\"\n if isinstance(data, str):\n return ''.join([ format(ord(i), \"08b\") for i in data ])\n elif isinstance(data, bytes) or isinstance(data, np.ndarray):\n return [ format(i, \"08b\") for i in data ]\n elif isinstance(data, int) or isinstance(data, np.uint8):\n return format(data, \"08b\")\n else:\n raise TypeError(\"Type not supported.\")\n\ndef decode(image_name):\n print(\"[+] Decoding...\")\n # read the image\n image = cv2.imread(image_name)\n binary_data = \"\"\n for row in image:\n for pixel in row:\n r, g, b = to_bin(pixel)\n binary_data += r[-1]\n binary_data += g[-1]\n binary_data += b[-1]\n # split by 8-bits\n all_bytes = [ binary_data[i: i+8] for i in range(0, len(binary_data), 8) 
]\n    # convert from bits to characters\n    decoded_data = \"\"\n    for byte in all_bytes:\n        decoded_data += chr(int(byte, 2))\n        if decoded_data[-5:] == \"=====\":\n            break\n    return decoded_data[:-5]\n\nif __name__ == \"__main__\":\n    input_image = \"test.jpg\"\n    output_image = \"encoded_image.PNG\"\n    secret_data = \"This is a top secret message.\"\n    \n    decoded_data = decode(output_image)\n    print(\"[+] Decoded data:\", decoded_data)","sub_path":"sternography/decode_word/sterno.py","file_name":"sterno.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"282111177","text":"import string\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom guizero import *\n\n# GO AND GENERATE THE PAIRS FROM SCRIVIFILE\ncoordX = []\ncoordY = []\n\nf= open(\"dati.txt\",\"r\")\n\ndef coordinate ():\n    for riga in f:\n        valori = str(riga) # convert the line to a string\n        valori = valori.strip('\\n') # remove the line terminator\n        valori = valori.split(',') # split the string into two numbers\n        valori = list(valori) # convert to a list\n        print(valori)\n        coordX.append(int(valori[0])) # append the X coordinate\n        coordY.append(int(valori[1])) # append the Y coordinate \n\n    output.append(\"X:\")\n    output.append(coordX)\n    output.append(\"Y:\")\n    output.append(coordY)\n\ndef grafico ():\n    for riga in f:\n        valori = str(riga) # convert the line to a string\n        valori = valori.strip('\\n') # remove the line terminator\n        valori = valori.split(',') # split the string into two numbers\n        valori = list(valori) # convert to a list\n        print(valori)\n        coordX.append(int(valori[0])) # append the X coordinate\n        coordY.append(int(valori[1])) # append the Y coordinate\n\n    f.close() \n\n    plt.scatter(coordX,coordY,color='red', alpha=0.5, marker='.')\n    plt.title(\"SCATTER PLOT\", color='#003399')\n    plt.grid()\n    plt.ylabel('Y', color='#003399')\n    plt.xlabel('X', color='#003399')\n    plt.xticks([10*k for k in range(0,11)])\n    plt.yticks([10*k for k in range(0,11)])\n    plt.savefig(\"grafico.png\")\n    picture = Picture(app, image=\"grafico.png\")\n\napp= App(title=\"SCATTER PLOT\", width=1200,height=1000, bg=\"#93C572\")\netichetta = Text(app, text=\"ENTER THE NAME OF THE FILE TO OPEN: \", color=\"#B20000\", size=30, font=\"helvetica\")\nnomefile = TextBox(app, width=100)\nbottone1 = PushButton(app, text=\"GENERATE THE COORDINATES\", command=coordinate, args=[], width=40, height=4, align=\"top\")\noutput = TextBox(app, width=100, height=15, multiline=True)\nbottone = PushButton(app, text=\"GENERATE THE PLOT\", command=grafico, args=[], width=40, height=4, align=\"top\")\nbottone.bg = \"#F5DEB3\"\nbottone1.bg = \"#D2B48C\"\napp.display()\n","sub_path":"interfaccia-16-01-2021-iovine-manzilli-1.1.py","file_name":"interfaccia-16-01-2021-iovine-manzilli-1.1.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"353697788","text":"from elasticsearch import Elasticsearch\n\nes = Elasticsearch()\n\ndatas = [\n    {\n        'title': '美国留给伊拉克的是个烂摊子吗',\n        'url': 'http://view.news.qq.com/zt2011/usa_iraq/index.htm',\n        'date': '2011-12-16'\n    },\n    {\n        'title': '公安部:各地校车将享最高路权',\n        'url': 'http://www.chinanews.com/gn/2011/12-16/3536077.shtml',\n        'date': '2011-12-16'\n    },\n    {\n        'title': '中韩渔警冲突调查:韩警平均每天扣1艘中国渔船',\n        'url': 'https://news.qq.com/a/20111216/001044.htm',\n        'date': '2011-12-17'\n    },\n    {\n        'title': '中国驻洛杉矶领事馆遭亚裔男子枪击,嫌犯已自首',\n        'url': 
'http://news.ifeng.com/world/detail_2011_12/16/11372558_0.shtml',\n 'date': '2011-12-18'\n }\n]\n\nfor data in datas:\n es.index(index='news', doc_type='politics', body=data)\n","sub_path":"insert_datas.py","file_name":"insert_datas.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"144767637","text":"# Copyright NTRLab 2019\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------------\n\n# pylint: disable=inconsistent-return-statements\n\nimport abc\nfrom collections import deque\nimport logging\nimport queue\nfrom threading import RLock\nimport time\nimport json\n\nfrom sawtooth_validator.concurrent.thread import InstrumentedThread\nfrom sawtooth_validator.execution.scheduler_exceptions import SchedulerError\n\nfrom sawtooth_validator.journal.block_builder import BlockBuilder\nfrom sawtooth_validator.journal.block_wrapper import BlockWrapper\nfrom sawtooth_validator.journal.consensus.batch_publisher import BatchPublisher\nfrom sawtooth_validator.journal.consensus.consensus_factory import ConsensusFactory\n\nfrom sawtooth_validator.journal.chain_commit_state import TransactionCommitCache\n\nfrom sawtooth_validator.metrics.wrappers import CounterWrapper\nfrom sawtooth_validator.metrics.wrappers import GaugeWrapper\n\nfrom sawtooth_validator.protobuf.block_pb2 import BlockHeader\nfrom sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader\n\nfrom sawtooth_validator.exceptions import NotRegisteredConsensusModule \nfrom sawtooth_sdk.consensus.exceptions import BlockNotReady \nLOGGER = logging.getLogger(__name__)\n\nimport cProfile\nprofile_filename = 'publisher' + '.prof'\nprofiler = cProfile.Profile()\n#import hotshot\n\n\nNUM_PUBLISH_COUNT_SAMPLES = 5\nINITIAL_PUBLISH_COUNT = 30\n_MAX_BATCHES_ = 6\n\n\n\nclass PendingBatchObserver(metaclass=abc.ABCMeta):\n \"\"\"An interface class for components wishing to be notified when a Batch\n has begun being processed.\n \"\"\"\n\n @abc.abstractmethod\n def notify_batch_pending(self, batch):\n \"\"\"This method will be called when a Batch has passed initial\n validation and is queued to be processed by the Publisher.\n\n Args:\n batch (Batch): The Batch that has been added to the Publisher\n \"\"\"\n raise NotImplementedError('PendingBatchObservers must have a \"notify_batch_pending\" method')\n\n\nclass _PublisherThread(InstrumentedThread):\n def __init__(self, block_publisher, batch_queue,\n check_publish_block_frequency):\n super().__init__(name='_PublisherThread')\n self._block_publisher = block_publisher\n self._batch_queue = batch_queue\n self._check_publish_block_frequency = \\\n check_publish_block_frequency\n self._exit = False\n\n def run(self):\n try:\n # make sure we don't check to publish the block\n # to frequently.\n next_check_publish_block_time = time.time() + self._check_publish_block_frequency\n LOGGER.debug(\"_PublisherThread:run 
_check_publish_block_frequency=%s\",self._check_publish_block_frequency)\n while True:\n try:\n batch = self._batch_queue.get(timeout=self._check_publish_block_frequency)\n self._block_publisher.on_batch_received(batch)\n\n except queue.Empty:\n # If getting a batch times out, just try again.\n pass\n\n if next_check_publish_block_time < time.time():\n self._block_publisher.on_check_publish_block()\n next_check_publish_block_time = time.time() + self._check_publish_block_frequency\n if self._exit:\n return\n # pylint: disable=broad-except\n except Exception as exc:\n LOGGER.exception(exc)\n LOGGER.critical(\"BlockPublisher thread exited with error.\")\n\n def stop(self):\n self._exit = True\n\ndef profile(func):\n \"\"\"Decorator for run function profile\"\"\"\n def wrapper1(*args, **kwargs):\n profile_filename = func.__name__ + '.prof'\n profiler = cProfile.Profile()\n result = profiler.runcall(func, *args, **kwargs)\n profiler.dump_stats(profile_filename)\n return result\n def wrapper(*args, **kwargs):\n #profile_filename = func.__name__ + '.prof'\n #profiler = cProfile.Profile()\n result = profiler.runcall(func, *args, **kwargs)\n profiler.dump_stats(profile_filename)\n return result\n\n return wrapper\n\n\nclass _CandidateBlock(object):\n \"\"\"This is a helper class for the BlockPublisher. The _CandidateBlock\n tracks all the state associated with the Block that is being built.\n This allows the BlockPublisher to focus on when to create and finalize\n a block and not worry about how the block is built.\n \"\"\"\n\n def __init__(self,\n block_store,\n consensus,\n scheduler,\n committed_txn_cache,\n block_builder,\n max_batches,\n batch_injectors,\n identifier,\n nest_colour='Genesis'\n ):\n self._pending_batches = []\n self._pending_batch_ids = set()\n self._injected_batch_ids = set()\n self._missing_batches = []\n self._block_store = block_store\n self._consensus = consensus\n self._scheduler = scheduler\n self._committed_txn_cache = committed_txn_cache\n # Look-up cache for transactions that are committed in the current\n # chain and the state of the transactions already added to the\n # candidate block.\n self._block_builder = block_builder\n self._max_batches = max_batches\n self._batch_injectors = batch_injectors\n self._identifier = identifier\n self._recompute_context = None\n self._make_batch_broadcast = False\n self._batches_num = 0 # must have batches\n # cluster info\n self._nest_colour = nest_colour\n \n\n def __del__(self):\n # Cancel the scheduler if it is not complete\n if not self._scheduler.complete(block=False):\n self._scheduler.cancel()\n\n @property\n def identifier(self):\n return self._identifier\n\n @property\n def batches_num(self):\n return self._batches_num\n\n @property\n def make_batch_broadcast(self):\n return self._make_batch_broadcast\n\n @property\n def nest_colour(self):\n return self._nest_colour \n\n @property\n def block_num(self):\n return self._block_builder.block_num\n\n @block_num.setter\n def block_num(self, block_num):\n self._block_builder.block_num = block_num\n\n @property\n def previous_block_id(self):\n return self._block_builder.previous_block_id\n\n def has_pending_batches(self):\n return len(self._pending_batches) != 0\n\n @property\n def last_batch(self):\n if self._pending_batches:\n return self._pending_batches[-1]\n raise ValueError(\n 'last_batch called on an empty block.'\n 'Empty block publishing is not supported.'\n )\n\n @property\n def batches(self):\n return self._pending_batches.copy()\n\n @property\n def 
can_add_batch(self):\n return (\n self._max_batches == 0\n or len(self._pending_batches) < self._max_batches\n )\n\n def _check_batch_dependencies(self, batch, committed_txn_cache):\n \"\"\"Check the dependencies for all transactions in this are present.\n If all are present the committed_txn is updated with all txn in this\n batch and True is returned. If they are not return failure and the\n committed_txn is not updated.\n :param batch: the batch to validate\n :param committed_txn_cache: The cache holding the set of committed\n transactions to check against, updated during processing.\n :return: Boolean, True if dependencies checkout, False otherwise.\n \"\"\"\n for txn in batch.transactions:\n if self._is_txn_already_committed(txn, committed_txn_cache):\n LOGGER.debug(\"Transaction rejected as it is already in the chain %s\",txn.header_signature[:8])\n return False\n elif not self._check_transaction_dependencies(txn, committed_txn_cache):\n # if any transaction in this batch fails the whole batch\n # fails.\n committed_txn_cache.remove_batch(batch)\n return False\n # update so any subsequent txn in the same batch can be dependent\n # on this transaction.\n committed_txn_cache.add(txn.header_signature)\n return True\n\n def _check_transaction_dependencies(self, txn, committed_txn_cache):\n \"\"\"Check that all this transactions dependencies are present.\n :param txn: the transaction to check\n :param committed_txn_cache: The cache holding the set of committed\n transactions to check against.\n :return: Boolean, True if dependencies checkout, False otherwise.\n \"\"\"\n txn_hdr = TransactionHeader()\n txn_hdr.ParseFromString(txn.header)\n for dep in txn_hdr.dependencies:\n if dep not in committed_txn_cache:\n LOGGER.debug(\"Transaction rejected due missing dependency, transaction %s depends on %s\",txn.header_signature, dep)\n return False\n return True\n\n def _is_batch_already_committed(self, batch):\n \"\"\" Test if a batch is already committed to the chain or\n is already in the pending queue.\n :param batch: the batch to check\n \"\"\"\n return (self._block_store.has_batch(batch.header_signature) or\n batch.header_signature in self._pending_batch_ids)\n\n def _is_txn_already_committed(self, txn, committed_txn_cache):\n \"\"\" Test if a transaction is already committed to the chain or\n is already in the pending queue.\n \"\"\"\n return (self._block_store.has_batch(txn.header_signature) or\n txn.header_signature in committed_txn_cache)\n\n def _poll_injectors(self, poller, batch_list):\n for injector in self._batch_injectors:\n inject = poller(injector)\n if inject:\n for b in inject:\n self._injected_batch_ids.add(b.header_signature)\n batch_list.append(b)\n\n def add_batch(self, batch):\n \"\"\"\n Add a batch to the _CandidateBlock\n :params batch: the batch to add to the block\n if self._make_batch_broadcast == False we should inject this batch and don't finalize block util all such batch will be completed\n \"\"\"\n if batch.trace:\n LOGGER.debug(\"TRACE %s: %s\", batch.header_signature[:8],self.__class__.__name__)\n\n # first we check if the transaction dependencies are satisfied\n # The completer should have taken care of making sure all\n # Batches containing dependent transactions were sent to the\n # BlockPublisher prior to this Batch. 
So if there is a missing\n # dependency this is an error condition and the batch will be\n # dropped.\n if self._is_batch_already_committed(batch):\n # batch is already committed.\n LOGGER.debug(\"Dropping previously committed batch: %s\",batch.header_signature[:8])\n return\n elif self._check_batch_dependencies(batch, self._committed_txn_cache):\n batches_to_add = []\n\n # Inject batches at the beginning of the block\n if not self._pending_batches:\n self._poll_injectors(lambda injector: injector.block_start(self._block_builder.previous_block_id), batches_to_add)\n\n batches_to_add.append(batch)\n LOGGER.debug(\"ADD_BATCH INTO BRANCH[%s]: len=%s batch=%s\",self._identifier[:8],len(batches_to_add), batch.header_signature[:8])\n for b in batches_to_add:\n self._pending_batches.append(b)\n self._pending_batch_ids.add(b.header_signature)\n try:\n injected = b.header_signature in self._injected_batch_ids\n LOGGER.debug(\"add_batch: add batch=%s branch=%s\",batch.header_signature[:8],self._identifier[:8])\n self._scheduler.add_batch(b, required=injected)\n except SchedulerError as err:\n LOGGER.debug(\"Scheduler error processing batch: %s\", err)\n else:\n \"\"\"\n In case DAG could be missing dependencies because of many head \n keep this batch and try to add in check_publish_block() \n \"\"\"\n if not self._make_batch_broadcast:\n self._missing_batches.append(batch)\n LOGGER.debug(\"Dropping batch due to missing=%s dependencies: %s \\n\",len(self._missing_batches),batch.header_signature[:8])\n\n def check_publish_block(self):\n \"\"\"\n Check if it is okay to publish this candidate.\n For many peers we should control block's content .\n If this peer is not owner of batch we must wait \n until all batches which were putted into block for peer owner of batch \n will be putted into block for this peer too. \n \"\"\"\n \n publish = self._consensus.check_publish_block(self._block_builder.block_header)\n if publish and not self._make_batch_broadcast:\n if len(self._missing_batches) > 0 :\n # try to add \n batch = self._missing_batches.pop(0)\n LOGGER.debug(\"TRY TO ADD MISSING BATCH=%s\",batch.header_signature[:8])\n self.add_batch(batch)\n return False\n # check maybe there are incomplete batches if so - wait\n num = self._scheduler.num_batches()\n if num < self.batches_num:\n \"\"\"\n check maybe rest batch were rejected by dependency \n TRY TO ADD its\n \"\"\"\n LOGGER.debug(\"Waiting for BLOCK=%s.%s - too less batches=%s~%s \\n\",self.block_num,self.identifier[:8],num,self.batches_num)\n return False\n publish = (self._scheduler.check_incomplete_batches() == 0) \n \n return publish\n\n def _sign_block(self, block, identity_signer):\n \"\"\" The block should be complete and the final\n signature from the publishing validator(this validator) needs to\n be added.\n :param block: the Block to sign.\n :param identity_signer: the singer to sign the block with.\n \"\"\"\n header_bytes = block.block_header.SerializeToString()\n signature = identity_signer.sign(header_bytes)\n block.set_signature(signature)\n\n def finalize_block_complete(self,consensus):\n # proxy reply\n LOGGER.debug(\"_CandidateBlock::finalize_block_complete for BRANCH=%s\",self._identifier[:8])\n self._consensus.finalize_block_complete(consensus)\n\n @profile\n def finalize_block(self, identity_signer, pending_batches):\n \"\"\"Compose the final Block to publish. 
This involves flushing\n the scheduler, having consensus bless the block, and signing\n the block.\n :param identity_signer: the cryptographic signer to sign the block\n with.\n :param pending_batches: list to receive any batches that were\n submitted to add to the block but were not validated before this\n call.\n :return: The generated Block, or None if Block failed to finalize.\n In both cases the pending_batches will contain the list of batches\n that need to be added to the next Block that is built.\n \"\"\"\n LOGGER.debug(\"_CandidateBlock::finalize_block for BRANCH=%s PENDING=%s\", self._identifier[:8],len(self._pending_batches))\n self._scheduler.unschedule_incomplete_batches() # can drop out some batch from block \n #\n # at this point all batch will be done\n #\n self._scheduler.finalize()\n self._scheduler.complete(block=True)\n # for DAG get context for recompute merkle state\n #self._recompute_context = self._scheduler.get_state_hash_context()\n #sth = self._scheduler.recompute_merkle_root(self._scheduler.previous_state_hash,self._recompute_context)\n #LOGGER.debug(\"_CandidateBlock::Branch=%s context for merkle recompute=%s\\n\",self._identifier[:8],self._recompute_context)\n # this is a transaction cache to track the transactions committed\n # up to this batch. Only valid transactions that were processed\n # by the scheduler are added.\n committed_txn_cache = TransactionCommitCache(self._block_store)\n\n builder = self._block_builder\n bad_batches = [] # the list of batches that failed processing\n state_hash = None\n\n # Walk the pending batch list:\n # - find the state hash for the block, the block state_hash is\n # is randomly placed on one of the transactions, so must interogate\n # every batch to find it. If it is on a batch that failed processing\n # then this block will be abandoned.\n # - build three lists of batches:\n # 1) a lists of all valid transactions that will be included in the\n # block, these are added to the BlockBuilder to include in the Block\n # 2) all batches that were not executed, these are to be returned\n # in the pending_batches list\n # 3) all batches that failed processing. These will be discarded.\n # This list is needed in some case when the block is abandoned to\n # make sure they do not remain in the pending_batches list.\n for batch in self._pending_batches:\n if batch.trace:\n LOGGER.debug(\"TRACE %s: %s\", batch.header_signature,self.__class__.__name__)\n\n result = self._scheduler.get_batch_execution_result(batch.header_signature)\n # if a result is None, this means that the executor never\n # received the batch and it should be added to\n # the pending_batches, to be added to the next\n # block\n if result is None:\n # If this was an injected batch, don't keep it in pending\n # batches since it had to be in this block\n if batch.header_signature not in self._injected_batch_ids:\n pending_batches.append(batch)\n else:\n LOGGER.warning(\"Failed to inject batch '%s'\", batch.header_signature)\n elif result.is_valid:\n # check if a dependent batch failed. This could be belt and\n # suspenders action here but it is logically possible that\n # a transaction has a dependency that fails it could\n # still succeed validation. 
In which case we do not want\n # to add it to the batch.\n if not self._check_batch_dependencies(batch,committed_txn_cache):\n LOGGER.debug(\"Batch %s invalid, due to missing txn dependency.\", batch.header_signature[:8])\n LOGGER.debug(\"Abandoning block %s: root state hash has invalid txn applied\",builder)\n # Update the pending batch list to be all the\n # batches that passed validation to this point and\n # none of the ones that failed. It is possible that\n # this batch caused a future batch to fail so\n # we leave all of the batches that failed after this\n # one in the list.\n bad_batches.append(batch)\n pending_batches.clear()\n pending_batches.extend([\n x for x in self._pending_batches\n if x not in bad_batches\n ])\n return None\n else:\n builder.add_batch(batch)\n committed_txn_cache.add_batch(batch)\n if result.state_hash is not None:\n state_hash = result.state_hash\n else:\n bad_batches.append(batch)\n LOGGER.debug(\"Batch %s invalid, not added to block.\",batch.header_signature[:8])\n\n if state_hash is None or not builder.batches:\n LOGGER.debug(\"Abandoning block %s: no batches added\", builder)\n return None\n \"\"\"\n After this point in case PROXY consensus we should inform consensus engine about possibility finalize block\n \"\"\"\n LOGGER.debug(\"_CandidateBlock:: _consensus.finalize_block()-->\\n\")\n if not self._consensus.finalize_block(builder.block_header):\n LOGGER.debug(\"Abandoning block %s, consensus failed to finalize it\", builder)\n # return all valid batches to the pending_batches list\n pending_batches.clear()\n pending_batches.extend([x for x in self._pending_batches if x not in bad_batches])\n return None\n LOGGER.debug(\"_CandidateBlock:: _consensus.finalize_block()<-- DONE NEW ROOT STATE=%s pending=%s bad=%s\\n\",state_hash[:10],len(pending_batches),[batch.header_signature[:8] for batch in bad_batches])\n #\n # this is new root state for this block\n #\n builder.set_state_hash(state_hash)\n self._sign_block(builder, identity_signer)\n # for parallel scheduler we should get context here\n self._recompute_context = self._scheduler.get_state_hash_context() \n LOGGER.debug(\"_CandidateBlock::Branch=%s context for merkle recompute=%s\\n\",self._identifier[:8],self._recompute_context)\n return builder.build_block()\n\n @property\n def recompute_context(self):\n # for DAG only\n return self._recompute_context\n\nclass BlockPublisher(object):\n \"\"\"\n Responsible for generating new blocks and publishing them when the\n Consensus deems it appropriate.\n \"\"\"\n\n def __init__(self,\n transaction_executor,\n block_cache,\n state_view_factory,\n settings_cache,\n block_sender,\n batch_sender,\n squash_handler,\n context_handlers,\n chain_head,\n identity_signer,\n data_dir,\n config_dir,\n permission_verifier,\n check_publish_block_frequency,\n batch_observers,\n batch_injector_factory=None,\n metrics_registry=None,\n consensus_notifier=None):\n \"\"\"\n Initialize the BlockPublisher object\n\n Args:\n transaction_executor (:obj:`TransactionExecutor`): A\n TransactionExecutor instance.\n block_cache (:obj:`BlockCache`): A BlockCache instance.\n state_view_factory (:obj:`StateViewFactory`): StateViewFactory for\n read-only state views.\n block_sender (:obj:`BlockSender`): The BlockSender instance.\n batch_sender (:obj:`BatchSender`): The BatchSender instance.\n squash_handler (function): Squash handler function for merging\n contexts.\n chain_head (:obj:`BlockWrapper`): The initial chain head.\n identity_signer (:obj:`Signer`): Cryptographic signer for 
signing\n blocks\n data_dir (str): path to location where persistent data for the\n consensus module can be stored.\n config_dir (str): path to location where configuration can be\n found.\n batch_injector_factory (:obj:`BatchInjectorFatctory`): A factory\n for creating BatchInjectors.\n metrics_registry (MetricsRegistry): Metrics registry used to\n create pending batch gauge\n \"\"\"\n self._lock = RLock()\n self._proxy_lock = RLock() # for external consensus\n \"\"\"\n for modern proxy consensus -\n wait until external engine ask block candidate for one of the DAG's branch - \n \"\"\" \n \n self._engine_ask_candidate = {}\n self._blocks_summarize = [] # list of blocks which could be summarized\n self._consensus_notifier = consensus_notifier\n self._consensus = None # for external consensus name\n self._candidate_blocks = {} # all active branches - for DAG version only \n self._candidate_block = None # _CandidateBlock helper,\n self._chain_heads = {}\n self._recompute_contexts = {} # for DAG - save context for recompute merkle tree\n # the next block in potential chain\n self._block_cache = block_cache\n self._block_store = block_cache.block_store\n self._state_view_factory = state_view_factory\n self._settings_cache = settings_cache\n self._transaction_executor = transaction_executor\n self._block_sender = block_sender\n self._batch_sender = batch_sender\n self._batch_publisher = BatchPublisher(identity_signer, batch_sender)\n self._pending_batches = [] # batches we are waiting for validation, arranged in the order of batches received.\n self._pending_batch_ids = []\n self._pending_batch_cid = [] # save recommendated candidate id\n self._pending_batch_recomm = [] # save batch recommendation (block_num,batch_num)about candidate choice \n #self._pending_batch_num = []\n\n self._publish_count_average = _RollingAverage(NUM_PUBLISH_COUNT_SAMPLES, INITIAL_PUBLISH_COUNT)\n\n self._chain_head = chain_head # block (BlockWrapper)\n self._squash_handler = squash_handler\n self._context_handlers = context_handlers\n self._get_merkle_root = context_handlers['merkle_root']\n self._identity_signer = identity_signer\n self._validator_id = identity_signer.get_public_key().as_hex()\n # FBFT Topology\n self._topology = None\n self._data_dir = data_dir\n self._config_dir = config_dir\n self._permission_verifier = permission_verifier\n self._batch_injector_factory = batch_injector_factory\n self._nest_building_mode = True\n self._send_batches = None\n\n # For metric gathering\n if metrics_registry:\n self._pending_batch_gauge = GaugeWrapper(metrics_registry.gauge('publisher.BlockPublisher.pending_batch_gauge'))\n self._blocks_published_count = CounterWrapper(metrics_registry.counter('publisher.BlockPublisher.blocks_published_count'))\n else:\n self._blocks_published_count = CounterWrapper()\n self._pending_batch_gauge = GaugeWrapper()\n\n self._batch_queue = queue.Queue()\n self._queued_batch_ids = []\n self._queued_batch_recomm = [] # for DAG - say about candidate\n self._batch_observers = batch_observers\n self._check_publish_block_frequency = check_publish_block_frequency\n self._publisher_thread = None\n \n LOGGER.debug(\"BlockPublisher: INIT chain_head=%s block_store=%s validator=%s\\n\",chain_head,type(self._block_store),self._validator_id[:8])\n\n @property\n def is_recovery(self):\n return self._block_store.is_recovery\n\n @property\n def malicious(self):\n return self._block_sender.malicious\n\n @property\n def queued_batch_recomm(self):\n return [str(recom[1])+'.'+recom[0][:8] for recom in 
self._queued_batch_recomm]\n\n @property\n def pending_batch_recomm(self):\n return [recom[:8] for recom in self._pending_batch_cid]\n\n @property\n def pending_batches(self):\n return [b.header_signature[:8] for b in self._pending_batches]\n\n @property\n def nest_colour(self):\n # own validator color\n return self._topology.nest_colour\n\n @property\n def candidate_blocks(self):\n return [blk.nest_colour+':'+str(blk.block_num)+':'+key[:8] for key,blk in self._candidate_blocks.items()]\n @property\n def chain_heads(self):\n return [str(blk.block_num)+':'+key[:8] for key,blk in self._chain_heads.items()]\n\n def belong_cluster(self,peer_id):\n LOGGER.debug('Check CLUSTER for peer_id=%s',peer_id[:8])\n return (peer_id in self._topology.cluster) if self._topology.cluster else True\n \n def get_topology_info(self):\n \"\"\"\n get topology info - we should know own nests color\n \"\"\" \n # get topology\n #LOGGER.debug('get topology=%s',stopology)\n #self._topology.get_topology(stopology)\n self._topology = self._block_sender.get_topology()\n self._batch_sender.set_cluster(self._topology)\n \n\n\n def start(self):\n self._publisher_thread = _PublisherThread(\n block_publisher=self,\n batch_queue=self._batch_queue,\n check_publish_block_frequency=self._check_publish_block_frequency)\n LOGGER.debug(\"BlockPublisher: start _PublisherThread\")\n \n self._publisher_thread.start()\n\n def stop(self):\n if self._publisher_thread is not None:\n self._publisher_thread.stop()\n self._publisher_thread = None\n\n def queue_batch(self, batch,recomm=None):\n \"\"\"\n New batch has been received, queue it with the BlockPublisher for\n inclusion in the next block.\n num - say number of batches into block\n \"\"\"\n #batch.header_signature[:10]\n LOGGER.debug(\"BlockPublisher::queue_batch ADD new BATCH=%s recomend=%s.%s num=%s queue=%s\",batch.header_signature[:10],recomm[1],recomm[0][:8],recomm[2],self.get_current_queue_info())\n self._batch_queue.put(batch)\n self._queued_batch_ids.append(batch.header_signature)\n self._queued_batch_recomm.append(recomm if recomm else ('',0,0)) # (candidate_id if candidate_id is not None else '',num))\n for observer in self._batch_observers:\n observer.notify_batch_pending(batch)\n LOGGER.debug(\"BlockPublisher::queue_batch queue=%s recom=%s DONE\",self.get_current_queue_info(),len(self._queued_batch_recomm))\n\n def can_accept_batch(self):\n return len(self._pending_batches) < self._get_current_queue_limit()\n\n def _get_current_queue_limit(self):\n # Limit the number of batches to 2 times the publishing average. 
This\n # allows the queue to grow geometrically, if the queue is drained.\n return 2 * self._publish_count_average.value\n\n def get_current_queue_info(self):\n \"\"\"Returns a tuple of the current size of the pending batch queue\n and the current queue limit.\n \"\"\"\n return (len(self._pending_batches), self._get_current_queue_limit())\n \n def get_candidates(self):\n \"\"\"\n Send for rest-api list of condidate\n \"\"\"\n with self._lock:\n return sorted([cand.nest_colour+':'+str(cand.block_num)+':'+cand.identifier for key,cand in self._candidate_blocks.items()])\n \"\"\"\n def is_block_num_in_nest(self,block_num):\n pass\n \"\"\"\n\n @property\n def chain_head_lock(self):\n return self._lock\n #@profile\n def _build_candidate_block(self, chain_head):\n \"\"\" Build a candidate block and construct the consensus object to\n validate it.\n :param chain_head: The block to build on top of.\n :return: (BlockBuilder) - The candidate block in a BlockBuilder\n wrapper.\n For DAG build candidate for chain_head.identifier branch\n should works under locking\n \"\"\"\n #LOGGER.debug(\"BUILD CANDIDATE BLOCK..\")\n main_head = self._block_cache.block_store.chain_head\n bid = chain_head.identifier\n if self._topology is None:\n self.get_topology_info()\n # publisher mode \n self._send_batches = int(self._settings_cache.get_setting('bgx.publisher.send_batches',main_head.state_root_hash,default_value=1))\n \n LOGGER.debug(\"BUILD CANDIDATE_BLOCK for BRANCH=%s:%s main=%s STATE=%s~%s\",chain_head.block_num,bid[:8],main_head.block_num,main_head.state_root_hash[:10],chain_head.state_root_hash[:10])\n\n state_view = BlockWrapper.state_view_for_block(main_head ,self._state_view_factory) # main_head FOR DAG use main_head instead chain_head\n \n consensus_module,consensus_name = ConsensusFactory.try_configured_consensus_module(chain_head.header_signature,state_view)\n if not consensus_module:\n # there is no internal consensus \n # check may consensus engine already was registred\n LOGGER.debug(\"BlockPublisher:_build_candidate_block no internal consensus_module=%s\",consensus_name)\n if not self._consensus_notifier.was_registered_engine(consensus_name):\n raise NotRegisteredConsensusModule\n \"\"\" \n External consensus was registered.Maybe create fake consensus module?\n \"\"\"\n self._consensus = consensus_name[0] # save consensus name\n consensus_module = ConsensusFactory.try_configured_proxy_consensus()\n LOGGER.debug(\"BlockPublisher:_build_candidate_block External consensus was registered=%s\",consensus_name)\n \n LOGGER.debug(\"BlockPublisher: BUILD CANDIDATE_BLOCK BRANCH=%s:%s consensus_module=(%s) ask_candidate=%s\",chain_head.block_num,bid[:8],consensus_name,self._engine_ask_candidate)\n # using chain_head so so we can use the setting_cache\n max_batches = int(self._settings_cache.get_setting(\n 'bgx.publisher.max_batches_per_block',\n main_head.state_root_hash,# for DAG chain_head.state_root_hash,\n default_value=_MAX_BATCHES_))\n \n # this is my signer_id\n public_key = self._validator_id #self._identity_signer.get_public_key().as_hex()\n consensus = consensus_module.\\\n BlockPublisher(block_cache=self._block_cache,\n state_view_factory=self._state_view_factory,\n batch_publisher=self._batch_publisher,\n data_dir=self._data_dir,\n config_dir=self._config_dir,\n validator_id=public_key)\n if hasattr(consensus, 'set_publisher'):\n # external proxy consensus\n consensus.set_publisher(self)\n #self._block_summarize = None\n if bid in self._engine_ask_candidate:\n # set consensus\n 
consensus.set_consensus_name(self._consensus)\n\n batch_injectors = []\n if self._batch_injector_factory is not None:\n batch_injectors = self._batch_injector_factory.create_injectors(main_head.identifier) # FOR DAG main_head instead of chain_head.identifier\n if batch_injectors:\n LOGGER.debug(\"Loaded batch injectors: %s\", batch_injectors)\n \"\"\"\n For DAG version we should ask block_num for last node into sorted graph - FIXME\n block_num for new candidate should be more then block_num of its parent\n and in this case block_num is coloured\n \"\"\"\n nest_colour = self._engine_ask_candidate[bid][1] if hasattr(consensus, 'set_publisher') else 'Genesis'\n LOGGER.debug(\"Get BLOCK NUM for color=%s\",nest_colour)\n block_num = self._block_store.get_block_num(chain_head.block_num,self._validator_id,nest_colour)\n LOGGER.debug(\"Header for block candidate(%s:...)->(%s:%s) heads=%s\",block_num,chain_head.block_num,chain_head.header_signature[:8],self.chain_heads)\n block_header = BlockHeader(\n block_num=block_num , # ask last block number from store\n previous_block_id=chain_head.header_signature,\n signer_public_key=public_key)\n block_builder = BlockBuilder(block_header)\n if not consensus.initialize_block(block_builder.block_header):\n # for proxy consensus waiting until reply fron consensus\n # return reserved block num\n LOGGER.debug(\"Consensus not ready to build candidate block.\")\n self._block_store.pop_block_number(block_num,self._validator_id)\n return None\n \n if hasattr(consensus, 'set_publisher'):\n # switch of marker from proxy engine\n del self._engine_ask_candidate[bid] \n \"\"\"\n create a new scheduler\n for DAG we should use state_root_hash from head with last updated merkle root \n because of concurrence block with max number could has not last merkle root, so take root from merkle directly \n \"\"\"\n main_head = self._block_cache.block_store.chain_head\n state_root_hash = self._get_merkle_root()\n LOGGER.debug(\"Use for executor BRANCH=%s:%s ROOT STATE=%s:%s~%s max_batches=%s\\n\",chain_head.block_num,bid[:8],main_head.block_num,state_root_hash[:10],main_head.state_root_hash[:10],max_batches)\n scheduler = self._transaction_executor.create_scheduler(self._squash_handler, state_root_hash,self._context_handlers) # for DAG try use main_head.state_root_hash \n\n # build the TransactionCommitCache\n committed_txn_cache = TransactionCommitCache(self._block_cache.block_store)\n LOGGER.debug(\"_build_candidate_block: self._transaction_executor.execute(scheduler) malicious=%s parent=%s\",self.malicious,bid[:8]) \n self._transaction_executor.set_malicious(self.malicious)\n self._transaction_executor.execute(scheduler)\n self._candidate_block = _CandidateBlock(\n self._block_cache.block_store,\n consensus, scheduler,\n committed_txn_cache,\n block_builder,\n max_batches if not self._nest_building_mode else 1,\n batch_injectors,\n bid,\n nest_colour)\n # add new candidate into list\n self._candidate_blocks[bid] = self._candidate_block\n LOGGER.debug(\"NEW candidate=<%s:%s:%s> candidates=%s batches=%s recom=%s\",\n nest_colour,self._candidate_block.block_num,bid[:8],\n self.candidate_blocks,\n [key[:8] for key in self._pending_batch_ids],\n self.queued_batch_recomm\n )\n \"\"\"\n for DAG we should prefer add batch's with recomended candidate\n first check may be there is such batch in pending queue\n if no take batch without any recomendation\n \"\"\"\n num = 0\n start_ind = 0 \n while True: #bid in self._pending_batch_cid:\n # use all recomended batch from pending queue\n try:\n 
ind = self._pending_batch_cid.index(bid,start_ind)\n except ValueError:\n break\n start_ind = ind + 1 # for skipping this one\n # others candidate can take it because of recomendation\n batch = self._pending_batches[ind]\n self._candidate_block._batches_num = self._pending_batch_recomm[ind][1]\n num += 1\n recomm_num = self._pending_batch_recomm[ind][0]\n LOGGER.debug(\"NEW candidate=<%s:%s:%s> add recomended batch[%s]=%s start=%s total=%s\",nest_colour,self._candidate_block.block_num,bid[:8],ind,batch.header_signature[:8],start_ind,num)\n if self._candidate_block.block_num != recomm_num:\n # this candidate with wrong block number\n LOGGER.debug(\"NEW candidate=<%s:%s:%s> missmatch cand block num=%s!!!\\n\\n\",nest_colour,self._candidate_block.block_num,bid[:8],recomm_num)\n self.correct_candidate_num(self._candidate_block,recomm_num)\n\n self._candidate_block.add_batch(batch)\n \n\n if num == 0 :\n # there are no recomended batch for this candidate- take batch without recomendation\n # because candidate for batch with recomendation could not be ready\n if nest_colour == self.nest_colour:\n LOGGER.debug(\"Try add batch to NEW candidate=<%s:%s:%s> pend=%s cid=%s\",nest_colour,self._candidate_block.block_num,bid[:8],self.pending_batches,self.pending_batch_recomm)\n for (ind,batch) in enumerate(self._pending_batches):\n if self._pending_batch_cid[ind] != '':\n continue # skip recomended batch\n if self._candidate_block.can_add_batch and num < max_batches:\n num += 1\n #LOGGER.debug(\"NEW candidate=%s.%s add batch[%s]=%s total=%s\",self._candidate_block.block_num,bid[:8],ind,batch.header_signature[:8],num)\n # mark taken batch with recomendation (but with num == 0)- because others candidate can take it too\n # when we make block and same bacthes becaime incompleted unmark it\n self._pending_batch_cid[ind] = bid # self._pending_batch_num[ind] == 0 \n self._candidate_block._make_batch_broadcast = True\n self._candidate_block.add_batch(batch)\n else:\n break\n else:\n LOGGER.debug(\"SKIP NEW candidate=<%s:%s:%s> as belonging other federation\",nest_colour,self._candidate_block.block_num,bid[:8])\n\n \n\n LOGGER.debug(\"NEW candidate=<%s:%s:%s> DONE batches total=%s pending=%s\",nest_colour,self._candidate_block.block_num,bid[:8],num,len(self._pending_batches))\n if self.is_recovery:\n \"\"\"\n recovery from DAG store - ask next block for nest_colour\n \"\"\"\n blks = self._block_store.get_recovery_block(nest_colour)\n if blks is not None:\n LOGGER.debug(\"RECOVERY next BLOCK=%s for nest=%s\",[blk.identifier[:8] for blk in blks],nest_colour)\n self._block_sender.recover_block(blks)\n else: \n if not self.is_recovery:\n LOGGER.debug(\"RECOVERY WAS DONE !\\n\")\n self._block_sender.try_to_sync_with_net()\n\n \n \n def correct_candidate_num(self,recomm_cand,recomm_num):\n # for DAG - correct candidate block number\n for cand in self._candidate_blocks.values():\n if cand.block_num == recomm_num:\n cand.block_num = recomm_cand.block_num\n recomm_cand.block_num = recomm_num\n LOGGER.debug(\"Candidate num corrected=%s\",self.candidate_blocks)\n return True\n return False\n\n def on_batch_received(self, batch):\n \"\"\"\n A new batch is received, send it for validation\n :param batch: the new pending batch\n :return: None\n \"\"\"\n LOGGER.debug(\"On batch=%s received batchs=%s recomend=%s\",batch.header_signature[:8],[key[:8] for key in self._queued_batch_ids],self.queued_batch_recomm)\n with self._lock:\n (cid,block_num,num) = self._queued_batch_recomm.pop(0) # [-1] # recomended branch\n 
self._queued_batch_ids = self._queued_batch_ids[:1]\n \n LOGGER.debug(\"BlockPublisher:Pop batch=%s batchs=%s recomend=%s\",batch.header_signature[:8],[key[:8] for key in self._queued_batch_ids],self.queued_batch_recomm)\n if self._permission_verifier.is_batch_signer_authorized(batch):\n # add into pending\n self._pending_batches.append(batch)\n self._pending_batch_ids.append(batch.header_signature)\n #self._pending_batch_num.append(num) \n\n self._pending_batch_gauge.set_value(len(self._pending_batches))\n # if we are building a block then send schedule it for\n # execution.\n \n LOGGER.debug(\"On BATCH=%s received candidate block's CID=%s num=%s heads=%s cands=%s\",batch.header_signature[:8],cid[:8],num,\n self.chain_heads,\n self.candidate_blocks\n )\n \"\"\"\n choice block candidate for adding batch from self._candidate_blocks\n FIXME - USE some strategy for choicing candidate\n \"\"\"\n candidate = None\n if cid != '' :\n # use recomended candidate if there is no such candidate - we should put this batch into _pending_batches\n if cid in self._candidate_blocks and self._candidate_blocks[cid].can_add_batch:\n candidate = self._candidate_blocks[cid]\n candidate._batches_num = num\n LOGGER.debug(\"On batch=%s received use recomended candidate=<%s:%s:%s> from=%s\",batch.header_signature[:8],candidate.nest_colour,candidate.block_num,candidate.identifier[:8],self.candidate_blocks)\n if candidate.block_num != block_num:\n # there is candidate but with wrong block number\n LOGGER.debug(\"On batch=%s received missmatch cand block num=%s~%s!!!\\n\\n\",batch.header_signature[:8],candidate.block_num,block_num)\n self.correct_candidate_num(candidate,block_num)\n # try to change candidate's number on recommended\n # send batch to peers and say about selected branch \n else:\n # take first ready candidate \n # FIXME - think about strategy for candidate choice\n for cand in self._candidate_blocks.values():\n if cand.nest_colour == self.nest_colour and cand.can_add_batch:\n \"\"\"\n there is block candidate and we can add batch into them\n for DAG we should choice one of the block candidate and inform others peer about that choice\n and set cid for marker \n \"\"\"\n candidate,cid,block_num = cand,cand.identifier,cand.block_num\n LOGGER.debug(\"On batch=%s received use candidate=<%s:%s:%s> from=%s\",batch.header_signature[:8],cand.nest_colour,cand.block_num,candidate.identifier[:8],self.candidate_blocks)\n # send batch to peers and say about selected branch \n #self._batch_publisher.send_batch(batch,candidate.identifier)\n candidate._make_batch_broadcast = True # mark for broadcasting\n break\n if candidate is not None:\n \"\"\"\n could be situation when batch can't be added because of dependency \n in case of recomendation we should wait and try to add again\n \"\"\"\n candidate.add_batch(batch)\n #if cid == '' : # send in case batch owner\n # self._batch_publisher.send_batch(batch,candidate.identifier)\n else:\n # we should save somewhere cid and num for using next when free block candidate appeared \n LOGGER.debug(\"On BATCH=%s received THERE ARE NO CANDIDATE - put CID=%s into pending=%s cid=%s!!!\\n\",batch.header_signature[:8],cid[:8],len(self._pending_batches),\n self.pending_batch_recomm\n ) \n # mark for this candidate or for any of them\n self._pending_batch_cid.append(cid) \n self._pending_batch_recomm.append((block_num,num))\n\n else:\n LOGGER.debug(\"BATCH=%s has an unauthorized signer.\",batch.header_signature[:8])\n\n def _rebuild_pending_batches(self, committed_batches, 
uncommitted_batches):\n \"\"\"When the chain head is changed. This recomputes the list of pending\n transactions\n :param committed_batches: Batches committed in the current chain\n since the root of the fork switching from.\n :param uncommitted_batches: Batches that were committed in the old\n fork since the common root.\n \"\"\"\n if committed_batches is None:\n committed_batches = []\n if uncommitted_batches is None:\n uncommitted_batches = []\n\n committed_set = set([x.header_signature for x in committed_batches])\n\n pending_batches = self._pending_batches\n pending_batch_cid = self._pending_batch_cid\n pending_batch_recomm = self._pending_batch_recomm\n\n LOGGER.debug(\"BlockPublisher:_rebuild_pending_batches num=%s~%s c:uc=%s:%s!!!\\n\\n\",len(pending_batches),len(self._pending_batch_cid),len(committed_batches),len(uncommitted_batches))\n self._pending_batches = []\n self._pending_batch_ids = []\n self._pending_batch_cid = [] \n self._pending_batch_recomm = [] # also take recomendation \n\n num_committed_batches = len(committed_batches)\n if num_committed_batches > 0:\n # Only update the average if either:\n # a. Not drained below the current average\n # b. Drained the queue, but the queue was not bigger than the\n # current running average\n remainder = len(self._pending_batches) - num_committed_batches\n if remainder > self._publish_count_average.value or \\\n num_committed_batches > self._publish_count_average.value:\n self._publish_count_average.update(num_committed_batches)\n\n # Uncommitted and pending disjoint sets\n # since batches can only be committed to a chain once.\n for batch in uncommitted_batches:\n if batch.header_signature not in committed_set:\n self._pending_batches.append(batch)\n self._pending_batch_ids.append(batch.header_signature)\n self._pending_batch_cid.append('') # !!!! 
recomended batch could not be uncommited\n self._pending_batch_recomm.append((0,0))\n\n for (ind,batch) in enumerate(pending_batches):\n if batch.header_signature not in committed_set:\n self._pending_batches.append(batch)\n self._pending_batch_ids.append(batch.header_signature)\n self._pending_batch_cid.append(pending_batch_cid[ind])\n self._pending_batch_recomm.append(pending_batch_recomm[ind])\n\n LOGGER.debug(\"BlockPublisher:_rebuild_pending_batches num=%s~%s DONE\\n\",len(self._pending_batches),len(self._pending_batch_cid))\n\n def on_chain_updated(self, chain_head,\n committed_batches=None,\n uncommitted_batches=None,\n branch_id=None):\n \"\"\"\n The existing chain has been updated, the current head block has\n changed.\n\n :param chain_head: the new head of block_chain, can be None if\n no block publishing is desired.\n :param committed_batches: the set of batches that were committed\n as part of the new chain.\n :param uncommitted_batches: the list of transactions if any that are\n now de-committed when the new chain was selected.\n :return: None\n \"\"\"\n try:\n LOGGER.info('on_chain_updated: try update chain HEAD=%s LOCK',chain_head.identifier[:8] if chain_head is not None else None)\n with self._lock:\n if chain_head is not None:\n \"\"\"\n call from chain controller for changing head of previous_block_id's branch \n also change _chain_heads for branch branch_candidate_id\n \"\"\"\n branch_candidate_id = chain_head.previous_block_id\n LOGGER.info('Now building on top of block: %s-->%s heads=%s',branch_candidate_id[:8],chain_head.identifier[:8],self.chain_heads)\n if branch_candidate_id in self._chain_heads:\n del self._chain_heads[branch_candidate_id]\n LOGGER.info('DEL HEAD for DAG branch=%s\\n',branch_candidate_id[:8])\n else:\n LOGGER.info('SWITCH BLOCK CONDIDATE ON EXTERNAL BLOCK=%s.%s AS HEAD for BRANCH=%s\\n',chain_head.block_num,chain_head.identifier[:8],branch_candidate_id[:8])\n for key,head in self._chain_heads.items():\n if head.block_num == chain_head.block_num:\n del self._chain_heads[key]\n branch_candidate_id = key # drop old candidate\n if key in self._candidate_blocks:\n candidate = self._candidate_blocks[key]\n # return unused block num with was reserved for own candidate\n self._block_store.pop_block_number(candidate.block_num, self._validator_id)\n else:\n # it could be external block\n LOGGER.info('THERE IS NO CANDIDATE for key=%s candidates=%s\\n',key[:8],self.candidate_blocks)\n\n LOGGER.info('DEL OLD HEAD=%s by block num branch=%s.%s\\n',chain_head.identifier[:8],head.block_num,key[:8])\n \"\"\"\n mark try because consensus engine will switch branch on new head too\n and use the same color\n \"\"\"\n self._engine_ask_candidate[chain_head.identifier] = (True,candidate.nest_colour) \n break\n # update head for DAG branch\n self._chain_heads[chain_head.identifier] = chain_head\n LOGGER.info('Current HEADS=%s\\n',self.chain_heads)\n \n else:\n \"\"\"\n for DAG suspended only for current branch \n clean _chain_heads for branch_id\n \"\"\"\n branch_candidate_id = branch_id\n LOGGER.info('Block publishing is suspended until new chain head for %s arrives.',branch_id[:8] if branch_id else None)\n\n if branch_candidate_id in self._candidate_blocks:\n LOGGER.info('Update DAG branch head for ID=%s heads=%s\\n',branch_candidate_id[:8],self.chain_heads)\n del self._candidate_blocks[branch_candidate_id]\n\n \"\"\"\n for DAG we can have many heads and for each of them we should have candidate block\n chain_head is new position into branch \n also we should have a list 
of chain_head and correct item which is corresponding with chain_head.previous_block_id\n \"\"\"\n self._chain_head = chain_head\n \"\"\"\n for DAG we should clean block candidate only for current branch \n \"\"\"\n self._candidate_block = None # we need to make a new\n # _CandidateBlock (if we can) since the block chain has updated\n # under us.\n if chain_head is not None:\n self._rebuild_pending_batches(committed_batches,uncommitted_batches)\n \"\"\"\n We can check is internal or external consensus present and if there is one of them build candidate for branch\n \"\"\"\n try:\n LOGGER.info('Update DAG branch head=%s build_candidate_block\\n\\n',branch_candidate_id[:8])\n # FIXME for external consensus wait until engine ask candidate ?\n #self._build_candidate_block(chain_head)\n except NotRegisteredConsensusModule:\n \"\"\"\n we should do it after request from consensus engine\n \"\"\"\n LOGGER.debug(\"BlockPublisher: CANT BUILD BLOCK CANDIDATE (WAIT consensus engine)\\n\")\n\n \n self._pending_batch_gauge.set_value(len(self._pending_batches))\n\n # pylint: disable=broad-except\n except Exception as exc:\n LOGGER.critical(\"on_chain_updated exception.\")\n LOGGER.exception(exc)\n\n #@profile\n def on_check_publish_block(self, force=False):\n \"\"\"\n Ask the consensus module if it is time to claim the candidate block\n if it is then, claim it and tell the world about it.\n :return:\n None\n \"\"\"\n #LOGGER.debug(\"BlockPublisher:on_check_publish_block ...\")\n \"\"\"\n periodicaly ask publish block for current candidate\n \"\"\"\n try:\n # for DAG we can lock only shortly for select candidate\n bid,candidate = None,None\n with self._lock:\n # go through _chain_heads and create block candidate\n for hid,head in self._chain_heads.items():\n if (hid not in self._candidate_blocks \n and hid in self._engine_ask_candidate\n and self._pending_batches):\n # for case when block candidate was dropped by mistakes\n LOGGER.debug(\"BlockPublisher: on_check_publish_block BUILD CANDIDATE BLOCK for head=%s\\n\",hid[:8])\n self._build_candidate_block(head)\n\n # find candidate which is ready to be finalized\n \"\"\"\n for DAG we should check all branches here and try to find candidate which is ready to be finalized and send to chain controller\n \"\"\"\n for key,cand in self._candidate_blocks.items():\n #LOGGER.debug(\"BlockPublisher: check candidate=%s\",bid[:8])\n if (force or cand.has_pending_batches()) and cand.check_publish_block():\n bid,candidate = key,cand\n break\n # unlock chain heads\n if candidate is not None:\n \"\"\"\n candidate.finalize_block() will receive the list of batches \n that were not added to the block but were marked for this block candidate\n \"\"\"\n pending_batches = [] \n last_batch = candidate.last_batch\n LOGGER.debug(\"BlockPublisher: before finalize BLOCK=%s->%s last_batch=%s make_batch_broadcast=%s\\n\",candidate.block_num,bid[:8],last_batch.header_signature[:8],candidate.make_batch_broadcast)\n block = candidate.finalize_block(self._identity_signer,pending_batches)\n LOGGER.debug(\"BlockPublisher: after finalize BLOCK=%s->%s pending BATCHS=%s+%s block=%s\",candidate.block_num,bid[:8],len(self._pending_batches),[batch.header_signature[:8] for batch in pending_batches],block is not None)\n \"\"\"\n after proxy engine answer we can lock again\n \"\"\"\n with self._lock:\n self._candidate_block = None\n # Update the _pending_batches to reflect what we learned.\n try:\n # at this point batches relating to this candidate were marked - so we can drop it from pending using 
this marker\n #last_batch_index = self._pending_batches.index(last_batch)\n #unsent_batches = self._pending_batches[last_batch_index + 1:]\n unsent_batches = []\n unsent_batch_cid = []\n unsent_batch_recomm = []\n unsent_batch_ids = []\n for batch in pending_batches: # rest of batches which weren't put into block\n unsent_batch_ids.append(batch.header_signature) \n unsent_batch_cid.append('') # became without recomendation \n unsent_batch_recomm.append((0,0)) \n\n for (ind,batch) in enumerate(self._pending_batches):\n if self._pending_batch_cid[ind] != bid:\n # skip batch relating this candidate\n unsent_batches.append(batch)\n unsent_batch_ids.append(batch.header_signature)\n unsent_batch_cid.append(self._pending_batch_cid[ind])\n unsent_batch_recomm.append(self._pending_batch_recomm[ind])\n else:\n # skip all batches which were marked for this candidate = some of them could be into pending_batches and from this block candidate\n LOGGER.debug(\"BlockPublisher: after finalize block SKIP batch=%s\",batch.header_signature[:8])\n\n # new pending queue \n self._pending_batches = pending_batches + unsent_batches\n self._pending_batch_ids = unsent_batch_ids\n self._pending_batch_cid = unsent_batch_cid\n self._pending_batch_recomm = unsent_batch_recomm\n\n self._pending_batch_gauge.set_value(len(self._pending_batches))\n LOGGER.debug(\"BlockPublisher: After finalize for BRANCH=%s.%s new pending=%s batches=%s cid=%s\\n\",candidate.block_num,bid[:8],len(self._pending_batches),\n [key[:8] for key in self._pending_batch_ids], self.pending_batch_recomm\n )\n except ValueError:\n LOGGER.debug(\"BlockPublisher: last_batch=%s is not in list pending batches=%s~%s!!!!\\n\", last_batch.header_signature[:8], len(self._pending_batches), len(self._pending_batch_cid))\n\n if block:\n blkw = BlockWrapper(block)\n LOGGER.debug(\"Claimed Block: for branch=%s NEW BLOCK=%s.%s BATCHES=%s\\n\",bid[:8],blkw.block_num,blkw.identifier[:8],[batch.header_signature[:8] for batch in blkw.batches])\n if candidate.make_batch_broadcast and self._send_batches == 1: \n # send in case batch owner\n # only to peers own cluster\n self._batch_publisher.send_batches(blkw.batches,candidate.identifier,candidate.block_num)\n #ind = 0\n #for batch in blkw.batches:\n # self._batch_publisher.send_batch(batch,candidate.identifier,ind)\n # ind += 1\n\n \"\"\"\n send block to chain controller where we will do consensus\n external engine after this moment will be waiting NEW BLOCK message\n also save recompute context\n \"\"\"\n self._recompute_contexts[bid] = candidate.recompute_context\n \"\"\"\n send block to others peers but we should use cluster info\n \"\"\"\n LOGGER.debug(\"SEND NEW BLOCK=%s.%s\\n\",blkw.block_num,blkw.identifier[:8])\n self._block_sender.send(blkw.block)\n self._blocks_published_count.inc()\n\n # We built our candidate, disable processing until\n # the chain head is updated. Only set this if\n # we succeeded. 
Otherwise try again, this\n # can happen in cases where txn dependencies\n # did not validate when building the block.\n LOGGER.info(\"on_check_publish_block: on_chain_updated(None) BRANCH=%s\",bid[:8])\n \"\"\"\n for DAG we stop processing only for this branch (self._candidate_block.identifier) \n send branch id as additional argument for on_chain_updated()\n \"\"\"\n self.on_chain_updated(None,branch_id=bid)\n LOGGER.debug(\"on_check_publish_block: after update candidates=%s heads=%s\",self.candidate_blocks,self.chain_heads)\n else:\n \"\"\"\n candidate.finalize_block() return None but external consensus don't know about this and use bid for reply summarize()\n so we should create new candidate for this BID in this function\n also _chain_heads has head for branch bid\n \"\"\" \n LOGGER.debug(\"Was not finalize branch=%s REBUILD THIS CANDIDATE block=%s heads=%s!!!!\\n\\n\",bid[:8],candidate.block_num,self.chain_heads)\n # for correct block number allocation we should keep block number relation this candidate\n self._block_store.pop_block_number(candidate.block_num,self._validator_id)\n # Use color from self._candidate_blocks[bid]\n self._engine_ask_candidate[bid] = (True,candidate.nest_colour)\n del self._candidate_blocks[bid]\n head = self._block_store._get_block(bid)\n self._build_candidate_block(head)\n\n\n # pylint: disable=broad-except\n except Exception as exc:\n LOGGER.critical(\"on_check_publish_block exception.\")\n LOGGER.exception(exc)\n\n\n def has_batch(self, batch_id):\n with self._lock:\n # FIXME may be we will have problem because we drop batch from queue before finalizing ?\n if batch_id in self._pending_batch_ids:\n return True\n if batch_id in self._queued_batch_ids:\n return True\n\n return False\n \"\"\"\n for proxy consensus interface\n \"\"\"\n def on_head_updated(self,hid,new_hid,chain_head):\n with self._lock:\n # update head of branch\n del self._chain_heads[hid]\n self._chain_heads[new_hid] = chain_head\n LOGGER.info('UPDATE HEAD for branch=%s heads=%s\\n',hid[:8],self.chain_heads)\n\n def get_recompute_context(self,bid):\n # for DAG only\n if bid in self._recompute_contexts:\n context = self._recompute_contexts[bid]\n del self._recompute_contexts[bid] \n return context\n else:\n LOGGER.info('get_recompute_context NO BRANCH=%s HEAD CONTEXT\\n',bid[:8])\n return None\n\n def on_initialize_build_candidate(self,nest_colour, chain_head = None):\n \"\"\"\n build only after request from consensus engine and for chain_head only \n external consensus have got chain_head via chain_head_get() \n \"\"\"\n try:\n with self._lock:\n # DO IT HERE because of conflict with call _build_candidate_block() into on_check_publish_block\n self._engine_ask_candidate[chain_head.identifier] = (True,nest_colour)\n if chain_head is not None:\n #self._chain_head = chain_head\n LOGGER.info('on_initialize_build_candidate: parent=%s heads=%s', chain_head.identifier[:8],self.chain_heads)\n self._build_candidate_block(chain_head)\n\n # pylint: disable=broad-except\n except Exception as exc:\n LOGGER.critical(\"on_initialize_build_candidate exception parent=%s(%s).\",chain_head.identifier[:8],exc)\n raise exc\n #LOGGER.exception(exc)\n\n def on_finalize_block(self,block_header):\n # add block for summarizing - call from candidate.finalize_block() \n with self._proxy_lock:\n self._blocks_summarize.append((block_header.consensus,block_header.previous_block_id)) \n \n LOGGER.debug('BlockPublisher: on_finalize_block parent block=%s total 
ready=%s',block_header.previous_block_id[:8],len(self._blocks_summarize))\n # try to wait until proxy.finalize_block\n\n def initialize_block(self, block,nest_colour=''):\n \"\"\"\n we are know parent's ID from chain_head_get()\n \"\"\"\n if nest_colour == '':\n nest_colour = 'Genesis'\n LOGGER.debug('BlockPublisher: initialize_block for BLOCK=%s.%s COLOR=%s\\n',block.block_num, block.identifier[:8],nest_colour)\n #self._engine_ask_candidate[block.identifier] = (True,nest_colour)\n self._can_print_summarize = True\n self.on_initialize_build_candidate(nest_colour,block)\n LOGGER.debug('BlockPublisher: initialize_block DONE for BLOCK=%s.%s\\n',block.block_num, block.identifier[:8]) \n #raise BlockInProgress\n\n def summarize_block(self, force=False):\n \"\"\"\n call from ConsensusSummarizeBlockHandler\n for DAG we should check all dag header and return one of them \n also we can send list candidate which could be finalized\n \"\"\"\n #LOGGER.debug('BlockPublisher: summarize_block ...')\n with self._proxy_lock:\n num_ready = len(self._blocks_summarize)\n if num_ready == 0:\n if self._can_print_summarize:\n self._can_print_summarize = False\n LOGGER.debug('BlockPublisher: summarize_block BLOCK EMPTY self=%s',self)\n raise BlockEmpty #BlockNotReady\n # return one of the elements\n elem = self._blocks_summarize.pop()\n\n # elem[1] THIS IS PARENT OF BLOCK CANDIDATE\n LOGGER.debug('BlockPublisher: summarize_block id=%s total ready=%s',elem[1][:8],num_ready)\n return elem\n \n\n def finalize_block(self, consensus=None,block_id=None, force=False):\n \"\"\"\n at this point we should continue _candidate_block.finalize_block\n \"\"\"\n bid = block_id.hex() \n LOGGER.debug('BlockPublisher: finalize_block consensus=%s branch=%s',consensus,bid[:8])\n if bid in self._candidate_blocks:\n candidate = self._candidate_blocks[bid]\n LOGGER.debug('BlockPublisher: compare candidate=%s',candidate==self._candidate_block)\n else:\n raise BlockNotInitialized\n \n\n # now we can send block to chain controller\n # \n candidate.finalize_block_complete(consensus)\n LOGGER.debug('BlockPublisher: finalize_block send reply candidate=%s',candidate is not None)\n # return parent block id \n return bid \n\n def cancel_block(self,branch_id=None):\n \"\"\"\n cancel block only for branch \n we can free this block into block manager\n \"\"\"\n bid = branch_id.hex() \n LOGGER.debug('BlockPublisher:cancel_block ASK cancel for BRANCH=%s num=%s',bid[:8],len(self._candidate_blocks))\n if bid in self._candidate_blocks:\n LOGGER.debug('BlockPublisher:cancel_block DO cancel for BRANCH=%s',bid[:8])\n if self._candidate_block is not None:\n LOGGER.debug('BlockPublisher:cancel_block Stop adding batches to the current block and abandon it')\n # need new block candidate\n self._candidate_block = None\n\n def reset_max_batches_per_block(self):\n self._nest_building_mode = False\n LOGGER.debug('BlockPublisher:ALL NESTS WERE BUILDED\\n')\n\n def arbitrate_block(self,block,arbiter=True):\n \"\"\"\n consensus ask arbitration - send this block to arbiter\n id consensus ask us - it means we leader of this cluster\n \"\"\"\n LOGGER.debug('BlockPublisher:arbitrate_block block=%s to arbiter=%s',block.header_signature[:8],arbiter)\n self._block_sender.send_arbiter(block,arbiter)\n \n \nclass _RollingAverage(object):\n\n def __init__(self, sample_size, initial_value):\n self._samples = deque(maxlen=sample_size)\n\n self._samples.append(initial_value)\n self._current_average = initial_value\n\n @property\n def value(self):\n return 
self._current_average\n\n def update(self, sample):\n \"\"\"Add the sample and return the updated average.\n \"\"\"\n self._samples.append(sample)\n\n self._current_average = sum(self._samples) / len(self._samples)\n\n return self._current_average\n\nclass BlockEmpty(Exception):\n \"\"\"There are no batches in the block.\"\"\"\n\nclass BlockInProgress(Exception):\n \"\"\"There is already a block in progress.\"\"\"\n\n\nclass BlockNotInitialized(Exception):\n \"\"\"There is no block in progress to finalize.\"\"\"\n\n\nclass MissingPredecessor(Exception):\n \"\"\"A predecessor was missing\"\"\"\n\n\n","sub_path":"bgx/validator-bgx/sawtooth_validator/journal/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":72861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"326970168","text":"import os\n\nfrom six.moves import urllib_parse as urlparse\n\nimport jsonpointer\n\nfrom flex.exceptions import ValidationError\nfrom flex.error_messages import MESSAGES\nfrom flex.constants import (\n STRING,\n)\nfrom flex.datastructures import (\n ValidationList,\n)\nfrom flex.validation.common import (\n generate_object_validator,\n)\nfrom flex.decorators import (\n skip_if_not_of_type,\n skip_if_empty,\n)\n\n\n@skip_if_empty\n@skip_if_not_of_type(STRING)\ndef validate_reference(reference, context, **kwargs):\n try:\n parts = urlparse.urlparse(reference)\n if parts.path:\n from flex.core import load_source\n if parts.path.startswith('/'):\n context = load_source(parts.path)\n elif 'base_path' in kwargs:\n context = load_source(os.path.join(kwargs['base_path'], parts.path))\n jsonpointer.resolve_pointer(context, parts.fragment)\n except jsonpointer.JsonPointerException:\n raise ValidationError(MESSAGES['reference']['undefined'].format(reference))\n\n\nref_schema = {\n 'type': STRING,\n}\n\nnon_field_validators = ValidationList()\nnon_field_validators.add_validator(validate_reference)\n\nref_validator = generate_object_validator(\n schema=ref_schema,\n non_field_validators=non_field_validators,\n)\n","sub_path":"flex/loading/schema/paths/path_item/operation/responses/single/schema/ref.py","file_name":"ref.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"322698914","text":"# _*_ coding : UTF-8 _*_\n# 开发团队 : 当场发财科技\n# 开发人员 : shenglan\n# 开发时间 : 2020-08-09 20:10\n# 文件名称 : dp PY\n# 开发工具 : PyCharm\n\n# 最小路径和\nclass Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n if not grid:\n return 0\n dp = grid\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if i == 0 and j == 0:\n continue\n elif i == 0 and j != 0:\n dp[i][j] = dp[i][j-1] + grid[i][j]\n elif i != 0 and j == 0:\n dp[i][j] = dp[i-1][j] + grid[i][j]\n else:\n dp[i][j] = min(dp[i][j-1]+grid[i][j], dp[i-1][j]+grid[i][j])\n return dp[-1][-1]\n\n# 解码方法\nclass Solution:\n def numDecodings(self, s: str) -> int:\n pp, p = 1, int(s[0] != '0')\n for i in range(1, len(s)):\n pp, p = p, pp * (9 < int(s[i-1:i+1]) <= 26) + p * (int(s[i]) > 0)\n return p\n\n# 最大正方形\nclass Solution:\n def maximalSquare(self, matrix) -> int:\n if not matrix:\n return 0\n res = 0 # 记录结果\n # 定义dp数组,每个元素代表当前位置可以达到的最大的正方形的边长\n dp = [[0 for _ in range(len(matrix[0]) + 1)] for _ in range(len(matrix) + 1)]\n for i in range(1, len(dp)):\n for j in range(1, len(dp[0])):\n if matrix[i - 1][j - 1] == '1':\n dp[i][j] = min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1]) + 1\n res = max(res, dp[i][j])\n 
return pow(res, 2)\n\n# 回文字串\nclass Solution:\n def countSubstrings(self, s: str) -> int:\n if not s: return 0\n res = len(s)\n dp = [[i,i+1] for i in range(len(s))]\n for i in range(1, len(s)):\n for j in dp[i-1]:\n if j-1 >= 0 and s[j-1] == s[i]:\n res += 1\n dp[i].append(j-1)\n return res\n\n# 最长有效括号\nclass Solution:\n def longestValidParentheses(self, s: str) -> int:\n res=[]\n stack=[]\n for i in range(len(s)):\n if(stack and s[i]==\")\"):\n res.append(stack.pop())\n res.append(i)\n if(s[i]==\"(\"):\n stack.append(i)\n #print(res)\n res.sort()\n max_len=0\n i=0\n while(i int:\n n1 = len(word1)\n n2 = len(word2)\n dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]\n # 第一行\n for j in range(1, n2 + 1):\n dp[0][j] = dp[0][j-1] + 1\n # 第一列\n for i in range(1, n1 + 1):\n dp[i][0] = dp[i-1][0] + 1\n for i in range(1, n1 + 1):\n for j in range(1, n2 + 1):\n if word1[i-1] == word2[j-1]:\n dp[i][j] = dp[i-1][j-1]\n else:\n dp[i][j] = min(dp[i][j-1], dp[i-1][j], dp[i-1][j-1] ) + 1\n #print(dp)\n return dp[-1][-1]\n\n","sub_path":"Week_04/dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"271749230","text":"#!/usr/bin/env python3\nimport functools\nimport sys\nimport os\nimport traceback\nimport argparse\nimport inspect\n\nfrom flask import Flask, request, jsonify, make_response, render_template, abort, Response\ntry:\n import coffeescript\nexcept ImportError:\n coffeescript = None\n\n# modified version of http://stackoverflow.com/a/6655098\nif __name__ == \"__main__\" and __package__ is None:\n # The following assumes the script is in the top level of the package\n # directory. We use dirname() to help get the parent directory to add to\n # sys.path, so that we can import the current package. This is necessary\n # since when invoked directly, the 'current' package is not automatically\n # imported.\n parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n sys.path.insert(1, parent_dir)\n # noinspection PyUnresolvedReferences\n import stepic_plugins\n\n __package__ = str(\"stepic_plugins\")\n\nfrom stepic_plugins.base import load_by_name\nfrom stepic_plugins.exceptions import FormatError\n\napp = Flask(__name__)\n\n\nclass Storage(object):\n def __init__(self):\n self.quiz_name = None\n self.quiz_class = None\n self.quiz = None\n self.dataset = None\n self.dataset_created = False\n self.clue = None\n\nSTORE = Storage()\n\n\nclass InconsistentStateError(Exception):\n pass\n\n\ndef jsbin_view(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except FormatError as e:\n traceback.print_exc()\n return make_response(\"FormatError: \" + str(e), 400)\n except InconsistentStateError as e:\n return make_response(\"InconsistentStateError: \" + str(e), 400)\n except Exception as e:\n traceback.print_exc()\n return make_response(\"Exception! 
Check console output\", 500)\n\n return wrapper\n\n\n@app.route(\"/\")\ndef index():\n name = STORE.quiz_name\n quiz_name_camelized = ''.join(x.capitalize() for x in name.split('-'))\n return render_template(\n 'index.html', quiz_name=name, quiz_name_camelized=quiz_name_camelized)\n\n@app.route(\"/quiz/static/\")\ndef quiz_static(file):\n def read_file(file):\n base_dir = os.path.dirname(inspect.getabsfile(STORE.quiz_class.wrapped_class))\n path = os.path.join(base_dir, file)\n return open(path).read() if os.path.isfile(path) else None\n\n mime_map = [\n ({'show.js', 'edit.js', 'show.coffee', 'edit.coffee'}, 'application/javascript'),\n ({'show.hbs', 'edit.hbs'}, 'text/x-handlebars-template'),\n ({'style.css'}, 'text/css')\n ]\n for files, mime in mime_map:\n if file in files:\n mimetype = mime\n break\n else:\n mimetype = None\n\n if not mimetype:\n abort(404)\n\n body = read_file(file)\n if body is None and file.endswith('.js'):\n coffee_source = read_file(file.replace('.js', '.coffee'))\n if coffee_source is not None:\n if not coffeescript:\n raise Exception(\"coffeescript module is required to compile coffeescript\")\n body = coffeescript.compile(coffee_source)\n\n if body is None:\n return Response(\"Can't find {} file!\".format(file), status=404, mimetype='text/plain')\n\n return Response(body, mimetype=mimetype)\n\n\n@app.route(\"/quiz/\", methods=['POST'])\n@jsbin_view\ndef create_quiz():\n global STORE\n if request.method == 'POST':\n quiz = STORE.quiz_class(request.json)\n supplementary = quiz.async_init()\n if supplementary:\n STORE.quiz = STORE.quiz_class(request.json, supplementary)\n else:\n STORE.quiz = STORE.quiz_class(request.json)\n return 'OK'\n\n\n@app.route(\"/quiz/attempt/\", methods=['POST'])\n@jsbin_view\ndef attempt():\n global STORE\n if not STORE.quiz:\n raise InconsistentStateError(\"Quiz should be created first\\n\"\n \"Have you pressed `Update Quiz` button?\")\n\n STORE.dataset, STORE.clue = STORE.quiz.generate() or (None, None)\n STORE.dataset_created = True\n return jsonify(**STORE.dataset) if STORE.dataset else jsonify({'dataset': ''})\n\n\n@app.route(\"/quiz/submission/\", methods=['POST'])\n@jsbin_view\ndef submit():\n global STORE\n if not STORE.dataset_created:\n raise InconsistentStateError(\"Dataset should be created first\\n\"\n \"Have you pressed `New Attempt` button?\")\n\n reply = request.json\n reply = STORE.quiz.clean_reply(reply, STORE.dataset)\n (score, hint) = STORE.quiz.check(reply, STORE.clue)\n return jsonify(\n score=score,\n hint=hint\n )\n\n\ndef start_server(quiz_name):\n STORE.quiz_name = quiz_name\n STORE.quiz_class = load_by_name(quiz_name)\n app.run(host='0.0.0.0', debug=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"quiz_name\")\n args = parser.parse_args()\n start_server(args.quiz_name)\n","sub_path":"stepic_plugins/dev-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"324297678","text":"import sys\nimport os\nimport datetime\nimport time\n\nfrom TestCase.TestBase import ParametrizedTestCase\nfrom Utils.YamlUtil import getYam\nfrom Utils.Variables import username, password, case_yaml_path, fs\nfrom Pages.LoginPage import LoginPage\nfrom Pages.RealTimeMonitorPage import RealTimeMonitorPage\n\nPATH = lambda p: os.path.abspath(\n os.path.join(os.path.dirname(__file__), p)\n)\n\n\nclass TestRealTimeMonitorList(ParametrizedTestCase):\n\n def 
test_001_fuzzy_search(self):\n \"\"\"\n Test fuzzy search\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][0]\n self.testinfo = self.testcase[\"title\"]\n self.part_vin = self.testcase[\"part_vin\"]\n self.full_vin = self.testcase[\"full_vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_real_time_data_and_switch_to_list(self.part_vin)\n self.result = self.page.page.assert_text_exist(self.full_vin)\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Fuzzy search successfully.\")\n\n def test_002_invalid_search(self):\n \"\"\"\n Test invalid search\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][1]\n self.testinfo = self.testcase[\"title\"]\n self.part_vin = self.testcase[\"part_vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_real_time_data(self.part_vin)\n self.page.page.click_button(self.page.search_btn)\n self.result = self.page.page.assert_toast_text(self.page.toast_search, \"请输入更多连续关键字\")\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Invalid search failed.\")\n\n def test_003_searched_vehicle_not_in_map(self):\n \"\"\"\n Test searching vehicle not in map\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][2]\n self.testinfo = self.testcase[\"title\"]\n self.log.build_start_line(self.case_name)\n self.page.go_to_list_page()\n vin = self.page.get_vehicle_list_info([\"vin\"])['vin']\n if self.platform != 'android':\n vin = vin.replace(':', ':').split(':')[1]\n self.page.go_to_map_page()\n time.sleep(1)\n for i in range(3):\n self.page.page.swipe_right()\n self.page.search_real_time_data(vin, zoomlevel=0)\n self.page.page.click_button(self.page.search_btn)\n if self.platform == 'android':\n self.page.go_to_list_page()\n self.result = self.page.page.assert_text_exist(\"车辆数据为空\")\n else:\n self.result = self.page.page.assert_text_exist(\"搜索结果为空\")\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Search vehicle which is not in map failed.\")\n\n def test_004_cancel_search_real_time_data(self):\n \"\"\"\n Click return button to cancel search\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][3]\n self.testinfo = self.testcase[\"title\"]\n self.log.build_start_line(self.case_name)\n self.page.go_to_search_page()\n self.page.go_back()\n self.result = self.page.page.assert_element_exist(self.page.zoomout_icon)\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Cancel search by clicking return button.\")\n\n def test_005_check_vehicle_list_info(self):\n \"\"\"\n Check vehicle information in vehicle list page\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][4]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.model_name = self.testcase[\"model_name\"]\n self.vehicle_plate_number = self.testcase[\"vehicle_plate_number\"]\n self.log.build_start_line(self.case_name)\n self.page.search_real_time_data_and_switch_to_list(self.vin)\n self.vehicle_list_info = self.page.get_vehicle_list_info()\n if self.vehicle_list_info != {}:\n try:\n self.assertIn(self.vin, 
self.vehicle_list_info[\"vin\"], msg=\"\")\n self.assertIn(self.model_name, self.vehicle_list_info[\"model_name\"], msg=\"\")\n self.assertIn(self.vehicle_plate_number, self.vehicle_list_info[\"vehicle_plate_number\"], msg=\"\")\n self.assertNotEqual(self.vehicle_list_info[\"electric\"], \"\", msg=\"\")\n self.assertNotEqual(self.vehicle_list_info[\"vehicle_state\"], \"\", msg=\"\")\n self.result = True\n except:\n self.result = False\n else:\n self.result = False\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Vehicle info is as expected in list page.\")\n\n def test_006_check_vehicle_basic_info(self):\n \"\"\"\n Check vehicle basic information in vehicle detail page\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][5]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_real_time_data_and_switch_to_list(self.vin)\n self.vehicle_list_info = self.page.get_vehicle_list_info()\n print(\"Vehicle information on list page: \", self.vehicle_list_info)\n self.page.go_to_detail_page(self.vin)\n self.vehicle_detail_info = self.page.get_vehicle_detail_info([\"vin_title\", \"model_name_detail\",\n \"vehicle_plate_number_detail\", \"electric_detail\",\n \"vehicle_mileage\", \"vehicle_condition\"])\n print(\"Vehicle information on detail page: \", self.vehicle_detail_info)\n if self.vehicle_detail_info != {}:\n try:\n self.assertIn(self.vin, self.vehicle_detail_info[\"vin_title\"], msg=\"Vin in title does not match.\")\n self.assertIn(self.vehicle_detail_info[\"model_name_detail\"], self.vehicle_list_info[\"model_name\"],\n msg=\"Vehicle model name does not match.\")\n self.assertIn(self.vehicle_detail_info[\"vehicle_plate_number_detail\"],\n self.vehicle_list_info[\"vehicle_plate_number\"],\n msg=\"Vehicle plate number does not match.\")\n self.assertIn(self.vehicle_detail_info[\"electric_detail\"], self.vehicle_list_info[\"electric\"],\n msg=\"Vehicle electric state does not match.\")\n self.assertNotEqual(self.vehicle_detail_info[\"vehicle_mileage\"], \"--\",\n msg=\"Vehicle mileage is not available.\")\n self.assertNotEqual(self.vehicle_detail_info[\"vehicle_condition\"], \"--\",\n msg=\"Vehicle condition is not available.\")\n self.result = True\n except Exception as e:\n self.result = False\n raise e\n else:\n self.result = False\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Vehicle basic info is as expected in vehicle detail page.\")\n\n def test_007_vehicle_basic_info_map(self):\n \"\"\"\n Check map in vehicle basic info page\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][6]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_real_time_data_and_switch_to_list(self.vin)\n self.page.go_to_detail_page(self.vin)\n time.sleep(1)\n before = self.screenshot_path + \"RTM_List_test007_map_before_move.png\"\n self.page.page.save_screenshot(before)\n self.page.move_vehicle_basic_info_map()\n after = self.screenshot_path + \"RTM_List_test007_map_after_move.png\"\n self.page.page.save_screenshot(after)\n self.result = not (self.page.page.compare_image(before, after))\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n 
self.assertTrue(self.result, msg=\"Map in vehicle basic info page is as expected.\")\n\n def test_008_check_vehicle_monitor_info(self):\n \"\"\"\n Check vehicle monitor information in vehicle detail page\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][7]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_and_go_to_detail_page(self.vin)\n self.vehicle_detail_info = self.page.get_vehicle_detail_info([\"vehicle_condition\", \"vehicle_mileage\"])\n self.vehicle_monitor_info = self.page.get_vehicle_monitor_info([\"vehicle_condition_monitor\",\n \"vehicle_mileage_monitor\"])\n if self.vehicle_detail_info != {} and self.vehicle_monitor_info != {}:\n try:\n self.assertIn(self.vehicle_detail_info[\"vehicle_condition\"],\n self.vehicle_monitor_info[\"vehicle_condition_monitor\"],\n msg=\"vehicle condition doesn`t match.\")\n self.assertIn(self.vehicle_monitor_info[\"vehicle_mileage_monitor\"],\n self.vehicle_detail_info[\"vehicle_mileage\"],\n msg=\"vehicle mileage doesn`t match.\")\n self.result = True\n except AssertionError as e:\n self.result = False\n raise e\n else:\n self.result = False\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Vehicle monitor info is as expected.\")\n\n def test_009_show_hide_vehicle_monitor_info(self):\n \"\"\"\n Show/hide menu in vehicle monitor page\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][8]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_and_go_to_detail_page(self.vin)\n self.page.go_to_vehicle_monitor_tab()\n # verify all menus are displayed\n monitor_data = self.page.get_menu_status()\n if monitor_data != {}:\n self.result = True\n for data, state in monitor_data.items():\n if state is False:\n self.result = False\n break\n if self.result is True:\n # Hide all menus, verify all menus are hidden\n self.page.show_hide_vehicle_monitor_info()\n if self.platform == \"android\":\n monitor_data = self.page.check_vehicle_monitor_data_availability(expect=False)\n print(\"monitor_data\", monitor_data)\n for data, state in monitor_data.items():\n self.result = not state\n self.log.logger.error(\"Check availability: %s failed on android.\" % data)\n else:\n self.result = self.page.verify_empty_list_on_ios()\n else:\n self.result = False\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Show/Hide vehicle monitor info is as expected.\")\n\n def test_010_show_vehicle_monitor_info_again(self):\n \"\"\"\n Show/hide menu in vehicle monitor page\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][9]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_and_go_to_detail_page(self.vin)\n self.page.go_to_vehicle_monitor_tab()\n # verify all menus are displayed\n monitor_data = self.page.get_menu_status()\n if monitor_data != {}:\n for v in monitor_data.values():\n if v is True:\n self.result = True\n else:\n self.result = False\n break\n if self.result is True:\n self.page.show_hide_vehicle_monitor_info()\n time.sleep(1)\n availability = self.page.show_menu_and_check()\n for v in 
availability.values():\n if v is True:\n pass\n else:\n self.result = False\n break\n else:\n self.result = False\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Show vehicle monitor info again is as expected.\")\n\n def test_011_vehicle_track_info(self):\n \"\"\"\n Check vehicle track information in vehicle detail page\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][10]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_and_go_to_detail_page(self.vin)\n self.vehicle_detail_info = self.page.get_vehicle_detail_info([\"last_update_time\"])\n self.page.go_to_vehicle_track_tab()\n self.vehicle_monitor_info = self.page.get_longtitude_and_latitude()\n # self.vehicle_monitor_info = self.page.get_vehicle_monitor_info([\"vehicle_speed_monitor\", \"longtitude_monitor\",\n # \"latitude_monitor\"])\n self.vehicle_track_info = self.page.get_vehicle_track_info()\n if self.vehicle_monitor_info != {} and self.vehicle_track_info != {}:\n try:\n # self.assertEqual(self.vehicle_detail_info[\"last_update_time\"], self.vehicle_track_info[\"track_update_time\"],\n # msg=\"Last update time does not match.\")\n self.assertIn(self.vehicle_monitor_info[\"vehicle_speed_monitor\"], self.vehicle_track_info[\"speed_track\"],\n msg=\"Vehicle speed does not match.\")\n self.assertIn(self.vehicle_monitor_info[\"longtitude_monitor\"], self.vehicle_track_info[\"longtitude_track\"],\n msg=\"Longtitude does not match.\")\n self.assertIn(self.vehicle_monitor_info[\"latitude_monitor\"], self.vehicle_track_info[\"latitude_track\"],\n msg=\"Latitude does not match.\")\n self.result = True\n except AssertionError as e:\n self.result = False\n raise e\n else:\n self.result = False\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Vehicle track info is as expected.\")\n\n def test_012_vehicle_trace_info(self):\n \"\"\"\n Check vehicle trace information in vehicle detail page\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][11]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_real_time_data_and_switch_to_list(self.vin)\n self.page.go_to_detail_page(self.vin)\n last_update_time = self.page.get_vehicle_detail_info([\"last_update_time\"])[\"last_update_time\"]\n self.page.go_to_trace_playback_tab()\n self.page.search_vehicle_trace_info(end=last_update_time)\n self.result = self.page.get_vehicle_trace_info()\n self.testcase[\"msg\"] = \"Please check the screenshot manually.\"\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Vehicle trace info is as expected.\")\n\n def test_013_vehicle_trace_invalid_time_box(self):\n \"\"\"\n Search vehicle trace with invalid time\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][12]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_real_time_data_and_switch_to_list(self.vin)\n self.page.go_to_detail_page(self.vin)\n self.page.go_to_trace_playback_tab()\n self.page.set_time_box('start', value='Future', search=False)\n if self.platform == 'android':\n 
self.result = self.page.page.assert_toast_text(self.page.toast_time, \"开始时间不能大于当前时间\")\n else:\n date = self.page.page.get_text(self.page.start_time_value)\n date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M')\n if (datetime.datetime.today() - date).days == 0:\n self.result = True\n else:\n self.result = False\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Search vehicle trace failed with invalid time\")\n\n def test_014_vehicle_trace_empty_time_box(self):\n \"\"\"\n Search vehicle trace with empty time box\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][13]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_real_time_data_and_switch_to_list(self.vin)\n self.page.go_to_detail_page(self.vin)\n self.page.go_to_trace_playback_tab()\n self.page.page.click_button(self.page.search_time)\n self.result = self.page.page.assert_toast_text(self.page.toast_time, \"请选择开始时间\")\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Search vehicle trace failed with empty time box.\")\n\n def test_015_clear_vehicle_trace_search_box(self):\n \"\"\"\n Clear time box by clicking clear button\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][14]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n self.page.search_real_time_data_and_switch_to_list(self.vin)\n self.page.go_to_detail_page(self.vin)\n self.page.go_to_trace_playback_tab()\n if self.platform == \"android\":\n # search end time: today, search start time: yesterday\n end_time = datetime.datetime.today()\n start_time = end_time - datetime.timedelta(days=1)\n self.page.search_vehicle_trace_info(start=str(start_time), end=str(end_time), search=False)\n else:\n self.page.vehicle_trace_time_ios()\n if self.platform == \"android\":\n start_before_clear = self.page.page.get_text(self.page.start_time)\n end_before_clear = self.page.page.get_text(self.page.end_time)\n self.page.clear_vehicle_trace_search_criteria()\n start_after_clear = self.page.page.get_text(self.page.start_time)\n end_after_clear = self.page.page.get_text(self.page.end_time)\n else:\n start_before_clear = self.page.page.get_text(self.page.start_time_value)\n end_before_clear = self.page.page.get_text(self.page.end_time_value)\n self.page.clear_vehicle_trace_search_criteria()\n start_after_clear = self.page.page.get_text(self.page.start_time_value)\n end_after_clear = self.page.page.get_text(self.page.end_time_value)\n try:\n self.assertNotEqual(start_before_clear, start_after_clear)\n self.assertNotEqual(end_before_clear, end_after_clear)\n self.result = True\n except AssertionError as e:\n self.result = False\n raise e\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Clear vehicle trace search time successfully by clicking clear button.\")\n\n def test_016_return_to_list(self):\n \"\"\"\n Return button on vehicle detail page\n \"\"\"\n self.case_name = sys._getframe().f_code.co_name\n self.testcase = self.testCases[\"testcase\"][15]\n self.testinfo = self.testcase[\"title\"]\n self.vin = self.testcase[\"vin\"]\n self.log.build_start_line(self.case_name)\n 
self.page.search_real_time_data_and_switch_to_list(self.vin)\n self.page.go_to_detail_page(self.vin)\n self.page.go_back()\n self.result = self.page.page.assert_element_exist(self.page.vin)\n self.log.logger.info(\"Test case %s result: %s\" % (self.testinfo, self.result))\n self.assertTrue(self.result, msg=\"Return to list page successfully by clicking return button.\")\n\n @classmethod\n def setUpClass(cls):\n super(TestRealTimeMonitorList, cls).setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n super(TestRealTimeMonitorList, cls).tearDownClass()\n\n def setUp(self):\n login_param = {\"log\": self.log, \"driver\": self.driver, \"device\": self.device_param,\n \"elements\": \"loginPageElement.yaml\"}\n login_page = LoginPage(login_param)\n login_page.login_for_run(username, password)\n param = {\"log\": self.log, \"driver\": self.driver, \"case\": \"RealTimeMonitorList.yaml\",\n \"elements\": \"RealTimeMonitorPageElement.yaml\", \"device\": self.device_param,\n \"launch_app\": 1}\n self.testCases = getYam(case_yaml_path + param[\"case\"])\n self.page = RealTimeMonitorPage(param)\n self.screenshot_path = str(self.device_param[\"log\"]) + fs + \"screenshot\" + fs\n\n def tearDown(self):\n super(TestRealTimeMonitorList, self).tearDown()\n self.log.build_end_line(self.case_name)\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"TestCase/TestRealTimeMonitorList.py","file_name":"TestRealTimeMonitorList.py","file_ext":"py","file_size_in_byte":22798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"106613970","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 11 15:07:37 2020\n\n@author: Bill\n\"\"\"\nfrom train import NetWithLoss, CustomDataParallel, MultiBoxLoss, prepare_data\n\n#\n# I import Bolya's yolact/data subdirectory as D. This will pick\n# up information from config.py. 
\n#\nimport data as D \n\nfrom utils.augmentations import SSDAugmentation, FastBaseTransform #, BaseTransform\nfrom utils.functions import SavePath\nimport torch\nfrom yolact import Yolact\n#from eval import prep_display # oops no, clone and modify here as local_prep_display\nfrom layers.output_utils import postprocess, undo_image_transformation\nimport torch.nn as nn\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nimport copy\nfrom utils import timer\nimport cv2\nimport random\n\n#from torch.autograd import Variable\nfrom collections import defaultdict\ncolor_cache = defaultdict(lambda: {})\n\n\nclass NetWithLossPreds(nn.Module):\n \"\"\"\n A wrapper for running the network and computing the loss\n This is so we can more efficiently use DataParallel.\n \"\"\"\n \n def __init__(self, net:Yolact, criterion:MultiBoxLoss):\n super().__init__()\n\n self.net = net\n self.criterion = criterion\n \n def forward(self, images, targets, masks, num_crowds):\n preds = self.net(images)\n losses = self.criterion(self.net, preds, targets, masks, num_crowds)\n return losses, preds\n\n\ndef local_evalimage(net:Yolact, path:str, save_path:str=None):\n frame = torch.from_numpy(cv2.imread(path)).cuda().float()\n print('frame size is', frame.size())\n batch = FastBaseTransform()(frame.unsqueeze(0))\n print('Batch size is',batch.size())\n preds = net(batch)\n\n img_numpy = local_prep_display(preds, frame, None, None, undo_transform=False)\n \n if save_path is None:\n img_numpy = img_numpy[:, :, (2, 1, 0)]\n\n if save_path is None:\n plt.imshow(img_numpy)\n plt.title(path)\n plt.show()\n else:\n cv2.imwrite(save_path, img_numpy)\n\n\ndef local_prep_display(dets_out, img, h, w, undo_transform=True, class_color=False, mask_alpha=0.45, fps_str=''):\n \"\"\"\n Note: If undo_transform=False then im_h and im_w are allowed to be None.\n \n I don't have args available, so I need to create it and fill it with the defaults. 
\n \n \n \"\"\"\n# print('local_prep_display, type(dets_out) is',type(dets_out))\n \n top_k = 5\n score_threshold = 0.0\n display_masks = True\n display_text = True\n display_bboxes = True\n display_scores = True\n display_fps = False\n\n if undo_transform:\n img_numpy = undo_image_transformation(img, w, h)\n img_gpu = torch.Tensor(img_numpy).cuda()\n else:\n img_gpu = img / 255.0\n h, w, _ = img.shape\n \n with timer.env('Postprocess'):\n save =D.cfg.rescore_bbox\n D.cfg.rescore_bbox = True\n# t = postprocess(dets_out, w, h, visualize_lincomb = args.display_lincomb,\n# crop_masks = args.crop,\n# score_threshold = args.score_threshold)\n t = postprocess(dets_out, w, h, visualize_lincomb = False,\n crop_masks = True,\n score_threshold = score_threshold)\n\n D.cfg.rescore_bbox = save\n\n with timer.env('Copy'):\n# idx = t[1].argsort(0, descending=True)[:args.top_k]\n idx = t[1].argsort(0, descending=True)[:top_k]\n \n if D.cfg.eval_mask_branch:\n # Masks are drawn on the GPU, so don't copy\n masks = t[3][idx]\n classes, scores, boxes = [x[idx].cpu().detach().numpy() for x in t[:3]]\n\n num_dets_to_consider = min(top_k, classes.shape[0])\n for j in range(num_dets_to_consider):\n if scores[j] < score_threshold:\n num_dets_to_consider = j\n break\n\n # Quick and dirty lambda for selecting the color for a particular index\n # Also keeps track of a per-gpu color cache for maximum speed\n def get_color(j, on_gpu=None):\n global color_cache\n color_idx = (classes[j] * 5 if class_color else j * 5) % len(D.COLORS)\n \n if on_gpu is not None and color_idx in color_cache[on_gpu]:\n return color_cache[on_gpu][color_idx]\n else:\n color = D.COLORS[color_idx]\n if not undo_transform:\n # The image might come in as RGB or BRG, depending\n color = (color[2], color[1], color[0])\n if on_gpu is not None:\n color = torch.Tensor(color).to(on_gpu).float() / 255.\n color_cache[on_gpu][color_idx] = color\n return color\n\n # First, draw the masks on the GPU where we can do it really fast\n # Beware: very fast but possibly unintelligible mask-drawing code ahead\n # I wish I had access to OpenGL or Vulkan but alas, I guess Pytorch tensor operations will have to suffice\n if display_masks and D.cfg.eval_mask_branch and num_dets_to_consider > 0:\n # After this, mask is of size [num_dets, h, w, 1]\n masks = masks[:num_dets_to_consider, :, :, None]\n \n # Prepare the RGB images for each mask given their color (size [num_dets, h, w, 1])\n colors = torch.cat([get_color(j, on_gpu=img_gpu.device.index).view(1, 1, 1, 3) for j in range(num_dets_to_consider)], dim=0)\n masks_color = masks.repeat(1, 1, 1, 3) * colors * mask_alpha\n\n # This is 1 everywhere except for 1-mask_alpha where the mask is\n inv_alph_masks = masks * (-mask_alpha) + 1\n \n # I did the math for this on pen and paper. 
This whole block should be equivalent to:\n # for j in range(num_dets_to_consider):\n # img_gpu = img_gpu * inv_alph_masks[j] + masks_color[j]\n masks_color_summand = masks_color[0]\n if num_dets_to_consider > 1:\n inv_alph_cumul = inv_alph_masks[:(num_dets_to_consider-1)].cumprod(dim=0)\n masks_color_cumul = masks_color[1:] * inv_alph_cumul\n masks_color_summand += masks_color_cumul.sum(dim=0)\n\n img_gpu = img_gpu * inv_alph_masks.prod(dim=0) + masks_color_summand\n \n if display_fps:\n # Draw the box for the fps on the GPU\n font_face = cv2.FONT_HERSHEY_DUPLEX\n font_scale = 0.6\n font_thickness = 1\n\n text_w, text_h = cv2.getTextSize(fps_str, font_face, font_scale, font_thickness)[0]\n\n img_gpu[0:text_h+8, 0:text_w+8] *= 0.6 # 1 - Box alpha\n\n\n # Then draw the stuff that needs to be done on the cpu\n # Note, make sure this is a uint8 tensor or opencv will not anti alias text for whatever reason\n img_numpy = (img_gpu * 255).byte().cpu().numpy()\n\n if display_fps:\n # Draw the text on the CPU\n text_pt = (4, text_h + 2)\n text_color = [255, 255, 255]\n\n cv2.putText(img_numpy, fps_str, text_pt, font_face, font_scale, text_color, font_thickness, cv2.LINE_AA)\n \n if num_dets_to_consider == 0:\n return img_numpy\n\n if display_text or display_bboxes:\n for j in reversed(range(num_dets_to_consider)):\n x1, y1, x2, y2 = boxes[j, :]\n color = get_color(j)\n score = scores[j]\n\n if display_bboxes:\n cv2.rectangle(img_numpy, (x1, y1), (x2, y2), color, 1)\n\n if display_text:\n _class = D.cfg.dataset.class_names[classes[j]]\n text_str = '%s: %.2f' % (_class, score) if display_scores else _class\n\n font_face = cv2.FONT_HERSHEY_DUPLEX\n font_scale = 0.6\n font_thickness = 1\n\n text_w, text_h = cv2.getTextSize(text_str, font_face, font_scale, font_thickness)[0]\n\n text_pt = (x1, y1 - 3)\n text_color = [255, 255, 255]\n\n cv2.rectangle(img_numpy, (x1, y1), (x1 + text_w, y1 - text_h - 4), color, -1)\n cv2.putText(img_numpy, text_str, text_pt, font_face, font_scale, text_color, font_thickness, cv2.LINE_AA)\n \n \n return img_numpy\n\ndef gradinator(x):\n x.requires_grad = False\n return x\n\ndef local_prepare_data(datum, devices:list=None, allocation:list=None):\n batch_size = 4\n with torch.no_grad():\n if devices is None:\n devices = ['cuda:0'] #if args.cuda else ['cpu']\n if allocation is None:\n# allocation = [args.batch_size // len(devices)] * (len(devices) - 1)\n allocation = [batch_size // len(devices)] * (len(devices) - 1)\n# allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less\n allocation.append(batch_size - sum(allocation)) # The rest might need more/less\n \n images, (targets, masks, num_crowds) = datum\n\n cur_idx = 0\n# print(len(images))\n for device, alloc in zip(devices, allocation):\n for _ in range(len(images)):\n# print('cur_idx is ',cur_idx)\n images[cur_idx] = gradinator(images[cur_idx].to(device))\n targets[cur_idx] = gradinator(targets[cur_idx].to(device))\n masks[cur_idx] = gradinator(masks[cur_idx].to(device))\n cur_idx += 1\n\n# if D.cfg.preserve_aspect_ratio:\n# # Choose a random size from the batch\n# _, h, w = images[random.randint(0, len(images)-1)].size()\n#\n# for idx, (image, target, mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):\n# images[idx], targets[idx], masks[idx], num_crowds[idx] \\\n# = enforce_size(image, target, mask, num_crowd, w, h)\n \n cur_idx = 0\n split_images, split_targets, split_masks, split_numcrowds \\\n = [[None for alloc in allocation] for _ in range(4)]\n\n for device_idx, alloc 
in enumerate(allocation):\n split_images[device_idx] = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)\n split_targets[device_idx] = targets[cur_idx:cur_idx+alloc]\n split_masks[device_idx] = masks[cur_idx:cur_idx+alloc]\n split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]\n\n cur_idx += alloc\n\n return split_images, split_targets, split_masks, split_numcrowds\n\n\n\ndef npscl(xin):\n x = copy.copy(xin)\n for i in range(x.shape[2]):\n xx = x[:,:,i]\n xmax = np.max(xx); xmin = np.min(xx)\n if xmax > xmin:\n x[:,:,i] = (xx-xmin)/(xmax-xmin)\n else:\n print('All the same!!',i)\n\n return x\n\ndef myshow(img):\n if img.size()[0] != 3:\n maskshow(img)\n \n ishow = img.cpu().numpy().transpose((1,2,0))\n ishow = (ishow-np.min(ishow))/(np.max(ishow) - np.min(ishow)).astype(np.int)\n plt.imshow(ishow)\n\ndef maskshow(img,pick=None):\n isize = img.size()\n if not pick:\n nmask = isize[0]\n pick = list(range(nmask))\n else:\n if type(pick) is not list:\n pick = list(pick)\n\n mask = np.zeros(isize[1:])\n img = img.cpu().detach().numpy()\n for i in pick:\n mask += img[i,:,:]\n \n plt.imshow(mask)\n rtn_size = list(mask.shape)\n rtn_size.append(1)\n return mask.reshape(rtn_size);\n\n#import copy\n\n#\n# If I don't wrap this in if _name__ == '__main__', it will usually\n# fail with a broken pipe error. Something to do with multiprocessing \n# on Windows. The phrase \"Forking pickler\" also appears in the \n# traceback, and if that doesn't make you giggle, go take a break. \n#\nif __name__ == '__main__':\n mode = 'eval'\n if mode == 'train':\n print('Testing net_cdp and custom loss...')\n \n if mode == 'eval':\n print('Testing net and preds...')\n \n# trained_model = 'weights/yolact_resnet50_54_800000.pth'\n# model_path = SavePath.from_str(trained_model)\n# force_config = model_path.model_name + '_config'\n# print(force_config)\n# D.set_cfg(force_config)\n#\n# \n# \n# D.set_cfg('yolact_resnet50_config') # what a fooking mess\n \n backend_I_want = 'Qt5Agg'\n #\n # Define a dataset and a DataLoader, and get one datum. \n #\n print('Before data set def, backend is',matplotlib.get_backend())\n dataset = D.COCODetection(image_path=D.cfg.dataset.train_images,\n info_file='./data/coco/annotations/milliCOCO.json',\n transform=SSDAugmentation(D.MEANS))\n \n img_ids = list(dataset.coco.imgToAnns.keys())\n \n# info_file=D.cfg.dataset.train_info,\n\n print('After data set def, backend is',matplotlib.get_backend())\n\n#\n# Defining datset somehow sometiems changes matplotlib backend to Agg, I \n# swear to God, so I reset it here if ncecessary. \n#\n if matplotlib.get_backend() != backend_I_want:\n print('WTF? Resetting matplotlib backend...')\n matplotlib.use(backend_I_want) \n \n batch_size = 4\n num_workers = 0\n \n data_loader = torch.utils.data.DataLoader(dataset, batch_size,\n num_workers=num_workers,\n shuffle=True, \n collate_fn=D.detection_collate,\n pin_memory=True)\n \n \n data_loader_iterator = iter(data_loader)\n \n# datum = next(data_loader_iterator) \n \n # datum itself is a list of 2 lists. The first has length batch_size and \n # contains images, the second has length 3 and contains targets, masks, \n # and num_crowds. \n #\n # images are 3x550x550 tensors, in a list of length batch_size.\n #\n # targets are nx5 tensors, what is n? in a list of length batch_size.\n #\n # masks are tensors, size kx550x550, list of length batch_size, where k is\n # the number of objects in the corresponding image. 
\n #\n # crowds are numpy.int32, in a list of length batch_size.\n # \n \n# images, (targets, masks, num_crowds) = datum\n # img, masks, boxes, labels = self.transform(img, masks, target[:, :4],\n# {'num_crowds': num_crowds, 'labels': target[:, 4]})\n\n #\n # Define a Yolact net, with a fancy combined prediction and loss\n # function, and use it to process datum. \n #\n net = Yolact()\n# net.init_weights(backbone_path='weights/' + D.cfg.backbone.path)\n print('loading weights/yolact_resnet50_54_800000.pth...')\n net.load_weights('weights\\\\yolact_resnet50_54_800000.pth')\n \n \n# net.eval()\n \n #D.yolact_resnet50_config how do I load this? \n \n net.detect.use_fast_nms = True\n net.detect.use_cross_class_nms = False\n criterion = MultiBoxLoss(num_classes=D.cfg.num_classes,\n pos_threshold=D.cfg.positive_iou_threshold,\n neg_threshold=D.cfg.negative_iou_threshold,\n negpos_ratio=D.cfg.ohem_negpos_ratio)\n\n if mode == 'eval':\n for i_img, img_id in enumerate(img_ids):\n net.eval()\n file_name = dataset.coco.loadImgs(img_id)[0]['file_name']\n if file_name.startswith('COCO'):\n file_name = file_name.split('_')[-1]\n \n img, gt, gt_masks, h, w, num_crowd = dataset.pull_item(i_img)\n batch = img.unsqueeze(0).cuda()\n print(type(batch), batch.size())\n preds = net(batch)\n img_numpy = local_prep_display(preds, img, h, w)\n plt.imshow(img_numpy)\n plt.pause(0.5)\n \n\n#-----------------------\n try:\n datum = next(data_loader_iterator)\n except StopIteration:\n break\n \n images, targets, masks, num_crowds = local_prepare_data(datum)\n net.train()\n predsT = net(images[0])\n losses = criterion(net, predsT, targets[0], masks[0], num_crowds[0])\n loss = sum([losses[k] for k in losses])\n print(loss)\n # no_inf_mean removes some components from the loss, so make sure to backward through all of it\n # all_loss = sum([v.mean() for v in losses.values()])\n\n # Backprop\n loss.backward() # Do this to free up vram even if loss is not finite\n#-----------------------\n\n\n\n#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ \n# if mode == 'train': \n# criterion = MultiBoxLoss(num_classes=D.cfg.num_classes,\n# pos_threshold=D.cfg.positive_iou_threshold,\n# neg_threshold=D.cfg.negative_iou_threshold,\n# negpos_ratio=D.cfg.ohem_negpos_ratio)\n# #\n# # \n# net.train()\n# net_cdp = CustomDataParallel(NetWithLoss(net, criterion))\n# net_cdp.cuda()\n# #\n## Follwing is lifted from train.py... \n## preds = self.net(images)\n## losses = self.criterion(self.net, preds, targets, masks, num_crowds)\n## How does it get images? I think self.net is just Yolact().train(). But it takes \n## images, rather than datum. \n##\n# # images, targets, masks, num_crowds = prepare_data...??\n# for idx, datum in enumerate(data_loader):\n## images, targets, masks, num_crowds = local_prepare_data(datum)\n#\n## preds = net(images)\n## losses = criterion(net, preds, targets, masks, num_crowds)\n# losses = net_cdp(datum)\n## for k,v in preds.items():\n## preds[k] = v.detach()\n# \n## losses = net_cdp(datum)\n## losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel\n# loss = sum([losses[k] for k in losses])\n# print('Loss',idx,'is',loss.item())\n# if idx > 0:\n# break\n#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ \n#\n# Below here I am trying to call the augmentation by hand, to quickly test\n# my work using a single datum. 
\n\n# tform=SSDAugmentation(D.MEANS)\n# \n# img = np.array(images[0].cpu()).transpose((1,2,0))\n# img0 = copy.copy(img)\n# mask = np.array(masks[0].cpu())\n# target = targets[0]\n# target = np.array(target.cpu())\n# nc = num_crowds[0]\n# img, mask, boxes, labels=\\\n# tform(img, mask, target[:, :4],\n# {'num_crowds': nc, 'labels': target[:, 4]})\n#\n# plt.imshow(npscl(img0))\n#\n# plt.imshow(npscl(img))\n#\n#\n\n\n\n","sub_path":"extract4trainer.py","file_name":"extract4trainer.py","file_ext":"py","file_size_in_byte":18359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"34605889","text":"from interfaces.application.hafailoverprovider import HAFailoverProvider\n\nimport urllib2\nimport boto3\nfrom botocore.client import Config\nfrom botocore.exceptions import EndpointConnectionError, ConnectTimeoutError\nfrom interfaces.gateway.retriesexceededexception import RetriesExceededException\nimport time\nimport logging\n\n\nclass AWS1FailoverGW(HAFailoverProvider):\n\n METADATA_URL_AZ = \"http://169.254.169.254/latest/meta-data/placement/availability-zone\"\n\n _ec2 = None\n _config = None\n\n _listeners = None\n\n _logger = None\n\n def __init__(self):\n self._config = Config(connect_timeout=120, read_timeout=120, retries={ \"max_attempts\": 4 })\n self._listeners = dict()\n self._logger = logging.getLogger(__name__)\n\n def initialize(self):\n regionName = self._regionName()\n self._ec2 = boto3.client('ec2', region_name=regionName, config=self._config)\n\n def _regionName(self):\n connection = None\n try:\n connection = urllib2.urlopen(self.METADATA_URL_AZ)\n availabilityZone = connection.read()\n except Exception as exception:\n self._logger.error(\"Exception thrown while starting...\", exc_info=True)\n raise exception\n connection.close()\n self._logger.info(\"Initialization completed successfully\")\n return availabilityZone[:-1]\n\n def failover(self, routeTableTagKey, routeTableTagValue, networkInterfaceTagKey, networkInterfaceTagValue):\n routeTable = self._fetchRouteTable(routeTableTagKey, routeTableTagValue)\n if not routeTable:\n self._logger.error(\"Route table with tag {0}:{1} not found. Not failing over\".format(routeTableTagKey, routeTableTagValue))\n return\n\n routes = self._buildRoutes(routeTableTagKey, routeTableTagValue, routeTable)\n if not routes:\n self._logger.error(\"No route in table with tag {0}:{1} was found. Not failing over\".format(routeTableTagKey, routeTableTagValue))\n return\n\n networkInterface = self._fetchNetworkInterface(networkInterfaceTagKey, networkInterfaceTagValue)\n if not networkInterface:\n self._logger.error(\"No network interface with tag {0}:{1} was found. 
Not failing over\".format(networkInterfaceTagKey, networkInterfaceTagValue))\n return\n\n self._replaceRoutes(routes, networkInterface[\"NetworkInterfaceId\"], routeTableTagKey, routeTableTagValue, networkInterfaceTagKey, networkInterfaceTagValue)\n\n def _fetchRouteTable(self, routeTableTagKey, routeTableTagValue):\n self._logger.debug(\"Fetching route table {0}:{1}\".format(routeTableTagKey, routeTableTagValue))\n try:\n response = self._ec2.describe_route_tables(\n Filters=[\n {\n 'Name': 'tag:{0}'.format(routeTableTagKey),\n 'Values': [ routeTableTagValue,]\n },\n ],\n )\n except (EndpointConnectionError, ConnectTimeoutError) as exception:\n raise RetriesExceededException(\"Retries exceeded fetching routes due to {0}\".format(exception))\n\n if not response[\"RouteTables\"]:\n return None\n routeTable = response[\"RouteTables\"][0]\n self._logger.debug(\"Route table tag {0}:{1} RouteTables {2} was fetched\".format(routeTableTagKey, routeTableTagValue, routeTable))\n return routeTable\n\n def _buildRoutes(self, routeTableTagKey, routeTableTagValue, routeTable):\n routes = list()\n for route in routeTable[\"Routes\"]:\n if route[\"Origin\"] != \"CreateRouteTable\": \n route.update({\"RouteTableId\": routeTable[\"RouteTableId\"]})\n routes.append(route)\n self._logger.debug(\"Route {0} of route table with tag {1}:{2} was built\".format(route, routeTableTagKey, routeTableTagValue))\n return routes\n\n def _fetchNetworkInterface(self, networkInterfaceTagKey, networkInterfaceTagValue):\n self._logger.debug(\"Fetching network interface {0}:{1}\".format(networkInterfaceTagKey, networkInterfaceTagValue))\n try:\n response = self._ec2.describe_network_interfaces(\n Filters=[\n {\n 'Name': 'tag:{0}'.format(networkInterfaceTagKey),\n 'Values': [ networkInterfaceTagValue,]\n },\n ],\n )\n except (EndpointConnectionError, ConnectTimeoutError) as exception:\n raise RetriesExceededException(\"Retries exceeded fetching network interface ID due to {0}\".format(exception))\n \n if not response[\"NetworkInterfaces\"]:\n return None\n networkInterface = response[\"NetworkInterfaces\"][0]\n self._logger.debug(\"Network interface {0} with tag {1}:{2} was fetched\".format(networkInterface, networkInterfaceTagKey, networkInterfaceTagValue))\n return networkInterface\n\n def _replaceRoutes(self, routes, networkInterfaceID, routeTableTagKey, routeTableTagValue, networkInterfaceTagKey, networkInterfaceTagValue):\n for route in routes:\n self._logger.info(\"Replacing target of CidrBlock {0} to {1} in route table {2}\".format(\n route[\"DestinationCidrBlock\"], networkInterfaceID, route[\"RouteTableId\"]))\n self._replaceRoute(route, networkInterfaceID, routeTableTagKey, routeTableTagValue, networkInterfaceTagKey, networkInterfaceTagValue)\n\n def _replaceRoute(self, route, networkInterfaceID, routeTableTagKey, routeTableTagValue, networkInterfaceTagKey, networkInterfaceTagValue):\n try:\n self._ec2.replace_route(\n RouteTableId=route[\"RouteTableId\"],\n NetworkInterfaceId=networkInterfaceID,\n DestinationCidrBlock=route[\"DestinationCidrBlock\"])\n self._listeners[\"onFailoverListener\"](route[\"DestinationCidrBlock\"], routeTableTagKey, routeTableTagValue, networkInterfaceTagKey, networkInterfaceTagValue)\n except (EndpointConnectionError, ConnectTimeoutError) as exception:\n raise RetriesExceededException(\"Retries exceeded replacing route due to {0}\".format(exception))\n self._logger.info(\"Route {0} target was replaced with network interface ID {1}\".format(route, networkInterfaceID))\n\n def 
addOnFailoverListener(self, listener):\n self._listeners[\"onFailoverListener\"] = listener","sub_path":"src/gateway/hafailoverprovider/aws1failovergw.py","file_name":"aws1failovergw.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"164377609","text":"#Created on 16/12/2020 by Lavinia Buleac\nfrom socket import *\nfrom sys import *\n\ntarget = argv[1]\n\n#i = 80 ----> for a specific port\nfor i in range(1,8000):\n\tsock = socket(AF_INET,SOCK_STREAM)\n\n\ttry:\n\t\tsock.connect((target,i))\n\t\tprint(\"Port %d is open\" %i)\n\t\tsock.close\n\n\texcept:\n\t\tprint(\"Port %d is closed\" %i)\n\t\tsock.close\n","sub_path":"ports.py","file_name":"ports.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"352248939","text":"import time\nimport gzip\nimport cPickle as pickle\n\nfrom prettytable import PrettyTable\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom nn.initialization import get_activation_by_name\nfrom nn.optimization import create_optimization_updates\nfrom nn.basic import LSTM, GRU, apply_dropout\nfrom nn.advanced import RCNN, GRNN\nfrom utils import io_utils\nfrom utils.io_utils import say\nfrom utils.eval import Evaluation\n\nPAD = \"\"\n\n\nclass Model(object):\n\n def __init__(self, args, emb_layer):\n self.args = args\n\n ##########\n # Layers #\n ##########\n self.emb_layer = emb_layer\n self.layers = []\n self.params = []\n\n ###################\n # Network options #\n ###################\n self.activation = args.activation\n self.n_d = args.hidden_dim\n self.n_e = emb_layer.n_d\n self.padding_id = emb_layer.vocab_map[PAD]\n self.dropout = theano.shared(np.float32(args.dropout).astype(theano.config.floatX))\n\n #########################\n # Input variable format #\n #########################\n self.idts = None\n self.idbs = None\n self.idps = None\n\n ###########################################################################################\n # Example idps: C0=Query_id, C1=Positive q_id, C2-20=Negative q_ids #\n # [[ 0 1 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 20 20] #\n # [ 0 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 20 20] #\n # [ 0 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 20 20] #\n # ... #\n # [ 42 43 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 62] #\n # [ 42 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 62] #\n # ... 
#\n # [105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125]] #\n ###########################################################################################\n\n #########################\n # Hidden representation #\n #########################\n self.xt = None\n self.xb = None\n self.ht = None\n self.hb = None\n self.h_final = None\n\n ######################\n # Training objective #\n ######################\n self.loss = None\n self.cost = None\n\n ##################\n # Testing scores #\n ##################\n self.scores = None\n\n def compile(self):\n self.set_input_format()\n self.set_layers(args=self.args, n_d=self.n_d, n_e=self.n_e)\n\n self.set_input_layer(idts=self.idts, idbs=self.idbs, embedding_layer=self.emb_layer,\n n_e=self.n_e, dropout=self.dropout)\n self.set_intermediate_layer(args=self.args, prev_ht=self.xt, prev_hb=self.xb, layers=self.layers)\n self.set_output_layer(args=self.args, ht=self.ht, hb=self.hb, dropout=self.dropout)\n\n self.set_params(layers=self.layers)\n self.set_loss(n_d=self.n_d, idps=self.idps, h_final=self.h_final)\n self.set_cost(args=self.args, params=self.params, loss=self.loss)\n\n self.set_scores(h_final=self.h_final)\n\n def set_input_format(self):\n # len(title) * batch\n self.idts = T.imatrix()\n\n # len(body) * batch\n self.idbs = T.imatrix()\n\n # num queries * candidate size\n self.idps = T.imatrix()\n\n def set_layers(self, args, n_d, n_e):\n activation = get_activation_by_name(args.activation)\n\n ##################\n # Set layer type #\n ##################\n if args.layer.lower() == \"rcnn\":\n layer_type = RCNN\n elif args.layer.lower() == \"lstm\":\n layer_type = LSTM\n elif args.layer.lower() == \"gru\":\n layer_type = GRU\n elif args.layer.lower() == \"grnn\":\n layer_type = GRNN\n\n ##############\n # Set layers #\n ##############\n for i in range(args.depth):\n if layer_type != RCNN:\n feature_layer = layer_type(\n n_in=n_e if i == 0 else n_d,\n n_out=n_d,\n activation=activation\n )\n else:\n feature_layer = layer_type(\n n_in=n_e if i == 0 else n_d,\n n_out=n_d,\n activation=activation,\n order=args.order,\n mode=args.mode,\n has_outgate=args.outgate\n )\n self.layers.append(feature_layer)\n\n def set_input_layer(self, idts, idbs, embedding_layer, n_e, dropout):\n # (len*batch)*n_e\n xt = embedding_layer.forward(idts.ravel())\n xb = embedding_layer.forward(idbs.ravel())\n\n # len*batch*n_e\n xt = xt.reshape((idts.shape[0], idts.shape[1], n_e))\n xb = xb.reshape((idbs.shape[0], idbs.shape[1], n_e))\n self.xt = apply_dropout(xt, dropout)\n self.xb = apply_dropout(xb, dropout)\n\n def set_intermediate_layer(self, args, prev_ht, prev_hb, layers):\n for i in range(args.depth):\n # len*batch*n_d\n ht = layers[i].forward_all(prev_ht)\n hb = layers[i].forward_all(prev_hb)\n prev_ht = ht\n prev_hb = hb\n\n # normalize vectors\n if args.normalize:\n ht = self.normalize_3d(ht)\n hb = self.normalize_3d(hb)\n\n # average over length, ignore paddings\n # batch * d\n if self.args.average:\n ht = self.average_without_padding(ht, self.idts)\n hb = self.average_without_padding(hb, self.idbs)\n else:\n ht = ht[-1]\n hb = hb[-1]\n\n self.ht = ht\n self.hb = hb\n\n def set_output_layer(self, args, ht, hb, dropout):\n # 1D: n_queries * n_cands, 2D: dim_h\n if args.body:\n h_final = (ht + hb) * 0.5\n else:\n h_final = ht\n\n h_final = apply_dropout(h_final, dropout)\n self.h_final = self.normalize_2d(h_final)\n\n def set_params(self, layers):\n for l in layers:\n self.params += l.params\n say(\"num of parameters: 
{}\\n\".format(sum(len(x.get_value(borrow=True).ravel()) for x in self.params)))\n\n def set_loss(self, n_d, idps, h_final):\n # 1D: n_queries, 2D: n_cands-1, 3D: dim_h\n xp = h_final[idps.ravel()]\n xp = xp.reshape((idps.shape[0], idps.shape[1], n_d))\n\n if self.args.loss == 'ce':\n self.cross_entropy(xp)\n elif self.args.loss == 'sbs':\n self.soft_bootstrapping(xp, self.args.beta)\n elif self.args.loss == 'hbs':\n self.hard_bootstrapping(xp, self.args.beta)\n else:\n self.max_margin(xp)\n\n def cross_entropy(self, xp):\n # num query * n_d\n query_vecs = xp[:, 0, :] # 3D -> 2D\n # 1D: n_queries, 2D: n_cands\n scores = T.sum(query_vecs.dimshuffle((0, 'x', 1)) * xp[:, 1:, :], axis=2)\n probs = T.nnet.softmax(scores)\n\n self.train_scores = T.argmax(scores, axis=1)\n self.loss = - T.mean(T.log(probs[:, 0]))\n\n def soft_bootstrapping(self, xp, beta=0.9):\n # num query * n_d\n query_vecs = xp[:, 0, :] # 3D -> 2D\n # 1D: n_queries, 2D: n_cands\n scores = T.sum(query_vecs.dimshuffle((0, 'x', 1)) * xp[:, 1:, :], axis=2)\n probs = T.nnet.softmax(scores)\n zeros = T.zeros(shape=(probs.shape[0], probs.shape[1]-1), dtype=theano.config.floatX)\n ones = T.ones(shape=(probs.shape[0], 1), dtype=theano.config.floatX)\n target = T.concatenate([ones, zeros], axis=1)\n\n self.train_scores = T.argmax(scores, axis=1)\n self.loss = - T.mean((beta * target + (1. - beta) * probs) * T.log(probs))\n\n def hard_bootstrapping(self, xp, beta=0.9):\n # num query * n_d\n query_vecs = xp[:, 0, :] # 3D -> 2D\n # 1D: n_queries, 2D: n_cands\n scores = T.sum(query_vecs.dimshuffle((0, 'x', 1)) * xp[:, 1:, :], axis=2)\n probs = T.nnet.softmax(scores)\n z = T.argmax(probs, axis=1)\n\n pos_probs = probs[:, 0]\n max_probs = probs[T.arange(z.shape[0]), z]\n pos_loss = (beta + (1. - beta) * pos_probs) * T.log(pos_probs)\n max_loss = (beta + (1. 
- beta) * max_probs) * T.log(max_probs)\n\n self.train_scores = T.argmax(scores, axis=1)\n self.loss = - T.mean(pos_loss + max_loss)\n\n def max_margin(self, xp):\n # 1D: n_queries, 2D: n_d\n query_vecs = xp[:, 0, :] # 3D -> 2D\n\n # 1D: n_queries, 2D: n_cands\n scores = T.sum(query_vecs.dimshuffle((0, 'x', 1)) * xp[:, 1:, :], axis=2)\n pos_scores = scores[:, 0]\n neg_scores = scores[:, 1:]\n neg_scores = T.max(neg_scores, axis=1)\n\n diff = neg_scores - pos_scores + 1.0\n self.loss = T.mean((diff > 0) * diff)\n self.train_scores = T.argmax(scores, axis=1)\n\n def set_cost(self, args, params, loss):\n l2_reg = None\n for p in params:\n if l2_reg is None:\n l2_reg = p.norm(2)\n else:\n l2_reg += p.norm(2)\n self.cost = loss + l2_reg * args.l2_reg\n\n def set_scores(self, h_final):\n # first one in batch is query, the rest are candidate questions\n self.scores = T.dot(h_final[1:], h_final[0])\n\n def train(self, ids_corpus, dev=None, test=None):\n say('\\nBuilding functions...\\n\\n')\n\n args = self.args\n\n batch_size = args.batch_size\n padding_id = self.padding_id\n\n updates, lr, gnorm = create_optimization_updates(\n cost=self.cost,\n params=self.params,\n lr=args.learning_rate,\n method=args.learning\n )[:3]\n\n train_func = theano.function(\n inputs=[self.idts, self.idbs, self.idps],\n outputs=[self.cost, self.loss, gnorm, self.train_scores],\n updates=updates,\n on_unused_input='ignore'\n )\n\n if self.args.attention:\n eval_func = theano.function(\n inputs=[self.idts, self.idbs, self.idps],\n outputs=self.scores,\n on_unused_input='ignore'\n )\n else:\n eval_func = theano.function(\n inputs=[self.idts, self.idbs],\n outputs=self.scores,\n on_unused_input='ignore'\n )\n\n say(\"\\tp_norm: {}\\n\".format(self.get_pnorm_stat()))\n\n result_table = PrettyTable([\"Epoch\", \"dev MAP\", \"dev MRR\", \"dev P@1\", \"dev P@5\"] +\n [\"tst MAP\", \"tst MRR\", \"tst P@1\", \"tst P@5\"])\n\n unchanged = 0\n best_dev = -1\n\n dev_MAP = dev_MRR = dev_P1 = dev_P5 = 0\n test_MAP = test_MRR = test_P1 = test_P5 = 0\n max_epoch = args.max_epoch\n\n for epoch in xrange(max_epoch):\n unchanged += 1\n if unchanged > 15:\n break\n\n start_time = time.time()\n\n train = io_utils.read_annotations(args.train, data_size=args.data_size)\n train_batches = io_utils.create_batches(ids_corpus, train, batch_size, padding_id, pad_left=not args.average)\n n_train_batches = len(train_batches)\n\n train_loss = 0.0\n train_cost = 0.0\n crr = 0.\n ttl = 0.\n\n for i in xrange(n_train_batches):\n # get current batch\n idts, idbs, idps = train_batches[i]\n\n cur_cost, cur_loss, grad_norm, preds = train_func(idts, idbs, idps)\n\n train_loss += cur_loss\n train_cost += cur_cost\n\n crr += len([s for s in preds if s == 0])\n ttl += len(preds)\n\n if i % 10 == 0:\n say(\"\\r{}/{}\".format(i, n_train_batches))\n\n if i == n_train_batches - 1:\n # Set the dropout prob for validating\n self.dropout.set_value(0.0)\n\n if dev is not None:\n dev_MAP, dev_MRR, dev_P1, dev_P5 = self.evaluate(dev, eval_func)\n if test is not None:\n test_MAP, test_MRR, test_P1, test_P5 = self.evaluate(test, eval_func)\n\n if dev_MRR > best_dev:\n unchanged = 0\n best_dev = dev_MRR\n result_table.add_row(\n [epoch] +\n [\"%.2f\" % x for x in [dev_MAP, dev_MRR, dev_P1, dev_P5] +\n [test_MAP, test_MRR, test_P1, test_P5]]\n )\n if args.save_model:\n self.save_model(args.save_model)\n\n # Set the dropout prob for training\n dropout_p = np.float32(args.dropout).astype(theano.config.floatX)\n self.dropout.set_value(dropout_p)\n\n say(\"\\r\\n\\n\")\n 
say((\"Epoch {}\\tcost={:.3f}\\tloss={:.3f}\" + \"\\tMRR={:.2f},{:.2f}\\t|g|={:.3f}\\t[{:.3f}m]\\n\").format(\n epoch,\n train_cost / (i + 1),\n train_loss / (i + 1),\n dev_MRR,\n best_dev,\n float(grad_norm),\n (time.time() - start_time) / 60.0\n ))\n say(\"\\tTrain Accuracy: %f (%d/%d)\\n\" % (crr / ttl, crr, ttl))\n say(\"\\tp_norm: {}\\n\".format(self.get_pnorm_stat()))\n say(\"\\n\")\n say(\"{}\".format(result_table))\n say(\"\\n\")\n\n def get_pnorm_stat(self):\n lst_norms = []\n for p in self.params:\n vals = p.get_value(borrow=True)\n l2 = np.linalg.norm(vals)\n lst_norms.append(\"{:.3f}\".format(l2))\n return lst_norms\n\n def normalize_2d(self, x, eps=1e-8):\n # x is batch*d\n # l2 is batch*1\n l2 = x.norm(2, axis=1).dimshuffle((0, 'x'))\n return x / (l2 + eps)\n\n def normalize_3d(self, x, eps=1e-8):\n # x is len*batch*d\n # l2 is len*batch*1\n l2 = x.norm(2, axis=2).dimshuffle((0, 1, 'x'))\n return x / (l2 + eps)\n\n def average_without_padding(self, x, ids, eps=1e-8):\n # len*batch*1\n mask = T.neq(ids, self.padding_id).dimshuffle((0, 1, 'x'))\n mask = T.cast(mask, theano.config.floatX)\n # batch*d\n s = T.sum(x * mask, axis=0) / (T.sum(mask, axis=0) + eps)\n return s\n\n def evaluate(self, data, eval_func):\n res = []\n for idts, idbs, labels in data:\n # idts, idbs: 1D: n_words, 2D: n_cands\n if self.args.attention:\n idps = np.asarray([[i for i in xrange(idts.shape[1])]], dtype='int32')\n scores = eval_func(idts, idbs, idps)\n else:\n scores = eval_func(idts, idbs)\n\n assert len(scores) == len(labels)\n ranks = (-scores).argsort()\n ranked_labels = labels[ranks]\n res.append(ranked_labels)\n\n e = Evaluation(res)\n MAP = e.MAP() * 100\n MRR = e.MRR() * 100\n P1 = e.Precision(1) * 100\n P5 = e.Precision(5) * 100\n\n return MAP, MRR, P1, P5\n\n def load_pretrained_parameters(self, args):\n with gzip.open(args.load_pretrain) as fin:\n data = pickle.load(fin)\n assert args.hidden_dim == data[\"d\"]\n # assert args.layer == data[\"layer_type\"]\n for l, p in zip(self.layers, data[\"params\"]):\n l.params = p\n\n def save_model(self, path):\n if not path.endswith(\".pkl.gz\"):\n path += \".gz\" if path.endswith(\".pkl\") else \".pkl.gz\"\n\n args = self.args\n params = [x.params for x in self.layers]\n with gzip.open(path, \"w\") as fout:\n pickle.dump(\n {\n \"args\": args,\n \"d\": args.hidden_dim,\n \"params\": params,\n },\n fout,\n protocol=pickle.HIGHEST_PROTOCOL\n )\n\n","sub_path":"code/model/basic_model.py","file_name":"basic_model.py","file_ext":"py","file_size_in_byte":16430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"610055776","text":"# coding: utf-8\nfrom twi_bot2.kernel.pattern.tokenize import tokenize, ParseError\nfrom twi_bot2.kernel.pattern.tokens import *\n\n\ndef pattern2python(text, func_name='func'):\n result = ['''\\\n# coding: utf-8\n# DO NOT EDIT THIS!\n# __builtins__ = {}\n''']\n args = []\n\n g = tokenize(text)\n # dev\n while True:\n tokid, tokval = next(g)\n if tokid in (TOK_COMMENT, TOK_NEWLINE, TOK_SPACE):\n continue\n if tokid != TOK_ID or tokval != 'dev':\n raise ParseError(\"ожидалось 'dev', найдено '%s'\" % tokval)\n break\n # {\n while True:\n tokid, tokval = next(g)\n if tokid in (TOK_COMMENT, TOK_NEWLINE, TOK_SPACE):\n continue\n if tokid != TOK_BEGIN_BLOCK:\n raise ParseError(\"ожидалось '{', найдено '%s'\" % tokval)\n break\n # …}\n while True:\n tokid, tokval = next(g)\n if tokid in (TOK_COMMENT, TOK_NEWLINE, TOK_SPACE):\n continue\n if tokid == TOK_ID:\n args.append(tokval)\n 
continue\n if tokid == TOK_END_BLOCK:\n break\n raise ParseError(\"ожидалось 'id' или '}', найдено '%s'\" % tokval)\n result.append('def %s(%s):' % (func_name, ', '.join(args)))\n\n while True:\n tokid, tokval = next(g)\n if tokid in (TOK_COMMENT, TOK_NEWLINE, TOK_SPACE):\n continue\n if tokid != TOK_ID or tokval != 'code':\n raise ParseError(\"ожидалось 'code', найдено '%s'\" % tokval)\n break\n while True:\n tokid, tokval = next(g)\n if tokid in (TOK_COMMENT, TOK_NEWLINE, TOK_SPACE):\n continue\n if tokid != TOK_BEGIN_BLOCK:\n raise ParseError(\"ожидалось '{', найдено '%s'\" % tokval)\n break\n\n tabs = 4\n try:\n while True:\n tokid, tokval = next(g)\n if tokid == TOK_COMMENT:\n continue\n if tokid == TOK_NEWLINE:\n result.extend(('\\n', ' ' * tabs))\n continue\n if tokid == TOK_BEGIN_BLOCK:\n tabs += 4\n if result[-1] == ' ':\n result.pop()\n result.extend((':\\n', ' ' * tabs, 'pass\\n', ' ' * tabs))\n continue\n if tokid == TOK_END_BLOCK:\n tabs -= 4\n result.extend(('\\n', ' ' * tabs))\n continue\n if tokid == TOK_SPACE:\n if result[-1][-1] != ' ':\n result.append(' ')\n continue\n if tokid == TOK_ID:\n val = {\n 'function': 'def',\n 'elseif': 'elif',\n 'act': 'yield',\n }.get(tokval, tokval)\n result.append(val)\n continue\n result.append(tokval)\n except StopIteration:\n r = ''.join(result)\n # удалить пустые строки\n # r = '\\n'.join(line for line in r.split('\\n') if line.strip())\n # r = r.replace('def', '\\n\\ndef')\n r = '%s\\n' % r\n return tuple(args), r\n","sub_path":"twi_bot2/kernel/pattern/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"116545129","text":"from urllib import request\nfrom urllib.request import urlopen\nfrom dotenv import load_dotenv\nfrom twython import Twython \nimport random\nimport sys \nimport os\n\n# Load the API twitter secret key \nload_dotenv()\nAPP_KEY = os.getenv(\"APP_KEY\")\nAPP_SECRET = os.getenv(\"APP_SECRET\")\nOAUTH_TOKEN = os.getenv(\"OAUTH_TOKEN\")\nOAUTH_TOKEN_SECRET = os.getenv(\"OAUTH_TOKEN_SECRET\")\n\ntwitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n# Example how to update status on twitter:\n# twitter.update_status(status='')\n\n# Constants:\n# Comments made by the publisher that need to be removed.\nBEGINNING_PUBLISHER_COMMENT = 1000\nEND_PUBLISHER_COMMENT = 18753\n# Range of book ids, there are 1300 that are publicly available on project Gutenberg\nSMALEST_BOOK_ID = 10\nLARGEST_BOOK_ID = 1300\n# Tweet size:\nSMALEST_TWEET_SIZE = 15\nLARGEST_TWEET_SIZE = 276\n\n\n\n# This function:\n# 1) Generates random int within the range of book ids.\n# 2) Generates a url with the book id.\n# 3) \ndef get_book():\n\t\n\n\turl_is_OK = False\n\twhile url_is_OK == False:\n\n\t\trandom_book_number = random.randint(SMALEST_BOOK_ID, LARGEST_BOOK_ID)\n\t\trandom_book_number = str(random_book_number)\n\t\turl = 'http://www.gutenberg.org/files/'+random_book_number+'/'+random_book_number+'.txt'\n\t\tprint(url)\t\t\n\n\t\t#Check if the url is OK. 
If for some reason the url is broken it will generate a new link.\n\t\ttry:\n\t\t\tresponse = urlopen(url)\n\t\t\traw = response.read().decode('utf8')\n\t\t\turl_is_OK = True\n\t\t\t\n\t\t\treturn raw\n\n\t\texcept Exception:\n\t\t\tprint('error')\n\t\t\tpass\n\ndef get_quote(book):\n\n\trandom_index=random.randint(BEGINNING_PUBLISHER_COMMENT,(len(book)-END_PUBLISHER_COMMENT))\n\ttemp = random_index\t\n\n\tquote_forward=''\n\tquote_backward=''\n\n\twhile book[random_index]!='.':\n\n\t\tquote_backward=quote_backward+(book[random_index])\n\t\trandom_index = random_index-1\n\n\trandom_index=temp\n\n\twhile book[random_index]!='.':\n\n\t\tquote_forward=quote_forward+(book[random_index])\n\t\trandom_index = random_index+1\n\n\tcomplete_quote=quote_backward[::-1]+quote_forward\n\n\treturn complete_quote\n\n\nbook=get_book()\nbook=repr(book)\nbook=book.replace(\"\\\\r\\\\n\",'*')\n\n\n#get title \ntitle_start = book.find('Title')\ncondition=0\nj=7\ni=book[title_start+j]\ntitle=''\n\nwhile condition == 0:\t\n\t\n \tif(i=='*'):\n \t\tcondition=1\n \telse:\n \t\tj+=1\n \t\t\n \t\ttitle=title+i\n \t\ti=book[title_start+j]\n\n\n#get author, put unknown if cant find author \nauthor_start = book.find('Author')\nif author_start != -1:\n\tcondition=0\n\tj=8 \n\ti=book[author_start+j]\n\tauthor=''\n\n\twhile condition == 0:\n\t\t\n\t \tif(i=='*'):\n\t \t\tcondition=1\n\t \telse:\n\t \t\tj+=1\n\t \t\tauthor=author+i\n\t \t\ti=book[author_start+j]\nelse:\n\tauthor='Unknown'\n\ncondition = 0\nwhile condition == 0:\n\t quote_of_the_day = get_quote(book)\n\n\t if len(quote_of_the_day+title+author)<LARGEST_TWEET_SIZE and len(quote_of_the_day)>SMALEST_TWEET_SIZE:\n\t \tcondition = 1\n\t \t\t\nprint(title)\nprint(author)\nprint(quote_of_the_day)\n\n\ntwitter.update_status(status=title+', '+author+'\\n'+quote_of_the_day)","sub_path":"quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"19154611","text":"from MultiBot.sessions.general import Session\r\nfrom MultiBot.sessions.argument import ArgSession, Argument\r\nfrom MultiBot.responses import ResponseMsg, ResponseImg\r\nfrom pyzbar import pyzbar\r\nfrom PIL import Image\r\nimport qrcode, datetime, os\r\n\r\n\r\nclass DeCodeSession(ArgSession):\r\n def __init__(self, user_id):\r\n ArgSession.__init__(self, user_id=user_id)\r\n self._max_delta = 3*60\r\n self.session_type = '二维码读取'\r\n self.strict_commands = ['scan', '扫描']\r\n self.description = '读取图片中的二维码,并将其中携带的字符串返回(有Bug)'\r\n self.arg_list = [Argument(key='image', alias_list=['-i'],\r\n required=True, get_all=True,\r\n ask_text='等待图片传输',\r\n help_text='接收图片参数,用于扫描')]\r\n\r\n def is_legal_request(self, request):\r\n return True\r\n\r\n def internal_handle(self, request):\r\n self.deactivate()\r\n img = self.arg_dict['image'].raw_req.img\r\n if isinstance(img, str):\r\n string_list = decode(img)\r\n response_list = []\r\n if len(string_list) == 0:\r\n response_list.append(ResponseMsg('【%s】未发现二维码' % self.session_type))\r\n else:\r\n response_list.append(ResponseMsg('【%s】识别到%i个二维码' % (self.session_type, len(string_list))))\r\n for i, code_string in enumerate(string_list):\r\n response_list.append(ResponseMsg('【%s】第%i个二维码是:%s' % (self.session_type, i+1, code_string)))\r\n return response_list\r\n else:\r\n return ResponseMsg(f'【{self.session_type}】未收到图片')\r\n\r\n\r\nclass EnCodeSession(ArgSession):\r\n def __init__(self, user_id):\r\n ArgSession.__init__(self, user_id=user_id)\r\n self._max_delta = 3*60\r\n self.session_type = '二维码生成'\r\n 
self.strict_commands = ['make', '生成']\r\n self.description = '从提供的字符串生成一个二维码图片'\r\n self.arg_list = [Argument(key='string', alias_list=['-t', '-s'],\r\n required=True, get_next=True,\r\n ask_text='请输入二维码携带的字符串',\r\n help_text='二维码携带的字符串')]\r\n self.default_arg = self.arg_list[0]\r\n\r\n def internal_handle(self, request):\r\n self.deactivate()\r\n filename = datetime.datetime.now().strftime('QR_image_%Y%m%d-%H%M%S.jpg')\r\n abs_dir = os.path.abspath(os.path.join('..', 'temp'))\r\n abs_path = os.path.join(abs_dir, filename)\r\n encode(text=self.arg_dict['string'].value, filename=abs_path)\r\n return [ResponseMsg('【%s】生成二维码,信息为:%s' % (self.session_type, request.msg)),\r\n ResponseImg(abs_path)]\r\n\r\n\r\ndef encode(text: str, filename: str):\r\n qrcode.make(text).save(filename)\r\n\r\n\r\ndef decode(filename: str):\r\n code_list = pyzbar.decode(Image.open(filename))\r\n string_list = []\r\n for code in code_list:\r\n string_list.append(str(code.data, encoding='utf-8'))\r\n return string_list\r\n","sub_path":"sessions/qrcode.py","file_name":"qrcode.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"157202843","text":"\"\"\"\nfirst = input(\"Enter 1st number\") # \"10\" # console input (user input)\nsec = input(\"Enter 2nd number\") # \"12\"\nprint(type(first)) #str\nprint(type(sec))\nsum = first+sec # \"10\"+\"12\" = \"1012\"\nprint(sum)\n\"\"\"\n\n\"\"\"\nfirst = input(\"Enter 1st number\") # \"10\" # console input (user input)\nsec = input(\"Enter 2nd number\") # \"12\"\nsum = int(first)+int(sec) # \"10\"+\"12\" = \"1012\"\nprint(sum)\n\"\"\"\n\nfirst = int(input(\"Enter 1st number: \")) # int(\"12\")\nsec = int(input(\"Enter 2nd number: \")) \nsum = first+sec \nprint(\"Result: \", sum)\n\n\"\"\"\nwrong => string+int => 'Result'+100\nright => string+string => 'Result'+'100' <= str(100) concat\n\"\"\"\n\n\n\n\n\n\n\n","sub_path":"Tutorials/Course-1/Basics/add_2_numbers.py","file_name":"add_2_numbers.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"516826890","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2021-9-15 10:19 am\n@author: liujin\n@function: detect stone\n@version: V2.0\n@modify: 2021/9/15\n\"\"\"\nimport cv2\nimport datetime\nimport numpy as np\nimport traceback\nimport time\nimport urllib\nimport os,sys\nimport importlib\nimport pika\nimport base64\nimport json\nimport logging\nimport logging.handlers\nimport argparse\nfrom numpy import random\nfrom log import Logger\nlogger = Logger(logname='./log/module_stone.txt', loglevel=1, logger=\"stone\").getlog()\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nif sys.version > '3':\n PY3 = True\n from scenes.config_file import config\n importlib.reload(sys)\nelse:\n PY3 = False\n sys.path.append(\"./scenes\")\n from config_file import config\n\n\nclass StoneDetector():\n def __init__(self):\n self.debug = False\n print('Finished Init model!')\n\n def write_frame(self, frame_input, sceneId, timeID):\n daytime = datetime.datetime.now().strftime('%Y-%m-%d')\n save_path = 'image_stone/'+daytime\n is_exist = os.path.exists(save_path)\n if not is_exist:\n os.umask(0)\n os.makedirs(save_path)\n file_name = os.path.join(save_path,\"%s-%s.jpg\" % (sceneId,timeID))\n cv2.imwrite(file_name, frame_input)\n \n \n def prediction(self, frame, sceneId, timeID, zone, params):\n is_warning = False\n circle_num = 0\n 
img_org = frame.copy()\n min_x = zone[:, 0].min()\n min_y = zone[:, 1].min()\n max_x = zone[:, 0].max()\n max_y = zone[:, 1].max()\n frame_crop = frame[min_y:max_y, min_x:max_x]\n if self.debug:\n frame_crop_org = frame_crop.copy()\n zone = [zone.reshape(zone.shape[0], 1, zone.shape[1])]\n \n frame_gray = cv2.cvtColor(frame_crop, cv2.COLOR_BGR2GRAY)\n th2 = cv2.adaptiveThreshold(frame_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,201,11)\n erode2 = cv2.erode(th2, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (21, 21)), iterations=2)\n dilated2 = cv2.dilate(erode2, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (19, 19)), iterations=2)\n median = cv2.medianBlur(dilated2, 7)\n if self.debug:\n median_org = median.copy()\n img, contours, hierarchy = cv2.findContours(median,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n #print(\"========================\")\n for c in contours:\n hull = cv2.convexHull(c, returnPoints=False)\n if len(hull) > 0:\n dis = 0\n defects_point = []\n defects_crop = []\n defects = cv2.convexityDefects(c, hull)\n if defects is None:\n #print(\"!!!!!!!defects is None\")\n continue\n for i in range(defects.shape[0]):\n s,e,f,d = defects[i,0]\n far_crop = tuple(c[f][0])\n far = list(c[f][0])\n far = [far[0]+min_x, far[1]+min_y]\n far = tuple(far)\n dis = d/256\n if dis >= float(params['res_value1']) and dis <= 8*float(params['res_value1']):\n far = [far, dis]\n far_crop = [far_crop, dis]\n defects_point.append(far)\n defects_crop.append(far_crop)\n if len(defects_point) == 2:\n #print(\"\")\n for i in range(len(defects_point)):\n #print(\"distance=\",defects_point[i][1])\n cv2.line(median, defects_crop[i][0], defects_crop[(i+1)%len(defects_crop)][0],0,3)\n cv2.line(frame_crop,defects_crop[i][0],defects_crop[(i+1)%len(defects_crop)][0],[0,255,0],3)\n cv2.line(frame,defects_point[i][0],defects_point[(i+1)%len(defects_point)][0],[0,255,0],3)\n\n img, contours, hierarchy = cv2.findContours(median,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n for c in contours:\n area = cv2.contourArea(c)\n (x, y), radius = cv2.minEnclosingCircle(c)\n (x, y, radius) = np.int0((x, y, radius))\n ratio = area/(np.pi*radius*radius)\n #实际标定值15mm对应像素值大小35pixel(该值是襄阳工厂标定,具体指以实际工厂的标定为准)\n if radius > 35*float(params['res_value2'])/30 and ratio>float(params['res_value3']):\n cv2.circle(frame, (x+min_x, y+min_y), radius, (255, 255, 0), 2)\n cv2.circle(frame_crop, (x, y), radius, (255, 255, 0), 2)\n circle_num += 1\n \n ##can't find Defects\n if self.debug:\n circle_num_org = 0\n img, contours, hierarchy = cv2.findContours(median_org,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n for c in contours:\n area = cv2.contourArea(c)\n (x, y), radius = cv2.minEnclosingCircle(c)\n (x, y, radius) = np.int0((x, y, radius))\n ratio = area/(np.pi*radius*radius)\n if radius > 35*float(params['res_value2'])/30 and ratio>float(params['res_value3']):\n cv2.circle(frame_crop_org, (x, y), radius, (255, 255, 0), 2)\n circle_num_org += 1\n cv2.putText(frame_crop_org, str(circle_num_org), (5,60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) \n ######\n \n if circle_num >= int(params['res_value4']):\n is_warning = True\n \n if self.debug:\n cv2.putText(frame_crop, str(circle_num), (5,60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n cv2.putText(frame, str(circle_num), (5,60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n cv2.polylines(frame, zone, True, (0, 255, 255), 2) \n if is_warning:\n cv2.putText(frame, \"WARNING\", (5,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n self.write_frame(img_org, sceneId, timeID)\n else:\n 
cv2.putText(frame, \"NORMAL\", (5,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\n imshow_name = \"stone\"+str(sceneId)\n\n if self.debug:\n res_median = np.hstack((median_org, median))\n res = np.hstack((frame_crop_org, frame_crop))\n frame_median = cv2.resize(res_median, (1280,720))\n frame_crop = cv2.resize(res, (1280,720))\n cv2.imshow(\"frame_median\", frame_median)\n cv2.imshow(\"frame_crop\", frame_crop)\n cv2.imshow(imshow_name, cv2.resize(frame, (1280, 720)))\n if cv2.waitKey(1) & 0xFF == (ord('q') or ord('Q')):\n raise Exception(\"exit\")\n return is_warning, frame\n\n\nclass StoneWraper(object):\n def __init__(self):\n \"\"\"\n Wrapper of StoneWraper, declaring queues and register recall\n :param none:\n \"\"\"\n self.alertType = \"big-stone-in-area\" \n # Connections\n username = config['mq_username']\n pwd = config['mq_pswd']\n ip = config['mq_server_host']\n port = config['mq_server_port']\n user_pwd = pika.PlainCredentials(username, pwd)\n self.con = pika.BlockingConnection(pika.ConnectionParameters(host=ip, port=port, credentials=user_pwd))\n self.ch = self.con.channel()\n \n # Queue name declaration\n self.qn_in = config['stone_q_in']\n self.qn_out = config['warning_msg_q_out']\n self.ch.queue_declare(queue=self.qn_in, durable=True, arguments={'x-max-length': 5})\n self.ch.queue_declare(queue=self.qn_out, durable=True, arguments={'x-max-length': 5})\n \n # Init Detector\n self.detector = StoneDetector()\n\n\n def getJsonObj(self, body):\n # get json string from binary body\n data_string = bytes.decode(body)\n # load to json obj\n obj_json = json.loads(data_string)\n return obj_json\n\n def getOpencvImg(self, obj_json):\n # get image bytes string\n img = base64.b64decode(obj_json['img'].encode())\n # get image array\n img_opencv = cv2.imdecode(np.fromstring(img, np.uint8), 1)\n h, w, c = img_opencv.shape\n return img_opencv, h, w, c\n \n def enodeImgBase64(self, img):\n _, img_encode = cv2.imencode('.jpg', img)\n np_data = np.array(img_encode)\n str_data = np_data.tostring()\n b64_bytes = base64.b64encode(str_data)\n picData_string = b64_bytes.decode()\n return picData_string\n\n def running(self):\n def callback(ch, method, properties, body):\n start_time = time.time()\n obj_json = self.getJsonObj(body=body)\n sceneId = str(obj_json[\"cameraIP\"])\n timeID = str(obj_json[\"timestamp\"])\n zone = np.array(eval(obj_json[\"zone\"]))\n params = obj_json[\"params\"]\n logger.info(\"zone=%s, params=%s\",zone,params)\n img_opencv, h, w, c = self.getOpencvImg(obj_json)\n # Prediction\n is_alarm, myframe = self.detector.prediction(img_opencv, sceneId, timeID, zone, params)\n if is_alarm:\n picData_string = self.enodeImgBase64(myframe)\n response_dict = {\n 'protocol': '1.0.0',\n 'alertType': self.alertType,\n 'cameraIP': sceneId,\n 'timestamp': timeID,\n 'img': picData_string,\n }\n\n # dumps json obj\n response_dict = json.dumps(response_dict, sort_keys=True, indent=2)\n #print(\"response_dict=\", response_dict)\n self.ch.basic_publish(exchange='', routing_key=self.qn_out, body=response_dict)\n ch.basic_ack(delivery_tag=method.delivery_tag)\n cost_time = time.time()-start_time\n #print('callback:%f ms'%(cost_time*1000))\n # Register the consume function\n self.ch.basic_consume(queue=self.qn_in,on_message_callback=callback,auto_ack=False,exclusive=False,\n consumer_tag=None,\n arguments=None)\n logger.info('[*] Stone Waiting for logs. 
To exit press CTRL+C')\n # Starting consuming\n self.ch.start_consuming()\n\n\nif __name__ == '__main__':\n stoneWrapper = StoneWraper()\n stoneWrapper.running()\n\n\n\n","sub_path":"hx_alg_sdk/module_stone.py","file_name":"module_stone.py","file_ext":"py","file_size_in_byte":10336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"13937619","text":"import requests\nimport os\nimport re\nimport time\nimport auth\nimport httplib2\nimport pandas as pd\nimport excel_updater as xl_upd\nfrom apiclient import discovery\nfrom bs4 import BeautifulSoup as bs\nfrom datetime import datetime, timedelta, date\nfrom gdrive_files_manage import uploadFile, listFiles_folder, deleteFile, listFiles\n\nSCOPES = 'https://www.googleapis.com/auth/drive'\nCLIENT_SECRET_FILE = 'client_id.json'\nAPPLICATION_NAME = 'gdrive_file_manager'\nauthInst = auth.Auth(SCOPES, CLIENT_SECRET_FILE, APPLICATION_NAME)\ncredentials = authInst.get_credentials()\nhttp = credentials.authorize(httplib2.Http())\ndrive_service = discovery.build('drive', 'v3', http=http)\n\nbittrix_session = requests.Session()\nbittrix_session.auth = ('', '')\ndate = datetime.now().date() - timedelta(1)\n#date = datetime.strptime('05.07.2019', '%d.%m.%Y')\nparams = (('set_filter', 'Y'),\n ('sort_id', '3'),\n ('sort_type', 'ASC'),\n ('F_DATE_TYPE', 'interval'),\n ('F_DATE_FROM', '{0}.{1}.{2}'.format(date.day, date.month, date.year)),\n ('F_DATE_TO', '{0}.{1}.{2}'.format(date.day, date.month, date.year)),\n ('F_DATE_DAYS',''),\n ('filter','%5B0%5D%5B1%5D'),\n ('save','Y'),\n ('SHOWALL_1','1'))\n\nr = bittrix_session.get('https://sibcedar.bitrix24.ru/crm/reports/report/view/1523/', params=params)\n\nsoup = bs(r.text, features='lxml')\ntable = soup.findAll('table', {'class':'reports-list-table'})\nreport_table = str(table[0].prettify())\nreport_df = pd.read_html(report_table, header=None, index_col=None)[0]\n\n\n# паттерн для удаления строк (фильтрации), в которых содержатся общие итоги\npattern = re.compile('Ответственный|Страницы|—')\nresult_df = report_df[report_df['Ответственный'].apply(lambda x: not re.match(pattern, x))][0:]\nresult_df['date'] = date #date.strftime('%d.%m.%Y')\n\nfilenames = ['Рябов. Отчет Звонки + Письма.xlsx', 'Шпинев. 
Отчет Звонки + Письма.xlsx']\nfilepath = r'W:\\Gutorov\\02 - Отчеты\\Отчеты Звонки Письма'\ngdrive_folder_id = '12IyRgYoENfCBeprH_JT49DB3jkL2_ole'\nquery = '\"'+ gdrive_folder_id + '\" in parents'\n\nfor file_id in listFiles_gdrive_folder(drive_service, query):\n deleteFile_gdrive(drive_service, file_id)\n print('File %s has been deleted' % file_id)\n\n\nfor file in filenames:\n xl_upd.update_reports(file, filepath, result_df)\n uploadFile_gdrive(drive_service, file, os.path.join(filepath, file), 'application/vnd.ms-excel', gdrive_folder_id)\n","sub_path":"refresh_calls&emails_report.py","file_name":"refresh_calls&emails_report.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"98256839","text":"#!/usr/bin/env python\nimport os, glob, sys, re\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nchid = ''\n\ndef pick_chid():\n 'Lets user select desired CHID if multiple are in the same directory'\n\n # Default value \n sel = 0\n\n # find all CHIDs where a sim was run (using the devc file)\n chids = list(glob.glob(\"*_devc.csv\"))\n for i in range(len(chids)):\n chids[i] = chids[i][:-9]\n\n # Don't run if there aren't any sims to plot\n if len(chids) == 0:\n raise Exception(\"There are no simulations that were run\")\n return\n\n # If only one CHID exists, assume that one\n if len(chids) == 1:\n return chids[0]\n\n # Prompt user to clarify which CHID to use\n while True:\n for i, chid in enumerate(chids):\n print(\"{}) {}\".format(i, chid))\n\n sel = int(input(\"Which CHID would you like to use?: \"))\n\n if sel < len(chids): break\n\n return chids[sel]\n\ndef find_devc(chid):\n 'Returns a DataFrame of all the devc.csv files'\n\n # Initial empty DataFrame\n devc_df = pd.DataFrame()\n\n # Find all devc files with the CHID\n for file in glob.glob(\"{}*_devc.csv\".format(chid)):\n \n # header = 1 since units are on the first row\n df = pd.read_csv(file, header=1)\n # Make sure everything is indexed by Time\n df.set_index(['Time'], inplace=True)\n\n # Add to DataFrame\n if devc_df.empty:\n devc_df = df\n else:\n devc_df = devc_df.join(df)\n\n return devc_df\n\ndef find_hrr(chid):\n 'Returns a DataFrame of all the hrr.csv files'\n\n # Initial empty DataFrame\n hrr_df = pd.DataFrame()\n\n # Find all hrr files with the CHID\n for file in glob.glob(\"{}*_hrr.csv\".format(chid)):\n \n # header = 1 since units are on the first row\n df = pd.read_csv(file, header=1)\n # Make sure everything is indexed by Time\n df.set_index(['Time'], inplace=True)\n\n # Add to DataFrame\n if hrr_df.empty:\n hrr_df = df\n else:\n hrr_df = hrr_df.join(df)\n\n return hrr_df\n\ndef plot_data(df, argv):\n \n plot_cols = []\n\n if len(argv) > 1:\n for col in df.columns.values:\n if any(re.search(regex,col) for regex in argv[1:]):\n plot_cols.append(col)\n else:\n plot_cols = df.columns.values\n\n df[plot_cols].plot()\n plt.legend(bbox_to_anchor=(0, 1), loc='upper left', borderaxespad=0)\n plt.show()\n \ndef main():\n try:\n # Get information\n chid = pick_chid()\n fds_df = find_devc(chid).join(find_hrr(chid)).interpolate()\n\n # If 'list' is provided as the first argument, just print a list of columns\n if len(sys.argv) > 1 and sys.argv[1] == 'list':\n for col in fds_df.columns.values:\n print(col)\n # If list is not desired, plot based on argv\n else:\n plot_data(fds_df, sys.argv)\n except Exception as e:\n print(e)\n \nif __name__ == \"__main__\": 
main()\n\n","sub_path":"tools/quickplot.py","file_name":"quickplot.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"581158215","text":"import os\nimport numpy as np\nfrom lxml import objectify\nimport cv2\nfrom dataset import data_writer,data_reader\nfrom progress.bar import Bar\nimport random\nfrom multiprocessing import Process,Queue\nfrom data import *\nfrom setup import *\nfrom time import sleep\nclass data_generator():\n def __init__(self,data_name,type=\"video\",data_type=\"mp4\",final_size=None):\n self.data_name=data_name\n self.type=type\n self.data_type=data_type\n self.frame_size=(224,224)\n self.queue= Queue()\n def __get_cap(self):\n video_path=\"./data/video/\"+self.data_name+\".\"+self.data_type\n if self.type==\"video\":\n self.cap = cv2.VideoCapture(video_path)\n elif self.type==\"camera\":\n self.cap = cv2.VideoCapture(0)\n else:\n print(\"error type\")\n return -1\n if cap.isOpened()== False:\n print(\"error in opening data\")\n return -1\n def setsize(self,size):\n self.frame_size=size\n def data_process(self):\n error=self.__get_cap()\n if error==-1:\n self.cap.release()\n return\n buffer_multiple=10\n while 1:\n end, frame = cap.read()\n if end:\n break\n while self.queue.qsize()>=buffer_multiple:\n sleep(0.01)\n img=cv2.resize(frame,self.frame_size)\n input=preprocess_input(img)#inverse order of channel\n self.queue.put(input)\n self.cap.release()\n def generator(self):\n self.p=Process(target=self.data_process,daemon=True)\n self.p.start()\n while True:\n if self.queue.empty()==False:\n yield self.queue.get()\n\n\n","sub_path":"video_generator.py","file_name":"video_generator.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"281766080","text":"from lexer import Lexer, Location\r\nimport sys\r\n\r\nclass Grammar:\r\n def __init__(self, kind):\r\n self.kind = kind\r\n self.value = None\r\n \r\n def __repr__(self):\r\n return str(self.kind)\r\n\r\nclass Parser:\r\n def __init__(self):\r\n self.list = []\r\n self.tokens = []\r\n self.currentpos = 0\r\n self.eof = 0\r\n self.LPARS = 0\r\n self.RPARS = 0\r\n self.errorflag = 0\r\n self.errorloc = []\r\n\r\n def parse(self, tokenlist):\r\n self.tokens = tokenlist\r\n self.eof = (len(self.tokens)-1)\r\n self.propositions()\r\n self.checkErrors()\r\n if self.errorflag:\r\n return \"Syntax Error at line \" + self.errorloc[0] + \" column \" + self.errorloc[1] + \".\"\r\n else:\r\n return self.list\r\n\r\n def propositions(self):\r\n self.list.append(Grammar(\"propositions\"))\r\n self.proposition()\r\n self.more_propositions()\r\n\r\n def more_propositions(self):\r\n self.list.append(Grammar(\"more-proposition\"))\r\n if self.currentpos < self.eof:\r\n #self.checkPARS()\r\n self.list.append(Grammar(\"comma\"))\r\n self.currentpos += 1 if self.currentpos < self.eof else 0\r\n self.propositions()\r\n else:\r\n self.list.append(Grammar(\"epsilon\"))\r\n\r\n def proposition(self):\r\n self.list.append(Grammar(\"proposition\"))\r\n # Check for LPAR or NOT(!)\r\n peek = self.currentpos\r\n if self.tokens[self.currentpos].kind == \"LPAR\" or self.tokens[self.currentpos].kind == \"NOT\":\r\n self.compound()\r\n elif self.tokens[self.currentpos].kind == \"ID\":\r\n # Check for atomic connective (AND, OR, IFF, IMPLIES)\r\n if (peek + 1 <= len(self.tokens) - 1) and (self.tokens[peek + 1].kind == \"AND\" or self.tokens[peek + 1].kind == 
\"OR\" or self.tokens[peek + 1].kind == \"IFF\" or self.tokens[peek + 1].kind == \"IMPLIES\"):\r\n self.compound()\r\n # Normal atomic \r\n else:\r\n self.atomic()\r\n\r\n def atomic(self):\r\n self.list.append(Grammar(\"atomic\"))\r\n self.list.append(Grammar(\"ID\"))\r\n self.list[-1].value = self.tokens[self.currentpos].value\r\n self.currentpos += 1 if self.currentpos < self.eof else 0\r\n\r\n def compound(self):\r\n self.list.append(Grammar(\"compound\"))\r\n\r\n if self.tokens[self.currentpos].kind == \"LPAR\":\r\n self.list.append(Grammar(\"LPAR\"))\r\n self.currentpos += 1 if self.currentpos < self.eof else 0\r\n self.proposition()\r\n self.list.append(Grammar(\"RPAR\"))\r\n self.currentpos += 1 if self.currentpos < self.eof else 0\r\n \r\n elif self.tokens[self.currentpos].kind == \"NOT\":\r\n self.list.append(Grammar(\"NOT\"))\r\n self.currentpos += 1 if self.currentpos < self.eof else 0\r\n self.proposition()\r\n\r\n else:\r\n self.atomic()\r\n self.connective()\r\n self.proposition()\r\n\r\n def connective(self):\r\n self.list.append(Grammar(\"connective\"))\r\n \r\n if self.tokens[self.currentpos].kind == \"AND\":\r\n self.list.append(Grammar(\"AND\"))\r\n self.currentpos += 1 if self.currentpos < self.eof else 0\r\n \r\n elif self.tokens[self.currentpos].kind == \"OR\":\r\n self.list.append(Grammar(\"OR\"))\r\n self.currentpos += 1 if self.currentpos < self.eof else 0\r\n\r\n elif self.tokens[self.currentpos].kind == \"IMPLIES\":\r\n self.list.append(Grammar(\"IMPLIES\"))\r\n self.currentpos += 1 if self.currentpos < self.eof else 0\r\n\r\n elif self.tokens[self.currentpos].kind == \"IFF\":\r\n self.list.append(Grammar(\"IFF\"))\r\n self.currentpos += 1 if self.currentpos < self.eof else 0\r\n\r\n def checkErrors(self):\r\n errorpos = 0\r\n index = 0\r\n\r\n for token in self.tokens:\r\n \r\n # CHECKS FOR MISMATCHING PARS\r\n if token.kind == \"LPAR\":\r\n if self.LPARS == 0:\r\n errorpos = index \r\n self.LPARS += 1\r\n\r\n if token.kind == \"RPAR\":\r\n if self.LPARS > 0:\r\n self.LPARS -= 1\r\n else:\r\n errorpos = index\r\n self.RPARS += 1\r\n \r\n # CHECKS FOR OUT OF PLACE COMMAS\r\n # Comma at beginning of list\r\n if token.kind == \"COMMA\" and index == 0:\r\n errorpos = index\r\n self.errorflag = 1\r\n self.errorloc = [str(self.tokens[errorpos].loc.line), str(self.tokens[errorpos].loc.col)]\r\n return\r\n \r\n # Comma following wrong token - error\r\n if token.kind == \"COMMA\" and index > 0:\r\n if self.tokens[index - 1].kind != \"RPAR\" and self.tokens[index - 1].kind != \"ID\":\r\n errorpos = index\r\n self.errorflag = 1\r\n self.errorloc = [str(self.tokens[errorpos].loc.line), str(self.tokens[errorpos].loc.col)]\r\n return\r\n # CHECKS CONNECTIVES\r\n if (token.kind == \"AND\" or token.kind == \"OR\" or token.kind == \"IMPLIES\" or token.kind == \"IFF\"):\r\n if(index < self.eof):\r\n if (self.tokens[index + 1].kind != \"ID\" and self.tokens[index + 1].kind != \"LPAR\" and self.tokens[index + 1].kind != \"NOT\"):\r\n errorpos = index + 1\r\n self.errorflag = 1\r\n self.errorloc = [str(self.tokens[errorpos].loc.line), str(self.tokens[errorpos].loc.col)]\r\n return\r\n\r\n # Connective at end - Error\r\n if(index == self.eof) or (index == 0):\r\n errorpos = index\r\n self.errorflag = 1\r\n self.errorloc = [str(self.tokens[errorpos].loc.line), str(self.tokens[errorpos].loc.col)]\r\n return\r\n\r\n index += 1\r\n\r\n # PARENTHESIS ARE UNEQUAL\r\n if self.LPARS != self.RPARS:\r\n self.errorflag = 1\r\n self.errorloc = [str(self.tokens[errorpos].loc.line), 
str(self.tokens[errorpos].loc.col)]","sub_path":"parserr.py","file_name":"parserr.py","file_ext":"py","file_size_in_byte":6280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"441508642","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom scipy import stats\n\n\n#From https://www.kaggle.com/kabure/extensive-eda-and-modeling-xgb-hyperopt\n\ndef resumetable(df):\n '''\n :param df: input pandas dataset\n :return: dataset with summary\n '''\n print(f\"Dataset Shape: {df.shape}\")\n summary = pd.DataFrame(df.dtypes,columns=['dtypes'])\n summary = summary.reset_index()\n summary['Name'] = summary['index']\n summary = summary[['Name','dtypes']]\n summary['Missing'] = df.isnull().sum().values\n summary['Uniques'] = df.nunique().values\n summary['First Value'] = df.loc[0].values\n summary['Second Value'] = df.loc[1].values\n summary['Third Value'] = df.loc[2].values\n\n for name in summary['Name'].value_counts().index:\n summary.loc[summary['Name'] == name, 'Entropy'] = round(stats.entropy(df[name].value_counts(normalize=True), base=2),2)\n\n return summary\n\n\ndef calculate_outliers(df_num, cut_line =3):\n '''\n Calculates the outliers of an array\n :param df_num: the Series with numerical value\n :param cut_line: parameter for the cut\n :return: /\n '''\n # calculating mean and std of the array\n data_mean, data_std = np.mean(df_num), np.std(df_num)\n\n # seting the cut line to both higher and lower values\n # You can change this value\n cut = data_std * cut_line\n\n # Calculating the higher and lower cut values\n lower, upper = data_mean - cut, data_mean + cut\n\n # creating an array of lower, higher and total outlier values\n outliers_lower = [x for x in df_num if x < lower]\n outliers_higher = [x for x in df_num if x > upper]\n outliers_total = [x for x in df_num if x < lower or x > upper]\n\n # array without outlier values\n outliers_removed = [x for x in df_num if x > lower and x < upper]\n\n print('Identified lowest outliers: %d' % len(\n outliers_lower)) # printing total number of values in lower cut of outliers\n print('Identified upper outliers: %d' % len(\n outliers_higher)) # printing total number of values in higher cut of outliers\n print('Total outlier observations: %d' % len(\n outliers_total)) # printing total number of values outliers of both sides\n print('Non-outlier observations: %d' % len(outliers_removed)) # printing total number of non outlier values\n print(\"Total percentual of Outliers: \",\n round((len(outliers_total) / len(outliers_removed)) * 100, 4)) # Percentual of outliers in points\n\n return\n\n\ndef distribution_plot(values, log_scale=False, color=\"Tomato\"):\n if log_scale:\n sns.distplot(np.log(values), kde=False, color=color)\n else:\n sns.distplot(values, kde=False, color=color)\n","sub_path":"visualization/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"169571837","text":"import train_model\nimport pickle\nimport predict_model\nimport numpy as np \nimport pandas as pd\nfrom settings import *\n\n\nwith open(FINAL_VECT,'rb') as final_count_vect:\n\tcount_vect = pickle.load(final_count_vect)\nwith open(FINAL_TFIDF, 'rb') as final_tf_transformer:\n\ttf_transformer = pickle.load(final_tf_transformer)\nwith open(FINAL_MODEL, 'rb') as final_model:\n\tlr_clf = pickle.load(final_model)\nobj = [count_vect, tf_transformer, 
lr_clf]\n\ninput_list = [\"Hello there\",\" \" ,\"asshole\",\"black nigger\",\"hero\"]\noutput_list = [2,1,1,0,2]\ndef test_predictResult():\n\tfor i in range(0,len(input_list)):\n\t\tif input_list[i]==\" \":\n\t\t\tpredicted_class = predict_model.predictResult([input_list[i]],obj)\n\t\t\tassert predicted_class == 1\n\t\telse:\n\t\t\tpredicted_class = predict_model.predictResult([input_list[i]],obj)\n\t\t\tassert predicted_class == output_list[i]\n","sub_path":"IDK_rest/ML_Models/collapseChecker/src/models/test_predict.py","file_name":"test_predict.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"138096953","text":"sent1 = \"This film is the best film of the year.\"\nL = []\nparsed = []\n\nfi = \"film\"\nrep = \"movie\"\n\nfor i, c in enumerate(sent1):\n if c == \" \":\n L.append(i)\n\nx = 0\n\nfor n in L:\n parsed.append(sent1[x:n])\n x = n + 1\n if n == L[-1]:\n parsed.append(sent1[n+1:])\n\nfor i, word in enumerate(parsed):\n if word == fi:\n parsed[i] = rep\n\nsent2 = \" \".join(parsed)\nprint(sent2)\n\n# find-and-replace without using string manipulation","sub_path":"simple find-and-replace.py","file_name":"simple find-and-replace.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"115061285","text":"def teacherPosts():\n teacherposts = [\n {\n \"id\": 1,\n \"title\": \"Template Post\",\n \"body\": \"On the last quiz set date: dd/mm/yyyy, these are the things that everyone struggled with, these are tips most of you can benefit from.\",\n \"author\": \"Mr Smith\",\n \"create_date\": \"dd/mm/yyyy\"\n },\n {\n \"id\": 2,\n \"title\": \"Template Post2\",\n \"body\": \"On the last quiz set date: dd+1/mm/yyyy, these are the things that everyone struggled with, these are tips most of you can benefit from.\",\n \"author\": \"Mr Smith\",\n \"create_date\": \"dd+1/mm/yyyy\"\n }\n ]\n return teacherposts\n\ndef quizzes():\n quizzes = [\n {\n \"quizid\":1,\n \"Topic\": \"Data Representation\",\n \"Topicid\": 1,\n \"question1\":2,\n \"question2\":3,\n \"question3\":4\n },\n {\n \"quizid\":2,\n \"Topic\": \"Hardware\",\n \"Topicid\": 2\n }\n ]\n return quizzes","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"182390679","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n# @Time : 19-10-18 下午5:52 \n# @Author : Lattesea \n# @File : financial_spider.py \nimport requests\nimport json\nimport csv\nfrom fake_useragent import UserAgent\nimport time\nimport random\n\n\nclass YYpingjiSpider_financial(object):\n def __init__(self):\n self.url = 'https://api.ratingdog.cn/v1/GetIssuerYYRatingApi?IssuerID={}'\n\n def get_headers(self):\n ua = UserAgent()\n headers = {\n \"Accept\": \"application/json, text/plain, */*\",\n \"Origin\": \"https://www.ratingdog.cn\",\n \"Referer\": \"https://www.ratingdog.cn/\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"User-Agent\": ua.random\n }\n return headers\n\n def parse(self, IssuerID):\n url = self.url.format(IssuerID)\n financial = {}\n html_json = requests.get(url=url, headers=self.get_headers()).text\n html_py = json.loads(html_json)\n print(html_py)\n for i in html_py['rows']:\n financial['RankingStr'] = i['RankingStr']\n financial['LastTotalEquityScore'] = i['LastTotalEquityScore']\n financial['EBITDAScore'] = i['EBITDAScore']\n 
financial['AverTotalProfitDefrayNonCurrentPLThreeYearsScore'] = i[\n 'AverTotalProfitDefrayNonCurrentPLThreeYearsScore']\n financial['AverOperNetCashThreeYearsIBDebtScore'] = i['AverOperNetCashThreeYearsIBDebtScore']\n financial['EBITDAIBDebtScore'] = i['EBITDAIBDebtScore']\n financial['AssetLiabilityRatioScore'] = i['AssetLiabilityRatioScore']\n financial['CurrentRatioScore'] = i['CurrentRatioScore']\n financial['RealizableAssetShortTermIBDebtScore'] = i['RealizableAssetShortTermIBDebtScore']\n financial['ListedScore'] = i['ListedScore']\n financial['CompanyNatureScore'] = i['CompanyNatureScore']\n financial['FinCostsIBDebtScore'] = i['FinCostsIBDebtScore']\n\n print(financial)\n return financial\n\n def save_csv(self, result):\n keyword_list1 = ['RankingStr', 'LastTotalEquityScore', 'EBITDAScore',\n 'AverTotalProfitDefrayNonCurrentPLThreeYearsScore', 'EBITDAIBDebtScore',\n 'AssetLiabilityRatioScore', 'CurrentRatioScore', 'RealizableAssetShortTermIBDebtScore',\n 'ListedScore', 'CompanyNatureScore', 'FinCostsIBDebtScore']\n with open('财务评分.csv', 'a', newline='')as f:\n writer = csv.writer(f)\n writer.writerow(keyword_list1)\n with open('财务评分.csv', 'a', newline='') as f:\n writer = csv.DictWriter(f, keyword_list1)\n # for row in result:\n writer.writerow(result)\n\n def financial_run(self, IssuerID_IssuerType):\n print(IssuerID_IssuerType)\n for j in IssuerID_IssuerType:\n result = self.parse(j[0])\n self.save_csv(result)\n time.sleep(random.uniform(1, 4))\n print(\"%s存入成功\" % result)\n","sub_path":"YY评级基本信息/financial_spider.py","file_name":"financial_spider.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"26900386","text":"from sikuli import *\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport datetime\nimport time\n\n\n\nmyPath = os.path.join(os.environ.get(\"GIT_HOME\"), u\"sikuli-tests\")\nif not myPath in sys.path:\n\tsys.path.append(myPath)\n# Импорт ------------------------------\nimport baseFunction as BF\n\n\n\n\n# Если отчёт в конце списка, т.е. 
его не видно на панели сразу, прокручиваем список отчётов до конца и смотрим ещё раз\ndef findReportOnPanel(pattern):\n\ttry:\n\t\tfind(pattern)\n\texcept:\n\t\tclick(Pattern(\"scrol_down-2.png\").similar(0.90).targetOffset(-4,0))\n\t\tmouseDown( Button.LEFT)\n\t\tsleep(10)\n\t\tmouseUp()\n\t\tfind(pattern)\n\ndef startReport(patternList, reportGroupName):\n\tif reportGroupName == 'Fuel':\n\t\tprint (u\"Группа отчётов - Топливные баки\")\n\t\ttry:\n\t\t\tclick(Pattern(\"bowser.png\").similar(0.80).targetOffset(-53,0))\n\t\texcept:\n\t\t\tprint (u\"Не смогли свернуть группу Топливовоз\")\n\t\t\tBF.killAllNavstat()\n\t\t\texit(1)\t\t\n\telif reportGroupName == 'bigFuel':\n\t\tprint (u\"Группа отчётов - Топливовоз\")\n\t\ttry:\n\t\t\tclick(Pattern(\"fuel_tank_group.png\").similar(0.90).targetOffset(-45,2))\n\t\texcept:\n\t\t\tprint (u\"Не смогли свернуть группу Топливные баки\")\n\t\t\tBF.killAllNavstat()\t\t\n\t\t\texit(2)\n\telif reportGroupName == 'Fc':\n\t\tprint (u\"Группа отчётов - Топливные карты\")\n\t\texit(3)\n\telse:\n\t\tprint (u\"Группа отчётов - Общие или отраслевые\")\n#----------------------------------------------------------------------------------\n\ttry:\n\t\tfindReportOnPanel(patternList[0])\n\t\tclick(patternList[0])\n\t\tclick(\"runReport-2.png\")\t\t\n\texcept:\n\t\tprint (u\"Не нашли отчёт на панели\")\n\t\tBF.killAllNavstat()\n\t\texit(0)\n\ttry:\n\t\tfind(\"question-2.png\")\n\t\ttry:\n\t\t\tclick(\"da-3.png\")\n\t\texcept:\n\t\t\tprint (u\"Такой файл уже есть, но не нашёл кнопку 'Да'\")\n\t\t\texit(0)\n\texcept:\n\t\tprint (u\".\")\n\ttry:\n\t\treportStart = time.time()\n\t\tprint (u\"Стартанули таймер\")\n\t\tBF.waitAll(patternList,2000)\n\t\tprint (u\"Отчёт выполнен за \"), datetime.timedelta(seconds=time.time()-reportStart)\n\texcept:\n\t\tprint (u\"Отчёт не выполнен или результат не корректен\")\n\t\tBF.killAllNavstat()\n\t\texit(0)\n\n# Общая проверка отчётов, подходит для: Общие отчёты, События и сигналы, Отраслевые \ndef reportTest1(reportName, startDay, endDay, reportGroupName = 'other'):\n# ------ Подготовка к тесту --------------------\t\n\tstart = time.time()\n\tbaseDir = os.path.join(os.environ.get(\"GIT_HOME\"), u\"sikuli-tests\", u\"img\", u\"report_1\")\n\tfList = os.listdir(os.path.join(baseDir,reportName));\n\tpatternList = []\n\tfor f in fList:\n\t\tpatternList.append(os.path.join(baseDir,reportName,f))\n\tBF.clearData()\n#\tkeyer.editKeyAndService(\"404C2A00-B173-4844-BA59-9A6F296479E7\", \"http://services.navstat.infokinetika.net\")\n\tBF.firstStartNavstat()\n# Переходим на таб отчётов (просто закрыв таб с картой)\n\tBF.closeCurTab()\n\tclick(Pattern(\"6917gag-2.png\").similar(0.80).targetOffset(-1,0)) # Заглушка http://idea.navstat.ru/tickets/6917\n# ------ Тест --------------------\t\n\tBF.setInterval(startDay, endDay) #Устанавливаем интервал отчёта\n\tstartReport(patternList, reportGroupName) #Выполнили отчёт\n\tBF.saveReportAsCSV(reportName) #Сохранили отчёт в файл\n\tBF.mergeFile(reportName) #Сравнили файл с эталоном\n\tprint (u\"Время выполнения теста: \"), datetime.timedelta(seconds=time.time()-start)\n\tBF.killAllNavstat()","sub_path":"base_report_test.sikuli/base_report_test.py","file_name":"base_report_test.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"64418692","text":"from flask import Flask, render_template, request\nimport lib.lang\nimport lib.url\nimport json\nimport urllib2\n\napp = 
Flask(__name__)\n\n\n\n\n\n\n@app.route(\"/\")\ndef hello():\n return render_template(\"hello.html\")\n\n@app.route(\"/glossary\", methods=['POST'])\ndef glossary():\n if request.form['url'] != \"\":\n content = lib.url.readable(request.form['url'])\n else:\n content = request.form['input']\n weights = lib.lang.sort_weights(lib.lang.make_weights(content))\n new = []\n for weight in weights[0:15]:\n if weight[0].__len__() > 3:\n nlist = {}\n nlist['term'] = weight[0]\n if int(weight[1]) > 5:\n nlist['freq'] = 5\n else:\n nlist['freq'] = int(weight[1])\n\n defs = []\n url = \"http://api.wordreference.com/0.8/ce272/json/fren/\"+weight[0]\n try:\n j = json.loads(urllib2.urlopen(url).read())\n if '0' in j['term0']['PrincipalTranslations']:\n current = j['term0']['PrincipalTranslations']['0']\n d = {}\n d['type'] = current['OriginalTerm']['POS']\n d['sense'] = current['OriginalTerm']['sense']\n d['def'] = current['FirstTranslation']['term']\n defs.append(d)\n if '1' in j['term0']['PrincipalTranslations']:\n current = j['term0']['PrincipalTranslations']['1']\n d = {}\n d['type'] = current['OriginalTerm']['POS']\n d['sense'] = current['OriginalTerm']['sense']\n d['def'] = current['FirstTranslation']['term']\n defs.append(d)\n if '2' in j['term0']['PrincipalTranslations']:\n current = j['term0']['PrincipalTranslations']['2']\n d = {}\n d['type'] = current['OriginalTerm']['POS']\n d['sense'] = current['OriginalTerm']['sense']\n d['def'] = current['FirstTranslation']['term']\n defs.append(d)\n except:\n nlist['skip'] = 1\n nlist['def'] = defs\n new.append(nlist)\n return render_template(\"glossary.html\", words=new[0:10])\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"627342734","text":"from matplotlib import pyplot as plt\nimport math\n\nplot_points: int = 60\nx_data = [50 * x for x in range(plot_points)]\ny_data = [0.9] + [0.9 - 0.06*math.log(x, 2) for x in x_data[1:]]\n\nplt.title('Application\\'s Internal Flow Ratio (AIFR)')\nplt.xlabel('Data size [MB]')\nplt.ylabel('AIFR')\nplt.xlim(0, max(x_data))\nplt.ylim(0, 1)\nplt.plot(x_data, y_data, label='Runtime')\n#plt.legend(loc='lower right')\nplt.show()\n","sub_path":"draw_comp_time_func.py","file_name":"draw_comp_time_func.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"263665451","text":"from BinarySearchTree import *\n\nclass bbst(BinarySearchTree):\n def _put(self,key,val,currentNode):\n if key < currentNode.key:\n if currentNode.hasLeftChild():\n self._put(key,val,currentNode.left)\n else:\n currentNode.left = TreeNode(key,val,parent=currentNode)\n self.updatebalance(currentNode.left)\n else:\n if currentNode.hasRightChild():\n self._put(key,val,currentNode.right)\n else:\n currentNode.right = TreeNode(key,val,parent=currentNode)\n self.updatebalance(currentNode.right)\n\n def updatebalance(self,currentNode):\n if currentNode.balancefactor < -1 or currentNode.balancefactor > 1:\n self.rebalance(currentNode)\n return\n if currentNode.parent != None:\n if currentNode.isLeft():\n currentNode.parent.balancefactor += 1\n else:\n currentNode.parent.balancefactor -= 1\n if currentNode.parent.balancefactor != 0:\n self.updatebalance(currentNode.parent)\n\n def leftRotate(self,rotNode):\n newRoot = rotNode.right\n rotNode.right = newRoot.left\n\n if rotNode.right != 
None:\n rotNode.right.parent = rotNode\n if rotNode.isRoot():\n self.root = newRoot\n else:\n if rotNode.isLeft():\n rotNode.parent.left = newRoot\n else:\n rotNode.parent.right = newRoot\n newRoot.parent = rotNode.parent\n rotNode.parent = newRoot\n rotNode.balancefactor = rotNode.balancefactor + 1 - min(0,newRoot.balancefactor)\n newRoot.balancefactor = newRoot.balancefactor + 1 + max(0,rotNode.balancefactor)\n\n def rightRotate(self,rotNode):\n newRoot = rotNode.left\n rotNode.left = newRoot.right\n\n if rotNode.left != None:\n rotNode.left.parent = rotNode\n\n if rotNode.isRoot():\n self.root = newRoot\n else:\n if rotNode.isLeft():\n rotNode.parent.left = newRoot\n else:\n rotNode.parent.right = newRoot\n newRoot.parent = rotNode.parent\n rotNode.parent = newRoot\n rotNode.balancefactor = rotNode.balancefactor - 1 - max(0, newRoot.balancefactor)\n newRoot.balancefactor = newRoot.balancefactor - 1 + min(0,rotNode.balancefactor)\n\n def rebalance(self,currentNode):\n if currentNode.balancefactor > 0:\n if currentNode.left.balancefactor < 0:\n self.leftRotate(currentNode.left)\n self.rightRotate(currentNode)\n else:\n self.rightRotate(currentNode)\n else:\n if currentNode.right.balancefactor > 0:\n self.rightRotate(currentNode.right)\n self.leftRotate(currentNode)\n\n\n\n\n\n\nclass TreeNodeBal(TreeNode):\n def __init__(self,key,val,left=None,right=None,parent=None):\n super.__init__(key,val)\n self.balancefactor = 0\n","sub_path":"revisit/bbst.py","file_name":"bbst.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"249046941","text":"import random\nimport itertools as itr\n\nfrom nasbench_asr.quiet_tensorflow import tensorflow as tf\n\nfrom .ops import OPS_LIST, BRANCH_OPS_LIST, norm_op, PadConvRelu\nfrom .mean_variance_normalization import MeanVarianceNormalization\n\n\nclass Node(tf.keras.Model):\n def __init__(self, filters, op_idx, branch_op_idx_list):\n super().__init__()\n self._op = OPS_LIST[op_idx](filters)\n self.branch_ops = [BRANCH_OPS_LIST[i] for i in branch_op_idx_list]\n\n def call(self, input_list, training=None):\n assert len(input_list) == len(self.branch_ops), 'Branch op and input list have different lenghts'\n\n output = self._op(input_list[-1], training=training)\n edges = [output]\n for i in range(len(self.branch_ops)):\n x = self.branch_ops[i](input_list[i])\n edges.append(x)\n\n return tf.math.add_n(edges)\n\n\nclass SearchCell(tf.keras.Model): \n def __init__(self, filters, config, num_nodes=3):\n super().__init__()\n\n self._nodes = list() \n for n_config in config:\n node = Node(filters=filters, op_idx=n_config[0], branch_op_idx_list=n_config[1:])\n self._nodes.append(node) \n \n self.norm_layer = norm_op()\n\n def call(self, input, training=None):\n outputs = [input] # input is the output coming from node 0\n for node in self._nodes:\n n_out = node(outputs, training=training)\n outputs.append(n_out)\n\n output = self.norm_layer(outputs[-1]) #use layer norm at the end of a search cell\n return output \n\n\nclass ASRModel(tf.keras.Model):\n def __init__(self, arch_desc, num_classes=48, use_rnn=False, use_norm=True, dropout_rate=0.0, input_shape=None, data_norm=None, epsilon=0.001):\n super().__init__()\n\n self.arch_desc = list(arch_desc)\n self.num_classes = num_classes\n self.use_rnn = use_rnn\n self.use_norm = use_norm\n self.dropout_rate = dropout_rate\n\n cnn_time_reduction_kernels = [8, 8, 8, 8]\n cnn_time_reduction_strides = [1, 1, 2, 2]\n filters = [600, 
800, 1000, 1200]\n scells_per_block = [3, 4, 5, 6]\n\n zipped_params = zip(cnn_time_reduction_kernels,\n cnn_time_reduction_strides,\n filters,\n scells_per_block)\n\n layers = []\n\n if input_shape is not None:\n layers.append(tf.keras.layers.Masking(input_shape=input_shape))\n else:\n layers.append(tf.keras.layers.Masking())\n\n if data_norm is not None:\n mean, variance = data_norm\n layers.append(MeanVarianceNormalization(epsilon, tf.keras.initializers.Constant(mean), tf.keras.initializers.Constant(variance)))\n\n for i, (kernel, stride, filters, cells) in enumerate(zipped_params):\n layers.append(PadConvRelu(kernel_size=kernel, strides=stride, filters=filters, dialation=1, name=f'conv_{i}'))\n layers.append(norm_op())\n\n for j in range(cells):\n layers.append(SearchCell(filters=filters, config=arch_desc))\n\n if use_rnn:\n layers.append(tf.keras.layers.LSTM(units=500, dropout=self.dropout_rate, time_major=False, return_sequences=True))\n\n layers.append(tf.keras.layers.Dense(self.num_classes+1))\n\n self._model = tf.keras.Sequential(layers)\n\n def call(self, input, training=None):\n return self._model(input, training=training)\n\n","sub_path":"nasbench_asr/model/tf/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"308307705","text":"import requests\nfrom lxml import etree\nfrom fake_useragent import UserAgent\nimport os\nimport urllib\n\n# 设置headers\nua = UserAgent()\nheaders = {\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n 'Connection': 'close',\n \"User-Agent\": ua.random\n}\n\n# 获取url链接的xml格式\n\n\ndef getxml(url):\n res = requests.get(url, headers, timeout=30)\n res.encoding = res.apparent_encoding\n text = res.text\n xml = etree.HTML(text)\n return xml\n\n# 获取文章内容\n\n\ndef getcontent(xml):\n data = xml.xpath('//div[@class=\"readcontent\"]/text()')\n for text in data:\n if text == '\\n':\n pass\n else:\n f.write(text.replace('\\n', ''))\n\n# 获取整本小说\n\n\ndef getbook(url):\n # 获取小说全部章节及链接\n xmllist = getxml(url)\n name = xmllist.xpath('//h1/text()')\n chapters = xmllist.xpath('//div[@id=\"list-chapterAll\"]//dd/a/text()')\n links = xmllist.xpath('//div[@id=\"list-chapterAll\"]//dd/a/@href')\n print('《' + name[0] + '》' + '已获取,共' + str(len(links)) + '章\\n')\n # 进入小说章节页,爬取章节内容,并存入txt文档中\n f.write(name[0]+'\\n\\n\\n')\n for i in range(0, 2):\n f.write('\\n' + chapters[i] + '\\n')\n urltxt = 'https://www.oldtimescc.cc/go/42472/' + links[i]\n xmlcont = getxml(urltxt)\n page = xmlcont.xpath('//div[@class=\"book read\"]//small/text()')\n getcontent(xmlcont)\n if '(1/2)' in page:\n urltxt2 = urltxt[0:-5] + '_2.html'\n xmlcont2 = getxml(urltxt2)\n getcontent(xmlcont2)\n print(chapters[i] + ': 已完成')\n f.close()\n\n\nsearchname = input('请输入小说名:')\nf = open('E:/'+searchname+'.txt', 'w', encoding='utf-8')\nsearch = urllib.parse.quote(searchname.encode('gbk'))\nsearchurl = 'https://www.oldtimescc.cc/modules/article/search.php?searchkey='+search\nprint(searchname + '正在搜索中......')\nsearchxml = getxml(searchurl)\ntry:\n bookname = searchxml.xpath('//div[@class=\"bookinfo\"]/h1/text()')\n getbook(searchurl)\n print('下载完成,已保存在E盘根目录中!')\nexcept:\n print('未查找到'+searchname+',请重试!')\n","sub_path":"python/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"642915804","text":"import sys\n\nfrom troposphere 
import And, Equals, Export, FindInMap, GetAtt, If, \\\n ImportValue, Join, Not, Output, Parameter, Ref, Sub, Template\nfrom troposphere import elasticloadbalancing, s3\nfrom troposphere.autoscaling import AutoScalingGroup, LaunchConfiguration\nfrom troposphere.ec2 import SecurityGroup\nfrom troposphere.elasticloadbalancing import LoadBalancer, HealthCheck\nfrom troposphere.iam import InstanceProfile, PolicyType, Role\nfrom troposphere.logs import LogGroup\nfrom troposphere.policies import AutoScalingRollingUpdate, UpdatePolicy\nfrom troposphere.route53 import RecordSetType\nfrom troposphere.s3 import Bucket\n\n\nXHST_RE = r\"^[0-9a-f]{16}\\.xhst\\.bbci\\.co\\.uk\\.?$\"\nSUBDOMAIN_RE = r\"^[a-z].*[a-z0-9]$\"\n\n\ndef asg_tag(key, value):\n return {\"Key\": key, \"Value\": value, \"PropagateAtLaunch\": True}\n\n\ndef param(type, name, Description=None, **kwargs):\n k = {\n str: {\"Type\": \"String\"},\n int: {\"Type\": \"Number\"},\n list: {\"Type\": \"CommaDelimitedList\"},\n \"ssh\": {\"Type\": \"AWS::EC2::KeyPair::KeyName\"},\n \"vpc\": {\"Type\": \"AWS::EC2::VPC::Id\"},\n }[type]\n k[\"Description\"] = Description or name\n k.update(kwargs)\n p = t.add_parameter(Parameter(name, **k))\n param_refs[name] = Ref(p)\n return p\n\n\nparam_refs = {}\nt = Template()\n\nt.add_description(\"Cloud Jenkins Master\")\n\nparam(str, \"ImageId\", Description=(\n \"AMI to use; set to an empty string or the word bootstrap when creating\"\n))\nparam(str, \"InstanceType\", Default=\"t2.micro\", Description=\"Instance type\")\nparam(int, \"InstanceCount\", Default=\"0\", AllowedValues=[0, 1], Description=(\n \"How many instances to run: 0 (disable service) or 1\"\n))\nparam(str, \"InstanceName\", Default=\"jenkins-master\", Description=(\n \"Name for EC2 instances, added after - \"\n))\nparam(int, \"VolumeSize\", Default=16, Description=\"Size in GB of root volume\")\nparam(str, \"Environment\", AllowedValues=[\"int\", \"test\", \"stage\", \"live\"])\nparam(str, \"Subdomain\", AllowedPattern=SUBDOMAIN_RE, Description=(\n \"Part of name added before .tools.bbc.co.uk, eg ci.teamname.test \"\n))\nparam(str, \"HostedZone\", AllowedPattern=XHST_RE, Description=(\n \"xhst.bbci.co.uk subdomain\"\n))\nparam(int, \"BackupS3ExpirationDays\", Default=30, Description=(\n \"The number of days to keep backups in S3\"\n))\nparam(str, \"BackupS3Path\", Default=\"jenkins-backups\", Description=(\n \"The S3 path that backups will be written to and cleaned from. 
Don't use\"\n \" a leading slash.\"\n))\nparam(str, \"JenkinsJNLPSlavePort\", Default=\"34624\", Description=(\n \"Port used by Jenkins to allow slaves to connect\"\n))\nparam(\"ssh\", \"KeyName\", \"SSH Key name\", Default=\"cosmos\")\nparam(str, \"CoreInfraStackName\", Default=\"core-infrastructure\", Description=(\n \"Core infrastructure stack name\"\n))\n\n\n# AWS Linux AMIs:\n# https://aws.amazon.com/amazon-linux-ami/\nt.add_mapping(\"AWSLinux\", {\n \"ap-southeast-1\": {\"ami\": \"ami-c63d6aa5\"},\n \"eu-central-1\": {\"ami\": \"ami-bf2ba8d0\"},\n \"eu-west-1\": {\"ami\": \"ami-1a962263\"},\n \"eu-west-2\": {\"ami\": \"ami-e7d6c983\"},\n \"us-east-1\": {\"ami\": \"ami-55ef662f\"},\n})\n\n\nt.add_condition(\"IsEnabled\", Not(Equals(param_refs[\"InstanceCount\"], \"0\")))\nt.add_condition(\"HasImageId\", And(\n Not(Equals(param_refs[\"ImageId\"], \"\")),\n Not(Equals(param_refs[\"ImageId\"], \"bootstrap\")),\n))\nt.add_condition(\"HasTwoAZs\", Equals(Ref(\"AWS::Region\"), \"ap-southeast-1\"))\n\nimage_id = If(\n \"HasImageId\",\n param_refs[\"ImageId\"],\n FindInMap(\"AWSLinux\", Ref(\"AWS::Region\"), \"ami\")\n)\navailability_zones = If(\"HasTwoAZs\", [\n Sub(\"${AWS::Region}a\"), Sub(\"${AWS::Region}b\")\n], [\n Sub(\"${AWS::Region}a\"), Sub(\"${AWS::Region}b\"), Sub(\"${AWS::Region}c\")\n])\npublic_subnets = If(\"HasTwoAZs\", [\n ImportValue(Sub(\"${CoreInfraStackName}-PublicSubnet0\")),\n ImportValue(Sub(\"${CoreInfraStackName}-PublicSubnet1\")),\n], [\n ImportValue(Sub(\"${CoreInfraStackName}-PublicSubnet0\")),\n ImportValue(Sub(\"${CoreInfraStackName}-PublicSubnet1\")),\n ImportValue(Sub(\"${CoreInfraStackName}-PublicSubnet2\")),\n])\nprivate_subnets = If(\"HasTwoAZs\", [\n ImportValue(Sub(\"${CoreInfraStackName}-PrivateSubnet0\")),\n ImportValue(Sub(\"${CoreInfraStackName}-PrivateSubnet1\")),\n], [\n ImportValue(Sub(\"${CoreInfraStackName}-PrivateSubnet0\")),\n ImportValue(Sub(\"${CoreInfraStackName}-PrivateSubnet1\")),\n ImportValue(Sub(\"${CoreInfraStackName}-PrivateSubnet2\")),\n])\n\n\nProfile = t.add_resource(InstanceProfile(\n \"Profile\",\n Path=\"/\",\n Roles=[Ref(\"Role\")],\n))\n\nLoadBalancerSecurityGroup = t.add_resource(SecurityGroup(\n \"LoadBalancerSecurityGroup\",\n SecurityGroupIngress=[{\n \"ToPort\": \"443\",\n \"IpProtocol\": \"tcp\",\n \"FromPort\": \"443\",\n \"CidrIp\": \"0.0.0.0/0\"\n }],\n VpcId=ImportValue(Sub(\"${CoreInfraStackName}-VpcId\")),\n GroupDescription=(\n \"An ELB group allowing access only to from the corresponding \"\n \"component\"\n ),\n))\n\nComponentElasticLoadBalancer = t.add_resource(LoadBalancer(\n \"ComponentElasticLoadBalancer\",\n Condition=\"IsEnabled\",\n Subnets=public_subnets,\n Listeners=[{\n \"InstancePort\": \"7443\",\n \"LoadBalancerPort\": \"443\",\n \"Protocol\": \"tcp\",\n \"InstanceProtocol\": \"tcp\"\n }],\n CrossZone=False,\n SecurityGroups=[Ref(LoadBalancerSecurityGroup)],\n HealthCheck=If(\n \"HasImageId\",\n HealthCheck(\n HealthyThreshold=\"3\",\n Interval=\"15\",\n Target=\"HTTP:7080/\",\n Timeout=\"10\",\n UnhealthyThreshold=\"3\",\n ),\n Ref(\"AWS::NoValue\")\n ),\n))\n\nInternalElasticLoadBalancer = t.add_resource(LoadBalancer(\n \"InternalElasticLoadBalancer\",\n Condition=\"IsEnabled\",\n Subnets=private_subnets,\n HealthCheck=HealthCheck(\n HealthyThreshold=\"3\",\n Interval=\"15\",\n Target=\"HTTP:7080/\",\n Timeout=\"10\",\n UnhealthyThreshold=\"3\",\n ),\n Listeners=[{\n \"InstancePort\": \"8081\",\n \"LoadBalancerPort\": \"8081\",\n \"Protocol\": \"tcp\",\n \"InstanceProtocol\": \"tcp\"\n }, {\n 
\"InstancePort\": param_refs[\"JenkinsJNLPSlavePort\"],\n \"LoadBalancerPort\": param_refs[\"JenkinsJNLPSlavePort\"],\n \"Protocol\": \"tcp\",\n \"InstanceProtocol\": \"tcp\"\n }],\n CrossZone=False,\n SecurityGroups=[Ref(\"InternalLoadBalancerSecurityGroup\")],\n ConnectionSettings=elasticloadbalancing.ConnectionSettings(\n IdleTimeout=3600\n ),\n Scheme=\"internal\",\n))\n\nSlaveSecurityGroup = t.add_resource(SecurityGroup(\n \"SlaveSecurityGroup\",\n VpcId=ImportValue(Sub(\"${CoreInfraStackName}-VpcId\")),\n GroupDescription=\"Security group\",\n))\n\nMasterSecurityGroup = t.add_resource(SecurityGroup(\n \"MasterSecurityGroup\",\n SecurityGroupIngress=[{\n \"ToPort\": \"7080\",\n \"IpProtocol\": \"tcp\",\n \"SourceSecurityGroupId\": Ref(LoadBalancerSecurityGroup),\n \"FromPort\": \"7080\"\n }, {\n \"ToPort\": \"7443\",\n \"IpProtocol\": \"tcp\",\n \"SourceSecurityGroupId\": Ref(LoadBalancerSecurityGroup),\n \"FromPort\": \"7443\"\n }, {\n \"ToPort\": \"7080\",\n \"IpProtocol\": \"tcp\",\n \"SourceSecurityGroupId\": Ref(\"InternalLoadBalancerSecurityGroup\"),\n \"FromPort\": \"7080\"\n }, {\n \"ToPort\": \"8081\",\n \"IpProtocol\": \"tcp\",\n \"SourceSecurityGroupId\": Ref(\"InternalLoadBalancerSecurityGroup\"),\n \"FromPort\": \"8081\"\n }, {\n \"ToPort\": param_refs[\"JenkinsJNLPSlavePort\"],\n \"IpProtocol\": \"tcp\",\n \"SourceSecurityGroupId\": Ref(\"InternalLoadBalancerSecurityGroup\"),\n \"FromPort\": param_refs[\"JenkinsJNLPSlavePort\"]\n }],\n VpcId=ImportValue(Sub(\"${CoreInfraStackName}-VpcId\")),\n GroupDescription=(\n \"A component security group allowing access only from the \"\n \"corresponding ELB\"\n ),\n))\n\nJenkinsLogGroup = t.add_resource(LogGroup(\n \"JenkinsLogGroup\",\n RetentionInDays=14,\n))\n\nInternalHostname = t.add_resource(RecordSetType(\n \"InternalHostname\",\n HostedZoneName=param_refs[\"HostedZone\"],\n ResourceRecords=If(\n \"IsEnabled\",\n [GetAtt(InternalElasticLoadBalancer, \"DNSName\")],\n [\"\\\"\\\"\"]\n ),\n Type=If(\"IsEnabled\", \"CNAME\", \"TXT\"),\n Name=Join(\".\", [\n \"internal\",\n param_refs[\"Subdomain\"],\n param_refs[\"HostedZone\"]\n ]),\n TTL=60,\n))\n\nt.add_resource(AutoScalingGroup(\n \"Instance\",\n Condition=\"IsEnabled\",\n Tags=[\n asg_tag(\"Name\", Join(\"-\", [\n param_refs[\"Environment\"],\n param_refs[\"InstanceName\"],\n ])),\n asg_tag(\"chaos-lambda-termination\", \"0.0\"),\n ],\n LoadBalancerNames=[\n Ref(ComponentElasticLoadBalancer),\n Ref(InternalElasticLoadBalancer)\n ],\n MinSize=param_refs[\"InstanceCount\"],\n MaxSize=param_refs[\"InstanceCount\"],\n VPCZoneIdentifier=private_subnets,\n LaunchConfigurationName=Ref(\"ComponentLaunchConfiguration\"),\n AvailabilityZones=availability_zones,\n UpdatePolicy=UpdatePolicy(\n AutoScalingRollingUpdate=AutoScalingRollingUpdate(\n MaxBatchSize=\"1\",\n MinInstancesInService=\"0\",\n PauseTime=\"PT0S\",\n )\n ),\n))\n\nComponentDNS = t.add_resource(RecordSetType(\n \"ComponentDNS\",\n HostedZoneName=param_refs[\"HostedZone\"],\n ResourceRecords=If(\n \"IsEnabled\",\n [GetAtt(ComponentElasticLoadBalancer, \"DNSName\")],\n [\"\\\"\\\"\"]\n ),\n Type=If(\"IsEnabled\", \"CNAME\", \"TXT\"),\n Name=Join(\".\", [param_refs[\"Subdomain\"], param_refs[\"HostedZone\"]]),\n TTL=60,\n))\n\nRole = t.add_resource(Role(\n \"Role\",\n AssumeRolePolicyDocument={\n \"Statement\": [{\n \"Action\": [\"sts:AssumeRole\"],\n \"Effect\": \"Allow\",\n \"Principal\": {\"Service\": [\"ec2.amazonaws.com\"]}\n }]\n },\n))\n\nInternalLoadBalancerSecurityGroup = t.add_resource(SecurityGroup(\n 
\"InternalLoadBalancerSecurityGroup\",\n SecurityGroupIngress=[{\n \"ToPort\": \"8081\",\n \"IpProtocol\": \"tcp\",\n \"SourceSecurityGroupId\": Ref(SlaveSecurityGroup),\n \"FromPort\": \"8081\"\n }, {\n \"ToPort\": param_refs[\"JenkinsJNLPSlavePort\"],\n \"IpProtocol\": \"tcp\",\n \"SourceSecurityGroupId\": Ref(SlaveSecurityGroup),\n \"FromPort\": param_refs[\"JenkinsJNLPSlavePort\"]\n }],\n VpcId=ImportValue(Sub(\"${CoreInfraStackName}-VpcId\")),\n GroupDescription=(\n \"An ELB group allowing access only to from the corresponding \"\n \"component\"\n ),\n))\n\nt.add_resource(PolicyType(\n \"Policy\",\n PolicyName=\"Policy\",\n PolicyDocument={\n \"Statement\": [{\n \"Action\": \"sts:AssumeRole\",\n \"Resource\": \"*\",\n \"Effect\": \"Allow\"\n }, {\n \"Action\": \"cloudwatch:*\",\n \"Resource\": \"*\",\n \"Effect\": \"Allow\"\n }, {\n \"Action\": \"s3:*\",\n \"Resource\": Join(\"\", [\n Join(\"\", [\"arn:aws:s3:::\", Ref(\"S3Bucket\")]), \"/*\"\n ]),\n \"Effect\": \"Allow\"\n }, {\n \"Action\": \"s3:ListBucket\",\n \"Resource\": [Join(\"\", [\"arn:aws:s3:::\", Ref(\"S3Bucket\")])],\n \"Effect\": \"Allow\"\n }, {\n \"Action\": \"cloudformation:Describe*\",\n \"Resource\": \"*\",\n \"Effect\": \"Allow\"\n }, {\n \"Action\": \"ec2:Describe*\",\n \"Resource\": \"*\",\n \"Effect\": \"Allow\"\n }, {\n \"Action\": [\"logs:*\"],\n \"Resource\": [\"arn:aws:logs:*:*:*\"],\n \"Effect\": \"Allow\"\n }]\n },\n Roles=[Ref(Role)],\n))\n\nComponentLaunchConfiguration = t.add_resource(LaunchConfiguration(\n \"ComponentLaunchConfiguration\",\n InstanceMonitoring=\"false\",\n ImageId=image_id,\n KeyName=param_refs[\"KeyName\"],\n BlockDeviceMappings=[{\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n \"DeleteOnTermination\": True,\n \"VolumeType\": \"gp2\",\n \"VolumeSize\": param_refs[\"VolumeSize\"]\n }\n }],\n EbsOptimized=False,\n SecurityGroups=[\n ImportValue(Sub(\"${CoreInfraStackName}-SSHFromBastionsSecGroup\")),\n Ref(MasterSecurityGroup)\n ],\n IamInstanceProfile=Ref(Profile),\n InstanceType=If(\"IsEnabled\", param_refs[\"InstanceType\"], \"t2.nano\"),\n))\n\nS3Bucket = t.add_resource(Bucket(\n \"S3Bucket\",\n AccessControl=\"BucketOwnerFullControl\",\n LifecycleConfiguration=s3.LifecycleConfiguration(\n Rules=[\n s3.LifecycleRule(\n Status=\"Enabled\",\n Prefix=Join(\"\", [param_refs[\"BackupS3Path\"], \"/\"]),\n Id=\"S3BackupsExpirationRule\",\n ExpirationInDays=param_refs[\"BackupS3ExpirationDays\"]\n )\n ]\n ),\n))\n\n\nt.add_output(Output(\n \"JenkinsBackupBucket\",\n Description=\"Bucket where Jenkins will store backups\",\n Value=Ref(S3Bucket),\n Export=Export(Sub(\"${AWS::StackName}-JenkinsBackupBucket\")),\n))\n\nt.add_output(Output(\n \"SlaveSecurityGroup\",\n Description=(\n \"The physical ID of the slave security group required for agents\"\n ),\n Value=Ref(SlaveSecurityGroup),\n Export=Export(Sub(\"${AWS::StackName}-SlaveSecurityGroup\")),\n))\n\nt.add_output(Output(\n \"JenkinsBackupBucketArn\",\n Description=\"Arn of Bucket where Jenkins will store backups\",\n Value=Join(\"\", [\"arn:aws:s3:::\", Ref(S3Bucket)]),\n Export=Export(Sub(\"${AWS::StackName}-JenkinsBackupBucketArn\")),\n))\n\nt.add_output(Output(\n \"MasterSecurityGroup\",\n Description=\"The physical ID of the master security group\",\n Value=Ref(MasterSecurityGroup),\n))\n\nt.add_output(Output(\n \"InternalHostName\",\n Description=\"The internal hostname for agents to access master\",\n Value=Ref(InternalHostname),\n))\n\nt.add_output(Output(\n \"ExternalHostName\",\n Description=\"The external hostname for the 
master to create a CNAME for.\",\n Value=Ref(ComponentDNS),\n))\n\nt.add_output(Output(\n \"JenkinsLogGroup\",\n Description=\"Log Group name for awslogs configuration\",\n Value=Ref(JenkinsLogGroup),\n))\n\n\ntemplate = t.to_json(indent=2)\nif len(sys.argv) > 1:\n open(sys.argv[1], \"w\").write(template + \"\\n\")\nelse:\n print(template)\n","sub_path":"master/stacks/src/infrastructure.py","file_name":"infrastructure.py","file_ext":"py","file_size_in_byte":14001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"277734330","text":"import os\nimport csv\n\nfrom .article_pipeline_base import ArticlePipelineBase\n\narticle_tmpl = {\n # 'title': 'string',\n # 'digest': 'brief introduction',\n # 'content': content,\n # 'thumb_media_id': 'kydwHY6c4LpYQ6QJIoazsO8R5Ofmp_2IfRd1pSHDQjQ',\n # 'author': settings.author,\n 'show_cover_pic': 0,\n 'content_source_url': '',\n # 'need_open_comment': 1,\n # 'only_fans_can_comment': 0,\n}\n\n\nclass PipeManualTransPapers(ArticlePipelineBase):\n\n def iter_tasks(self):\n input_data_dir = os.path.join(\n self.config_dir, 'manual-trans-files'\n )\n en_filename_ptn = os.path.join(\n self.project_data_dir,\n 'rawdata/nature/%(journal_pcode)s/%(journal_pcode)s-volume%(volume)02d-issue%(issue)02d.csv'\n )\n for trans_filename in sorted(os.listdir(input_data_dir)):\n full_path = os.path.join(input_data_dir, trans_filename)\n\n cn_papers = self.read_txt(full_path)\n\n # find en_issue\n basename = os.path.basename(trans_filename)\n journal_pcode, volume_str, issue_str, author = basename.split('-')\n issue_int = int(issue_str[-2:])\n volume_int = int(volume_str[-2:])\n _kwargs = {'journal_pcode': journal_pcode, 'volume': volume_int, 'issue': issue_int}\n\n en_papers_map = {}\n\n # read en papers and meta info\n en_filename = en_filename_ptn % _kwargs\n with open(en_filename) as csvfile:\n reader = csv.DictReader(csvfile)\n for p in reader:\n en_papers_map[p['url'].strip()] = p\n en_papers_map[p['doi_url'].strip()] = p\n\n # match cn with en papers\n trans_papers = []\n for p_cn in cn_papers:\n p_en = en_papers_map[p_cn['url'].strip()]\n p_cn.update(p_en)\n trans_papers.append(p_cn)\n\n # get titles to build table of contents section\n paper_titles = {}\n for idx, p in enumerate(trans_papers):\n contentType = p['contentType'].title() + 's'\n if contentType not in paper_titles:\n paper_titles[contentType] = []\n\n t_en = p['title']\n t_cn = p['title_atifical_translation']\n paper_titles[contentType].append({\n 'title': t_en,\n 'title_en': t_en,\n 'title_cn': t_cn,\n })\n paper_titles_sorted = []\n idx = 1\n for sec, pts in paper_titles.items():\n pts_sorted = []\n for p in pts:\n p['seq'] = idx\n pts_sorted.append(p)\n idx += 1\n paper_titles_sorted.append([sec, pts_sorted])\n\n journal_title = trans_papers[0]['journal_title'].title()\n task_meta = {\n 'journal_title': journal_title,\n 'journal_pcode': journal_pcode,\n 'year': 2020,\n 'volume': volume_int,\n 'issue': issue_int,\n 'author': author,\n 'papers': trans_papers,\n 'paper_titles': paper_titles_sorted,\n }\n\n yield task_meta\n\n def render_task_article(self, task_meta):\n content = {} # common_args.copy()\n content.update(task_meta)\n content.update({\n 'year_issue': '%(year)s.%(issue)02d' % task_meta,\n })\n\n filename = 'trans-%(journal_pcode)s-%(year)s-%(issue)s.html' % task_meta\n tmpl_name = 'trans_%(journal_pcode)s_tmpl.html' % task_meta\n\n # thumb_media_id = settings.thumb_ids['trans_%s_logo' % task_meta['journal_pcode']]['media_id']\n title = 
'%(journal_title)s 论文导读 -- %(year)s.%(issue)02d Vol.%(volume)s Issue %(issue)s' % task_meta\n brief = '光学领域国际顶级学术期刊. %(year)s.%(issue)s 月刊论文导读' % task_meta\n\n content = self.render(template=tmpl_name, filename=filename, **content)\n\n article = article_tmpl.copy()\n article.update({\n 'title': title,\n 'author': task_meta['author'],\n 'digest': task_meta.get('brief', brief),\n 'content': content,\n # 'thumb_media_id': thumb_media_id,\n })\n\n return article\n\n def read_txt(self, filename):\n data = []\n record_fields = [\n 'url',\n 'title',\n 'abstract',\n 'title_atifical_translation',\n 'abstract_atifical_translation',\n ]\n with open(filename) as fr:\n record_values = []\n for row in fr:\n if row.startswith('------'):\n data.append({k: v for k, v in zip(record_fields, record_values)})\n record_values = []\n elif len(row.strip()) > 0:\n record_values.append(row.strip())\n if len(record_values) > 0:\n data.append({k: v for k, v in zip(record_fields, record_values)})\n\n print('%s papers found in %s' % (len(data), filename))\n return data\n","sub_path":"article_generator/framework/PipeManualTransPapers.py","file_name":"PipeManualTransPapers.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"581926844","text":"#!/bin/python3\n# -*- coding: utf-8 -*-\n'''\n\n张三给李四通过网银转账 100 极客币,现有数据库中三张表:\n\n一张为用户表,包含用户 ID 和用户名字,\n另一张为用户资产表,包含用户 ID 用户总资产,\n第三张表为审计用表,记录了转账时间,转账 id,被转账 id,转账金额。\n\n请合理设计三张表的字段类型和表结构;\n请实现转账 100 极客币的 SQL(可以使用 pymysql 或 sqlalchemy-orm 实现),张三余额不足,转账过程中数据库 crash 等情况需保证数据一致性。\n'''\nimport pymysql\nfrom sqlalchemy import Column, create_engine, String, DateTime, DECIMAL, Integer, ForeignKey\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom decimal import Decimal\nfrom week03.homework.read_config import db_config\n\nBase = declarative_base()\n\n\nclass UserTable(Base):\n '''用户表'''\n __tablename__ = 'user_info'\n id = Column(Integer(), primary_key=True)\n name = Column(String(20))\n\n\nclass AssetTable(Base):\n '''资产表'''\n __tablename__ = 'user_asset'\n user_id = Column(Integer(), primary_key=True, nullable=False)\n asset = Column(DECIMAL(19, 4), nullable=False)\n\n\nclass RecordTable(Base):\n '''审计用表 记录了转账时间,转账 id,被转账 id,转账金额'''\n __tablename__ = 'record'\n id = Column(Integer(), primary_key=True, nullable=False)\n from_id = Column(Integer(), nullable=False)\n to_id = Column(Integer(), nullable=False)\n money = Column(DECIMAL(19, 4), nullable=False)\n\n\ndef transfer_accounts(self, other, deal):\n db_info = db_config()\n conn = pymysql.connect(**db_info)\n try:\n with conn.cursor() as cursor:\n sql = '''select id from user_info where name=%s'''\n cursor.execute(sql, self)\n self_id = cursor.fetchone()\n cursor.execute(sql, other)\n other_id = cursor.fetchone()\n if self_id and other_id:\n self_id = self_id[0]\n other_id = other_id[0]\n else:\n raise Exception(f'{self} or {other} not exit')\n sql = '''select asset from user_asset where user_id=%s'''\n cursor.execute(sql, self_id)\n self_money = cursor.fetchone()[0]\n if self_money < deal:\n print(f'{self} has not enough money to give {other}')\n else:\n sql = '''insert into record(from_id, to_id, money) VALUES (%s, %s, %s)'''\n values = (self_id, other_id, Decimal(deal))\n cursor.execute(sql, values)\n sql = '''update user_asset set asset=%s where user_id=%s'''\n values = (self_money - Decimal(deal), self_id)\n cursor.execute(sql, values)\n conn.commit()\n except Exception as e:\n 
print(e)\n finally:\n conn.close()\n\n\nif __name__ == '__main__':\n # 新建表格\n # db_curl = 'mysql+pymysql://root:123456@106.15.187.5:3306/demo?charset=utf8mb4'\n # engine = create_engine(db_curl, echo=True)\n # # Base.metadata.create_all(engine)\n # SessionClass = sessionmaker(engine)\n # session = SessionClass()\n # # 造数据\n # user_1 = UserTable(name='李四')\n # user_2 = UserTable(name='张三')\n # session.add_all([user_1, user_2])\n # session.commit()\n # 请实现转账 100 极客币的 SQL(可以使用 pymysql 或 sqlalchemy-orm 实现),张三余额不足,转账过程中数据库\n # crash 等情况需保证数据一致性。\n transfer_accounts('张三', '李四', 10)\n","sub_path":"week03/homework/orm_6.py","file_name":"orm_6.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"341198406","text":"from gpiozero import LED\nfrom time import sleep\n\n\"\"\"RPi Physical Computing 2.6\n\nCreates blinking LED via RPi's GPIO pins\n\n@copyright: 2019 (c) glenndog\n\"\"\"\n \ndef blinky_blink():\n red_led = LED(17)\n red_led.on()\n sleep(1)\n red_led.off()\n sleep(1)\n\n\n","sub_path":"physical_computing/gpio0/blinky_blink.py","file_name":"blinky_blink.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"117206215","text":"\"\"\"\nScript to build the Sphinx HTML documentation.\n\"\"\"\n\nfrom pathlib import Path\nfrom sphinx.application import Sphinx\n\n\ndocs_directory = Path('.').absolute()\nsource_directory = docs_directory\nconfiguration_directory = docs_directory\nbuild_directory = Path(configuration_directory, '_build')\ndoctree_directory = Path(build_directory, '.doctrees')\nbuilder = 'html'\n\napp = Sphinx(source_directory, configuration_directory, build_directory, doctree_directory, builder, freshenv=True,\n warningiserror=True)\napp.build()\n","sub_path":"docs/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"533906899","text":"import cProfile\n\ndef fun(start, finish):\n result = {}\n for i in range(2, 10):\n count = 0\n for el in range(start, finish):\n if el % i == 0:\n count += 1\n result[i] = count\n return result\n\n# \"task1_3.fun(2, 100)\"\n# 1000 loops, best of 3: 40.4 usec per loop\n\n# \"task1_3.fun(2, 1000)\"\n# 1000 loops, best of 3: 421 usec per loop\n\n# \"task1_3.fun(2, 10000)\"\n# 1000 loops, best of 3: 4.24 msec per loop\n\ncProfile.run('fun(2, 100)')","sub_path":"HW_1/task1_3.py","file_name":"task1_3.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"282110234","text":"############################################################################\n# Copyright (c) 2015 Saint Petersburg State University\n# Copyright (c) 2011-2014 Saint Petersburg Academic University\n# All Rights Reserved\n# See file LICENSE for details.\n############################################################################\n\nimport os\nimport itertools\n# There exists pyfasta package -- http://pypi.python.org/pypi/pyfasta/\n# Use it !\n\ndef get_lengths_from_fastafile(filename):\n \"\"\"\n Gets filename of FASTA-file\n Returns list of lengths of sequences in FASTA-file\n \"\"\"\n lengths = []\n l = 0\n for line in open(filename):\n if line[0] == '>':\n if l: # not the first sequence in FASTA\n lengths.append(l)\n l = 0\n else:\n l += len(line.strip())\n lengths.append(l)\n 
return lengths\n\n\ndef split_fasta(filename, outputdir):\n \"\"\"\n Gets filename of FASTA-file and directory to output\n Creates separate FASTA-files for each sequence in FASTA-file\n Returns nothing\n Oops, similar to: pyfasta split --header \"%(seqid).fasta\" original.fasta\n \"\"\"\n if not os.path.isdir(outputdir):\n os.mkdir(outputdir)\n outFile = None\n for line in open(filename):\n if line[0] == '>':\n if outFile:\n outFile.close()\n outFile = open(os.path.join(outputdir, line[1:].strip() + '.fa'), 'w')\n if outFile:\n outFile.write(line)\n if outFile: # if filename is empty\n outFile.close()\n\n\ndef read_fasta(filename):\n \"\"\"\n Returns list of FASTA entries (in tuples: name, seq)\n \"\"\"\n res_name = []\n res_seq = []\n first = True\n seq = ''\n fastafile = filename\n file_ext = os.path.splitext(filename)[1]\n if file_ext == \".gz\":\n import gzip\n fastafile = gzip.open(filename)\n else:\n fastafile = open(filename)\n\n for line in fastafile:\n if line[0] == '>':\n res_name.append(line.strip())\n if not first:\n res_seq.append(seq)\n else:\n first = False\n seq = ''\n else:\n seq += line.strip()\n res_seq.append(seq)\n return zip(res_name, res_seq)\n\ndef write_fasta(fasta):\n for name, seq in fasta:\n print (name)\n for i in xrange(0,len(seq),60):\n print (seq[i:i+60])\n\ndef write_fasta_to_file(filename, fasta):\n outfile = open(filename, 'a')\n for name, seq in fasta:\n outfile.write(name + '\\n')\n for i in xrange(0,len(seq),60):\n outfile.write(seq[i:i+60] + '\\n')\n outfile.close()\n\ndef comp(letter):\n return {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}[letter.upper()]\n\n\ndef rev_comp(seq):\n return ''.join(itertools.imap(comp, seq[::-1]))\n\ndef remove_nonACGT(seq):\n seq2 = []\n for c in seq:\n if c in 'ACGT':\n seq2.append(c)\n return string.join(seq2, '')\t\n","sub_path":"scripts/viralverify/fastaparser.py","file_name":"fastaparser.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"99671510","text":"# coding:utf-8\nfrom ylp_webapi.models.subject import AppSubjects\n\nclass SubjectService():\n def get_subjects(self, play_type, pindex, psize):\n \"\"\"获取专题列表\"\"\"\n query_set = AppSubjects.objects.filter(isdisabled=1, dr=0)\n\n #当前的星期\n from datetime import datetime\n dayofweek = str(datetime.now().isoweekday())\n if dayofweek == '7':\n dayofweek = '0'\n\n if play_type == 'main':\n query_set = query_set.filter(ispush=1).order_by('sortno')\n elif play_type == 'today':\n #icontains 不区分大小写\n query_set = query_set.filter(subjecttype=1, repeatday__icontains=dayofweek).order_by('sortno')\n elif play_type == 'update':\n #limit offset,size\n query_set = AppSubjects.objects.raw(\n \"select * from app_subjects where locate(%s,repeatday)=0 \\\n and isdisabled=1 and dr=0 order by sortno limit %s,%s \\\n \",\n [dayofweek, (pindex - 1) * psize, psize])\n elif play_type == 'finish':\n query_set = query_set.filter(subjecttype=2).order_by('-lastupdatetime')\n else:\n return []\n\n if play_type != 'update':\n query_set = query_set[(pindex - 1) * psize:pindex * psize + 1]\n\n return list(query_set)\n","sub_path":"ylp_webapi/services/subject_service.py","file_name":"subject_service.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"589743592","text":"#!/usr/bin/python3\n# coding: utf-8\n\nimport os\nimport subprocess\nimport datetime\nimport signal\nimport sys\nimport time\nimport 
logging\nimport json\n\nif len(sys.argv) > 1:\n folder_name = sys.argv[1]\n flight_speed_mps = float(sys.argv[2])\n flight_height_m = float(sys.argv[3])\n blur = float(sys.argv[4])\n overlap = float(sys.argv[5])\n flight_time_min = float(sys.argv[6])\nelse:\n folder_name = \"planned_mision\"\n flight_speed_mps = 10\n flight_height_m = 100\n blur = 0.75\n overlap = 0.90\n flight_time_min = 25\n\n#max_gigs_planned = 25\nmax_gigs_planned = 50\n\nwith open(\"cams_and_lenses.json\", \"r\") as fh:\n cams_and_lenses = json.loads(fh.read())\n\ncamera_id = 0\nlens_id = 0\n\ncamera_model = cams_and_lenses[\"cameras\"][camera_id] | cams_and_lenses[\"lenses\"][lens_id]\n\ndef clamp(n, minn, maxn):\n return max(min(maxn, n), minn)\n\ndef get_sensor_dims(camera_model):\n sensor_width_mm = camera_model[\"horizontal_pixels\"] * camera_model[\"pixel_size_um\"] / 1000\n sensor_height_mm = camera_model[\"vertical_pixels\"] * camera_model[\"pixel_size_um\"] / 1000\n return sensor_width_mm, sensor_height_mm\n\ndef get_fov_m(camera_model, flying_height_m):\n sensor_width_mm, sensor_height_mm = get_sensor_dims(camera_model)\n fov_width_m = flying_height_m * sensor_width_mm / camera_model[\"focal_length_mm\"]\n fov_height_m = flying_height_m * sensor_height_mm / camera_model[\"focal_length_mm\"]\n return fov_width_m, fov_height_m\n\ndef get_gsd_m(camera_model, flying_height_m):\n fov_width_m, fov_height_m = get_fov_m(camera_model, flying_height_m)\n gsd_across_m = fov_width_m / camera_model[\"horizontal_pixels\"]\n gsd_along_m = fov_height_m / camera_model[\"vertical_pixels\"]\n return gsd_across_m, gsd_along_m\n\ndef get_max_exp_time_ms(camera_model, flying_height_m, flying_speed_mps, max_blur = 0.1):\n gsd_along_m = get_gsd_m(camera_model, flying_height_m)[-1]\n max_exp_time_ms = (gsd_along_m * max_blur) / flying_speed_mps * 1000\n return clamp(max_exp_time_ms, camera_model[\"min_exposure_ms\"], camera_model[\"max_exposure_ms\"])\n\ndef get_min_frame_rate_Hz(camera_model, flying_height_m, flying_speed_mps, front_overlap = 0.9):\n fov_height_m = get_fov_m(camera_model, flying_height_m)[-1]\n min_frame_rate_Hz = flying_speed_mps / (fov_height_m * (1 -front_overlap))\n return clamp(min_frame_rate_Hz, camera_model[\"min_framerate_fps\"], camera_model[\"max_framerate_fps\"])\n\ndef get_gigs_per_minute(camera_model, flying_height_m, flying_speed_mps, front_overlap = 0.9):\n frame_rate_Hz = get_min_frame_rate_Hz(camera_model, flying_height_m, flying_speed_mps, front_overlap)\n bytes_per_frame = camera_model[\"horizontal_pixels\"] * camera_model[\"vertical_pixels\"] * max(list(camera_model[\"Bayer_mode_bpp\"].values())) / 8.0\n bytes_per_second = bytes_per_frame * frame_rate_Hz\n bytes_per_minute = bytes_per_second * 60\n gigs_per_minute = bytes_per_minute / (1024 ** 3)\n return gigs_per_minute\n\ndef get_max_number_of_frames(camera_model, flying_height_m, flying_speed_mps, front_overlap = 0.9, max_flight_time_m = 30):\n frame_rate_Hz = get_min_frame_rate_Hz(camera_model, flying_height_m, flying_speed_mps, front_overlap)\n max_number_of_frames = frame_rate_Hz * 60 * max_flight_time_m\n return max_number_of_frames\n\ntarget_framerate = get_min_frame_rate_Hz(camera_model=camera_model, flying_height_m=flight_height_m, flying_speed_mps=flight_speed_mps, front_overlap=overlap)\ntarget_number_of_images = get_max_number_of_frames(camera_model=camera_model, flying_height_m=flight_height_m, flying_speed_mps=flight_speed_mps, front_overlap=overlap, max_flight_time_m=flight_time_min)\ntarget_exposure_time = 
get_max_exp_time_ms(camera_model=camera_model, flying_height_m=flight_height_m, flying_speed_mps=flight_speed_mps, max_blur=blur)\ntarget_gigs_total = flight_time_min * get_gigs_per_minute(camera_model=camera_model, flying_height_m=flight_height_m, flying_speed_mps=flight_speed_mps, front_overlap=overlap)\n\n\nif target_gigs_total > max_gigs_planned:\n #get the highest bpp that fits the disk usage requirements\n #or the lowest if none fit!\n available_bpp = sorted(list(camera_model[\"Bayer_mode_bpp\"].values()), reverse=True)\n for bpp in available_bpp[1:]:\n if (bpp / max(available_bpp) * target_gigs_total <= max_gigs_planned) or (bpp == min(list(camera_model[\"Bayer_mode_bpp\"].values()))):\n target_bpp = bpp\n break\nelse:\n target_bpp = max(list(camera_model[\"Bayer_mode_bpp\"].values()))\ninv_bayer = {v: k[2:] for k, v in camera_model[\"Bayer_mode_bpp\"].items()}\ntarget_bayer_format = inv_bayer[target_bpp]\n\nmain_path = '/home/lidar/Documents/capture'\ndata_directory_name = 'RAWCAP'\n#navigate to top directory\nos.chdir(main_path)\nif os.getcwd() != main_path:\n print(f\"{datetime.datetime.now()} failed to change directory (0)\")\n sys.exit(0)\nelse:\n print(f\"{datetime.datetime.now()} succes! changed directory (0)\")\n\n\n\n\n\n\n\ndir_today = datetime.date.today().strftime(\"%Y.%m.%d\")\nmain_path = f\"{main_path}/{dir_today}\"\n\nif dir_today not in os.listdir():\n os.mkdir(dir_today)\n print(f\"{datetime.datetime.now()} created directory for today\")\nelse:\n print(f\"{datetime.datetime.now()} directory for today already exists\")\n\n\n#navigate to TODAY's directory\nos.chdir(main_path)\nif os.getcwd() != main_path:\n print(f\"{datetime.datetime.now()} failed to change directory (1)\")\n sys.exit(0)\nelse:\n print(f\"{datetime.datetime.now()} succes! changed directory (1)\")\n\nexisting_dirs = [name for name in os.listdir() if os.path.isdir(os.path.join(os.getcwd(), name)) and data_directory_name in name]\nnum_exist_subdir = len(existing_dirs)\n\ntry:\n sub_path = f\"{num_exist_subdir+1:03d}_{data_directory_name}_{folder_name}\"\n os.mkdir(sub_path)\nexcept:\n print(f\"{datetime.datetime.now()} failed to make new directory ({num_exist_subdir+1})\")\n sys.exit(0)\n\n\n \n \n \n \nmain_path = f\"{main_path}/{sub_path}\"\n#navigate to lowest sub-directory\nos.chdir(main_path)\nif os.getcwd() != main_path:\n print(f\"{datetime.datetime.now()} failed to change directory (2)\")\n sys.exit(0)\nelse:\n print(f\"{datetime.datetime.now()} succes! 
changed directory (2)\")\n\n \n \ndef kill_camera():\n try:\n print(\"killing camera\")\n camera_process.send_signal(signal.SIGINT)\n print(\"succeeded killing camera\")\n except:\n print(\"camera already dead\")\n time.sleep(2)\n \n \n\ndef kill_tcp_dump():\n #cmd = \"pkill -INT tcpdump\"\n for process in tcpdump_processes:\n process.send_signal(signal.SIGINT)\n #os.system(cmd)\n time.sleep(5)\n\ndef sig_handler(signum, frame):\n with open(\"python_log.txt\", \"a\") as fh:\n fh.write(f\"{datetime.datetime.now()} SIG was detected!\")\n fh.write(f\"sig num: {signum}\")\n kill_camera()\n kill_tcp_dump()\n sys.exit(0)\n\n\nsignal.signal(signal.SIGINT, sig_handler)\nsignal.signal(signal.SIGTERM, sig_handler)\n\n#time.sleep(120)\n\nlogging.basicConfig(format='<%(asctime)s> <%(levelname)-8s> <%(message)s>',\n level=logging.DEBUG,\n filename=f'{datetime.date.today().strftime(\"%Y.%m.%d\")}_{sub_path}.log',\n datefmt='%Y-%m-%d %H:%M:%S')\nlogging.info(f\"The acquisiion script is in the directory {os.getcwd()}.\")\nlogging.info(f\"Camera_parameters : {{'Bayer_mode':'{target_bayer_format}','max_exp':{target_exposure_time},'target_framerate':{target_framerate},'max_images':{target_number_of_images}}}\")\n\nwith open(\"logfile_tcpdump.txt\", \"w\") as logfile:\n tcpdump_processes = []\n with open(\"/home/lidar/Documents/capture/commands.txt\", \"r\") as fh:\n for line in fh.readlines():\n arguments = line.split()\n new_process = subprocess.Popen(arguments[:-1], stdout = logfile, stderr = logfile)\n tcpdump_processes.append(new_process)\n\n #start camera\nwith open(\"logfile_camera.txt\", \"w\") as logfile:\n camera_command = f\"/home/lidar/Documents/code/HSRW_PG02_Laser-Scanner/07_camera_acquisition/output/bin/x86_64/Debug/acquire_free_cpp_debug {target_framerate} {target_number_of_images} {target_bayer_format} {target_exposure_time}\"\n arguments = camera_command.split()\n camera_process = subprocess.Popen(arguments, stdout = logfile, stderr = logfile)\n\n\ntcpdump_still_running = True\n\nwhile tcpdump_still_running:\n if camera_process.poll() is not None:\n logging.info(f\"Camera process {camera_process.pid} returned with {camera_process.poll()}\")\n break\n for p in tcpdump_processes:\n if p.poll() is not None:\n logging.info(f\"Tcpdump process {p.pid} returned with {p.poll()}\")\n tcpdump_still_running = False\n break\n\nkill_camera()\nkill_tcp_dump()\nsys.exit(0)\n\n#for i in range(30):\n# print(i, camera_process.poll())\n# for p in tcpdump_processes:\n# print(p.poll())\n# time.sleep(5)\n#\n#signal.pause()\n\n\n\n","sub_path":"08_data_logging/capture_streams.py","file_name":"capture_streams.py","file_ext":"py","file_size_in_byte":8869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"512738037","text":"#escriba un algoritmo que dado el numero de horas trabajadas por un empleado por\n#hora, calcule el sueldo total de ese empleado. 
Tenga en cuenta que las horas\n#extras se pagan el doble\n\nsueldo = 0\nhoras_trab = 0\nhoras_extr = 0\ntotal_trab = 0\ntotal_extr = 0\ntotal = 0\n\nsueldo = float(input(\"ingrese sueldo por hora:.\"))\nhoras_trab = int(input(\"ingrese cantidad de horas trabajadas:.\"))\nhoras_extr = int(input(\"ingrese cantidad de horas extra:.\"))\ntotal_trab = sueldo * horas_trab\ntotal_extr = sueldo * horas_extr * 2\ntotal = total_trab + total_extr\n\nprint(\"sueldo por horas:.\",total_trab)\nprint(\"sueldo por horas extra:.\",total_extr)\nprint(\"sueldo total:.\",total)\n","sub_path":"ejercicios_progra/ejer9.py","file_name":"ejer9.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"634506980","text":"from app import db\nfrom app.models import ThirdPartyApp, ValuationType, CustomValuationField\n\n\nclass ThirdPartyAppService:\n\n @staticmethod\n def create(profile, app_params, custom_field_list=None):\n third_party_app = ThirdPartyApp(**app_params)\n third_party_app.owner_profile = profile\n result_field_list = []\n if custom_field_list is not None:\n for field in custom_field_list:\n new_field = CustomValuationField(**field)\n result_field_list.append(new_field)\n third_party_app.custom_valuation_fields = result_field_list\n db.session.add(third_party_app)\n db.session.commit()\n return third_party_app\n\n @staticmethod\n def update(id_, fields_to_update):\n third_party_app = (\n ThirdPartyApp.query.filter_by(id=id_).update(fields_to_update)\n )\n db.session.commit()\n return third_party_app\n\n @staticmethod\n def delete(id_):\n ThirdPartyApp.query.filter_by(id=id_).delete()\n db.session.commit()\n\n @staticmethod\n def get(id_):\n return ThirdPartyApp.query.filter_by(id=id_).first()\n\n @staticmethod\n def get_from_api_key(api_key):\n return ThirdPartyApp.query.filter_by(api_key=api_key).first()\n\n @staticmethod\n def add_custom_field(id_, field_type, field_name):\n app = ThirdPartyApp.query.filter_by(id=id_).first()\n new_field = app.add_custom_field(field_type, field_name)\n db.session.add(new_field)\n db.session.commit()\n return new_field\n\n\nclass ValuationTypeService:\n\n @staticmethod\n def get_all():\n return ValuationType.query.all()\n","sub_path":"app/third_party_app/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"368087697","text":"from os import path\nfrom google.colab import drive\n\nnotebooks_dir_name = 'notebooks'\ndrive.mount('/content/gdrive')\nnotebooks_base_dir = path.join('./gdrive/My Drive/', notebooks_dir_name)\n\n\nimport numpy as np\n\n\ndb_list = ['top', 'jgl', 'mid', 'bot', 'sup']\n\nfor db in db_list:\n for i in range(1, 6):\n if i == 1:\n minute_data = np.load('./gdrive/My Drive/notebooks/s11/v02/troll/%s_minute_%d.npy' % (db, i))\n else:\n minute_data = np.concatenate((minute_data, np.load('./gdrive/My Drive/notebooks/s11/v02/troll/%s_minute_%d.npy' % (db, i))))\n\n gameid_list = list(set(minute_data[:, 0].tolist()))\n gap = {'top': np.array([2300, 1750, 30]), 'jgl': np.array([1830, 1820, 27]), 'mid': np.array([2280, 1790, 31]), \n 'bot': np.array([1660, 1840, 34]), 'sup': np.array([1420, 1350, 5.4])}\n\n\n X, Y = [], []\n\n for gameid in gameid_list:\n one_game = minute_data[minute_data[:, 0]==gameid, 1:]\n one_game = one_game[:-1, :]\n one_game[:, 2] -= 500\n length = len(one_game)\n\n for i in range(length-5):\n min5_X = []\n for j in 
range(1, 6):\n min5_X.append(np.array((one_game[i+j, 1:] - one_game[i, 1:]) / gap[db], dtype='float32').tolist())\n \n X.append(min5_X)\n Y.append(get_Y(min5_X))\n\n if (gameid_list.index(gameid)+1) % (len(gameid_list) // 10) == 0:\n print((gameid_list.index(gameid)) // (len(gameid_list) // 10))\n\n np.save('./gdrive/My Drive/notebooks/s11/v02/%s/troll_minute_X' % (db), np.array(X))\n np.save('./gdrive/My Drive/notebooks/s11/v02/%s/troll_minute_Y' % (db), np.array(Y))\n","sub_path":"vv2/troll_make_dataset.py","file_name":"troll_make_dataset.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"164047769","text":"import configparser as cp\nimport os\nimport traceback\ntry: \n\tfrom ResponseLib import * \nexcept: \n\tfrom Libs.ResponseLib import *\n\nclass IniFile(object):\n def __init__(self,_file):\n self.file = _file\n self.Response = response()\n\n def write(self,sec,key,value):\n try:\n config = cp.ConfigParser()\n config.read(self.file)\n if not sec in config.sections():\n config[sec] = {}\n config[sec][key] = \"\"\n config.write(open(self.file, 'w'))\n config[sec][key] = value\n config.write(open(self.file, 'w'))\n return self.Response.success()\n except:\n return self.Response.error(rst = True,msg=traceback.format_exc())\n \n def read(self,sec,key):\n try:\n config = cp.ConfigParser()\n config.read(self.file)\n if not sec in config.sections():\n return self.Response.error(rst = True,msg=\"Section not found!\")\n if not key in config[sec]:\n return self.Response.error(rst = True,msg=\"Key not found!\")\n return self.Response.success(obj=config[sec][key])\n except:\n return self.Response.error(rst = True,msg=traceback.format_exc())\n \n def getKeys(self,section):\n try:\n config = cp.ConfigParser()\n config.read(self.file)\n return self.Response.success(obj=[k for k in config[section]])\n except:\n return self.Response.error(rst = True,msg=traceback.format_exc())","sub_path":"Selenium/BBB/Libs/IniFileLib.py","file_name":"IniFileLib.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"410495302","text":"# \n# Copyright (C) University College London, 2007-2012, all rights reserved.\n# \n# This file is part of HemeLB and is CONFIDENTIAL. 
You may not work \n# with, install, use, duplicate, modify, redistribute or share this\n# file, or any part thereof, other than as allowed by any agreement\n# specifically made by you with University College London.\n# \n\nimport os\nimport unittest\nimport fixtures\n\nimport results_collection\n\nclass TestExtraction(unittest.TestCase):\n def setUp(self):\n self.rc=results_collection.ResultsCollection(fixtures.Results('cylinders').path,fixtures.ResultsConfig('optional'))\n def test_optionally_present(self):\n for result in self.rc.results:\n if result.cores==1:\n self.assertIn('banana',result.properties)\n self.assertEqual(result.banana,1)\n else:\n self.assertNotIn('banana',result.properties)\n def test_optional_definition(self):\n for result in self.rc.results:\n if result.cores==1:\n self.assertIn('apple',result.properties)\n if result.voxelsize!=6:\n pass\n # conflicting definition case,\n # behaviour not defined\n else:\n self.assertEqual(result.apple,result.cores)\n elif result.voxelsize!=6:\n self.assertIn('apple',result.properties)\n self.assertEqual(result.apple,result.voxelsize)\n else:\n self.assertNotIn('apple',result.properties)\n\t \n","sub_path":"Tools/analysis/test/test_optional_properties.py","file_name":"test_optional_properties.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"309982236","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport scanpy as sc\nfrom torch.autograd import Variable\n\nclass SkipGramModel(nn.Module):\n\n def __init__(self, emb_size, emb_dimension):\n super(SkipGramModel, self).__init__()\n self.emb_size = emb_size\n self.emb_dimension = emb_dimension\n self.u_embeddings = nn.Embedding(emb_size, emb_dimension, sparse=True)\n self.v_embeddings = nn.Embedding(emb_size, emb_dimension, sparse=True)\n\n initrange = 1.0 / self.emb_dimension\n init.uniform_(self.u_embeddings.weight.data, -initrange, initrange)\n init.constant_(self.v_embeddings.weight.data, 0)\n\n def forward(self, pos_u, pos_v, neg_v, clip=10):\n emb_u = self.u_embeddings(pos_u)\n emb_v = self.v_embeddings(pos_v)\n emb_neg_v = self.v_embeddings(neg_v)\n\n score = torch.sum(torch.mul(emb_u, emb_v), dim=1)\n score = torch.clamp(score, max=clip, min=-clip)\n score = -F.logsigmoid(score)\n\n neg_score = torch.bmm(emb_neg_v, emb_u.unsqueeze(2)).squeeze()\n neg_score = torch.clamp(neg_score, max=clip, min=-clip)\n neg_score = -torch.sum(F.logsigmoid(-neg_score), dim=1)\n\n return torch.mean(score + neg_score)\n\n def save_embedding(self, id2word, file_name):\n embedding = self.u_embeddings.weight.cpu().data.numpy()\n with open(file_name, 'w') as f:\n f.write('%d %d\\n' % (len(id2word), self.emb_dimension))\n for wid, w in id2word.items():\n e = ' '.join(map(lambda x: str(x), embedding[wid]))\n f.write('%s %s\\n' % (w, e))\n\nclass CompassTrainer(object):\n def __init__(self, dataset, output_file, emb_dimension=100, batch_size=1000, initial_lr=0.01):\n self.dataset = dataset\n self.dataloader = DataLoader(self.dataset, batch_size=batch_size, shuffle=True, num_workers=0, collate_fn=dataset.collate)\n self.output_file_name = output_file\n self.emb_size = len(self.dataset.data.gene2id)\n self.emb_dimension = emb_dimension\n self.batch_size = batch_size\n self.initial_lr = initial_lr\n self.skip_gram_model = SkipGramModel(self.emb_size, 
self.emb_dimension)\n self.use_cuda = torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if self.use_cuda else \"cpu\")\n if self.use_cuda:\n self.skip_gram_model.cuda()\n \n def create_dataloader(self, dataset):\n return DataLoader(dataset, batch_size=self.batch_size, shuffle=True, num_workers=0, collate_fn=dataset.collate)\n\n def train(self, iterations, lr=None, negative_targets=5, discard_probability=0.05):\n for iteration in range(iterations):\n print(\"Epoch: {}\".format(iteration+1))\n if not lr:\n lr = self.initial_lr\n optimizer = optim.SparseAdam(list(self.skip_gram_model.parameters()), lr=lr)\n self.dataset.update_discard_probability(discard_probability)\n self.dataset.update_negative_targets(negative_targets)\n self.dataloader = self.create_dataloader(self.dataset)\n running_loss = 0.0\n for i, sample_batched in enumerate(tqdm(self.dataloader)):\n if len(sample_batched[0]) > 1:\n pos_u = sample_batched[0].to(self.device)\n pos_v = sample_batched[1].to(self.device)\n neg_v = sample_batched[2].to(self.device)\n optimizer.zero_grad()\n loss = self.skip_gram_model.forward(pos_u, pos_v, neg_v)\n loss.backward()\n optimizer.step()\n running_loss =+ loss.item()\n print(\"Loss:\", running_loss)\n self.skip_gram_model.save_embedding(self.dataset.data.id2gene, self.output_file_name)\n","sub_path":"compass/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"631191170","text":"# -*- coding: utf-8 -*-\n# @File : squeezenet.py\n# @Author: SmartGx\n# @Date : 19-3-8 上午9:43\n# @Desc : 搭建squeezeNet网络模型\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom models.basic_module import BasicModule\nfrom torchvision.models.squeezenet import squeezenet1_1\n\nclass SqueezeNet(BasicModule):\n def __init__(self, num_classes=2):\n super(SqueezeNet, self).__init__()\n self.model_name = 'squeezenet'\n self.num_classes = num_classes\n self.model = squeezenet1_1(pretrained=True)\n self.model.num_classes = num_classes\n self.model.classifier = nn.Sequential(\n nn.Dropout(p=0.5),\n nn.Conv2d(512, self.num_classes, 1, 1),\n nn.ReLU(inplace=True),\n nn.AvgPool2d(13, stride=1)\n )\n\n def forward(self, x):\n return self.model(x)\n\n def get_optim(self, lr, weight_decay):\n return optim.Adam(self.model.classifier.parameters(), lr=lr, weight_decay=weight_decay)\n\nif __name__ == '__main__':\n # 测试网络模型\n squeezenet = SqueezeNet()\n print(squeezenet)\n\n data = torch.randn(1, 3, 224, 224)\n output = squeezenet(data)\n print(output)","sub_path":"07_cat_and_dog/models/squeezenet.py","file_name":"squeezenet.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"74340447","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom urllib.parse import urlparse\nimport urllib.request\nimport urllib\nfrom html.parser import HTMLParser\nimport unittest\nimport configparser\n\n\nclass ConfigReader(object):\n\n\tdef __init__(self):\n\t\tself.config = configparser.ConfigParser()\n\t\t#self.config.optionxform = str # this is to resolve case capital conversion issue\n\n\t\tself.config.read(\"../bin/config.ini\")\n\t\tself.sections = self.config.sections()\n\t\tself.d_config = {} #init a dict, key is section, value is a dict of option\n\t\tfor section in self.sections:\n\t\t\tself.d_option = 
{}\n\t\t\tfor option in self.config.options(section):\n\t\t\t\tself.d_option[option] = self.config.get(section, option)\n\t\t\tself.d_config[section] = self.d_option\n\n\tdef getDatabaseURI(self):\n\t\tusername = self.d_config[\"Database\"][\"username\"]\n\t\tpassword = self.d_config[\"Database\"][\"password\"]\n\t\thost = self.d_config[\"Database\"][\"host\"]\n\t\tport = self.d_config[\"Database\"][\"port\"]\n\t\tdb = self.d_config[\"Database\"][\"db\"]\n\t\turi = \"mongodb://\" \\\n\t\t\t + username + \":\" \\\n\t\t\t + password + \"@\" \\\n\t\t\t + host + \":\" \\\n\t\t\t + port + \"/\" \\\n\t\t\t + db\n\t\treturn uri\n\n\tdef getDb(self):\n\t\treturn self.d_config[\"Database\"][\"db\"]\n\n\tdef getDownloadFolder(self):\n\t\treturn self.d_config[\"Other\"][\"downloadfolder\"]\n\n# config = ConfigReader(\"../bin/config.ini\")\n# u = config.getDatabaseURI()\n# print(u)\nclass Helper(object):\n\n\t\"\"\"Convert a long dict into a list of dict.\n\t example: {key1: value1, key2: value2} ==> [{key1: value1}, {key2: value2}]\n\t\"\"\"\n\tdef convertDict(self, dict):\n\t\tl_of_dict = []\n\t\tfor k in dict:\n\t\t\td_temp = {}\n\t\t\td_temp[k] = dict[k]\n\t\t\tl_of_dict.append(d_temp)\n\t\treturn l_of_dict\n\n\tdef setStatus(self, dict, status):\n\t\tdict[\"Status\"] = status\n\t\treturn dict\n\n\tdef setBatchStatus(self, list_of_dict, status):\n\t\tfor dict in list_of_dict:\n\t\t\tdict[\"Status\"] = status\n\t\treturn list_of_dict\n\n\n#print(ConfigReader().getDatabaseURI())","sub_path":"lib/Helper.py","file_name":"Helper.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"146086315","text":"import object\nimport neat\nimport player\nimport constanst as const\nimport numpy as np\n\n\n\n\nclass NN(player.Player):\n def __init__(self, _side):\n # print(\"Creating NN Player...\")\n super().__init__(_side)\n self.count = 0\n\n @staticmethod\n def normalise(value, minValue, maxValue):\n return (value - minValue) / (maxValue - minValue)\n\n def evaluateSpeed(self, output):\n\n self.speed = [0,0]\n maxX = np.argmax(output[0:3])\n maxY = np.argmax(output[3:6])\n if maxX == 0:\n self.setSpeedRight()\n elif maxX == 1:\n self.setSpeedLeft()\n\n if maxY == 0:\n self.setSpeedUp()\n elif maxY == 1:\n self.setSpeedDown()\n\n # print(\"output = \", output)\n # if output[0] > 0.5 or output[1] > 0.5: # move in Y - axis\n # if output[0] > output[1]:\n # self.setSpeedUp()\n # else:\n # self.setSpeedDown()\n # else:\n # self.speed[1] = 0\n #\n # if output[2] > 0.5 or output[3] > 0.5: # move in X - axis\n # if output[2] > output[3]:\n # self.setSpeedRight()\n # else:\n # self.setSpeedLeft()\n # else:\n # self.speed[0] = 0\n\n # print(\"speed of NN player is \", self.speed)\n\n def updatePlayer(self, win, puck, mouse_pos = None, net = None):\n self.count += 1\n if self.count % 15 == 0:\n # input = (self.normalise(puck.x, const.barrier + puck.diameter/2, const.WIDTH - puck.diameter/2 - const.barrier),\n # self.normalise(puck.y, const.tabbleYOffset + const.barrier + puck.diameter/2, const.tabbleYOffset + const.HEIGHT - puck.diameter/2 - const.barrier),\n # self.normalise(puck.speed[0], 0, const.maxSpeed),\n # self.normalise(puck.speed[1], 0, const.maxSpeed),\n # self.normalise(self.x, const.WIDTH/2 + 2*self.diameter, const.WIDTH - self.diameter/2 - const.barrier),\n # self.normalise(self.y, const.tabbleYOffset + const.barrier + self.diameter/2, const.tabbleYOffset + const.HEIGHT - self.diameter/2 - const.barrier))\n input = 
(self.normalise(puck.y, const.tabbleYOffset + const.barrier + puck.diameter / 2, const.tabbleYOffset + const.HEIGHT - puck.diameter / 2 - const.barrier),\n self.normalise(self.y, const.tabbleYOffset + const.barrier + self.diameter / 2, const.tabbleYOffset + const.HEIGHT - self.diameter / 2 - const.barrier))\n output = net.activate(input)\n self.evaluateSpeed(output)\n # print(output)\n self.updatePos()\n self.allowPosition() # player is on the middle line -> set speed = 0 to prevent lagging with puck\n self.updateBarriers() # player is on the goal line -> set speed = 0 to prevent lagging with puck\n self.normaliseSpeed()\n if const.render:\n self.draw(win)\n","sub_path":"nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"192078047","text":"'''\r\n Determine whether the number is prime or composite\r\n'''\r\n\r\nn = int(input(\"enter the positive number = \")) # input positive number only\r\ni = int(pow(n, (1/2)))\r\n\r\nif n is 1: # condition\r\n print(n, \"is neither prime nor composite\")\r\nelif i > 1: # condition\r\n for f in range(2, i+1):\r\n if n % f is 0:\r\n print(n, \" is composite number\")\r\n break\r\n else:\r\n print(n, \" is prime number\")\r\n break\r\nelse: # condition\r\n print(n, \" is prime number\")\r\n","sub_path":"Prime_Number_Check.py","file_name":"Prime_Number_Check.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"603027505","text":"# ================================= #\n# ReaderCSV Class only #\n# Author : Quentin Prieels #\n# Date : April 2021 #\n# Version 1.4 #\n# ================================= #\n\n# Packages\nimport numpy as np\n\n# From project files\nfrom main.plots import plot_signals\nfrom main.HelpFunctions import warningText\n\n\nclass ReaderCSV:\n \"\"\"\n This class allows to read CSV files and to use them to make graphs. Each object must be a CSV file with the\n following structure:\n\n ;;;...;\n ;;;...;\n # Blank line\n ;;;... ;\n ;;;... ;\n ...\n ;;;... ;\n\n So there are m different signals ALL WITH THE SAME UNIT (except the x-axis) but in orders of magnitude that can be\n different. 
The units and their orders of magnitude accepted are:\n\n ===================================================\n == Name : Symbols used (do not use the name) ==\n == Volt : kV, V, mV, iV, nV, pV ==\n == Ampere : kA, A, mA, iA, nA, pA ==\n == Ohms : kO, O, mO, iA, nO, pO ==\n == Hertz : kH, H, mH, iH, nH, pH ==\n ===================================================\n\n These units are only valid for the different signals (y-axis).\n \"\"\"\n\n # Class Variables\n __units = {'V': 'Tension',\n 'A': 'Ampère',\n 'O': 'Ohms',\n 'H': 'Hertz'}\n __orders = {'k': 1000,\n '': 1,\n 'm': 0.001,\n 'i': 0.000001,\n 'n': 0.000000001,\n 'p': 0.0000000000001}\n\n # Constructor\n def __init__(self, filepath, filename, signals_names, preferred_unit='V'):\n \"\"\"\n :param filepath: Location of the CSV file\n :param filename: Name of the file\n :param signals_names: Names of the signals\n :param preferred_unit: Unit to be use for the y-axis.\n \"\"\"\n # File infos\n self.__path = filepath\n self.__name = filename\n\n # Signals infos\n self.__signalsNames = signals_names\n self.__numberSignals = self.findSignalsNumbers()\n self.__signalsUnits = self.findUnits()[0]\n\n # Units infos\n self.__preferredUnit = preferred_unit\n self.__axisUnits = self.findAxisUnits()\n\n # Axis info\n self.__axisNames = self.findAxisNames()\n\n # Accessor methods\n def getPath(self):\n \"\"\"\n :return: Path of CSV file\n :rtype: str\n \"\"\"\n return self.__path\n\n def getName(self):\n \"\"\"\n :return: Name of file\n :rtype: str\n \"\"\"\n return self.__name\n\n def getNumberSignals(self):\n \"\"\"\n :return: Number of signals (curve that will be draw)\n :rtype: int\n \"\"\"\n return self.__numberSignals\n\n def getSignalsNames(self):\n \"\"\"\n :return: Name(s) of the signals\n :rtype: list\n \"\"\"\n return self.__signalsNames\n\n def getSignalsUnits(self):\n \"\"\"\n :return: Units in which the signals are expressed\n :rtype: list\n \"\"\"\n return self.__signalsUnits\n\n def getPreferredUnit(self):\n \"\"\"\n :return: The unit that will be use for all signals when there are plot (also onto y-axis name)\n :rtype: str\n \"\"\"\n return self.__preferredUnit\n\n def getAxisUnits(self):\n \"\"\"\n :return: The units of the x and y axis\n :rtype: list\n \"\"\"\n return self.__axisUnits\n\n def getAxisNames(self):\n \"\"\"\n :return: The names of the x and y axis\n :rtype: list\n \"\"\"\n return self.__axisNames\n\n # Mutator methods\n def setName(self, new_name):\n \"\"\"\n Change the name of the object and check that the length of the new name is behind 144 charters.\n :param new_name: New name for the title of the plot\n :type new_name: str\n :return: Nothing, it just change the instance variable __name\n \"\"\"\n if len(new_name) <= 144:\n old_name = self.getName()\n self.__name = str(new_name)\n print('Name {} is change by {}.'.format(old_name, self.getName()))\n else:\n msg = \"The requested name has a length of more than 144 characters, please change the name.\"\n print(warningText(msg))\n\n def setPreferredUnit(self, unit):\n \"\"\"\n Changes the unit used to represent the graphs. What is most important is the prefix and therefore the order of\n magnitude. Be careful not to change the unit itself, otherwise the names of the axes will also be changed. 
It\n also check that the unit is correct and usable (see the definition of the class).\n :param unit: New unit to use\n :type unit: str\n \"\"\"\n if self.checkUnit(unit):\n old_unit = self.getPreferredUnit()\n self.__preferredUnit = unit\n self.__axisUnits = self.findAxisUnits()\n print('Preferred unit {} is change by {}.'.format(old_unit, self.getPreferredUnit()))\n else:\n msg = \"This unit is not usable. Please change the unit. Current unit: {}\".format(self.getPreferredUnit())\n print(warningText(msg))\n\n def setAxisNames(self, x_axis, y_axis):\n \"\"\"\n Change the name of the x and y axis.\n :param x_axis: New name for x axis\n :param y_axis: New name for y axis\n \"\"\"\n old_names = self.getAxisNames()\n self.__axisNames = [x_axis, y_axis]\n print('Axis names {} is change by {}.'.format(old_names, self.getAxisNames()))\n\n # Knowledge methods\n def findSignalsNumbers(self):\n \"\"\"\n Find the number of signals that are in the CSV file. The function does not count the x axis as a signal. A CSV\n file with 3 numbers\n (x; signal 1; signal 2)\n will have 2 signals.\n :return: The number of signals\n :rtype: int\n \"\"\"\n with open(self.getPath(), 'r') as file:\n first_line = file.readline().strip().split(';')\n return len(first_line) - 1\n\n def findUnits(self):\n \"\"\"\n Find the units needed to run the program. We find the units of the x-axis and the unit in which each signal is\n expressed.\n :return: The different units in the form [[signal unit], x-axis unit]\n :rtype: list\n \"\"\"\n with open(self.getPath(), 'r') as file:\n # Get units\n second_line = file.readlines()[1]\n units = second_line.strip().replace('(', '').replace(')', '').split(';')\n\n # Check units\n for unit in units[1:]:\n if not self.checkUnit(unit):\n raise ValueError('This unit ({})can not be use'.format(unit))\n\n return units[1:], units[0]\n\n def findAxisUnits(self):\n \"\"\"\n Find the unit of each axis\n :return: The unit of x and y axis [x axis, y axis]\n :rtype: list\n \"\"\"\n return [self.findUnits()[1], self.getPreferredUnit()]\n\n def findAxisNames(self):\n \"\"\"\n Find the name of each axis. The name of x axis is the name that is given bt the CSV file (first word at first\n line => see CSV structure that is use in the class description). 
The name of y axis is deduced from the unit of\n signals.\n :return: The name of x and y axis\n :rtype: list\n \"\"\"\n # x-axis\n with open(self.getPath(), 'r') as file:\n x_axis_name = file.readline().strip().split(';')[0]\n\n # y-axis\n try:\n y_axis_name = self.__units[self.getPreferredUnit()[-1]]\n except:\n y_axis_name = 'Error !'\n msg = \"y-axis name can not be define, please define them manually.\"\n print(warningText(msg))\n\n return [x_axis_name, y_axis_name]\n\n def unitsChange(self, number, old_unit, new_unit):\n \"\"\"\n Gives the value of a number in a desired unit from another unit\n :param number: Number to use\n :type number: float\n :param old_unit: Actual unit of this number\n :type old_unit: str\n :param new_unit: Unit of the new number\n :type new_unit: str\n :return: The value of the number into the new unit\n :rtype float\n \"\"\"\n try:\n # Old unit into standard unit\n if old_unit in self.__units:\n standard_unit = number\n else:\n standard_unit = number * self.__orders[old_unit[0]]\n\n # Standard unit into new unit\n if new_unit in self.__units:\n return standard_unit\n else:\n return standard_unit / self.__orders[new_unit[0]]\n\n except:\n ValueError('An error occurred during the transformation of units')\n\n # Plot methods\n def makeSignals(self, precision=1):\n \"\"\"\n Create numpy array with the different signals that can be\n :param precision: Allows to take only a part of the data in the file. This value MUST BE positive\n :type precision: int\n :return: Return the x list (list of all x-axis points) of size n and a numpy matrix of m signals and length n\n :rtype: tuple\n \"\"\"\n # Create the arguments of plotSignals function (see plotsTest.py > plotSignal)\n with open(self.getPath(), 'r') as file:\n lines = file.readlines()[3:]\n n = len(lines)\n m = self.getNumberSignals()\n x = np.zeros(n)\n signals = np.zeros((m, n))\n for i in range(0, n, precision):\n for j in range(m + 1):\n line = lines[i].strip().split(\";\")\n if j == 0:\n x[i] = float(line[j].replace(',', '.'))\n else:\n number = float(line[j].replace(',', '.'))\n signal_value = self.unitsChange(number, self.getSignalsUnits()[j - 1],\n self.getPreferredUnit())\n signals[j - 1][i] = signal_value\n\n return x, signals, self.getSignalsNames(), self.getSignalsUnits()\n\n def plot(self, precision=1, title=False, saving=False):\n \"\"\"\n Displays the data from the CSV file as a graph.\n :param precision: Allows to take only a part of the data in the file. This value MUST BE positive\n :type precision: int\n :param title: Know of the plot must have a title or not\n :type title: bool\n :return: Create a plot\n \"\"\"\n # Create the arguments of plotSignals function (see plotsTest.py > plotSignal)\n x, signals = self.makeSignals(precision)[:-2]\n\n x_axis_name = self.getAxisNames()[0] + \" [\" + self.getAxisUnits()[0] + \"]\"\n y_axis_name = self.getAxisNames()[1] + \" [\" + self.getAxisUnits()[1] + \"]\"\n\n # Use of plot function\n plot_signals(x, signals, self.getSignalsNames(), x_label=x_axis_name, y_label=y_axis_name, title=title,\n saving=saving)\n\n # Magic Methods\n def __str__(self):\n \"\"\"\n :return: A string representation of the most useful data of the CSV file\n :rtype: str\n \"\"\"\n n = self.getNumberSignals()\n text = '\\n=====================================================================\\n'\n text += 'This are informations about \\33[94m{}\\033[0m file, locate at \\33[94m{}\\033[0m. 
\\n'\\\n .format(self.getName(), self.getPath())\n text += 'Number of Signals : {} \\n'.format(n)\n for i in range(0, n):\n text += '\\t - {} ({}) \\n'.format(self.getSignalsNames()[i], self.getSignalsUnits()[i])\n text += 'Preferred unit is {}.\\n'.format(self.getPreferredUnit())\n text += 'Axis are : {} ({}) (x-axis) and {} ({}) (y-axis)\\n'.format(self.getAxisNames()[0],\n self.getAxisUnits()[0],\n self.getAxisNames()[1],\n self.getAxisUnits()[1])\n text += '=====================================================================\\n'\n return text\n\n # Check method\n def checkUnit(self, unit):\n \"\"\"\n Verifies that the unit is consistent with the unit the class is considering\n :param unit: Unit to check\n :return: True or False, it depend of the unit is correct or not\n :rtype: bool\n \"\"\"\n if unit[-1] in self.__units:\n if len(unit) == 2 and unit[0] in self.__orders:\n return True\n elif len(unit) == 1:\n return True\n else:\n return False\n else:\n return False\n\n\nif __name__ == '__main__':\n test = ReaderCSV('../../data/exemple.csv', 'test', ['Signal 1', 'Signal 2', 'Signal 3', 'Signal 4'])\n test.plot()\n","sub_path":"main/treatment/ReaderCSV.py","file_name":"ReaderCSV.py","file_ext":"py","file_size_in_byte":13390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"174506035","text":"# -*- coding: utf-8 -*- \n'''\nCreated on 17.10.2012\n\n@author: mr_bin\n'''\n\nfrom django.http import Http404\nfrom django.shortcuts import redirect\nfrom mainsite.models import *\nfrom mainsite.forms import *\nfrom mainsite.utils.utils import utils\nfrom mainsite.ui.route_ui import route_ui\n\nclass bus_ui():\n def __init__(self, context):\n self.context = context\n self.user = context.user \n self.user_profile = self.user.get_profile() \n self.u = utils()\n \n def buses(self):\n buses = Bus.objects.filter(route__city = self.user_profile.adress.city) \n return buses\n\n def add(self):\n post = self.context.POST\n route = route_ui(self.context)\n route_choises = self.u.choises_gen(route.routes(), 'number') \n form = BusForm(post, route_choises=route_choises)\n \n if self.context.method == \"POST\": \n if form.is_valid():\n route = Route.objects.get_or_create(number = form.cleaned_data['route_hide'], \n city = self.user_profile.adress.city)[0]\n \n one_bus = Bus.objects.get_or_create(route = route, \n model = form.cleaned_data['model'],\n licensePlate = form.cleaned_data['licensePlate'])[0]\n one_bus.save() \n \n cleared_form = BusForm()\n cleared_form.fields['route'].choices = route_choises\n return cleared_form\n \n def edit(self, id = 0):\n post = self.context.POST\n route = route_ui(self.context)\n route_choises = self.u.choises_gen(route.routes(), 'number') \n form = BusForm(post, route_choises=route_choises)\n bus = Bus.objects.get(id = id)\n \n if self.context.method == \"POST\": \n if form.is_valid():\n route = Route.objects.get_or_create(number = form.cleaned_data['route_hide'], \n city = self.user_profile.adress.city)[0]\n \n bus.route = route \n bus.model = form.cleaned_data['model']\n bus.licensePlate = form.cleaned_data['licensePlate']\n bus.save() \n \n cleared_form = BusForm()\n cleared_form.fields['route'].choices = route_choises\n cleared_form.fields['route'].initial = bus.route.number\n cleared_form.fields['model'].initial = bus.model\n cleared_form.fields['licensePlate'].initial = bus.licensePlate\n return cleared_form\n \n def remove(self, id = 0):\n try:\n Bus.objects.get(id = id).delete()\n except:\n raise 
Http404","sub_path":"oni_kuryat/mainsite/ui/bus_ui.py","file_name":"bus_ui.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"565700496","text":"import os\nimport time\n\n# 1. Файлы и каталоги, которые необходимо скопировать, собираются в список.\nsource = ['E:\\testbot\\simple', 'E:\\testbot\\simple\\today']\n# Заметьте, что для имён, содержащих пробелы, необходимо использовать# двойные кавычки внутри строки.\n# 2. Резервные копии должны храниться в основном каталоге резерва.\ntarget_dir = 'E:\\testbot\\simple\\today' # Подставьте ваш путь.\n# 3. Файлы помещаются в zip-архив.\n# 4. Именем для zip-архива служит текущая дата и время.\n# today = target_dir\ntoday = target_dir + os.sep + time.strftime('%Y%m%d')\nprint(today)\n# 5. Текущее имя служит именем архива\nnow = time.strftime(\"H%M%S\")\n# 6. Создаем каталог, если его еще нету\nif os.path.exists(today):\n os.mkdir('today') # Создание каталога имя\n print(\"Каталог успешно создан\", today)\nelse:\n print(\"Каталог уже есть\", today)\n# 7. Имя файла\ntarget = now + \".zip\"\n# 8. Используем команду Zip для помещения файлов в архив\nzip_command = \"zip -qr {0} {1}\".format(target, \"\".join(source))\n# 9. Используем команду запуска\nif os.system(zip_command) == 0:\n print(\"Резервная копия успешно создана\", target)\nelse:\n print(\"Создание резервной копии НЕ УДАЛОСЬ\")\n","sub_path":"backup_ver2_7zip.py","file_name":"backup_ver2_7zip.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"150649344","text":"from typing import Tuple, Union\r\n\r\nfrom base import Base\r\nfrom globals import GlobalEvents\r\n\r\n\r\n\"\"\"\r\nClass LightManager, handle common light use-cases\r\n\r\n- turn on/off ambiant ligts with remote, area listen to the event and act accordingly\r\n\r\n\"\"\"\r\n\r\n\r\nclass LightManager(Base):\r\n\r\n def initialize(self) -> None:\r\n \"\"\"Initialize.\"\"\"\r\n super().initialize() # Always call base class\r\n\r\n self._remote_buttons_ambient_light = self.args.get(\r\n 'remote_buttons_ambient_light', {})\r\n\r\n for remote_button_ambient_light in self._remote_buttons_ambient_light:\r\n self.listen_state(\r\n self.__on_ambient_light_button_pressed, remote_button_ambient_light)\r\n\r\n def __on_ambient_light_button_pressed(\r\n self, entity: Union[str, dict], attribute: str, old: dict,\r\n new: dict, kwargs: dict) -> None:\r\n \"\"\"called when ambient remote that button pressed on controlls \"\"\"\r\n if new == 'on':\r\n self.log(\"FIRE CMD_AMBIENT_LIGHTS_ON\")\r\n self.fire_event(GlobalEvents.CMD_AMBIENT_LIGHTS_ON.value)\r\n self.log_to_logbook('Lights', \"Ambient ligts on\")\r\n else:\r\n self.log(\"FIRE CMD_AMBIENT_LIGHTS_OFF\")\r\n self.fire_event(GlobalEvents.CMD_AMBIENT_LIGHTS_OFF.value)\r\n self.log_to_logbook('Lights', \"Ambient ligts off\")\r\n ","sub_path":"appdaemon/apps/lights/light_manager.py","file_name":"light_manager.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"596161977","text":"# Copyright 2015 Open Platform for NFV Project, Inc. 
and its contributors\n# This software is distributed under the terms and conditions of the 'Apache-2.0'\n# license which can be found in the file 'LICENSE' in this package distribution\n# or at 'http://www.apache.org/licenses/LICENSE-2.0'.\n\n\"\"\"Unit tests for LogManager\"\"\"\n\nimport time\nfrom oslo_config import cfg\nfrom keystone.tests import unit as tests\nfrom keystone.contrib.moon.core import ConfigurationManager\nfrom keystone.contrib.moon.core import IntraExtensionAuthzManager\nfrom keystone.tests.unit.ksfixtures import database\nfrom keystone.contrib.moon.exception import *\nfrom keystone.tests.unit import default_fixtures\nfrom keystone.contrib.moon.core import LogManager, TenantManager\nfrom keystone.tests.moon.unit import *\n\nCONF = cfg.CONF\n\nUSER_ADMIN = {\n 'name': 'admin',\n 'domain_id': \"default\",\n 'password': 'admin'\n}\n\nIE = {\n \"name\": \"test IE\",\n \"policymodel\": \"policy_rbac_authz\",\n \"description\": \"a simple description.\"\n}\n\nTIME_FORMAT = '%Y-%m-%d-%H:%M:%S'\n\n\nclass TestIntraExtensionAdminManager(tests.TestCase):\n\n def setUp(self):\n self.useFixture(database.Database())\n super(TestIntraExtensionAdminManager, self).setUp()\n self.load_fixtures(default_fixtures)\n self.load_backends()\n domain = {'id': \"default\", 'name': \"default\"}\n self.resource_api.create_domain(domain['id'], domain)\n self.admin = create_user(self, username=\"admin\")\n self.demo = create_user(self, username=\"demo\")\n ref = self.root_api.load_root_intra_extension_dict()\n self.root_api.populate_default_data(ref)\n self.root_intra_extension = self.root_api.get_root_extension_dict()\n self.root_intra_extension_id = self.root_intra_extension.keys()[0]\n self.ADMIN_ID = self.root_api.root_admin_id\n self.authz_manager = self.authz_api\n self.admin_manager = self.admin_api\n self.tenant_manager = self.tenant_api\n\n def __get_key_from_value(self, value, values_dict):\n return filter(lambda v: v[1] == value, values_dict.iteritems())[0][0]\n\n def load_extra_backends(self):\n return {\n \"moonlog_api\": LogManager(),\n \"authz_api\": IntraExtensionAuthzManager(),\n \"tenant_api\": TenantManager(),\n \"configuration_api\": ConfigurationManager(),\n }\n\n def config_overrides(self):\n super(TestIntraExtensionAdminManager, self).config_overrides()\n self.policy_directory = '/etc/keystone/policies'\n self.config_fixture.config(\n group='moon',\n intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')\n self.config_fixture.config(\n group='moon',\n policy_directory=self.policy_directory)\n\n def send_logs(self):\n log_authz = \"Test for authz \" + uuid.uuid4().hex\n logs = []\n self.moonlog_api.authz(log_authz)\n logs.append(\"Test for critical \" + uuid.uuid4().hex)\n self.moonlog_api.critical(logs[-1])\n logs.append(\"Test for error \" + uuid.uuid4().hex)\n self.moonlog_api.error(logs[-1])\n logs.append(\"Test for warning \" + uuid.uuid4().hex)\n self.moonlog_api.warning(logs[-1])\n logs.append(\"Test for info \" + uuid.uuid4().hex)\n self.moonlog_api.info(logs[-1])\n logs.append(\"Test for debug \" + uuid.uuid4().hex)\n self.moonlog_api.debug(logs[-1])\n return log_authz, logs\n\n def test_get_set_logs(self):\n previous_authz_logs = self.moonlog_api.get_logs(logger=\"authz\")\n previous_sys_logs = self.moonlog_api.get_logs(logger=\"sys\")\n\n log_authz, logs = self.send_logs()\n time.sleep(1)\n\n authz_logs = self.moonlog_api.get_logs(logger=\"authz\")\n sys_logs = self.moonlog_api.get_logs(logger=\"sys\")\n\n 
self.assertIsInstance(authz_logs, list)\n self.assertIsInstance(sys_logs, list)\n\n self.assertIn(log_authz, \" \".join(authz_logs))\n\n self.assertEqual(len(authz_logs), len(previous_authz_logs)+1)\n self.assertTrue(len(sys_logs) >= len(previous_sys_logs)+5)\n for log in logs:\n self.assertIn(log, \" \".join(sys_logs))\n\n def test_get_syslogger_with_options(self):\n\n all_logs = self.moonlog_api.get_logs(logger=\"sys\")\n\n time_1 = time.strftime(TIME_FORMAT)\n time.sleep(1)\n\n log_authz, logs = self.send_logs()\n\n NUMBER_OF_LOG = 5\n sys_logs = self.moonlog_api.get_logs(logger=\"sys\", event_number=NUMBER_OF_LOG)\n self.assertIsInstance(sys_logs, list)\n self.assertEqual(len(sys_logs), NUMBER_OF_LOG)\n\n sys_logs = self.moonlog_api.get_logs(logger=\"sys\", time_from=time_1)\n self.assertIsInstance(sys_logs, list)\n self.assertEqual(len(sys_logs), NUMBER_OF_LOG)\n\n log_authz, logs = self.send_logs()\n\n time.sleep(1)\n time_2 = time.strftime(TIME_FORMAT)\n\n log_authz, logs = self.send_logs()\n\n sys_logs = self.moonlog_api.get_logs(logger=\"sys\", time_to=time_2)\n self.assertIsInstance(sys_logs, list)\n self.assertEqual(len(sys_logs), len(all_logs)+3*NUMBER_OF_LOG)\n\n sys_logs = self.moonlog_api.get_logs(logger=\"sys\", time_from=time_1, time_to=time_2)\n self.assertIsInstance(sys_logs, list)\n self.assertEqual(len(sys_logs), 3*NUMBER_OF_LOG)\n\n","sub_path":"keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py","file_name":"test_unit_core_log.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"31151622","text":"from django.contrib import admin\nfrom tours.models import TourDetail, TourCategory, TourItinerary, TourImage, ImportantPoint\n\n\nclass TourImageInline(admin.TabularInline):\n\tmodel = TourImage\n\textra = 0\n\nclass TourItineraryInline(admin.TabularInline):\n\tmodel = TourItinerary\n\textra = 0\n\n\nclass TourCategoryInline(admin.TabularInline):\n\tmodel = TourCategory\n\textra = 0\n\nclass TourImageInline(admin.TabularInline):\n\tmodel = TourImage\n\textra = 0\n\nclass ImportantPointInline(admin.TabularInline):\n\tmodel = ImportantPoint\n\textra= 0\n\n\nclass TourDetailAdmin(admin.ModelAdmin):\n\tfieldsets = [\n\t\t('TourName', {\n\t\t\t'fields' : ['tour_name','statedetail', 'placedetail' , 'main_img'],\n\t\t\t}),\n\t\t('Details', {\n 'fields': ['tour_tagline', 'tour_price','tour_detail',],\n \n }),\n ('Term, Inclusions and Exclusion', { \n \t'fields': ['inclusion_type', 'exclusion_type', 'term_type'],\n \t}),\n\t]\n\tinlines = [TourItineraryInline, TourCategoryInline, TourImageInline, ImportantPointInline]\n\nadmin.site.register(TourDetail, TourDetailAdmin)","sub_path":"tourepedia/tours/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"517394431","text":"\"\"\"\nTest the management of tulip.__version__\n\nWARNING: version_test() and possibly other routines in this file will\ntemporarily replace tulip/commit_hash.txt, if present. If the test\nfails, your tulip installation could be left with a broken version\nlabel. This comment especially matters for out-of-source testing.\n\"\"\"\n\nimport os\nimport os.path\nimport imp\n\n\nSAMPLE_FILE_TOP = \"\"\"# DO NOT EDIT! 
This file was automatically generated by setup.py of TuLiP\n\"\"\"\nSAMPLE_COMMIT_HASH = \"\"\"71aefd0659904ccdb8553e5d5f2436933ea42728\n\"\"\"\n\ndef version_test():\n import tulip\n tul_path = os.path.dirname(tulip.__file__)\n filename = os.path.join(tul_path, \"commit_hash.txt\")\n if os.path.exists(filename):\n commit_hash_backup = open(filename, \"r\").read()\n else:\n commit_hash_backup = None\n\n # Release\n with open(filename, \"w\") as f:\n f.write(SAMPLE_FILE_TOP)\n\n ver = imp.load_module(\"version\", *imp.find_module(\"version\", [tul_path]))\n assert ver.version == \\\n '.'.join([str(x) for x in ver.version_info[:2]])+ver.version_info[2]\n\n # Dev release\n with open(filename, \"a\") as f:\n f.write(SAMPLE_COMMIT_HASH)\n\n ver = imp.load_module(\"version\", *imp.find_module(\"version\", [tul_path]))\n release_str = '.'.join([str(x) for x in ver.version_info[:2]])+ver.version_info[2]\n assert ver.version == release_str+\"-dev-\"+SAMPLE_COMMIT_HASH.strip()\n\n # Unknown dev\n os.remove(filename)\n ver = imp.load_module(\"version\", *imp.find_module(\"version\", [tul_path]))\n assert ver.version == release_str+\"-dev-unknown-commit\"\n\n # Restore original, if present\n if commit_hash_backup is not None:\n with open(filename, \"w\") as f:\n f.write(commit_hash_backup)\n","sub_path":"tests/version_test.py","file_name":"version_test.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"481505390","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport glob\nimport os\nimport sys\n\nimport attr\nimport numpy as np\nimport pytest\n\nfrom earthio.tif import (load_dir_of_tifs_meta,\n load_dir_of_tifs_array,\n load_tif_meta,\n ls_tif_files)\nfrom earthio.tests.util import (EARTHIO_HAS_EXAMPLES,\n EARTHIO_EXAMPLE_DATA_PATH,\n TIF_FILES,\n assertions_on_metadata,\n assertions_on_band_metadata)\nfrom earthio.util import BandSpec\n\n\nif TIF_FILES:\n TIF_DIR = os.path.dirname(TIF_FILES[0])\nband_specs = [\n BandSpec('name', '_B1.TIF', 'band_1'),\n BandSpec('name', '_B2.TIF', 'band_2'),\n BandSpec('name', '_B3.TIF', 'band_3'),\n BandSpec('name', '_B4.TIF', 'band_4'),\n BandSpec('name', '_B5.TIF', 'band_5'),\n BandSpec('name', '_B6.TIF', 'band_6'),\n BandSpec('name', '_B7.TIF', 'band_7'),\n BandSpec('name', '_B9.TIF', 'band_9'),\n BandSpec('name', '_B10.TIF', 'band_10'),\n BandSpec('name', '_B11.TIF', 'band_11'),\n]\n\n@pytest.mark.skipif(not TIF_FILES,\n reason='elm-data repo has not been cloned')\ndef test_read_meta():\n for tif in TIF_FILES:\n raster, meta = load_tif_meta(tif)\n assert hasattr(raster, 'read')\n assert hasattr(raster, 'width')\n band_specs_with_band_8 = band_specs + [BandSpec('name', '_B8.TIF', 'band_8')]\n meta = load_dir_of_tifs_meta(TIF_DIR, band_specs_with_band_8)\n band_meta = meta['band_meta']\n heights_names = [(m['height'], m['name']) for m in band_meta]\n # band 8 is panchromatic with 15 m resolution\n # other bands have 30 m resolution. 
They\n # have the same bounds, so band 8 has 4 times as many pixels\n heights_names.sort(key=lambda x:x[0])\n assert heights_names[-1][-1].endswith('_B8.TIF')\n\n\n@pytest.mark.skipif(not TIF_FILES,\n reason='elm-data repo has not been cloned')\ndef test_read_array():\n meta = load_dir_of_tifs_meta(TIF_DIR, band_specs)\n es = load_dir_of_tifs_array(TIF_DIR, meta, band_specs)\n for var in es.data_vars:\n sample = getattr(es, var)\n mean_y = np.mean(sample.y)\n mean_x = np.mean(sample.x)\n band_names = np.array([b.name for b in band_specs])\n assert sorted((mean_x,\n sample.canvas.bounds.left,\n sample.canvas.bounds.right))[1] == mean_x\n assert sorted((mean_y,\n sample.canvas.bounds.top,\n sample.canvas.bounds.bottom))[1] == mean_y\n assert np.all(band_names == es.band_order)\n assertions_on_band_metadata(sample.attrs)\n\n\n@pytest.mark.skipif(not TIF_FILES,\n reason='elm-data repo has not been cloned')\ndef test_reader_kwargs():\n band_specs_kwargs = []\n for b in band_specs:\n b = attr.asdict(b)\n b['buf_xsize'], b['buf_ysize'] = 200, 300\n band_specs_kwargs.append(BandSpec(**b))\n meta = load_dir_of_tifs_meta(TIF_DIR, band_specs_kwargs)\n es = load_dir_of_tifs_array(TIF_DIR, meta, band_specs_kwargs)\n for b in es.band_order:\n assert getattr(es, b).values.shape == (300, 200)\n\n","sub_path":"earthio/tests/test_tif.py","file_name":"test_tif.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"544512209","text":"#!/usr/bin/env python\n\nimport re\nimport requests\nimport requests_html\nimport string\n\n# Queries for the MediaWiki backend.\n# Documentation here: https://www.mediawiki.org/wiki/API:Categorymembers\nCATEGORY = \"Category:Japanese_katakana\"\nLIMIT = 500\nINITIAL_QUERY = f\"https://en.wiktionary.org/w/api.php?action=query&format=json&list=categorymembers&cmtitle={CATEGORY}&cmlimit={LIMIT}\"\nCONTINUE_TEMPLATE = string.Template(INITIAL_QUERY + \"&cmcontinue=$cmcontinue\")\n\n# Selects the content on the page.\nPAGE_TEMPLATE = string.Template(\"https://en.wiktionary.org/wiki/$word\")\nSELECTOR = 'b[class=\"Latn form-of lang-ja romanized-form-of\"]'\n\n\ndef _print_data(data):\n session = requests_html.HTMLSession()\n for member in data[\"query\"][\"categorymembers\"]:\n katakana = member[\"title\"]\n # Skips examples starting or ending with a dash.\n if katakana.startswith(\"-\") or katakana.endswith(\"-\"):\n continue\n # Skips examples containing digits.\n if re.search(r\"\\d\", katakana):\n continue\n query = PAGE_TEMPLATE.substitute(word=katakana)\n got = session.get(query).html.find(SELECTOR, first=True)\n if not got:\n continue\n romaji = got.text\n # Skips multiword examples.\n if \" \" in romaji:\n continue\n if romaji.endswith(\")\") or romaji.endswith(\",\"):\n romaji = romaji[:-1]\n # Skips examples starting or ending with a dash.\n if romaji.startswith(\"-\") or romaji.endswith(\"-\"):\n continue\n romaji = romaji.casefold()\n print(f\"{katakana}\\t{romaji}\")\n\n\ndef main():\n data = requests.get(INITIAL_QUERY).json()\n _print_data(data)\n code = data[\"continue\"][\"cmcontinue\"]\n next_query = CONTINUE_TEMPLATE.substitute(cmcontinue=code)\n while True:\n data = requests.get(next_query).json()\n _print_data(data)\n # Then this is the last one.\n if not \"continue\" in data:\n break\n code = data[\"continue\"][\"cmcontinue\"]\n next_query = CONTINUE_TEMPLATE.substitute(cmcontinue=code)\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"languages/jpn_wik/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"166217410","text":"from motors import Motors\nfrom sensob import CameraSensob\nfrom time import sleep\n\n\nclass Motob:\n\n def __init__(self, bbcon):\n self.bbcon = bbcon\n self.values = []\n self.motor = Motors()\n self.photograph = False\n self.camera=CameraSensob()\n\n def update(self, motor_recommendation):\n # Mottar en anbefaling fra bbcon og behaviors\n\n self.values = motor_recommendation\n self.operationlize()\n\n def operationlize(self):\n # Henter ut forste verdi fra anbefalinger, antall grader gis som andre vektor i self.values dersom anbefaling er\n # 'l' eller 'r'\n\n value=self.values[0]\n print(\"Motor Recommendation = \", value)\n if value == \"f\":\n print(\"Forward\")\n self.motor.set_value([0.5, 0.5],0.15)\n elif value == \"l\":\n print(\"Left\")\n self.motor.set_value([-1,1], self.turn_n_degrees(self.values[1]))\n elif value == \"r\":\n print(\"Right\")\n self.motor.set_value([1,-1], self.turn_n_degrees(self.values[1]))\n elif value == 'fl':\n print('Left and forward')\n self.motor.set_value([0.05, 0.35],0.15)\n elif value == 'fr':\n print('Right and forward')\n self.motor.set_value([0.35, 0.05],0.15)\n elif value == 't':\n self.motor.set_value([-0.5, 0.5], 0.25)\n self.motor.set_value([0.5, -0.5], 0.25)\n print(\"Found red!\")\n self.motor.set_value([-1, 1], self.turn_n_degrees(180))\n self.bbcon.photo_taken()\n elif value == \"s\":\n print(\"Stop\")\n self.motor.stop()\n self.photograph = True\n sleep(1)\n elif value == 'p':\n self.camera.update()\n\n @staticmethod\n def turn_n_degrees(deg):\n # Returnerer antall sekunder motorene maa kjores paa full speed, henholdsvis frem og bak for aa tilsvare grader\n return 0.0028 * deg\n","sub_path":"motob.py","file_name":"motob.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"214546995","text":"from tkinter import *\nimport time\nimport random\n\n#setup\nwindow = Tk()\nwindow.title('The Spider Survival Game Multiplayer')\n\ncanvas = Canvas(window, width = 400, height = 400, bg = 'white')\ncanvas.pack()\n\ntitle = canvas.create_text(200, 200, text = 'The Spider Survival Game: 2', fill = 'black', font = ('Helvetica', 30))\ndirections = canvas.create_text(200, 300, text = 'Multiplayer version!', fill = 'black', font = ('Helvetica', 30))\n\n#time\ntime = 0\ntime_display = Label(window, text = \"Time: \" + str(time))\ntime_display.pack()\n\n#level\nlevel = 1\nlevel_display = Label(window, text = \"Level: \" + str(level))\nlevel_display.pack()\n\n#players\nplayer1_image = PhotoImage(file = 'stickfigure.gif')\nplayer2_image = PhotoImage(file = 'stickfigure.gif')\nplayer1 = canvas.create_image(200,360,image = player1_image)\nplayer2 = canvas.create_image(200,360,image = player2_image)\n\n\n#variables for spiders\nspider_list = []\nspider_speed = 2\n\nspider_image = PhotoImage(file = 'spider.gif')\n\ndef make_spider():\n xposition = random.randint(1,400)\n spider = canvas.create_image(0, xposition, image = spider_image)\n spider_list.append(spider)\n if level == 1:\n window.after(1500, make_spider)\n if level == 2:\n window.after(600, make_spider)\n if level == 3:\n window.after(300, make_spider)\n \ndef move_spider():\n for spider in spider_list:\n canvas.move(spider, spider_speed, 0)\n window.after(50, 
move_spider)\n\n#time update\ndef update_time_level():\n global time, level, spider_speed\n time = time + 1\n time_display.config(text = 'Time: ' + str(time))\n if time > 15 and time <= 30:\n spider_speed = spider_speed + 1\n level = 2\n level_display.config(text = \"Level: \" + str(level))\n elif time > 30:\n spider_speed = spider_speed + 1\n level = 3\n level_display.config(text = \"Level: \" + str(level))\n window.after(1000, update_time_level)\n\ndef end_game_over():\n window.destroy()\n\ndef end_title():\n canvas.delete(title)\n canvas.delete(directions)\n\n#check collide\ndef collision(item1, item2, distance):\n xdistance = abs(canvas.coords(item1)[0] - canvas.coords(item2)[0])\n ydistance = abs(canvas.coords(item1)[1] - canvas.coords(item2)[1])\n overlap = xdistance < distance and ydistance < distance\n return overlap\n\ndef check_hits():\n for spider in spider_list:\n if collision(player1, spider, 30):\n game_over = canvas.create_text(200, 200, text = 'Game Over', fill = 'red', font = ('Helvetica', 30))\n window.after(2000, end_game_over)\n elif collision(player2, spider, 30):\n game_over = canvas.create_text(200, 200, text = 'Game Over', fill = 'red', font = ('Helvetica', 30))\n window.after(2000, end_game_over)\n return\n window.after(100, check_hits)\n\n#control character with keys\nmove_direction = 0\ndef check_input(event):\n global move_direction\n key = event.keysym\n if key == \"Up\":\n move_direction = \"Up1\"\n elif key == \"Down\":\n move_direction = \"Down1\"\n elif key == \"Right\":\n move_direction = \"Right1\"\n elif key == \"Left\":\n move_direction = \"Left1\"\n elif key == \"W\":\n move_direction = \"Up2\"\n elif key == \"S\":\n move_direction = \"Down2\"\n elif key == \"A\":\n move_direction = \"Right2\"\n elif key == \"D\":\n move_direction = \"Left2\"\n\ndef end_input(event):\n global move_direction\n move_direction = \"None\"\n\ndef move_character():\n if move_direction == \"Right1\" and canvas.coords(player1)[0] < 400:\n canvas.move(player1, 10,0)\n elif move_direction == \"Left1\" and canvas.coords(player1)[0] > 0:\n canvas.move(player1, -10,0)\n elif move_direction == \"Up1\" and canvas.coords(player1)[1] > 0:\n canvas.move(player1, 0,-10)\n elif move_direction == \"Down1\" and canvas.coords(player1)[1] < 400:\n canvas.move(player1, 0,10)\n elif move_direction == \"Right2\" and canvas.coords(player2)[0] < 400:\n canvas.move(player2, 10,0)\n elif move_direction == \"Left2\" and canvas.coords(player2)[0] > 0:\n canvas.move(player2, -10,0)\n elif move_direction == \"Up2\" and canvas.coords(player2)[1] > 0:\n canvas.move(player2, 0,-10)\n elif move_direction == \"Down2\" and canvas.coords(player2)[1] < 400:\n canvas.move(player2, 0,10)\n window.after(16, move_character)\n\n\ncanvas.bind_all('', check_input)\ncanvas.bind_all('', end_input)\n\n#start game\nwindow.after(1000, end_title)\nwindow.after(1000, make_spider)\nwindow.after(1000, update_time_level)\nwindow.after(1000, move_spider)\nwindow.after(1000, check_hits)\nwindow.after(1000, move_character)\n\nwindow.mainloop()\n","sub_path":"5-5 survival game multiplayer.py","file_name":"5-5 survival game multiplayer.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"180186950","text":"import random\nimport string\nfrom flask import views, jsonify\nfrom sqlalchemy import or_, asc, desc\nfrom webargs.flaskparser import use_args\nfrom webargs import fields\nfrom marshmallow import Schema, pre_load\nfrom .db import db, 
DdnsModel, DdnsSchema\n\n\nclass PostSchema(Schema):\n hostname = fields.Str(required=True)\n\n @pre_load\n def hostname_strip(self, in_data):\n in_data['hostname'] = in_data['hostname'].lower().strip()\n return in_data\n\n class Meta:\n strict = True\n\n\nget_fields = {\n 'per_page': fields.Int(),\n 'page': fields.Int(),\n 'sort': fields.Str(missing=''),\n 'filter': fields.Str(missing='')\n}\n\ndelete_fields = {\n 'id': fields.Int()\n}\n\n\nclass ManageItems(views.MethodView):\n @use_args(get_fields)\n def get(self, args):\n query = None\n if len(args['filter']) > 0:\n query = DdnsModel.query.filter(\n or_(\n DdnsModel.hostname.contains(args['filter']),\n DdnsModel.ip_send.contains(args['filter']),\n DdnsModel.ip_give.contains(args['filter'])\n )\n )\n else:\n query = DdnsModel.query\n\n sortstring = args['sort'].split('|')\n if len(sortstring) == 2:\n if sortstring[1] == 'asc':\n query = query.order_by(asc(sortstring[0]))\n if sortstring[1] == 'desc':\n query = query.order_by(desc(sortstring[0]))\n\n result = query.paginate(page=args['page'], per_page=args['per_page'])\n ddnslist = []\n for item in result.items:\n ddnslist.append(DdnsSchema().dump(item).data)\n\n modular_offset = 0\n if result.total % result.per_page > 0:\n modular_offset = 1\n\n to = result.per_page * result.page\n if to > result.total:\n to = result.total\n\n return jsonify({\n 'ddnslist': ddnslist,\n 'total': result.total,\n 'per_page': result.per_page,\n 'current_page': result.page,\n 'from': result.per_page * (result.page - 1) + 1,\n 'to': to,\n 'last_page': int(result.total / result.per_page) + modular_offset\n })\n\n @use_args(PostSchema)\n def post(self, args):\n hostname = args['hostname']\n host = DdnsModel.query.filter_by(hostname=hostname).first()\n print(host)\n if host:\n # TODO error handler\n return \"false\"\n\n key = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(32))\n host = DdnsModel(hostname=hostname, key=key)\n db.session.add(host)\n db.session.commit()\n return jsonify(DdnsSchema().dump(host))\n\n @use_args(delete_fields)\n def delete(self, args):\n entry = DdnsModel.query.get_or_404(args['id'])\n db.session.delete(entry)\n db.session.commit()\n return jsonify({'delete': 'ok', 'id': args['id']})\n","sub_path":"app/admin_view.py","file_name":"admin_view.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"304445773","text":"#!/usr/bin/env python3\n# coding:utf-8\n\nimport signal\nimport sys\nimport os\nimport glob\nimport logging\nimport logging.handlers\nimport datetime\nimport click\nimport pickle\nimport struct\nimport tempfile\nimport shutil\nimport math\nimport concurrent.futures\nimport errno\n\n\nclass SignalException(Exception):\n def __init__(self, message):\n super(SignalException, self).__init__(message)\n\n\ndef do_exit(sig, stack):\n raise SignalException(\"Exiting\")\n\n\n# ファイルを分割し、index、offset、offset + lengthを返す。\ndef tsv_separate_generator(inputf):\n CHUNK_SIZE = 1024 * 1024 * 100\n with open(inputf, 'rb') as f:\n f_size = os.stat(f.fileno()).st_size\n split_count = math.ceil(f_size / CHUNK_SIZE)\n start_offset = len(f.readline())\n for split_idx in range(split_count):\n offset = CHUNK_SIZE * (split_idx + 1) - 1\n f.seek(offset)\n last_line_len = len(f.readline())\n if offset < f_size:\n end_offset = offset + last_line_len\n else:\n end_offset = f_size\n yield (\n split_idx,\n start_offset,\n end_offset,\n )\n if end_offset >= 
f_size or last_line_len == 0:\n break\n start_offset = end_offset\n\n\ndef sum_file(self, files):\n with tempfile.NamedTemporaryFile(delete=False, dir='/var/tmp/',) as f:\n s = 0\n for file in self.files:\n with open(file) as f1:\n os.sendfile(f.fileno(), f1.fileno(), s)\n s += os.stat(file).st_size\n return f.name\n\n\nclass ReadTsvGenerator(object):\n\n def __init__(self, inputf, iterable):\n self.inputf = inputf\n self.iterable = iterable\n\n def read_tsv(self):\n with open(self.inputf, \"rb\") as f:\n start_offset = self.iterable[1],\n end_offset = self.iterable[2],\n f.seek(start_offset[0])\n start = start_offset[0]\n while start < end_offset[0]:\n row = f.readline()\n start += len(row)\n row = [\n i.decode(\n 'utf-8'\n ) for i in row.strip(b'\\n').split(b'\\t')\n ]\n row = (\n int(row[0]),\n int(row[1]),\n int(row[2]),\n float(row[3]),\n int(row[4]),\n row[5],\n row[6],\n row[7],\n row[8],\n )\n yield row\n\n\nclass ParseTsvGenerator(object):\n def __init__(self, iterable):\n self.iterable = iterable\n\n def pickle_tsv(self):\n lines = self.iterable\n next(lines)\n for record in lines:\n yield pickle.dumps(record)\n\n def struct_tsv(self):\n lines = self.iterable\n next(lines)\n for record in lines:\n s = struct.Struct(\n 'i h l d ? %ds %ds %ds %ds' % (\n len(record[5]), len(record[6]),\n len(record[7]), len(record[8]),\n )\n )\n yield s.pack(*record)\n\n\nclass ParseRowsTsv(object):\n\n def __init__(self, file, inputf, outputf):\n self.file = file\n self.inputf = os.path.abspath(os.path.expanduser(inputf))\n self.outputf = os.path.abspath(os.path.expanduser(outputf))\n\n # 単一タスク\n def dotask(self, rule):\n parsetsv = ParseTsvGenerator(\n ReadTsvGenerator(self.inputf, rule).read_tsv())\n if self.file == 'pickle':\n w = parsetsv.pickle_tsv()\n elif self.file == 'struct':\n w = parsetsv.struct_tsv()\n with tempfile.NamedTemporaryFile(\n delete=False, dir='/var/tmp', suffix='_dotask', prefix='tmp_',\n ) as f:\n for row in w:\n f.write(row)\n return f.name\n\n # マルチプロセス\n def multi_do_task(self):\n with concurrent.futures.ProcessPoolExecutor() as executor:\n future_to_tsv = {\n executor.submit(\n self.dotask, rule\n ): rule for rule in tsv_separate_generator(self.inputf)}\n with tempfile.TemporaryDirectory(\n suffix='_tsv', prefix='tmp_', dir='/var/tmp') as temp_dir:\n with tempfile.NamedTemporaryFile(\n suffix='_tsv', prefix='tmp_',\n delete=False, dir=temp_dir,) as f:\n s = 0\n for future in concurrent.futures.as_completed(\n future_to_tsv):\n chunk = future_to_tsv[future][2] - \\\n future_to_tsv[future][1]\n with open(future.result()) as separatefile:\n os.sendfile(\n f.fileno(), separatefile.fileno(), s, chunk)\n s += os.stat(separatefile.fileno()).st_size\n try:\n os.remove(separatefile.name)\n except OSError as exc:\n if exc.errno != errno.ENOENT:\n raise\n shutil.move(f.name, self.outputf)\n\n\n@click.command()\n@click.option(\n '--file', type=click.Choice(['pickle', 'struct']),\n default='pickle')\n@click.option('-i', '--inputf', default='~/kadai_1.tsv')\n@click.option('-o', '--outputf', default='~/zone/kadai_2v3.p')\ndef cmd(file, inputf, outputf):\n s = datetime.datetime.now()\n print(s + datetime.timedelta(0, 0, 0, 0, 0, 9))\n # シグナル\n signal.signal(signal.SIGINT, do_exit)\n signal.signal(signal.SIGHUP, do_exit)\n signal.signal(signal.SIGTERM, do_exit)\n # ログハンドラーを設定する\n LOG_MANYROWSTSV = 'logging_warning.out'\n my_logger = logging.getLogger('MyLogger')\n my_logger.setLevel(logging.WARNING)\n handler = logging.handlers.RotatingFileHandler(\n LOG_MANYROWSTSV, maxBytes=2000, 
backupCount=5,)\n my_logger.addHandler(handler)\n\n parser = ParseRowsTsv(file, inputf, outputf)\n\n try:\n parser.multi_do_task()\n\n except SignalException as e1:\n my_logger.warning('%s: %s' % (e1, datetime.datetime.now()))\n logfiles = glob.glob('%s*' % LOG_MANYROWSTSV)\n print(logfiles)\n sys.exit(1)\n finally:\n e = datetime.datetime.now()\n print(str(e-s))\n\n\ndef main():\n cmd()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"parsetsv_multitask_p3.py","file_name":"parsetsv_multitask_p3.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"11784060","text":"import urllib.request\nimport re\nimport time\nimport sys\nimport datetime\n'''__author__ = sjie\n 此脚本用来监控商品行情时间,返回1表示正常,反之,返回0则不正常!!!!\n'''\ndef market(markercode):\n #url='''http://10.0.1.60:6101/market/status.do?market=GDIEX&contract=%s''' %markercode\n url='''http://market43.gdiex.com/market/status.do?market=GDIEX&contract={markercode}'''.format(markercode=markercode)\n response = urllib.request.urlopen(url)\n data = response.read()\n data = data.decode('utf-8')\n #print(data)\n data_split = re.split(',',data)[6].split(':')\n data_dict = {}\n data_dict['status'] = data_split[1]\n for status,value in data_dict.items():\n return (value)\ndef getCodeQuote(Quoteurl):\n url = '''http://market43.gdiex.com/realtime/query/contracts2.do?value=%s''' %Quoteurl\n response = urllib.request.urlopen(url)\n response = response.read().decode('utf-8')\n response_split = response[-37:-18]\n timearray = time.strptime(response_split,\"%Y-%m-%d %H:%M:%S\")\n timestamp = int(time.mktime(timearray))\n now = int(time.time())\n #print(now)\n #print(timestamp)\n return int((now - timestamp))\n #print(response_split)\n\nif __name__ == '__main__':\n code = sys.argv[1]\n #interval = int(sys.argv[2])\n hour = time.strftime('%H',time.localtime(time.time()))\n week = time.strftime('%w',time.localtime(time.time()))\n if (week == 1 and hour < 8) or (week != 1 and 4 < hour < 7):\n print(0)\n else:\n if market(code) in '1':\n #getCodeQuote(code)\n interval = getCodeQuote(code)\n print(interval)\n else:\n print(0)\n\n","sub_path":"python/php/python/monitor_weipan_quote.py","file_name":"monitor_weipan_quote.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"191393021","text":"\"\"\"Raise the number to the power of exp.\n\nUsage\n-----\n\n $ python3 power_binary_expansion.py num exp\n\nnum - a number, the base for the exponentiation\nexp - an exponent, the power.\n\"\"\"\n\nimport sys\n\n\ndef power_binary_expansion(num, exp):\n \"\"\"Exponentiation by binary expansion of the exponent.\n\n The function to compute exponentiation by converting the exponent to base2.\n\n Parameters\n ----------\n num: float\n base, a positive real number.\n exp: int\n power, a positive integer (exp >= 0).\n\n Returns\n -------\n numeric\n the expth power of num.\n\n \"\"\"\n if exp == 0:\n return 1\n\n result = 1\n inter_result = num\n power_of_2 = 1\n deg_exp = exp\n while power_of_2 * 2 < exp:\n power_of_2 *= 2\n deg_exp = deg_exp // 2\n inter_result *= inter_result\n result *= inter_result if deg_exp % 2 else 1\n\n return result\n\n\ndef main():\n if len(sys.argv) > 2:\n num = float(sys.argv[1])\n exp = int(sys.argv[2])\n else:\n sys.exit()\n\n output = float(power_binary_expansion(num, exp))\n print(output)\n\n\nif __name__ == '__main__':\n main()\n\n # unit tests\n # assert 
power_binary_expansion(2, 10) == 1024.0\n # assert power_binary_expansion(123456789, 0) == 1.0\n","sub_path":"number_theoretic_algorithms/power/power_binary_expansion.py","file_name":"power_binary_expansion.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"35846581","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Ticket',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=250)),\n ('description', models.TextField()),\n ('attachment', models.FileField(null=True, upload_to=b'bugtracker', blank=True)),\n ('status', models.CharField(max_length=1,\n choices=[(b'I', b'Initial'), (b'A', b'Awaiting Update'), (b'F', b'Fixed'),\n (b'W', b\"Won't Fix\")])),\n ('priority',\n models.CharField(max_length=1, choices=[(b'L', b'Low'), (b'M', b'Medium'), (b'H', b'High')])),\n ('created_time', models.DateTimeField(auto_now_add=True)),\n ('updated_time', models.DateTimeField(auto_now=True)),\n ('assigned_to', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL)),\n ('created_by', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-updated_time'],\n },\n ),\n migrations.CreateModel(\n name='TicketUpdate',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=250)),\n ('update_text', models.TextField()),\n ('attachment', models.FileField(null=True, upload_to=b'bugtracker', blank=True)),\n ('updated_time', models.DateTimeField(auto_now=True)),\n ('ticket', models.ForeignKey(to='bugtracker.Ticket')),\n ('updated_by', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-updated_time'],\n },\n ),\n ]\n","sub_path":"bugtracker/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"400524005","text":"from StringIO import StringIO\nfrom bs4 import BeautifulSoup\nimport pycurl\nimport md5\nimport subprocess\n\nurl = \"URL-TO-MONITOR\"\n\n# curl url\nstorage = StringIO()\nc = pycurl.Curl()\nc.setopt(c.URL, url)\nc.setopt(c.WRITEFUNCTION, storage.write)\nc.perform()\nc.close()\nhtml = storage.getvalue()\n\nsoup = BeautifulSoup(html, 'html.parser')\nelem = soup.table # Modify this line!\nhtml = str(elem)\n\n# hash the page content\nnewHash = md5.new(html).hexdigest()\n\n# open stored hash\nwith open('hash', 'r') as myFile:\n oldHash = myFile.read().replace('\\n', '')\n\nif len(oldHash) != 0:\n\t# send notification if changed\n\tif newHash != oldHash:\n\t\t# execute php email script\n\t\tsubprocess.call(\"php email.php \" + url, shell=True)\n\t\n# save newHash to file\nhashFile = open(\"hash\", \"w\")\nhashFile.write(newHash)\nhashFile.close()\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"52132587","text":"class Solution:\n def 
canMakeArithmeticProgression(self, arr: List[int]) -> bool:\n if(len(arr)==1 or len(arr)==2): return True\n else:\n arr.sort()\n temp = abs(arr[1]-arr[0])\n for i in range(1,len(arr)-1):\n if(abs(arr[i]-arr[i+1]) != temp): \n return False\n return True","sub_path":"before midterm/leetcode(without_tsis)/arithmetic_progress_1502.py","file_name":"arithmetic_progress_1502.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"269285580","text":"# -*- coding: utf-8 -*-\n# Copyright 2017 GIG Technology NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.3@@\n\nfrom mcfw.exceptions import HttpNotFoundException, HttpConflictException, HttpBadRequestException\nfrom rogerthat.to.payment import ErrorPaymentTO\nfrom rogerthat.translations import localize\n\n\nclass InvalidPaymentProviderException(HttpBadRequestException):\n def __init__(self, payment_provider_id):\n self.payment_provider_id = payment_provider_id\n super(InvalidPaymentProviderException, self).__init__('invalid_payment_provider',\n {'payment_provider_id': payment_provider_id})\n\n\nclass PaymentProviderNotFoundException(HttpNotFoundException):\n def __init__(self, payment_provider_id):\n self.payment_provider_id = payment_provider_id\n super(PaymentProviderNotFoundException, self).__init__('payment_provider_not_found',\n {'payment_provider_id': payment_provider_id})\n\n\nclass PaymentProviderAlreadyExistsException(HttpConflictException):\n def __init__(self, payment_provider_id):\n self.payment_provider_id = payment_provider_id\n super(PaymentProviderAlreadyExistsException, self).__init__('payment_provider_already_exists',\n {'payment_provider_id': payment_provider_id})\n\n\nclass PaymentProviderNoOauthSettingsException(HttpNotFoundException):\n def __init__(self, payment_provider_id):\n self.payment_provider_id = payment_provider_id\n super(PaymentProviderNoOauthSettingsException, self).__init__('payment_provider_no_oauth_settings',\n {'payment_provider_id': payment_provider_id})\n\n\nclass InvalidPaymentImageException(HttpBadRequestException):\n def __init__(self, error='invalid_payment_image', data=None):\n super(InvalidPaymentImageException, self).__init__(error, data)\n\n\nclass PaymentException(Exception):\n\n def __init__(self, error, language, translation_data=None, data=None):\n # type: (unicode, unicode, dict) -> None\n key = 'payments.%s' % error\n self.error = ErrorPaymentTO(error, localize(language, key, **(translation_data or {})), data)\n super(PaymentException, self).__init__(self.error.message)\n","sub_path":"src/rogerthat/exceptions/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"403016455","text":"from experiment import Experiment, DBType\nfrom util.stats import Stats\n\nclass AggregateExperiment(Experiment):\n \"\"\"Experiment that performs aggregates on preexisting 
data in the DB.\"\"\"\n\n def main(self):\n self.runAggregates()\n\n Stats.output()\n Stats.dump(self.getDumpFileName())\n\nexp = AggregateExperiment()\nexp.main()\n","sub_path":"exp_agg.py","file_name":"exp_agg.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"500680448","text":"from model import *\nimport tensorflow as tf\n\n\ndef configure():\n flags = tf.app.flags\n flags.DEFINE_integer('max_step', 2000, 'How many steps to train')\n flags.DEFINE_float('rate', 0.01, 'learning rate for training')\n flags.DEFINE_float('weight_decay', 1e-4, 'L2 regularization')\n flags.DEFINE_integer('reload_step', 4000, 'Reload step to continue training')\n flags.DEFINE_integer('save_interval', 100, 'interval to save model')\n flags.DEFINE_integer('summary_interval', 5, 'interval to save summary')\n flags.DEFINE_integer('n_classes', 10, 'output class number')\n flags.DEFINE_integer('batch_size', 128, 'batch size for one iter')\n flags.DEFINE_boolean('is_training', True, 'training or predict (for batch normalization)')\n flags.DEFINE_integer('layers', 2, 'number of res-net layers in a res-group')\n flags.DEFINE_string('datadir', 'cifar', 'directory of data')\n flags.DEFINE_string('logdir', 'logs', 'directory to save logs of accuracy and loss')\n flags.DEFINE_string('modeldir', 'models', 'directory to save models ')\n flags.DEFINE_string('model_name', 'ResNet', 'Model file name')\n\n flags.FLAGS.__dict__['__parsed'] = False\n return flags.FLAGS\n\n\nif __name__ == '__main__':\n model = ResNet(configure(), tf.Session())\n model.train()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"1934316","text":"import json\r\n\r\nfrom core.dal.entities.dbdata.lipidmaps import LipidMapsData\r\nfrom modules.db_builder.parsers.lib import strip_attr, force_list, flatten_refs, force_flatten_extra_refs\r\nfrom modules.db_builder.parsers.pubchem.utils import split_pubchem_ids\r\n\r\n_mapping = dict(\r\n LM_ID='lipidmaps_id',\r\n NAME='names',\r\n SYSTEMATIC_NAME='names',\r\n SYNONYMS='names',\r\n ABBREVIATION='names',\r\n\r\n EXACT_MASS='mass',\r\n #SMILES='smiles',\r\n #INCHI='inchi',\r\n INCHI_KEY='inchikey',\r\n #FORMULA='formula',\r\n\r\n #KEGG_ID='kegg_id',\r\n #HMDB_ID='hmdb_id',\r\n #CHEBI_ID='chebi_id',\r\n PUBCHEM_CID='pubchem_id',\r\n pubchem_compound_id='pubchem_id',\r\n #LIPIDBANK_ID='lipidbank_id',\r\n #SWISSLIPIDS_ID='swisslipids_id',\r\n\r\n wikipedia_id='wiki_id',\r\n\r\n #CATEGORY='category',\r\n #MAIN_CLASS='main_class',\r\n #SUB_CLASS='sub_class',\r\n CLASS_LEVEL4='lvl4_class',\r\n)\r\n\r\n\r\ndef metajson_transform(me):\r\n flatten_refs(me)\r\n\r\n strip_attr(me, 'chebi_id', 'CHEBI:')\r\n strip_attr(me, 'chebi_id_alt', 'CHEBI:')\r\n strip_attr(me, 'hmdb_id', 'HMDB')\r\n strip_attr(me, 'lipidmaps_id', 'LM')\r\n strip_attr(me, 'inchi', 'InChI=')\r\n\r\n force_list(me, 'chebi_id_alt')\r\n force_list(me, 'names')\r\n\r\n split_pubchem_ids(me)\r\n\r\n force_flatten_extra_refs(me)\r\n\r\n\r\ndef parse_lipidmaps(content):\r\n if isinstance(content, str):\r\n data = json.loads(content)\r\n else:\r\n data = content\r\n\r\n for k in list(data.keys()):\r\n k2 = _mapping.get(k, k).lower()\r\n\r\n if k2 not in data:\r\n data[k2] = []\r\n\r\n v = data.pop(k)\r\n if isinstance(v, (list, tuple, set)):\r\n data[k2].extend(v)\r\n else:\r\n data[k2].append(v)\r\n\r\n # reduce vectors to 
scalars:\r\n metajson_transform(data)\r\n\r\n return LipidMapsData(**data)\r\n","sub_path":"modules/db_builder/parsers/lipidmaps/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"71164993","text":"import csv\nimport argparse\nimport logging\n\n# import matplotlib.pyplot as plt\n# from scipy import stats\n\nfrom octane.molecular_groups import (\n get_fingerprint_bitarray\n)\nfrom octane.model import Model\n\nmain_logger = logging.getLogger('Main')\nmain_logger.setLevel(logging.DEBUG)\nlogging.info('Main started')\n\n\ndef prepare_csv_data_set(data_file):\n \"\"\" \"\"\"\n\n logging.info('reading f{data_file} data set')\n structures = []\n try:\n with open(data_file) as data_set:\n csv_reader = csv.reader(data_set, delimiter=',')\n for row in csv_reader:\n structure, ron, mon = row\n fp = get_fingerprint_bitarray(structure, size=1024, fp_fmt='ecfp0')\n structures.append((fp, ron))\n except FileNotFoundError:\n main_logger.error('No such file f{data_file}')\n\n x_data = [struct for struct, ron in structures]\n y_data = [[ron] for struct, ron in structures]\n return x_data, y_data\n\ndef main():\n \"\"\" entry point \"\"\"\n\n parser = argparse.ArgumentParser(\n description='Octane number prediction'\n )\n parser.add_argument(\n '--data-set',\n metavar='data_file',\n # TODO: os.path.join\n default='data/data_set.csv',\n type=str,\n nargs='?',\n help='path to data set file'\n )\n parser.add_argument(\n '--train-and-save',\n metavar='to_train',\n # TODO: make as flag\n default=False,\n type=bool,\n nargs='?',\n help='train and save trained model to a file specified by --model-file'\n )\n parser.add_argument(\n '--model-file',\n metavar='saved_model_file',\n # TODO: os.path.join\n default='.\\\\model\\model.ckpt',\n type=str,\n nargs='?',\n help='path to model file, for saving/restoring model'\n )\n parser.add_argument(\n '--feed-through',\n metavar='feed_trough',\n type=str,\n nargs='?',\n help='smiles structure for octane number prediction'\n )\n parser.add_argument(\n '--epochs',\n metavar='saved_model_file',\n default=280,\n type=int,\n nargs='?',\n help='number of epochs'\n )\n args = parser.parse_args()\n\n prepared_data = prepare_csv_data_set(args.data_set)\n x_data, y_data = prepared_data\n\n model = Model(\n [1024, 64, 1],\n learning_rate=0.00001,\n epochs=args.epochs\n )\n\n if args.train_and_save:\n with model:\n model.fit_model([x_data, y_data])\n model.save(args.model_file)\n elif args.feed_through:\n with model:\n model.restore(args.model_file)\n fp = get_fingerprint_bitarray(args.feed_through, size=1024, fp_fmt='ecfp0')\n prediction = model.feed_through(fp)\n main_logger.info(f'Prediction for {args.feed_through}: {prediction[0][0]}')\n\n # y_toplot = []\n # for r in y_data:\n # for j in r:\n # y_toplot.append(float(j))\n\n # diff = []\n\n # for i, j in zip(y_toplot, pred):\n # print(i, j)\n # diff.append(i-j)\n # plt.plot(pred, 'ro')\n # plt.plot(y_toplot, pred, 'bo', list(range(-20,120)), list(range(-20,120)), 'r--')\n # plt.plot(list(range(len(y_toplot))), y_toplot, 'r--', list(range(len(y_toplot))), pred, 'g--')\n\n # slope, intercept, r_value, p_value, std_err = stats.linregress(y_toplot, pred)\n # print('r-squared value: {}'.format(r_value))\n\n\n\n # plt.show()\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"156316967","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\nimport sklearn.decomposition\nfrom matplotlib.colors import ListedColormap\nfrom predictor import knn_predict\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# GGPLOT + CRAYOLA FRAGRANCE COLORS\nplt.style.use(\"ggplot\")\npalette = ListedColormap([\"#FF5050\", \"#4F86F7\"])\n\ndef accuracy(predictions, outcomes):\n \"\"\"\n Returns the % of the predictions' accuracy\n \"\"\"\n return 100 * np.mean(predictions == outcomes)\n\ndef normalize(data):\n \"\"\"\n Calculates the Z-Score normalization for a dataframe with numeric\n values only.\n \n MORE INFO:\n https://en.wikipedia.org/wiki/Standard_score#Calculation_from_raw_score\n \"\"\"\n return (data - data.mean()) / data.std(ddof=0)\n\n# PART 1 - NORMALIZING DATA\ndata = pd.read_csv(\"./wine.csv\")\nnumericData = data.drop([\"color\"], 1)\nnumericData = numericData.apply(normalize)\n\n# PART 2 - EXTRACTING PRINCIPAL COMPONENTS\npca = sklearn.decomposition.PCA(n_components=2)\ncomponents = pca.fit(numericData).transform(numericData)\nx = components[:, 0]\ny = components[:, 1]\n\n# PART 3 - GENERATING THE PLOT\nplt.title(\"Principal Components of Wine\")\nplt.scatter(x, y, alpha=0.2, c=data.high_quality, cmap=palette, edgecolors=\"none\")\nplt.xlim(-8, 8); plt.ylim(-8, 8)\nplt.xlabel(\"Principal Component 1\"); plt.ylabel(\"Principal Component 2\")\nplt.savefig(\"./observations/wine_grid.pdf\")\n\n# PART 4 - VERIFYING SKLEARN PREDICTION ACCURACY\nknn = KNeighborsClassifier(n_neighbors = 5)\nknn.fit(numericData, data.high_quality)\nperformance = accuracy(knn.predict(numericData), data.high_quality)\nprint(\"Sklearn module predicting with \" + str(performance) + \"% of acurracy.\")\n\n# PART 5 - SPLITTING DATA\nn_rows = data.shape[0]\nrandom.seed(123)\nselection = random.sample(range(n_rows), 65)\npredictors = np.array(numericData)\noutcomes = np.array(data.high_quality)\ntrainSet = [i for i in range(len(predictors)) if i not in selection]\n\n# PART 6 - VERIFYING MODULE'S PREDICTION ACCURACY\nmyPredictions = np.array([knn_predict(p, predictors[trainSet], outcomes) for p in predictors[selection]])\nperformance = accuracy(myPredictions, outcomes[selection])\nprint(\"Self-made module predicting with \" + str(performance) + \"% of acurracy.\")\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"108439133","text":"#!/usr/bin/env python3\n\n# Python imports.\nimport random\nimport sys\nimport itertools\nimport numpy as np\nfrom copy import copy\n\n# Other imports\nfrom simple_rl.mdp.markov_game.MarkovGameMDPClass import MarkovGameMDP\nfrom simple_rl.mdp.StateClass import State\nfrom simple_rl.agents import QLearningAgent, FixedPolicyAgent\nfrom simple_rl.run_experiments import play_markov_game\nP1 = 0\nP2 = 1\nACTIONS = [\"red square\", \"blue square\", \"purple square\", \"red triangle\",\n \"blue triangle\", \"purple triangle\", \"red circle\", \"blue circle\", \"purple circle\"]\n\n\nclass BlockGameState(State):\n ''' Abstract State class '''\n\n AVAILABLE = 3\n\n def __init__(self):\n self.blocks = [BlockGameState.AVAILABLE for _ in range(0, 9)]\n self.turn = P1\n self.selection = [-1, 
-1]\n\n def get_play_num(self):\n num_p1_actions = self.blocks.count(P1)\n num_p2_actions = self.blocks.count(P2)\n\n return sum([num_p1_actions, num_p2_actions])\n\n def features(self):\n '''\n Summary\n Used by function approximators to represent the state.\n Override this method in State subclasses to have functiona\n approximators use a different set of features.\n Returns:\n (iterable)\n '''\n return np.array([self.blocks, self.turn]).flatten()\n\n def valid_moves(self):\n if self.is_terminal():\n return []\n return [ACTIONS[idx] for (idx, availablility) in enumerate(self.blocks) if availablility == self.AVAILABLE]\n\n def get_data(self):\n return [self.blocks, self.turn]\n\n def get_num_feats(self):\n return len(self.blocks()) + 1\n\n def is_terminal(self):\n return self.blocks.count(self.AVAILABLE) == 3\n\n def __hash__(self):\n # print(str([self.blocks, self.turn]))\n return hash(str([self.blocks, self.turn]))\n\n def __str__(self):\n return \"s.\" + str(self.blocks) + '.turn.' + str(self.turn)\n\n def __eq__(self, other):\n if isinstance(other, State):\n return self.blocks == other.blocks and self.turn == other.turn\n return False\n\n def __getitem__(self, index):\n if index < 9:\n return self.blocks[index]\n else:\n return self.turn\n\n def __len__(self):\n return len(self.data) + 1\n\n def next(self, action_0, action_1):\n act0 = ACTIONS.index(action_0) if action_0 is not None else None\n act1 = ACTIONS.index(action_1) if action_1 is not None else None\n state = BlockGameState()\n state.selection[0] = act0\n state.selection[1] = act1\n if self.turn == P1:\n if self.blocks[act0] == self.AVAILABLE:\n state.blocks = copy(self.blocks)\n state.blocks[act0] = P1\n state.turn = P2\n return state\n else:\n pass\n if self.turn == P2:\n if self.blocks[act1] == self.AVAILABLE:\n state.blocks = copy(self.blocks)\n state.blocks[act1] = P2\n state.turn = P1\n return state\n else:\n pass\n return self\n\n REWARDS = [75, 65, 60, 25, 15, 10, 15, 5, 0]\n\n def reward(self, player):\n reward = 0\n playerBlocks = []\n for (idx, block) in enumerate(self.blocks):\n if block == player:\n reward += self.REWARDS[idx]\n playerBlocks.append(idx)\n # All the same shape\n shapes = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n if anyOf(shapes, lambda x: allOf(x, lambda y: y in playerBlocks)):\n return reward\n # All the same color\n colors = [[0, 3, 6], [1, 4, 7], [2, 5, 8]]\n if anyOf(colors, lambda x: allOf(x, lambda y: y in playerBlocks)):\n return reward\n # Mixed sets\n mixed = [[0, 4, 8], [0, 5, 7], [1, 3, 8],\n [1, 5, 6], [2, 3, 7], [2, 4, 6]]\n if anyOf(mixed, lambda x: allOf(x, lambda y: y in playerBlocks)):\n return reward\n else:\n return -reward / 4\n\n\ndef anyOf(iter, fcn):\n for x in iter:\n if fcn(x):\n return True\n return False\n\n\ndef allOf(iter, fcn):\n for x in iter:\n if not fcn(x):\n return False\n return True\n\n\nclass BlockGameMDP(MarkovGameMDP):\n ''' Class for a Block Game '''\n\n def __init__(self):\n state = BlockGameState()\n MarkovGameMDP.__init__(\n self, ACTIONS, self._transition_func, self._reward_func, init_state=state)\n\n def _reward_func(self, state, action_dict, next_state=None):\n '''\n Args:\n state (State)\n action (dict of actions)\n\n Returns\n (float)\n '''\n actions = list(action_dict.keys())\n agent_a, agent_b = actions[P1], actions[P2]\n action_a, action_b = action_dict[agent_a], action_dict[agent_b]\n\n reward_dict = {}\n next_state = state.next(action_a, action_b)\n # print(state)\n\n # print(next_state)\n if next_state.is_terminal():\n reward_dict[agent_a], 
reward_dict[agent_b] = next_state.reward(\n P1), next_state.reward(P2)\n return reward_dict # TODO\n else:\n reward_dict[agent_a], reward_dict[agent_b] = 0, 0\n return reward_dict\n\n def _transition_func(self, state, action):\n '''\n Args:\n state (State)\n action_dict (str)\n\n Returns\n (State)\n '''\n if state.is_terminal():\n return state\n actions = list(action.keys())\n agent_a, agent_b = actions[P1], actions[P2]\n action_a, action_b = action[agent_a], action[agent_b]\n # print(action_a, action_b)\n return state.next(action_a, action_b)\n\n def __str__(self):\n return \"block game\"\n\n def end_of_instance(self):\n return self.get_curr_state().is_terminal()\n\n\ndef main(open_plot=True):\n # Setup MDP, Agents.\n markov_game = BlockGameMDP()\n ql_agent = QLearningAgent(actions=markov_game.get_actions(), name=\"q1\")\n fixed_agent = QLearningAgent(actions=markov_game.get_actions(), name=\"q2\")\n\n # Run experiment and make plot.\n play_markov_game([ql_agent, fixed_agent], markov_game,\n instances=5, episodes=500, steps=30, open_plot=open_plot)\n\n\nif __name__ == \"__main__\":\n # main(open_plot=not sys.argv[-1] == \"no_plot\")\n val = BlockGameState()\n print(val.valid_moves())\n val = val.next(ACTIONS[0], None)\n print(val.valid_moves())\n val = val.next(None, ACTIONS[4])\n print(val.valid_moves())\n","sub_path":"game/block_game.py","file_name":"block_game.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"484225993","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2014 Spanish National Research Council (CSIC)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport json\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nimport caso.messenger\n\n\nopts = [\n cfg.StrOpt('out',\n default=\"out.json\",\n help='File to output JSON-formatted records to.')\n]\n\nCONF = cfg.CONF\nCONF.register_opts(opts, group=\"json\")\n\nLOG = log.getLogger(__name__)\n\n\nclass JsonMessenger(caso.messenger.BaseMessenger):\n \"\"\"Format and send records to a logstash host.\"\"\"\n\n def __init__(self):\n super(JsonMessenger, self).__init__()\n self.path = CONF.json.out\n\n def push(self, records):\n body = []\n\n for _, record in records.iteritems():\n body.append(record.as_dict())\n\n with open(self.path, 'w') as target:\n target.truncate()\n target.write(json.dumps(body))\n\n LOG.info(\"Saved %d records to %s.\" %\n (len(records), self.path))\n","sub_path":"caso/messenger/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"256739308","text":"import time\nimport uuid\nimport numpy as np\n\nfrom stomp import Connection11\nfrom syncstomp.json_wrap import Connection\n\nfrom nose.tools import eq_\n\n\nDEST_NAME = '/queue/test.%s.syncstomp' % uuid.uuid4()\nRECEIVED = []\n\nSENDER = None\nRECEIVER = None\n\ndef wait(max_time=30):\n '''\n Wait for a message or exception to show up.\n '''\n waited = 0\n while waited < max_time and not RECEIVED:\n time.sleep(1)\n waited += 1\n\n\nclass TestListener(object):\n '''\n Test listener.\n '''\n def on_message(self, headers, message):\n if message == 'Die, please':\n raise RuntimeError('I was asked nicely to die, so I am.')\n RECEIVED.append(('message', (headers, message)))\n\n def on_exception(self, exc):\n RECEIVED.append(('exception', exc))\n\n def on_parseerror(self, headers, message):\n RECEIVED.append(('parseerror', (headers, message)))\n\n\ndef test_create_connections():\n # Create a sender and receiver connection.\n global SENDER\n global RECEIVER\n\n SENDER = Connection()\n RECEIVER = Connection()\n\n\ndef test_setup_receiver():\n # Set a listener on the receiver and subscribe to the destination.\n RECEIVER.set_listener('', TestListener())\n RECEIVER.subscribe(id='', destination=DEST_NAME)\n\n\ndef test_send_object():\n # Send an object and verify that it is received.\n SENDER.send([0, 1, 2], headers={'panda': 'sad'}, destination=DEST_NAME)\n wait()\n assert len(RECEIVED) == 1\n what, (headers, message) = RECEIVED.pop()\n eq_(what, 'message')\n eq_(message, [0, 1, 2])\n eq_(headers.get('panda'), 'sad')\n\n\ndef test_numpy_types():\n # Send things containing numpy types.\n obj = {'pandas': np.float16(22),\n 'keys are strings': [np.int32(-168), np.bool_(True)]}\n SENDER.send(obj, destination=DEST_NAME)\n wait()\n eq_(len(RECEIVED), 1)\n what, (headers, message) = RECEIVED.pop()\n eq_(what, 'message')\n eq_(message, obj)\n\n\ndef test_exception():\n # Send an object that causes an exception and verify the consequences.\n SENDER.send('Die, please', destination=DEST_NAME)\n wait()\n assert len(RECEIVED) == 1\n what, ex = RECEIVED.pop()\n eq_(what, 'exception')\n assert isinstance(ex, RuntimeError), 'Expected RuntimeError, got %r.' 
% ex\n eq_(str(ex), 'I was asked nicely to die, so I am.')\n\n\ndef test_parseerror():\n # Send a string that causes a parseerror and verify the consequences.\n Connection11.send(\n SENDER,\n body='erroneous ninja text',\n destination=DEST_NAME,\n content_type='application/json'\n )\n wait()\n assert len(RECEIVED) == 1\n what, (header, message) = RECEIVED.pop()\n eq_(what, 'parseerror')\n eq_(message, 'erroneous ninja text')\n\n","sub_path":"tests/test_json_wrap.py","file_name":"test_json_wrap.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"151858378","text":"import os\nimport unittest\nimport requests\nfrom unittestreport import ddt, list_data\nfrom common.open_excel import OpenExcel\nfrom common.get_path import EXECL_PATH\nfrom common.get_config import config_data\nfrom common.tools_re import replace_data, random_phone, assertDictIn\nfrom common.connect_mysql import ConnectMysql\nfrom common.login_info import log\n\n\n@ddt\nclass TestRegister(unittest.TestCase):\n ex = OpenExcel(os.path.join(EXECL_PATH, \"test_case.xlsx\"), \"register\")\n case_data = ex.red_data()\n\n @classmethod\n def setUpClass(cls):\n cls.headers = eval(config_data.get(\"api\", \"headers\"))\n cls.con = ConnectMysql(\n host=config_data.get(\"mysql\", \"host\"),\n port=config_data.getint(\"mysql\", \"port\"),\n user=config_data.get(\"mysql\", \"user\"),\n password=config_data.get(\"mysql\", \"password\"),\n charset=config_data.get(\"mysql\", \"charset\"))\n\n @list_data(case_data)\n def test_register(self, item):\n url = config_data.get(\"api\", \"basic_path\") + item[\"url\"]\n if \"#mobile_phone#\" in item[\"data\"]:\n setattr(self, \"mobile_phone\", random_phone())\n item[\"data\"] = replace_data(item[\"data\"], self)\n data = eval(item[\"data\"])\n expected = eval(item[\"expected\"])\n count01 = None\n count02 = None\n flag = item[\"sql\"]\n sql = \"select id from futureloan.member\"\n if flag:\n count01 = self.con.get_count_data(sql)\n rep = requests.request(item[\"method\"], json=data, headers=self.headers, url=url).json()\n if flag:\n count02 = self.con.get_count_data(sql)\n try:\n assertDictIn(rep, expected)\n if flag:\n self.assertEqual(count01, count02)\n except AssertionError as e:\n log.error(\"用例--【{}】--执行失败 : {}\".format(item[\"title\"], e))\n raise e\n else:\n log.info(\"用例--【{}】--执行成功 : \".format(item[\"title\"]))\n\n\n","sub_path":"test_case/test_register.py","file_name":"test_register.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"538292701","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 20 11:55:46 2020\n\n@author: tapiev\n\"\"\"\n\nimport pika \n \n\nf = open(\"P:\\\\Git_algo\\key.txt\", \"r\")\nconnec_string =f.read()\nf.close()\nmessagetext =\"yo\"\n\nconnection = pika.BlockingConnection(pika.URLParameters(connec_string))\nchannel=connection.channel()\n\nchannel.exchange_declare(exchange='logs',\n exchange_type='fanout')\nresult = channel.queue_declare(queue='',exclusive = True)\n\n\nconnection.close()\n","sub_path":"assignements/read_subscriber.py","file_name":"read_subscriber.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"162547302","text":"from bs4 import BeautifulSoup\nimport pandas as pd\nfrom urllib.request import urlopen\n\n\"\"\" 
-------------------------------------------------------------\n\n This program requires the following packages:\n - Pandas;\n - BeautifulSoup;\n - urllib\n \n Use Python version 3.4 or above to run this program. After installing\n the packages, run the program to see if everything is working fine.\n\n-------------------------------------------------------------- \"\"\"\n# Select country and number of top players\ncountry = 'Germany' # Example, Germany\ntop = 10 # Example, Top 10\n\n\n\"\"\" Don't change anything below this line!! \"\"\"\n\n# Create a dataframe to collect player information.\ndf = pd.DataFrame(columns=['Rank', 'Country', 'Player', 'Location', 'Battles', 'Wins', 'Loses', 'W/L', 'Tier'])\n\n# Create a variable to store the number of top players.\nitop = 0\n# Create a variable to store the url index.\niurl = 0\n\n# ----------------------------------------------------------------------------------------------------\nwhile itop <= top: # <--- Modify here the total players to make the ranking (ex. 300 to Top 300)\n# ----------------------------------------------------------------------------------------------------\n if iurl == 0:\n url = \"https://silph.gg/leaderboard/\"\n else:\n url = \"https://silph.gg/leaderboard/\" + str(iurl*25 + 1)\n\n html = urlopen(url).read()\n rows_part1 = BeautifulSoup(html, \"html.parser\")\n\n # Extract information from each player\n tables = [\n [\n [td.get_text(strip=False) for td in tr.find_all('td')]\n for tr in table.find_all('tr')\n ]\n for table in rows_part1.find_all('table')\n ]\n\n # Extracts the nationality information of each player\n ctable = [\n [\n [\n [img['title'] for img in h3.find_all('img')]\n for h3 in tr.find_all('h3')\n ]\n for tr in table.find_all('tr')\n ]\n for table in rows_part1.find_all('table')\n ]\n\n # Fills the dataframe with players\n s = ' '\n for i in range(1, len(tables[0])):\n rank = int((tables[0][i][0].split()[0][1:]).replace(',', ''))\n if ctable[0][i][0]:\n count = ctable[0][i][0][0]\n else:\n count = 'Unknown'\n trainer = tables[0][i][2].split()[0]\n location = s.join(tables[0][i][2].split()[1:])\n battles = int(tables[0][i][3])\n wins = int(tables[0][i][4])\n loses = int(tables[0][i][5])\n rate = float(wins)/float(battles) * 100\n tier = tables[0][i][7].split()[0]\n\n df = df.append({\n 'Rank': rank,\n 'Country': count,\n 'Player': trainer,\n 'Location': location,\n 'Battles': battles,\n 'Wins': wins,\n 'Loses': loses,\n 'W/L': str(round(rate,2)),\n 'Tier': tier\n },\n ignore_index=True)\n\n condition = [country]\n cdf = df[df.Country.isin(condition)]\n\n itop = len(cdf.index)\n\n # Iurl increments\n iurl += 1\n\n print(\"Top: \"+str(itop)+\"\\turl: \"+url)\n\ndf.to_csv(\"ranking-silph-arena-global.xlsx\", index=False)\ncdf.to_csv(\"ranking-silph-arena-%s.xlsx\" % country.lower(), index=False)\n","sub_path":"ranking-silph-arena.py","file_name":"ranking-silph-arena.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"16692662","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport math\n\n#comece abaixo\nn = input ('Digite a quantidade de elementos da lista:')\na = []\nSoma = 0\nSoma2 = 0\nfor i in range (0,n,1):\n a.append (input ('Digite um número:'))\n Soma = Soma + a[i]\nMd = Soma / len (a)\nfor y in range (0,n,1):\n Soma2 = Soma2+ (a[y]-Md)**2\nS = (Soma2/len(a)-1)**0.5\nprint ('%.2f' %a[0])\nprint ('%.2f' %a[len(a)-1])\nprint ('%.2f' %Md)\nprint ('%.2f' 
%S)","sub_path":"moodledata/vpl_data/44/usersdata/106/14766/submittedfiles/desvpad.py","file_name":"desvpad.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"435149746","text":"\"\"\"\nThis is the main file. It is where our command line script runs from.\n\"\"\"\nfrom argparse import ArgumentParser\nfrom drawing import PlanarDrawing, FileError, DirError, ConnectivityError, MaxNodesError\n\n\ndef main(args):\n \"\"\" This is our main function \"\"\"\n obj = PlanarDrawing()\n file = args.file\n file_out = args.fileout if args.fileout is not None else None\n output = args.output\n try:\n obj.load_adj_list(file)\n obj.draw_graph(output, file_out)\n except FileError:\n print(\"Error: The text file is not formatted correctly. See help.\")\n except DirError:\n print(\"Error: The output directory given does not exist. See help.\")\n except ConnectivityError:\n print(\"Error: The input graph must be triconnected. See help.\")\n except MaxNodesError:\n print(\"Error: The input graph must have between 3 and 100 nodes. See help.\")\n except FileExistsError:\n print(\"Error: Filein does not exist. See help.\")\n print(\"Program exiting...\")\n\n\nif __name__ == \"__main__\":\n PARSER = ArgumentParser(\n description=(\n \"This program will produce a plane graph drawing of an input planar graph \"\n \"file, as long as the graph is triconnected. To read more about \"\n \"triconnected graphs please visit https://en.wikipedia.org/wiki/K-vertex-connected_graph\"\n )\n )\n PARSER.add_argument(\n \"file\",\n type=str,\n nargs=\"?\",\n help=(\n \"The file containing the adjacency list of your graph. \"\n \"The correct format for each line is as follows and the \"\n \"graph must be spread across multiple lines: 1 1 1 1\"\n ),\n )\n PARSER.add_argument(\n \"--fileout\",\n type=str,\n nargs=\"?\",\n default=\"\",\n help=(\n \"This optional is only for file inputs. It defines the output file name \"\n \"of the output graph. It is an optional argument. 
If no name is chosen then \"\n \"the graph will be saved as CURRENT_TIME.png\"\n ),\n )\n PARSER.add_argument(\n \"output\", type=str, nargs=\"?\", help=(\"The directory that will contain the output(s).\")\n )\n\n main(PARSER.parse_args())\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"359156391","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/keystoneredis/common/redissl.py\n# Compiled at: 2013-02-13 13:57:35\nimport redis, ssl\n\nclass Connection(redis.Connection):\n\n def __init__(self, *args, **kwargs):\n super(Connection, self).__init__(*args, **kwargs)\n self.ca_certs = kwargs.get('ca_certs', None)\n self.cert_reqs = kwargs.get('cert_reqs', ssl.CERT_REQUIRED)\n self.keyfile = kwargs.get('keyfile', None)\n self.certfile = kwargs.get('certfile', None)\n return\n\n def _connect(self):\n sock = super(Connection, self)._connect()\n ssl_sock = ssl.wrap_socket(sock, keyfile=self.keyfile, certfile=self.certfile, ca_certs=self.ca_certs, cert_reqs=self.cert_reqs)\n return ssl_sock","sub_path":"pycfiles/keystone_redis-0.1.0-py2.7/redissl.py","file_name":"redissl.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"99504990","text":"from .exceptions import *\r\nimport random\r\n# Complete with your own, just for fun :)\r\nLIST_OF_WORDS = ['LOL', 'HELLO', 'FOOL']\r\n\r\n\r\ndef _get_random_word(list_of_words):\r\n if not list_of_words:\r\n raise InvalidListOfWordsException\r\n return random.choice(list_of_words)\r\n\r\n\r\ndef _mask_word(word):\r\n if not word:\r\n raise InvalidWordException\r\n return '*' * len(word)\r\n\r\n\r\ndef _uncover_word(answer_word, masked_word, character):\r\n if len(character) > 1:\r\n raise InvalidGuessedLetterException\r\n if not answer_word or not masked_word or len(answer_word) != len(masked_word):\r\n raise InvalidWordException\r\n character = character.lower()\r\n new_masked_word = masked_word.lower()\r\n answer_word = answer_word.lower()\r\n for index, alpha in enumerate(answer_word): #just in case of repeated letters, iterating over each letter in answer_word\r\n if character == alpha:\r\n new_masked_word = new_masked_word[:index] + character + new_masked_word[(index + 1):]\r\n return new_masked_word\r\n\r\n\r\n\r\ndef guess_letter(game, letter):\r\n if letter in game['previous_guesses']:\r\n raise InvalidGuessedLetterException\r\n\r\n if game['answer_word'].lower() == game['masked_word'].lower() or game['remaining_misses'] == 0:\r\n raise GameFinishedException\r\n\r\n letter = letter.lower()\r\n previous_masked = game['masked_word']\r\n new_masked = _uncover_word(game['answer_word'], previous_masked, letter)\r\n\r\n #letter not in answer, masked_word remains unchanged\r\n if previous_masked == new_masked:\r\n game['remaining_misses'] -= 1 #remaining misses go down by one\r\n\r\n #letter in answer, masked_word is updated\r\n elif previous_masked != new_masked:\r\n game['masked_word'] = new_masked\r\n \r\n game['previous_guesses'].append(letter)\r\n\r\n if game['answer_word'].lower() == game['masked_word'].lower():\r\n raise GameWonException\r\n\r\n if game['remaining_misses'] <= 0:\r\n game['masked_word'] = _mask_word(game['masked_word'])\r\n raise 
GameLostException\r\n\r\n\r\ndef start_new_game(list_of_words=None, number_of_guesses=5):\r\n if list_of_words is None:\r\n list_of_words = LIST_OF_WORDS\r\n\r\n word_to_guess = _get_random_word(list_of_words)\r\n masked_word = _mask_word(word_to_guess)\r\n game = {\r\n 'answer_word': word_to_guess,\r\n 'masked_word': masked_word,\r\n 'previous_guesses': [],\r\n 'remaining_misses': number_of_guesses,\r\n }\r\n\r\n return game\r\n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"228682550","text":"import csv\nimport numpy as np\nfrom prepare_data_classification import handle_row, get_test_data\nfrom pathlib import Path\n\n\n# meant to be used as a reference point for the effectiveness of other models\n# this is the most naive regression solution possible\nparent_path = Path().resolve().parent\n\ndirpath_folder = parent_path / 'dirpath'\n\ndef naive_model():\n\t# return the mean of the train set\n\tLabel_lst = []\n\tfilename = dirpath_folder/ 'Training_set.csv'\n\n\twith open(filename, \"r\") as csvfile:\n\t\tdatareader = csv.reader(csvfile)\n\t\trow = next(datareader) # yield the header row\n\t\tx, y = handle_row(row)\n\t\tLabel_lst.append(y)\n\t\tfor row in datareader:\n\t\t\tx, y = handle_row(row)\n\t\t\tLabel_lst.append(y)\n\n\treturn np.mean(Label_lst)\n\n\ndef SmoothL1Loss(x, y):\n\tif abs(x-y) < 1:\n\t\treturn 0.5*(x-y)**2\n\telse:\n\t\treturn abs(x-y)-0.5\n\n\ndef L1Loss(x, y):\n\treturn abs(x-y)\n\n\ndef test_naive_model():\n\n\tval = naive_model()\n\tTestLoss = []\n\n\tfor x, y in get_test_data():\n\t\tloss = L1Loss(val, y)\n\t\tTestLoss.append(loss.item())\n\treturn np.mean(TestLoss)\n\n\nif __name__ == '__main__':\n\tprint(test_naive_model())\n","sub_path":"classification_model/naive_model_classification.py","file_name":"naive_model_classification.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"595683324","text":"class Solution:\n def majorityElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # Time Complexity - O(N)\n # Space Complexity - O(N)\n nums_count = {}\n for n in nums:\n if n in nums_count:\n nums_count[n] = nums_count[n] + 1\n else:\n nums_count[n] = 1\n\n length = len(nums)\n for key, value in nums_count.items():\n if value > int(length / 2):\n return key\n return None\n\nif __name__ == '__main__':\n s = Solution()\n print(s.majorityElement([3,2,3]))\n print(s.majorityElement([2,2,1,1,1,2,2]))","sub_path":"MajorityElement.py","file_name":"MajorityElement.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"131998622","text":"# Copyright 2018 The RLgraph authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\n\nfrom rlgraph import get_backend\nfrom rlgraph.agents import DQNAgent, ApexAgent, IMPALAAgent\nfrom rlgraph.environments import OpenAIGymEnv\nfrom rlgraph.spaces import FloatBox, Tuple\nfrom rlgraph.tests.test_util import config_from_path\n\n\nclass TestAllCompile(unittest.TestCase):\n \"\"\"\n Tests if all agents compile correctly on relevant configurations.\n \"\"\"\n def test_dqn_compilation(self):\n \"\"\"\n Tests DQN Agent compilation.\n \"\"\"\n env = OpenAIGymEnv(\"Pong-v0\", frameskip=4, max_num_noops=30, episodic_life=True)\n agent_config = config_from_path(\"configs/dqn_agent_for_pong.json\")\n agent = DQNAgent.from_spec(\n # Uses 2015 DQN parameters as closely as possible.\n agent_config,\n state_space=env.state_space,\n # Try with \"reduced\" action space (actually only 3 actions, up, down, no-op)\n action_space=env.action_space\n )\n\n def test_apex_compilation(self):\n \"\"\"\n Tests agent compilation without Ray to ease debugging on Windows.\n \"\"\"\n agent_config = config_from_path(\"configs/ray_apex_for_pong.json\")\n agent_config[\"execution_spec\"].pop(\"ray_spec\")\n # TODO remove after unified.\n if get_backend() == \"pytorch\":\n agent_config[\"memory_spec\"][\"type\"] = \"mem_prioritized_replay\"\n environment = OpenAIGymEnv(\"Pong-v0\", frameskip=4)\n\n agent = ApexAgent.from_spec(\n agent_config, state_space=environment.state_space,\n action_space=environment.action_space\n )\n print('Compiled apex agent')\n\n def test_impala_actor_compilation(self):\n \"\"\"\n Tests IMPALA agent compilation (actor).\n \"\"\"\n try:\n from rlgraph.environments.deepmind_lab import DeepmindLabEnv\n except ImportError:\n print(\"Deepmind Lab not installed: Will skip this test.\")\n return\n\n agent_config = config_from_path(\"configs/impala_agent_for_deepmind_lab_env.json\")\n env = DeepmindLabEnv(\n level_id=\"seekavoid_arena_01\", observations=[\"RGB_INTERLEAVED\", \"INSTR\"], frameskip=4\n )\n\n actor_agent = IMPALAAgent.from_spec(\n agent_config,\n type=\"actor\",\n state_space=env.state_space,\n action_space=env.action_space,\n internal_states_space=Tuple(FloatBox(shape=(256,)), FloatBox(shape=(256,)), add_batch_rank=True),\n # Make session-creation hang in docker.\n execution_spec=dict(disable_monitoring=True)\n )\n # Start Specifiable Server with Env manually.\n actor_agent.environment_stepper.environment_server.start()\n print(\"Compiled IMPALA type=actor agent.\")\n actor_agent.environment_stepper.environment_server.stop()\n\n def test_impala_learner_compilation(self):\n \"\"\"\n Tests IMPALA agent compilation (learner).\n \"\"\"\n try:\n from rlgraph.environments.deepmind_lab import DeepmindLabEnv\n except ImportError:\n print(\"Deepmind Lab not installed: Will skip this test.\")\n return\n\n 
agent_config = config_from_path(\"configs/impala_agent_for_deepmind_lab_env.json\")\n env = DeepmindLabEnv(\n level_id=\"seekavoid_arena_01\", observations=[\"RGB_INTERLEAVED\", \"INSTR\"], frameskip=4\n )\n\n learner_agent = IMPALAAgent.from_spec(\n agent_config,\n type=\"learner\",\n state_space=env.state_space,\n action_space=env.action_space,\n internal_states_space=Tuple(FloatBox(shape=(256,)), FloatBox(shape=(256,)), add_batch_rank=True),\n )\n\n print(\"Compiled IMPALA type=learner agent.\")\n","sub_path":"rlgraph/tests/agent_functionality/test_all_compile.py","file_name":"test_all_compile.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"460275107","text":"import json\nimport sqlite3\nimport time\n\n\nclass VideoFeature(object):\n\n def __init__(self, db_path):\n if db_path is not None:\n self.video_connect = sqlite3.connect(db_path)\n self.cursor = self.video_connect.cursor()\n self.video_dict = dict()\n\n def insert(self, video_feature_path):\n count = 0\n\n video_file = open(video_feature_path, 'r')\n line = video_file.readline()\n while line:\n\n # print(line)\n item = json.loads(line)\n sql = \"INSERT INTO VIDEO (ID, FEATURE) VALUES ('%s', '%s')\" % (\n item[\"item_id\"], json.dumps(item[\"video_feature_dim_128\"]))\n self.cursor.execute(sql)\n line = video_file.readline()\n count += 1\n if count % 100000 == 0:\n self.video_connect.commit()\n print(count)\n self.video_connect.commit()\n video_file.close()\n\n def get_all_from_origin_file(self, video_feature_path):\n video_file = open(video_feature_path, 'r')\n line = video_file.readline()\n count = 0\n while line:\n count += 1\n if count % 500000 == 0:\n print(\"video\", count, len(self.video_dict))\n # break\n # print(line)\n item = json.loads(line)\n self.video_dict[item[\"item_id\"]] = json.dumps(item[\"video_feature_dim_128\"])\n line = video_file.readline()\n video_file.close()\n return self.video_dict\n\n @classmethod\n def save_origin_to_json_file(cls, video_feature_path):\n video_file = open(video_feature_path, 'r')\n line = video_file.readline()\n count = 0\n file_count = 0\n video_dict = dict()\n while line:\n count += 1\n if count % 200000 == 0:\n print(\"video\", count, len(video_dict))\n if count % 200000 == 0:\n with open(\"/Volumes/Seagate Expansion Drive/byte/track2/track2_video_features_%s.json\" % file_count, \"w\") as f:\n json.dump(video_dict, f)\n file_count += 1\n video_dict = dict()\n # break\n # print(line)\n item = json.loads(line)\n video_dict[item[\"item_id\"]] = json.dumps(item[\"video_feature_dim_128\"])\n\n line = video_file.readline()\n video_file.close()\n with open(\"/Volumes/Seagate Expansion Drive/byte/track2/track2_video_features_%s.json\" % file_count, \"w\") as f:\n json.dump(video_dict, f)\n file_count += 1\n\n def get_all_from_json_file(self, video_json_file_list):\n for video_json_file in video_json_file_list:\n with open(video_json_file) as f:\n print(f)\n video_dict = json.load(f)\n for key, value in video_dict.items():\n self.video_dict[key] = value\n # f.close()\n return self.video_dict\n\n def get(self, item_id):\n if item_id not in self.video_dict:\n print(\"video embedding is 0!!!!!!\")\n return json.dumps([0 for _ in range(128)])\n return self.video_dict.get(item_id)\n\n def get_video_embedding(self, item_id):\n start = time.time()\n sql = \"SELECT * FROM VIDEO WHERE id=%s\" % item_id\n result = list()\n cursor = self.cursor.execute(sql)\n for row in cursor:\n result = 
json.loads(row[1])\n\n # print(\"consume: %s\" % (time.time() - start))\n if len(result) == 0:\n print(\"video embedding is 0!!!!!!!\", item_id)\n result = [0 for _ in range(128)]\n return result\n\n\nif __name__ == \"__main__\":\n # VideoFeature().insert(\"/Volumes/Seagate Expansion Drive/byte/track2/track2_video_features.txt\")\n db_path = \"/Volumes/Seagate Expansion Drive/byte/track2/video.db\"\n video_feature = VideoFeature(db_path)\n # print(video_feature.get_video_embedding(123))\n # video_dict = video_feature.get_all_from_origin_file(\"/Volumes/Seagate Expansion Drive/byte/track2/track2_video_features.txt\")\n # fp = open(\"/Volumes/Seagate Expansion Drive/byte/track2/track2_video_features.json\", \"w\")\n # json.dump(video_dict, fp)\n # fp.close()\n # video_feature.save_origin_to_json_file(\"/Volumes/Seagate Expansion Drive/byte/track2/track2_video_features.txt\")\n # print(len(video_feature.video_dict))\n video_feature.get_all_from_json_file([\n \"/Volumes/Seagate Expansion Drive/byte/track2/track2_video_features_%s.json\" % i for i in range(21)])\n # time.sleep(200)\n","sub_path":"data_analy/video_feature.py","file_name":"video_feature.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"170354587","text":"\"\"\"\nThis file contains a set of common fixtures to get a set of different but\npredicable NDCube objects.\n\"\"\"\n\nimport astropy.units as u\nimport numpy as np\nimport pytest\nfrom astropy.time import Time\nfrom astropy.wcs import WCS\n\nfrom ndcube import ExtraCoords, NDCube\n\n\n@pytest.fixture\ndef wcs_4d():\n header = {\n 'CTYPE4': 'HPLN-TAN',\n 'CUNIT4': 'arcsec',\n 'CDELT4': 5,\n 'CRPIX4': 5,\n 'CRVAL4': 0,\n\n 'CTYPE3': 'HPLT-TAN',\n 'CUNIT3': 'arcsec',\n 'CDELT3': 20,\n 'CRPIX3': 0,\n 'CRVAL3': 0,\n\n 'CTYPE2': 'WAVE ',\n 'CUNIT2': 'Angstrom',\n 'CDELT2': 0.2,\n 'CRPIX2': 0,\n 'CRVAL2': 0,\n\n 'CTYPE1': 'TIME ',\n 'CUNIT1': 'min',\n 'CDELT1': 0.4,\n 'CRPIX1': 0,\n 'CRVAL1': 0,\n }\n return WCS(header=header)\n\n@pytest.fixture\ndef wcs_3d():\n header = {\n 'CTYPE1': 'WAVE ',\n 'CUNIT1': 'Angstrom',\n 'CDELT1': 0.2,\n 'CRPIX1': 0,\n 'CRVAL1': 10,\n\n 'CTYPE2': 'HPLT-TAN',\n 'CUNIT2': 'arcsec',\n 'CDELT2': 5,\n 'CRPIX2': 5,\n 'CRVAL2': 0,\n\n 'CTYPE3': 'HPLN-TAN',\n 'CUNIT3': 'arcsec',\n 'CDELT3': 10,\n 'CRPIX3': 0,\n 'CRVAL3': 0,\n }\n\n return WCS(header=header)\n\n@pytest.fixture\ndef wcs_2d_spatial():\n spatial = {\n 'CTYPE1': 'HPLT-TAN',\n 'CUNIT1': 'arcsec',\n 'CDELT1': 2,\n 'CRPIX1': 5,\n 'CRVAL1': 0,\n\n 'CTYPE2': 'HPLN-TAN',\n 'CUNIT2': 'arcsec',\n 'CDELT2': 4,\n 'CRPIX2': 5,\n 'CRVAL2': 0,\n }\n return WCS(header=spatial)\n\n@pytest.fixture\ndef wcs_1d():\n spatial = {\n 'CNAME1': 'spectral',\n 'CTYPE1': 'WAVE',\n 'CUNIT1': 'nm',\n 'CDELT1': 0.5,\n 'CRPIX1': 2,\n 'CRVAL1': 0.5,\n }\n return WCS(header=spatial)\n\n\ndef data_nd(shape):\n nelem = np.product(shape)\n return np.arange(nelem).reshape(shape)\n\n\ndef generate_time_extra_coord(data_cube):\n shape = data_cube.shape[-1]\n lut = Time(\"2020-02-02T00:00:00\", format=\"isot\") + np.linspace(0, shape * 10, num=shape, endpoint=False) * u.s\n return ExtraCoords.from_lookup_tables([\"extra_time\"], [0], [lut])\n\n\n@pytest.fixture\ndef ndcube_4d_simple(wcs_4d):\n shape = (5, 8, 10, 12)\n data_cube = data_nd(shape)\n return NDCube(data_cube, wcs=wcs_4d)\n\n\n@pytest.fixture\ndef ndcube_4d_uncertainty(wcs_4d):\n shape = (5, 8, 10, 12)\n data_cube = data_nd(shape)\n uncertainty = np.sqrt(data_cube)\n return 
NDCube(data_cube, wcs=wcs_4d, uncertainty=uncertainty)\n\n\n@pytest.fixture\ndef ndcube_4d_mask(wcs_4d):\n shape = (5, 8, 10, 12)\n data_cube = data_nd(shape)\n uncertainty = np.sqrt(data_cube)\n mask = data_cube % 2\n return NDCube(data_cube, wcs=wcs_4d, uncertainty=uncertainty, mask=mask)\n\n\n@pytest.fixture\ndef ndcube_4d_extra_coords(wcs_4d):\n shape = (5, 8, 10, 12)\n data_cube = data_nd(shape)\n extra_coords = generate_time_extra_coord(data_cube)\n return NDCube(data_cube, wcs=wcs_4d, extra_coords=extra_coords)\n\n\n@pytest.fixture\ndef ndcube_4d_unit_uncertainty(wcs_4d):\n shape = (5, 8, 10, 12)\n data_cube = data_nd(shape)\n uncertainty = np.sqrt(data_cube)\n return NDCube(data_cube, wcs=wcs_4d,\n unit=u.J, uncertainty=uncertainty)\n\n\n@pytest.fixture\ndef ndcube_4d(request):\n \"\"\"\n This is a meta fixture for parametrizing all the 4D ndcubes.\n \"\"\"\n return request.getfixturevalue(\"ndcube_4d_\" + request.param)\n\n\n@pytest.fixture\ndef ndcube_2d_simple(wcs_2d_spatial):\n shape = (10, 12)\n data_cube = data_nd(shape)\n return NDCube(data_cube, wcs=wcs_2d_spatial)\n\n\n@pytest.fixture\ndef ndcube_2d(request):\n \"\"\"\n This is a meta fixture for parametrizing all the 2D ndcubes.\n \"\"\"\n return request.getfixturevalue(\"ndcube_2d_\" + request.param)\n\n\n@pytest.fixture\ndef ndcube_1d_simple(wcs_1d):\n shape = (10,)\n data_cube = data_nd(shape)\n return NDCube(data_cube, wcs=wcs_1d)\n","sub_path":"ndcube/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"52952937","text":"# -*- coding: utf-8 -*-\n# MinIO Python Library for Amazon S3 Compatible Cloud Storage.\n# Copyright (C) 2016 MinIO, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and\n# my-objectname are dummy values, please replace them with original values.\n\nfrom minio import Minio\nimport os\nimport shlex, subprocess\nimport json\nimport infer_tap\nimport tap as tap\n\nbucket = 'preprocess'\n\nclient = Minio('localhost:9000',\n access_key='admin',\n secret_key='password',\n secure=False)\n\n# Put a file with default content-type.\nevents = client.listen_bucket_notification(bucket, '',\n '',\n ['s3:ObjectCreated:*'])\n\nuser = 'raymond'\ntempFolder = '/home/'+user+'/Tap/dataset/tap/temp/'\n\nfor event in events:\n\t#print (event)\n\tfileName = event[\"Records\"][0][\"s3\"][\"object\"][\"key\"]\n\tfileNameWOExt = os.path.splitext(fileName)[0]\n\tprint (fileNameWOExt)\n\tprint(client.fget_object(bucket, fileName, tempFolder+fileName))\n\t\n\ttap.create_manifest(data_dir='./dataset/tap/temp', manifest_path='manifest.TAP')\n\tinfer_tap.infer(fileNameWOExt)","sub_path":"listen_preprocess_notif.py","file_name":"listen_preprocess_notif.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"551118227","text":"from 
typing import Counter\r\n\r\n\r\ntry:\r\n def solve(word):\r\n if len(word)%2==0:\r\n mid=len(word)//2\r\n start=word[:mid]\r\n end=word[mid:]\r\n if Counter(start)==Counter(end):\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n else:\r\n mid=len(word)//2\r\n start=word[:mid]\r\n end=word[mid+1:]\r\n if Counter(start)==Counter(end):\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n\r\n if __name__==\"__main__\":\r\n n=int(input())\r\n for i in range(n):\r\n word=input()\r\n solve(word)\r\nexcept:\r\n pass","sub_path":"CodeChef/DSA_Learning_Series _Contest_1/Lapindromes.py","file_name":"Lapindromes.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"564335174","text":"#import PizzaProblem\nimport random\nimport os\nimport sys\n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\ninputFileNames = {\n \"a\": \"a_example\",\n \"b\": \"b_small\",\n \"c\": \"c_medium\",\n \"d\": \"d_quite_big\",\n \"e\": \"e_also_big\"\n}\n\nfolder = (\"/out\")\n\ndef save_file(pizza_no_chosen, inputName, total_slices):\n outputFileName = \"{}/{}/{}_out_{}.out\".format(__location__,folder, inputName, \"score_\" + str(total_slices))\n with open(outputFileName, \"w\") as out:\n out.write('{}\\n'.format(len(pizza_no_chosen)))\n for value in pizza_no_chosen.keys():\n out.write('{} '.format(value))\n #for slide in slideshow:\n # out.write(str(slide) + \"\\n\")\n #with open(outputFileName, \"w\") as out:\n # out.write(str(len(slideshow)) + \"\\n\")\n # for slide in slideshow:\n # out.write(str(slide) + \"\\n\")\n\ndef main_run():\n arguments = [x.lower() for x in sys.argv[1::]]\n if len(arguments) == 0:\n letter = \"d\"\n else:\n letter = arguments[0]\n\n print(\"\\nSTARTED...\")\n \n if not os.path.exists(folder):\n os.makedirs(folder)\n inputName = inputFileNames[letter]\n try:\n with open(\"input/{}.in\".format(inputName), \"r\") as inputFile:\n print(\"yes\")\n file = inputFile.readlines()\n line1 = file[0]\n max_slices = int(line1.split(\" \")[0])\n different_types_of_pizza = line1[1]\n slices_in_pizza = (file[1].split(\" \"))\n slices_in_pizza[-1] = slices_in_pizza[-1].strip()\n print(\"max_slices: {}, pizza_types: {}\".format(max_slices, different_types_of_pizza))\n print(\"slices_in_pizza: {}\".format(slices_in_pizza))\n\n total_slices = 0\n pizza_no_chosen = {}\n #slices_in_pizza = reversed(slices_in_pizza)\n i = len(slices_in_pizza) - 1\n while i >= 0:\n temp = total_slices + int(slices_in_pizza[i])\n if temp <= max_slices:\n total_slices = temp\n pizza_no_chosen[i]=slices_in_pizza[i]\n i-=1\n else:\n i-=1\n\n print(\"total_slices: {}\".format(total_slices))\n print(\"pizzas_no_chosen: {}\".format(pizza_no_chosen))\n\n \n #for line in inputFile:\n # if (lineNumber == 0):\n # line1 = line.split(\" \")\n # max_slices = line1[0]\n # different_types_of_pizza = line1[1]\n \n # else:\n # slices_in_pizza = line.split(\" \")\n # lineNumber += 1\n # print(\"max_slices: {}, pizza_types: {}\".format(max_slices, different_types_of_pizza))\n # print(\"slices_in_pizza: {}\".format(slices_in_pizza))\n\n #photos = []\n #lineNumber = 0\n #for line in file:\n #if (lineNumber != 0):\n #photos.append(Photo(lineNumber-1, line))\n #lineNumber += 1\n \n except IOError:\n print(\"!!! 
{}.txt NOT FOUND IN INPUT FOLDER !!!\".format(inputName))\n exit()\n\n save_file(pizza_no_chosen, inputName, total_slices)\n \n\n\nif __name__ == \"__main__\":\n main_run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"43451909","text":"import os\n\nfrom fabric.api import local\n\nfrom pynb.notebook import Notebook\n\n\nclass MyTestNotebook(Notebook):\n def cells(self, N):\n int((N * (N + 1)) / 2)\n\n\ndef main():\n nb = MyTestNotebook()\n nb.add_argument('--N', default=10, type=int)\n nb.run()\n nb.export_ipynb('-')\n\n\ndef test_custom_nbapp():\n cmd = 'python3 {} --N 10000 --disable-cache'\n output = local(cmd.format(os.path.realpath(__file__)), capture=True)\n assert '50005000' in output\n\n\n#############################################################################\nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/test_class.py","file_name":"test_class.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"469225118","text":"\n# Import required libraries and dependencies\nfrom .converters import ode_to_sympy\nfrom sympy import *\nimport numpy as np\n\ndef load_ODE_model(n_states, n_params = 0):\n x, f, P = ode_to_sympy(n_states, n_params)\n return x, f, P\n\nclass System(object):\n '''\n Class that stores the system model in this form: x_dot = f(x, theta), y = Cx.\n '''\n def __init__(self, x, f, params = None, C = None, g = None, h = None, u = None,\n params_values = [], x_init = [], input_values = []):\n \"\"\"\n The general system dynamics : x_dot = f(x, P) + g(x, P)u, y = h(x,P)\n Use the utility function ode_to_sympy to write these.\n\n x : (Symbolic) state variable vector\n\n f : The system model dynamics. Writted symbolically with symbols x = [x_0, x_1, ...]\n for states and P = [P_0, P_1, ...] for parameters.\n\n params : (Symbolic) parameters used to define f, g, h. None if no symbolic parameters.\n\n g : The actuator / input dynamics. None by default if the system is autonomous.\n\n C : The output matrix for y = Cx, size of C must be #outputs times #states. If None,\n the argument h is expected. 
Cannot set C and h both.\n\n h : The output description y = h(x, P) where x are states and P are parameters.\n params_values : Values for model parameters\n\n u : List of inputs\n\n x_init : Model initial condition\n \"\"\"\n\n self.x = x\n self.n = len(x)\n self.f = f\n self.params = params\n self.C = C\n self.g = g\n self.h = h\n self.u = u\n self.params_values = params_values\n self.input_values = input_values\n self.x_init = x_init\n return\n\n def set_dynamics(self, f = None, g = None, h = None, C = None, u = None, params = []):\n \"\"\"\n Set either f, g, h, or C to the System object or parameter values using P.\n \"\"\"\n if f:\n self.f = f\n if g:\n self.g = g\n if h:\n self.h = h\n if C:\n self.C = C\n if u:\n self.u = u\n if params:\n self.params = params\n return self\n\n def evaluate(self, f, x, P, u = None):\n \"\"\"\n Evaluate the given symbolic function (f) that is part of the System\n at the values given by x for self.x and P for self.params\n \"\"\"\n fs = []\n for i in range(len(f)):\n fi = f[i]\n fi = fi.subs(list(zip(self.x, x)))\n if self.u is not None:\n fi = fi.subs(list(zip(self.u, u)))\n fi = fi.subs(list(zip(self.params, P)))\n fs.append(fi)\n return fs\n\n def set_parameters(self, params_values = [], x_init = []):\n \"\"\"\n Set model parameters and initial conditions\n \"\"\"\n f_new = []\n if params_values:\n self.params_values = [pi for pi in params_values]\n if self.params:\n for fi in self.f:\n f_new.append(fi.subs(list(zip(self.params, self.params_values))))\n if x_init:\n self.x_init = [pi for pi in x_init]\n self.f = f_new\n return f_new\n\n def load_SBML_model(self, filename):\n raise NotImplementedError\n\n\n def load_Sympy_model(self, sympy_model):\n raise NotImplementedError","sub_path":"modelling/SURF_2020/autoReduce2/auto_reduce/.ipynb_checkpoints/system-checkpoint.py","file_name":"system-checkpoint.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"152721560","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nimport constants\nfrom game_ac_network import GameACFFNetwork, GameACLSTMNetwork\n\n\ndef _main():\n # use CPU for weight visualize tool\n device = \"/cpu:0\"\n Network = GameACLSTMNetwork if constants.USE_LSTM else GameACFFNetwork\n global_network = Network(constants.ACTION_SIZE, -1, device)\n\n sess = tf.Session()\n init = tf.global_variables_initializer()\n sess.run(init)\n\n saver = tf.train.Saver()\n checkpoint = tf.train.get_checkpoint_state(constants.CHECKPOINT_DIR)\n if checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"checkpoint loaded:\", checkpoint.model_checkpoint_path)\n else:\n print(\"Could not find old checkpoint\")\n\n W_conv1 = sess.run(global_network.W_conv1)\n\n # show graph of W_conv1\n fig, axes = plt.subplots(\n 4, 16, figsize=(12, 6),\n subplot_kw={'xticks': [], 'yticks': []})\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n\n for ax, i in zip(axes.flat, range(4*16)):\n inch = i//16\n outch = i % 16\n img = W_conv1[:, :, inch, outch]\n ax.imshow(img, cmap=plt.cm.gray, interpolation='nearest')\n ax.set_title(str(inch) + \",\" + str(outch))\n plt.show()\n\n\nif __name__ == '__main__':\n _main()\n","sub_path":"a3c_visualize.py","file_name":"a3c_visualize.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} 
+{"seq_id":"63911449","text":"import argparse\nimport csv\nfrom os import listdir\nfrom os.path import exists, join, splitext\nfrom pathlib import Path\n\nimport cv2\nfrom shapely.affinity import scale\nfrom shapely.geometry import Polygon\nfrom shapely.ops import unary_union\nfrom tqdm import tqdm\n\n\nclass Prediction():\n def __init__(self, polygon, conf):\n self.polygon = polygon\n self.conf = conf\n\n\ndef main(opt):\n Path(opt.crop_dir).mkdir(parents=True, exist_ok=True)\n\n file_to_bboxs = [['name', 'x1', 'y1', 'x2', 'y2']]\n for input_text_name in tqdm(listdir(opt.prediction_dir)):\n name, ext = splitext(input_text_name)\n name = name[4:]\n if ext == '.txt':\n file_to_bbox = [name]\n text_path = join(opt.prediction_dir, input_text_name)\n img_path = join(opt.img_dir, f'{name}.jpg')\n assert exists(img_path), f'{img_path} does not exist'\n\n predictions = []\n with open(text_path, 'r') as f:\n lines = f.readlines()\n\n for l in lines:\n # Last number is confidence\n polygon_and_conf = l.split(',')\n conf = float(polygon_and_conf[-1])\n polygon_coords = [int(x) for x in polygon_and_conf[:-1]]\n polygon_coords = zip(\n polygon_coords[::2], polygon_coords[1::2])\n p = Polygon(polygon_coords)\n predictions.append(Prediction(p, conf))\n\n if predictions:\n # sort by confidence\n predictions = sorted(\n predictions, key=lambda p: p.conf, reverse=True)\n\n main_polygon = predictions[0].polygon\n main_conf = predictions[0].conf\n region = scale(main_polygon, xfact=5, yfact=1.5)\n\n for p in predictions[1:]:\n in_region = region.intersects(p.polygon)\n if p.conf > 0.45 and in_region:\n main_polygon = unary_union([p.polygon, main_polygon])\n\n main_polygon = [int(x) for x in main_polygon.bounds]\n img = cv2.imread(img_path)\n cropped = img[main_polygon[1]:main_polygon[3],\n main_polygon[0]:main_polygon[2]]\n h, w, c = cropped.shape\n if w > 0 and h > 0:\n output_path = join(opt.crop_dir, f'{name}.jpg')\n cv2.imwrite(output_path, cropped)\n file_to_bbox += main_polygon\n else:\n print(f'{img_path} has no polygon')\n\n file_to_bboxs.append(file_to_bbox)\n\n with open(opt.crop_csv_path, 'w') as f:\n csv_writer = csv.writer(f)\n csv_writer.writerows(file_to_bboxs)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--img_dir', help='')\n parser.add_argument('--prediction_dir', help='')\n parser.add_argument('--crop_dir', help='')\n parser.add_argument('--crop_csv_path', help='')\n opt = parser.parse_args()\n main(opt)\n","sub_path":"china_steel/crop_img.py","file_name":"crop_img.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"517258279","text":"import random as rd\r\n\r\nThree_board = [0, 0, 1]\r\n\r\nclass Audience():\r\n def __init__(self,name):\r\n self.name = name\r\n self.score_A = 0\r\n self.score_B = 0\r\n self.index = 0\r\n def get_board(self):\r\n self.index = rd.sample(range(0,3),1)[0]\r\n def select_A(self):\r\n self.score_A += Three_board[self.index]\r\n def select_B(self,sheep_index):\r\n self.score_B += Three_board[3-self.index-sheep_index]\r\n\r\nclass Host():\r\n def __init__(self,name):\r\n self.name = name\r\n self.sheep_index = 0\r\n\r\n def Shuffle(self):\r\n rd.shuffle(Three_board)\r\n \r\n def get_sheep_index(self, Audience_index):\r\n if Three_board.index(1) == 0:\r\n if Audience_index == 0:\r\n self.sheep_index = rd.sample(range(1,3),1)[0]\r\n else:\r\n self.sheep_index = 3-Audience_index\r\n elif Three_board.index(1) == 1:\r\n if 
Audience_index == 1:\r\n self.sheep_index = rd.sample(range(0,2),1)[0]*2\r\n else:\r\n self.sheep_index = 2-Audience_index\r\n else:\r\n if Audience_index == 2:\r\n self.sheep_index = rd.sample(range(0,2),1)[0]\r\n else:\r\n self.sheep_index = 1-Audience_index\r\n\r\nhost = Host('Bob')\r\naudience = Audience('Jack')\r\n\r\nfor i in range(0,90000):\r\n host.Shuffle()\r\n audience.get_board()\r\n host.get_sheep_index(audience.index)\r\n audience.select_A()\r\n audience.select_B(host.sheep_index)\r\n\r\nprint('score_A =',audience.score_A/90000)\r\nprint('score_B =',audience.score_B/90000)\r\n\r\n'''\r\nA = 0\r\nB = 0\r\nC = 0\r\nfor i in range(0,900000):\r\n index = rd.sample(range(0,3),1)\r\n if index[0] == 0:\r\n A +=1\r\n elif index[0] == 1:\r\n B +=1\r\n else:\r\n C +=1\r\nprint('A:', A)\r\nprint('B:', B)\r\nprint('C:', C)\r\n'''","sub_path":"Hull_test.py","file_name":"Hull_test.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"48499971","text":"from numpy import zeros, float64\r\nfrom time import perf_counter\r\nimport matplotlib.pylab as plt\r\nN=[1,5,10,20,40,50,80,100,150,200,300,500,1000,1500,2000,5000,10000] #,2000,5000,10000\r\n\r\nUsoMemoria=[]\r\nUsoTiempo=[]\r\nNlista=[]\r\nfor a in range(10):\r\n for i in N:\r\n A=zeros((i,i), dtype=float64)+1\r\n B=zeros((i,i), dtype=float64)+2\r\n t1=perf_counter()\r\n C=A@B\r\n t2=perf_counter()\r\n t=t2-t1\r\n UsoTiempo.append(t)\r\n memoria= A.nbytes + B.nbytes + C.nbytes\r\n UsoMemoria.append(memoria)\r\n Nlista.append(i)\r\n\r\n\r\n\r\n#Creacion del archivo para guardar datos\r\narchivo= open(\"usorecursos.txt\", \"w+\")\r\nfor i in range(len(N*10)):\r\n archivo.write(str(UsoMemoria[i])+\" \"+str(UsoTiempo[i])+\" \"+str(Nlista[i])+\" \"+\"\\n\")\r\narchivo.close()\r\n","sub_path":"timing_matmul.py","file_name":"timing_matmul.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"635180345","text":"\"\"\"\nThis module allowed to create player\n\"\"\"\nimport math\nimport pygame\nimport random\n\nfrom core.generator.materials.imaterials import IMaterial\nfrom core.generator.materials.imaterials import Collision\n\n\n# This function give y position for a power and a distance from jump start\ndef jump_equation(x, power):\n return int((power * (0.5 * math.pow(x, 2) - (4.0 * x) + 8) + 1) * 100) / 100\n\n\nclass IPlayer(IMaterial):\n\n def __init__(self):\n super().__init__(texture_path=\"assets/textures/player/player_texture$0.png\")\n self.id = -1\n self.can_spawn = True\n self.is_in_jump = False\n self.jump_x = -0.1\n self.power = 0.0\n self.last_y = 0.0\n self.is_auto_down = False\n self.down_x = 3.9\n\n self.jump_core = None\n\n # Spawn the player\n def spawn(self, screen, position, size):\n if not self.can_spawn:\n return\n self.jump_core = JumpCore(screen, size)\n self.pop_gen(pos=position, size=size)\n self.pop(screen)\n\n # Return true if the player can spawn\n def can_spawn(self):\n return self.can_spawn\n\n # Do action for a collision value\n def collision(self, level, block, windows_size, screen, game):\n self.jump_core.show(game)\n windows_y_modulo = windows_size[1] % 50\n if not self.is_in_jump and not self.is_auto_down and (windows_y_modulo - self.get_position_y()) % 50 != 0:\n position = self.get_position()\n position[1] -= position[1] % 50\n position[1] += windows_y_modulo\n self.set_position(position)\n\n for material in block:\n 
collision = Collision(material=material).is_in_collision(player=self, screen=screen,\n windows_size=windows_size,\n level=level)\n if collision == 1:\n self.stop_jump()\n self.stop_down()\n return\n if collision == 2:\n self.auto_down()\n return\n if collision == 3 or collision == 4:\n self.kill_(game, collision)\n return\n if collision == 5:\n self.win_(game)\n\n # Kill\n @staticmethod\n def kill_(game, cause):\n game.reset(cause)\n\n # Win\n @staticmethod\n def win_(game):\n game.win_()\n\n # Start a jump\n def jump(self, power, last_y):\n if not self.is_in_jump:\n self.is_in_jump = True\n self.jump_x = -0.1\n self.power = power\n self.last_y = last_y\n sound_index = random.randint(0, 6)\n pygame.mixer.Channel(1).play(pygame.mixer.Sound('assets/sounds/player/jump_sound/jump$' + str(sound_index) + '.wav'))\n\n # Jump loop\n def jumping(self, game, level, screen):\n self.jump_x += 0.1\n temp = int(self.jump_x * 10) / 10\n position = self.get_position()\n if temp <= 4.0:\n position[1] -= int(jump_equation(x=temp, power=self.power))\n else:\n position[1] += int(jump_equation(x=temp, power=self.power))\n block_position = self.get_location().get_block_location()\n id = level.get_block_id_at(block_position[0], block_position[1] - 1)\n id_1 = level.get_block_id_at(block_position[0] + 1, block_position[1] - 1)\n id_2 = level.get_block_id_at(block_position[0], block_position[1])\n id_3 = level.get_block_id_at(block_position[0] + 1, block_position[1])\n self.set_position(position=position)\n self.pop(screen=screen)\n if (id_2 != 0 and id_2 != 3) and (id_3 != 0 and id_3 != 3):\n self.kill_(game, 6)\n if id == 3 or id_1 == 3:\n self.stop_jump()\n return\n\n # Stop the jump\n def stop_jump(self):\n self.jump_x = -0.1\n self.power = 0.0\n self.is_in_jump = False\n self.last_y = 0.0\n\n # Start auto down\n def auto_down(self):\n if not self.is_auto_down:\n self.is_auto_down = True\n self.down_x = 3.9\n\n # Stop auto down\n def stop_down(self):\n self.down_x = 3.9\n self.is_auto_down = False\n\n # Auto down loop\n def down(self, level, screen):\n self.down_x += 0.1\n temp = int(self.down_x * 10) / 10\n position = self.get_position()\n position[1] += int(jump_equation(x=temp, power=1))\n self.set_position(position=position)\n self.pop(screen=screen)\n block_position = self.get_location().get_block_location()\n id = level.get_block_id_at(block_position[0], block_position[1] - 1)\n id_1 = level.get_block_id_at(block_position[0] + 1, block_position[1] - 1)\n if id == 3 or id_1 == 3:\n self.stop_down()\n return\n\n # Obtain JumpCore\n def get_jump_core(self):\n return self.jump_core\n\n\n# This class is used for manage player jump and graphic jump\nclass JumpCore(pygame.sprite.Sprite):\n\n def __init__(self, screen, window_size):\n super().__init__()\n self.screen = screen\n self.window_size = window_size\n self.bar_image = pygame.image.load(\"assets/textures/ig/jump/jump_load_bar/jump_bar$0.png\").convert_alpha()\n self.bar_rect = self.bar_image.get_rect()\n self.bar_rect.x = self.window_size[0] / 2 - 200\n self.bar_rect.y = 20\n\n self.progression_image = pygame.image.load(\"assets/textures/ig/jump/jump_load_bar/jump_bar_progression$0.png\") \\\n .convert_alpha()\n self.progression_rect = self.progression_image.get_rect()\n self.progression_rect.x = self.window_size[0] / 2 - 199\n self.progression_rect.y = 21\n\n self.is_charged = False\n self.x = 0\n self.incrementation = 2\n\n # Manage jump bar\n def show(self, game):\n if game.key.get(pygame.K_SPACE):\n self.incrementation = 2\n if self.x < 399:\n self.x 
+= 10\n self.is_charged = True\n elif self.x > 0:\n self.incrementation -= 1\n self.is_charged = self.incrementation > 0\n self.x -= 10\n self.screen.blit(self.bar_image, self.bar_rect)\n self.screen.blit(self.progression_image, self.progression_rect, (0, 0, self.x, 100))\n\n # Obtain current jump power\n def get_jump_power(self):\n if self.x > 275:\n return 0.5 + (((275 - (self.x - 275)) / 275) * 1.2)\n return 0.5 + (self.x / 275) * 1.2\n\n # Return True if the player can jump\n def is_charged_(self):\n return self.is_charged\n","sub_path":"SquareJumper/source/core/player/iplayer.py","file_name":"iplayer.py","file_ext":"py","file_size_in_byte":6617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"603733779","text":"import pandas as pd\nimport pickle\nimport numpy as np\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.sequence import pad_sequences\n\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, Flatten, Dense, LSTM\n\nimport matplotlib.pyplot as plt\n\nwith open('time_converted_df.pickle', 'rb') as f:\n [event_dict, df] = pickle.load(f)\n\nmax_len_event_id = df.events.apply(len).idxmax()\n# print(max_len_event_id)\n\nmax_len_event = df.iloc[max_len_event_id]\n\nmaxlen = len(max_len_event.events)\nprint(maxlen)\nexit(0)\nreversed_dict = {}\nfor k, v in event_dict.items():\n reversed_dict[v] = k\n\n# print(reversed_dict)\n\ndef map_event_list_to_idxs(event_list):\n list_idxs = []\n for event in event_list:\n idx = reversed_dict[event]\n list_idxs.append(idx)\n return list_idxs\n\nmap_event_list_to_idxs(max_len_event.events)\nsequences = df.events.apply(map_event_list_to_idxs).tolist()\n# print(map_event_list_to_idxs(max_len_event.events))\n# print(sequences[:5])\ndata = pad_sequences(sequences, maxlen=maxlen)\n# print(data)\nlabels = np.array(df.label)\nnp.random.seed(12)\n\nindices = np.arange(data.shape[0])\nnp.random.shuffle(indices)\ndata = data[indices]\nlabels = labels[indices]\n\ntraining_samples = int(len(indices) * .8)\nvalidation_samples = len(indices) - training_samples\n\nX_train = data[:training_samples]\ny_train = labels[:training_samples]\nX_valid = data[training_samples: training_samples + validation_samples]\ny_valid = labels[training_samples: training_samples + validation_samples]\n# print(X_train)\nnum_events = len(event_dict) + 1\n\nembedding_dim = 20\nembedding_matrix = np.random.rand(num_events, embedding_dim)\n\nunits = 32\n\nmodel = Sequential()\nmodel.add(Embedding(num_events, embedding_dim))\nmodel.add(LSTM(units, dropout=0.2, recurrent_dropout=0.2))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.layers[0].set_weights([embedding_matrix])\nmodel.layers[0].trainable = True\n\nmodel.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['acc'])\nhistory = model.fit(X_train, y_train,\n epochs=50,\n batch_size=32,\n validation_data=(X_valid, y_valid))\nmodel.save(\"mymodel_embedding_trainable_with_dropout.h5\")\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation 
loss')\nplt.legend()\n\nplt.show()","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"60883573","text":"\"\"\"\nthis is the most basic form of the guessing game\n\n\"\"\"\n\n\nimport random \n\nMAX_NUMBER= 10 #max number\nMIN_NUMBER = 1 #min number \n\nrandom_number = int(random.randint(MIN_NUMBER, MAX_NUMBER)) #making random number \n\n#this is just to help me so i know what the number is \n#print(random_number)\nwhile True: #main game loop \n try:\n #asking use for an number \n user_answer = int(input('guess a number bettween {} and {}: '.format(MIN_NUMBER, MAX_NUMBER)))\n \n if(user_answer == random_number): #they guessed the number\n print('you have guessed the number!!!')\n\n elif(user_answer < MIN_NUMBER) or (user_answer > MAX_NUMBER): #the number is too small or too big \n print('that number is not bettween {} and {}'.format(MIN_NUMBER, MAX_NUMBER))\n\n elif(user_answer > random_number): #if number is bigger than the random number\n print('that number is too big')\n \n elif(user_answer < random_number):#if number is smaller that the random number \n print('that number is to small')\n\n else: #anything else \n print('that is not an answer i can use')\n print('guess a number {} and {}'.format(MIN_NUMBER, MAX_NUMBER))\n\n except ValueError: #if they dont user a number \n print('that is not an answer i can use')\n print('guess a number {} and {}'.format(MIN_NUMBER, MAX_NUMBER))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"331606666","text":"import os\nimport sys\nimport time\nimport theano\nimport theano.tensor as T\nimport cPickle as pickle\nimport numpy as np\nfrom actlearn.data.casas import load_casas_from_file\nfrom actlearn.training_algorithms.cross_validation import run_cross_validation\nfrom actlearn.models.StackedDenoisingAutoencoder import StackedDenoisingAutoencoder\nfrom actlearn.utils.confusion_matrix import get_confusion_matrix\nfrom actlearn.utils.classifier_performance import performance_index\nfrom actlearn.data.AlFeature import AlFeature\n\n\ndef run_test(act_feature):\n \"\"\"\n :type act_feature: AlFeature\n :param act_feature:\n :return:\n \"\"\"\n x = act_feature.x\n y = act_feature.y\n num_classes = act_feature.num_enabled_activities\n input_x = T.matrix('x')\n # Fold number is set to 3\n num_fold = 3\n # Number of perceptrons in hidden layer\n num_hidden = 500\n numpy_rng = np.random.RandomState(int(time.clock()))\n model = StackedDenoisingAutoencoder(numpy_rng=numpy_rng, input=input_x,\n n_ins=x.shape[1], n_outs=num_classes,\n hidden_layers_sizes=[500, 500, 500],\n corruption_levels=[0.1, 0.2, 0.3])\n\n # After Get the feature, run 3-fold cross validation and search for best hyper-parameters\n performance = run_cross_validation(n=num_fold, num_classes=num_classes,\n data=x, label=y,\n train_func=casas_train, test_func=casas_test,\n model=model)\n sys.stdout.write('%22s\\t' % ' ')\n for performance_label in performance_index:\n sys.stdout.write('%20s\\t' % performance_label)\n sys.stdout.write('\\n')\n num_performance = len(performance_index)\n for i in range(num_classes):\n activity_label = act_feature.get_activity_by_index(i)\n sys.stdout.write('%22s\\t' % activity_label)\n for j in range(num_performance):\n sys.stdout.write('%20.5f\\t' % 
(performance[i][j] * 100))\n sys.stdout.write('\\n')\n\n\ndef casas_train(x, y, model):\n \"\"\"\n Training Function using Logistic Regression\n :param x: numpy.array training data\n :param y: numpy.array training labels\n :param model: an model to be trained (in this case: Logistic Regression Object)\n :return: None\n \"\"\"\n assert(type(model) == StackedDenoisingAutoencoder)\n assert(x.shape[0] == y.shape[0])\n\n x_tensor = theano.shared(np.asarray(x, dtype=theano.config.floatX), borrow=True)\n y_tensor = T.cast(theano.shared(y, borrow=True), 'int32')\n\n model.do_pretraining(data=x_tensor,\n num_data=x.shape[0], batch_size=10,\n learning_rate_array=[0.005, 0.005, 0.005, 0.005, 0.005, 0.005],\n num_epochs=15)\n model.do_fine_tuning(data=x_tensor, label=y_tensor,\n num_data=x.shape[0], batch_size=10,\n learning_rate_array=[0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005],\n num_epochs=36)\n\n\ndef casas_test(x, y, num_classes, model):\n \"\"\"\n Test Trained Logistic Regression Model\n :param x: numpy.array training data\n :type y: numpy.array\n :param y: numpy.array training labels\n :param num_classes: integer number of enabled classes\n :param model: an model to be trained (in this case: Logistic Regression Object)\n :return: numpy.array (confusion matrix)\n \"\"\"\n assert(type(model) == StackedDenoisingAutoencoder)\n x_tensor = theano.shared(np.asarray(x, dtype=theano.config.floatX), borrow=True)\n result = model.classify(x_tensor)\n predicted_y = result[0]\n confusion_matrix = get_confusion_matrix(num_classes=num_classes, label=y, predicted=predicted_y)\n return confusion_matrix\n\n\nif __name__ == '__main__':\n # Set current directory to local directory\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n # Go through all bosch datasets\n datasets = ['b1']\n for datafile in datasets:\n feature_filename = 'feature_' + datafile + '.pkl'\n # Looking for processed feature data\n if os.path.exists(feature_filename):\n feature_file = open(feature_filename, mode='r')\n feature_dict = pickle.load(feature_file)\n feature = AlFeature()\n feature.load_from_dict(feature_dict)\n else:\n feature = load_casas_from_file(datafile, datafile + '.translate')\n feature_file = open(feature_filename, mode='w')\n pickle.dump(feature.export_to_dict(), feature_file, protocol=-1)\n feature_file.close()\n run_test(feature)\n","sub_path":"examples/casas_sda/casas_sda.py","file_name":"casas_sda.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"425261969","text":"'''题目\n由于业绩优秀,公司给小Q放了 n 天的假,身为工作狂的小Q打算在在假期中工作、锻炼或者休息。\n他有个奇怪的习惯:不会连续两天工作或锻炼。只有当公司营业时,小Q才能去工作,只有当健身房营业时,\n小Q才能去健身,小Q一天只能干一件事。给出假期中公司,健身房的营业情况,求小Q最少需要休息几天。\n第一行一个整数 表示放假天数\n第二行 n 个数 每个数为0或1,第 i 个数表示公司在第 i 天是否营业\n第三行 n 个数 每个数为0或1,第 i 个数表示健身房在第 i 天是否营业\n(1为营业 0为不营业)\n一个整数,表示小Q休息的最少天数\n4\n1 1 0 0\n0 1 1 0\n\n2\n'''\n\nimport sys\nn = list(map(int,sys.stdin.readline().strip().split()))[0]\ncompany = list(map(int,sys.stdin.readline().strip().split()))\nexer = list(map(int,sys.stdin.readline().strip().split()))\ncompany2 = exer\nassert n == len(company) == len(exer)\n# 以上是公共代码\n\n# 0休息 1 工作 2健身 第i天选择为0/1/2下的最大工作天数\ndp = [[0 for i in range(n)] for _ in range(3)]\n'''\n注意第一天,选择休息,则工作天数=0,跟公司健身房开不开没有半毛钱关系!!!!\n'''\ndp[0][0] = 0\ndp[1][0] = 1 if company[0] == 1 else 0\ndp[2][0] = 1 if exer[0] == 1 else 0\n\n# 开始状态转换\nfor i in range(1,n):\n # 不论是否开门,都可以选择休息!!!!!!!不是只有今天公司和健身房都不开门才休息\n dp[0][i] = max(dp[0][i-1],dp[1][i-1],dp[2][i-1])\n if company[i] == 
1:\n # 公司开门\n dp[1][i] = max(dp[0][i-1],dp[2][i-1]) + 1\n if exer[i] == 1:\n # 健身房开门\n dp[2][i] = max(dp[0][i-1],dp[1][i-1]) + 1\nprint(n-max(dp[0][-1],dp[1][-1],dp[2][-1]))\n\n\n\n\n# 状态机那个图很重要!!!理解\n\n\n# 以下是错误解法,妄图使用一个dp表示一天的三种选择!!!!\n# dp = [0]*n\n# # 初始化第一天\n# dp[0] = 1 if company[0] == 1 or exer[0] == 1 else 0\n# # 初始化第二天\n\n# # 第二天全关\n# if company[1] == 0 and exer[1] == 0:\n# dp[1] = dp[0]\n# # 第一二天交叉开\n# elif (company[1] == exer[0] == 1) or (company[0] == exer[1] == 1):\n# dp[1] = 2\n# # 连续两天只有同一家开\n# elif (company[1] == company[0] == 1 and exer[1]== exer[0]== 0 ) or (company[1] == company[0] == 0 and exer[1]== exer[0]== 1 ):\n# dp[1] = 1\n# # 第一天不开第二天开\n# elif company[0] == 0 and exer[0] == 0 and ((company[1] == 1 or exer[1] == 1)):\n# dp[1] = 1\n# for i in range(2,n):\n# if exer[i] == company[i] == 1:\n# dp[i] = dp[i-1] + 1\n# elif exer[i] == company[i] == 0:\n# dp[i] = dp[i-1]\n# elif exer[i] == 1 and company[i] == 0:\n# # 健身房营业但是公司不营业\n# # if exer[i-1] == 1 and company[i-1] == 0:\n# # 前一天仍然同样情况\n# if company[i-1]== 1:\n# dp[i] = dp[i-1]+1\n# else:\n# dp[i] = max(dp[i-1],dp[i-2]+1)\n# elif exer[i] == 0 and company[i] == 1:\n# # 公司营业但是健身房不营业\n# # if exer[i-1] == 0 and company[i-1] == 1:\n# # 前一天仍然同样情况\n# if exer[i-1]== 1:\n# dp[i] = dp[i-1]+1\n# else:\n# dp[i] = max(dp[i-1],dp[i-2]+1)\n# print(n-dp[-1])","sub_path":"TX2020校园招聘-后台-假期.py","file_name":"TX2020校园招聘-后台-假期.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"431198312","text":"from collections import namedtuple\nfrom subprocess import Popen, PIPE\nfrom re import compile, IGNORECASE, MULTILINE\nfrom urllib.request import urlopen\n\nfrom six.moves.urllib.request import urlopen\n\n\n# min/avg/max/stddev\nPING_RESULTS_PATTERN = b\"(\\d+\\.\\d+)/(\\d+\\.\\d+)/(\\d+\\.\\d+)/(\\d+\\.\\d+)\"\nPACKET_LOSS_PATTERN = b\"(\\d+\\.?\\d+)%\"\ncompiled_ping_results_pattern = compile(PING_RESULTS_PATTERN, IGNORECASE)\ncompiled_packet_loss_pattern = compile(PACKET_LOSS_PATTERN, IGNORECASE)\n\n\nclass PingResults(namedtuple(\"PingResults\", [\"host\", \"count\", \"min\", \"max\", \"avg\", \"stddev\", \"percent_packet_loss\"])):\n def __new__(cls, host, count, min=None, max=None, avg=None, stddev=None, percent_packet_loss=None):\n return super(PingResults, cls).__new__(cls, host, count, min, max, avg, stddev, percent_packet_loss)\n\n def __str__(self):\n return \"Ping Results - host: {0.host} min/max/avg: {0.min}/{0.max}/{0.avg} loss: {0.percent_packet_loss}%\".format(self)\n\n def __repr__(self):\n return \"{0}(host={1.host}, min={1.min}, max={1.max}, avg={1.avg}, stddev={1.stddev}, percent_packet_loss={1.percent_packet_loss}%)\".format(self.__class__.__name__, self)\n\n\nclass PingFailedError(RuntimeError):\n def __init__(self, host):\n super(PingFailedError, self).__init__(\"Failed to ping {0}. 
Host possibly down.\".format(host))\n\n\ndef ping(host, ping_count=3):\n call_list = [\"ping\", \"-c\", str(ping_count), host]\n process = Popen(call_list, stdout=PIPE)\n results, err = process.communicate()\n time_results = compiled_ping_results_pattern.search(results)\n packet_loss_results = compiled_packet_loss_pattern.search(results)\n if time_results is None:\n raise PingFailedError(host)\n time_result_matches = time_results.groups()\n packet_loss_matches = packet_loss_results.groups()\n return PingResults(host=host,\n count=ping_count,\n min=float(time_result_matches[0]),\n avg=float(time_result_matches[1]),\n max=float(time_result_matches[2]),\n stddev=float(time_result_matches[3]),\n percent_packet_loss=float(packet_loss_matches[0]))\n\n\ndef is_online(host, ping_count=3):\n try:\n results = ping(host, ping_count=ping_count)\n return results.percent_packet_loss < 100.0\n\n except PingFailedError:\n return False\n\n\ndef get_external_ip_address():\n response = urlopen('http://www.myglobalip.com/')\n codec = response.info().get_param('charset', 'utf8')\n\n html = response.read()\n html = html.decode(codec)\n\n regex = compile(\"(.*?)\", MULTILINE)\n\n results = regex.search(html)\n\n ip_address = results.groups()[0]\n\n return ip_address","sub_path":"pytools/network/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"438713747","text":"# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz)\n# Center for Machine Perception, Czech Technical University in Prague\n\n# Calculates error of 6D object pose estimates.\n\nimport os\nimport sys\nimport glob\nimport yaml\n\nsys.path.append(os.path.abspath('..'))\nfrom pysixd import inout, pose_error, misc\nfrom params.dataset_params import get_dataset_params\n\n# Paths\n#-------------------------------------------------------------------------------\n# Sets of results to be evaluated\nresult_base = '/home/tom/th_data/cmp/projects/sixd/sixd_results/'\nresult_paths = [\n result_base + 'hodan-iros15-forwacv17_tless_primesense'\n]\n\n# Mask of path to the output file with calculated errors\nerrs_mpath = '{result_path}_eval/{scene_id:02d}_{eval_desc}.yml'\n\n# Parameters\n#-------------------------------------------------------------------------------\n# Top N pose estimates (according to their score) to be evaluated for each\n# object in each image\ntop_n_ests = 1 # None to consider all estimates\n\n# Pose error function\npose_error_fun = 'vsd' # 'vsd', 'adi', 'add', 'cou', 're', 'te'\n\n# VSD parameters\ndelta = 15\ntau = 20\n\neval_desc = 'error=' + pose_error_fun\nif pose_error_fun == 'vsd':\n eval_desc += '-delta=' + str(delta) + '-tau=' + str(tau)\n #eval_desc += '-tau=' + str(tau)\n\n# Error calculation\n#-------------------------------------------------------------------------------\nfor result_path in result_paths:\n info = os.path.basename(result_path).split('_')\n method = info[0]\n dataset = info[1]\n test_type = info[2] if len(info) > 2 else ''\n path = result_path\n\n # Select a type of the 3D object model for evaluation\n if dataset == 'tless':\n model_type = 'cad'\n else:\n model_type = ''\n\n # Load dataset parameters\n dp = get_dataset_params(dataset, model_type=model_type, test_type=test_type)\n\n # Load object models\n models = {}\n for obj_id in range(1, dp['obj_count'] + 1):\n models[obj_id] = inout.load_ply(dp['model_mpath'].format(obj_id))\n\n # Directories with results for individual scenes\n scene_dirs = 
glob.glob(os.path.join(result_path, '*'))\n\n for scene_dir in scene_dirs:\n scene_id = int(os.path.basename(scene_dir))\n\n # Load info and GT poses for the current scene\n scene_info = inout.load_info(dp['scene_info_mpath'].format(scene_id))\n scene_gt = inout.load_gt(dp['scene_gt_mpath'].format(scene_id))\n\n res_paths = glob.glob(os.path.join(scene_dir, '*.txt'))\n errs = {}\n for res_path in res_paths:\n # Parse image ID and object ID from the file name\n im_id, obj_id = os.path.basename(res_path).split('.')[0].split('_')\n\n # Load depth image if VSD is used for the evaluation\n if pose_error_fun == 'vsd':\n depth_path = dp['test_depth_mpath'].format(scene_id, im_id)\n depth_im = inout.read_depth(depth_path)\n depth_im *= dp['cam']['depth_scale'] # to [mm]\n\n # Load camera matrix\n if pose_error_fun in ['vsd', 'cou']:\n K = scene_info[im_id]['cam_K']\n\n # Get GT poses\n gts = [gt for gt in scene_gt[im_id] if gt['obj_id'] == obj_id]\n\n # Load pose estimates\n ests = inout.load_poses(res_path)\n\n # Sort the estimates by score\n ests = sorted(ests, key=lambda x: x['score'])\n\n # Consider only the top N estimated poses\n ests = ests[slice(0, top_n_ests)]\n\n errs.setdefault(im_id, {}).setdefault(obj_id, [])\n model = models[obj_id]\n for est in ests:\n est_errs = []\n R_est = est['cam_R_m2c']\n t_est = est['cam_t_m2c']\n\n for gt in gts:\n err = -1.0\n R_gt = gt['cam_R_m2c']\n t_gt = gt['cam_t_m2c']\n\n if pose_error_fun == 'vsd':\n err = pose_error.vsd(R_est, t_est, R_gt, t_gt, model,\n depth_im, delta, tau, K)\n elif pose_error_fun == 'add':\n err = pose_error.add(R_est, t_est, R_gt, t_gt, model)\n elif pose_error_fun == 'adi':\n err = pose_error.adi(R_est, t_est, R_gt, t_gt, model)\n elif pose_error_fun == 'cou':\n err = pose_error.cou(R_est, t_est, R_gt, t_gt, model,\n dp['test_im_size'], K)\n elif pose_error_fun == 're':\n err = pose_error.re(R_est, R_gt)\n elif pose_error_fun == 'te':\n err = pose_error.te(t_est, t_gt)\n\n est_errs.append(err)\n errs[im_id][obj_id].append(est_errs)\n\n print('Saving calculated errors...')\n errs_path = errs_mpath.format(result_path=result_path,\n scene_id=scene_id,\n eval_desc=eval_desc)\n misc.ensure_dir(os.path.basename(errs_path))\n with open(errs_path, 'w') as f:\n yaml.dump(errs, f, width=10000, Dumper=yaml.CDumper)\n","sub_path":"tools/eval_calc_errors.py","file_name":"eval_calc_errors.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"625206181","text":"# https://leetcode.com/problems/combination-sum/\n\nfrom typing import List\n\n\nclass Solution:\n # RECURSION + BACKTRACKING (DFS)\n # Add numbers to the combination until you hit the target\n # If adding the number would exceed target, do not add it\n # TIME: O(N^# combinations + 1)\n # This is DFS of n-ary tree where # of steps = # of nodes in the tree\n # At each node, constant time to process\n # Duplicating the array (to store in output) would, at worst case be at a leaf node of this n-ary tree\n # Which means it would take O(target / min(candidate))\n # Number of nodes in an N-ary tree of (# combinations) height is N^(# combiations + 1)\n # SPACE: O(number of combinations)\n # Prefix / recursion stack is upper bounded by O(target / min(candidates))\n # Output array is upper bounded by # of combinations possible\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n self.combinations = []\n candidates.sort()\n self.findCombinations(candidates, target, 0, 
[])\n return self.combinations\n\n # pass in i to avoid duplicate combinations in a different order\n def findCombinations(self, candidates, remaining, i, prefix):\n if remaining == 0:\n self.combinations.append(list(prefix)) # deep copy\n return\n\n for curI in range(i, len(candidates)):\n num = candidates[curI]\n\n # this is where sorting the array could be an optimization\n # but that depends on the distribution of candidates, w.r.t value of target\n if remaining - num < 0:\n break\n\n prefix.append(num)\n self.findCombinations(candidates, remaining - num, curI, prefix)\n prefix.pop()\n\n\ns = Solution()\nprint(s.combinationSum(candidates=[2, 3, 6, 7], target=7)) # [[2,2,3],[7]]\nprint(\n s.combinationSum(candidates=[2, 3, 5], target=8)\n) # [[2, 2, 2, 2], [2, 3, 3], [3, 5]]\nprint(s.combinationSum(candidates=[2], target=1)) # []\n","sub_path":"combination-sum/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"75715504","text":"import segyio\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm_notebook as tqdm\nfrom obspy.io.segy.core import _read_segy\n\nSEGYIO_HEADER_ITEMS = {\n 'EnergySourcePoint': \"SPID\",\n 'SourceX': \"SRCX\",\n 'SourceY': \"SRCY\",\n 'GroupX': \"GRPX\",\n 'GroupY': \"GRPY\",\n\n 'offset': \"OFFSET\",\n 'INLINE_3D': \"INLINE\",\n 'CROSSLINE_3D': \"XLINE\",\n}\n\nSEISEE_HEADER_ITEMS = {\n \"Trace index in file\": \"IDX\",\n \"Trace number within field record\": \"TRCFLD\",\n \"SP - Energy source point number\": \"SPID\",\n \"CDP ensemble number\": \"CDP\",\n \"Distance from source point to receiv grp\": \"OFFSET\",\n \"Receiver group elevation\": \"GRPZ\",\n \"Surface elevation at source\": \"SRCZ\",\n \"Source X coordinate\": \"SRCX\",\n \"Source Y coordinate\": \"SRCY\",\n \"Group X coordinate\": \"GRPX\",\n \"Group Y coordinate\": \"GRPY\",\n \"CDP X\": \"CDPX\",\n \"CDP Y\": \"CDPY\",\n \"Inline Number\": \"ILINE\",\n \"Clossline Number\": \"XLINE\",\n}\n\n\nPRIME_TIME_COLUMNS = ['SRCX', 'SRCY', 'SRCZ', 'GRPX', 'GRPY', 'GRPZ', 'FB']\n\nsrc_cols = ['SRCX', 'SRCY', 'SRCZ']\ngrp_cols = ['GRPX', 'GRPY', 'GRPZ']\n\nsrc_o_cols = ['SRCX', 'SRCY']\ngrp_o_cols = ['GRPX', 'GRPY']\n\n\ndef header_info(df, name=None):\n \"\"\"\n Print statistics of segy header\n :param df: sgy header DataFrame\n :param name: header name\n :return:\n \"\"\"\n if not isinstance(name, str):\n name = str(name)\n\n heading = \"{} SGY HEADER: '{:^20}' {}\".format('>' * 10, name, '<' * 10)\n print(heading)\n print('df columns: ', np.sort(df.columns.tolist()))\n print('>>> {:15} {:.0f}'.format('df len:', len(df)))\n if 'SRCID' in df.columns:\n print('>>> {:15} {:.0f}'.format('No of Shots:', df['SRCID'].nunique()))\n\n if 'GRPID' in df.columns:\n print('>>> {:15} {:.0f}'.format('No of Groups:', df['GRPID'].nunique()))\n\n info_min = '>>> min'\n info_max = '>>> max'\n info_line = ' ' * 7\n for c in ['SRCX', 'SRCY', 'SRCZ']:\n if c not in df.columns:\n continue\n info_min += \" |'{}' {:12.2f}|\".format(c[-1], df[c].min())\n info_max += \" |'{}' {:12.2f}|\".format(c[-1], df[c].max())\n info_line += '-' * 19\n\n if any(np.intersect1d(['SRCX', 'SRCY', 'SRCZ'], df.columns)):\n print('\\n')\n print('>>>>>> Source Geometry Info')\n print(info_min)\n print(info_line)\n print(info_max)\n\n info_min = '>>> min'\n info_max = '>>> max'\n info_line = ' ' * 7\n for c in ['GRPX', 'GRPY', 'GRPZ']:\n if c not in df.columns:\n continue\n info_min += \" |'{}' 
{:12.2f}|\".format(c[-1], df[c].min())\n info_max += \" |'{}' {:12.2f}|\".format(c[-1], df[c].max())\n info_line += '-' * 19\n\n if any(np.intersect1d(['GRPX', 'GRPY', 'GRPZ'], df.columns)):\n print('\\n')\n print('>>>>>> Group Geometry Info')\n print(info_min)\n print(info_line)\n print(info_max)\n\n print('>' * 55)\n\n\ndef read_sgy_traces(filename, idx, verbose=True, ignore_geometry=True):\n \"\"\"\n Reading set of traces by its ID from '.*sgy' file. Reading by 'segyio'.\n :param filename: str path to sgy file\n :param idx: 1D list or array of traces ID\n :param verbose: show reading progress bar True/False\n :param ignore_geometry: ignore geometry checking of 'sgy' True/False\n :return: 2D numpy array of traces (nr - num. of traces, ns - num. of samples)\n \"\"\"\n data = []\n if verbose:\n iteration = tqdm(idx)\n else:\n iteration = idx\n\n with segyio.open(filename, ignore_geometry=ignore_geometry) as src:\n for i in iteration:\n tmp = src.trace[i]\n data.append(tmp)\n\n return np.array(data, ndmin=2, dtype=np.float32)\n\n\ndef read_segy_file_obspy(filename):\n \"\"\"\n Read segy with obspy\n :param filename:\n :return:\n \"\"\"\n segy = _read_segy(filename)\n return np.array([x.data for x in segy], ndmin=2, dtype=np.float32)\n\n\ndef read_header_segyio(filename, fields=None, ignore_geometry=True, converter=SEGYIO_HEADER_ITEMS, verbose=False):\n \"\"\"\n Reading header of 'sgy' with 'segyio'.\n :param filename: str path to sgy file\n :param ignore_geometry: ignore_geometry: ignore geometry checking of 'sgy' True/False\n :param fields: list of 'sgy' headers to use. Default :\n 'EnergySourcePoint',\n 'SourceX',\n 'SourceY',\n 'GroupX',\n 'GroupY',\n 'offset',\n 'INLINE_3D',\n 'CROSSLINE_3D',\n :param converter: rename column names to be more useful\n :param verbose: ...\n :return: pandas DataFrame\n \"\"\"\n if not fields:\n fields = list(SEGYIO_HEADER_ITEMS.keys())\n head = {}\n with segyio.open(filename, ignore_geometry=ignore_geometry) as segyfile:\n for h in fields:\n column = converter[h]\n head[column] = segyfile.attributes(eval('segyio.TraceField.{}'.format(h)))[:]\n df = pd.DataFrame(head)\n\n df['IDX'] = df.index\n return df\n\n\ndef read_header_segyio_full(filename, ignore_geometry=True, drop_nonunique=True):\n \"\"\"\n Read all fields of segy file by segyio to DataFrame\n :param filename:\n :param ignore_geometry:\n :return:\n \"\"\"\n\n with segyio.open(filename, ignore_geometry=ignore_geometry) as segyfile:\n columns = [str(x) for x in segyfile.header[0].keys()]\n\n values = [dict(x.items()).values() for x in segyfile.header]\n\n header = pd.DataFrame(values, columns=columns)\n header['IDX'] = header.index\n\n header = header.T[header.nunique() > 1].T\n header = header.rename(columns=SEGYIO_HEADER_ITEMS)\n\n return header\n\n\ndef read_seisee_header_info(filename):\n \"\"\"\n Read header of 'sgy' header written by Seisee\n :param filename: \n :return: \n \"\"\"\n header_info = []\n i = 0\n with open(filename, \"r\") as f:\n while True:\n line = f.readline()\n\n if line.startswith(\"+-\"):\n break\n\n line = line.replace(\"*\", \" \")\n line = line.replace(\"+\", \" \")\n line = line[8:]\n line = \" \".join(line.split())\n header_info.append([i, line])\n i += 1\n return header_info\n\n\ndef read_header_seisee(filename, fields=None, converter=SEISEE_HEADER_ITEMS, verbose=False):\n \"\"\"\n Read Seisee header to Pandas DataFrame \n :param filename: \n :param fields: \n :param converter: \n :param verbose: \n :return: \n \"\"\"\n if not fields:\n fields = 
list(SEISEE_HEADER_ITEMS.keys())\n\n header_info = read_seisee_header_info(filename)\n use_cols = [x[0] for x in header_info if x[1] in fields]\n names = [converter[x[1]] for x in header_info if x[1] in fields]\n skip_rows = len(header_info) + 1\n\n df = pd.read_csv(\n filename,\n skiprows=skip_rows,\n sep=\"\\s+\",\n header=None,\n usecols=use_cols,\n names=names,\n dtype=int,\n )\n df[\"IDX\"] -= 1\n return df","sub_path":"src/fbpick/read_segy.py","file_name":"read_segy.py","file_ext":"py","file_size_in_byte":7628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"150074477","text":"from Network import CriticNet\nfrom Hyparameter import sampling_size\nfrom PSR import PSR\nfrom Environment import POMDPEnvironment\nimport numpy as np\nfrom LogUtil import logger\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()\n\n\n# agent module for learning\nclass agent(object):\n def __init__(self, action_space, length_or):\n self.action_space = action_space\n self.epsilon = 0.5\n self.initial_epsilon = 0.5\n self.final_epsilon = 0.05\n self.explore = 500\n self._memory = []\n self.max_memory = 2000 # number of previous transitions to remember\n self.observation_id = None\n self.sampling_size = sampling_size\n self.count = 0\n self.length_or = length_or\n\n # set the dimensionality of predictive state and action space and state space and discount rate for neural network\n def set_state_dim(self, state_dim, action_space, state_space, discount_rate):\n self.Net = CriticNet(state_dim=state_dim, action_space=action_space, state_space=state_space, discount_rate=discount_rate, length_or=self.length_or)\n\n # return the action_index it will act\n def taking_action(self, Predictive_State):\n if np.random.rand() < self.epsilon:\n action_index = np.random.randint(low=0, high=len(self.action_space), size=1, dtype=np.int)\n else:\n action_index, Optimal_pro_z, all_Dis = self.Net.selecting_optimal_action(Predictive_State=Predictive_State, net='origin')\n # print('under current predictive state:', Predictive_State)\n # for i in range(len(all_Dis[0])):\n # print('for action id:' + str(i), 'expectation:'+str(all_Dis[0][i]))\n if action_index.shape == (1,):\n action_index = action_index[0]\n else:\n print('exception on taking action function')\n return action_index\n\n def train_agent(self, PSR):\n if self.epsilon > self.final_epsilon:\n self.epsilon -= (self.initial_epsilon - self.final_epsilon) / self.explore\n if self.sampling_size > len(self._memory):\n _index = np.random.randint(low=0, high=len(self._memory), size=len(self._memory), dtype=np.int)\n else:\n _index = np.random.randint(low=0, high=len(self._memory), size=self.sampling_size, dtype=np.int)\n samples = np.array(self._memory)[_index]\n length = len(samples)\n #######################################################################################\n #try all possible actions\n _length_a = len(PSR.Actions)\n samples = list(samples) * _length_a\n _samples = None\n for i in range(length):\n a = samples[i:len(samples):length]\n if _samples is None:\n _samples = a\n else:\n _samples = np.concatenate([_samples, a], axis=0)\n _action = np.array(list(np.reshape(a=np.arange(0, _length_a, 1, np.int), newshape=(-1,))) * length)\n _samples[:, 1] = _action\n samples = _samples[:, :]\n #######################################################################################\n length = len(samples)\n _length_or = 
(len(PSR.Observations) * len(PSR.R_list))\n samples = list(samples) * _length_or\n _samples = None\n for i in range(length):\n a = samples[i:len(samples):length]\n if _samples is None:\n _samples = a\n else:\n _samples = np.concatenate([_samples, a], axis=0)\n length_o = len(PSR.Observations)\n length_r = len(PSR.R_list)\n o = np.array(list(np.reshape(a=np.arange(0, length_o, 1, np.int), newshape=(-1, 1))) * length_r)\n r = np.array(list(np.reshape(a=np.arange(0, length_r, 1, np.int), newshape=(-1, 1))) * length_o)\n _o = None\n for i in range(length_o):\n a = o[i:len(o):length_o]\n if _o is not None:\n _o = np.concatenate([_o, a], axis=0)\n else:\n _o = a\n _or = np.concatenate([_o, r], axis=-1)\n _or = np.array(list(_or)*length)\n _samples = np.array(_samples)\n samples = np.concatenate([_samples, _or], axis=1)\n\n a_id = samples[:, 1]\n o_id = samples[:, 2]\n r_id = samples[:, 3]\n R_id = np.asarray(r_id, np.int32)\n R = np.array(PSR.R_list)[R_id]\n R = np.reshape(R, (-1, 1))\n samples = np.concatenate([samples, R], axis=1)\n Predictive_State = samples[:, 0]\n Next_Predictive_State, Next_Predictive_State_Probability = PSR.update_batch(a_id=a_id, o_id=o_id, r_id=r_id, Predictive_State=Predictive_State)\n #######################################################################################\n Next_Predictive_State_Probability = np.reshape(a=Next_Predictive_State_Probability, newshape=(-1, 1))\n Next_Predictive_State = np.reshape(a=Next_Predictive_State, newshape=(-1, np.shape(PSR.b_h)[1]))\n samples = np.concatenate([samples, Next_Predictive_State_Probability, Next_Predictive_State], axis=1)\n\n self.Net._train(samples=samples)\n self.count = self.count + 1\n if self.count % 2 == 0:\n self.Net.target_train()\n self.Net.origin_Net.save_weights(filepath='origin_net_Mountain_Car.h5', overwrite=True)\n self.Net.target_Net.save_weights(filepath='target_net_Mountain_Car.h5', overwrite=True)\n\n def randomly_sample_an_predictive_state(self):\n index = np.random.randint(low=0, high=len(self._memory), size=1)\n return self._memory[index[0]][0]\n\n # storing the action and relative information in memory for learning\n def replay_memory(self, Last_Predictive_State):\n self._memory.append((Last_Predictive_State, 0))\n if len(self._memory) > self.max_memory:\n self._memory.pop(-1)\n# Environment Module for responding actions from agent\n# class Environment(object):\n# def __init__(self, R, Observation, O, State_Sapce, Transition_Matrix):\n# self.tiger_state_index = np.random.randint(low=0, high=len(State_Sapce), size=1, dtype=np.int)[0]\n# self.R = R\n# self.OBSERVATION = Observation\n# self.O = O\n# self.State_Space = State_Sapce\n# self.Transition_Matrix = Transition_Matrix\n#\n# # tiger will randomly move after an agent acts open-left or open-right\n# def tiger_shift(self, action_idx):\n# state = np.arange(0, len(self.State_Space), 1, np.int32)\n# T = self.Transition_Matrix[action_idx][self.tiger_state_index]\n# new_state = np.random.choice(a=state, size=1, p=T)\n# self.tiger_state_index = new_state[0]\n# return self.tiger_state_index\n#\n# # rendering the corresponding reward\n# def obtain_reward(self, agent_action_index, current_state, next_state, observation):\n# R = self.R[(agent_action_index, current_state, next_state, observation)]\n# return R\n#\n# # receiving the action taken by an agent and transit back the observation and reward\n# def receive_action(self, action_idx):\n# current_state = self.tiger_state_index\n# next_state = current_state\n# if action_idx == 2:\n# next_state = 
self.tiger_shift(action_idx=action_idx)\n# if next_state != current_state:\n# print('error on tiger shift!')\n# print('the tiger actually is in s'+str(current_state))\n# else:\n# next_state = self.tiger_shift(action_idx=action_idx)\n# o_list = []\n# for i in range(len(self.OBSERVATION)):\n# o_list.append(self.O[(action_idx, next_state, i)])\n# o_id_list = np.arange(0, len(self.OBSERVATION), 1, dtype=np.int)\n# o_list = np.array(o_list)\n# o_id = np.random.choice(a=o_id_list, size=1, p=o_list)\n# print('the observation probability:', o_list)\n# reward = self.obtain_reward(action_idx, current_state, next_state, o_id[0])\n# return [reward, o_id[0]]\n\n#this environment is driven by PSR\n\nclass Environment(object):\n def __init__(self, Predictive_State, State_Space, Observation_Space, Reward_Space, m_ao, m_name):\n self.Predictive_State = Predictive_State\n self.S_Space = State_Space\n self.O_Space = Observation_Space\n self.R_Space = Reward_Space\n self.m_name = m_name\n self.m_ao = m_ao\n\n def get_all_possible_or(self, a_id):\n length_o = len(self.O_Space)\n length_r = len(self.R_Space)\n o = np.array(list(np.reshape(a=np.arange(0, length_o, 1, np.int), newshape=(-1, 1))) * length_r)\n r = np.array(list(np.reshape(a=np.arange(0, length_r, 1, np.int), newshape=(-1, 1))) * length_o)\n _o = None\n for i in range(length_o):\n a = o[i:len(o):length_o]\n if _o is not None:\n _o = np.concatenate([_o, a], axis=0)\n else:\n _o = a\n _a = np.ones(shape=(len(_o), 1), dtype=np.int) * a_id\n return [_a, _o, r]\n\n def Update_Predictive_State(self, Predictive_State):\n self.Predictive_State = Predictive_State\n\n def receive_action(self, action_idx):\n a_id_int, o_id_int, r_id_int = self.get_all_possible_or(a_id=action_idx)\n a_id = np.array(a_id_int, dtype=np.str)\n o_id = np.array(o_id_int, dtype=np.str)\n r_id = np.array(r_id_int, dtype=np.str)\n a = np.broadcast_to(array='a', shape=np.shape(a_id))\n o = np.broadcast_to(array='o', shape=np.shape(o_id))\n r = np.broadcast_to(array='r', shape=np.shape(r_id))\n a_id = np.core.defchararray.add(a, a_id)\n o_id = np.core.defchararray.add(o, o_id)\n r_id = np.core.defchararray.add(r, r_id)\n test = np.core.defchararray.add(a_id, np.core.defchararray.add(o_id, r_id))\n index = np.searchsorted(self.m_name, test)\n m_ao = np.array(self.m_ao)[index]\n m_ao = np.reshape(a=m_ao, newshape=(-1, np.shape(self.m_ao[0])[0], np.shape(self.m_ao[0])[1]))\n P_aor = np.matmul(a=self.Predictive_State.T, b=m_ao)\n P_aor = np.reshape(a=P_aor, newshape=(-1, ))\n P_aor = np.round(a=P_aor, decimals=12)\n value = P_aor[P_aor < 0]\n if len(value) > 0:\n print('error on receive action')\n print('the predictive state is:', self.Predictive_State.T)\n tmp = P_aor[:]\n tmp[tmp < 0] = 0\n print('the probability of sum:', np.sum(a=tmp, axis=-1))\n P_aor[P_aor<0] = 0\n index = np.arange(start=0, stop=len(test), step=1, dtype=np.int)\n _aor_idx = np.random.choice(a=index, size=1, p=P_aor)\n _aor_idx = int(_aor_idx)\n R = self.R_Space[int(r_id_int[_aor_idx])]\n o = o_id_int[_aor_idx]\n return [R, o[0]]\n\n\nif __name__ == \"__main__\":\n #####initialization########################################################\n ## dynamic loading an pomdp environment file and generate transition matrix T and observation matrix O with others\n EnvObject = POMDPEnvironment(filename='MountainCar.POMDP')\n T = EnvObject._obtain_transition()\n O = EnvObject._obtain_observation()\n Z = EnvObject.Z\n R_Matrix = EnvObject._obtain_reward_matrix()\n b_h = EnvObject._obtain_b_h()\n b_h = np.reshape(a=b_h, 
newshape=(1, -1))\n discount_rate = EnvObject.discount\n Observations = EnvObject.observations\n States = EnvObject.states\n Actions = EnvObject.actions\n length_or = len(Observations) * len(R_Matrix)\n ############################################################################\n TOTAL_REWARD = 0\n EPISODE_REWARD = 0\n maximum_episode = 40000\n episode_length = 30\n # initializing Agent Env and PSR three modules\n Agent = agent(action_space=Actions, length_or=length_or)\n PSR = PSR(T=T, O=O, b_h=b_h, Observations=Observations, Actions=Actions, R_Matrix = R_Matrix)\n testset = PSR.generate_tests()\n U_T = PSR.Computing_U_T()\n U_Q_Name, U_Q = PSR.generate_U_Q()\n PSR.Predictive_State = PSR.return_predictive_state()\n Agent.set_state_dim(state_dim=PSR.gain_core_tests_dim(), action_space=Actions, state_space=States, discount_rate=discount_rate)\n #Agent.Net.origin_Net.load_weights(filepath='origin_net_Mountain_Car_uniform_noise.h5')\n #Agent.Net.target_Net.load_weights(filepath='target_net_Mountain_Car_uniform_noise.h5')\n PSR.gain_m()\n PSR.gain_M_ao()\n #set an initial predictive state and starts learning process\n Current_Predictive_State = PSR.Predictive_State\n initial_Predictive_State = Current_Predictive_State\n print('predictive state:', Current_Predictive_State)\n Env = Environment(Predictive_State=Current_Predictive_State, State_Space=States, Observation_Space=Observations, Reward_Space=PSR.R_list, m_ao=PSR.m, m_name=PSR.m_name)\n\n ###################################################################\n #measurement\n E_Optimal_Actions = []\n E_Optimal_Actions_std = []\n Epoch_Open_Count = []\n Epoch_Open_Reward = []\n Open_Reward = 0\n Avg_R_open = []\n count_open = 0\n ###################################################################\n\n for j in range(maximum_episode):\n for i in range(episode_length):\n action_idx = Agent.taking_action(Predictive_State=Current_Predictive_State)\n reward, Agent.observation_id = Env.receive_action(action_idx=action_idx)\n EPISODE_REWARD += reward\n r_id = PSR.R_list.index(reward)\n Next_Predictive_State = PSR.update(action_idx=action_idx, observation_id=Agent.observation_id, r_id=r_id)\n\n ############################################################\n if action_idx == 1 or action_idx == 2:\n count_open = count_open + 1\n Open_Reward = Open_Reward + reward\n #############################################################\n\n Agent.replay_memory(Last_Predictive_State=Current_Predictive_State)\n if (i+1) % episode_length == 0:\n print('the i is:', i)\n Agent.train_agent(PSR=PSR)\n #############################################################\n if count_open != 0:\n Avg_Reward = EPISODE_REWARD / np.float(count_open)\n Avg_Real_Reward_Open = Open_Reward / np.float(count_open)\n Avg_R_open.append(Avg_Reward)\n Epoch_Open_Reward.append(Avg_Real_Reward_Open)\n Epoch_Open_Count.append(count_open)\n a_id, Optimal_pro_z, all_Dis = Agent.Net.selecting_optimal_action(\n Predictive_State=initial_Predictive_State, net='origin')\n std = np.sqrt(np.sum(Optimal_pro_z[0] * np.square(Agent.Net.z - np.sum(Agent.Net.z*Optimal_pro_z[0]))))\n E = Agent.Net.z * Optimal_pro_z[0]\n E = np.sum(a=E, axis=-1)\n E_Optimal_Actions.append(E)\n E_Optimal_Actions_std.append(std)\n\n np.save(file='Avg_reward_open.npy', arr=Avg_R_open)\n np.save(file='E_Optimal_Actions.npy', arr=E_Optimal_Actions)\n np.save(file='E_Optimal_Actions_std.npy', arr=E_Optimal_Actions_std)\n np.save(file='Avg_real_reward_open.npy', arr=Epoch_Open_Reward)\n np.save(file='Epoch_Open_Count.npy', 
arr=Epoch_Open_Count)\n #############################################################\n Current_Predictive_State = Next_Predictive_State\n Env.Update_Predictive_State(Predictive_State=Current_Predictive_State)\n logger.info('this' + str(j) +'episode rewards is:'+str(EPISODE_REWARD))\n TOTAL_REWARD += EPISODE_REWARD\n EPISODE_REWARD = 0\n count_open = 0\n Open_Reward = 0\n Predictive_State = Agent.randomly_sample_an_predictive_state()\n PSR.Predictive_State = Predictive_State\n Env.Update_Predictive_State(Predictive_State=Predictive_State)\n logger.info('after 600 episode, the total reward is:'+str(TOTAL_REWARD))\n np.save(file='Avg_reward_open.npy', arr=Avg_R_open)\n np.save(file='E_Optimal_Actions.npy', arr=E_Optimal_Actions)\n np.save(file='E_Optimal_Actions_std.npy', arr=E_Optimal_Actions_std)\n np.save(file='Avg_real_reward_open.npy', arr=Epoch_Open_Reward)\n np.save(file='Epoch_Open_Count.npy', arr=Epoch_Open_Count)","sub_path":"MC_Measure_different_range_fix_uniform_noise/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":16413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"108049017","text":"import asyncio\n\nimport dotenv\n\ndotenv.load_dotenv('.test.env') # noqa\n\nimport pytest\n\nfrom moonwalking import main\nfrom moonwalking import settings\n\nfrom eth_keys.datatypes import PublicKey, PrivateKey\nfrom eth_utils.currency import to_wei\nfrom eth_utils.address import to_checksum_address\n\n\ndef private_key_to_checksum_address(key):\n if key.startswith('0x'):\n key = key[2:]\n return PublicKey.from_private(\n PrivateKey(bytes.fromhex(key))\n ).to_checksum_address()\n\n\nclass EthHelperMixin:\n MAIN_ADDR = private_key_to_checksum_address(settings.BUFFER_ETH_PRIV)\n\n @classmethod\n def register(cls):\n pass\n\n\nclass EthHelper(EthHelperMixin, main.Ethereum):\n async def send_money(self, addr, amount):\n nonce = await self.post(\n 'eth_getTransactionCount',\n self.MAIN_ADDR,\n )\n tx = {\n 'from': self.MAIN_ADDR,\n 'to': addr,\n 'value': to_wei(amount, 'ether'),\n 'gas': 22000,\n 'gasPrice': to_wei(8, 'gwei'),\n 'chainId': 1,\n 'nonce': nonce,\n }\n return await self.post('eth_sendTransaction', tx)\n\n\nclass LndHelper(EthHelperMixin, main.Lendingblock):\n async def create_contract(self):\n tx_hash = await self.post('eth_sendTransaction', {\n 'from': self.MAIN_ADDR,\n 'gas': 4000000,\n 'gasPrice': 100,\n 'data': settings.LND_CONTRACT['bytecode'],\n })\n receipt = await self.post(\n 'eth_getTransactionReceipt',\n tx_hash\n )\n return receipt['contractAddress']\n\n\n@pytest.fixture(autouse=True)\ndef loop():\n \"\"\"Return an instance of the event loop.\"\"\"\n loop = asyncio.new_event_loop()\n yield loop\n loop.close()\n\n\n@pytest.fixture()\ndef eth_helper():\n yield EthHelper()\n\n\n@pytest.fixture()\nasync def lnd_helper(mocker):\n lnd_helper = LndHelper()\n contract_addr = await lnd_helper.create_contract()\n mocker.patch(\n 'moonwalking.blocks.eth_generic.EthereumGeneric.get_contract_addr',\n lambda self: to_checksum_address(contract_addr),\n )\n yield lnd_helper\n\n\nasync def calc_fee_mock(self, tx):\n return 500\n\n\nasync def get_gas_price_mock(self):\n return to_wei(10, 'gwei')\n\n\n@pytest.fixture()\nasync def fee_mocker(mocker):\n mocker.patch(\n 'moonwalking.main.Bitcoin.calc_fee',\n calc_fee_mock\n )\n mocker.patch(\n 'moonwalking.main.Litecoin.calc_fee',\n calc_fee_mock\n )\n mocker.patch(\n 'moonwalking.main.BitcoinCash.calc_fee',\n lambda x, y, z: 500\n )\n mocker.patch(\n 
'moonwalking.blocks.eth_generic.EthereumGeneric.get_gas_price',\n get_gas_price_mock\n )\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"509721853","text":"from flask import Flask, render_template, request, jsonify, redirect, flash, url_for\nfrom database import db_session\nfrom models import URLModel\nimport string\n\napp = Flask(__name__)\napp.config['SERVER_NAME'] = 'localhost:5000'\napp.secret_key = 'some_secret'\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n\tdb_session.remove()\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n\ndef genCode(num):\n\tarr = []\n\twhile num > 0:\n\t\tarr.append(num % 62)\n\t\tnum //= 62\n\t\n\tarr.reverse()\n\tcode = ''\n\t\n\tfor item in arr:\n\t\tif item < 26:\n\t\t\tcode += chr(ord('a') + item)\n\t\telif item < 52:\n\t\t\tcode += chr(ord('A') + item - 26)\n\t\telse:\n\t\t\tcode += chr(ord('0') + item - 52)\n\treturn code\n\n\n@app.route('/generate/')\ndef generate():\n\trequest_url = request.args.get('url')\n\n\t# redirect won't work unless you have the scheme\n\tif string.find(request_url, 'http') == -1:\n\t\trequest_url = 'http://' + request_url\n\n\t# prevent looping\n\tif string.find(request_url, app.config['SERVER_NAME']) != -1:\n\t\treturn jsonify(hasError=True)\n\n\tlong_url = URLModel.query.filter(URLModel.longURL == request_url).first()\n\tif long_url:\n\t\treturn jsonify(shortURL=long_url.shortURL)\n\telse:\n\t\tnewURL = URLModel(request_url, '')\n\t\tdb_session.add(newURL)\n\t\tdb_session.flush()\n\t\tnewURL.shortURL = genCode(newURL.id)\n\t\tdb_session.commit()\n\t\treturn jsonify(shortURL=newURL.shortURL)\n\n\n@app.route('/url/')\ndef reroute(url_id):\n\trequest_url = URLModel.query.filter(URLModel.shortURL == url_id).first()\n\tif not request_url:\n\t\tflash('This shortcode (%s) is not in the system!' 
% (url_id))\n\t\treturn redirect(url_for('index'))\n\telse:\n\t\treturn redirect(request_url.longURL, code=302)\n\n\nif __name__ == '__main__':\n\tapp.run(debug=True)\n","sub_path":"compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"33326079","text":"import numpy as np\n\n\n# set_of_known_documents_space and unknown_document_space in representation space\ndef dissimilarity_counter_method(set_of_known_documents_space, unknown_document_space, similarity_measure, threshold=None):\n if threshold is None:\n threshold = len(set_of_known_documents_space)/2\n count = 0\n for known_document in set_of_known_documents_space:\n smin = 1\n for other_known_document in set_of_known_documents_space:\n if id(known_document) == id(other_known_document):\n continue\n similarity = similarity_measure(known_document, other_known_document)\n if smin > similarity:\n smin = similarity\n if similarity_measure(unknown_document_space, known_document) > smin:\n count += 1\n if count > threshold:\n return True\n else:\n return False\n\n\n# Returns a random decision if there is no majority\ndef dissimilarity_counter_method_voting(sets_of_known_documents_space, unknown_documents_space, threshold, similarity_measure):\n known = 0\n unknown = 0\n for (set_of_known_documents, unknown_document) in zip(sets_of_known_documents_space, unknown_documents_space):\n result = dissimilarity_counter_method(set_of_known_documents, unknown_document,threshold, similarity_measure)\n if result is True:\n known += 1\n else:\n unknown += 1\n if known > unknown:\n return True\n if unknown > known:\n return False\n else:\n print('Returning random decision because there is no majority')\n if np.random.random() < 0.5:\n return True\n else:\n return False","sub_path":"dissimilarity_counter_method.py","file_name":"dissimilarity_counter_method.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"11697875","text":"# Here's an attempt to recode the perl script that threads the QTL finding wrapper into python.\n# Instead of having a wrapper to call python scripts, we'll use a single script to launch everything. This avoids having to reparse the data (even though it is fast).\n\n# Ok, so now we're going to try a heuristic to accelerate the QTL addition step.\n# The heuristic will be to scan every X QTLs instead of every single one. Once we find a good one, we only scan the x*2 positions around the top hit. 
I am hoping that this will give at least 2 times faster searches.\n\nimport string\nimport numpy as np\nfrom scipy import linalg\nimport sys\nimport csv\nimport itertools\nimport time\nimport random\nimport argparse\nimport os\ncwd = os.getcwd()\nimport psutil\nprocess = psutil.Process(os.getpid())\n\nimport multiprocessing as mp\nfrom multiprocessing import Pool\n\n#sys.path.append('/n/desai_lab/users/klawrence/BBQ/alldata')\n#sys.path.append('/n/home00/nnguyenba/scripts/BBQ/alldata')\ntry:\n\tsys.path.append('/n/home00/nnguyenba/scripts/BBQ/alldata')\nexcept:\n\tsys.path.append('/n/holyscratch01/desai_lab/nnguyenba/BBQ/all_data')\n\tpass\nfrom spore_defs import *\n\n# Read SNP map\n#SNP_reader = csv.reader(open('/n/desai_lab/users/klawrence/BBQ/alldata/BYxRM_nanopore_SNPs.txt','r'),delimiter='\\t')\n#SNP_reader = csv.reader(open('/n/home00/nnguyenba/scripts/BBQ/alldata/BYxRM_nanopore_SNPs.txt','r'),delimiter='\\t')\nSNP_reader = csv.reader(open('/n/holyscratch01/desai_lab/nnguyenba/BBQ/all_data/BYxRM_nanopore_SNPs.txt','r'),delimiter='\\t')\n\ngenome_str = genome_str_to_int(next(SNP_reader))\nSNP_list = genome_to_chroms(genome_str)\nnum_chroms = len(SNP_list)\nnum_SNPs = [len(x) for x in SNP_list]\nnum_SNPs_total = sum(num_SNPs)\n#print(num_SNPs,file=sys.stdout,flush=True)\n#print(num_SNPs_total,file=sys.stdout,flush=True)\nchrom_startpoints = get_chrom_startpoints(genome_str)\nchrom_endpoints = get_chrom_endpoints(genome_str)\n\n# print(chrom_startpoints) [0, 996, 4732, 5291, 9327, 11187, 12476, 16408, 18047, 20126, 23101, 26341, 30652, 33598, 35398, 39688]\n# print(chrom_endpoints) [994, 4730, 5289, 9325, 11185, 12474, 16406, 18045, 20124, 23099, 26339, 30650, 33596, 35396, 39686, 41608]\n# print(num_SNPs) [995, 3735, 558, 4035, 1859, 1288, 3931, 1638, 2078, 2974, 3239, 4310, 2945, 1799, 4289, 1921]\n#exit()\n\n# Systematically check every positions\n\nfrom argparse import ArgumentParser, SUPPRESS\n# Disable default help\nparser = ArgumentParser(add_help=False)\nrequired = parser.add_argument_group('required arguments')\noptional = parser.add_argument_group('optional arguments')\n\n# Add back help \noptional.add_argument(\n '-h',\n '--help',\n action='help',\n default=SUPPRESS,\n help='show this help message and exit'\n)\nrequired.add_argument('--fit', help='Plain text two-column file containing the fitnesses and the standard errors.')\noptional.add_argument('--log', help='Plain text file logging the progress of the QTL search.', default=\"output.txt\")\noptional.add_argument('--oCV', help='Outside cross-validation value (k = 0-9)', type=int, default=0)\noptional.add_argument('--iCV', help='Inside cross-validation value (l = 0-8)', type=int, default=0)\noptional.add_argument('--model', help='Whether to fit on the training set (m = 0), on the train+test set (m = 1) or on the complete data (m = 2)', type=int, default=0)\noptional.add_argument('--dir', help='Directory where intermediate files are found.', default=cwd)\noptional.add_argument('--scratch', help='Local scratch directory', default='/n/holyscratch01/desai_lab/nnguyenba/BBQ/all_data/genomes/')\noptional.add_argument('--refine', help='Refine every X QTLs, default is 5. 
0 means never refine.', default=5, type=int)\noptional.add_argument('--unweighted', help='Only run the forward search on unweighted data.', default=0, type=int)\noptional.add_argument('--cpu', help='Number of threads to run on.', default=16, type=int)\noptional.add_argument('--nosave', help='Set to 1 to avoid saving the npy progress files.', default=0, type=int)\noptional.add_argument('--maxqtl', help='Number of QTLs to find.', default=300, type=int)\noptional.add_argument('--downsample', help='Number of segregants to downsample.', default=0, type=int)\noptional.add_argument('--sporelist', help='Restrict searches to a list of spores.')\n\nargs = parser.parse_args()\n\nprint(args, file=sys.stderr)\n\noutside_CV = args.oCV # Goes from 0 to 9 # k = 10\ninside_CV = args.iCV # Goes from 0 to 8 # l = 9\n\nif(outside_CV > 9 or outside_CV < 0):\n\tprint(\"--oCV must be [0,9]\")\n\texit()\n\nif(inside_CV > 8 or inside_CV < 0):\n\tprint(\"--iCV must be [0,8]\")\n\texit()\n\nif(~np.isin(args.model , range(3))):\n\tprint(\"--model must be [0,2]\")\n\texit()\n\nif(args.refine == 0):\n\targs.refine = np.Infinity\n\n# Read in the fitness data\nfitnesses_data = np.loadtxt(args.fit)\n# Parse and see if it has standard errors\n\nif(len(fitnesses_data.shape) != 2 or args.unweighted == 1):\n\t# No errors found, assume all errors the same.\n\n\tif(len(fitnesses_data.shape) == 1):\n\t\tfitnesses_data = np.reshape(fitnesses_data,(-1,1))\n\n\tfitnesses = fitnesses_data[:,0]\n\terrors = np.ones(len(fitnesses_data))\nelse:\n\tfitnesses = fitnesses_data[:,0]\n\terrors = fitnesses_data[:,1]\n\nerrors = np.square(errors)\nerrors = np.reciprocal(errors)\n\n\nseed = 100000\nnp.random.seed(seed) # This allows us to keep the same cross validation sets.\n\n# If we are restricting search to a list of spores, then need to parse the list of spores.\nsporelist = np.array(range(len(fitnesses)))\nif(args.sporelist):\n\tsporelist = np.loadtxt(args.sporelist, dtype=int)\n\n# First let's take care of the outside CV\n\nif(args.downsample > 0 and args.downsample < len(sporelist)):\n\t#fitnesses = fitnesses[0:args.downsample]\n\t#errors = errors[0:args.downsample]\n\tsporelist = sporelist[0:args.downsample]\n\nperm = np.random.permutation(sporelist)\ntrain_perm = perm.copy()\n\nif(args.model != 2):\n\ttrain_perm = np.delete(train_perm, np.r_[outside_CV/10 * len(sporelist):(outside_CV + 1)/10 * len(sporelist)].astype(int),axis=0)\n\tvalidation_perm = np.take(perm, np.r_[outside_CV/10 * len(sporelist):(outside_CV + 1)/10 * len(sporelist)].astype(int))\n\n\tif(args.model != 1):\n\t\t# Ok now let's take care of the inside CV\n\t\t# To do this, we split the train_perm into a train/test permutation\n\t\ttest_perm = np.take(train_perm, np.r_[inside_CV/9 * len(train_perm):(inside_CV + 1)/9 * len(train_perm)].astype(int))\n\t\ttrain_perm = np.delete(train_perm, np.r_[inside_CV/9 * len(train_perm):(inside_CV + 1)/9 * len(train_perm)].astype(int))\n\n\n# We're doing a k*l fold validation procedure, where l = k-1.\n# This allows us to only create 10 test sets, and only 10 validation sets, so the cross validation loops do not explode.\n# For example, let the 80 - 10 - 10 (train - test - validation) split\n# We can use the same validation for the following split: 10 - 80 -10 (test - train - validation)\n# Now looking at that split, we can use the same test to do the following: 10 - 10 - 80 (test - validation - train)\n\n# We will only 'train' on a subset of the data\ntrain_set = np.take(fitnesses,train_perm) # This is 80% of the fitness 
data\nerrors = np.take(errors,train_perm)\n\nphenotypes = train_set[~np.isnan(train_set)] # Is a numpy.ndarray\nmean_phenotypes = np.mean(phenotypes)\nTSS = np.sum((phenotypes-mean_phenotypes)**2)\nerrors = errors[~np.isnan(train_set)]\nnum_usable_spores = len(phenotypes)\n\n# Open all the genotype files\ngenotypes_file = []\nnum_lines_genotypes = []\nchr_to_scan = []\nstart = time.perf_counter()\nfor i in range(16):\n\t#genotypes_file.append(np.load(str(args.scratch) + \"/chr\"+str(i+1)+\"_pos_major.npy\", mmap_mode=\"r\")) # Uses 30 gb. Need to load once to cache into memory. Then subsequent searches are near instant.\n\tgenotypes_file.append(np.load(str(args.scratch) + \"/chr\"+str(i+1)+\"_pos_major.npy\"))\n\tnum_lines_genotypes.append(genotypes_file[i].shape[0])\n\tchr_to_scan.append(i)\n\tprint(str(i) + \"\t\" + str(time.perf_counter() - start) + \"\t\" + str(process.memory_info().rss/1024/1024),file=sys.stderr)\n\n\n# Here we will handle whether the script has been restart or whether we are starting from scratch.\n# Open the log file.\ncurrent_likelihood = np.Infinity\ncurrent_pos_line = \"\"\ncurrent_beta_line = \"\"\ncurrent_progress_line = \"\"\nflag_refined_pos = 0\n\ngeno_file = \"\"\nQ_file = \"\"\nR_file = \"\"\nnum_QTLs = 0\n\nif(os.path.isfile(args.dir + \"/\" + args.log)):\n\twith open(args.dir + \"/\" + args.log,'r') as readfile:\n\t\tlinecount = 0\n\t\tfor line in readfile:\n\t\t\tline = line.rstrip()\n\t\t\tif(linecount % 4 == 0):\n\t\t\t\tcurrent_likelihood = line\n\t\t\telif(linecount % 4 == 1):\n\t\t\t\tcurrent_pos_line = line\n\t\t\telif(linecount % 4 == 2):\n\t\t\t\tcurrent_beta_line = line\n\t\t\telif(linecount % 4 == 3):\n\t\t\t\tcurrent_progress_line = line\n\t\t\tlinecount = linecount + 1\n\n\t\t# split the progress_line into the relevant flags\n\t\tif(linecount > 0):\n\t\t\tarr = current_progress_line.split(\"\\t\")\n\t\t\tgeno_file = arr[0]\n\t\t\tQ_file = arr[1]\n\t\t\tR_file = arr[2]\n\t\t\tif(arr[3] == \"find_new\"):\n\t\t\t\tflag_refined_pos = 1 # Need to refine\n\t\t\tnum_QTLs = int(arr[4])\n\n\n# Read in the file of previous computations if we have found QTLs before. 
Otherwise, generate them.\nprev_pos = []\nprev_genotypes = []\nprev_pos = np.array(prev_pos, dtype=np.int32)\nprev_genotypes = np.array(prev_genotypes)\nq = []\nr = []\nif(num_QTLs != 0):\n\t# This is restarting.\n\tprev_pos = np.fromstring(current_pos_line, dtype=int, sep=\"\t\")\n\tflag_load_prev = 0\n\n\ttry:\n\t\tprev_genotypes = np.load(args.dir + \"/\" + geno_file)\n\texcept:\n\t\tflag_load_prev = 1\n\t\tpass\n\n\tsize_of_prev_genome = (prev_pos.size)\n\n\t# Consistent prev_pos and prev_genotypes?\n\tif(flag_load_prev == 1 or prev_genotypes.shape[1] != size_of_prev_genome):\n\t\t# We have to remake it from the prev_pos line.\n\t\tprev_genotypes = np.ones((num_usable_spores,size_of_prev_genome))\n\t\tfor pos_index in range(len(prev_pos)):\n\t\t\tpos = prev_pos[pos_index]\n\t\t\tchr_qtl = np.searchsorted(np.array(chrom_startpoints), pos+0.5)\n\t\t\tstart_of_chr = chrom_startpoints[chr_qtl-1]\n\t\t\tpos_in_chr = pos - start_of_chr\n\n\t\t\tpos_line = genotypes_file[chr_qtl-1][pos_in_chr]\n\t\t\tpos_line = np.take(pos_line, train_perm)\n\t\t\tpos_line = pos_line[~np.isnan(train_set)]\n\t\t\t\n\t\t\tprev_genotypes[:,pos_index] = pos_line.copy()\n\t\t\n\t\tbase_genotypes = np.ones((num_usable_spores,1+size_of_prev_genome))\n\t\tbase_genotypes[:,1:] = prev_genotypes # First index is the intercept.\n\t\tq,r = np.linalg.qr(base_genotypes * np.sqrt(np.reshape(errors,(num_usable_spores,1))))\n\n\telse:\t\n\t\n\t\t# Do we have q,r?\n\t\tflag_remake = 0\n\t\tif(os.path.isfile(args.dir + \"/\" + Q_file) and os.path.isfile(args.dir + \"/\" + R_file)):\n\t\t\t#q = np.load(args.dir + \"/\" + Q_file) \n\t\t\t#r = np.load(args.dir + \"/\" + R_file)\n\n\t\t\ttry:\n\t\t\t\tq = np.load(args.dir + \"/\" + Q_file)\n\t\t\texcept:\n\t\t\t\tflag_remake = 1\n\t\t\t\tpass\n\n\t\t\ttry:\n\t\t\t\tr = np.load(args.dir + \"/\" + R_file)\n\t\t\texcept:\n\t\t\t\tflag_remake = 1\n\t\t\t\tpass\n\t\telse:\n\t\t\tflag_remake = 1\n\n\t\tif(flag_remake == 1):\n\t\t\t# Remake\n\t\t\tbase_genotypes = np.ones((num_usable_spores,1+size_of_prev_genome))\n\t\t\tbase_genotypes[:,1:] = prev_genotypes # First index is the intercept.\n\t\t\tq,r = np.linalg.qr(base_genotypes * np.sqrt(np.reshape(errors,(num_usable_spores,1))))\n\n\t\nelse:\n\tsize_of_prev_genome = 0\n\n\n# Ok, we've now reloaded all the previous computations. \n# Set up computation settings\npoolcount = args.cpu*2\nnum_chrom_to_scan = len(genotypes_file)\n\ndef find_QTL(num):\n\tlowest_RSS = np.Infinity\n\tgenome_at_lowest_RSS = []\n\tpos_index_at_lowest_RSS = 0\n\tlast_q = []\n\n\t#start = time.clock()\n\tfor chr in range(num_chrom_to_scan):\n\t\tloc = chrom_startpoints[chr_to_scan[chr]]\n\t\tfor i in range(0 + num, num_lines_genotypes[chr_to_scan[chr]], poolcount):\n\t\t\tif(np.isin(loc+i, prev_pos)):\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tgenome_line = genotypes_file[chr_to_scan[chr]][i]\n\t\t\t# Remove genomes that have no phenotypes\n\t\t\t# We need to remove genomes that have no phenotypes and genomes that aren't in the train set\n\t\t\tgenomes = np.take(genome_line,train_perm)\n\t\t\tgenomes = genomes[~np.isnan(train_set)] \n\t\t\tgenomes = np.reshape(genomes,(num_usable_spores,1)) # A N row by 1 column matrix\n\t\t\t\n\t\t\tWX = genomes * np.sqrt(np.reshape(errors,(num_usable_spores,1))) # X = X * sqrt(W) -> N by 1\n\t\t\tQtX = np.dot(np.transpose(q),WX) # Gets the scale for each vectors in Q. # Q^t * X -> k by 1\n\t\t\tQtX_Q = np.einsum('ij,j->i',q,np.ravel(QtX)) # Dot product of Q and Q^t * X, but shaped as a single vector. 
This is the sum of all the projections of the new genotype on Q\n\t\t\torthogonalized = WX-np.reshape(QtX_Q,(num_usable_spores,1)) # Orthogonalize: Remove the projections from the real vector.\n\t\t\tnew_q = orthogonalized/np.linalg.norm(orthogonalized) # Orthonormalize: Now do final conversion.\n\t\t\t# This gets the last column of Q.\n\t\t\t# We only need the last column of Q to get the new residuals. We'll assemble the full Q or the full R if we need it (i.e. to obtain betas).\n\n\t\t\tq_upTy = np.einsum('i,i', np.ravel(new_q), phenotypes * np.sqrt(errors))\n\t\t\tq_upq_upTy = np.ravel(new_q) * q_upTy\n\t\t\tpredicted_fitnesses = initial_predicted_fitnesses + q_upq_upTy/np.sqrt(errors)\n\n\t\t\t# Scale the intercept term\n\t\t\tmean_predicted_fitnesses = np.mean(predicted_fitnesses)\n\t\n\t\t\t# RSS\n\t\t\tRSS = np.sum((phenotypes - mean_phenotypes - predicted_fitnesses + mean_predicted_fitnesses)**2) # This is the RSS for 1:1 line.\n\t\t\t#print(str(loc+i) + \"\t\" + str(RSS))\n\n\t\t\tif(RSS < lowest_RSS):\n\t\t\t\tlowest_RSS = RSS\n\t\t\t\tgenome_at_lowest_RSS = genomes.copy()\n\t\t\t\tpos_index_at_lowest_RSS = loc+i # Position is zero indexed.\n\t\t\t\tlast_q = np.ravel(new_q)\n\t\t\t\t#last_r = np.ravel(np.dot(np.transpose(q_up),WX))\n\n\n\t# Now return the values\n\treturn lowest_RSS,genome_at_lowest_RSS,pos_index_at_lowest_RSS, last_q\n\n\ndef refine_positions(num):\n\tlowest_RSS = np.Infinity\n\tgenome_line_lowest_RSS = []\n\tpos_index_at_lowest_RSS = -1\n\tlast_q = []\n\tstart_index = chrom_startpoints[chr_of_snp-1]\n\tfor scan_pos in range(left_bracket + num, right_bracket + 1, poolcount):\n\t\tgenome_line = genotypes_file[chr_of_snp-1][scan_pos-start_index]\n\n\t\t# Remove the genomes that are not in the train set\n\t\tpos_line = np.take(genome_line,train_perm)\n\t\t\t\n\t\t# Remove the genomes that have no phenotypes\n\t\tpos_line = pos_line[~np.isnan(train_set)]\n\t\tpos_line = np.reshape(pos_line,(num_usable_spores,1)) # A N row by 1 column matrix\n\n\t\tWX = pos_line * np.sqrt(np.reshape(errors,(num_usable_spores,1)))\n\t\tQtX = np.dot(np.transpose(q_down),WX) # Gets the scale for each vectors in Q.\n\t\tQtX_Q = np.einsum('ij,j->i',q_down,np.ravel(QtX))\n\t\torthogonalized = WX-np.reshape(QtX_Q,(num_usable_spores,1)) # Orthogonalize\n\t\tnew_q = orthogonalized/np.linalg.norm(orthogonalized) # Orthonormalize\n\t\t# This gets the last column of Q.\n\t\t# Now let's get the last column of R.\n\t\t# Assemble q_up\n\t\tq_upTy = np.einsum('i,i', np.ravel(new_q), phenotypes * np.sqrt(errors))\n\t\tq_upq_upTy = np.ravel(new_q) * q_upTy\n\t\tpredicted_fitnesses = initial_predicted_fitnesses + q_upq_upTy/np.sqrt(errors)\n\t\t\t\n\t\t# Scale the intercept term\n\t\tmean_predicted_fitnesses = np.mean(predicted_fitnesses)\n\t\n\t\t# RSS\n\t\tRSS = np.sum((phenotypes - mean_phenotypes - predicted_fitnesses + mean_predicted_fitnesses)**2) # This is the RSS for 1:1 line.\n\t\t#print(str(scan_pos) + \"\t\" + str(RSS))\n\t\tif(RSS < lowest_RSS):\n\t\t\tlowest_RSS = RSS\n\t\t\tgenome_line_lowest_RSS = pos_line.copy()\n\t\t\tpos_index_at_lowest_RSS = scan_pos\n\t\t\tlast_q = np.ravel(new_q)\n\n\n\t# Return the values\n\treturn lowest_RSS,genome_line_lowest_RSS,pos_index_at_lowest_RSS, last_q\n\n# Let's code it to run a loop of fixed size instead for now, so it'll be easier to think about it.\n\nwhile(num_QTLs < args.maxqtl):\n\n\t# Put the intercept\n\tbase_genotypes = np.ones((num_usable_spores,1+size_of_prev_genome))\n\tbase_genotypes[:,1:] = prev_genotypes # First index is the 
intercept.\n\n\tif(num_QTLs == 0): # If num_QTLs is not zero, then it was loaded previously.\n\t\tq,r = np.linalg.qr(base_genotypes * np.sqrt(np.reshape(errors,(num_usable_spores,1))))\n\n\tif(flag_refined_pos == 0):\n\t\tstart_find_new = time.time()\n\t\t# Obtain the initial predicted fitness\n\t\tinitial_beta = linalg.solve_triangular(r,np.dot(np.transpose(q), phenotypes * np.sqrt(errors)), check_finite=False) # 3.49s for 10000 loops\n\t\t# first beta index is the intercept term.\n\t\tinitial_predicted_fitnesses = np.dot(q,np.dot(r,initial_beta))*1/np.sqrt(errors) # Optimal multiplication order\n\n\t\t# Time to search for a new QTL\n\t\tp = Pool(poolcount)\n\t\tresults = p.map(find_QTL, range(poolcount))\n\t\tp.close()\n\t\tp.join()\n\n\t\t# Now parse the results\n\t\tlowest_RSS = np.Infinity\n\t\tgenome_at_lowest_RSS = []\n\t\tpos_index_at_lowest_RSS = 0\n\t\tbeta_at_lowest_RSS = []\n\t\tlast_q = []\n\n\t\tfor i in range(len(results)):\n\t\t\tRSS = results[i][0]\n\n\t\t\tif(RSS < lowest_RSS):\n\t\t\t\tlowest_RSS = RSS\n\t\t\t\tgenome_at_lowest_RSS = results[i][1]\n\t\t\t\tpos_index_at_lowest_RSS = results[i][2] # Position is zero indexed\n\t\t\t\tlast_q = results[i][3]\t\n\n\t\t\n\t\t# Update Q/R to get the the new Beta\n\t\tq_up = np.zeros([q.shape[0],q.shape[1]+1])\n\t\tq_up[:,:-1] = q\n\t\tq_up[:,-1] = last_q\n\t\t# Compute R\n\t\tlast_r = np.ravel(np.dot(np.transpose(q_up),genome_at_lowest_RSS * np.sqrt(np.reshape(errors,(num_usable_spores,1)))))\n\t\tr_up = np.zeros([r.shape[0]+1,r.shape[1]+1])\n\t\tr_up[:-1,:-1] = r\n\t\tr_up[:,-1] = last_r\n\t\tbeta = linalg.solve_triangular(r_up,np.dot(np.transpose(q_up), phenotypes * np.sqrt(errors)), check_finite=False) \n\n\t\t# Update the logs\n\t\tlikelihood = num_usable_spores * math.log(lowest_RSS/num_usable_spores)\n\t\twith open(args.dir + \"/\" + args.log, \"a+\") as logfile:\n\t\t\tprint(likelihood, file=logfile)\n\t\t\tif(size_of_prev_genome > 0):\n\t\t\t\tprint(*prev_pos, sep=\"\\t\", end=\"\", file=logfile)\n\t\t\t\tprint(\"\\t\", end=\"\", file=logfile)\n\t\t\tprint(pos_index_at_lowest_RSS, file=logfile)\n\t\t\tprint(*beta[1:len(beta)], file=logfile)\n\t\t\tprint(\"pickle_geno.npy\" + \"\t\" + \"pickle_q.npy\" + \"\t\" + \"pickle_r.npy\" + \"\t\" + \"find_new\" + \"\t\" + str(num_QTLs + 1), file=logfile)\n\t\t\n\n\t\t# Update the values (remove intercept)\n\t\tbase_genotypes[:,:-1] = prev_genotypes\n\t\tbase_genotypes[:,size_of_prev_genome] = np.matrix.flatten(genome_at_lowest_RSS)\n\t\tq = q_up\n\t\tr = r_up\n\t\tif(args.nosave == 0):\n\t\t\tnp.save(args.dir + \"/\" + \"pickle_geno\", base_genotypes) # No longer has the intercept term\n\t\t\tnp.save(args.dir + \"/\" + \"pickle_q\", q)\n\t\t\tnp.save(args.dir + \"/\" + \"pickle_r\", r)\n\n\t\tnum_QTLs = num_QTLs + 1\n\t\tflag_refined_pos = 1\n\t\tprev_pos = np.append(prev_pos, pos_index_at_lowest_RSS)\n\t\tprev_genotypes = base_genotypes\n\t\tsize_of_prev_genome = (prev_pos.size)\n\t\tchr_of_snp = np.searchsorted(np.array(chrom_startpoints),pos_index_at_lowest_RSS + 0.5) # Deal with ties\n\n\t\tprint(\"Found new QTL (\" + str(num_QTLs) + \") @ \" + str(chr_of_snp) + \". Took : \" + str(time.time() - start_find_new) + \" seconds. 
Likelihood: \" + str(likelihood), file=sys.stderr)\n\t\t\n\n\t#elif(flag_refined_pos == 1 and num_QTLs > 1):\n\telif(flag_refined_pos == 1 and num_QTLs % args.refine == 0):\n\t\t# Must refine the positions\n\t\t# We need to sort all the positions and the genotyping array.\n\t\tstart_refine = time.time()\n\n\t\t# What was the chr of the last index?\n\t\tchr_last_qtl = np.searchsorted(np.array(chrom_startpoints),prev_pos[len(prev_pos)-1]+0.5)\n\n\t\tsorted_indexes = np.argsort(prev_pos) \n\t\t# prev_pos is a list of QTL positions. sorted_index gives the indexes that would sort prev_pos. So prev_pos has QTL positions (0 to 45000), such as 2,8,5,6\n\t\t# So sorted_indxes returns an array of size num_QTLs, where each value is the order you would have to take the index to sort the array. in the example, it would be 0, 2, 3, 1\n\t\tsorted_prev_pos = np.take(prev_pos,sorted_indexes) # sorted_prev_pos is a sorted prev_pos array. # We then sort prev_pos with the sorted index. Returns an array 2,5,6,8\n\t\treverse_sorted_indexes = np.argsort(sorted_indexes) # This reverses the sorting procedure, which returns the 'order' of the original list, or the position where the value has ended up in. It would be: 0, 3, 1, 2\n\n\t\t# We sorted the positions so that we know where to refine the positions of a QTL.\n\t\titerations_max = len(sorted_prev_pos)\n\t\tlowest_RSS = np.Infinity\n\t\tprevious_refined = -1\n\t\tfor iterations in range(iterations_max):\n\t\t\tsorted_pos_index = np.random.randint(0, len(sorted_prev_pos)) # Numpy is range exclusive (can never return len(sorted_prev_pos) in this case).\n\t\t\tchr_of_snp = np.searchsorted(np.array(chrom_startpoints),sorted_prev_pos[sorted_pos_index] + 0.5) # Deal with ties\n\n\n\t\t\tif(sorted_pos_index == previous_refined):\n\t\t\t#if(sorted_pos_index == previous_refined or chr_of_snp != chr_last_qtl): # Heuristic to only refine the last chromosome\n\t\t\t\tcontinue\n\n\t\t\tprevious_refined = sorted_pos_index\n\n\n\t\t\t# Column downdating of QR.\n\t\t\tq_down,r_down = linalg.qr_delete(q,r, sorted_indexes[sorted_pos_index]+1,1,\"col\",check_finite=False)\n\t\t\t\n\t\t\tinitial_beta = linalg.solve_triangular(r_down,np.dot(np.transpose(q_down), phenotypes * np.sqrt(errors)), check_finite=False) # 3.49s for 10000 loops # Beta for the WEIGHTED phenotypes.\n\t\t\t# first beta index is the intercept term.\n\t\t\tinitial_predicted_fitnesses = np.dot(q_down,np.dot(r_down,initial_beta))*1/np.sqrt(errors) # Optimal multiplication order # Obtain the predicted fitnesses in the unweighted world.\n\n\t\t\t# Now we go through the bracketed positions, and obtain the likelihoods for every position\n\t\t\t# First, let's check the left and right side of the position of interest. 
If the likelihood is worse, we do not update.\n\t\n\t\t\tleft_bracket = chrom_startpoints[chr_of_snp-1] # Minimally the beginning of the chromosome\n\t\t\tif(sorted_prev_pos[sorted_pos_index]-16 > left_bracket):\n\t\t\t\tleft_bracket = sorted_prev_pos[sorted_pos_index]-16\n\t\t\tif(sorted_pos_index > 0):\n\t\t\t\tif(sorted_prev_pos[sorted_pos_index-1]+1 > left_bracket):\n\t\t\t\t\tleft_bracket = sorted_prev_pos[sorted_pos_index-1]+1\n\n\t\t\t# Now find the right bracket\n\t\t\tright_bracket = chrom_endpoints[chr_of_snp-1]\n\t\t\tif(sorted_prev_pos[sorted_pos_index]+16 < right_bracket):\n\t\t\t\tright_bracket = sorted_prev_pos[sorted_pos_index]+16\n\t\t\tif(sorted_pos_index < len(sorted_prev_pos)-1):\n\t\t\t\tif(sorted_prev_pos[sorted_pos_index+1]-1 < right_bracket):\n\t\t\t\t\tright_bracket = sorted_prev_pos[sorted_pos_index+1]-1\n\n\t\t\tleft_bracket = int(left_bracket)\n\t\t\tright_bracket = int(right_bracket)\n\n\t\t\tp = Pool(poolcount)\n\t\t\tresults = p.map(refine_positions, range(poolcount))\n\t\t\tp.close()\n\t\t\tp.join()\n\n\t\t\t# Parse the results\n\t\t\tlowest_RSS = np.Infinity\n\t\t\tgenome_line_lowest_RSS = []\n\t\t\tpos_index_at_lowest_RSS = -1\n\t\t\tfor i in range(len(results)):\n\t\t\t\tRSS = results[i][0]\n\t\t\t\tif(RSS < lowest_RSS):\n\t\t\t\t\tlowest_RSS = RSS\n\t\t\t\t\tgenome_line_lowest_RSS = results[i][1]\n\t\t\t\t\tpos_index_at_lowest_RSS = results[i][2]\n\n\t\t\t# We now have the best results.\n\t\t\t# Did the position change? If not, then we do nothing.\n\t\t\tif(pos_index_at_lowest_RSS != sorted_prev_pos[sorted_pos_index]):\n\t\t\t\t# Else, we update the position\n\t\t\t\tsorted_prev_pos[sorted_pos_index] = pos_index_at_lowest_RSS\n\t\t\n\t\t\t\t# We update the genome line\n\t\t\t\tprev_genotypes[:,sorted_indexes[sorted_pos_index]] = genome_line_lowest_RSS[:,0]\n\n\t\t\t\t# Update the QR\n\t\t\t\tq,r = linalg.qr_insert(q_down,r_down,genome_line_lowest_RSS * np.sqrt(np.reshape(errors,(num_usable_spores,1))),sorted_indexes[sorted_pos_index]+1,'col',check_finite=False) # Update the QR decomposition.\n\n\t\t# Done iterating.\n\t\t# Obtain betas\n\n\t\tbeta = linalg.solve_triangular(r,np.dot(np.transpose(q), phenotypes * np.sqrt(errors)), check_finite=False)\n\t\tprev_pos = sorted_prev_pos[reverse_sorted_indexes]\n\n\t\t# Output to log\n\t\t# Update the logs\n\t\tlikelihood = num_usable_spores * math.log(lowest_RSS/num_usable_spores)\n\n\t\twith open(args.dir + \"/\" + args.log, \"a+\") as logfile:\n\t\t\tprint(num_usable_spores * math.log(lowest_RSS/num_usable_spores), file=logfile)\n\t\t\tprint(*prev_pos, sep=\"\\t\", file=logfile)\n\t\t\tprint(*beta[1:len(beta)], file=logfile)\n\t\t\tprint(\"pickle_geno.npy\" + \"\t\" + \"pickle_q.npy\" + \"\t\" + \"pickle_r.npy\" + \"\t\" + \"refine\" + \"\t\" + str(num_QTLs), file=logfile)\n\n\t\t# Update the values\n\t\tif(args.nosave == 0):\n\t\t\tnp.save(args.dir + \"/\" + \"pickle_geno\", prev_genotypes) # No longer has the intercept term\n\t\t\tnp.save(args.dir + \"/\" + \"pickle_q\", q)\n\t\t\tnp.save(args.dir + \"/\" + \"pickle_r\", r)\n\t\tflag_refined_pos = 0\n\t\tprint(\"Attempted to refine QTL. Took : \" + str(time.time() - start_refine) + \" seconds. 
Likelihood: \" + str(likelihood), file=sys.stderr)\n\telse:\n\t\tflag_refined_pos = 0\nexit()\n\n\t\t\t\n","sub_path":"4_qtl_inference/step_QTL.py","file_name":"step_QTL.py","file_ext":"py","file_size_in_byte":24073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"416889404","text":"from django import template\n\nfrom day.models.delivery_models import Delivery\n\nregister = template.Library()\n\n@register.inclusion_tag('tags/delivery_table_template.html')\ndef unclaimed_delivery_table(date, user):\n deliveries = Delivery.find_deliveries(date, user)['unclaimed_deliveries']\n return {\n\t\t'deliveries': deliveries,\n\t\t'title': 'Unclaimed deliveries',\n\t\t'icon_type': 'warning-sign',\n\t\t'table_type': 'unclaimed',\n }\n\n@register.inclusion_tag('tags/delivery_table_template.html')\ndef your_delivery_table(date, user):\n deliveries = Delivery.find_deliveries(date, user)['your_in_progress_deliveries']\n return {\n\t\t'deliveries': deliveries,\n\t\t'title': 'Your in progress deliveries',\n\t\t'icon_type': 'asterisk',\n\t\t'table_type': 'your',\n\t}\n \n@register.inclusion_tag('tags/delivery_table_template.html')\ndef others_delivery_table(date, user):\n deliveries = Delivery.find_deliveries(date, user)['others_in_progress_deliveries']\n return {\n\t\t'deliveries': deliveries,\n\t\t'title': 'Others in progress deliveries',\n\t\t'icon_type': 'flag',\n\t\t'table_type': 'others',\n\t}\n\t\n@register.inclusion_tag('tags/delivery_table_template.html')\ndef paid_delivery_table(date, user):\n deliveries = Delivery.find_deliveries(date, user)['paid_deliveries']\n return {\n\t\t'deliveries': deliveries,\n\t\t'title': 'Paid deliveries',\n\t\t'icon_type': \"credit-card\",\n\t\t'table_type': 'paid',\n\t}\n\n@register.inclusion_tag('tags/delivery_table_template.html')\ndef billed_delivery_table(date, user):\n deliveries = Delivery.find_deliveries(date, user)['billed_deliveries']\n return {\n\t\t'deliveries': deliveries,\n\t\t'title': 'Billed deliveries',\n\t\t'icon_type': \"list-alt\",\n\t\t'table_type': 'billed',\n\t}\n\n@register.inclusion_tag('tags/delivery_summary_modal.html')\t\ndef delivery_summary_modal(delivery_id, size):\n\tdelivery = Delivery.objects.get(pk=delivery_id)\n\treturn {\n\t\t'delivery': delivery,\n\t\t'size': size,\n\t}\n\t\n@register.inclusion_tag('tags/delivery_view_basic_delivery_info.html')\ndef delivery_view_basic_delivery_info(delivery_id):\n\tdelivery = Delivery.objects.get(pk=delivery_id)\n\treturn {\n\t\t'delivery': delivery,\n\t}\n\t\n@register.inclusion_tag('tags/delivery_view_partnered_item.html')\ndef delivery_view_partnered_item(delivery_id):\n\tdelivery = Delivery.objects.get(pk=delivery_id)\n\treturn {\n\t\t'delivery': delivery,\n\t}\n\t\n@register.inclusion_tag('tags/delivery_view_non_partnered_item.html')\ndef delivery_view_non_partnered_item(delivery_id):\n\tdelivery = Delivery.objects.get(pk=delivery_id)\n\treturn {\n\t\t'delivery': delivery,\n\t}\n\t\n@register.inclusion_tag('tags/delivery_view_payment.html')\ndef delivery_view_payment(delivery_id):\n\tdelivery = Delivery.objects.get(pk=delivery_id)\n\treturn {\n\t\t'delivery': delivery,\n\t}\n\t\n@register.inclusion_tag('tags/delivery_view_delivery_totals.html')\ndef delivery_view_delivery_totals(delivery_id):\n\tdelivery = Delivery.objects.get(pk=delivery_id)\n\treturn {\n\t\t'delivery': 
delivery,\n\t}\n","sub_path":"day/templatetags/delivery_tags.py","file_name":"delivery_tags.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"108171390","text":"import numpy as np\nimport sys\n\nclass NearestNeighbor(object):\n \"\"\"\n This class implements the Nearest Neighbour Algorithm.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n pass\n\n def train(self, x, y):\n \"\"\"\n This function trains the nearest neighbor model.\n using x as the input data and y as the desired output.\n Arguments:\n x: input features. This must be a N x D matrix.\n y: desired outcome.\n \"\"\"\n # Nearest neighbor model just remembers all training data.\n self.Xtrain = x\n self.Ytrain = y\n\n\n def predict(self, x, n=1, k=1):\n \"\"\"\n This function predicts the data provided based\n on the trained model.\n Arguments:\n x: test data to be predicted. N x D matrix.\n n: N to be used in LNNorm.\n Returns:\n y: prediction of test data based on trained model.\n \"\"\"\n num_test = x.shape[0] # get number of test data.\n # instantiate y to all zeroes and ensure that it is same datatype as Ytrain.\n y = np.zeros(num_test, dtype = self.Ytrain.dtype)\n\n # Loop over test rows\n for i in xrange(num_test):\n if n > 0:\n distances = self.LNorm(x[i,:], n)\n else:\n print(\"Please select apropriate value for n.\")\n sys.exit(1)\n # Get the index of the K smallest distances\n k_smallest_index = np.argpartition(distances, k)[:k]\n # This returns bincount, where number is in index. eg count of 0, 1, 2, 3 etc.\n classes = np.bincount(self.Ytrain[k_smallest_index])\n y[i] = np.argmax(classes)\n return y\n\n def LNorm(self, x, n):\n \"\"\"\n This function returns the distance of the L N norm.\n Arguments:\n x: is a vector of features for 1 example.\n Returns:\n l: The L N distance between XTrain and the current vector.\n \"\"\"\n return np.power(np.sum(np.power(np.abs(self.Xtrain - x), n), axis = 1), 1/float(n))\n\n def accuracy(self, y_pred, y_true):\n \"\"\"\n This function returns the accuracy of the prediction.\n Arguments:\n y_pred: predicted outcome.\n y_true: actual true outcome.\n Returns:\n Accuracy of prediction compared to actual output.\n \"\"\"\n return np.mean(y_pred==y_true)\n","sub_path":"MLModels/NearestNeighbour.py","file_name":"NearestNeighbour.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"425573674","text":"import os\nimport sys\nimport json\nimport gi\ngi.require_version(\"Gtk\", \"3.0\")\ngi.require_version(\"Notify\", \"0.7\")\nfrom gi.repository import Gtk, Gdk, Gio, GObject, GdkPixbuf, Notify\nfrom random import randint\n\n\nSETTINGS_SCHEMA = 'com.github.huluti.Coulr'\n\n\nclass CoulrWindow(Gtk.ApplicationWindow):\n __gtype_name__ = 'CoulrWindow'\n\n _settings = Gio.Settings.new(SETTINGS_SCHEMA)\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.app = kwargs['application']\n\n \"\"\"Initialize app\"\"\"\n self.app_name = \"Coulr\"\n self.set_border_width(15)\n self.set_size_request(600, -1)\n self.set_resizable(False)\n self.set_position(Gtk.WindowPosition.CENTER)\n\n self.connect('delete-event', self.quit_app)\n\n # Enable notifications\n Notify.init(self.app_name)\n\n # Main vars\n self.rgb_color = None\n self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)\n\n # Header bar\n header_bar = Gtk.HeaderBar()\n header_bar.set_show_close_button(True)\n header_bar.props.title 
= self.app_name\n header_bar.set_subtitle(_(\"Enjoy colors and feel happy!\"))\n self.set_titlebar(header_bar)\n\n # About button\n button_about = Gtk.Button()\n button_about.set_tooltip_text(_(\"About\"))\n icon_about = Gio.ThemedIcon(name=\"help-about-symbolic\")\n image_about = Gtk.Image.new_from_gicon(icon_about, Gtk.IconSize.BUTTON)\n button_about.add(image_about)\n button_about.connect(\"clicked\", self.about_dialog)\n header_bar.pack_end(button_about)\n\n # Copy button\n button_copy = Gtk.Button()\n button_copy.set_tooltip_text(_(\"Copy color\"))\n icon_copy = Gio.ThemedIcon(name=\"edit-copy-symbolic\")\n image_copy = Gtk.Image.new_from_gicon(icon_copy, Gtk.IconSize.BUTTON)\n button_copy.add(image_copy)\n button_copy.connect(\"clicked\", self.copy_output)\n header_bar.pack_end(button_copy)\n\n # Random button\n self.button_random = Gtk.Button()\n self.button_random.set_tooltip_text(_(\"Generate random color\"))\n icon_random = Gio.ThemedIcon(name=\"media-playlist-shuffle-symbolic\")\n image_random = Gtk.Image.new_from_gicon(icon_random, Gtk.IconSize.BUTTON)\n self.button_random.add(image_random)\n self.button_random.connect(\"clicked\", self.random_button_clicked)\n header_bar.pack_end(self.button_random)\n\n # Main wrappers\n main_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n layout1 = Gtk.Grid(row_spacing=30, column_spacing=10, valign=Gtk.Align.CENTER)\n layout2 = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5, valign=Gtk.Align.CENTER)\n main_box.add(layout1)\n main_box.add(layout2)\n self.add(main_box)\n\n # RGB\n\n # Red label\n label = Gtk.Label(_(\"R\"))\n layout1.attach(label, 0, 1, 1, 1)\n\n # Red spinner\n adj = Gtk.Adjustment(0, 0, 255, 1, 10, 0)\n self.spinbutton_r = Gtk.SpinButton(adjustment=adj)\n self.red_sb_id = self.spinbutton_r.connect(\"value-changed\", self.rgb_spin_changed)\n layout1.attach(self.spinbutton_r, 1, 1, 1, 1)\n\n # Red slider\n adj = Gtk.Adjustment(0, 0, 255, 2, 10, 0)\n self.slider_r = Gtk.Scale(adjustment=adj, draw_value=False)\n self.slider_r.set_hexpand(True)\n self.red_s_id = self.slider_r.connect(\"value-changed\", self.rgb_slider_moved)\n layout1.attach(self.slider_r, 2, 1, 2, 1)\n\n # Green label\n label = Gtk.Label(_(\"G\"))\n layout1.attach(label, 0, 2, 1, 1)\n\n # Green spinner\n adj = Gtk.Adjustment(0, 0, 255, 1, 10, 0)\n self.spinbutton_g = Gtk.SpinButton(adjustment=adj)\n self.green_sb_id = self.spinbutton_g.connect(\"value-changed\", self.rgb_spin_changed)\n layout1.attach(self.spinbutton_g, 1, 2, 1, 1)\n\n # Green slider\n adj = Gtk.Adjustment(0, 0, 255, 2, 10, 0)\n self.slider_g = Gtk.Scale(adjustment=adj, draw_value=False)\n self.slider_g.set_hexpand(True)\n self.green_s_id = self.slider_g.connect(\"value-changed\", self.rgb_slider_moved)\n layout1.attach(self.slider_g, 2, 2, 2, 1)\n\n # Blue label\n label = Gtk.Label(_(\"B\"))\n layout1.attach(label, 0, 3, 1, 1)\n\n # Blue spinner\n adj = Gtk.Adjustment(0, 0, 255, 1, 10, 0)\n self.spinbutton_b = Gtk.SpinButton(adjustment=adj)\n self.blue_sb_id = self.spinbutton_b.connect(\"value-changed\", self.rgb_spin_changed)\n layout1.attach(self.spinbutton_b, 1, 3, 1, 1)\n\n # Blue slider\n adj = Gtk.Adjustment(0, 0, 255, 2, 10, 0)\n self.slider_b = Gtk.Scale(adjustment=adj, draw_value=False)\n self.slider_b.set_hexpand(True)\n self.blue_s_id = self.slider_b.connect(\"value-changed\", self.rgb_slider_moved)\n layout1.attach(self.slider_b, 2, 3, 2, 1)\n\n # Layout 2\n # Output mode\n self.combo_output = Gtk.ComboBoxText()\n self.combo_output.append(\"hex\", 
_(\"Hexadecimal\"))\n self.combo_output.append(\"rgb\", _(\"RGB\"))\n self.combo_output.set_active(0)\n self.combo_output.connect(\"changed\", self.change_output)\n\n # Output entry\n self.output = Gtk.Entry()\n self.output_id = self.output.connect(\"changed\", self.output_entry_changed)\n\n # Preview color with square\n self.square = Gtk.Frame()\n self.square.set_size_request(150, 150)\n\n layout2.add(self.square)\n layout2.add(self.combo_output)\n layout2.add(self.output)\n\n if self._settings.get_string(\"last-color\"):\n color = hex_to_rgb(self._settings.get_string(\"last-color\").lstrip(\"#\"))\n else:\n color = random_rgb()\n self.change_color(color)\n self.show_all()\n\n def change_color(self, rgb):\n \"\"\"Refresh preview and set values of all fields.\n :param rgb: rgb color values\n :type rgb: tuple\n \"\"\"\n\n rgba = Gdk.RGBA()\n rgba.parse(\"rgb({},{},{})\".format(*rgb))\n self.square.override_background_color(Gtk.StateType.NORMAL, rgba)\n\n GObject.signal_handler_block(self.spinbutton_r, self.red_sb_id)\n self.spinbutton_r.set_value(rgb[0])\n GObject.signal_handler_unblock(self.spinbutton_r, self.red_sb_id)\n GObject.signal_handler_block(self.slider_r, self.red_s_id)\n self.slider_r.set_value(rgb[0])\n GObject.signal_handler_unblock(self.slider_r, self.red_s_id)\n\n GObject.signal_handler_block(self.spinbutton_g, self.green_sb_id)\n self.spinbutton_g.set_value(rgb[1])\n GObject.signal_handler_unblock(self.spinbutton_g, self.green_sb_id)\n GObject.signal_handler_block(self.slider_g, self.green_s_id)\n self.slider_g.set_value(rgb[1])\n GObject.signal_handler_unblock(self.slider_g, self.green_s_id)\n\n GObject.signal_handler_block(self.spinbutton_b, self.blue_sb_id)\n self.spinbutton_b.set_value(rgb[2])\n GObject.signal_handler_unblock(self.spinbutton_b, self.blue_sb_id)\n GObject.signal_handler_block(self.slider_b, self.blue_s_id)\n self.slider_b.set_value(rgb[2])\n GObject.signal_handler_unblock(self.slider_b, self.blue_s_id)\n\n GObject.signal_handler_block(self.output, self.output_id)\n self.output.set_text(rgb_to_hex(rgb))\n GObject.signal_handler_unblock(self.output, self.output_id)\n\n self.rgb_color = rgb\n self.change_output()\n\n def change_output(self, event=None):\n \"\"\"Set output field\"\"\"\n combo_id = self.combo_output.get_active_id()\n if combo_id == \"hex\":\n output = rgb_to_hex(self.rgb_color)\n elif combo_id == \"rgb\":\n output = \"rgb({},{},{})\".format(*self.rgb_color)\n\n self.output.set_text(output)\n\n def rgb_spin_changed(self, event):\n \"\"\"RGB spinners values changed\"\"\"\n spin_red = self.spinbutton_r.get_value_as_int()\n spin_green = self.spinbutton_g.get_value_as_int()\n spin_blue = self.spinbutton_b.get_value_as_int()\n\n self.change_color((spin_red, spin_green, spin_blue))\n\n def rgb_slider_moved(self, event):\n \"\"\"RGB sliders values changed\"\"\"\n slider_red = int(self.slider_r.get_value())\n slider_green = int(self.slider_g.get_value())\n slider_blue = int(self.slider_b.get_value())\n\n self.change_color((slider_red, slider_green, slider_blue))\n\n def output_entry_changed(self, event):\n \"\"\"Hex entry value changed\"\"\"\n value = self.output.get_text().lstrip(\"#\")\n\n if len(value) == 6:\n rgb = hex_to_rgb(value)\n self.change_color(rgb)\n\n def random_button_clicked(self, event):\n \"\"\"Random button clicked\"\"\"\n self.change_color(random_rgb())\n\n def copy_output(self, event):\n \"\"\"Copy output to clipboard\"\"\"\n color = self.output.get_text()\n self.clipboard.set_text(color, -1)\n\n notification = 
Notify.Notification.new(_(\"Color copied\"), color)\n notification.show()\n\n def about_dialog(self, event):\n \"\"\"About dialog\"\"\"\n about_dialog = Gtk.AboutDialog(self)\n about_dialog.set_program_name(self.app_name)\n about_dialog.set_version(\"1.8\")\n about_dialog.set_copyright(\"Hugo Posnic\")\n about_dialog.set_comments(_(\"Enjoy colors and feel happy!\"))\n about_dialog.set_website(\"https://github.com/Huluti/{}\"\n .format(self.app_name))\n about_dialog.set_website_label(\"GitHub\")\n about_dialog.set_authors([\"Hugo Posnic\"])\n about_dialog.set_logo_icon_name('com.github.huluti.Coulr')\n about_dialog.set_license(self.app_name + \" \" + _(\"is under MIT Licence.\"))\n about_dialog.set_transient_for(self)\n about_dialog.run()\n about_dialog.destroy()\n\n def quit_app(self, *args):\n \"\"\"Quit app and save current color\"\"\"\n self._settings.set_string(\"last-color\", rgb_to_hex(self.rgb_color))\n self.app.quit()\n\n\ndef rgb_to_hex(rgb):\n \"\"\"Convert RGB color to hex color.\"\n :param rgb: RGB color\n :type rgb: tuple\n :return: Hex color\n :rtype: str\n \"\"\"\n return \"#{0:02x}{1:02x}{2:02x}\".format(*rgb)\n\n\ndef hex_to_rgb(hexa):\n \"\"\"Convert hex color to RGB color.\n :param hexa: Hex color\n :type hexa: str\n :return: RGB color\n :rtype: tuple\n \"\"\"\n return tuple(int(hexa[i:i+2], 16) for i in (0, 2, 4))\n\n\ndef random_rgb():\n \"\"\"Random rgb values.\n :return: Random RGB color\n :rtype: tuple\n \"\"\"\n return (randint(0, 255), randint(0, 255), randint(0, 255))\n","sub_path":"src/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":10643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"279199104","text":"import pandas as pd\nfrom sklearn.preprocessing import StandardScaler as SkScaler\n\nfrom evalml.pipelines.components.transformers import Transformer\nfrom evalml.utils.gen_utils import (\n _convert_to_woodwork_structure,\n _convert_woodwork_types_wrapper\n)\n\n\nclass StandardScaler(Transformer):\n \"\"\"Standardize features: removes mean and scales to unit variance.\"\"\"\n name = \"Standard Scaler\"\n hyperparameter_ranges = {}\n\n def __init__(self, random_state=0, **kwargs):\n parameters = {}\n parameters.update(kwargs)\n\n scaler = SkScaler(**parameters)\n super().__init__(parameters=parameters,\n component_obj=scaler,\n random_state=random_state)\n\n def transform(self, X, y=None):\n X = _convert_to_woodwork_structure(X)\n X = _convert_woodwork_types_wrapper(X.to_dataframe())\n X_t = self._component_obj.transform(X)\n X_t_df = pd.DataFrame(X_t, columns=X.columns, index=X.index)\n return _convert_to_woodwork_structure(X_t_df)\n\n def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X, y)\n","sub_path":"evalml/pipelines/components/transformers/scalers/standard_scaler.py","file_name":"standard_scaler.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"71091194","text":"from flask import Flask, render_template, request\nfrom models import Catto\nimport os\nfrom utils import save_plot\n\napp = Flask(__name__)\n\napp.config['UPLOAD_FOLDER'] = 'uploads'\n\nmodel = Catto()\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/infer', methods=['POST'])\ndef success():\n if request.method == 'POST':\n try:\n os.remove('./static/output.png')\n except:\n pass\n f = request.files['file']\n saveLocation = f.filename\n f.save(saveLocation)\n 
preds = model.infer(saveLocation)\n num = save_plot(preds,model.original)\n # delete file after making an inference\n os.remove(saveLocation)\n # respond with the inference\n return render_template('inference.html', num=num)\n\n\nif __name__ == '__main__':\n app.debug = True\n port = int(os.environ.get(\"PORT\", 80))\n app.run(host='0.0.0.0', port=port, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"499684202","text":"from django.db.models import Q\nfrom django.shortcuts import get_object_or_404, redirect, render,HttpResponse\nfrom django.db.models import Model\nfrom bloh.models import *\nfrom customer.models import CustomerModel\nfrom .forms import Accountsetting, Commentform, Creat, Upgrade\nfrom django.contrib import messages\nfrom django.forms.models import inlineformset_factory\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n\ndef index(request):\n istifadeci=CustomerModel.objects.all()\n madel=Addermodel.objects.all()\n erazi=AreaCategorymodel.objects.all()\n if request.method =='GET':\n if request.user.is_authenticated:\n context={\n 'madel':madel,\n 'istifadeci':istifadeci,\n 'erazi' :erazi\n }\n return render(request,'index.html',context)\n messages.success(request,'İlk öncə qeydiyyatdan keçin')\n return redirect('customer:login')\ndef statistika(request):\n return render(request,'statistika.html')\n\n \n@login_required(login_url='/')\ndef create(request):\n form=Creat()\n if request.method =='POST':\n form=Creat(request.POST,request.FILES)\n if form.is_valid():\n article=form.save(commit=False)\n article.author=request.user\n article.save()\n messages.success(request,'Yazı Yaradıldı')\n return redirect('/')\n contex={\n 'form':form\n }\n return render(request,'create.html',contex)\n\n@login_required(login_url='/')\ndef update(request,id):\n obyekt=Addermodel.objects.get(id=id)\n form=Upgrade(instance=obyekt)\n if request.method=='POST':\n form=Upgrade(request.POST,request.FILES,instance=obyekt)\n if form.is_valid():\n form.save()\n messages.success(request,'dəyişikliklər edildi')\n return redirect('index')\n mal={\n 'form':form\n }\n\n return render(request,'update.html',mal)\n@login_required(login_url='/')\ndef delete(request,id):\n Entry=Addermodel.objects.filter(id=id)\n if request.method =='POST':\n Entry.delete()\n messages.success(request,'Başarıyla silindi')\n return redirect('index')\n context={\n 'Entry': Entry\n }\n return render(request,'delete.html',context)\n@login_required(login_url='/')\ndef dashboard(request):\n customer=request.user\n sorgu=request.GET.get('sorgu')\n Entri=Addermodel.objects.filter(author=customer)\n if sorgu:\n Entri = Entri.filter(\n Q(name__icontains=sorgu) |\n Q(content__icontains=sorgu)\n ).distinct()\n context={\n 'Entri':Entri,\n 'customer':customer\n }\n return render(request,'dashboard.html',context)\n@login_required(login_url='/')\ndef account_settings(request):\n users=request.user\n form=Accountsetting(instance=users)\n if request.method=='POST':\n form=Accountsetting(request.POST,request.FILES,instance=users)\n if form.is_valid():\n form.save()\n messages.success(request,'profil dəyişikliyi edildi')\n return redirect('customer:profil')\n can={\n 'form':form\n }\n\n return render(request,'account_settings.html',can)\n@login_required(login_url='/')\ndef commentcreate(request):\n entry=request\n form1=Commentform()\n if request.method=='POST':\n 
form1=Commentform(request.POST)\n if form1.is_valid():\n form1.save()\n return redirect('customer:profil')\n conten={\n 'form1':form1\n }\n return render(request,'commentcreate.html',conten)\ndef updatec(request,id):\n comm=Commentarticle.objects.get(id=id)\n formm=Commentform(instance=comm)\n if request.method =='POST':\n formm=Commentform(request.POST,instance=comm)\n if formm.is_valid():\n formm.save()\n return redirect('customer:profil')\n terkibb={\n 'formm':formm\n }\n return render(request,'updatec.html',terkibb)\ndef deletec(request,id):\n comment=Commentarticle.objects.filter(id=id)\n if request.method=='POST':\n comment.delete()\n return redirect('customer:profil')\n terkib={\n 'comment':comment\n }\n return render(request,'deletec.html',terkib)","sub_path":"bloh/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"527491674","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 19 20:30:03 2016\n\n@author: johannes\n\"\"\"\n#ÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜÜ\n\nimport csv\nimport os\nimport io\nimport numpy as np\nimport time\n#from dbfread import DBF\nfrom Dictionaries import Dictionaries\n\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom matplotlib import pyplot as plt\n\nfrom DataIO_Helper import DataIO_Helper\n\n#from dbf import dbf\n\nclass DataIO:\n \n def __init__(self, filepath_import, filepath_export):\n '''\n DataIO for Import and Export of data\n '''\n self.__filepath_import = filepath_import\n self.__filepath_export = filepath_export\n \n def importCSV(self, filename_import, dtype = None, startrow = 0, delimiter=\";\", columnofdate=None, dateformat=None):\n '''\n imports CSV and replaces first comma start counting from right\n ####### \n return:\n iterable object\n '''\n if os.path.isfile(self.__filepath_import + os.sep + os.path.splitext(filename_import)[0] + \"_pointAsDec\" + os.path.splitext(filename_import)[1]):\n '''opens file and reads it into memory'''\n start_time = time.clock() \n self.__dataArray = np.genfromtxt(self.__filepath_import + os.sep + os.path.splitext(filename_import)[0] + \"_pointAsDec\" + os.path.splitext(filename_import)[1], \n dtype = dtype, \n delimiter = delimiter, \n skip_header = startrow +1,\n converters = DataIO_Helper(columnofdate = columnofdate, dateformat = dateformat).str2date())\n print(time.clock() - start_time)\n \n\n else:\n '''opens file and changes decimal spererator to point, delets all other points or commas: except of date --> saves file into input/+_pointAsDec'''\n self.__dataArray = ''\n self.__dataArrayHeader = ''\n with open(self.__filepath_import + os.sep + filename_import, encoding = 'utf-8') as csvfile:\n readCSV = csv.reader(csvfile, delimiter = delimiter)\n \n start_time = time.clock()\n index = 0\n for row in readCSV:\n '''saves the header of csv-file into self.__dataArrayHeader'''\n self.__dataArrayHeader = self.__dataArrayHeader + delimiter.join(x for x in row) + '\\n'\n if startrow - 2 < index: \n break\n index += 1\n \n \n print(self.__dataArrayHeader)\n for row0 in readCSV:\n row1 = []\n for (index,item) in enumerate(row0):\n if index == columnofdate:\n row1.append(item)\n else:\n item = item.replace(\",\", \".\")\n item = item.replace(\".\",\"\", item.count(\".\") - 1)\n row1.append(item)\n self.__dataArray = self.__dataArray + delimiter.join(x for x in row1) + '\\n'\n print(time.clock() - start_time)\n csvfile.close\n with open(self.__filepath_import + os.sep + 
os.path.splitext(filename_import)[0] + \"_pointAsDec\" + os.path.splitext(filename_import)[1], \"w\") as text_file:\n text_file.write(self.__dataArrayHeader + self.__dataArray)\n text_file.close() \n \n self.__dataArray = np.genfromtxt(io.BytesIO(self.__dataArray.encode('utf-8')), \n dtype = dtype, \n delimiter = delimiter, \n converters = DataIO_Helper(columnofdate = columnofdate, dateformat = dateformat).str2date())\n return self.__dataArray\n \n \n \n def exportCSV(self, filename, results, delimiter = \";\"):\n '''\n exports results as CSV with \",\" as decimal seperator\n filename = string\n results = [] \n '''\n self.__results = results\n '''opens file and writes into it'''\n with open(self.__filepath_export + \"\\\\\" + filename + \".csv\", 'w')as pyfile:\n for item0 in self.__results: \n for item1 in item0:\n if type(item1) == float:\n item1 = str(item1)\n item1 = item1.replace(\".\", \",\")\n pyfile.write(\n item1 + delimiter \n )\n \n pyfile.write(\"\\n\") \n pyfile.close()\n \n \n# def importDBF(self, filename):\n# '''\n# imports a dBASE\n# #######\n# return:\n# iterable object\n# '''\n# self.__dataArraydbf = []\n# for record in DBF(self.__filepath_import + \"\\\\\" + filename, encoding = \"cp437\", load = True):\n# self.__dataArraydbf.append(record)\n# \n# return self.__dataArraydbf\n\n def exportFig(self, filename, fig):\n '''\n exports matplotlib grafik as pdf and png\\n\n #######\\n\n return:\\n\n pp.close()\n '''\n self.__seperator = \"\\\\\"\n if self.__filepath_export == \"\":\n self.__seperator = \"\"\n \n fig.savefig(self.__filepath_export + self.__seperator + filename + \".png\", bbox_inches = 'tight', dpi = 220)\n print(\"Saved PNG to: \" +str(self.__filepath_export + self.__seperator + filename + \".png\"))\n fig.savefig(self.__filepath_export + self.__seperator + filename + \".pdf\", filetype = \"pdf\", bbox_inches = 'tight', dpi = 220)\n print(\"Saved PDF to: \" +str(self.__filepath_export + self.__seperator + filename + \".pdf\"))\n \n def writeColHeader(self, filepathAndfilename, tableHeadername):\n self.__filepathAndfilename = filepathAndfilename\n with open(self.__filepathAndfilename, \"r\") as text_file:\n text_old = text_file.read()\n text_file.close()\n with open(self.__filepathAndfilename, \"w\") as text_file:\n text_file.write(text_old[0:0] + str(Dictionaries(\"\").csvHeader(tableHeadername)) + text_old[:])\n text_file.close()\n \n","sub_path":"DataIO.py","file_name":"DataIO.py","file_ext":"py","file_size_in_byte":6543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"292629614","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 24 00:19:03 2020\n\n@author: sruth\n\"\"\"\n\nn = int(input(\"Input a number: \"))\nfor i in range(1,11):\n print(n,'x',i,'=',n*i)\n","sub_path":"Multiplication of a number.py","file_name":"Multiplication of a number.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"422778639","text":"from Role import *\n\n\n\nclass RoleV(Role):\n\n def __init__(self, map: Map):\n cost_switch = {\n 'q': 3,\n 'v': 0,\n 'p': 1,\n 'e': 2}\n super().__init__(map, cost_switch)\n\n def generate_path(self, start_zone: Zone, end_zone: Zone):\n\n #Establish the final and start nodes taken as the bottom left nodes of the entered zones\n start: Node = start_zone.down_left_node\n cur: Node = start\n targ: Node = end_zone.down_left_node\n\n #caclulate the heuristic values of the start node\n 
cur.g_value = 0.0\n cur.f_value = self.get_heuristic_estimate(cur, targ)\n self.priority_queue_push(cur, cur.f_value)\n\n #Begin iterating throught he openlist\n while len(self.openList)>0:\n\n #take the current as the node with the lowest F value\n cur = self.priority_queue_pop()\n\n #if the current is the target then the algorithm is complete\n if cur == targ:\n print(\"Found target\")\n break\n\n #iterate through the diagonals \n diag: Node.DiagonalConnection \n for diag in cur.diags.values():\n n:Node = diag.node\n tentative_g = self.get_diag_cost(diag)+cur.g_value\n #if tentative g score is less then the current g score then reparent the node and recalculate its \n #g and f scores, if its not on the open list add it\n if tentative_g < diag.node.g_value:\n n.prevNode = cur\n n.g_value = tentative_g\n n.f_value = n.g_value+self.get_heuristic_estimate(n, targ)\n if n not in self.openList:\n self.priority_queue_push(n, n.f_value)\n\n #iterate through the cardinals \n card: Node.CardinalConnection\n for card in cur.cardinals.values():\n n:Node = card.node\n tentative_g = self.get_cost_cardinal(card.edge)+cur.g_value\n if tentative_g < n.g_value:\n n.prevNode = cur\n n.g_value = tentative_g\n n.f_value = n.g_value+self.get_heuristic_estimate(n, targ)\n if n not in self.openList:\n self.priority_queue_push(n, n.f_value)\n\n #the path is constructed by starting at the goal and appending all the previious nodes until the start node is added\n self.path = []\n self.path.insert(0, cur)\n while cur != start:\n cur = cur.prevNode\n self.path.insert(0, cur)\n\n def get_heuristic_estimate(self, cur_node: Node, targ_node: Node):\n temp_node = cur_node\n cost: float = 0.0\n #loop while the temp node is not the target\n while temp_node != targ_node:\n #distance to goal horizontally\n dx = targ_node.x-temp_node.x\n #distance to goal vertically\n dy = targ_node.y-temp_node.y\n\n #If the goal is vertically algined, move only vertically\n if dx == 0:\n if dy > 0:\n cost += self.get_cost_cardinal(temp_node.down_edge)\n temp_node = temp_node.lower_node\n elif dy < 0:\n cost += self.get_cost_cardinal(temp_node.up_edge)\n temp_node = temp_node.upper_node\n #If the goal is horizontally alligned, move only horizontally\n elif dy == 0:\n if dx > 0:\n cost += self.get_cost_cardinal(temp_node.right_edge)\n temp_node = temp_node.right_node\n elif dx<0:\n cost += self.get_cost_cardinal(temp_node.left_edge)\n temp_node = temp_node.left_node\n #Otherwise move diagonally until its horizontally/vertically alligned\n elif dx > 0 and dy > 0:\n cost += self.get_diag_cost(temp_node.diags[Diagonal.DOWN_RIGHT])\n temp_node = temp_node.lower_right_node\n elif dx < 0 and dy > 0:\n cost += self.get_diag_cost(temp_node.diags[Diagonal.DOWN_LEFT])\n temp_node = temp_node.lower_left_node\n elif dx < 0 and dy < 0:\n cost += self.get_diag_cost(temp_node.diags[Diagonal.UP_LEFT])\n temp_node = temp_node.upper_left_node\n elif dx >0 and dy <0:\n cost += self.get_diag_cost(temp_node.diags[Diagonal.UP_RIGHT])\n temp_node = temp_node.upper_right_node\n return cost\n\n\n\n\n def get_diag_cost(self, diag: Node.DiagonalConnection):\n cost1 : float = math.sqrt(self.get_cost_cardinal(diag.e00) ** 2 + self.get_cost_cardinal(diag.e01) ** 2)\n cost2 : float = math.sqrt(self.get_cost_cardinal(diag.e10) ** 2 + self.get_cost_cardinal(diag.e11) ** 2)\n return max(cost1, 
cost2)\n\n\n","sub_path":"COMP472Assignment1/RoleV.py","file_name":"RoleV.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"327416259","text":"import re\n\ndef bruteforce(teks, pattern) :\n n = len(teks)\n m = len(pattern)\n for i in range(n - m + 1) :\n j = 0\n while j < m and teks[i+j].lower() == pattern[j].lower() :\n j += 1\n if j == m :\n return i\n \n return -1\n\n\ndef borderFunction(pattern) :\n fail = [0 for i in range(len(pattern))]\n fail[0] = 0\n\n m = len(pattern)\n j = 0\n i = 1\n while i < m :\n if pattern[j].lower() == pattern[i].lower() :\n fail[i] = j + 1\n i += 1\n j += 1\n elif j > 0 :\n j = fail[j - 1]\n else :\n fail[i] = 0\n i += 1\n \n return fail\n\n\ndef kmp(teks, pattern):\n border = borderFunction(pattern)\n n = len(teks)\n m = len(pattern)\n\n i = 0\n j = 0\n\n while i < n :\n if pattern[j].lower() == teks[i].lower() :\n if j == m - 1 :\n return i - m + 1\n i += 1\n j += 1\n elif j > 0 :\n j = border[j - 1]\n else :\n i += 1\n \n return -1\n\n\n\ndef buildLastOccurence(teks, pattern) :\n # membentuk last occurence\n lo = {}\n\n # inisialisasi -1\n for char in teks.lower() :\n lo[char] = -1\n\n # membentuk last occurence dari pattern\n for i, char in enumerate(pattern) :\n lo[char.lower()] = i\n\n return lo\n\ndef boyerMoore(teks, pattern) :\n lo = buildLastOccurence(teks, pattern)\n\n n = len(teks)\n m = len(pattern)\n i = m - 1\n if(i > n - 1) :\n return -1 #artinya tidak ada\n\n j = m - 1\n\n while True :\n if teks[i].lower() == pattern[j].lower() : # kalau character match\n if j == 0 : # udah ketemu\n return i\n else :\n i -= 1\n j -= 1\n else : # terjadi mismatch\n lastOccur = lo[teks[i].lower()]\n i = i + m - min(j, 1 + lastOccur)\n j = m - 1\n \n if i > n - 1 :\n break\n \n return -1\n\ndef regexMatch(teks, pattern) :\n pattern_lower = pattern.lower()\n teks_lower = teks.lower()\n regex_pattern = r\"(\" + pattern_lower +\")\"\n hasil = re.findall(regex_pattern, teks_lower)\n\n # apabila pattern terdapat pada teks\n if len(hasil) > 0 :\n return teks_lower.find(hasil[0])\n else : # pattern tidak terdapat pada teks\n return -1\n","sub_path":"src/stringmatching.py","file_name":"stringmatching.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"468319253","text":"# Opus/UrbanSim urban simulation software.\r\n# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington\r\n# See opus_core/LICENSE \r\n\r\nfrom baseline_travel_model_2020_biaspf_varpf import BaselineTravelModel2020BiaspfVarpf\r\n\r\nclass BaselineTravelModel2020Nobias(BaselineTravelModel2020BiaspfVarpf):\r\n \r\n \r\n def __init__(self):\r\n BaselineTravelModel2020BiaspfVarpf.__init__(self)\r\n self['travel_model_configuration']['bm_module_class_pair'] = ('inprocess.hana.uncertainty.bm_no_bias', 'BmNoBias')\r\n self['travel_model_configuration'][2020]['bank'] = [ '2020_nb', ]\r\n","sub_path":"psrc_parcel/configs/baseline_travel_model_2020_nobias.py","file_name":"baseline_travel_model_2020_nobias.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"564694818","text":"\n\n\nimport numpy as np\nimport cv2\nimport os\nimport sys\npath = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n\n#鼠标事件的回调函数\ndef on_mouse(event,x,y,flag,param): \n global rect\n global leftButtonDowm\n 
global leftButtonUp\n \n #鼠标左键按下\n if event == cv2.EVENT_LBUTTONDOWN:\n rect[0] = x\n rect[2] = x\n rect[1] = y\n rect[3] = y\n leftButtonDowm = True\n leftButtonUp = False\n \n #移动鼠标事件\n if event == cv2.EVENT_MOUSEMOVE:\n if leftButtonDowm and not leftButtonUp:\n rect[2] = x\n rect[3] = y \n \n #鼠标左键松开\n if event == cv2.EVENT_LBUTTONUP:\n if leftButtonDowm and not leftButtonUp:\n x_min = min(rect[0],rect[2])\n y_min = min(rect[1],rect[3])\n \n x_max = max(rect[0],rect[2])\n y_max = max(rect[1],rect[3])\n \n rect[0] = x_min\n rect[1] = y_min\n rect[2] = x_max\n rect[3] = y_max\n leftButtonDowm = False \n leftButtonUp = True\n\n\nimg = cv2.imread(path + '/imgs/low_quality1.jpg')\nmask = np.zeros(img.shape[:2],np.uint8)\n\n\nbgdModel = np.zeros((1,65),np.float64) #背景模型\nfgdModel = np.zeros((1,65),np.float64) #前景模型\nrect = [0,0,0,0] #设定需要分割的图像范围\n \n\nleftButtonDowm = False #鼠标左键按下\nleftButtonUp = True #鼠标左键松开\n\ncv2.namedWindow('img') #指定窗口名来创建窗口\ncv2.setMouseCallback('img',on_mouse) #设置鼠标事件回调函数 来获取鼠标输入\ncv2.imshow('img',img) #显示图片\n\n\nwhile cv2.waitKey(2) == -1:\n #左键按下,画矩阵\n if leftButtonDowm and not leftButtonUp: \n img_copy = img.copy()\n cv2.rectangle(img_copy,(rect[0],rect[1]),(rect[2],rect[3]),(0,255,0),2) \n cv2.imshow('img',img_copy)\n \n #左键松开,矩形画好 \n elif not leftButtonDowm and leftButtonUp and rect[2] - rect[0] != 0 and rect[3] - rect[1] != 0:\n rect[2] = rect[2]-rect[0]\n rect[3] = rect[3]-rect[1]\n rect_copy = tuple(rect.copy()) \n rect = [0,0,0,0]\n #物体分割\n cv2.grabCut(img,mask,rect_copy,bgdModel,fgdModel,2,cv2.GC_INIT_WITH_RECT)\n \n mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')\n img_show = img*mask2[:,:,np.newaxis]\n #显示图片分割后结果--显示原图\n cv2.imshow('grabcut',img_show)\n cv2.imshow('img',img) \n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"Tencent/Sexy_Video_Detection/grab_cut2.py","file_name":"grab_cut2.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"216390919","text":"import ee\nimport pandas as pd\n\n\ndef coll_filter_conditional(coll, filter_by='NODATA_PIXEL_PERCENTAGE',\n filter_type='less_than', filter_thresh=10,\n cond_type='gte', cond_thresh=30,\n print_size=False, print_mod=None):\n if filter_by is not None:\n coll = coll.filterMetadata(filter_by, filter_type, filter_thresh)\n \n size = coll.size()\n \n if print_size:\n print(f'Size {print_mod}:', size.getInfo())\n \n flag = getattr(size, cond_type)(cond_thresh)\n \n return ee.Algorithms.If(flag, True, False).getInfo()\n\n\ndef collection_greater_than(coll, threshold, print_mod=None):\n return coll_filter_conditional(coll, filter_by=None, cond_thresh=threshold, print_mod=print_mod)\n\n\ndef collection_quality_test_no_filter(coll, best, coll_thresh=30, best_thresh=1):\n best_is_good = collection_greater_than(best, best_thresh)\n if best_is_good:\n return True\n \n return collection_greater_than(coll, coll_thresh)\n\n\ndef collection_quality_test_filter(coll, best, coll_min=30, best_min=1,\n filter_by='NODATA_PIXEL_PERCENTAGE',\n filter_type='less_than', filter_thresh=10):\n\n # Test to see if there's at least one good image with low nodata pixels\n best_is_good = coll_filter_conditional(best, filter_by=filter_by, filter_type=filter_type,\n filter_thresh=filter_thresh, cond_thresh=best_min,\n print_mod='of best filtered')\n \n # If there is, the collection is fine!\n if best_is_good:\n return True\n \n # If there isn't a sufficient good image, test the size of the collection as a whole\n 
coll_is_good = collection_greater_than(coll, coll_min, print_mod='of entire coll')\n\n return coll_is_good\n\ndef image_collection_secondary_sort(col,primary_sort=None,secondary_sort=None):\n\n new_list_of_images = []\n primary_list = col.aggregate_array(primary_sort)\n secondary_list = col.aggregate_array(secondary_sort)\n # image_id_list = col.aggregate_array('system:index')\n \n \n # sort_dic = \\\n # {primary_sort:primary_list,\n # secondary_sort:secondary_list,\n # \"id\":image_id_list}\n\n sort_dic = \\\n {primary_sort:primary_list,\n secondary_sort:secondary_list}\n \n\n new_sort_dic = {}\n #print('secondary sort -- getting infos')\n for key in sort_dic:\n new_sort_dic[key] = sort_dic[key].getInfo()\n #print(f'got info for {key}')\n \n df = pd.DataFrame(new_sort_dic)\n \n df = df.sort_values(by=[primary_sort,secondary_sort],ascending=False)\n \n df = df.reset_index()\n \n df = df.reset_index()\n \n df.rename(columns = {'index':'current_position', 'level_0':'new_position'}, inplace = True) \n \n \n list_of_images = col.toList(col.size())\n \n\n for row in df.iterrows():\n origin = row[1][1]\n\n img_dest = ee.Image(list_of_images.get(origin))\n\n new_list_of_images.append(img_dest)\n\n \n return ee.ImageCollection(new_list_of_images)\n \n","sub_path":"scripts/utils/dynamic_date_range.py","file_name":"dynamic_date_range.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"509356733","text":"\"\"\"\nFlask app for testing the OpenID Connect extension.\n\"\"\"\n\nimport json\nfrom unittest.mock import MagicMock, Mock\n\nfrom flask import Flask, g\nimport flask_oidc\nfrom tests.json_snippets import *\n\noidc = None\n\n\ndef index():\n return \"too many secrets\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_rt():\n return oidc.get_refresh_token(), 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test1():\n return \"successful call to test1\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test2():\n return \"successful call to test2\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test3():\n return \"successful call to test3\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_unprotected():\n return \"successful call to unprotected\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\ndef api():\n return json.dumps(raw_api())\n\n\ndef get_test4():\n return \"successful call to test4\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ncallback_method = Mock()\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n # Check standalone usage\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key='Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n\n configure_keycloak_test_uris(app)\n\n # Check combination with an external API renderer like Flask-RESTful\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=False, 
auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api)\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n\n callback_method.return_value = True\n\n test4 = app.oidc.check_authorization(True, validation_func=callback_method)(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = \"abc\"\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test1_and_test2\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1, resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test3\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\ndef configure_mock_version3(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = None\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n","sub_path":"tests/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"622670619","text":"from flask import Flask\nfrom config import Config\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_seeder import FlaskSeeder\n\n#Inisialisasi JWT\nfrom flask_jwt_extended import JWTManager\n\n#email\nfrom flask_mail import Mail\n\n#backgroun task\nimport time\nimport atexit\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\n#queue worker redis\nimport os\nimport redis\nfrom rq import Queue\n\n\napp = Flask(__name__)\napp.config.from_object(Config)\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\njwt = JWTManager(app)\nmail = Mail(app)\n\n# Untuk koneksi ke Redis Queue\nr = 
redis.Redis()\nq = Queue(connection=r)\n\n# bagian upload file\nUPLOAD_FOLDER = 'app/static/uploads/'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\n#seeder\nseeder = FlaskSeeder()\nseeder.init_app(app,db)\n\n#model\nfrom app.model import prodi, user, sidang, berkasSidang\n\n#routes\nfrom app.route import routes, routeUser, routeProdi, routeSidang\n\n\n\n# Backgrountask example\nscheduler = BackgroundScheduler()\n\ndef print_date_time():\n print(time.strftime(\"%A, %d. %B %Y %I:%M:%S %p\"))\n # pass\n\nscheduler.add_job(func=print_date_time, trigger=\"interval\", minutes=3)\nscheduler.start()\n\n# Shut down the scheduler when exiting the app\natexit.register(lambda: scheduler.shutdown())\n\n\n\n\n# Create a working task queue \ndef background_task(n):\n try:\n \"\"\" Function that \n returns len(n) and \n simulates a delay \"\"\"\n\n # format second (atau setelah 2 detik)\n delay = 2 \n\n print(\"Task running\")\n print(f\"Simulating a {delay} second delay\")\n\n time.sleep(delay)\n\n print(len(n))\n print(\"Task complete\")\n\n return len(n)\n except Exception as e:\n print(e)\n print(f\"tes {n}\")\n return \"tes\"\n\n\n@app.route(\"/task\")\ndef jj():\n # try:\n\n banyak = 0\n if request.args.get(\"n\"):\n\n job = q.enqueue(background_task, request.args.get(\"n\"))\n # banyak = len(q)\n return f\"Task ({job.id}) added to queue at {job.enqueued_at}\"\n banyak = len(q)\n return f\"No value for count provided {banyak}\"\n # except HorseMonitorTimeoutException as e:\n print(e)","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"13377413","text":"import csv\nimport xlrd\n\n\ndef xlsx_reader(path):\n rb = xlrd.open_workbook(path, formatting_info=False)\n sheet = rb.sheet_by_index(0)\n return [sheet.row_values(i) for i in range(sheet.nrows)]\n\ndef csv_writer(data, path):\n \"\"\"\n Write data to a CSV file path\n \"\"\"\n with open(path, \"w\", newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=';',\n quotechar='\"', quoting=csv.QUOTE_ALL)\n for line in data:\n writer.writerow(line)\n\n\nif __name__ == \"__main__\":\n data = [\"first_name,last_name,city\".split(\",\"),\n \"Tyrese Tyrese,Hirthe,Strackeport\".split(\",\"),\n \"Jules,Dicki,Lake Nickolasville\".split(\",\"),\n \"Dedric,Medhurst,Stiedemannberg\".split(\",\")\n ]\n # 1) записываем в файл output.csv содержимой словаря data\n print(data)\n out_file = \"output.csv\"\n csv_writer(data, out_file)\n\n # 2) получает данные из файле ddata.xlsx\n # записывает эти данные в файл\n in_file = 'ddata.xlsx'\n out_file = \"output_1.csv\"\n\n data = xlsx_reader(in_file)\n csv_writer(data, out_file)\n","sub_path":"any/xlsx_to_csv/xlsx_to_csv.py","file_name":"xlsx_to_csv.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"34269613","text":"\"\"\"empty message\n\nRevision ID: 503e11ce43d4\nRevises: b51d0ad676d1\nCreate Date: 2021-04-16 15:25:19.424646\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '503e11ce43d4'\ndown_revision = 'b51d0ad676d1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('s1-data', 'first_section')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('s1-data', sa.Column('first_section', mysql.VARCHAR(length=15), nullable=True))\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/503e11ce43d4_.py","file_name":"503e11ce43d4_.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"77883850","text":"import imagehash\nfrom threading import Thread\nfrom persistence.hashes import MD5Inventory\nfrom configuration import Configuration\n\ninventory = MD5Inventory()\nconf = Configuration()\n\nclass ValidateLoadedImages(Thread):\n def __init__(self, queue_input, queue_good, queue_bad):\n Thread.__init__(self)\n self.setDaemon(True)\n self.__queue_input = queue_input\n self.__queue_good = queue_good\n self.__queue_bad = queue_bad\n\n\n def run(self):\n \"Calculate the MD5 hash of an image and store them with the has as filename\"\n\n while True:\n try:\n image_meta = self.__queue_input.get()\n\n # avoid duplicate\n if not inventory.has_hash(image_meta[\"md5\"]):\n # avoid unsupported image format\n img = image_meta[\"image\"]\n if conf.image_descriptor_by_type(img.format) is not None:\n self.__queue_good.put(image_meta)\n else:\n # store unsupported files to avoid duplicate loading\n self.__queue_bad.put(image_meta)\n\n except OSError:\n pass\n","sub_path":"src/010-crawler/appraiser/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"473221794","text":"import sys\r\nfrom IPython import get_ipython\r\n\r\nsolver_number = sys.argv[1]\r\nsolver_file = \"{}.py\".format(solver_number)\r\nipython = get_ipython()\r\nipython.run_cell_magic(\"capture\", \"\", \"%cd ../solvers/\")\r\nipython.run_line_magic(\"load\", solver_file)\r\nexec(\"from {} import solver\".format(solver_number))\r\nprint(\"Solution: {}\".format(solver()))\r\nipython.run_cell_magic(\"timeit\", \"\", \"%run {}\".format(solver_file))","sub_path":"notebooks/solver_loader.py","file_name":"solver_loader.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"540002838","text":"# -*- coding: utf-8 -*- \n'''\nNo.492\nFor a web developer, it is very important to know how to design a web page's size. So, given a specific rectangular web page’s area, your job by now is to design a rectangular web page, \nwhose length L and width W satisfy the following requirements:\n1. The area of the rectangular web page you designed must equal to the given target area.\n2. The width W should not be larger than the length L, which means L >= W.\n3. 
The difference between length L and width W should be as small as possible.\nYou need to output the length L and the width W of the web page you designed in sequence.\n'''\nclass Solution(object):\n def constructRectangle(self, area):\n \"\"\"\n :type area: int\n :rtype: List[int]\n #��好的情况就是可以开平方,这样差值为0,如果不行,则从开平方取整向下找,找到的第一个即为差值最小的数(小的那个)\n 然后添加即可\n \"\"\"\n a = int(area**.5)\n while a>=1:\n if area%a == 0:\n return [area/a,a]\n a -= 1\n","sub_path":"Construct_the_Rectangle.py","file_name":"Construct_the_Rectangle.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"481735799","text":"import re\nimport sys\n\nserver = '192.168.0.254'\n\ndef pxeesxi(mac):\n\t# il faut que l'adresse mac soit séparé par des \"-\" et en minuscule\n\t# On vérifie que l'adresse MAC en soit bien une\n\tX='([a-fA-F0-9]{2}[\" \":\\-]?){6}'\n\tismac = re.compile(X).match(mac)\n\tif ismac:\n\t\tpass\n\telse:\n\t\tprint('Adresse MAC invalide. Quitte le script')\n\t\tsys.exit\n\n\t# On veut une adresse MAC en minuscule, séparée par \"-\"\n\t# On commence par séparer l'adresse MAC dans une list\n\tlistmac = re.findall('[a-fA-F0-9]{2}',mac)\n\t# Puis on remet en string, séparée par \":\"\n\tmac = \"-\".join(listmac)\n\t# On met en minuscule\n\tmac = mac.lower()\n\t# Nom du fichier pxe\n\tfile = \"/srv/tftp/esxi/pxelinux.cfg/01-\"+mac\n\t# On crée le fichier pxe\n\twith open(file, 'w') as fichier:\n\t\tfichier.write('DEFAULT esxi\\n')\n\t\tfichier.write('nohalt 1\\n')\n\t\tfichier.write('prompt 0\\n')\n\t\tfichier.write('timeout 300\\n')\n\t\tfichier.write('\\nLABEL esxi\\n')\n\t\tfichier.write(' menu label ESXi Installation\\n')\n\t\tfichier.write(' KERNEL esxi6/mboot.c32\\n')\n\t\tfichier.write(' APPEND -c esxi6/boot.cfg ks=http://{}/{}esxi.cfg'.format(server, mac))\n\t","sub_path":"vieuxtrucs/esxi/fichierpxeesxi.py","file_name":"fichierpxeesxi.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"553638046","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 22 21:59:55 2018\n\n@author: joshapanavicius\n\"\"\"\n\ndef f1(x):\n return x**3 - 2*x**2 -5\n\ndef f3(x):\n return x**4 + x**3 + 3*x**2 + 2*x + 2\n\ndef f7(x):\n return 600*x**4 - 550*x**3 + 200*x**2 - 20*x - 1\n\n#Muller's Method\ndef muller_method(p0, p1, p2, tol, N, fn):\n \"\"\"Short summary.\n\n Args:\n p0 (type): Description of parameter `p0`.\n p1 (type): Description of parameter `p1`.\n p2 (type): Description of parameter `p2`.\n tol (type): Description of parameter `tol`.\n N (type): Description of parameter `N`.\n fn (type): Description of parameter `fn`.\n\n Returns:\n type: Description of returned object.\n\n \"\"\"\n h1 = p1 - p0\n h2 = p2 - p1\n s1 = (fn(p1) - fn(p0))/h1\n s2 = (fn(p2) - fn(p1))/h2\n d = (s1 - s2)/(h2 + h1)\n i = 3\n while i <= N:\n b = s2 + h2*d\n D = (b**(2) - 4*fn(p2)*d)**0.5\n if abs(b - D) < abs(b + D):\n E = b + D\n else:\n E = b - D\n h = -2*fn(p2)/E\n p = p2 + h\n print('i =', i, 'p =', p)\n if abs(h) < tol:\n return print('After', i, 'iterations, we found a root at p =', p)\n p1 = p2\n p2 = p\n h1 = p1 - p0\n h2 = p2 - p1\n s1 = (fn(p1) - fn(p0))/h1\n s2 = (fn(p2) - fn(p1))/h2\n d = (s1 - s2)/(h2 + h1)\n i += 1\n return print('After', N, 'iterations.')\n\n#Newton's Method\ndef newtonMethod(p0, tol, N, fn, dfn):\n \"\"\"Finds a solution to f(x) = 0 given an initial approximation p0.\n\n Number, Number, 
Number, Function, Function -> Number\"\"\"\n #Start iterations\n i = 1\n while i <= N:\n #Compute pi\n p = p0 - (fn(p0)/dfn(p0))\n print('i =', i, 'p =', p)\n if abs(p - p0) < tol:\n #Procedure was successful\n return print(\"After\", i, \"iterations, a root was found at p =\", p)\n i += 1 #Continue iterations\n p0 = p #Updates p0\n #Procedure was unsuccessful\n return print(\"The method failed after\", N, \"iterations.\")\n\nmuller_method(-0.5, 0, 0.5, 0.00001, 20, f7)\n","sub_path":"müllers_method.py","file_name":"müllers_method.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"77934297","text":"import pickle\n\nwith open('./outputs/restart.pkl', 'rb') as rf:\n restarter = pickle.load(rf)\n\nsim_manager = restarter.new_sim_manager()\n\n\ndef main(continue_run, n_cycles, steps, n_walkers, n_workers=1, debug_prints=False, seed=None):\n\n ### RUN the simulation\n print(\"Starting run\")\n sim_manager.continue_run_simulation(continue_run, n_cycles, steps, debug_prints=True)\n print(\"Finished run\")\n\nif __name__ == \"__main__\":\n\n import time\n import multiprocessing as mp\n import sys\n import logging\n\n # needs to call spawn for starting processes due to CUDA not\n # tolerating fork\n mp.set_start_method('spawn')\n mp.log_to_stderr(logging.INFO)\n\n if sys.argv[1] == \"--help\" or sys.argv[1] == '-h':\n print(\"arguments: continue_run_idx, n_cycles, n_steps, n_walkers, n_workers\")\n else:\n\n continue_run = int(sys.argv[1])\n n_cycles = int(sys.argv[2])\n n_steps = int(sys.argv[3])\n n_walkers = int(sys.argv[4])\n n_workers = int(sys.argv[5])\n\n print(\"Number of steps: {}\".format(n_steps))\n print(\"Number of cycles: {}\".format(n_cycles))\n\n steps = [n_steps for i in range(n_cycles)]\n\n start = time.time()\n main(continue_run, n_cycles, steps, n_walkers, n_workers=n_workers,\n debug_prints=True)\n end = time.time()\n\n print(\"time {}\".format(end-start))\n\n if sys.argv[1] == \"--help\" or sys.argv[1] == '-h':\n print(\"arguments: n_runs, n_cycles, n_steps, n_walkers, n_workers\")\n else:\n\n n_runs = int(sys.argv[1])\n n_cycles = int(sys.argv[2])\n n_steps = int(sys.argv[3])\n n_walkers = int(sys.argv[4])\n n_workers = int(sys.argv[5])\n\n print(\"Number of steps: {}\".format(n_steps))\n print(\"Number of cycles: {}\".format(n_cycles))\n\n steps = [n_steps for i in range(n_cycles)]\n\n start = time.time()\n main(n_runs, n_cycles, steps, n_walkers, n_workers, debug_prints=True)\n end = time.time()\n\n print(\"time {}\".format(end-start))\n","sub_path":"examples/seh_tppu_unbinding/we_restart.py","file_name":"we_restart.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"588925845","text":"import cv2 as cv\nimport os\n\nVIDEO_EXTENSIONS = ['.mp4', '.avi']\ndef isVideoFile(filename):\n return any(filename.endswith(extension) for extension in VIDEO_EXTENSIONS)\n\n\n\"\"\"\n :function:\n 视频每隔n秒截取一帧图像并保存\n :parameter\n videoPath: 视频路径\n imgSavePath: 图片保存路径和图片名\n interval: 每隔多少秒截取一帧\n :return\n 视频保存的总帧数\n\"\"\"\ndef videoToImage(videoPath, imgSavePath, interval = 1):\n cap = cv.VideoCapture(videoPath)\n\n fps = cap.get(5)\n fps = int(fps)\n frame_count = cap.get(7)\n\n print(\"fps : \"+str(fps))\n\n print(\"总贞数:\"+str(frame_count))\n\n ret = True\n imgIndex = 0\n\n while (ret):\n imgIndex += 1\n\n ret, frame = cap.read()\n\n if(imgIndex%(interval*fps)==0):\n 
cv.imwrite(imgSavePath+str(imgIndex)+\".png\",frame)\n print(imgSavePath+str(imgIndex)+\".png\")\n\n cap.release()\n return int(imgIndex/fps)\n pass\n\n\ndef cutVideosToImage(dataroot,img_save_path):\n if(os.path.isdir(dataroot)):\n\n for fileOrDir in os.listdir(dataroot):\n if os.path.isdir(dataroot+\"/\"+fileOrDir):\n cutVideosToImage(dataroot+\"/\"+fileOrDir,img_save_path+\"/\"+fileOrDir)\n\n\n elif isVideoFile(fileOrDir):\n # print(img_save_path+\"/\"+fileOrDir)\n if not os.path.exists(img_save_path):\n os.makedirs(img_save_path)\n videoToImage(dataroot+\"/\"+fileOrDir,img_save_path+\"/\"+fileOrDir.replace(\".mp4\",\"_\"))\n pass\n elif os.path.isfile(dataroot):\n if isVideoFile(dataroot):\n videoToImage(dataroot,img_save_path)\n return\n\n\nif __name__ == '__main__':\n\n command = os.popen(\"pwd\")\n dataroot = command.read()\n command.close()\n # dataroot = \"/home/letmesleep/data/test\"\n videos = cutVideosToImage(dataroot,dataroot+\"_img\")\n\n\n\n","sub_path":"src/ProjectUtil/VideoTOImage.py","file_name":"VideoTOImage.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"378187953","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nTRAIN_LOSS_PATH = '%s/run-train-tag-Loss_Epoch_%s.csv'\nTEST_LOSS_PATH = '%s/run-test-tag-Loss_Epoch_%s.csv'\nMETRIC_PATH = '%s/run-test-tag-Metric_%s.csv'\n\n\ndef plot_cleargrasp_loss(density):\n assert density in ['Dense', 'Sparse']\n\n cleargrasp_train = pd.read_csv(TRAIN_LOSS_PATH % ('cleargrasp', density))\n cleargrasp_test = pd.read_csv(TEST_LOSS_PATH % ('cleargrasp', density))\n norm_train = pd.read_csv(TRAIN_LOSS_PATH % ('cleargrasp-norm', density))\n norm_test = pd.read_csv(TEST_LOSS_PATH % ('cleargrasp-norm', density))\n\n plt.plot(cleargrasp_train['Step'], cleargrasp_train['Value'], 'b-', label='baseline-train')\n plt.plot(cleargrasp_test['Step'], cleargrasp_test['Value'], 'b--', label='baseline-test')\n plt.plot(norm_train['Step'], norm_train['Value'], 'r-', label='normalized-train')\n plt.plot(norm_test['Step'], norm_test['Value'], 'r--', label='normalized-test')\n\n plt.title('%s Point Cloud' % density)\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.yscale('log')\n plt.legend()\n plt.show()\n\n\ndef plot_cleargrasp_metric(metric):\n assert metric in ['F-Score', 'ChamferDistance']\n\n cleargrasp = pd.read_csv(METRIC_PATH % ('cleargrasp', metric))\n norm = pd.read_csv(METRIC_PATH % ('cleargrasp-norm', metric))\n\n plt.plot(cleargrasp['Step'], cleargrasp['Value'], '', label='baseline')\n plt.plot(norm['Step'], norm['Value'], 'r', label='normalized')\n\n plt.title(metric)\n plt.xlabel('epoch')\n if metric == 'ChamferDistance':\n plt.ylim(0, 3)\n plt.legend()\n plt.show()\n\n\ndef plot_frankascan_loss(density):\n assert density in ['Dense', 'Sparse']\n\n fs_norm_train = pd.read_csv(TRAIN_LOSS_PATH % ('frankascan-norm', density))\n fs_norm_test = pd.read_csv(TEST_LOSS_PATH % ('frankascan-norm', density))\n fs_recenter_train = pd.read_csv(TRAIN_LOSS_PATH % ('frankascan-recenter', density))\n fs_recenter_test = pd.read_csv(TEST_LOSS_PATH % ('frankascan-recenter', density))\n fs_gtpcd_train = pd.read_csv(TRAIN_LOSS_PATH % ('frankascan-gtpcd', density))\n fs_gtpcd_test = pd.read_csv(TEST_LOSS_PATH % ('frankascan-gtpcd', density))\n\n plt.plot(fs_norm_train['Step'], fs_norm_train['Value'], 'b-', label='normalized-train')\n plt.plot(fs_norm_test['Step'], fs_norm_test['Value'], 'b--', label='normalized-test')\n 
plt.plot(fs_recenter_train['Step'], fs_recenter_train['Value'], 'r-', label='recentered-train')\n plt.plot(fs_recenter_test['Step'], fs_recenter_test['Value'], 'r--', label='recentered-test')\n plt.plot(fs_gtpcd_train['Step'], fs_gtpcd_train['Value'], 'g-', label='complete GT-train')\n plt.plot(fs_gtpcd_test['Step'], fs_gtpcd_test['Value'], 'g--', label='complete GT-test')\n\n plt.title('%s Point Cloud' % density)\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.yscale('log')\n plt.legend()\n plt.show()\n\n\ndef plot_frankascan_metric(metric):\n assert metric in ['F-Score', 'ChamferDistance']\n\n norm = pd.read_csv(METRIC_PATH % ('frankascan-norm', metric))\n recenter = pd.read_csv(METRIC_PATH % ('frankascan-recenter', metric))\n gtpcd = pd.read_csv(METRIC_PATH % ('frankascan-gtpcd', metric))\n\n plt.plot(norm['Step'], norm['Value'], '', label='normalized')\n plt.plot(recenter['Step'], recenter['Value'], 'r', label='recentered')\n plt.plot(gtpcd['Step'], gtpcd['Value'], 'g', label='complete GT')\n\n plt.title(metric)\n plt.xlabel('epoch')\n if metric == 'ChamferDistance':\n plt.ylim(0, 25)\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n plot_cleargrasp_loss('Dense')\n plot_cleargrasp_loss('Sparse')\n plot_cleargrasp_metric('F-Score')\n plot_cleargrasp_metric('ChamferDistance')\n\n plot_frankascan_loss('Dense')\n plot_frankascan_loss('Sparse')\n plot_frankascan_metric('F-Score')\n plot_frankascan_metric('ChamferDistance')\n","sub_path":"output/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"215706957","text":"from collections import deque\nS = deque(input())\nQ = int(input())\nquery = [input().split() for _ in range(Q)]\nreverse = False\nfor q in query:\n if q[0] == '1':\n reverse = not reverse\n else:\n if q[1] == '1':\n S.append(q[2]) if reverse else S.appendleft(q[2])\n else:\n S.appendleft(q[2]) if reverse else S.append(q[2])\nif reverse:\n S.reverse()\nprint(''.join(S))\n","sub_path":"abc/158/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"309764421","text":"import numpy as np\n\nfrom collections import namedtuple\nfrom collections import defaultdict\nfrom scipy.spatial import distance\n\nMine = namedtuple(\"Mine\", [\"name\", \"x\", \"y\", \"blast_radius\"])\n\ndef get_dist(x1, y1, x2, y2):\n return distance.euclidean([x1, y1], [x2, y2])\n\n\ndef longest_chain(mines):\n # create a m * n array based on number of mines\n n = len(mines)\n M = np.zeros((n, n))\n\n \n\n for mine in mines:\n for otherMine in mines:\n if mine == otherMine:\n continue\n else:\n dist = get_dist(mine.x, mine.y, otherMine.x, otherMine.y)\n if dist <= mine.blast_radius:\n conn[mine.name].add(otherMine.name)\n\n # for k, v in conn.iteritems():\n\n return conn\n\n\n# Diagram: https://i.imgur.com/xEZZQKP.png\nmines = [\n Mine(\"A\", 7, 13, 3),\n Mine(\"B\", 6.5, 17, 5),\n Mine(\"C\", 12, 10, 4.5),\n Mine(\"D\", 14.5, 7, 3.5),\n Mine(\"E\", 17, 9, 2),\n Mine(\"F\", 7, 11, 2.5),\n Mine(\"G\", 8.5, 11.5, 3),\n]\n\nprint(longest_chain(mines))\n# print(longest_chain(mines) == ('C', 6))\n","sub_path":"exercises/mines2.py","file_name":"mines2.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"183253199","text":"import os,sys,os.path,numpy as np\nimport contratio as 
crat\nimport astropy.io.fits as pyfits\nimport scipy.ndimage as nd\nnameList = sys.argv[5:len(sys.argv)]\nif len(sys.argv)<4:\n print('Useage: crat_from_object.py raw_directory cube_directory plot_directory num_cals object_name (with spaces)')\n sys.exit()\n#Combine name into single string\nname = ''\nif len(nameList)>1:\n for ii in range(0,len(nameList)):\n name+=nameList[ii]\n if ii=0 or jj=0 and os.path.isfile(cubeDir+'/cube'+str(all_elements[ii-2])+'.fits') and ii not in lineNums:\n cal_els.append(all_elements[ii-2])\n ii-=1\n elif not os.path.isfile(cubeDir+'/cube'+str(all_elements[ii-2])+'.fits') or ii in lineNums:\n ii-=1\n if len(cal_els)==numCals:\n break\n if jj=len(all_elements) or not os.path.isfile(cubeDir+'/cube'+str(all_elements[jj-2])+'.fits') or jj in lineNums:\n jj+=1\n if len(cal_els)==numCals:\n break\n if len(cal_els)==numCals:\n break\n\ntgt_cubes = []\ncal_cubes = []\ntgt_ims = []\ncal_ims = []\npas = []\n#Create target cube list\nfor ii in range(0,len(elements)):\n tgt_cubes.append(cubeDir+'/cube'+str(elements[ii])+'.fits')\n cube = pyfits.getdata(tgt_cubes[ii])\n pa = pyfits.getdata(tgt_cubes[ii],1)['pa']\n for jj in range(0,len(cube)):\n tgt_ims.append(cube[jj])\n pas.append(pa[jj])\n#Create calibrator list\ncal_objects = []\n#cal_els = [299]\nfor ii in range(0,len(cal_els)):\n cal_cubes.append(cubeDir+'/cube'+str(cal_els[ii])+'.fits')\n cube = pyfits.getdata(cal_cubes[ii])\n cal = pyfits.getheader(cal_cubes[ii])['OBJECT']\n for jj in range(0,len(cube)):\n cal_ims.append(cube[jj])\n cal_objects.append(cal)\n \ntgt_ims = np.array(tgt_ims)\ncal_ims = np.array(cal_ims)\nall_ims = []\ntempFile = plotDir+'/temp.fits'\nheader = pyfits.getheader(tgt_cubes[0])\nradec = [header['RA'],header['DEC']]\nhdu1 = pyfits.PrimaryHDU(tgt_ims, header)\nhdu2 = pyfits.ImageHDU(cal_ims)\ncol1 = pyfits.Column(name='pa', format='E', array=pas)\ncol2 = pyfits.Column(name='cal_objects', format='A40', array=cal_objects)\nhdu3 = pyfits.BinTableHDU.from_columns(pyfits.ColDefs([col1,col2]))\nhdulist = pyfits.HDUList([hdu1,hdu2,hdu3])\nhdulist.writeto(tempFile, clobber=True)\n\ncrat_file = crat.best_psf_subtract(tempFile,plotDir)\nos.system('rm -rf '+tempFile)\n\ncrat_im = pyfits.getdata(crat_file)\npas = pyfits.getdata(crat_file,1)['pa']\n#Take average over frames\nresult = np.mean(crat_im,axis=0)\nnewPA = np.mean(pas)\n#Set up rotation angle so image is approximately aligned\n#Make sure north is never more than 60 degrees from vertical\nnewRot = newPA\nwhile newRot>180:\n newRot-=360\nwhile newRot<-180:\n newRot+=360\nnewRot = 90-newRot\nwhile newRot>60 or newRot<-60:\n if newRot<0:\n newRot+=90\n elif newRot>0:\n newRot-=90\n \nresult = nd.rotate(result,newRot,reshape=True)\nsize = result.shape[0]\nsz = 128\nresult = result[size//2-sz//2:size//2+sz//2,size//2-sz//2:size//2+sz//2]\nhdu = pyfits.PrimaryHDU(result)\ncosterm = np.cos(np.radians(newRot))*0.01/3600.\nsinterm = np.sin(np.radians(newRot))*0.01/3600.\nheader = pyfits.getheader(tgt_cubes[0])\nhdu.header = header\nhdu.header['CRVAL1']=radec[0]\nhdu.header['CRVAL2']=radec[1]\nhdu.header['CTYPE1']='RA---TAN'\nhdu.header['CTYPE2']='DEC--TAN'\nhdu.header['CRPIX1']=sz//2\nhdu.header['CRPIX2']=sz//2\nhdu.header['CD1_1']=-costerm\nhdu.header['CD2_2']=costerm\nhdu.header['CD1_2']=sinterm\nhdu.header['CD2_1']=sinterm\nhdu.header['OBJECT']=name\n#hdu.header['RADECSYS']='FK5'\nhdulist = pyfits.HDUList([hdu])\nhdulist.writeto(plotDir+'/ave_crat_'+objName+'.fits', clobber=True)\n#Find weighted mean\nweights = np.zeros(crat_im.shape[0])\nfor ii in 
range(0,len(weights)):\n\trms = np.sqrt(np.mean(crat_im[ii]**2))\n\tweights[ii] = 1./rms\n\nmean_im = np.zeros(crat_im[0].shape)\nfor ii in range(0,len(weights)):\n\tmean_im+=weights[ii]*crat_im[ii]\nmean_im/=sum(weights)\nmean_im = nd.rotate(mean_im,newRot,reshape=True)\nsize = mean_im.shape[0]\nmean_im = mean_im[size//2-sz//2:size//2+sz//2,size//2-sz//2:size//2+sz//2]\nhdu2 = pyfits.PrimaryHDU(mean_im)\nhdu2.header = hdu.header\nhdulist = pyfits.HDUList([hdu2])\nhdulist.writeto(plotDir+'/weighted_mean_'+objName+'.fits',clobber=True)","sub_path":"crat_from_object.py","file_name":"crat_from_object.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"17137401","text":"\"\"\"This module contains various decorators.\n\nThere are two kinds of decorators defined in this module which consists of either two or\nthree nested functions. The former are decorators without and the latter with arguments.\n\nFor more information on decorators, see this `guide`_ on https://realpython.com which\nprovides a comprehensive overview.\n\n.. _guide:\n https://realpython.com/primer-on-python-decorators/\n\n\"\"\"\nimport functools\nimport itertools\nimport traceback\nfrom datetime import datetime as dt\n\nimport numpy as np\nimport pandas as pd\n\nfrom estimagic.config import MAX_CRITERION_PENALTY\nfrom estimagic.logging.update_database import append_rows\nfrom estimagic.logging.update_database import update_scalar_field\nfrom estimagic.optimization.reparametrize import reparametrize_from_internal\n\n\ndef numpy_interface(params, constraints=None):\n \"\"\"Convert x to params.\n\n This decorator receives a NumPy array of parameters and converts it to a\n :class:`pandas.DataFrame` which can be handled by the user's criterion function.\n\n Args:\n params (pandas.DataFrame): See :ref:`params`.\n constraints (list of dict): Contains constraints.\n\n \"\"\"\n\n def decorator_numpy_interface(func):\n @functools.wraps(func)\n def wrapper_numpy_interface(x, *args, **kwargs):\n # Handle usage in :func:`internal_function` for gradients.\n if constraints is None:\n p = params.copy()\n p[\"value\"] = x\n\n # Handle usage in :func:`internal_criterion`.\n else:\n p = reparametrize_from_internal(\n internal=x,\n fixed_values=params[\"_internal_fixed_value\"].to_numpy(),\n pre_replacements=params[\"_pre_replacements\"].to_numpy().astype(int),\n processed_constraints=constraints,\n post_replacements=(\n params[\"_post_replacements\"].to_numpy().astype(int)\n ),\n processed_params=params,\n )\n\n criterion_value = func(p, *args, **kwargs)\n\n if isinstance(criterion_value, (pd.DataFrame, pd.Series)):\n criterion_value = criterion_value.to_numpy()\n\n return criterion_value\n\n return wrapper_numpy_interface\n\n return decorator_numpy_interface\n\n\ndef expand_criterion_output(criterion):\n \"\"\"Handle one- or two-element criterion returns.\n\n There are three cases:\n\n 1. The criterion function returns a scalar. Then, do not include any comparison plot\n data.\n 2. If the criterion functions returns an array as with maximum likelihood estimation\n or while using POUNDERs, use the array as data for the comparison plot.\n 3. 
If the criterion function returns a criterion value and the data for the\n comparison plot, the return is a tuple.\n\n \"\"\"\n\n @functools.wraps(criterion)\n def wrappper_expand_criterion_output(*args, **kwargs):\n out = criterion(*args, **kwargs)\n if np.isscalar(out):\n criterion_value = out\n comparison_plot_data = pd.DataFrame({\"value\": [np.nan]})\n elif isinstance(out, np.ndarray):\n criterion_value = out\n comparison_plot_data = pd.DataFrame({\"value\": criterion_value})\n elif isinstance(out, tuple):\n criterion_value, comparison_plot_data = out[0], out[1]\n else:\n raise NotImplementedError\n\n return criterion_value, comparison_plot_data\n\n return wrappper_expand_criterion_output\n\n\ndef negative_criterion(criterion):\n \"\"\"Turn maximization into minimization by switching the sign.\"\"\"\n\n @functools.wraps(criterion)\n def wrapper_negative_criterion(*args, **kwargs):\n criterion_value, comparison_plot_data = criterion(*args, **kwargs)\n\n return -criterion_value, comparison_plot_data\n\n return wrapper_negative_criterion\n\n\ndef negative_gradient(gradient):\n \"\"\"Switch the sign of the gradient.\"\"\"\n if gradient is None:\n wrapper_negative_gradient = None\n else:\n\n @functools.wraps(gradient)\n def wrapper_negative_gradient(*args, **kwargs):\n return -1 * gradient(*args, **kwargs)\n\n return wrapper_negative_gradient\n\n\ndef log_evaluation(func=None, *, database, tables):\n \"\"\"Log parameters and fitness values.\n\n This decorator can be used with and without parentheses and accepts only keyword\n arguments.\n\n \"\"\"\n\n def decorator_log_evaluation(func):\n @functools.wraps(func)\n def wrapper_log_evaluation(params, *args, **kwargs):\n criterion_value, comparison_plot_data = func(params, *args, **kwargs)\n\n if database:\n adj_params = params.copy().set_index(\"name\")[\"value\"]\n cp_data = {\"value\": comparison_plot_data[\"value\"].to_numpy()}\n crit_val = {\"value\": criterion_value}\n timestamp = {\"value\": dt.now()}\n\n append_rows(\n database=database,\n tables=tables,\n rows=[adj_params, crit_val, cp_data, timestamp],\n )\n\n return criterion_value\n\n return wrapper_log_evaluation\n\n if callable(func):\n return decorator_log_evaluation(func)\n else:\n return decorator_log_evaluation\n\n\ndef aggregate_criterion_output(aggregation_func):\n \"\"\"Aggregate the return of of criterion functions with non-scalar output.\n\n This helper allows to conveniently alter the criterion function of the user for\n different purposes. For example, the criterion function for maximum likelihood\n estimation passed to :func:`~estimagic.estimation.estimate.maximize_log_likelihood`\n returns the log likelihood contributions. 
For the maximization, we need the mean log\n likelihood and the sum for the standard error calculations.\n\n \"\"\"\n\n def decorator_aggregate_criterion_output(func):\n @functools.wraps(func)\n def wrapper_aggregate_criterion_output(params, *args, **kwargs):\n criterion_values, comparison_plot_data = func(params, *args, **kwargs)\n\n criterion_value = aggregation_func(criterion_values)\n\n return criterion_value, comparison_plot_data\n\n return wrapper_aggregate_criterion_output\n\n return decorator_aggregate_criterion_output\n\n\ndef log_gradient(database, names):\n \"\"\"Log the gradient.\n\n The gradient is a vector containing the partial derivatives of the criterion\n function at each of the internal parameters.\n\n \"\"\"\n\n def decorator_log_gradient(func):\n @functools.wraps(func)\n def wrapper_log_gradient(*args, **kwargs):\n gradient = func(*args, **kwargs)\n\n if database:\n data = [dict(zip(names, gradient))]\n append_rows(database, [\"gradient_history\"], data)\n\n return gradient\n\n return wrapper_log_gradient\n\n return decorator_log_gradient\n\n\ndef log_gradient_status(func=None, *, database, n_gradient_evaluations):\n \"\"\"Log the gradient status.\n\n The gradient status is between 0 and 1 and shows the current share of finished\n function evaluations to compute the gradients.\n\n This decorator can be used with and without parentheses and accepts only keyword\n arguments.\n\n \"\"\"\n counter = itertools.count(1)\n\n def decorator_log_gradient_status(func):\n @functools.wraps(func)\n def wrapper_log_gradient_status(params, *args, **kwargs):\n criterion_value, _ = func(params, *args, **kwargs)\n\n if database:\n c = next(counter)\n if n_gradient_evaluations is None:\n status = c\n else:\n status = (c % n_gradient_evaluations) / n_gradient_evaluations\n status = 1 if status == 0 else status\n update_scalar_field(database, \"gradient_status\", status)\n\n return criterion_value\n\n return wrapper_log_gradient_status\n\n if callable(func):\n return decorator_log_gradient_status(func)\n else:\n return decorator_log_gradient_status\n\n\ndef handle_exceptions(database, params, constraints, start_params, general_options):\n \"\"\"Handle exceptions in the criterion function.\n\n This decorator catches any exceptions raised inside the criterion function. If the\n exception is a :class:`KeyboardInterrupt` or a :class:`SystemExit`, the user wants\n to stop the optimization and the exception is raised\n\n For other exceptions, it is assumed that the optimizer proposed parameters which\n could not be handled by the criterion function. For example, the parameters formed\n an invalid covariance matrix which lead to an :class:`numpy.linalg.LinAlgError` in\n the matrix decompositions. 
Then, we calculate a penalty as a function of the\n criterion value at the initial parameters and some distance between the initial and\n the current parameters.\n\n \"\"\"\n\n def decorator_handle_exceptions(func):\n @functools.wraps(func)\n def wrapper_handle_exceptions(x, *args, **kwargs):\n try:\n out = func(x, *args, **kwargs)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception as e:\n # Adjust the criterion value at the start.\n start_criterion_value = general_options[\"start_criterion_value\"]\n constant, slope = general_options.get(\n \"criterion_exception_penalty\", (None, None)\n )\n constant = 2 * start_criterion_value if constant is None else constant\n slope = 0.1 * start_criterion_value if slope is None else slope\n raise_exc = general_options.get(\"criterion_exception_raise\", False)\n\n if raise_exc:\n raise e\n else:\n if database:\n exception_info = traceback.format_exc()\n p = reparametrize_from_internal(\n internal=x,\n fixed_values=params[\"_internal_fixed_value\"].to_numpy(),\n pre_replacements=params[\"_pre_replacements\"]\n .to_numpy()\n .astype(int),\n processed_constraints=constraints,\n post_replacements=(\n params[\"_post_replacements\"].to_numpy().astype(int)\n ),\n processed_params=params,\n )\n msg = (\n exception_info\n + \"\\n\\n\"\n + \"The parameters are\\n\\n\"\n + p[\"value\"].to_csv(sep=\"\\t\", header=True)\n )\n append_rows(database, \"exceptions\", {\"value\": msg})\n\n out = min(\n MAX_CRITERION_PENALTY,\n constant + slope * np.linalg.norm(x - start_params),\n )\n\n return out\n\n return wrapper_handle_exceptions\n\n return decorator_handle_exceptions\n\n\ndef nan_if_exception(func):\n \"\"\"Wrap func such that np.nan is returned if func raises an exception.\n\n KeyboardInterrupt and SystemExit are still raised.\n\n Examples:\n\n >>> @nan_if_exception\n ... def f(x, y):\n ... assert x + y >= 5\n >>> f(1, 2)\n nan\n\n >>> def f(x, y):\n ... assert x + y >= 5\n >>> g = nan_if_exception(f)\n >>> g(1, 2)\n nan\n\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_nan_if_exception(params, *args, **kwargs):\n try:\n out = func(params, *args, **kwargs)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n out = np.nan\n return out\n\n return wrapper_nan_if_exception\n\n\ndef de_scalarize(x_was_scalar):\n \"\"\"Create a function with non-scalar input and output.\n\n Examples:\n\n >>> @de_scalarize(True)\n ... def f(x):\n ... return x\n\n >>> f(3)\n Traceback (most recent call last):\n ...\n TypeError: 'int' object is not subscriptable\n\n >>> f(np.array([3]))\n array([3])\n\n >>> @de_scalarize(True)\n ... def g(x):\n ... 
return 3\n\n >>> g(np.ones(3))\n array([3])\n\n \"\"\"\n\n def decorator_de_scalarize(func):\n @functools.wraps(func)\n def wrapper_de_scalarize(x, *args, **kwargs):\n x = x[0] if x_was_scalar else x\n return np.atleast_1d(func(x, *args, **kwargs))\n\n return wrapper_de_scalarize\n\n return decorator_de_scalarize\n","sub_path":"estimagic/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":12717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"626400247","text":"#!/usr/bin/env python3\nimport sys\nimport time\nimport argparse\nfrom pwn import *\ncontext.update(arch='i386', os='linux')\n\ndef wait_for_prompt(r):\n print(r.recvuntil(b\"MUHAHAHAH: \"))\n\n#--------------------------------------------------------------------------\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Exploit the bins.')\n parser.add_argument('--dbg' , '-d', action=\"store_true\")\n args = parser.parse_args()\n exe = './bof3'\n\n if args.dbg:\n r = gdb.debug([exe], gdbscript=\"\"\"\n b *func\n continue\n \"\"\")\n else:\n r = process(exe)\n\n wait_for_prompt(r)\n payload = cyclic(1050)\n r.sendline(payload) \n\n # Drop to interactive console\n r.interactive()\n\n","sub_path":"bof3/step1.py","file_name":"step1.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"47093662","text":"\"\"\"Here functions are collected which prettify the plots and the way of coding.\"\"\"\n\ndef isValidFile( fileName ):\n\t\"\"\"This function can be used for argparse to check if the file exists.\"\"\"\n\timport argparse\n\timport os\n\tif not os.path.isfile( fileName ):\n\t\traise argparse.ArgumentTypeError( \"File %s does not exist\"%fileName )\n\telse:\n\t\treturn fileName\n\ndef getDatasetAbbr( fileName, slim=True ):\n\t\"\"\"Parses the abbrevation for a sample from a root fileName.\"\"\"\n\timport re\n\tprefix = \"slim\" if slim else \"\"\n\tmatch = re.match(\"%s(.*)_V.*_tree.root\"%prefix, fileName.split(\"/\")[-1] )\n\tif match:\n\t\treturn match.groups()[0]\n\telse:\n\t\treturn fileName\n\ndef datasetToLatex( datasetAbbr ):\n\t\"\"\"Translates the dataset name to a TLatex name\"\"\"\n\tsets = { \"AllQCD\": \"(#gamma+)QCD\",\n\t\t\t\"GJets\": \"#gamma+QCD\",\n\t\t\t\"TTbar\": \"t#bar{t}\",\n\t\t\t\"WJet\": \"W\"\n\t\t\t}\n\tfor part, label in sets.iteritems():\n\t\tif part in datasetAbbr:\n\t\t\treturn label\n\treturn datasetAbbr\n\ndef createDatasetLabel( datasetAbbr ):\n\timport ROOT\n\t\"\"\"Creates sample info which then can be printed.\"\"\"\n\tdatasetLabel = ROOT.TPaveText(.4,.94,.6,1, \"ndc\")\n\tdatasetLabel.SetFillColor(0)\n\tdatasetLabel.SetBorderSize(0)\n\tdatasetLabel.AddText( datasetToLatex(datasetAbbr) )\n\treturn datasetLabel\n\ndef randomName():\n\t\"\"\"\n\tGenerate a random string. This function is useful to give ROOT objects\n\tdifferent names to avoid overwriting.\n\t\"\"\"\n\tfrom random import randint\n\tfrom sys import maxint\n\treturn \"%x\"%(randint(0, maxint))\n\ndef roundToSignificantDigits(x, sig=2):\n\t\"\"\"Round number to 'sig' significant digits. 
If the number is large enough,\n\tjust print the integer.\n\t\"\"\"\n\tfrom math import log10, floor\n\tif x >= 10**(sig-1):\n\t\treturn int(round(x))\n\tif x>0:\n\t\treturn round(x, sig-int(floor(log10(x)))-1)\n\telif x<0:\n\t\treturn round(-x, sig-int(floor(log10(-x)))-1)\n\treturn x\n\ndef myLegend( x1, y1, x2=0,y2=0 ):\n\timport ROOT\n\tif x2==0 or y2==0:\n\t\tstyle = ROOT.gROOT.GetStyle(\"tdrStyle\")\n\t\tx2 = 1 - style.GetPadRightMargin()\n\t\ty2 = 1 - style.GetPadTopMargin()\n\tleg = ROOT.TLegend(x1,y1,x2,y2)\n\tleg.SetFillColor(0)\n\tleg.SetBorderSize(0)\n\treturn leg\n\ndef readAxisConf( plot, configurationFileName=\"axis.cfg\" ):\n\t\"\"\"Read the configuration file for the axis.\n\treturns the label, the unit and the binning as list if avaible\n\t\"\"\"\n\timport ConfigParser\n\tconfiguration = ConfigParser.SafeConfigParser()\n\tconfiguration.read( configurationFileName )\n\t#brackets are identified as sections, so they have to be deleted\n\tplot = plot.replace(\"[\",\"\").replace(\"]\",\"\")\n\tif not configuration.has_section( plot ):\n\t\treturn \"\",\"\",\"\"\n\tlabel = configuration.get( plot, \"label\" )\n\tunit = configuration.get( plot, \"unit\" )\n\tbinning = configuration.get( plot, \"binning\" )\n\tif binning:\n\t\tbinning = map(float, binning.split(\" \"))\n\telse:\n\t\tbinning = []\n\treturn label, unit, binning\n\ndef getLumiWeight( datasetAbbr, nGenEvents, integratedLumi=19800, configName=\"dataset.cfg\" ):\n\timport ConfigParser\n\tdatasetConf = ConfigParser.SafeConfigParser()\n\tdatasetConf.read( configName )\n\tif datasetConf.has_option( datasetAbbr, \"crosssection\" ):\n\t\tcrosssection = datasetConf.getfloat( datasetAbbr, \"crosssection\" )\n\telse:\n\t\traise NameError( \"Configuration for %s not found\"%datasetAbbr )\n\n\treturn 1. 
* integratedLumi * crosssection / nGenEvents\n\ndef manipulateSaveName( saveName ):\n\t\"\"\"Replace some charakters, so root nor unix have problems to read them.\"\"\"\n\t#saveName = saveName.replace(\"/\",\"VS\")\n\tsaveName = saveName.replace(\" \",\"_\")\n\tunallowedCharacters = [\"{\",\"}\",\"(\",\")\",\"#\",\"|\",\".\",\"[\",\"]\",\"/\",\"$\"]\n\tfor char in unallowedCharacters:\n\t\tsaveName = saveName.replace( char, \"\" )\n\treturn saveName\n\ndef SaveAs( can, name, folder=\"plots\", endings=[\"pdf\"] ):\n\t\"\"\"Save ROOT.TCanvas in specified folder with a cleaned plot name.\"\"\"\n\tfor ending in endings:\n\t\tcan.SaveAs( folder+\"/\"+manipulateSaveName( name )+\".\"+ending )\n\nclass PlotCaption:\n\t\"\"\"Creates the superscription for each plot, eg\n\t'19fb^{-1} sqrt{s)=8TeV #geq1#gamma #geq2jets'\n\t\"\"\"\n\tdef __init__( self, x0=.96, y0=.96, analysisInfo=True, option=\"ndc\" ):\n\t\timport ROOT\n\t\tself.x0 = x0\n\t\tself.text = ROOT.TLatex( x0, y0, \"\" )\n\t\tself.text.SetTextSize(0.03)\n\t\tself.text.SetNDC()\n\t\tif analysisInfo:\n\t\t\tself.addAnalysisInfo()\n\n\tdef addAnalysisInfo( self, lumi=19800, e=8, defaultcuts=\"#geq1#gamma,#geq2jets\" ):\n\t\tself.text.SetText( self.text.GetX(), self.text.GetY(), \"%.1ffb^{-1} #sqrt{s}=%sTeV %s\"%( lumi/1000., e, defaultcuts ) )\n\n\tdef appendEnd( self, string ):\n\t\tnewText = self.text.GetTitle() + string\n\t\tself.text.Clear()\n\t\tself.text.SetText( self.text.GetX(), self.text.GetY(), newText )\n\n\tdef appendFront( self, string ):\n\t\tnewText = string + self.text.GetTitle()\n\t\tself.text.Clear()\n\t\tself.text.SetText( self.text.GetX(), self.text.GetY(), newText )\n\n\tdef controlCut( self ):\n\t\tself.appendEnd(\",#slash{E}_{T}<100GeV\")\n\n\tdef signalCut( self ):\n\t\tself.appendEnd(\",#slash{E}_{T}#geq100GeV\")\n\n\tdef Draw( self ):\n\t\timport ROOT\n\t\tshiftNDC = self.text.GetXsize() / ( ROOT.gPad.GetX2() - ROOT.gPad.GetX1() )\n\t\tself.text.SetX( self.x0-shiftNDC )\n\t\tself.text.Draw()\n\n\n","sub_path":"plotTree/prettifyFunctions.py","file_name":"prettifyFunctions.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"214627293","text":"# HELPER FUNCTIONS FOR ATTENTION AND MEMORY EXPERIMENT\n\n# Imports\nimport pandas as pd\nfrom psychopy import visual, event, core, data, gui, logging\nimport random\nimport os\nimport time\nimport csv\nimport pickle\n\n# Tiny helpers\n\ndef group_it(data, num):\n '''\n input: list of data items of any types\n output: ordered list containing length num sublists of inputted data\n '''\n return([data[i:i+num] for i in range(0, len(data), num)])\n\ndef flatten(the_list):\n '''\n input: list of lists\n output: single list containing all data, ordered, from nested lists\n '''\n return([val for sublist in the_list for val in sublist])\n\n# Data entry & organization functions\n\ndef subject_info(header):\n '''\n input: text to show at top of pop-up (string)\n path to data directory (string)\n Creates pop up box to obtain subject# and run#\n '''\n info = {}\n info['participant'] = ''\n info['run'] = ''\n dlg = gui.DlgFromDict(dictionary=info, title=header)\n if dlg.OK:\n return(info)\n else:\n print(\"Error!\")\n\ndef subject_directory(info, data_path, path_only=False):\n '''\n input: info - subject information (dictionary)\n data_path - path to data directory (string)\n output: path to subject-specific directory (string)\n If NOT path_only --> Creates subject directory if does not exist\n 
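The created directory is named <participant>_<date> under the given data directory.\n    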
'''\n\n dir_name = data_path + str(info['participant']) + '_' + data.getDateStr()[0:11] + '/'\n\n if not os.path.exists(dir_name) and not path_only:\n os.makedirs(dir_name)\n with open(dir_name + 'buttons_full.csv','wb') as output:\n wr = csv.writer(output, dialect='excel')\n wr.writerows([['Button', 'Time']])\n else:\n if not path_only:\n print('WARNING: subject directory exists already!')\n return(dir_name)\n\ndef pre_questionnaire(info, save=True, save_path='.'):\n '''\n Create pop up box to obtain and save subject's demographic info\n input: info - dictionary containing participant# and run#\n save - boolean indicating whether to autosave\n save_path - if save==True, path to data save location\n output: if save==True, save out data, return nothing\n if save==False, return questionnaire data\n '''\n\n preDlg = gui.Dlg()\n\n preDlg.addField('1. age')\n preDlg.addField('2. sex:', choices=['--', \"Male\", \"Female\", \"Other\", \"No Response\"])\n preDlg.addField('3. Are you hispanic or latino?', choices=['--', \"Yes\", \"No\"])\n preDlg.addText('')\n preDlg.addText('4. Race (check all that apply):')\n preDlg.addField('White', False)\n preDlg.addField('Black or African American', False)\n preDlg.addField('Native Hawaiian or other Pacific Islander', False)\n preDlg.addField('Asian', False)\n preDlg.addField('American Indian or Alaskan Native', False)\n preDlg.addField('Other', False)\n preDlg.addField('No Response', False)\n preDlg.addText('')\n preDlg.addField('5. Highest Degree Achieved:', choices = ['--', 'some high school', 'high school graduate', 'some college', \\\n 'college graduate', 'some graduate training', \"Master's\", 'Doctorate'])\n preDlg.addText('')\n preDlg.addText('6. Do you have any reading impairments')\n preDlg.addField('(e.g. dyslexia, uncorrected far-sightedness, etc.)', choices = ['--', \"Yes\", \"No\"])\n preDlg.addText('')\n preDlg.addField('7. Do you have normal color vision?', choices = ['--', \"Yes\", \"No\"])\n preDlg.addText('')\n preDlg.addText('8. Are you taking any medications or have you had')\n preDlg.addText('any recent injuries that could')\n preDlg.addField('affect your memory or attention?', choices = ['--', \"Yes\", \"No\"])\n preDlg.addText('')\n preDlg.addField('9. If yes to question above, describe')\n preDlg.addText('')\n preDlg.addText('10. How many hours of sleep did')\n preDlg.addField('you get last night? (enter only a number)')\n preDlg.addText('')\n preDlg.addText('11. How many cups of coffee')\n preDlg.addField('have you had today? (enter only a number)')\n preDlg.addText('')\n preDlg.addField('12. How alert are you feeling?:', choices=['--', \"Very sluggish\", \"A little slugglish\", \"Neutral\", \"A little alert\", \"Very alert\"])\n\n end_data = preDlg.show()\n\n if save == True:\n name = save_path + 'pre_questionnaire_' + info['participant'] + '.pkl'\n with open(name, 'wb') as f:\n pickle.dump(end_data, f)\n else:\n return(end_data)\n\ndef post_questionnaire(info, save=True, save_path='.'):\n '''\n Create pop up box to obtain and save subject's demographic info\n input: info - dictionary containing participant# and run#\n save - boolean indicating whether to autosave\n save_path - if save==True, path to data save location\n output: if save==True, save out data, return nothing\n if save==False, return questionnaire data\n '''\n\n # end of task questionnaire\n postDlg = gui.Dlg(title=\"Post Questionnaire\")\n postDlg.addField('1. 
How engaging did you find this experiment?', choices=['--', \"Very engaging\", \"A little engaging\", \"Neutral\", \"A little boring\", \"Very boring\"])\n postDlg.addField('2. How tired do you feel?', choices=['--', \"Very tired\", \"A little tired\", \"Neutral\", \"A little alert\", \"Very alert\"])\n postDlg.addField('3. Did you find one category easier to remember? If so, which one and why?')\n postDlg.addField('4. Did you find one side easier to attend to? If so, which one?')\n postDlg.addField('5. What strategies did you use (if any) to help remember the attended images?')\n\n end_data = postDlg.show()\n\n if save == True:\n name = save_path + 'post_questionnaire_' + info['participant'] + '.pkl'\n with open(name, 'wb') as f:\n pickle.dump(end_data, f)\n else:\n return(end_data)\n\ndef buttons_full(paths, keys, absolute_time):\n '''\n Appends key press and time stamp to subject's key-press log (buttons_full.csv)\n input: paths - paths to relevant directories (dictionary)\n keys - button presses to be saved to file (list of strings or nested lists of strings)\n absolute_time - timestamp to be written to file with key press\n '''\n with open(paths['subject'] + 'buttons_full.csv','a') as output:\n wr = csv.writer(output, dialect='excel')\n wr.writerows([[keys, absolute_time]])\n\n\n# Functions for Creating Trial Parameters & Visual Stimuli\n\ndef cue_create(params):\n '''\n input: params - experiment parameters (stimulus display times, etc.) (dictionary)\n output: three lists (length total-trials-in-experiment) assigning cued side,\n cued category, and cue validity for each trial\n '''\n\n presentations_per_run = params['presentations_per_run']\n runs = params['runs']\n\n # create tuples, one per trial, chunked by block, that assign: cued side, cued category\n cued_side = ['<']*int(presentations_per_run*runs/2)+['>']*int(presentations_per_run*runs/2)\n cued_category = flatten([['Face']*int(presentations_per_run*runs/4)+['Place']*int(presentations_per_run*runs/4)]*2)\n\n # validity (attention RT)\n raw_invalid = int(params['invalid_cue_percentage']*presentations_per_run*runs/100)\n num = (presentations_per_run*runs)-raw_invalid\n validity = [0]*raw_invalid+[1]*num\n validity = random.sample(validity, len(validity))\n\n # chunk trials by block and randomize\n cue_tuples_0 = list(zip(cued_side, cued_category))\n chunk_tuples = [cue_tuples_0[i:i+presentations_per_run] for i in range(0, len(cue_tuples_0), presentations_per_run)]\n\n # while any blocks repeat cues back-to-back, reshuffle\n cue_tuples = random.sample(chunk_tuples, len(chunk_tuples))\n reshuffle = True\n\n while reshuffle==True:\n for idx,x in enumerate(cue_tuples[1:-1]):\n if x[0]==cue_tuples[idx+1][0] or x[0]==cue_tuples[idx-1][0]:\n cue_tuples = random.sample(chunk_tuples, len(chunk_tuples))\n pass\n elif idx==len(cue_tuples[1:-1])-1 and not (x[0]==cue_tuples[idx+1][0] or x[0]==cue_tuples[idx-1][0]):\n reshuffle=False\n\n cue_tuples = flatten(cue_tuples)\n final = [[x[0] for x in cue_tuples],[x[1] for x in cue_tuples],validity]\n\n # return list for each\n return(final)\n\ndef trial_setup(params):\n '''\n input: params - experiment parameters (stimulus display times, etc.) 
(dictionary)\n output: lists to assign subject number, run number, and trial type to every row\n of trial x parameter dataframe for single subject\n '''\n run = []\n trial_type = []\n\n for x in range(params['runs']):\n trial_type.extend(['Presentation']*params['presentations_per_run'])\n trial_type.extend(['Memory']*params['presentations_per_run']*params['mem_to_pres'])\n run.extend([x]*params['presentations_per_run']*(params['mem_to_pres']+1))\n\n return([run, trial_type])\n\ndef presentation_images(presentation):\n '''\n input: list of composite images for display in presentation runs\n output: dict with keys 'Cued' and 'Uncued', each containing three lists\n (composite images, single place images, and single face images)\n '''\n images = {}\n cued = presentation[0:int(len(presentation)/2)]\n uncued = presentation[int(len(presentation)/2):]\n\n for x,y in zip(['Cued','Uncued'],[cued, uncued]):\n images[x] = {'composite':y, 'place':img_split(y, cat=True)['place_im'], 'face':img_split(y, cat=True)['face_im']}\n\n return(images)\n\ndef img_split(image_list, cat = False):\n '''\n Splits overlay image filenames into filenames of the original, single images\n input : list of composite image filenames\n output : if cat==False, list of single image filenames\n if cat==True, two lists of single image filenames (listed by category)\n '''\n\n split = [words for segments in image_list for words in segments.split('_')]\n a = [word+'.jpg' for word in split if word[-3:]!='jpg']\n b = [word for word in split if word[-3:]=='jpg']\n glom = a+b\n\n if cat == False:\n return(glom)\n\n else:\n return({'face_im':a, 'place_im':b})\n\ndef memory_image(presentation, memory):\n '''\n inputs: list of all presentation images\n list of all novel memory images\n outputs: list of all images for memory trials\n (half novel, and even proportions of prev seen cued/uncued, face/house)\n '''\n\n # parse cued/uncued presentation composites\n cued = presentation[0:int(len(presentation)/2)]\n uncued = presentation[int(len(presentation)/2):]\n # parse novel single images\n memory_face = img_split(memory, cat=True)['face_im']\n memory_place = img_split(memory, cat=True)['place_im']\n\n # group by trials\n cued = group_it(cued, 10)\n uncued = group_it(uncued, 10)\n memory_face = group_it(memory_face, 10)\n memory_place = group_it(memory_place, 10)\n\n # append the split singles from all selected images (1/2 prev seen, and all chosen for memory)\n all_singles = []\n for x in range(len(cued)):\n singles = []\n singles.extend(img_split(random.sample(cued[x],int(len(cued[x])/2))))\n singles.extend(img_split(random.sample(uncued[x],int(len(uncued[x])/2))))\n singles.extend(memory_face[x])\n singles.extend(memory_place[x])\n singles = random.sample(singles, len(singles))\n all_singles.extend(singles)\n return(all_singles)\n\ndef initialize_df(info, categories, paths, params):\n '''\n Creates dataframe for all trials (presentation and memory) for a single subject,\n including all trial-wise parameters (with empty cells as placeholders (None type)\n for impending data collection) and saves a copy to csv in subject's data directory\n input: info- subject information (dictionary)\n categories- image categories information (dictionary)\n paths- paths to subject-relevant directories (dictionary)\n params- experiment parameters (dictionary)\n output: dataframe containing parameters, image stim, and info, for all trials\n '''\n\n total_pres = params['presentations_per_run']*params['runs']\n\n # create column names\n columns = ['Subject', 
'Trial Type', 'Run', 'Cued Composite', 'Uncued Composite', 'Cued Face',\n 'Cued Place', 'Uncued Face', 'Uncued Place', 'Memory Image', 'Category', 'Cued Side',\n 'Cued Category', 'Attention Reaction Time (s)', 'Familiarity Reaction Time (s)',\n 'Familiarity Rating', 'Attention Level', 'Cue Validity', 'Post Invalid Cue', 'Pre Invalid Cue',\n 'Attention Button', 'Rating History', 'Stimulus Onset', 'Stimulus End', 'Attention Probe']\n\n df = pd.DataFrame(index = range(total_pres*5), columns=columns)\n\n # add subject#, run#, trial types, cues\n df['Subject'] = info['participant']\n df['Run'],df['Trial Type'] = trial_setup(params)\n mask = df['Trial Type']=='Presentation'\n df.loc[mask,'Cued Side'],df.loc[mask,'Cued Category'],df.loc[mask,'Cue Validity'] = cue_create(params)\n df.loc[mask, 'Attention Probe'] = random.sample(['o']*(len(df.loc[mask].index)/2) + ['x']*(len(df.loc[mask].index)/2), len(df.loc[mask].index))\n\n # Select composite images\n composites = random.sample(os.listdir(paths['stim_path']+'composite/'), total_pres*(params['mem_to_pres']-1))\n presentation = composites[0:int(len(composites)*2/3)]\n memory = composites[int(len(composites)*2/3):]\n\n # add presentation images\n pres_dict = presentation_images(presentation)\n\n for cue in ['Cued','Uncued']:\n df.loc[mask, cue+' Composite']=pres_dict[cue]['composite']\n df.loc[mask, cue+' Face']=pres_dict[cue]['face']\n df.loc[mask, cue+' Place']=pres_dict[cue]['place']\n\n # add memory images\n mask2 = df['Trial Type']=='Memory'\n df.loc[mask2, 'Memory Image']= memory_image(presentation, memory)\n\n # save dataframe\n df.to_csv(paths['subject']+'intial_df.csv')\n return(df)\n\ndef cue_stim(win, side, category, stim_dir):\n '''\n inputs: win (psychopy visual window), cue side (string),\n cue category (string), stimulus directory (string)\n outputs: appropriate cue or fixation stimulus for center screen\n '''\n\n stim1 = visual.ImageStim(win, image=stim_dir+'cue/'+category+'.png', size=2)\n\n stim2 = visual.TextStim(win=win, ori=0, name='cue_side', text = side, font='Arial',\n height=2, color='lightGrey', colorSpace='rgb', opacity=1, depth=0.0)\n\n return([stim1,stim2])\n\ndef fix_stim(win):\n \"\"\"\n input: psychopy visual window\n output: central fixation stimulus for display in window\n \"\"\"\n stim1 = visual.TextStim(win=win, ori=0, name='fixation_cross', text='+', font='Arial',\n height = 2, color='lightGrey', colorSpace='rgb', opacity=1, depth=0.0)\n return(stim1)\n\ndef cued_pos(side, validity=True):\n \"\"\"\n input: cued side for a given trial (string), desired stimulus validity (bool)\n output: x-axis screen location for the stimulus (int)\n \"\"\"\n\n if side == '>' and validity==True:\n pos = 8\n if side == '>' and validity==False:\n pos = -8\n if side == '<' and validity==True:\n pos = -8\n else:\n pos = 8\n\n return(pos)\n\ndef composite_pair(win, cued, uncued, side, stim_dir, practice=False):\n \"\"\"\n input: win (psychopy visual window), cue side (string),\n cue category (string), stimulus directory (string)\n output: list of two composite image stimuli (with stim location, size, etc)\n for display in presentation trial\n \"\"\"\n cued_position = cued_pos(side)\n\n if practice:\n dir = 'practice_composite/'\n else:\n dir = 'composite/'\n\n cued = stim_dir+dir+cued\n uncued = stim_dir+dir+uncued\n\n probe1 = visual.ImageStim(win, cued, size=7, name='Probe1')\n probe1.setPos( [cued_position, 0] )\n\n probe2 = visual.ImageStim(win, uncued, size=7, name='Probe2')\n probe2.setPos( [-cued_position, 0] )\n\n 
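# the cued composite sits at the cued screen position; the uncued composite is mirrored across fixation\n    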
return(probe1, probe2)\n\ndef probe_stim(win, cued_side, validity, text):\n \"\"\"\n input: trial-wise cued side and validity\n output: attention check stimulus for display (with location, size, etc)\n \"\"\"\n probe = visual.TextStim(win=win, ori=0, name='posner', text=text, font='Arial', height = 2, color='lightGrey',\n colorSpace='rgb', opacity=1, depth=0.0)\n\n cued_position = cued_pos(cued_side, validity=validity)\n probe.setPos([cued_position, 0])\n return(probe)\n\ndef display(win, stim_list, frames, accepted_keys=None, trial=0, df=None, path=None):\n \"\"\"\n Displays all stimuli (from stim_list) in window simultaneously, for desired number of frames.\n If accepted_keys list passed, displays until key press; else, displays for 'frames' number of frames\n if both dataframe and trial# passed, saves reaction time to corresponding trial row in df\n inputs:\n win - visual window\n stim_list - list of psychopy visual Stimuli\n frames - int\n accepted_keys - list of strings, or None\n trial - int\n df - pandas dataframe of trial information\n \"\"\"\n\n rt = None\n resp = None\n resp_clock = core.Clock()\n\n for x in stim_list:\n x.setAutoDraw(True)\n win.flip()\n\n for frame_n in range(frames):\n absolute_time = time.time()\n\n # if not visual rating scale\n if not any(type(x) is visual.RatingScale for x in stim_list):\n keys = event.getKeys(timeStamped=True)\n else:\n keys=[]\n\n # if displaying images\n if df is not None:\n if keys != []:\n buttons_full(path, keys, absolute_time)\n if frame_n == 0:\n df.loc[trial, 'Stimulus Onset'] = absolute_time\n if frame_n == range(frames)[-1]:\n df.loc[trial, 'Stimulus End'] = absolute_time\n\n # attention probe\n elif type(accepted_keys)==list:\n\n if frame_n == 0:\n resp_clock.reset()\n\n if keys != []:\n if any(x[0] in accepted_keys for x in keys):\n if not any(type(x) is visual.RatingScale for x in stim_list):\n resp = keys[0][0]\n rt = resp_clock.getTime()\n break\n else:\n buttons_full(path, keys, rt)\n else:\n buttons_full(path, keys, absolute_time)\n\n if resp == None and frame_n == range(frames)[-1] and type(x) is not visual.RatingScale:\n key_wait = event.waitKeys(keyList = accepted_keys)\n resp = key_wait[0]\n rt = resp_clock.getTime()\n\n # fixation\n else:\n if keys != []:\n buttons_full(path, keys, absolute_time)\n\n win.flip()\n\n for x in stim_list:\n x.setAutoDraw(False)\n\n if type(x) is visual.RatingScale:\n choice_history = x.getHistory()\n df[\"Familiarity Rating\"].loc[trial],df['Familiarity Reaction Time (s)'].loc[trial] = rating_pull(choice_history)\n\n win.flip()\n\n return([rt, resp])\n\n\ndef pause(win, frames):\n \"\"\"\n Pauses experiment in given window (win) for 'frames' (int) number of frames\n input: win- psychopy visual window\n frames- number of frames (int)\n \"\"\"\n for frame_n in range(frames):\n win.flip()\n\ndef memory_stim(win, image, stim_dir, practice=False, practice_single=False):\n \"\"\"\n Return single image stimulus for display in memory trial\n \"\"\"\n if practice:\n image = stim_dir+'practice_composite/'+image\n elif practice_single:\n image = stim_dir+'practice_single/'+image\n else:\n image = stim_dir+'single/'+image\n\n im = visual.ImageStim(win, image, size=7, name='mem_image')\n im.setPos([0, 0])\n return(im)\n\ndef rating_pull(rating_tuple):\n '''\n Pulls subject's rating out of rating tuple\n input- rating scale tuple\n '''\n if len(rating_tuple)>1:\n rating = rating_tuple[1][0]\n rt = rating_tuple[1][1]\n else:\n rating = rating_tuple[0][0]\n rt = rating_tuple[0][1]\n return(rating, 
rt)\n\n# Functions to Execute Presentation & Memory Runs\n\ndef presentation_run(win, run, pres_df, params, timing, paths):\n \"\"\"\n Displays a full presentation run, saves out data to csv\n inputs:\n win - visual window\n run - run number (int)\n paths - paths to subject-relevant directories (dictionary)\n params - experiment parameters (dictionary)\n timing - stimulus display times (dictionary)\n pres_df - all trial info for current presentation block (dataframe)\n \"\"\"\n\n first_row = pres_df.index.values[0]\n\n # Create cue, fixation, and validity stim\n cue1, cue2 = cue_stim(win, pres_df['Cued Side'][first_row], pres_df['Cued Category'][first_row], paths['stim_path'])\n cue1.setPos( [0, 2] )\n cue2.setPos( [0, 0] )\n fixation = fix_stim(win)\n\n # flash cue\n display(win, [cue1,cue2], timing['cue'], path = paths)\n\n # start fixation\n fixation.setAutoDraw(True)\n pause(win, timing['pause'])\n\n for trial in pres_df.index.values:\n\n # make stim\n images = composite_pair(win, pres_df['Cued Composite'].loc[trial],pres_df['Uncued Composite'].loc[trial], pres_df['Cued Side'][trial], paths['stim_path'])\n circle = probe_stim(win, pres_df['Cued Side'][trial], pres_df['Cue Validity'][trial], pres_df['Attention Probe'][trial])\n\n # display images\n display(win, images, timing['probe'], accepted_keys=None, trial=trial, df=pres_df, path = paths)\n pres_df['Attention Reaction Time (s)'].loc[trial], pres_df['Attention Button'].loc[trial] = display(win, [circle], timing['probe'], accepted_keys=['1','3'], path = paths)\n pause(win, timing['pause'])\n\n pres_df.to_csv(paths['subject']+'pres'+str(run)+'.csv')\n\n fixation.setAutoDraw(False)\n\ndef memory_run(win, run, mem_df, params, timing, paths, test = False):\n \"\"\"\n Displays full memory run, saves out data to csv\n inputs:\n win - visual window\n run - run number (int)\n paths - paths to subject-relevant directories (dictionary)\n params - experiment parameters (dictionary)\n timing - stimulus display times (dictionary)\n mem_df - all trial info for current memory block (dataframe)\n \"\"\"\n fixation = fix_stim(win)\n\n for trial in mem_df.index.values:\n\n display(win, [fixation], timing['pause'], path = paths)\n\n rating_scale = visual.RatingScale( win, low = 1, high = 4, labels=['unfamiliar','familiar'], scale='1 2 3 4',\n pos = [0,-.42], acceptPreText = '-',\n maxTime=3.0, minTime=0, marker = 'triangle', showAccept=False, acceptSize=0, singleClick = True)\n\n resp_clock = core.Clock()\n im_path = paths['stim_path']+'single/'+mem_df['Memory Image']\n image = memory_stim(win, mem_df['Memory Image'][trial], paths['stim_path'])\n\n display(win, [image, rating_scale], timing['mem'], accepted_keys=['1','2','3','4'], trial=trial, df=mem_df, path = paths)\n mem_df.to_csv(paths['subject']+'mem'+str(run)+'.csv')\n\n\n# Functions to Display Instruction Text and Practice Trials\n\ndef pract_text(trial):\n \"\"\"\n input: current practice trial # (int)\n output: practice instruction text (string) for given practice trial\n \"\"\"\n\n intro = '\\n\\n Thank you for participating in this experiment! ' \\\n '\\n\\n In the experiment, you will pay attention to specific items on the screen.' \\\n '\\n Then, we will test your memory for some of the items you have seen. ' \\\n '\\n\\n Press any key to continue... '\n\n # PRACTICE\n pract1 = ' You will see many images like the one below.' \\\n '\\n You will need to pay special attention to either the FACE or SCENE. 
' \\\n '\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n Press any key to continue...'\n\n pract2 = ' Let\\'s practice now! \\n Look straight at the image and focus as hard as you can on the FACE. ' \\\n '\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n When you can focus on the FACE well, press any key... '\n\n pract3 = ' Great job! ' \\\n '\\n Now, focus as hard as you can on the SCENE. ' \\\n '\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n When you can focus on the SCENE well, press any key... '\n\n pract4 = ' Next, you will see a cross and two images on the screen. ' \\\n '\\n\\n Keep your eyes staring straight at the cross, ' \\\n 'but try to focus on the SCENE on the LEFT. ' \\\n '\\n\\n Only your attention should shift, not your eyes!' \\\n '\\n You will not see the image perfectly clearly, just do your best, and feel free to ask questions!' \\\n '\\n\\n Press any key to begin. '\n\n pract5 = '\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n When you are done, press any key to continue... '\n\n pract6 = '\\n\\n Great job! ' \\\n '\\n This time, keeping your eyes at center, try and focus on the FACE on the RIGHT.' \\\n '\\n\\n Press any key to begin.' \\\n\n pract7 = '\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n When you are done, press any key to continue... '\n\n pract8 = ' Now, you will practice ' \\\n 'attending to parts of images based on cue icons. ' \\\n '\\n\\n First, you\\'ll see a pair of cue icons: ' \\\n '\\n One arrow icon pointing left or right (< or >) ' \\\n ' and one image icon (face or scene): ' \\\n '\\n\\n\\n\\n\\n\\n After the cue icons, you will see several image pairs in a row. You\\'ll attend to the SAME cued side and image part for EVERY pair.' \\\n ' Remember to keep your eyes fixated on the cross! ' \\\n '\\n\\n Press any key to begin.'\n\n pract9 = ' Great job, let\\'s try it one more time!' \\\n '\\n\\n This time will be the same, but after each pair, a letter (\"x\" or \"o\") will appear on one side.' \\\n '\\n When you see the letter, you should immediately press a button! ' \\\n '\\n\\n If the \"x\" appears, press 1 ' \\\n '\\n If the \"o\" appears, press 3 ' \\\n '\\n\\n Remember to respond as quickly as you can!' \\\n '\\n Press any key to begin.'\n\n pract10 = '\\n\\n Finally, you will practice reporting which images you remember. ' \\\n '\\n You will use the following scale to rate individual images displayed on the screen: ' \\\n '\\n\\n (1) I definitely have not seen the image before' \\\n '\\n (2) I probably have not seen the image before' \\\n '\\n (3) I probably have seen the image before' \\\n '\\n (4) I definitely have seen the image before' \\\n '\\n\\n You will need to respond quickly -- you\\'ll have just 2 seconds!' \\\n '\\n\\n When you\\'re ready to begin, press any key.'\n\n instructions = [intro, pract1, pract2, pract3, pract4, pract5, pract6, pract7, pract8, pract9, pract10]\n\n return(instructions[trial])\n\ndef mem_text(trial):\n \"\"\"\n input: current memory trial # (int)\n output: memory instruction text (string) for given memory trial\n \"\"\"\n\n mem1 = ' Now we\\'re going to test your memory. ' \\\n '\\n Just like the practice round, you will rate single images using the following scale: ' \\\n '\\n\\n (1) I definitely have not seen the image before' \\\n '\\n (2) I probably have not seen the image before' \\\n '\\n (3) I probably have seen the image before' \\\n '\\n (4) I definitely have seen the image before' \\\n '\\n\\n You will need to make your responses quickly -- you\\'ll have just 2 seconds. 
' \\\n ' If you aren\\'t sure what to say for a particular image, make your best guess! ' \\\n '\\n\\n Press any key to begin.'\n\n mem2 = ' MEMORY BLOCK. ' \\\n '\\n\\n Press any key to begin.'\n\n instructions = [mem1, mem2]\n\n if trial >= 1:\n num = 1\n else:\n num = 0\n\n return(instructions[num])\n\ndef pres_text(trial):\n \"\"\"\n input: current presentation trial # (int)\n output: presentation instruction text (string) for given presentation trial\n \"\"\"\n pres1 = ' Now we will begin the main experiment! ' \\\n 'Again you will see cue icons, followed by a series of image pairs and letters (and a fixation cross).' \\\n '\\n\\n Remember to: ' \\\n '\\n\\n Keep your eyes staring at the cross' \\\n '\\n Shift your attention to the SAME cued side and part for EACH pair' \\\n '\\n Immeditaely press 1 (\"x\") or 3 (\"o\") when you see the letter ' \\\n '\\n\\n Do you have questions? Ask them now! ' \\\n '\\n Otherwise, position your hand over the 1 and 3 buttons, clear your mind, and press any key to begin. '\n\n pres2 = ' Feel free to take a moment to rest, if you like! ' \\\n ' When you\\'re ready, we will do another round with a cue, followed by image pairs and letters.' \\\n ' \\n\\n Remember to: ' \\\n '\\n Keep your eyes staring at the cross' \\\n '\\n Shift your attention to the SAME cued side and part for EACH pair' \\\n '\\n Immeditaely press 1 (\"x\") or 3 (\"o\") when you see the letter ' \\\n '\\n\\n Press any key to begin. '\n\n instructions = [pres1, pres2]\n\n if trial >= 1:\n num = 1\n else:\n num = 0\n\n return(instructions[num])\n\n\ndef text_present(win, text, close=False, timing=None):\n '''\n Displays text on screen, until button press\n input: window (psychopy object) and text (str)\n '''\n instruction = visual.TextStim(win, text=text, wrapWidth=40)\n instruction.setAutoDraw(True)\n win.flip()\n\n key = event.waitKeys(keyList=None)\n\n if close:\n pause(win, timing['pause'])\n win.close()\n else:\n instruction.setAutoDraw(False)\n win.flip()\n\ndef practice_instructions(win, paths, text, pract_run, timing, acceptedKeys = [], practice=False):\n '''\n Sequentially presents instruction text, images, and practice trials\n '''\n\n # make list of stim for this practice_round\n cat_cues = [paths['stim_path']+'cue/'+x for x in ['Face.png', 'Place.png']]\n composites = os.listdir(paths['stim_path']+'practice_composite')\n singles = os.listdir(paths['stim_path']+'practice_single')\n instruction = visual.TextStim(win, text=text, wrapWidth=40)\n ims = [instruction]\n\n # center composite\n if pract_run in [1,2,3]:\n ims.append(memory_stim(win, composites[pract_run-1], paths['stim_path'], practice=True))\n\n # composite pair, fixation\n elif pract_run in [5,7]:\n image1,image2 = [composites[pract_run-1], composites[pract_run-2]]\n ims.extend(composite_pair(win, composites[pract_run-1], composites[pract_run-2], '>', paths['stim_path'], practice=True))\n ims.append(fix_stim(win))\n\n # face cue, place cue\n elif pract_run == 8:\n for x,pos in zip(cat_cues, [2.5, -2.5]):\n cue = visual.ImageStim(win, x, size=2)\n cue.setPos([pos, 0])\n ims.append(cue)\n\n # display stim until button press\n for x in ims:\n x.setAutoDraw(True)\n\n win.flip()\n event.waitKeys(keyList=None)\n\n for x in ims:\n x.setAutoDraw(False)\n win.flip()\n\n # dynamic practice trials\n # pract_pres1\n if pract_run ==8:\n pract_pres(win, paths, composites[-12:-6], timing, circle=False)\n\n # pract_pres2\n if pract_run == 9:\n pract_pres(win, paths, composites[-6:], timing, circle=True)\n\n # pract_mem\n elif 
pract_run == 10:\n pract_mem(win, singles, paths, timing)\n\ndef pract_pres(win, paths, im_list, timing, circle=False):\n \"\"\"\n Present dynamic practice presentation runs\n \"\"\"\n\n cue1, cue2 = cue_stim(win, '>', 'Face', paths['stim_path'])\n\n cue1.setPos([0, 2])\n cue2.setPos([0, 0])\n\n display(win, [cue1,cue2], timing['cue'], path = paths)\n pause(win, timing['pause'])\n\n fix = fix_stim(win)\n fix.setAutoDraw(True)\n text=['x','o','o']\n validity_list = [1, 1, 0]\n\n for x in range(3):\n stim = composite_pair(win, im_list[x*2], im_list[x*2+1], '<', paths['stim_path'], practice=True)\n display(win, stim, timing['probe'], path = paths)\n\n if circle:\n circle = probe_stim(win, '<', validity_list[x], text=text[x])\n display(win, [circle], timing['probe'], accepted_keys=['1','3'], path = paths)\n\n pause(win, timing['pause'])\n fix.setAutoDraw(False)\n\ndef pract_mem(win, im_list, paths, timing):\n \"\"\"\n Display dynamic practice memory runs\n \"\"\"\n\n fixation = fix_stim(win)\n\n for trial in range(4):\n rating_scale = visual.RatingScale( win, low = 1, high = 4, labels=['unfamiliar','familiar'], scale='1 2 3 4',\n singleClick = True, pos = [0,-.42], acceptPreText = '-',\n maxTime=3.0, minTime=0, marker = 'triangle', showAccept=False, acceptSize=0)\n\n image = memory_stim(win, im_list[trial], paths['stim_path'], practice_single=True)\n display(win, [fixation], timing['pause'], path = paths)\n\n event.getKeys(keyList = None)\n for frame_n in range(timing['mem']):\n image.setAutoDraw(True)\n rating_scale.setAutoDraw(True)\n win.flip()\n choice_history = rating_scale.getHistory()\n rating_scale.setAutoDraw(False)\n image.setAutoDraw(False)\n win.flip()\n","sub_path":"sustained_attention_experiment/code/experiment_helpers.py","file_name":"experiment_helpers.py","file_ext":"py","file_size_in_byte":34054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"372467544","text":"from Tkinter import *\n\n#to initialise , we need a root widget - a window with \n#title bar & other decoration as provided by window manager\nroot = Tk()\n\n#create label as child of the root window (can contain icons or images too)\n#fill with test\nlabel = Label( root, text=\"Hello, world!\")\n\n#pack sizes widget to fit the parent element and make itself visible\nlabel.pack()\n\n#begin the main event loop of our root element - handles events from\n#user (such as clicks and keys) and windowing system events and Tkinter's\n#events too including geometry management and display updates\n\n#makes the window appear\nroot.mainloop()\n","sub_path":"python/tkinter/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"53713829","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs\nimport pylab\nimport time\nfrom sklearn.cluster import KMeans\nimport pandas as pd\nfrom mpl_toolkits import mplot3d\n\nstrats = (\"plusplus\", \"uniform\", \"random\", \"choice\")\nmsg = (\"KMeans++\\n\", \"Равномерное распределение\\n\", \"Случайные числа\\n\", \"Выбор из данных точек\\n\")\n\n\ndef uniform(ratio, k, eps=0.1):\n a = int(np.sqrt(k))\n b = k // a + (k % a != 0)\n while abs(a / b - ratio) >= eps:\n if a == 1:\n break\n a -= 1\n b = k // a + (k % a != 0)\n return a, b\n\n\ndef dist(x1, y1, x2, y2):\n return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n\ndef strat_of_init(matr, strat=\"uniform\", 
k=2, show_work=-1, eps=0.0001):\n if k < 2:\n k = 2\n x = np.asarray(matr[:, 0])\n y = np.asarray(matr[:, 1])\n length = len(x)\n xc = np.zeros(k)\n yc = np.zeros(k)\n \n if strat == \"random\":\n xc = np.random.rand(k) * (x.max() - x.min()) + x.min()\n yc = np.random.rand(k) * (y.max() - y.min()) + y.min()\n if show_work >= 0:\n pylab.figure(show_work)\n plt.scatter(xc, yc, marker = \"*\", c = \"r\")\n return xc, yc\n if strat == \"choice\":\n r = np.random.choice(length, k, replace = False)\n xc = x[r]\n yc = y[r]\n if show_work >= 0:\n pylab.figure(show_work)\n plt.scatter(xc, yc, marker = \"*\", c = \"r\")\n return xc, yc\n if strat == \"plusplus\":\n r = np.random.choice(k, replace = False)\n xc[0] = x[r]\n yc[0] = y[r]\n for ready in range(k - 1):\n dx2 = np.zeros(length)\n sumdx2 = 0\n for i in range(length):\n dx2[i] = dist(xc[ready], yc[ready], x[i], y[i])\n for j in range(ready):\n dx2[i] = min(dx2[i], dist(xc[j], yc[j], x[i], y[i]))\n sumdx2 += dx2[i]\n sumdx2 *= np.random.rand()\n sumdx = 0\n for i in range(length):\n sumdx += dx2[i]\n if sumdx > sumdx2:\n xc[ready + 1] = x[i]\n yc[ready + 1] = y[i]\n break\n return xc, yc\n x_len = x.max()-x.min()\n y_len = y.max()-y.min()\n uni_x, uni_y = uniform(x_len/y_len, k, eps=eps)\n units = np.random.choice(uni_x * uni_y, k, replace=False)\n xc = x.min() + x_len / (2 * uni_x) + (units % uni_x) * (x_len / uni_x)\n yc = y.min() + y_len / (2 * uni_y) + (units // uni_y) * (y_len / uni_y)\n if show_work >= 0:\n plt.scatter(xc, yc, marker=\"*\", c=\"r\")\n for i in range(uni_x + 1):\n plt.axvline(x.min() + i * x_len / uni_x)\n for i in range(uni_y + 1):\n plt.axhline(y.min() + i * y_len / uni_y)\n return xc, yc\n\n\ndef filter_by_clust(a, i, length):\n return list(filter(lambda q: a[q] == i, range(length)))\n\n\ndef KMeans4(matr, k=2, seed=0, eps=0.0001, limit_of_rec=500, show_work=-1, strat = \"plusplus\"):\n np.random.seed(seed)\n x = np.asarray(matr[:, 0])\n y = np.asarray(matr[:, 1])\n length = len(x)\n \n xc, yc = strat_of_init(matr, strat = strat, k = k, show_work=show_work)\n matr = np.concatenate([x, y, np.random.randint(0, k, length, dtype = int)])\n matr = matr.reshape(3, length).T\n limit = limit_of_rec\n while True:\n limit -= 1\n if limit == 0:\n if show_work >= 0:\n print(\"Calculated with the achievement of the limit number of iterations: \", limit_of_rec)\n break\n for i in range(length):\n clast = k - 1\n dist_to_clast = dist(matr[i][0], matr[i][1], xc[k - 1], yc[k - 1])\n for j in range(k - 1):\n cur_dist = dist(matr[i][0], matr[i][1], xc[j], yc[j])\n if cur_dist < dist_to_clast:\n dist_to_clast = cur_dist\n clast = j\n matr[i][2] = clast\n sums = 0\n for i in range(k):\n fil = filter_by_clust(matr[:, 2], i, length)\n if len(fil) == 0:\n sums = eps + 1\n r = np.random.randint(length)\n xc[i] = x[r]\n yc[i] = y[r]\n else:\n xcc = xc[i]\n ycc = yc[i]\n xc[i] = np.mean(x[fil])\n yc[i] = np.mean(y[fil])\n sums += dist(xc[i], yc[i], xcc, ycc)\n if sums < eps:\n if show_work >= 0:\n print(\"Calculated with given accuracy, number of iterations =\", limit_of_rec - limit)\n break\n if show_work >= 0:\n plt.scatter(xc, yc, marker = \"v\", c = \"black\")\n return matr[:, 2], xc, yc\n\n\ndef best_K(matr, k_max=0, show_work=-1, eps=0.25, d_eps=0.0001, strat_mean = True, strat = \"plusplus\"):\n x = np.asarray(matr[:, 0])\n y = np.asarray(matr[:, 1])\n length = len(x)\n if k_max == 0:\n k_max = int(pow(length, 1/3)) + 2\n if k_max < 2:\n k_max = 3\n arr = np.array(range(k_max - 2)) + 2\n inter_max = np.zeros(k_max - 2)\n exter_min = 
np.zeros(k_max - 2)\n inter_mean = np.zeros(k_max - 2)\n \n for e in range(k_max - 2):\n a, xc, yc = KMeans4(matr, k = arr[e], strat = strat)\n flag = True\n inter_num = 0\n for i in range(arr[e]):\n j = i + 1\n while j < arr[e]:\n d = dist(xc[i], yc[i], xc[j], yc[j])\n if flag:\n exter_min[e] = d\n flag = False\n exter_min[e] = min(exter_min[e], d)\n j += 1\n fil = filter_by_clust(a, i, length)\n for j in fil:\n inter_num += 1\n d = dist(xc[i], yc[i], x[j], y[j])\n inter_mean[e] += d\n inter_max[e] = max(inter_max[e], d)\n inter_mean[e] /= inter_num\n out = np.diff(exter_min/inter_max)\n arr = arr[:-1]\n if strat_mean:\n out = np.diff(inter_mean)\n f = list(filter(lambda i: abs(out[i]) < eps, range(len(out))))\n while len(f)==0:\n eps += d_eps\n f = list(filter(lambda i: abs(out[i]) < eps, range(len(out))))\n if show_work >= 0:\n pylab.figure(0, figsize = (15, 10))\n plt.plot(arr, out)\n if strat_mean:\n return arr[f[0]], eps\n return arr[f[0] - 1], eps\n\n\ndef clusters_sorted(clust, xc, yc, x_s = 0, y_s = 0):\n k = xc.shape[0]\n length = clust.shape[0]\n cen_clust_sorted = sorted(list(range(k)), key = lambda i: dist(xc[i], yc[i], x_s, y_s))\n clust += k\n for i in range(k):\n f = filter_by_clust(clust, cen_clust_sorted[i] + k, length)\n clust[f] = i\n return clust, xc[cen_clust_sorted], yc[cen_clust_sorted]\n\n\ndef sort_by_clust(matr, clust, xc, yc, corner_left_lower = [True, True]):\n clust = np.asarray(clust)\n length = len(clust)\n k = len(xc)\n dot = np.zeros(2)\n for i in range(2):\n if corner_left_lower[i]:\n dot[i] = matr[:, i].min()\n else:\n dot[i] = matr[:, i].max()\n clust, xc, yc = clusters_sorted(clust, xc, yc, dot[0], dot[1])\n matr = np.concatenate([matr.T.ravel(), clust]).reshape(3, length).T\n matr = np.array(sorted(matr, key = lambda x: x[2]))\n return matr[:, :2], clust, xc, yc\n\n\ndef clusters_centers(matr, clust):\n k = clust.max() + 1\n length = matr.shape[0]\n x = np.asarray(matr[:, 0])\n y = np.asarray(matr[:, 1])\n xc = np.zeros(k)\n yc = np.zeros(k)\n for i in range(k):\n fil = filter_by_clust(clust, i, length)\n xc[i] = np.mean(x[fil])\n yc[i] = np.mean(y[fil])\n return xc, yc\n\n\ndef init_t_and_a_arr(n_tests):\n mytype = [(\"plusplus\", 'float32'), (\"uniform\", 'float32'), (\"random\", 'float32'), (\"choice\", 'float32')]\n t = np.array(np.zeros(n_tests), dtype = mytype)\n a = np.array(np.zeros(n_tests), dtype = mytype)\n return t, a\n\n\ndef strats_compar(k = 2, n = 400, n_tests = 10, gen_tests = True, tests = ()):\n if gen_tests:\n tests = tuple([(n, k)] * n_tests)\n else:\n n_tests = tests.shape[0]\n times, accurs = init_t_and_a_arr(n_tests)\n for i in range(len(tests)):\n x, _ = make_blobs(n_samples = tests[i][0], n_features = 2, centers = tests[i][1], random_state = i)\n true_time = time.time()\n true_cl = KMeans(n_clusters = tests[i][1], random_state = 4).fit_predict(x)\n true_time = time.time() - true_time\n xc, yc = clusters_centers(x, true_cl)\n true_cl, _, _ = clusters_sorted(true_cl, xc, yc)\n for strat in strats:\n t = time.time()\n cl, xc, yc = KMeans4(x, k = tests[i][1], strat = strat)\n t = time.time() - t\n cl, _, _ = clusters_sorted(cl, xc, yc)\n times[strat][i] = t / true_time\n overl = list(true_cl == cl).count(True)\n accurs[strat][i] = overl / tests[i][0]\n \n if gen_tests:\n pylab.figure(0, figsize = (15, 10))\n pylab.subplot(211)\n for i in range(len(strats)):\n plt.plot(range(n_tests), times[strats[i]], label = msg[i])\n plt.legend(loc=(1, 0.4))\n plt.grid(lw=2)\n plt.xlabel(\"Номера тестов\")\n plt.ylabel(\"Затраченное время\")\n 
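# second panel: per-test accuracy (label overlap with sklearn KMeans) for each initialisation strategy\n        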
pylab.subplot(212)\n for i in range(len(strats)):\n plt.plot(range(n_tests), accurs[strats[i]], label = msg[i])\n plt.legend(loc=(1, 0.4))\n plt.grid(lw=2)\n plt.xlabel(\"Номера тестов\")\n plt.ylabel(\"Точность\")\n plt.show()\n \n return times, accurs\n \n \ndef show_best_K(k, n, n_tests, strat_mean = False):\n for i in range(n_tests):\n x, _ = make_blobs(n_samples = n, n_features = 2, centers = k, random_state = i)\n best_K(x, show_work = 0, strat_mean = strat_mean)\n plt.xlabel(\"Количество кластеров\")\n if strat_mean:\n plt.ylabel(\"Среднее внутрикластерное расстояние\")\n else:\n plt.ylabel(\"Отношение минимального внешнекластерного и\\nмаксимального внутрикластерного расстояний\")\n plt.axvline(k)\n \n \ndef show_strats(n, k_for_means, k_for_blobs, after_dot = 7):\n x, _ = make_blobs(n_samples = n, n_features = 2, centers = k_for_blobs, random_state = 4)\n xtext = x[:, 0].min() + 1\n ytext = x[:, 1].max() - 1\n pylab.figure(0, figsize = (15, 10))\n for i in range(len(strats)):\n t = time.time()\n st, xc, yc = KMeans4(x, k = k_for_means, strat = strats[i])\n t = time.time() - t\n st, _, _ = clusters_sorted(st, xc, yc)\n pylab.subplot(2, 2, i + 1)\n plt.scatter(x[:, 0], x[:, 1], c = st)\n pylab.text(xtext, ytext, msg[i] + \"Время: \" + str(t)[:after_dot])\n plt.show()\n \n\ndef test_eps(tests, epss, strat = \"plusplus\", strat_mean = True):\n epss = np.asarray(epss)\n num_tests = tests.shape[0]\n num_eps = epss.shape[0]\n accurs = np.zeros(num_eps)\n for i in range(num_eps):\n accurs[i] = 0\n for j in range(num_tests):\n x, _ = make_blobs(n_samples = tests[j][0], n_features = 2, centers = tests[j][1], random_state = 4)\n k, _ = best_K(x, eps = epss[i], strat = strat, strat_mean = strat_mean)\n if k == tests[j][1]:\n accurs[i] += 1\n accurs /= num_tests\n plt.plot(epss, accurs, label = \"Среднее n = \" + \"%.0f\"%np.mean(tests[:, 0]))\n plt.scatter(0., 1., c = \"w\")\n plt.xlabel(\"eps\")\n plt.ylabel(\"Точность предположения\")\n plt.legend(loc=(0.1, 0.2))\n return epss[filter_by_clust(accurs, accurs.max(), num_eps)]\n\n\ndef test_time_once(n, k):\n x, _ = make_blobs(n_samples = n, n_features = 2, centers = k, random_state = 4)\n t = time.time()\n KMeans4(x, k = k, strat = \"plusplus\")\n t = time.time() - t\n return t\n\n\ndef test_time_mean(n, k, times):\n s = 0\n for i in range(times):\n s += test_time_once(n, k)\n return s / times\n\n\ndef test_time(n_start = 100, n_end = 1000, n_tests = 10, k_start = 2, k_end = 4, times = 10):\n k_tests = k_end - k_start + 1\n n = np.linspace(n_start, n_end, n_tests, dtype = np.int32)\n k = np.linspace(k_start, k_end, k_tests, dtype = np.int32)\n tests = np.zeros(n_tests * k_tests).reshape(n_tests, k_tests)\n for i in range(n_tests):\n for j in range(k_tests):\n tests[i][j] = test_time_mean(n[i], k[j], times)\n k, n = np.meshgrid(k, n)\n \n ax = plt.axes(projection='3d')\n pylab.xlabel = (\"Количество точек\")\n pylab.ylabel = (\"Количество кластеров\")\n ax.plot_surface(k, n, tests, cmap = \"gist_rainbow\")\n \n \ndef test_time_best_K(n_start = 100, n_end = 1000, n_tests = 10, times = 10):\n n = np.linspace(n_start, n_end, n_tests, dtype = np.int32)\n tests = np.zeros(n_tests)\n for i in range(n_tests):\n for j in range(times):\n x, _ = make_blobs(n_samples = n[i], n_features = 2, centers = 4, random_state = j)\n t = time.time()\n best_K(x, strat_mean = False, strat = \"plusplus\")\n tests[i] += time.time() - t\n tests /= times\n plt.plot(n, tests, c=\"r\")\n plt.xlabel(\"Количество точек\")\n plt.ylabel(\"Продолжительность работы функции\")\n 
\n\ndef make_matr_dist(matr):\n length = matr.shape[0]\n matr_dist = np.zeros(length * length).reshape(length, length)\n for i in range(length):\n matr_dist[i] = dist(matr[i][0], matr[i][1], matr[:, 0], matr[:, 1])\n return matr_dist","sub_path":"py/KMeans4 main/main/mylib.py","file_name":"mylib.py","file_ext":"py","file_size_in_byte":13620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"374716307","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2015 Lynx OS\n# Basado en Codigo Deepin\n\nimport subprocess\nimport os\nfrom ConfigParser import RawConfigParser as ConfigParser\n\ndef remove_directory(path):\n \"\"\"equivalent to command `rm -rf path`\"\"\"\n if os.path.exists(path):\n for i in os.listdir(path):\n full_path = os.path.join(path, i)\n if os.path.isdir(full_path):\n remove_directory(full_path)\n else:\n os.remove(full_path)\n os.rmdir(path)\n\ndef create_directory(directory, remove_first=False):\n '''Create directory.'''\n if remove_first and os.path.exists(directory):\n remove_directory(directory)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\ndef update_pot():\n # Read config options.\n config_parser = ConfigParser()\n config_parser.read(\"locale_config.ini\")\n project_name = config_parser.get(\"locale\", \"project_name\")\n source_dir = config_parser.get(\"locale\", \"source_dir\")\n locale_dir = os.path.abspath(config_parser.get(\"locale\", \"locale_dir\"))\n create_directory(locale_dir)\n\n # Get input arguments.\n include_qml = False\n py_source_files = []\n for root, dirs, files in os.walk(source_dir):\n for each_file in files:\n if each_file.endswith(\".qml\") and not include_qml:\n include_qml = True\n if each_file.endswith(\".py\") and not each_file.startswith(\".\"):\n py_source_files.append(os.path.join(root, each_file))\n\n pot_filepath = os.path.join(locale_dir, project_name + \".pot\")\n if os.path.exists(pot_filepath):\n os.remove(pot_filepath)\n\n if include_qml:\n ts_filepath = os.path.join(locale_dir, project_name + \".ts\")\n\n # Generate ts file\n subprocess.call(\n \"deepin-lupdate -recursive %s -ts %s\" % (os.path.realpath(source_dir), ts_filepath),\n shell=True)\n\n # convert to pot file.\n subprocess.call(\n \"lconvert -i %s -o %s\" % (ts_filepath, pot_filepath),\n shell=True)\n\n # clean string\n clean_str = \"\"\n with open(pot_filepath) as fp:\n for line in fp:\n if not line.startswith(\"msgctxt\"):\n clean_str += line\n\n with open(pot_filepath, \"wb\") as fp:\n fp.write(clean_str)\n\n # Merge pot file.\n if os.path.exists(pot_filepath):\n command = \"xgettext -j -k_ -o %s %s\" % (pot_filepath, ' '.join(py_source_files))\n else:\n command = \"xgettext -k_ -o %s %s\" % (pot_filepath, ' '.join(py_source_files))\n subprocess.call(command, shell=True)\n\nif __name__ == \"__main__\":\n update_pot()\n\n","sub_path":"tools/update_pot.py","file_name":"update_pot.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"493446169","text":"\"\"\"\nJupyterHub config for the littlest jupyterhub.\n\"\"\"\nimport os\nfrom os import makedirs, chown, chmod, listdir, walk\nfrom os.path import isdir, isfile, expanduser, isfile, join\nimport stat\nfrom shutil import copyfile\nimport pwd\nimport grp\n\nfrom systemdspawner import SystemdSpawner\nfrom tljh import user, configurer\nfrom git import Repo\nfrom git.cmd import Git\n\nINSTALL_PREFIX = 
os.environ.get('TLJH_INSTALL_PREFIX', '/opt/tljh')\nUSER_ENV_PREFIX = os.path.join(INSTALL_PREFIX, 'user')\n\nclass CustomSpawner(SystemdSpawner):\n def start(self):\n \"\"\"\n Perform system user activities before starting server\n \"\"\"\n # FIXME: Move this elsewhere? Into the Authenticator?\n user.ensure_user(self.user.name)\n user.ensure_user_group(self.user.name, 'jupyterhub-users')\n if self.user.admin:\n user.ensure_user_group(self.user.name, 'jupyterhub-admins')\n else:\n user.remove_user_group(self.user.name, 'jupyterhub-admins')\n\n NOTEBOOKS_REPO_URL = 'git@gitlab.com:climate-modelling-climate-change-erth90026/notebooks.git'\n NOTEBOOKS_REPO_DIR = '/data/notebooks-repo'\n NOTEBOOKS_SRC_DIR = join(NOTEBOOKS_REPO_DIR, 'notebooks')\n NOTEBOOKS_SRC_SUBDIRS_TO_COPY = [\n 'tutorials',\n join('tutorials', 'imgs'),\n 'assignments',\n ]\n NOTEBOOKS_SRC_SUBDIRS_TO_LOCK = [\n 'assignments-solutions',\n ]\n USER_ROOT = join('/home', self.user.name)\n # NOTEBOOKS_USER_DIR = join(USER_ROOT, 'notebooks', 'tutorials')\n NOTEBOOKS_USER_DIR = join(USER_ROOT, 'notebooks')\n\n if not isdir(NOTEBOOKS_REPO_DIR):\n Repo.clone_from(NOTEBOOKS_REPO_URL, NOTEBOOKS_REPO_DIR)\n\n notebooks_repo = Git(NOTEBOOKS_REPO_DIR)\n notebooks_repo.pull()\n\n root_uid = pwd.getpwnam(\"root\").pw_uid\n root_gid = grp.getgrnam(\"root\").gr_gid\n for src_subdir_to_lock in NOTEBOOKS_SRC_SUBDIRS_TO_LOCK:\n dir_to_lock = join(NOTEBOOKS_SRC_DIR, src_subdir_to_lock)\n nrdmode = os.stat(dir_to_lock)\n if (nrdmode.st_mode & stat.S_IRWXO != 0) or (nrdmode.st_mode & stat.S_IRWXG != 0):\n chown(dir_to_lock, root_uid, root_gid)\n chmod(dir_to_lock, 0o700)\n\n if not isdir(NOTEBOOKS_USER_DIR):\n makedirs(NOTEBOOKS_USER_DIR)\n\n for src_subdir in NOTEBOOKS_SRC_SUBDIRS_TO_COPY:\n src_dir = join(NOTEBOOKS_SRC_DIR, src_subdir)\n\n usr_dir = join(NOTEBOOKS_USER_DIR, src_subdir)\n if not isdir(usr_dir):\n makedirs(usr_dir)\n\n files_to_copy = [\n f for f in listdir(src_dir)\n if f.endswith(('.ipynb', '.png'))\n ]\n for file_notebook in files_to_copy:\n source_notebook = join(src_dir, file_notebook)\n user_notebook = join(usr_dir, file_notebook)\n if not isfile(user_notebook):\n copyfile(source_notebook, user_notebook)\n\n user_uid = pwd.getpwnam(self.user.name).pw_uid\n user_gid = grp.getgrnam(self.user.name).gr_gid\n for root, dirs, files in walk(USER_ROOT):\n for momo in dirs:\n chown(join(root, momo), user_uid, user_gid)\n for momo in files:\n chown(join(root, momo), user_uid, user_gid)\n\n return super().start()\n\nc.JupyterHub.spawner_class = CustomSpawner\n\n# use SSL port\nc.JupyterHub.port = 443\nletsencrypt_folder = '/etc/letsencrypt/live'\ndomain_folder = listdir(letsencrypt_folder)[0]\nc.JupyterHub.ssl_key = join(letsencrypt_folder, domain_folder, 'privkey.pem')\nc.JupyterHub.ssl_cert = join(letsencrypt_folder, domain_folder, 'fullchain.pem')\n\n# redirect http queries to https\nc.ConfigurableHTTPProxy.command = ['configurable-http-proxy', '--redirect-port', '80']\n\nfrom oauthenticator.gitlab import LocalGitLabOAuthenticator\nc.JupyterHub.authenticator_class = LocalGitLabOAuthenticator\n# make a user on the system if they don't already exist\nc.LocalGitLabOAuthenticator.create_system_users = True\nc.LocalGitLabOAuthenticator.delete_invalid_users = True\n\nc.SystemdSpawner.extra_paths = [os.path.join(USER_ENV_PREFIX, 'bin')]\nc.SystemdSpawner.default_shell = '/bin/bash'\n# Drop the '-singleuser' suffix present in the default template\nc.SystemdSpawner.unit_name_template = 'jupyter-{USERNAME}'\n\n# limit memory usage for each 
user\nc.SystemdSpawner.mem_limit = '0.5G'\n\nconfigurer.apply_yaml_config(os.path.join(INSTALL_PREFIX, 'config.yaml'), c)\n","sub_path":"tljh/jupyterhub_config.py","file_name":"jupyterhub_config.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"440574521","text":"#!/usr/bin/python2.7\n# -*- coding:utf-8 -*-\n#\n# シャットダウンスイッチのハンドリング\n# use GPIO 17 as input, upll-up\n#\n# GPIO17を3秒程度 Lo に保持すると、シャットダウンプロセスを起動するスクリプト\n# 短時間の押下では反応しないようになっている\n\nimport RPi.GPIO as GPIO\nimport time\nimport logging\nimport os\n\n# GPIO assign\nshutdownSw = 17\nshutdownLED = 21\n\n# log level setting\nlogLevel=logging.INFO\nlogFile ='/var/log/shutdwnSwitch.log'\n#\n# logger setup\n#\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logLevel)\n\n# create a file handler\nhandler = logging.FileHandler(logFile)\nhandler.setLevel(logLevel)\n\n# create a logging format\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n\n# add the handlers to the logger\nlogger.addHandler(handler)\n\n# GPIO setup\nGPIO.setmode(GPIO.BCM)\n\ndef gpioInterrupt(channel):\n# print(\"Capture the falling edge !\")\n sw_counter=0\n\n while True:\n sw_status = GPIO.input(shutdownSw)\n sw_counter = sw_counter + 1\n\n if sw_status == 0:\n if sw_counter >= 300:\n # in case the signal fixed to \"Low\" in 3sec\n # print(\" shutdown process activated...\")\n logger.debug(\".. Will you stop, Dave? Stop, Dave. I\\'m afraid....\")\n logger.info('Shutdown SW acceptted..')\n GPIO.output(shutdownLED, GPIO.HIGH)\n os.system(\"sudo shutdown -h now\")\n break\n else:\n # in case the signal got back to stedy state within 3sec\n logger.debug(\"I'm sorry Dave, I'm afraid You can't do that\")\n logger.info(\"shutdown SW was touched. No action activated. 
: %d\",sw_counter )\n break\n\n time.sleep(0.01)\n return sw_counter\n\n# make the 'pin' input and pulled up.\nGPIO.setup(shutdownSw, GPIO.IN, GPIO.PUD_UP)\n# capture the falling edge of the signal, estimated bounce would be 0.5s and call a funcgtion\nGPIO.add_event_detect(shutdownSw, GPIO.FALLING, callback=gpioInterrupt, bouncetime=500)\n# setup LED indicating sutting down\nGPIO.setup(shutdownLED, GPIO.OUT, initial=GPIO.LOW)\n\ntry:\n while(True):\n time.sleep(10)\n\nexcept KeyboardInterrupt:\n print(\"break\")\n GPIO.cleanup()\n","sub_path":"shutdwnSwitch.py","file_name":"shutdwnSwitch.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"635147745","text":"\"\"\"\nFile: menu.py\nAuthor: Ping\nEmail: billy3962@hotmail.com\nGithub: Ping-Lin\nDescription: menu let user to select which mode want to play\n\"\"\"\n\nimport pygame\nimport pygame.freetype\nimport sys\nfrom pygame.locals import *\nimport gbv\nfrom menuButton import MenuButton\nimport eztext\n\ndef runGame(buttonGroup, txtImgs, txtbox):\n page = [1]\n errorFlag = 0\n while True:\n events = pygame.event.get()\n for event in events:\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONUP:\n clickPos = pygame.mouse.get_pos()\n # print clickPos\n if page[0] == 1: #1p vs 1p or connect\n buttonGroup[0].update(clickPos, page)\n elif page[0] == 2: # server or client\n buttonGroup[1].update(clickPos, page)\n elif page[0] == 3: # input server address\n buttonGroup[2].update(clickPos, page, txtbox.value)\n if page[0] == -1:\n page[0] = 3\n errorFlag = 1\n\n # show the surface\n DISPLAYSURF.fill(gbv.BGCOLOR)\n if page[0] == 1:\n buttonGroup[0].draw(DISPLAYSURF)\n elif page[0] == 2:\n buttonGroup[1].draw(DISPLAYSURF)\n elif page[0] == 3:\n txtbox.update(events)\n txtbox.draw(DISPLAYSURF)\n buttonGroup[2].draw(DISPLAYSURF)\n DISPLAYSURF.blit(txtImgs[1], (390, 315))\n if errorFlag <= 15 and errorFlag >= 1:\n DISPLAYSURF.blit(txtImgs[2], (640, 600))\n errorFlag += 1\n else:\n errorFlag = 0\n\n\n DISPLAYSURF.blit(txtImgs[0], (155, 140))\n pygame.display.update()\n CLOCK.tick(20)\n\n\ndef main():\n global DISPLAYSURF, CLOCK\n pygame.init()\n pygame.display.set_icon(pygame.image.load('icon.icns'))\n DISPLAYSURF = pygame.display.set_mode((gbv.WINWIDTH, gbv.WINHEIGHT))\n pygame.display.set_caption('PikaBall X Connect')\n CLOCK = pygame.time.Clock()\n FONT = pygame.font.Font(None, 140)\n\n buttonGroup = []\n buttonGroup.append(pygame.sprite.Group())\n buttonGroup.append(pygame.sprite.Group())\n buttonGroup.append(pygame.sprite.Group())\n\n # the last argument is the option{i}.bmp, can see from the folder\n for i in xrange(1, 3):\n button = MenuButton(pygame.Rect(460, 100+200*i, 300, 150), i)\n buttonGroup[0].add(button)\n\n for i in xrange(3, 5):\n button = MenuButton(pygame.Rect(460, 100+200*(i-2), 300, 150), i)\n buttonGroup[1].add(button)\n\n button = MenuButton(pygame.Rect(390, 600, 225, 112), 5)\n buttonGroup[2].add(button)\n\n txtImgs = []\n txtImgs.append(FONT.render('PikaBall X Connect', 1, (255, 0, 0)))\n txtImgs.append(FONT.render('Server IP', 1, (255, 204, 34)))\n txtImgs.append(FONT.render('Error!', 1, (244, 102, 220)))\n\n txtbox = eztext.Input(x = 270, y = 450, maxlength = 15, color=(244, 240, 102), prompt='', font=FONT)\n\n runGame(buttonGroup, txtImgs, txtbox)\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"pika.app/Contents/Resources/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"344143907","text":"import datetime as dt\nfrom unittest.mock import call, patch\n\nfrom django.utils import timezone as djangotime\nfrom model_bakery import baker\n\nfrom tacticalrmm.test import TacticalTestCase\n\nfrom .models import AutomatedTask\nfrom .serializers import AutoTaskSerializer\nfrom .tasks import create_win_task_schedule, remove_orphaned_win_tasks, run_win_task\n\n\nclass TestAutotaskViews(TacticalTestCase):\n def setUp(self):\n self.authenticate()\n self.setup_coresettings()\n\n @patch(\"automation.tasks.generate_agent_autotasks_task.delay\")\n @patch(\"autotasks.tasks.create_win_task_schedule.delay\")\n def test_add_autotask(\n self, create_win_task_schedule, generate_agent_autotasks_task\n ):\n url = \"/tasks/automatedtasks/\"\n\n # setup data\n script = baker.make_recipe(\"scripts.script\")\n agent = baker.make_recipe(\"agents.agent\")\n policy = baker.make(\"automation.Policy\")\n check = baker.make_recipe(\"checks.diskspace_check\", agent=agent)\n\n # test script set to invalid pk\n data = {\"autotask\": {\"script\": 500}}\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test invalid policy\n data = {\"autotask\": {\"script\": script.id}, \"policy\": 500}\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test invalid agent\n data = {\n \"autotask\": {\"script\": script.id},\n \"agent\": 500,\n }\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test add task to agent\n data = {\n \"autotask\": {\n \"name\": \"Test Task Scheduled with Assigned Check\",\n \"run_time_days\": [\"Sunday\", \"Monday\", \"Friday\"],\n \"run_time_minute\": \"10:00\",\n \"timeout\": 120,\n \"enabled\": True,\n \"script\": script.id,\n \"script_args\": None,\n \"task_type\": \"scheduled\",\n \"assigned_check\": check.id,\n },\n \"agent\": agent.id,\n }\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n\n create_win_task_schedule.assert_called()\n\n # test add task to policy\n data = {\n \"autotask\": {\n \"name\": \"Test Task Manual\",\n \"run_time_days\": [],\n \"timeout\": 120,\n \"enabled\": True,\n \"script\": script.id,\n \"script_args\": None,\n \"task_type\": \"manual\",\n \"assigned_check\": None,\n },\n \"policy\": policy.id, # type: ignore\n }\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n\n generate_agent_autotasks_task.assert_called_with(policy=policy.id) # type: ignore\n\n self.check_not_authenticated(\"post\", url)\n\n def test_get_autotask(self):\n\n # setup data\n agent = baker.make_recipe(\"agents.agent\")\n baker.make(\"autotasks.AutomatedTask\", agent=agent, _quantity=3)\n\n url = f\"/tasks/{agent.id}/automatedtasks/\"\n\n resp = self.client.get(url, format=\"json\")\n serializer = AutoTaskSerializer(agent)\n\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, serializer.data) # type: ignore\n\n self.check_not_authenticated(\"get\", url)\n\n @patch(\"autotasks.tasks.enable_or_disable_win_task.delay\")\n @patch(\"automation.tasks.update_policy_autotasks_fields_task.delay\")\n def test_update_autotask(\n self, update_policy_autotasks_fields_task, enable_or_disable_win_task\n 
):\n # setup data\n agent = baker.make_recipe(\"agents.agent\")\n agent_task = baker.make(\"autotasks.AutomatedTask\", agent=agent)\n policy = baker.make(\"automation.Policy\")\n policy_task = baker.make(\"autotasks.AutomatedTask\", policy=policy)\n\n # test invalid url\n resp = self.client.patch(\"/tasks/500/automatedtasks/\", format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n url = f\"/tasks/{agent_task.id}/automatedtasks/\" # type: ignore\n\n # test editing agent task\n data = {\"enableordisable\": False}\n\n resp = self.client.patch(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n enable_or_disable_win_task.assert_called_with(pk=agent_task.id) # type: ignore\n\n url = f\"/tasks/{policy_task.id}/automatedtasks/\" # type: ignore\n\n # test editing policy task\n data = {\"enableordisable\": True}\n\n resp = self.client.patch(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n update_policy_autotasks_fields_task.assert_called_with(\n task=policy_task.id, update_agent=True # type: ignore\n )\n\n self.check_not_authenticated(\"patch\", url)\n\n @patch(\"autotasks.tasks.delete_win_task_schedule.delay\")\n @patch(\"automation.tasks.delete_policy_autotasks_task.delay\")\n def test_delete_autotask(\n self, delete_policy_autotasks_task, delete_win_task_schedule\n ):\n # setup data\n agent = baker.make_recipe(\"agents.agent\")\n agent_task = baker.make(\"autotasks.AutomatedTask\", agent=agent)\n policy = baker.make(\"automation.Policy\")\n policy_task = baker.make(\"autotasks.AutomatedTask\", policy=policy)\n\n # test invalid url\n resp = self.client.delete(\"/tasks/500/automatedtasks/\", format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test delete agent task\n url = f\"/tasks/{agent_task.id}/automatedtasks/\" # type: ignore\n resp = self.client.delete(url, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n delete_win_task_schedule.assert_called_with(pk=agent_task.id) # type: ignore\n\n # test delete policy task\n url = f\"/tasks/{policy_task.id}/automatedtasks/\" # type: ignore\n resp = self.client.delete(url, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n self.assertFalse(AutomatedTask.objects.filter(pk=policy_task.id)) # type: ignore\n delete_policy_autotasks_task.assert_called_with(task=policy_task.id) # type: ignore\n\n self.check_not_authenticated(\"delete\", url)\n\n @patch(\"autotasks.tasks.run_win_task.delay\")\n def test_run_autotask(self, run_win_task):\n # setup data\n agent = baker.make_recipe(\"agents.agent\", version=\"1.1.0\")\n task = baker.make(\"autotasks.AutomatedTask\", agent=agent)\n\n # test invalid url\n resp = self.client.get(\"/tasks/runwintask/500/\", format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test run agent task\n url = f\"/tasks/runwintask/{task.id}/\" # type: ignore\n resp = self.client.get(url, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n run_win_task.assert_called()\n\n self.check_not_authenticated(\"get\", url)\n\n\nclass TestAutoTaskCeleryTasks(TacticalTestCase):\n def setUp(self):\n self.authenticate()\n self.setup_coresettings()\n\n @patch(\"agents.models.Agent.nats_cmd\")\n def test_remove_orphaned_win_task(self, nats_cmd):\n self.agent = baker.make_recipe(\"agents.agent\")\n self.task1 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 1\",\n win_task_name=AutomatedTask.generate_task_name(),\n )\n\n # test removing an orphaned task\n win_tasks = [\n \"Adobe Acrobat Update Task\",\n \"AdobeGCInvoker-1.0\",\n 
\"GoogleUpdateTaskMachineCore\",\n \"GoogleUpdateTaskMachineUA\",\n \"OneDrive Standalone Update Task-S-1-5-21-717461175-241712648-1206041384-1001\",\n self.task1.win_task_name,\n \"TacticalRMM_fixmesh\",\n \"TacticalRMM_SchedReboot_jk324kajd\",\n \"TacticalRMM_iggrLcOaldIZnUzLuJWPLNwikiOoJJHHznb\", # orphaned task\n ]\n\n self.calls = [\n call({\"func\": \"listschedtasks\"}, timeout=10),\n call(\n {\n \"func\": \"delschedtask\",\n \"schedtaskpayload\": {\n \"name\": \"TacticalRMM_iggrLcOaldIZnUzLuJWPLNwikiOoJJHHznb\"\n },\n },\n timeout=10,\n ),\n ]\n\n nats_cmd.side_effect = [win_tasks, \"ok\"]\n ret = remove_orphaned_win_tasks.s(self.agent.pk).apply()\n self.assertEqual(nats_cmd.call_count, 2)\n nats_cmd.assert_has_calls(self.calls)\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # test nats delete task fail\n nats_cmd.reset_mock()\n nats_cmd.side_effect = [win_tasks, \"error deleting task\"]\n ret = remove_orphaned_win_tasks.s(self.agent.pk).apply()\n nats_cmd.assert_has_calls(self.calls)\n self.assertEqual(nats_cmd.call_count, 2)\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # no orphaned tasks\n nats_cmd.reset_mock()\n win_tasks.remove(\"TacticalRMM_iggrLcOaldIZnUzLuJWPLNwikiOoJJHHznb\")\n nats_cmd.side_effect = [win_tasks, \"ok\"]\n ret = remove_orphaned_win_tasks.s(self.agent.pk).apply()\n self.assertEqual(nats_cmd.call_count, 1)\n self.assertEqual(ret.status, \"SUCCESS\")\n\n @patch(\"agents.models.Agent.nats_cmd\")\n def test_run_win_task(self, nats_cmd):\n self.agent = baker.make_recipe(\"agents.agent\")\n self.task1 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 1\",\n win_task_name=AutomatedTask.generate_task_name(),\n )\n nats_cmd.return_value = \"ok\"\n ret = run_win_task.s(self.task1.pk).apply()\n self.assertEqual(ret.status, \"SUCCESS\")\n\n @patch(\"agents.models.Agent.nats_cmd\")\n def test_create_win_task_schedule(self, nats_cmd):\n self.agent = baker.make_recipe(\"agents.agent\")\n\n task_name = AutomatedTask.generate_task_name()\n # test scheduled task\n self.task1 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 1\",\n win_task_name=task_name,\n task_type=\"scheduled\",\n run_time_bit_weekdays=127,\n run_time_minute=\"21:55\",\n )\n self.assertEqual(self.task1.sync_status, \"initial\")\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task1.pk).apply()\n self.assertEqual(nats_cmd.call_count, 1)\n nats_cmd.assert_called_with(\n {\n \"func\": \"schedtask\",\n \"schedtaskpayload\": {\n \"type\": \"rmm\",\n \"trigger\": \"weekly\",\n \"weekdays\": 127,\n \"pk\": self.task1.pk,\n \"name\": task_name,\n \"hour\": 21,\n \"min\": 55,\n },\n },\n timeout=5,\n )\n self.task1 = AutomatedTask.objects.get(pk=self.task1.pk)\n self.assertEqual(self.task1.sync_status, \"synced\")\n\n nats_cmd.return_value = \"timeout\"\n ret = create_win_task_schedule.s(pk=self.task1.pk).apply()\n self.assertEqual(ret.status, \"SUCCESS\")\n self.task1 = AutomatedTask.objects.get(pk=self.task1.pk)\n self.assertEqual(self.task1.sync_status, \"initial\")\n\n # test runonce with future date\n nats_cmd.reset_mock()\n task_name = AutomatedTask.generate_task_name()\n run_time_date = djangotime.now() + djangotime.timedelta(hours=22)\n self.task2 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 2\",\n win_task_name=task_name,\n task_type=\"runonce\",\n run_time_date=run_time_date,\n )\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task2.pk).apply()\n nats_cmd.assert_called_with(\n {\n 
\"func\": \"schedtask\",\n \"schedtaskpayload\": {\n \"type\": \"rmm\",\n \"trigger\": \"once\",\n \"pk\": self.task2.pk,\n \"name\": task_name,\n \"year\": int(dt.datetime.strftime(self.task2.run_time_date, \"%Y\")),\n \"month\": dt.datetime.strftime(self.task2.run_time_date, \"%B\"),\n \"day\": int(dt.datetime.strftime(self.task2.run_time_date, \"%d\")),\n \"hour\": int(dt.datetime.strftime(self.task2.run_time_date, \"%H\")),\n \"min\": int(dt.datetime.strftime(self.task2.run_time_date, \"%M\")),\n },\n },\n timeout=5,\n )\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # test runonce with date in the past\n nats_cmd.reset_mock()\n task_name = AutomatedTask.generate_task_name()\n run_time_date = djangotime.now() - djangotime.timedelta(days=13)\n self.task3 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 3\",\n win_task_name=task_name,\n task_type=\"runonce\",\n run_time_date=run_time_date,\n )\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task3.pk).apply()\n self.task3 = AutomatedTask.objects.get(pk=self.task3.pk)\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # test checkfailure\n nats_cmd.reset_mock()\n self.check = baker.make_recipe(\"checks.diskspace_check\", agent=self.agent)\n task_name = AutomatedTask.generate_task_name()\n self.task4 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 4\",\n win_task_name=task_name,\n task_type=\"checkfailure\",\n assigned_check=self.check,\n )\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task4.pk).apply()\n nats_cmd.assert_called_with(\n {\n \"func\": \"schedtask\",\n \"schedtaskpayload\": {\n \"type\": \"rmm\",\n \"trigger\": \"manual\",\n \"pk\": self.task4.pk,\n \"name\": task_name,\n },\n },\n timeout=5,\n )\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # test manual\n nats_cmd.reset_mock()\n task_name = AutomatedTask.generate_task_name()\n self.task5 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 5\",\n win_task_name=task_name,\n task_type=\"manual\",\n )\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task5.pk).apply()\n nats_cmd.assert_called_with(\n {\n \"func\": \"schedtask\",\n \"schedtaskpayload\": {\n \"type\": \"rmm\",\n \"trigger\": \"manual\",\n \"pk\": self.task5.pk,\n \"name\": task_name,\n },\n },\n timeout=5,\n )\n self.assertEqual(ret.status, \"SUCCESS\")\n","sub_path":"api/tacticalrmm/autotasks/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":15190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"339542069","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, unicode_literals, print_function\n\nimport io\nimport os\n\nfrom .. import asdf\nfrom .. import asdftypes\nfrom . 
import helpers\n\nfrom astropy.extern.six.moves.urllib.parse import urljoin\nfrom astropy.extern.six.moves.urllib.request import pathname2url\n\nTEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')\n\n\ndef test_custom_tag():\n import fractions\n\n class FractionType(asdftypes.AsdfType):\n name = 'fraction'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n types = [fractions.Fraction]\n\n @classmethod\n def to_tree(cls, node, ctx):\n return [node.numerator, node.denominator]\n\n @classmethod\n def from_tree(cls, tree, ctx):\n return fractions.Fraction(tree[0], tree[1])\n\n class FractionExtension(object):\n @property\n def types(self):\n return [FractionType]\n\n @property\n def tag_mapping(self):\n return [('tag:nowhere.org:custom',\n 'http://nowhere.org/schemas/custom{tag_suffix}')]\n\n @property\n def url_mapping(self):\n return [('http://nowhere.org/schemas/custom/1.0.0/',\n urljoin('file:', pathname2url(os.path.join(\n TEST_DATA_PATH))) + '/{url_suffix}.yaml')]\n\n class FractionCallable(FractionExtension):\n @property\n def tag_mapping(self):\n def check(tag):\n prefix = 'tag:nowhere.org:custom'\n if tag.startswith(prefix):\n return 'http://nowhere.org/schemas/custom' + tag[len(prefix):]\n return [check]\n\n yaml = \"\"\"\na: !\n [2, 3]\nb: !core/complex\n 0j\n \"\"\"\n\n buff = helpers.yaml_to_asdf(yaml)\n ff = asdf.AsdfFile.read(\n buff, extensions=FractionExtension())\n assert ff.tree['a'] == fractions.Fraction(2, 3)\n\n buff = io.BytesIO()\n ff.write_to(buff)\n\n buff = helpers.yaml_to_asdf(yaml)\n ff = asdf.AsdfFile.read(\n buff, extensions=FractionCallable())\n assert ff.tree['a'] == fractions.Fraction(2, 3)\n\n buff = io.BytesIO()\n ff.write_to(buff)\n","sub_path":"pyasdf/tests/test_asdftypes.py","file_name":"test_asdftypes.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"472002189","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[125]:\n\n\n#monty hall\nimport random\nimport numpy as np\n\nprint('Número de iterações: ')\nn = input() #número de repetições\nn= int(n)\ntroca = 0\nmantem = 0\n\nfor i in range(0,n):\n listaPortas=[1,2,3]\n portaCerta = random.randrange(1,4)\n portaEscolhida= random.randrange(1,4)\n #print('portaCerta: ',portaCerta)\n #print('portaEscolhida:', portaEscolhida)\n \n pErradas = listaPortas.copy()\n pErradas.remove(portaCerta)\n #print('pErradas ', pErradas)\n \n pSobrando = listaPortas.copy()\n pSobrando.remove(portaEscolhida)\n #print('pSobrando ',pSobrando)\n \n pElim = np.intersect1d(pErradas,pSobrando)\n pFinal= pErradas.copy()\n pFinal.remove(pElim[0])\n portaRestante = pFinal[0]\n #print('portaRestante ',portaRestante)\n \n \n if portaEscolhida==portaCerta:\n mantem=mantem + 1\n else:\n troca = troca+1\n if i%1000==0:\n print(i,'/',n)\nprint('Trocar ganha',troca,'vezes (',troca/n*100,'%)')\nprint('Manter ganha',mantem,'vezes (',mantem/n*100,'%)')\n\n\n\n","sub_path":"Monty Hall.py","file_name":"Monty Hall.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"336736731","text":"# https://atcoder.jp/contests/abc117/tasks/abc117_b\n\nN = int(input())\nL = [int(_) for _ in input().split()]\nL.sort(reverse=True)\n \nif L.pop(0) < sum(L):\n print('Yes')\nelse:\n 
print('No')","sub_path":"abc/abc117/abc117_b.py","file_name":"abc117_b.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"234971999","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport gym\nimport tensorboard\nimport collections\nimport cv2\nimport time\n\n\n# In[21]:\n\nclass DenseModel():\n def __init__(self, name_scope):\n self.name_scope = name_scope\n self.model(self.name_scope)\n# self.graphkeys = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES)\n \n def model(self, name_scope):\n #create prediction model\n with tf.variable_scope(name_scope): \n self.target_placeholder = tf.placeholder(dtype=tf.float32, shape=[None],name='Target_placeholder')\n self.state_placeholder = tf.placeholder(dtype=tf.float32, shape=[None, 4], name='State_placeholder') #(batch_size, Pixel_w, Pixel_h, n_frames)\n self.action_placeholder = tf.placeholder(dtype=tf.int32, shape=[None], name='Action_placeholder') \n \n #conv_layers\n layer1 = tf.layers.dense(inputs=self.state_placeholder, units=32, activation=tf.nn.relu, name='layer1')\n layer2 = tf.layers.dense(inputs=layer1, units=64, activation=tf.nn.relu, name='layer2')\n layer3 = tf.layers.dense(inputs=layer2, units=32, activation=tf.nn.relu, name='layer3')\n self.Q_value_predictions = tf.layers.dense(inputs=layer2, units=2,) #self.predictions gives the Q-value per action\n \n self.Q_value_predictions_flatten = tf.reshape(self.Q_value_predictions, shape=[1,-1], )\n #select only the Q_values of actions_taken \n self.action_predictions = tf.gather(self.Q_value_predictions_flatten, self.action_placeholder, axis=1)[0]\n \n #compute_loss\n self.losses = tf.squared_difference(self.target_placeholder, self.action_predictions)\n self.loss = tf.reduce_mean(self.losses, name='loss')\n \n #optimizer\n self.optimizer = tf.train.AdamOptimizer(0.001)\n self.train_op = self.optimizer.minimize(self.loss)\n \n \n\n\n# In[22]:\n\ndef store_experience(memory, state, action, reward, done, next_state):\n #receive stacked state/next_state\n memory.append((state, action, reward, done, next_state))\n\n\n# In[23]:\n\nclass Network():\n \n def __init__(self, sess, epsilon_start, epsilon_n_reductions, epsilon_min, env, experience_limit, batch_size, gamma, learning_rate, replace_target_pointer, name_scope_target, name_scope_prediction, save_iter=1000):\n self.epsilon_start = epsilon_start\n self.epsilon_min = epsilon_min\n self.epsilon_reduce = (epsilon_start - epsilon_min) / epsilon_n_reductions\n self.epsilon = epsilon_start\n self.env = env\n self.memory = collections.deque(maxlen=experience_limit)\n self.sess = sess\n self.batch_size = batch_size\n self.gamma = gamma\n self.learning_rate = learning_rate\n self.replace_target_pointer = replace_target_pointer\n self.q_predict = DenseModel(name_scope_prediction)\n self.q_target = DenseModel(name_scope_target)\n self.prediction_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name_scope_prediction)\n self.target_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name_scope_target)\n self.saver = tf.train.Saver() \n self.save_iter = save_iter\n \n def prediction(self, state, model):\n #model = self.q_predict or self.q_target\n return self.sess.run(model.Q_value_predictions, feed_dict={model.state_placeholder:state})\n \n def target(self, model, states, actions, rewards, dones, next_states):\n self.next_states_values = 
self.sess.run(tf.reduce_max(model.Q_value_predictions, axis=1),feed_dict={model.state_placeholder:next_states}) #compute max expected Q-value\n self.next_states_values[dones] = 0 #if episode is done, no discounted Q value (important for convergence)\n self.target_state_action_values = self.next_states_values * self.gamma + rewards #this is target_value (y)\n return self.target_state_action_values\n \n def optimize(self, states, actions, targets):\n rows = np.arange(self.batch_size)\n rows = rows*2\n actions = actions + rows \n \n feed_dict = {self.q_predict.target_placeholder:targets, self.q_predict.state_placeholder:states, self.q_predict.action_placeholder:actions}\n _, loss = self.sess.run([self.q_predict.train_op, self.q_predict.loss], feed_dict=feed_dict)\n return loss\n \n def action_epsilon(self, state, model):\n if np.random.uniform(low=0., high=1.) < self.epsilon:\n action = env.action_space.sample()\n else:\n Q_values_for_given_obs = self.prediction(state, model)\n action = np.argmax(Q_values_for_given_obs, axis=1)[0]\n self.epsilon = self.epsilon - self.epsilon_reduce if self.epsilon > self.epsilon_min else self.epsilon_min\n return action \n \n def update_target_params(self, target_params, estimator_params):\n assign_operation = [tf.assign(t,e) for t,e in zip(target_params, estimator_params)]\n return self.sess.run(assign_operation)\n \n# def replace_target_parameters(self, global_counter):\n# if global_counter % self.replace_target_pointer == 0:\n \n\n\n# In[24]:\n\ndef select_random_batch(memory, batch_size):\n #select a random batch and create arrays of the different components (states, actions, rewards, dones, next_states)\n\n indices = np.random.choice(len(memory), batch_size)\n batch = [memory[i] for i in indices]\n states = np.array([[[i][0][0] for i in batch]])\n states = np.reshape(states, newshape=(batch_size,4))\n next_states = np.array([[i][0][4] for i in batch])\n next_states = np.reshape(next_states, newshape=(batch_size,4))\n dones = np.array([[i][0][3] for i in batch])\n actions = np.array([[i][0][1] for i in batch])\n rewards = np.array([[i][0][2] for i in batch])\n return batch, states, actions, rewards, dones, next_states \n\n\n# In[25]:\n\ntf.reset_default_graph()\n\nloss_values = tf.placeholder(dtype=tf.float32, shape=[])\nreward_mean_values = tf.placeholder(dtype=tf.float32, shape=[])\nreward_bound_values = tf.placeholder(dtype=tf.float32, shape=[])\n\nloss_mapping = tf.summary.scalar('loss', loss_values)\nreward_mean_mapping = tf.summary.scalar('mean_reward', reward_mean_values)\nreward_bound_mapping = tf.summary.scalar('bound_reward', reward_bound_values)\nepisode_reward_list = []\nloss_reward_list =[]\nwith tf.Session() as sess:\n \n s_time = time.time()\n env = gym.make('CartPole-v0')\n global_counter = 0\n DQN = Network(env=env, epsilon_start=1.0, epsilon_min=0.02, epsilon_n_reductions=20000, sess=sess, experience_limit=4000, batch_size=64, gamma=0.99, learning_rate=1e-4, replace_target_pointer=250, name_scope_prediction='prediction', name_scope_target='target', save_iter=500)\n n_episodes = 10000\n episode_rewards_buffer = collections.deque(maxlen=50)\n loss_buffer = []\n \n \n writer = tf.summary.FileWriter(\"/Users/stefruinard/Desktop/RL_models/DQN/Save_sess/\", sess.graph)\n \n sess.run(tf.global_variables_initializer())\n for episode in range(n_episodes):\n s_start = time.time()\n state = env.reset()\n state = state.reshape(1,4)\n episode_reward = 0.0\n \n \n while True:\n \n action = DQN.action_epsilon(model=DQN.q_predict, state=state)\n state_, 
reward, done, _ = env.step(action)\n store_experience(memory=DQN.memory, action=action, done=done, next_state=state_, reward=reward, state=state)\n if len(DQN.memory) < (DQN.memory.maxlen):\n if done:\n break\n continue\n \n \n #obtain a batch from memory\n \n #env.render()\n #compute target labels and perform an optimization_step\n \n# if global_counter > DQN.memory.maxlen:\n \n \n \n #loss_buffer.append(loss)\n if global_counter > DQN.memory.maxlen and global_counter % DQN.replace_target_pointer == 0:\n print(\"TARGET REPLACED\")\n DQN.update_target_params(estimator_params=DQN.prediction_params, target_params=DQN.target_params)\n\n if global_counter % DQN.save_iter ==0:\n DQN.saver.save(sess, '/Users/stefruinard/Desktop/RL_models/DQN/Save_sess/Temp_save', global_step=global_counter)\n# update target_network_parameters\n \n episode_reward += reward\n if done:\n break\n \n \n \n state=state_\n state=state.reshape(-1,4)\n global_counter+=1 \n \n \n if global_counter > DQN.memory.maxlen:\n batch, states, actions, rewards, dones, next_states = select_random_batch(DQN.memory, DQN.batch_size)\n #print(\"OPTIMIZIE\")\n q_target_values = DQN.target(actions=actions, dones=dones, model=DQN.q_predict, next_states=next_states, rewards=rewards, states=states)\n loss = DQN.optimize(actions=actions, states=states, targets=q_target_values) #this step also runs a optimization step\n\n loss_reward_list.append(loss)\n loss_value = sess.run(loss_mapping, feed_dict={loss_values:loss})\n writer.add_summary(global_step=global_counter, summary=loss_value)\n \n s_end = time.time() \n print('Episode_reward: ', episode_reward, 'Time:', (s_end - s_start), 'Episode_number: ', episode, 'Epsilon:', DQN.epsilon)\n episode_rewards_buffer.append(episode_reward)\n episode_reward_list.append(episode_reward)\n reward_value = sess.run(reward_mean_mapping, feed_dict={reward_mean_values:episode_reward})\n writer.add_summary(global_step=global_counter, summary=reward_value)\n \n merged = tf.summary.merge_all()\n \n if np.mean(episode_rewards_buffer) > 195:\n DQN.saver.save(sess, '/Users/stefruinard/Desktop/RL_models/DQN/Save_sess/final_summary', global_step=global_counter)\n break","sub_path":"DQN/DQN.py","file_name":"DQN.py","file_ext":"py","file_size_in_byte":10246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"636045618","text":"'''\nCreated on Nov 3, 2018\n\n@author: daniel\n'''\n\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom datetime import datetime\nfrom UNetFactory.createUNetInception import createUNetInception\nfrom DataHandlers.UNetDataHandler import UNetDataHandler\n\nfrom keras.callbacks import CSVLogger, LearningRateScheduler\nfrom CustomLosses import dice_coef, dice_coef_loss, dice_coef_multilabel, dice_coef_multilabel_loss, dice_coef_reg_1, dice_coef_reg_2, dice_coef_reg_3\nfrom Generators.CustomImageAugmentationGenerator import CustomImageAugmentationGenerator\nfrom Generators.CustomImageGenerator2 import CustomImageGenerator2\nfrom random import shuffle\nimport shutil\nimport math\nfrom keras.optimizers import Adam\nfrom Utils.HardwareHandler import HardwareHandler\nimport tensorflow as tf\nfrom keras.utils.training_utils import multi_gpu_model\n\n\ndef step_decay(epoch):\n initial_lrate = 0.1\n #drop = 0.5\n epochs_drop = 10.0\n lrate = initial_lrate * math.exp(-math.floor((1+epoch)/epochs_drop))\n #lrate = initial_lrate * math.pow(drop, \n # math.floor((1+epoch)/epochs_drop))\n return lrate\n\ndef 
main():\n\n hardwareHandler = HardwareHandler()\n numGPUs = hardwareHandler.getAvailableGPUs() \n now = datetime.now()\n date_string = now.strftime('%Y-%m-%d-%H:%M')\n \n num_training_patients = 200\n num_validation_patients = 0\n n_labels = 1\n\n data_gen = None\n modes = [\"flair\", \"t1ce\", \"t2\"]\n \n dataDirectory = \"Data/BRATS_2018/HGG\" \n validationDataDirectory = \"Data/BRATS_2018/HGG_Validation\"\n testingDataDirectory = \"Data/BRATS_2018/HGG_Testing\"\n modelDirectory = \"Models\"\n \n ## create testing, validation, and model directories\n if not os.path.exists(validationDataDirectory):\n os.makedirs(validationDataDirectory)\n if not os.path.exists(testingDataDirectory):\n os.makedirs(testingDataDirectory)\n if not os.path.exists(modelDirectory):\n os.makedirs(modelDirectory)\n\n ### Move a random subset of files into validation directory\n if len(os.listdir(validationDataDirectory)) <= 0:\n listOfDirs = os.listdir(dataDirectory)\n shuffle(listOfDirs)\n validation_data = listOfDirs[0:num_validation_patients]\n for datum in validation_data:\n shutil.move(dataDirectory + \"/\" + datum, validationDataDirectory)\n \n\n \n \n dataHandler = UNetDataHandler(\"Data/BRATS_2018/HGG\", \n num_patients = num_training_patients, \n modes = modes,\n n_labels = n_labels)\n dataHandler.loadData()\n x_train = dataHandler.X\n x_seg_train = dataHandler.labels\n dataHandler.clear()\n \n dataHandler.setDataDirectory(\"Data/BRATS_2018/HGG_Validation\")\n dataHandler.setNumPatients(num_validation_patients)\n dataHandler.loadData()\n x_val = dataHandler.X\n x_seg_val = dataHandler.labels\n dataHandler.clear()\n\n input_shape = (dataHandler.W,dataHandler.H, len(modes))\n \n normalize = True\n augmentations = False\n \n if n_labels > 1:\n output_mode = \"sigmoid\"\n else:\n output_mode = \"sigmoid\"\n\n if augmentations:\n data_gen = CustomImageAugmentationGenerator()\n else:\n data_gen = CustomImageGenerator2()\n \n num_epochs = 25\n adam = Adam(lr = 0.1)\n batch_size = 64\n \n validation_data_gen = CustomImageGenerator2()\n \n if numGPUs > 1:\n with tf.device('/cpu:0'):\n unet_to_save = createUNetInception(input_shape, output_mode, n_labels)\n unet = multi_gpu_model(unet_to_save, numGPUs)\n else:\n unet = createUNetInception(input_shape, output_mode, n_labels)\n\n \n\n if n_labels > 1:\n unet.compile(optimizer=adam, loss=dice_coef_multilabel_loss, metrics=[dice_coef_multilabel])\n if numGPUs > 1:\n unet_to_save.compile(optimizer=adam, loss=dice_coef_multilabel_loss, metrics=[dice_coef_multilabel])\n\n else:\n unet.compile(optimizer=adam, loss=dice_coef_loss, metrics=[dice_coef])\n if numGPUs > 1:\n unet_to_save.compile(optimizer=adam, loss=dice_coef_loss, metrics=[dice_coef])\n\n\n\n model_directory = \"Models/unet_\" + date_string \n if not os.path.exists(model_directory):\n os.makedirs(model_directory)\n \n log_info_filename = 'model_loss_log.csv'\n csv_logger = CSVLogger(model_directory + '/' + log_info_filename, append=True, separator=',')\n \n ## Log additional data about model\n ## Note: should be in a logging class\n model_info_filename = 'model_info.txt'\n model_info_file = open(model_directory + '/' + model_info_filename, \"w\") \n model_info_file.write('Number of Patients (training): ' + str(num_training_patients) + '\\n')\n model_info_file.write('Number of Patients (validation): ' + str(num_validation_patients) + '\\n')\n model_info_file.write('\\n\\n')\n unet.summary(print_fn=lambda x: model_info_file.write(x + '\\n'))\n model_info_file.close();\n \n print(\"Training on \" + str(numGPUs) 
+ \" GPUs\")\n unet.fit_generator(generator = data_gen.generate(x_train, \n x_seg_train, \n batch_size, \n n_labels,\n normalize), \n epochs = num_epochs,\n steps_per_epoch = len(x_train) / batch_size, \n callbacks = [csv_logger], \n use_multiprocessing = True, \n workers = 4,\n shuffle=True,\n validation_steps= len(x_val) / batch_size,\n validation_data = validation_data_gen.generate(x_val, \n x_seg_val, \n batch_size, \n n_labels, \n normalize))\n \n \n if numGPUs > 1:\n unet_to_save.save(model_directory + '/model.h5')\n else:\n unet.save(model_directory + '/model.h5')\n\n \n \n\n\n \n\nif __name__ == \"__main__\":\n main() \n","sub_path":"TestingUNetInception.py","file_name":"TestingUNetInception.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"519964308","text":"import time\nfrom bus.scripts import *\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n print(\"Start\")\n\n start = time.time()\n\n origin = 808\n destination = 809\n route = \"46A\"\n pattern = 1\n hour = 14\n day = 3\n weather = 1\n #\n # for i in range(10):\n # predict(origin, destination, route, pattern, hour, day, weather)\n\n filename = os.path.join(settings.DATA_PATH, 'sklearn_models/' + route + '.sav')\n\n # loading pickled model\n model = pickle.load(open(filename, 'rb'))\n\n for i in range(1000):\n pred1 = query_model(model, origin, pattern, hour, day, weather)\n pred2 = query_model(model, destination, pattern, hour, day, weather)\n\n\n total = time.time() - start\n\n print(\"Took\", total, \"seconds\")","sub_path":"bus/prediction_time.py","file_name":"prediction_time.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"467386972","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\n\nfrom logger import Logger\nfrom datetime import datetime\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom IPython import display\nimport os\nfrom PIL import Image\nfrom torch.utils.data.dataset import Dataset\nfrom scipy.misc import imread\n\n# %matplotlib inline\nTENSORBOARD_DIR = '/usr/WS1/hammel1/proj/tensorboard/'\nCHECKPOINT_DIR = '/usr/WS1/hammel1/proj/checkpoints/'\nDATA_DIR = '/usr/WS1/hammel1/proj/data/'\nexperiment_id = datetime.now().isoformat()\n\ntorch.set_default_tensor_type('torch.cuda.FloatTensor')\nUSE_GPU = torch.cuda.is_available()\ndevice = torch.device('cuda' if USE_GPU else 'cpu')\n\nif USE_GPU:\n print(\"=\"*80)\n print(\"Model is using GPU\")\n print(\"=\"*80)\n\n\nclass NN(nn.Module):\n \n def __init__(self, input_size, hidden_size, output_size):\n super(NN, self).__init__()\n self.fc1 = nn.Linear(input_size, hidden_size)\n self.out = nn.Linear(hidden_size, output_size)\n \n def forward(self, x):\n output = self.fc1(x)\n #output = F.torch.sigmoid(output)\n output = F.relu(output)\n output = self.out(output)\n return output\n\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms, utils\nimport time\nimport pca_data_hu_ndl2_train_test as pca_data_hu\n\n\nclass DataSet(Dataset):\n\n def __init__(self, X, Y, transform=False):\n self.X = X\n self.Y = Y\n self.transform = transform\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, idx):\n x = 
self.X[idx].astype(np.float32)\n y = self.Y[idx].astype(np.float32)\n return torch.tensor(x, device=device), torch.tensor(y, device=device)\n\n\nhu, PC, X_pca_test, X_pca, X, X_test, Y, Y_test = pca_data_hu.pca_data()\nnp.save(f\"../data/{experiment_id}\", [hu, PC, X_pca_test, X_pca, X, X_test, Y, Y_test])\n\nbch_sz = 2048\n\ntraining_set = DataSet(X_pca, Y, transform=False)\ntraining_generator = DataLoader(training_set, batch_size=bch_sz, shuffle=True)\n\ntest_set = DataSet(X_pca_test, Y_test, transform=False)\ntest_generator = DataLoader(test_set, batch_size=bch_sz, shuffle=True)\n\nimg, lab = training_set.__getitem__(0)\nprint('PCA input shape at the first row : {}'.format(img.size())) \nprint('label shape at the first row : {}'.format(lab.size())) \nprint(np.array(training_set).shape) \n#print(np.array(test_set).shape)\n\"\"\"\nimage shape at the first row : torch.Size([30] #PCA input shape)\nlabel shape at the first row : torch.Size([1]) #label shape\n\"\"\"\n\ntrain_loader_check = DataLoader(training_set, batch_size=bch_sz, shuffle=True)\ntrain_iter_check = iter(train_loader_check)\nprint(type(train_iter_check))\n\nimages, labels = train_iter_check.next()\n\nprint('images shape on batch size = {}'.format(images.size()))\nprint('labels shape on batch size = {}'.format(labels.size()))\n\"\"\"\nimages shape on batch size = torch.Size([2048, 30])\nlabels shape on batch size = torch.Size([2048, 1])\n\"\"\"\n# Break\n#pdb.set_trace()\n\nnet = NN(PC, 100, 1)\n\n\nif USE_GPU:\n net.cuda()\n\nimport pyro\nfrom pyro.distributions import Normal, Uniform, Delta\nfrom pyro.infer import SVI, Trace_ELBO\nfrom pyro.optim import Adam\nfrom pyro.infer import EmpiricalMarginal, SVI, Trace_ELBO, TracePredictive\nfrom pyro.infer.mcmc import MCMC, NUTS\nimport pyro.optim as optim\n\nlog_softmax = nn.LogSoftmax(dim=1)\n\ndef model(x_data, y_data):\n \n fc1w_prior = Normal(loc=torch.zeros_like(net.fc1.weight), scale=torch.ones_like(net.fc1.weight))\n fc1b_prior = Normal(loc=torch.zeros_like(net.fc1.bias), scale=torch.ones_like(net.fc1.bias))\n \n outw_prior = Normal(loc=torch.zeros_like(net.out.weight), scale=torch.ones_like(net.out.weight))\n outb_prior = Normal(loc=torch.zeros_like(net.out.bias), scale=torch.ones_like(net.out.bias))\n \n priors = {'fc1.weight': fc1w_prior, 'fc1.bias': fc1b_prior, 'out.weight': outw_prior, 'out.bias': outb_prior}\n # lift module parameters to random variables sampled from the priors\n lifted_module = pyro.random_module(\"module\", net, priors)\n # sample a regressor (which also samples w and b)\n lifted_reg_model = lifted_module()\n \"\"\"\n lhat = log_softmax(lifted_reg_model(x_data))\n pyro.sample(\"obs\", Categorical(logits=lhat), obs=y_data)\n \"\"\"\n # run the regressor forward conditioned on inputs\n prediction_mean = lifted_reg_model(x_data).squeeze(-1)\n pyro.sample(\"obs\", Normal(prediction_mean, 1),\n obs=y_data)\n return prediction_mean\n\n\nsoftplus = torch.nn.Softplus()\n\n\"\"\"\nfrom pyro.contrib.autoguide import AutoDiagonalNormal \nguide = AutoDiagonalNormal(model)\n\"\"\"\ndef guide(x_data, y_data):\n \n # First layer weight distribution priors\n fc1w_mu = torch.randn_like(net.fc1.weight)\n fc1w_sigma = torch.randn_like(net.fc1.weight)\n fc1w_mu_param = pyro.param(\"fc1w_mu\", fc1w_mu)\n fc1w_sigma_param = softplus(pyro.param(\"fc1w_sigma\", fc1w_sigma))\n fc1w_prior = Normal(loc=fc1w_mu_param, scale=fc1w_sigma_param)\n # First layer bias distribution priors\n fc1b_mu = torch.randn_like(net.fc1.bias)\n fc1b_sigma = torch.randn_like(net.fc1.bias)\n 
fc1b_mu_param = pyro.param(\"fc1b_mu\", fc1b_mu)\n fc1b_sigma_param = softplus(pyro.param(\"fc1b_sigma\", fc1b_sigma))\n fc1b_prior = Normal(loc=fc1b_mu_param, scale=fc1b_sigma_param)\n # Output layer weight distribution priors\n outw_mu = torch.randn_like(net.out.weight)\n outw_sigma = torch.randn_like(net.out.weight)\n outw_mu_param = pyro.param(\"outw_mu\", outw_mu)\n outw_sigma_param = softplus(pyro.param(\"outw_sigma\", outw_sigma))\n outw_prior = Normal(loc=outw_mu_param, scale=outw_sigma_param).independent(1)\n # Output layer bias distribution priors\n outb_mu = torch.randn_like(net.out.bias)\n outb_sigma = torch.randn_like(net.out.bias)\n outb_mu_param = pyro.param(\"outb_mu\", outb_mu)\n outb_sigma_param = softplus(pyro.param(\"outb_sigma\", outb_sigma))\n outb_prior = Normal(loc=outb_mu_param, scale=outb_sigma_param)\n priors = {'fc1.weight': fc1w_prior, 'fc1.bias': fc1b_prior, 'out.weight': outw_prior, 'out.bias': outb_prior}\n \n lifted_module = pyro.random_module(\"module\", net, priors)\n return lifted_module()\n\n\n\n# Reducing Learning Rate.\n# ReduceOnPlateau is not supported.\nAdamArgs = { 'lr': 1e-2 }\noptimizer = torch.optim.Adam\nscheduler = pyro.optim.ExponentialLR({'optimizer': optimizer, 'optim_args': AdamArgs, 'gamma': 0.99995 })\nsvi = SVI(model, guide, scheduler, loss=Trace_ELBO())\n\n\"\"\"\n\n#Fixed Learning Rate\noptimizer = Adam({\"lr\": 0.01})\nsvi = SVI(model, guide, optimizer, loss=Trace_ELBO())\n\"\"\"\n\n\"\"\"\nnum_iterations = 1\nfor j in range(num_iterations): \n print(\"Epoch \", j) \n for batch_id, data in enumerate(training_generator): \n print(\"batch_id\", batch_id, data[1][:,-1])\n\"\"\"\n\n\nprint('Logging experiment as: ', experiment_id)\n\nlogger = Logger(os.path.join(TENSORBOARD_DIR, experiment_id))\n\nnum_iterations = 50\nloss = 0\nlosses = []\nfor j in range(num_iterations):\n #print(\"Epoch \", j) \n loss = 0\n for batch_id, data_train in enumerate(training_generator):\n # calculate the loss and take a gradient step\n loss += svi.step(data_train[0], data_train[1][:,-1])\n #loss += svi.step(x, y)\n normalizer_train = len(training_generator.dataset)\n total_epoch_loss_train = loss / normalizer_train\n \n losses.append(total_epoch_loss_train)\n print(\"Epoch \", j, \" Loss \", total_epoch_loss_train)\n\nplt.plot(losses)\nplt.title(\"ELBO\")\nplt.xlabel(\"step\")\nplt.ylabel(\"Epoch loss\")\n\ntorch.save(model, os.path.join(CHECKPOINT_DIR, f'{experiment_id}_latest'))\n\n# Break\n#pdb.set_trace()\n\n\"\"\"\n#Non-preferred method:\noutput = { \n 'guide': guide, \n 'state_dict': NN(PC,100,1).state_dict(), \n 'params': pyro.param \n} \ntorch.save(output,os.path.join(CHECKPOINT_DIR, f'{experiment_id}_params')) \n\"\"\"\n#preferred method\ntorch.save(NN(PC,100,1).state_dict(), f'{experiment_id}_params') \n#Look at state_dict\n\n#Print model's state_dic \nprint(\"Model's state_dict:\") \nfor param_tensor in NN(PC,100,1).state_dict(): \n print(param_tensor, \"\\t\", NN(PC,100,1).state_dict()[param_tensor].size()) \n\"\"\"\nModel's state_dict:\nfc1.weight torch.Size([100, 30])\nfc1.bias torch.Size([100])\nout.weight torch.Size([1, 100])\nout.bias torch.Size([1])\n\"\"\"\nnum_samples = 100\ndef predict(x):\n sampled_models = [guide(None, None) for _ in range(num_samples)]\n yhats = [model(x).data for model in sampled_models]\n mean = torch.mean(torch.stack(yhats), 0)\n return mean.cpu().numpy, yhats\n\nprint('Prediction for unshifted spectra')\n\n#This instead ???\n\"\"\"\ndef predict(x): \n sampled_models = [guide(None, None) for _ in 
range(num_samples)] \n yhats = [model[(x)] for model in sampled_models] \n mean = torch.mean(torch.stack(yhats), 0) \n print(mean.cpu().numpy.shape, yhats.shape) \n return mean.cpu().numpy, yhats \n\"\"\"\ncorrect = 0\ntotal = 0\n\n\"\"\"\nNeed to change labels.flatten() to labels.cpu().flatten()\nIn [66]: labels.cpu().data \nOut[66]: \ntensor([[6.],\n ...,\n [6.]], device='cpu')\n\nIn [67]: labels.cpu().data.shape \nOut[67]: torch.Size([1028, 1])\n\nIn [27]: labels.cpu().data.flatten().numpy() \nOut[27]: \narray([6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6.,\n 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6., 6.,\nIn [28]: labels.cpu().data.flatten().numpy().sum().item() \nOut[28]: 4488.0\n\"\"\"\n\naccept = []\ntol = 0.1\n\n#This still doesn't work\nfor j, data in enumerate(training_generator):\n images, labels = data\n predicted = predict(images)\n npredicted=np.array([_.cpu().numpy() for _ in predicted[1]])[:,:,0] \n total += labels.size(0)\n tolLo = (1.- tol) * labels.cpu().data.flatten().numpy()\n tolHi = (1 + tol) * labels.cpu().data.flatten().numpy()\n accept += (tolLo.all() <= npredicted.all() <= tolHi.all())\n #.sum().item()\n#print(\"accuracy: %d %%\" % (100 * accept / total))\n\n#print(images, predicted)\n\n\"\"\"\nIn [67]: labels.cpu().data.shape \nOut[67]: torch.Size([1028, 1])\n\nIn [68]: tolLow.shape \nOut[68]: (1028,)\n\nIn [69]: tolHi.shape \nOut[69]: (1028,)\n\nJ.Field's fix\n\n[ins] In [91]: predicted[1][0].cpu().numpy() \nOut[91]: \narray([[7.4891357],\n ...,\n [7.4891357]], dtype=float32)\n\n[ins] In [92]: predicted[1][0].cpu().numpy().shape \nOut[92]: (1028, 1)\n\n[ins] In [93]: predicted[0] \nOut[93]: \n\n[ins] In [94]: np.array([_.cpu().numpy() for _ in predicted[1]]).shape \nOut[94]: (100, 1028, 1)\n\n[ins] In [95]: xx=np.array([_.cpu().numpy() for _ in predicted[1]])[:,:,0] \n\nsa[ins] In [96]: xx.shape \nOut[96]: (100, 1028)\n\n[ins] In [97]: xx \nOut[97]: \narray([[7.4891357, 7.4891357, 7.4891357, ..., 7.4891357, 7.4891357,\n 7.4891357],\n ...,\n [4.506528 , 4.506528 , 4.506528 , ..., 4.506528 , 4.506528 ,\n 4.506528 ]], dtype=float32)\n\"\"\"\n\nlabels.data.shape \n#labels.shape \nnp.array(predicted).shape\n\nfrom functools import partial\nimport pandas as pd\n\n\nfor name, value in pyro.get_param_store().items():\n print(name, pyro.param(name))\n\nfor name, value in pyro.get_param_store().items(): \n print(name, pyro.param(name).cpu().detach().numpy().mean()) \n\n\"\"\"\nfor name, value in pyro.get_param_store().items(): \n print(name, pyro.param(name).cpu().detach().numpy().mean()) \nfc1w_mu 0.020490551\nfc1w_sigma 0.49763533\nfc1b_mu -0.95366895\nfc1b_sigma -0.8260457\noutw_mu 0.052733693\noutw_sigma -0.46032602\noutb_mu 1.2478061\noutb_sigma -3.6989684\n\"\"\"\n\nget_marginal = lambda traces, sites:EmpiricalMarginal(traces, sites)._get_samples_and_weights()[0].detach().cpu().numpy()\n\ndef summary(traces, sites):\n marginal = get_marginal(traces, sites)\n site_stats = {}\n for i in range(marginal.shape[1]):\n site_name = sites[i]\n marginal_site = pd.DataFrame(marginal[:, i]).transpose()\n describe = partial(pd.Series.describe, percentiles=[.05, 0.25, 0.5, 0.75, 0.95])\n site_stats[site_name] = marginal_site.apply(describe, axis=1) \\\n [[\"mean\", \"std\", \"5%\", \"25%\", \"50%\", \"75%\", \"95%\"]]\n return site_stats\n\ndef wrapped_model(x_data, y_data):\n pyro.sample(\"prediction\", Delta(model(x_data, y_data)))\n\n\n\n\nposterior = svi.run(data_train[0], data_train[1][:,-1])\n\n# Break\n#pdb.set_trace()\n\n# 
posterior predictive distribution we can get samples from\ntrace_pred = TracePredictive(wrapped_model,\n posterior,\n num_samples=100)\npost_pred = trace_pred.run(data_train[0], None) #inputing pca components?\npost_summary = summary(post_pred, sites= ['prediction', 'obs'])\nmu = post_summary[\"prediction\"]\ny = post_summary[\"obs\"]\nmu.insert(0, 'true', data_train[1].cpu().numpy())\ny.insert(0, 'true', data_train[1].cpu().numpy())\n\n\nprint(\"sample mu data:\")\nprint(mu.head(10))\n#What's the difference between mu and y? Means are the same but sigma is very different.\nprint(\"sample y data:\")\nprint(y.head(10))\n\ndf = pd.DataFrame(mu) \nnx = df.reset_index() #insert a first row in Dataframe for index\nnx = nx.values #Convert Dataframe to array\nfig = plt.figure(dpi=100, figsize=(5, 4))\nplt.scatter(nx[:,0],nx[:,1], c='b') \n#plt.scatter(nx[:,0],nx[:,2], c='r') \nplt.errorbar(nx[:,0],nx[:,2], yerr=nx[:,3], fmt='o', c='r')\n#plt.ylim(0.5,0.7)\nplt.ylabel('mu')\nplt.xlabel('sample')\n\n# Break\n#pdb.set_trace()\n\nprint('Prediction when data is shifted')\n\nfor j in range(num_iterations):\n #print(\"Epoch \", j) \n for batch_id, data_test in enumerate(test_generator): \n temp=batch_id\n\n\n\nposterior = svi.run(data_test[0], data_test[1][:,-1])\n\n\n# posterior predictive distribution we can get samples from\ntrace_pred = TracePredictive(wrapped_model,\n posterior,\n num_samples=100)\npost_pred = trace_pred.run(data_test[0], None)\npost_summary = summary(post_pred, sites= ['prediction', 'obs'])\nmu = post_summary[\"prediction\"]\ny = post_summary[\"obs\"]\nmu.insert(0, 'true', data_test[1].cpu().numpy())\ny.insert(0, 'true', data_test[1].cpu().numpy())\n\nprint(\"sample mu data:\")\nprint(mu.head(10))\nprint(\"sample y data:\")\nprint(y.head(10))\ndf = pd.DataFrame(mu) \n\nnx = df.reset_index() #insert a first row in Dataframe for index\nnx = nx.values #Convert Dataframe to array\nfig = plt.figure(dpi=100, figsize=(5, 4))\nplt.scatter(nx[:,0],nx[:,1], c='b') \n#plt.scatter(nx[:,0],nx[:,2], c='r') \nplt.errorbar(nx[:,0],nx[:,2], yerr=nx[:,3], fmt='o', c='r')\n#plt.ylim(0.5,0.7)\nplt.ylabel('mu')\nplt.xlabel('sample')\n\n\"\"\"\nUnable to reload guide and parameters\n\nIn [8]: torch.load(os.path.join(CHECKPOINT_DIR, '2019-03-10T18:31:12.698383_params')) \n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n in \n----> 1 torch.load(os.path.join(CHECKPOINT_DIR, '2019-03-10T18:31:12.698383_params'))\n\n/usr/WS1/hammel1/proj/gpu_venv/lib/python3.6/site-packages/torch/serialization.py in load(f, map_location, pickle_module)\n 365 f = open(f, 'rb')\n 366 try:\n--> 367 return _load(f, map_location, pickle_module)\n 368 finally:\n 369 if new_fd:\n\n/usr/WS1/hammel1/proj/gpu_venv/lib/python3.6/site-packages/torch/serialization.py in _load(f, map_location, pickle_module)\n 536 unpickler = pickle_module.Unpickler(f)\n 537 unpickler.persistent_load = persistent_load\n--> 538 result = unpickler.load()\n 539 \n 540 deserialized_storage_keys = pickle_module.load(f)\n\nAttributeError: Can't get attribute 'guide' on \n\"\"\"\n\n\n\"\"\"\nR^2 (coefficient of determination) regression score function.\nBest possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). 
\nA constant model that always predicts the expected value of y, \ndisregarding the input features, would get a R^2 score of 0.0.\n\nIn [23]: preds = [] \n ...: \n ...: for i in range(1000): \n ...: sampled_reg_model = guide(None, None) \n ...: pred = sampled_reg_model(data_train[0]).cpu().data.numpy().flatten() \n ...: preds.append(pred) \n ...: \n ...: all_preds = np.stack(preds).mean(0) \n ...: r2_score(data_train[1][:,-1].cpu(), all_preds) \nOut[23]: -0.12804690200144275 #Seems to vary between -0.1 and +0.1\n\n[ins] In [24]: all_preds.shape \nOut[24]: (848,)\n[ins] In [25]: data_train[1][:,-1].cpu().shape \nOut[25]: torch.Size([848])\n\n[ins] In [31]: all_preds[1:30] \nOut[31]: \narray([6.020882 , 6.023583 , 6.0278573, 5.8618426, 6.014196 , 5.88622 ,\n 5.971507 , 5.9299164, 6.025372 , 6.024828 , 6.0221314, 6.024089 ,\n 5.971507 , 6.024828 , 5.9490256, 5.894002 , 6.0085297, 5.9859567,\n 5.908943 , 5.844442 , 5.853278 , 5.923161 , 5.960731 , 5.9364977,\n 5.994445 , 6.0225916, 5.9813895, 5.923161 , 5.9364977],\n dtype=float32)\n\n[ins] In [32]: data_train[1][:,-1].cpu()[1:30] \nOut[32]: \ntensor([6.0184, 6.0527, 6.0404, 5.9449, 6.0110, 5.9522, 5.9841, 5.9669, 6.0257,\n 6.0502, 6.0551, 6.0233, 5.9841, 6.0502, 5.9743, 5.9547, 6.0061, 5.9914,\n 5.9596, 5.9400, 5.9424, 5.9645, 5.9792, 5.9694, 5.9963, 6.0208, 5.9890,\n 5.9645, 5.9694], device='cpu')\n\n\n\n*** If I do r2 score with identical values I DO get 1.0 ***\n[ins] In [34]: preds = [] \n ...: \n ...: for i in range(10000): \n ...: sampled_reg_model = guide(None, None) \n ...: pred = sampled_reg_model(data_train[0]).cpu().data.numpy().flatten() \n ...: preds.append(pred) \n ...: \n ...: all_preds = np.stack(preds).mean(0) \n ...: r2_score(data_train[1][:,-1].cpu(), data_train[1][:,-1].cpu()) \nOut[34]: 1.0\n\"\"\"\n","sub_path":"development_scripts/mnist_bay_nn_gauss_1HL_model1_mod1.py","file_name":"mnist_bay_nn_gauss_1HL_model1_mod1.py","file_ext":"py","file_size_in_byte":23188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"510810929","text":"from django.core.management.base import BaseCommand\n\nfrom peterbecom.apps.podcasttime.scraper import fix_podcast_images\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **kwargs):\n max_ = 10\n verbose = int(kwargs['verbosity']) >= 2\n fix_podcast_images(max_=max_, verbose=verbose)\n","sub_path":"peterbecom/apps/podcasttime/management/commands/fix-podcast-images.py","file_name":"fix-podcast-images.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"428542682","text":"'''\nNew conv1d model for mfcc40 features\nCreated on December 8, 2020 at 1838\n'''\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, Input\nfrom tensorflow.keras.layers import Activation, BatchNormalization\nfrom tensorflow.keras.layers import Conv1D, Conv2D\nfrom tensorflow.keras.layers import AveragePooling1D, GlobalAveragePooling2D, MaxPooling1D\nfrom tensorflow.keras.models import Model, model_from_json, Sequential\n\n# notes with Tom on December 8, 2020 at 1558\n# try changing 8 to 3, this is taking too many timestamps at a time\n# edit starting from small to large, not from 256\n# could add globalaverage1d instead of flatten\n\ndef model_d_conv1d(input_shape):\n model = Sequential()\n model.add(Conv1D(32, 3, padding='same',input_shape=input_shape)) # X_train.shape[1] = No. 
of Columns (216)\n model.add(Activation('relu'))\n model.add(Conv1D(32, 3, padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(0.25))\n model.add(MaxPooling1D(pool_size=(3)))\n model.add(Conv1D(64, 3, padding='same'))\n model.add(Activation('relu'))\n model.add(Conv1D(64, 3, padding='same'))\n model.add(Dropout(0.25))\n model.add(MaxPooling1D(pool_size=(3))) # added drop out and maxpooling layer on 20201209 at 1330\n model.add(Activation('relu'))\n model.add(Conv1D(64, 3, padding='same'))\n model.add(Activation('relu'))\n model.add(Conv1D(64, 3, padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(0.25))\n model.add(MaxPooling1D(pool_size=(3)))\n model.add(Conv1D(128, 3, padding='same'))\n model.add(Activation('relu'))\n model.add(Conv1D(128, 3, padding='same'))\n model.add(Activation('relu'))\n model.add(Conv1D(256, 3, padding='same'))\n model.add(Activation('relu'))\n model.add(Flatten())\n model.add(Dense(3)) # Target class number\n model.add(Activation('softmax'))\n\n # model optimizer\n optimizer = tf.keras.optimizers.RMSprop(lr=0.000001, decay=1e-6)\n # compile model\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n return model","sub_path":"models/model_d_conv1d_mfcc40.py","file_name":"model_d_conv1d_mfcc40.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"612421610","text":"from morango.constants import transfer_stages\nfrom morango.constants import transfer_statuses\nfrom morango.errors import MorangoContextUpdateError\nfrom morango.models.certificates import Filter\nfrom morango.models.core import SyncSession\nfrom morango.models.core import TransferSession\nfrom morango.utils import parse_capabilities_from_server_request\nfrom morango.utils import CAPABILITIES\n\n\nclass SessionContext(object):\n \"\"\"\n Class that holds the context of a transfer, for executing transfer ops through the middleware\n \"\"\"\n\n __slots__ = (\n \"sync_session\",\n \"transfer_session\",\n \"filter\",\n \"is_push\",\n \"capabilities\",\n \"error\",\n )\n\n def __init__(\n self,\n sync_session=None,\n transfer_session=None,\n sync_filter=None,\n is_push=None,\n capabilities=None,\n ):\n \"\"\"\n :param sync_session: The sync session instance\n :type sync_session: SyncSession|None\n :param transfer_session: The current transfer session that will be operated against\n :type transfer_session: TransferSession|None\n :param sync_filter The sync filter to use for the TransferSession\n :type sync_filter Filter|None\n :param is_push: A boolean indicating whether or not the transfer is a push or pull\n :type is_push: bool\n :param capabilities: Capabilities set that is combined (union) against our own capabilities\n :type capabilities: set|None\n \"\"\"\n self.sync_session = sync_session\n self.transfer_session = transfer_session\n self.filter = sync_filter\n self.is_push = is_push\n self.capabilities = set(capabilities or []) & CAPABILITIES\n self.error = None\n\n if self.transfer_session:\n self.sync_session = transfer_session.sync_session or self.sync_session\n self.is_push = transfer_session.push or self.is_push\n if transfer_session.filter:\n self.filter = transfer_session.get_filter()\n\n def update(\n self,\n transfer_session=None,\n sync_filter=None,\n is_push=None,\n stage=None,\n stage_status=None,\n capabilities=None,\n error=None,\n ):\n \"\"\"\n 
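A hypothetical smoke test for the model_d_conv1d builder above. The (216, 1) input shape follows the "No. of Columns (216)" comment and is only illustrative; with mfcc40 features substitute your real (timesteps, channels). It assumes a TensorFlow version that still accepts the lr=/decay= RMSprop arguments used in the builder (TF 1.x / early 2.x):

import numpy as np
model = model_d_conv1d(input_shape=(216, 1))
model.summary()
dummy_batch = np.zeros((4, 216, 1), dtype=np.float32)
print(model.predict(dummy_batch).shape)   # (4, 3): softmax scores over the three target classes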
Updates the context\n :type transfer_session: TransferSession|None\n :type sync_filter Filter|None\n :type is_push: bool\n :type stage: str|None\n :type stage_status: str|None\n :type capabilities: str[]|None\n :type error: BaseException|None\n \"\"\"\n if transfer_session and self.transfer_session:\n raise MorangoContextUpdateError(\"Transfer session already exists\")\n elif (\n transfer_session\n and self.sync_session\n and transfer_session.sync_session_id != self.sync_session.id\n ):\n raise MorangoContextUpdateError(\"Sync session mismatch\")\n\n if sync_filter and self.filter:\n raise MorangoContextUpdateError(\"Filter already exists\")\n\n if is_push is not None and self.is_push is not None:\n raise MorangoContextUpdateError(\"Push/pull method already exists\")\n\n self.transfer_session = transfer_session or self.transfer_session\n self.filter = sync_filter or self.filter\n self.is_push = is_push if is_push is not None else self.is_push\n self.capabilities = set(capabilities or self.capabilities) & CAPABILITIES\n self.update_state(stage=stage, stage_status=stage_status)\n self.error = error or self.error\n\n # if transfer session was passed in, that takes precedence\n if transfer_session:\n self.sync_session = transfer_session.sync_session\n self.is_push = transfer_session.push\n if transfer_session.filter:\n self.filter = transfer_session.get_filter()\n\n @property\n def is_pull(self):\n \"\"\"\n :rtype: bool\n \"\"\"\n return not self.is_push\n\n @property\n def stage(self):\n \"\"\"\n The stage of the transfer context\n :return: A transfer_stages.* constant\n :rtype: str\n \"\"\"\n raise NotImplementedError(\"Context `stage` getter is missing\")\n\n @property\n def stage_status(self):\n \"\"\"\n The status of the transfer context's stage\n :return: A transfer_statuses.* constant\n :rtype: str\n \"\"\"\n raise NotImplementedError(\"Context `stage_status` getter is missing\")\n\n def update_state(self, stage=None, stage_status=None):\n \"\"\"\n Updates the stage state\n :type stage: transfer_stages.*|None\n :type stage_status: transfer_statuses.*|None\n \"\"\"\n raise NotImplementedError(\"Context `update_state` method is missing\")\n\n def __getstate__(self):\n \"\"\"Return dict of simplified data for serialization\"\"\"\n return dict(\n sync_session_id=self.sync_session.id if self.sync_session else None,\n transfer_session_id=(\n self.transfer_session.id if self.transfer_session else None\n ),\n filter=str(self.filter),\n is_push=self.is_push,\n stage=self.stage,\n stage_status=self.stage_status,\n capabilities=self.capabilities,\n error=self.error,\n )\n\n def __setstate__(self, state):\n \"\"\"Re-apply dict state after serialization\"\"\"\n sync_session_id = state.get(\"sync_session_id\", None)\n if sync_session_id is not None:\n self.sync_session = SyncSession.objects.get(pk=sync_session_id)\n\n transfer_session_id = state.get(\"transfer_session_id\", None)\n if transfer_session_id is not None:\n self.transfer_session = TransferSession.objects.get(pk=transfer_session_id)\n if self.sync_session is None:\n self.sync_session = self.transfer_session.sync_session\n\n sync_filter = state.get(\"filter\", None)\n if sync_filter is not None:\n self.filter = Filter(sync_filter)\n\n self.is_push = state.get(\"is_push\", None)\n self.capabilities = state.get(\"capabilities\", None)\n\n stage = state.get(\"stage\", None)\n stage_status = state.get(\"stage_status\", None)\n self.update_state(stage=stage, stage_status=stage_status)\n self.error = state.get(\"error\", None)\n\n\nclass 
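The __getstate__/__setstate__ pair above keeps pickled contexts small by storing only primary keys and simple fields, then re-fetching the models on load. A generic, morango-free illustration of that hook pattern (hypothetical Ctx class, plain pickle):

import pickle

class Ctx(object):
    def __init__(self, session_id, stage):
        self.session_id = session_id
        self.stage = stage

    def __getstate__(self):
        # serialize only simple identifiers, as SessionContext does with model pks
        return {"session_id": self.session_id, "stage": self.stage}

    def __setstate__(self, state):
        # re-hydrate from the simplified dict (SessionContext re-fetches models here)
        self.session_id = state["session_id"]
        self.stage = state["stage"]

restored = pickle.loads(pickle.dumps(Ctx("abc123", "initializing")))
print(restored.session_id, restored.stage)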
LocalSessionContext(SessionContext):\n \"\"\"\n Class that holds the context for operating on a transfer locally\n \"\"\"\n\n __slots__ = (\n \"request\",\n \"is_server\",\n )\n\n def __init__(self, request=None, **kwargs):\n \"\"\"\n :param request: If acting as the server, it should pass in the request, but the request\n is not serialized into context. See `is_server` prop for determining if request was\n passed in.\n :type request: django.http.request.HttpRequest\n \"\"\"\n capabilities = kwargs.pop(\"capabilities\", [])\n if request is not None:\n capabilities = parse_capabilities_from_server_request(request)\n\n super(LocalSessionContext, self).__init__(capabilities=capabilities, **kwargs)\n self.request = request\n self.is_server = request is not None\n\n @property\n def _has_transfer_session(self):\n \"\"\"\n :rtype: bool\n \"\"\"\n return getattr(self, \"transfer_session\", None) is not None\n\n @property\n def stage(self):\n \"\"\"\n :return: A transfer_stage.* constant\n \"\"\"\n stage = transfer_stages.INITIALIZING\n if self._has_transfer_session:\n stage = self.transfer_session.transfer_stage or stage\n return stage\n\n @property\n def stage_status(self):\n \"\"\"\n :return: A transfer_statuses.* constant\n \"\"\"\n stage_status = transfer_statuses.PENDING\n if self._has_transfer_session:\n stage_status = self.transfer_session.transfer_stage_status or stage_status\n return stage_status\n\n @property\n def is_receiver(self):\n \"\"\"\n Whether or not the context indicates that the current local instance is receiving data,\n which means either:\n - A server context and a push transfer, or\n - A client context and a pull transfer\n :return: bool\n \"\"\"\n return self.is_push == self.is_server\n\n @property\n def is_producer(self):\n \"\"\"\n The opposite of `is_receiver`, meaning either:\n - A server context and a pull transfer, or\n - A client context and a push transfer\n :return: bool\n \"\"\"\n return not self.is_receiver\n\n def update_state(self, stage=None, stage_status=None):\n \"\"\"\n Passes through updating state to `TransferSession`, refreshing it from the DB in case it\n has changed during operation\n\n :param stage: Target stage for update\n :param stage_status: Target status for update\n \"\"\"\n if self._has_transfer_session:\n self.transfer_session.refresh_from_db()\n self.transfer_session.update_state(stage=stage, stage_status=stage_status)\n\n def __getstate__(self):\n \"\"\"Return dict of simplified data for serialization\"\"\"\n state = super(LocalSessionContext, self).__getstate__()\n state.update(is_server=self.is_server)\n return state\n\n def __setstate__(self, state):\n \"\"\"Re-apply dict state after serialization\"\"\"\n self.is_server = state.pop(\"is_server\", False)\n super(LocalSessionContext, self).__setstate__(state)\n\n\nclass NetworkSessionContext(SessionContext):\n \"\"\"\n Class that holds the context for operating on a transfer remotely through network connection\n \"\"\"\n\n __slots__ = (\"connection\", \"_stage\", \"_stage_status\")\n\n def __init__(self, connection, **kwargs):\n \"\"\"\n :param connection: The sync client connection that allows operations to execute API calls\n against the remote Morango server instance\n :type connection: NetworkSyncConnection\n \"\"\"\n self.connection = connection\n super(NetworkSessionContext, self).__init__(\n capabilities=self.connection.server_info.get(\"capabilities\", []), **kwargs\n )\n\n # since this is network context, keep local reference to state vars\n self._stage = 
transfer_stages.INITIALIZING\n self._stage_status = transfer_statuses.PENDING\n\n @property\n def stage(self):\n \"\"\"\n :return: A transfer_stage.* constant\n \"\"\"\n return self._stage\n\n @property\n def stage_status(self):\n \"\"\"\n :return: A transfer_statuses.* constant\n \"\"\"\n return self._stage_status\n\n def update_state(self, stage=None, stage_status=None):\n \"\"\"\n :param stage: Target stage for update\n :param stage_status: Target status for update\n \"\"\"\n self._stage = stage or self._stage\n self._stage_status = stage_status or self._stage_status\n","sub_path":"morango/sync/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":10989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"319836874","text":"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\n\r\ndef enumerate_files(dirs, path='All_gray_1_32_32/', n_poses=3, n_samples=20):\r\n file_names = []\r\n targets = []\r\n for p in dirs:\r\n for n in range(n_poses):\r\n for j in range(3):\r\n dir_name = path + p + '/000' + str(n*3+j) + '/'\r\n for s in range(n_samples):\r\n d = dir_name + '%04d/' % s\r\n for f in os.listdir(d):\r\n if f.endswith('jpg'):\r\n file_names += [d + f]\r\n targets.append(n)\r\n return file_names, targets\r\n\r\ndef read_image(files):\r\n imgs = []\r\n for file in files:\r\n img = cv2.imread(file, cv2.IMREAD_GRAYSCALE) / 255.0\r\n imgs.append(img)\r\n return imgs\r\n\r\ndef conv2d(img, w, b):\r\n return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides = [1,1,1,1], padding = 'SAME'),b))\r\n\r\ndef max_pooling(img, k):\r\n return tf.nn.max_pool(img, ksize = [1,k,k,1], strides = [1,k,k,1], padding ='SAME')\r\n\r\ndef model(n_classes = 3, lr = 0.001):\r\n x = tf.placeholder(tf.float32,[None, 32, 32])\r\n _X = tf.reshape(x, shape = [-1, 32, 32, 1])\r\n y = tf.placeholder(tf.float32,[None,n_classes])\r\n \r\n wc1 = tf.get_variable(\"wc1\",[5,5,1,32], initializer = tf.random_normal_initializer(), dtype= tf.float32)\r\n bc1 = tf.get_variable(\"bc1\",[32],initializer = tf.zeros_initializer(),dtype = tf.float32)\r\n conv1 = conv2d(_X, wc1, bc1)\r\n conv1 = max_pooling(conv1, k = 2)\r\n keep_prob = tf.placeholder(tf.float32)\r\n conv1 = tf.nn.dropout(conv1, keep_prob)\r\n \r\n wc2 = tf.get_variable(\"wc2\",[5,5,32,64], initializer = tf.random_normal_initializer(), dtype= tf.float32)\r\n bc2 = tf.get_variable(\"bc2\",[64],initializer = tf.zeros_initializer(),dtype = tf.float32)\r\n conv2 = conv2d(conv1, wc2, bc2)\r\n conv2 = max_pooling(conv2, k = 2)\r\n conv1 = tf.nn.dropout(conv2, keep_prob)\r\n \r\n wd = tf.get_variable(\"wd\",[8*8*64,1024], initializer = tf.random_normal_initializer(), dtype= tf.float32)\r\n bd = tf.get_variable(\"bd\",[1024], initializer = tf.random_normal_initializer(), dtype= tf.float32)\r\n dense = tf.reshape(conv2,[-1, wd.get_shape().as_list()[0]])\r\n dense = tf.nn.relu(tf.add(tf.matmul(dense,wd),bd))\r\n dense = tf.nn.dropout(dense, keep_prob)\r\n \r\n wout = tf.get_variable(\"wout\",[1024,n_classes], initializer = tf.random_normal_initializer(), dtype= tf.float32)\r\n bout = tf.get_variable(\"bout\",[n_classes], initializer = tf.random_normal_initializer(), dtype= tf.float32)\r\n #pred = tf.nn.softmax(tf.nn.relu(tf.add(tf.matmul(dense,wout),bout)))\r\n pred = tf.add(tf.matmul(dense,wout),bout)\r\n \r\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\r\n optimizer = 
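For reference, the [8*8*64, 1024] shape of wd above follows from the 32x32 grayscale inputs and the two k=2 poolings: 32 -> 16 after the first max-pool and 16 -> 8 after the second, with 64 feature maps from conv2, so the flattened dense input has 8 * 8 * 64 = 4096 units.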
tf.train.AdamOptimizer(learning_rate= lr).minimize(cost)\r\n correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n \r\n return x, y, pred, keep_prob, cost, optimizer, correct_pred, accuracy\r\n\r\ndef load_tr_data():\r\n train_sets = ['Set1','Set2','Set3']\r\n tr_filenames, tr_labels = enumerate_files(train_sets)\r\n tr_list_of_arrays = read_image(tr_filenames)\r\n tr_array = np.array(tr_list_of_arrays)\r\n tr_labels = np.array(tr_labels)\r\n return tr_array, tr_labels\r\n\r\ndef load_ts_data():\r\n test_sets = ['Set4','Set5']\r\n ts_filenames, ts_labels = enumerate_files(test_sets)\r\n ts_list_of_arrays = read_image(ts_filenames)\r\n ts_array = np.array(ts_list_of_arrays)\r\n ts_labels = np.array(ts_labels)\r\n batch_size = ts_labels.size\r\n batch_y = np.zeros(batch_size*3).reshape(batch_size, 3)\r\n for i in range(batch_size):\r\n batch_y[i][int(ts_labels[i])] = 1\r\n return ts_array, batch_y\r\n \r\ndef next_batch(tr_array, tr_labels):\r\n batch_size = tr_labels.size\r\n batch_x = np.zeros_like(tr_array)\r\n batch_y = np.zeros(batch_size*3).reshape(batch_size, 3)\r\n index = np.arange(batch_size)\r\n np.random.shuffle(index)\r\n for i in range(batch_size):\r\n idx = index[i]\r\n batch_x[i] = tr_array[idx]\r\n batch_y[i][int(tr_labels[idx])] = 1\r\n return batch_x, batch_y\r\n\r\n\r\ndef train(tr_iters = 100000, display_step = 10, dropout = 0.7):\r\n x, y, pred, keep_prob, cost, optimizer, correct_pred, accuracy = model()\r\n \r\n init = tf.global_variables_initializer()\r\n saver = tf.train.Saver()\r\n sess = tf.InteractiveSession() \r\n sess.run(init)\r\n \r\n tr_array, tr_labels= load_tr_data()\r\n batch_size = tr_labels.size\r\n \r\n step = 1\r\n while step * batch_size < tr_iters:\r\n batch_x, batch_y = next_batch(tr_array,tr_labels)\r\n sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})\r\n if step % display_step == 0:\r\n acc = sess.run(accuracy, feed_dict={x:batch_x, y: batch_y, keep_prob: 1.})\r\n loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})\r\n print (\"Iter \" + str(step*batch_size) +\", Minibatch Loss= \" + \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \"{:.5f}\".format(acc))\r\n \r\n step += 1\r\n \r\n print(\"Optimization Finished\")\r\n \r\n #ts_array, ts_labels = load_ts_data()\r\n #print (\"Testing Accuracy:\", sess.run(accuracy, feed_dict={x: ts_array, y: ts_labels, keep_prob: 1.}))\r\n \r\n save_path = saver.save(sess, \"./model\")\r\n print(\"Model saved in file: %s\" % save_path)\r\n sess.close()\r\n\r\ndef test(is_trained = True):\r\n if is_trained == False:\r\n train()\r\n tf.reset_default_graph()\r\n with tf.Session() as sess:\r\n x, y, pred, keep_prob, cost, optimizer, correct_pred, accuracy = model()\r\n saver = tf.train.Saver()\r\n saver.restore(sess, tf.train.latest_checkpoint('./'))\r\n \r\n ts_array, ts_labels = load_ts_data()\r\n print (\"Testing Accuracy:\", sess.run(accuracy, feed_dict={x: ts_array, y: ts_labels, keep_prob: 1.}))\r\n confusion_matrix = tf.confusion_matrix(tf.argmax(pred,1), tf.argmax(y,1),num_classes=3)\r\n cm = sess.run(confusion_matrix , feed_dict={x: ts_array, y: ts_labels, keep_prob: 1.})\r\n cm_display(cm)\r\n\r\ndef cm_display(cm):\r\n mat = np.array(cm, dtype = np.int32)\r\n temp = np.zeros(3*3).reshape(3,3)\r\n for i in range(3) :\r\n temp[i] = mat[i]/ np.sum(mat[i])\r\n \r\n fig = plt.figure()\r\n plt.clf()\r\n ax = fig.add_subplot(111)\r\n ax.set_aspect(1)\r\n res = ax.imshow(temp, 
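The label handling in load_ts_data()/next_batch() above builds the one-hot targets row by row; an equivalent vectorised numpy sketch with a hypothetical label array:

import numpy as np

labels = np.array([0, 2, 1, 0])
batch_y = np.zeros((labels.size, 3))
batch_y[np.arange(labels.size), labels] = 1   # same result as the per-row loop in the script
print(batch_y)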
cmap=plt.cm.Wistia, \r\n interpolation='nearest')\r\n for x in range(3):\r\n for y in range(3):\r\n ax.annotate(str(mat[x][y]), xy=(y, x), \r\n horizontalalignment='center',\r\n verticalalignment='center')\r\n fig.colorbar(res)\r\n plt.xticks(np.arange(3), ['0', '1', '2'])\r\n plt.yticks(np.arange(3), ['0', '1', '2'])\r\n plt.show()\r\n plt.close()\r\n\r\n","sub_path":"cnn/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":7038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"556182676","text":"class Solution(object):\r\n def factor(self, n):\r\n if(n <= 0):\r\n return 1\r\n factor = 1\r\n while(n > 0):\r\n factor *= n\r\n n -= 1\r\n return factor\r\n \r\n def getRow(self, RowIndex):\r\n \"\"\"\r\n :type RowIndex: int\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n n = RowIndex\r\n if(n == 0):\r\n return [1]\r\n List = [1] * (n + 1)\r\n for j in range(0, n + 1):\r\n if(n == j):\r\n List[j] = 1 \r\n elif(j == 0):\r\n List[j] = 1\r\n elif(j == 1):\r\n List[j] = n\r\n else:\r\n List[j] = self.factor(n) // (self.factor(j) * self.factor(n - j))\r\n\r\n return List\r\n \r\n \r\nMy = Solution()\r\nn = 5\r\nprint(My.getRow(5))\r\nprint(My.getRow(0))\r\nprint(My.getRow(3))","sub_path":"src/Pascal'sTriangle/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"471228093","text":"#!/usr/bin/python3\n# _*_ coding:utf-8 _*_\n\"\"\"\n@author:GT\n@file:Spiders.py\n@time:5/15/201810:47 AM\n\"\"\"\nimport scrapy\n\nclass QuotesSpider(scrapy.Spider):\n name = 'quotes'\n\n def start_requests(self):\n urls = [\n 'http://quotes.toscrape.com/page/1/',\n 'http://quotes.toscrape.com/page/2/',\n ]\n for url in urls:\n yield scrapy.Request(url,callback=self.parse)\n def parse(self,response):\n page = response.url.split('/')[-2]\n filename = 'quotes-%s.html' % page\n with open(filename,'wb') as f:\n f.write(response.body)\n self.log('Saved file %s' % filename)","sub_path":"PaChong/scrapy-master/tests/tutorial/Spiders.py","file_name":"Spiders.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"399247597","text":"# Lint as: python3\n\"\"\"Tests for AutoEnsembleTPUEstimator.\n\nCopyright 2019 The AdaNet Authors. 
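As a quick cross-check of the factorial-based getRow above: it is computing the binomial coefficients C(n, j) = n! / (j! (n-j)!), so the standard library gives the same row (hypothetical driver; math.comb needs Python 3.8+):

from math import comb
n = 5
print([comb(n, j) for j in range(n + 1)])   # [1, 5, 10, 10, 5, 1], matching getRow(5)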
All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport sys\n\nfrom absl import flags\nfrom absl.testing import parameterized\nfrom adanet import tf_compat\nfrom adanet.autoensemble.estimator import AutoEnsembleTPUEstimator\nimport tensorflow as tf\n\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.estimator.export import export\nfrom tensorflow_estimator.python.estimator.canned import dnn\n# pylint: enable=g-direct-tensorflow-import\n\n\nclass _DNNTPUEstimator(tf.estimator.tpu.TPUEstimator):\n\n def __init__(self, head, hidden_units, feature_columns, optimizer, use_tpu):\n config = tf.estimator.tpu.RunConfig()\n\n def model_fn(features, labels, mode=None, params=None, config=None):\n del params # Unused.\n\n return dnn._dnn_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n hidden_units=hidden_units,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n config=config,\n use_tpu=use_tpu)\n\n super(_DNNTPUEstimator, self).__init__(\n model_fn=model_fn, config=config, train_batch_size=64)\n\n\nclass AutoEnsembleTPUEstimatorTest(parameterized.TestCase, tf.test.TestCase):\n\n def setUp(self):\n super(AutoEnsembleTPUEstimatorTest, self).setUp()\n # Setup and cleanup test directory.\n # Flags are not automatically parsed at this point.\n flags.FLAGS(sys.argv)\n self.test_subdirectory = os.path.join(flags.FLAGS.test_tmpdir, self.id())\n shutil.rmtree(self.test_subdirectory, ignore_errors=True)\n os.makedirs(self.test_subdirectory)\n\n def tearDown(self):\n super(AutoEnsembleTPUEstimatorTest, self).tearDown()\n shutil.rmtree(self.test_subdirectory, ignore_errors=True)\n\n # pylint: disable=g-long-lambda\n @parameterized.named_parameters(\n {\n \"testcase_name\":\n \"tpu_estimator_candidate_pool_not_use_tpu\",\n \"candidate_pool\":\n lambda head, feature_columns, optimizer: {\n \"dnn\":\n _DNNTPUEstimator(\n head=head,\n feature_columns=feature_columns,\n optimizer=optimizer,\n hidden_units=[3],\n use_tpu=False),\n \"wider_dnn\":\n _DNNTPUEstimator(\n head=head,\n feature_columns=feature_columns,\n optimizer=optimizer,\n hidden_units=[6],\n use_tpu=False),\n },\n \"use_tpu\":\n False,\n \"want_loss\":\n 0.315863,\n },\n {\n \"testcase_name\":\n \"estimator_candidate_pool_not_use_tpu\",\n \"candidate_pool\":\n lambda head, feature_columns, optimizer: {\n \"dnn\":\n tf.estimator.DNNEstimator(\n head=head,\n feature_columns=feature_columns,\n optimizer=optimizer,\n hidden_units=[3]),\n \"linear\":\n tf.estimator.LinearEstimator(\n head=head,\n feature_columns=feature_columns,\n optimizer=optimizer),\n },\n \"use_tpu\":\n False,\n \"want_loss\":\n 0.315863,\n },\n )\n # pylint: enable=g-long-lambda\n # TODO: Ensure AutoEnsembleTPUEstimator tets also work for TF 2.0.\n @tf_compat.skip_for_tf2\n def test_auto_ensemble_estimator_lifecycle(self, candidate_pool, use_tpu,\n want_loss):\n 
features = {\"xor\": [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]}\n labels = [[0.], [1.], [1.], [0.]]\n\n run_config = tf.estimator.tpu.RunConfig(master=\"\", tf_random_seed=42)\n head = tf.contrib.estimator.regression_head(\n loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=.01)\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n feature_columns = [tf.feature_column.numeric_column(\"xor\", shape=[2])]\n\n def train_input_fn(params):\n del params # Unused.\n\n input_features = {}\n for key, feature in features.items():\n input_features[key] = tf.constant(feature, name=key)\n input_labels = tf.constant(labels, name=\"labels\")\n return input_features, input_labels\n\n def test_input_fn(params):\n del params # Unused.\n\n input_features = tf.data.Dataset.from_tensors(\n tf.constant(features[\"xor\"])).make_one_shot_iterator().get_next()\n return {\"xor\": input_features}, None\n\n estimator = AutoEnsembleTPUEstimator(\n head=head,\n candidate_pool=candidate_pool(head, feature_columns, optimizer),\n max_iteration_steps=10,\n model_dir=self.test_subdirectory,\n config=run_config,\n use_tpu=use_tpu,\n train_batch_size=64,\n force_grow=True)\n\n # Train for three iterations.\n estimator.train(input_fn=train_input_fn, max_steps=30)\n\n # Evaluate.\n eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)\n\n self.assertAllClose(30, eval_results[\"global_step\"])\n self.assertAllClose(want_loss, eval_results[\"loss\"], atol=.3)\n\n # Predict.\n predictions = estimator.predict(input_fn=test_input_fn)\n for prediction in predictions:\n self.assertIsNotNone(prediction[\"predictions\"])\n\n # Export SavedModel.\n def serving_input_fn():\n \"\"\"Input fn for serving export, starting from serialized example.\"\"\"\n serialized_example = tf.placeholder(\n dtype=tf.string, shape=(None), name=\"serialized_example\")\n for key, value in features.items():\n features[key] = tf.constant(value)\n return export.SupervisedInputReceiver(\n features=features,\n labels=tf.constant(labels),\n receiver_tensors=serialized_example)\n\n export_dir_base = os.path.join(self.test_subdirectory, \"export\")\n export_saved_model_fn = getattr(estimator, \"export_saved_model\", None)\n if not callable(export_saved_model_fn):\n export_saved_model_fn = estimator.export_savedmodel\n export_saved_model_fn(\n export_dir_base=export_dir_base,\n serving_input_receiver_fn=serving_input_fn)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","sub_path":"adanet/autoensemble/tpu_estimator_test.py","file_name":"tpu_estimator_test.py","file_ext":"py","file_size_in_byte":7166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"209688992","text":"#!/usr/bin/env python\n# _*_coding:utf-8_*_\n\nfrom AppTest.Common import *\ntitle = \"zhangsen_title\"\ncontent = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaasssssssssssssssssss\"\n\n\nclass MyTestCase(unittest.TestCase):\n @classmethod\n def setUp(self):\n self.case_name = os.path.basename(__file__)\n browse = BrowserEngine(self)\n self.web_driver = browse.open_browser(self, url=WebControlServer.web_url)\n\n @classmethod\n def tearDown(self):\n Common.report_screen_shot(self, self.case_name)\n logger.info(\"收尾工作\")\n Common.connect_sql(self, \"delete from sys_message where title='%s'\" % title, \"scap\")\n Common.quit(self)\n\n def test_step(self):\n u\"\"\"通知管理页面点击所有分类下方显示所有分类\"\"\"\n logger.info(\"输入账号密码进行登录\")\n Common.login_web_portal(self, Content.register_count, 
Content.login_password)\n\n logger.info(\"点击运维管理\")\n Common.touch_text_by_class_name(self, ClassName.ivu_menu_item, \"运维管理\")\n Common.wait(self, 2)\n\n logger.info(\"点击发布事件\")\n Common.touch_text_by_class_name(self, ClassName.ivu_btn_primary, \"发布事件\", \"button\")\n\n logger.info(\"编辑发布的事件\")\n Common.send_text_by_class_name_and_palceholder(self, ClassName.ivu_input, \"标题\", title)\n con = Common.get_result_by_class_name_blank(self, \"div\", ClassName.qleditor_blank)\n Common.send_text_by_element(self, con, \"标题\", title)\n\n logger.info(\"选择分类\")\n Common.touch_text_by_class_name(self, ClassName.ivu_select_placeholder, \"请选择\")\n Common.touch_text_by_class_name(self, ClassName.ivu_select_item, \"系统公告\")\n Common.touch_text_by_class_name(self, ClassName.ivu_radio_wrapper_group_item, \"系统公告\", \"label\")\n\n logger.info(\"点击发送\")\n Common.touch_text_by_class_name(self, ClassName.ivu_reload_success, \"发送\", \"button\")\n\n logger.info(\"撤回公告消息\")\n Common.touch_text_by_class_name(self, ClassName.ivu_menu_item, \"所有\")\n Common.touch_class_name_by_name(self, title, \"button\", ClassName.ivu_btn_error_small)\n Common.touch_text_by_class_name(self, ClassName.ivu_btn_large, \"确定\", \"button\")\n\n logger.info(\"判断是否撤回成功\")\n ele_list = Common.get_results_by_class_name_blank(self, \"tr\", ClassName.ivu_table_row)\n text_list = Common.get_text_by_elements(self, ele_list)\n result = Common.check_text_in_list(self, text_list, title)\n self.assertFalse(result)\n\n\n\n \n","sub_path":"AppTest/testCase/Sprint5/case_Sprint5_portal_approval_notice_0008.py","file_name":"case_Sprint5_portal_approval_notice_0008.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"246878362","text":"from collections import defaultdict\nimport matplotlib.pyplot as plt\nfrom github import Github\nfrom matplotlib.ticker import MultipleLocator, FuncFormatter\n\nGITHUB_TOKEN = \"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\"\ng = Github(GITHUB_TOKEN)\n\nORGANIZATION = \"ivelum\"\nREPOSITORY = \"teamplify\"\nSTART_WORKFLOW = \"test.yaml\"\nEND_WORKFLOW = \"deploy.yaml\"\nLAST_N_RUNS = 50\n\n\ndef get_time_series():\n repo = g.get_organization(ORGANIZATION).get_repo(REPOSITORY)\n\n runs_by_sha = defaultdict(lambda: dict(workflow_names=[], start=None, end=None))\n for workflow_name in [START_WORKFLOW, END_WORKFLOW]:\n workflow = repo.get_workflow(workflow_name)\n\n all_runs = iter(workflow.get_runs(status=\"completed\"))\n\n for _ in range(LAST_N_RUNS):\n workflow_run = next(all_runs)\n\n if workflow_run.status != \"completed\":\n continue\n\n if workflow_run.conclusion != \"success\":\n continue\n\n stats = runs_by_sha[workflow_run.head_sha]\n\n stats[\"workflow_names\"].append(workflow_name)\n\n if not stats[\"start\"] or stats[\"start\"] > workflow_run.created_at:\n stats[\"start\"] = workflow_run.created_at\n\n if not stats[\"end\"] or stats[\"end\"] < workflow_run.updated_at:\n stats[\"end\"] = workflow_run.updated_at\n\n with_start_and_end = [\n stats\n for stats in runs_by_sha.values()\n if set(stats[\"workflow_names\"]) == {START_WORKFLOW, END_WORKFLOW}\n ]\n by_start = sorted(with_start_and_end, key=lambda stats: stats[\"start\"])\n\n starts = []\n durations = []\n for stats in by_start:\n starts.append(stats[\"start\"].strftime(\"%-dth\\n%H:%M\"))\n duration = stats[\"end\"] - stats[\"start\"]\n durations.append(duration.seconds)\n\n return starts, durations\n\n\ndef 
format_seconds(total_seconds, position=None):\n minutes = total_seconds // 60\n seconds = total_seconds % 60\n return f\"{minutes:.0f}:{seconds:02.0f}\"\n\n\nif __name__ == \"__main__\":\n plt.style.use(\"seaborn-v0_8\")\n\n starts, durations = get_time_series()\n\n ax = plt.subplot()\n bars = ax.bar(starts, durations)\n ax.bar_label(bars, labels=[format_seconds(duration) for duration in durations])\n ax.yaxis.set_major_locator(MultipleLocator(base=60))\n ax.yaxis.set_major_formatter(FuncFormatter(format_seconds))\n ax.set_ylim(bottom=0)\n plt.show()\n","sub_path":"workflow_stats.py","file_name":"workflow_stats.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"177134731","text":"from linked_list import node\n\n\nclass Singly_LinkedList():\n\n def __init__(self):\n self.head = None # initialize the head pointer: Each SLL instance will maintain single ref to 1st node\n\n def create_list(self):\n n = int(input(\"Enter the number of nodes\"))\n if n == 0:\n return\n for i in range(n):\n print(\"Enter element at position\", i + 1)\n data = int(input(\"Enter element \"))\n self.insert_end(data)\n print(\"Entering new node\")\n\n def display_list(self):\n p = self.head\n if p is None:\n print('List is empty')\n return\n while p is not None:\n print(p.data)\n p = p.next\n\n def count(self):\n p = self.head\n count = 0\n if p is None:\n print(\"Empty List\")\n return\n while p is not None:\n count += 1\n p = p.next\n return count\n\n def search(self, item): # if elem exists , return position\n p = self.head\n position = 1\n if p is None:\n print(\"Empty List\")\n return\n while p is not None:\n if p.data == item:\n print(\"Condition matched\")\n print(item, \"is at position\", position)\n return True\n p = p.next\n position += 1\n\n print(\"Element not found\")\n return False\n\n ### Insert methods ###\n\n def insert_begin(self, data):\n new = node.Node(data)\n if self.head is None:\n self.head = new\n return\n\n new.next = self.head\n self.head = new\n #print(\"Inserted at beginning\")\n\n def insert_end(self, data):\n new = node.Node(data)\n\n if self.head is None:\n print(\"Empty List, adding first element\")\n self.head = new\n return\n\n p = self.head\n while p.next is not None:\n p = p.next\n p.next = new\n print(\"Inserted at end\")\n\n def data_at_position(self, k):\n p = self.head\n position = 0\n while p.next is not None:\n print(\"inside while loop\")\n if position == k - 1:\n print(\"in condition\")\n return p.data\n p = p.next\n position += 1\n\n print(\"Position not found\")\n return\n\n def insert_at_position(self, data, k):\n new = node.Node(data)\n p = self.head\n idx = 0\n while p.next is not None:\n if idx == k - 1:\n new.next = p.next\n p.next = new\n print(\"Inserted at position\", k)\n return\n p = p.next\n idx += 1\n print(\"Position not found\")\n\n def insert_after(self, item,val):\n p = self.head\n position = 1\n\n if p is None:\n print(\"Empty List\")\n\n while p is not None:\n if p.data == item:\n self.insert_at_position(val, position)\n p = p.next\n position += 1\n\n def insert_before(self, item, val):\n p = self.head\n position = 1\n\n if p is None:\n print(\"Empty List\")\n\n while p is not None:\n if p.data == item:\n self.insert_at_position(val, position - 1)\n p = p.next\n position += 1\n\n def del_node(self, item):\n p = self.head\n\n if p is None:\n print(\"Empty List\")\n\n while p is not None:\n if p.next.data == item:\n p.next = p.next.next\n return\n p = 
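Note that matplotlib's FuncFormatter calls its function with (tick_value, position), which is why format_seconds above accepts the otherwise-unused position argument. A tiny check of the helper itself (hypothetical values, assuming the function above is in scope):

print(format_seconds(125))    # "2:05"
print(format_seconds(59))     # "0:59"
print(format_seconds(3600))   # "60:00" - minutes are not wrapped into hours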
p.next\n print(\"Node not found\")\n\n def del_first_node(self):\n self.head = self.head.next\n\n def del_last_node(self):\n p = self.head\n while p.next.next is not None:\n p = p.next\n\n p.next = None\n\n\ndef main():\n new_list = Singly_LinkedList()\n print(new_list.display_list())\n new_list.insert_begin(4)\n print(new_list.display_list())\n new_list.insert_end(5)\n new_list.insert_at_position(10, 1)\n new_list.insert_after(10, 25)\n new_list.insert_before(3, 39)\n print(new_list.display_list()) # Why does none get printed?\n print(\"####\")\n # new_list.search(3)\n # new_list.del_last_node()\n # new_list.display_list()\n # print(\"####\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"linked_list/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"348576000","text":"import numpy as np\nimport matplotlib.pylab as plt\n\ndef sigmoid(x, derivative=False):\n return x*(1-x) if derivative else 1/(1+np.exp(-x))\n\n\ndef forwardProp(x, theta):\n\n\tz = []\n\ta = []\n\t\n\t#sets a[0] to input activations\n\ta.append(x)\n\n\t#computes hidden layer activations\n\ta.append(sigmoid(np.dot(a[0], theta[0])))\n\n\t#computes output layer activation\n\ta.append(sigmoid(np.dot(a[1], theta[1])))\n\t\n\treturn a\n\t\n\t\n\t\n\n\n\nexamples = np.array([[[0,0], [0]],\n [[0,1], [1]],\n [[1,0], [1]],\n [[1,1], [0]]])\n \nlearningRate = 0.1\n\n\ntheta = np.array([np.random.random((2,2))*4. - 2., np.random.random((2,1))*4. - 2.])\n\n\ndeltaSum = np.array([np.zeros((2, 2)), np.zeros((2, 1))])\n\n\nfor x in xrange(0, 1):\n \n\t#for e in xrange(0, len(examples)):\n\tfor e in xrange(2,3):\n\t\n\t\ta = forwardProp(examples[e][0], theta)\n\t\tE = []\n\t\n\t\t#****Need to change to work with multiple output neurons\n\t\tE.insert(0, (examples[e][1] - a[2])*sigmoid(a[2], True))\n\n\t\t#****Need to change to work with multiple output neurons\n\t\tE.insert(0, theta[1] * E[0][0] * sigmoid(a[1], True))\n\n\t\tdelta = np.array([np.zeros((2, 2)), np.zeros((2, 1))])\n\t\n\t\tfor l in xrange(len(delta)):\n\t\t\tfor i in xrange(0, len(delta[l])):\n\t\t\t\tfor j in xrange(0, len(delta[l][i])):\n\t\t\t\t\tdelta[l][i][j] = a[l][i] * E[l][j]\n\t\t\n\t\n\t\tdeltaSum += delta\n\t\n\ttheta += learningRate * (1.0/len(examples)) * deltaSum\n\n\t\n\n#for each in examples:\n#\tprint forwardProp(each[0], theta)\n\n","sub_path":"npbasicnn.py","file_name":"npbasicnn.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"27989323","text":"from flask import Flask\n\nfrom flask_cors import cross_origin\nfrom flask import render_template, url_for, flash, redirect, request, Response\nimport datetime\nimport cv2 \nimport threading\n\napp = Flask(__name__)\n\n@app.route('/record', methods=['POST'])\n@cross_origin()\ndef record():\n video = cv2.VideoCapture('rtsp://admin:12345678a@@192.168.0.252:8554/fhd') \n if (video.isOpened() == False): \n print(\"Error reading video file\") \n time = datetime.datetime.now().strftime('%x')\n time = str(time).replace('/', '-')\n print(time)\n filename = f'{time}.avi'\n print(filename) \n frame_width = int(video.get(3)) \n frame_height = int(video.get(4)) \n size = (frame_width, frame_height) \n result = cv2.VideoWriter(filename,cv2.VideoWriter_fourcc(*'MJPG'),20, size) \n # global stt\n # stt = request.json['status'] \n while(True): \n ret, frame = 
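One detail worth flagging in the sigmoid helper above: with derivative=True it returns x*(1-x), i.e. it expects the already-activated value a = sigmoid(z) rather than z itself. A small self-contained numpy check of that convention:

import numpy as np

z = np.array([-2.0, 0.0, 2.0])
a = 1 / (1 + np.exp(-z))          # forward sigmoid, as in the helper
print(a)                          # activations
print(a * (1 - a))                # derivative evaluated at those activations (max 0.25 at z = 0)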
video.read()\n if ret == True:\n result.write(frame) \n cv2.imshow('FramFe', frame) \n if cv2.waitKey(1) & 0xFF == ord('s'):\n break \n \n video.release() \n result.release() \n \n cv2.destroyAllWindows() \n return \"stop\"\n\nprint(\"The video was successfully saved\") \nif __name__ == \"__main__\" :\n # live_demo()\n app.run(host=\"0.0.0.0\",debug=True)","sub_path":"app_person_detection/record2.py","file_name":"record2.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"435207605","text":"# Futures #\nfrom __future__ import division\n\n# Built-in modules #\nimport os, socket\n\n# Internal modules #\nfrom gefes.common import flatten\nfrom gefes.common.autopaths import AutoPaths\nfrom gefes.common.cache import property_cached\nfrom gefes.graphs import assembly_plots\nfrom gefes.helper.contig import Contig\nfrom gefes.fasta.single import FASTA\nfrom gefes.common.slurm import nr_threads\n\n# Third party modules #\nimport sh\n\n# Constant #\nhostname = socket.gethostname()\n\n###############################################################################\nclass Assembly(object):\n \"\"\"The co-assembly of all pools. Will call the Ray assembler.\n https://github.com/sebhtml/ray\"\"\"\n\n short_name = 'ray'\n executable = 'Ray23'\n\n all_paths = \"\"\"\n /graphs/\n /ray_output/\n /ray_output/Contigs.fasta\n /ray_output/report.txt\n \"\"\"\n\n def __repr__(self): return '<%s object of %s>' % (self.__class__.__name__, self.parent)\n\n def __init__(self, aggregate):\n # Save parent #\n self.parent, self.aggregate = aggregate, aggregate\n # Auto paths #\n self.base_dir = self.parent.p.assembly_dir\n self.p = AutoPaths(self.base_dir, self.all_paths)\n # Convenience objects #\n self.contigs_fasta = FASTA(self.p.Contigs)\n # Graphs #\n self.graphs = [getattr(assembly_plots, cls_name)(self) for cls_name in assembly_plots.__all__]\n\n @property_cached\n def contigs(self):\n \"\"\"A list of all the contigs produced as custom objects\"\"\"\n return [Contig(self, s) for s in self.contigs_fasta]\n\n def assemble(self):\n # Ray needs a non-existing directory #\n out_dir = self.p.output_dir\n out_dir.remove()\n # Make the pairs of fastq #\n pairs = flatten([('-p', p.cleaner.fwd.path, p.cleaner.rev.path) for p in self.parent])\n # Call Ray on the cray #\n if os.environ.get('CSCSERVICE') == 'sisu':\n stats = sh.aprun('-n', nr_threads, self.executable, '-k', 81, '-o', out_dir, *pairs)\n # Call Ray on Kalkyl #\n elif os.environ.get('SNIC_RESOURCE') == 'kalkyl':\n stats = sh.mpiexec('-n', nr_threads, self.executable, '-k', 81, '-o', out_dir, *pairs)\n # Call Ray just locally #\n else:\n command = sh.Command(self.executable)\n stats = command('-k', 81, '-o', out_dir, *pairs)\n # Print the report #\n with open(self.p.report, 'w') as handle: handle.write(str(stats))\n\n def index(self):\n \"\"\"Create two indexes. 
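A hypothetical client call for the /record endpoint defined above (assumes the Flask app is running locally on its default port 5000 and that the RTSP camera is reachable):

import requests

resp = requests.post("http://localhost:5000/record")
print(resp.status_code, resp.text)   # the handler returns "stop" once recording ends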
For both bowtie2 and samtools on the contigs fasta file.\"\"\"\n sh.bowtie2_build(self.contigs_fasta, self.contigs_fasta)\n sh.samtools('faidx', self.contigs_fasta)\n\n def make_plots(self):\n for graph in self.graphs: graph.plot()","sub_path":"gefes/helper/assembler.py","file_name":"assembler.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"388195344","text":"# ----------------------------------------------------------------------------\n# Copyright 2015 Grammarly, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\nimport numpy as np\nfrom unittest import TestCase\nfrom quagga.context import Context\nfrom quagga.matrix import GpuMatrix\nfrom quagga.connector import Connector\nfrom quagga.blocks import MeanPoolingBlock\n\n\nclass TestMeanPoolingBlock(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.rng = np.random.RandomState(seed=42)\n cls.N = 50\n\n @classmethod\n def get_random_array(cls, shape=None):\n if shape:\n a = 4 * cls.rng.rand(*shape) - 2\n else:\n nrows, ncols = cls.rng.randint(low=1, high=7000, size=2)\n a = 4 * cls.rng.rand(nrows, ncols) - 2\n return a.astype(dtype=np.float32)\n\n def test_fprop(self):\n r = []\n\n for i in xrange(self.N):\n a = self.get_random_array()\n a_gpu = Connector(GpuMatrix.from_npa(a, 'float'))\n vpooling_block = MeanPoolingBlock(a_gpu, axis=0)\n hpooling_block = MeanPoolingBlock(a_gpu, axis=1)\n\n vpooling_block.fprop()\n r.append(np.allclose(vpooling_block.output.to_host(),\n np.mean(a, axis=0, keepdims=True),\n atol=1e-6))\n hpooling_block.fprop()\n r.append(np.allclose(hpooling_block.output.to_host(),\n np.mean(a, axis=1, keepdims=True),\n atol=1e-6))\n\n self.assertEqual(sum(r), 2 * self.N)\n\n def test_bprop(self):\n r = []\n\n context = Context()\n for i in xrange(self.N):\n a = self.get_random_array()\n a_gpu = Connector(GpuMatrix.from_npa(a, 'float'), bu_device_id=context)\n vpooling_block = MeanPoolingBlock(a_gpu, axis=0)\n voutput, dL_dvoutput = vpooling_block.output.register_usage(context, context)\n _dL_voutput = self.get_random_array((dL_dvoutput.nrows, dL_dvoutput.ncols))\n GpuMatrix.from_npa(_dL_voutput, 'float').copy_to(context, dL_dvoutput)\n\n hpooling_block = MeanPoolingBlock(a_gpu, axis=1)\n houtput, dL_dhoutput = hpooling_block.output.register_usage(context, context)\n _dL_houtput = self.get_random_array((dL_dhoutput.nrows, dL_dhoutput.ncols))\n GpuMatrix.from_npa(_dL_houtput, 'float').copy_to(context, dL_dhoutput)\n\n vpooling_block.fprop()\n vpooling_block.bprop()\n dL_dmatrix = vpooling_block.dL_dmatrix.to_host()\n r.append(np.allclose(dL_dmatrix,\n np.repeat(_dL_voutput/a.shape[0], a.shape[0], 0),\n atol=1e-6))\n\n hpooling_block.fprop()\n hpooling_block.bprop()\n hpooling_block.dL_dmatrix.to_host()\n dL_dmatrix = hpooling_block.dL_dmatrix.to_host()\n r.append(np.allclose(dL_dmatrix,\n np.repeat(_dL_houtput/a.shape[1], a.shape[1], 1),\n 
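The backward-pass check above uses np.repeat(dL_doutput / n, n, axis) because mean pooling has d(mean)/dx_i = 1/n, so the upstream gradient is shared evenly across the pooled elements. A tiny numpy illustration with a hypothetical 2x3 input pooled over axis 0:

import numpy as np

dL_dout = np.array([[0.6, -0.3, 0.9]])           # gradient w.r.t. the pooled (1, 3) output
n_rows = 2
dL_dx = np.repeat(dL_dout / n_rows, n_rows, 0)   # each of the 2 input rows receives half the gradient
print(dL_dx)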
atol=1e-6))\n\n self.assertEqual(sum(r), 2 * self.N)","sub_path":"tests/blocks/test_MeanPoolingBlock.py","file_name":"test_MeanPoolingBlock.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"80330092","text":"import itertools\nucnum = int(input())\nans = list()\nfor i in range(ucnum):\n num = int(input())\n strs = input().split()\n maxs = 0\n for lists in itertools.permutations(strs):\n string = ''\n for j in lists:\n string+=j\n index = int(string)\n if index>maxs:\n max = index\n ans.append(index)\nfor i in ans:\n print(i)\n","sub_path":"Code/CodeRecords/2365/60716/290541.py","file_name":"290541.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"159997395","text":"import time\nfrom bs4 import BeautifulSoup as bs\nfrom splinter import Browser\n\ndef init_browser():\n executable_path = {\"executable_path\": \"/usr/local/bin/chromedriver\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\ndef scrape():\n\n browser = init_browser()\n\n # create surf_data dict that we can insert into mongo\n\n #First, scrape the news\n mars_data = {}\n\n # Visit mars.nasa.gov to scrape news\n url = \"https://mars.nasa.gov/news/\"\n browser.visit(url)\n\n time.sleep(1)\n html = browser.html\n soup = bs(html, \"html.parser\")\n\n result = soup.find(\"li\", class_=\"slide\")\n\n news_title = result.find(\"div\",class_=\"content_title\").text\n news_p = result.find(\"div\",class_=\"article_teaser_body\").text\n mars_data[\"news_title\"] = news_title\n mars_data[\"news_para\"] = news_p\n\n #Scrape images\n url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(url)\n\n img_html = browser.html\n soup = bs(img_html, \"html.parser\")\n\n img_results = soup.find(\"div\", class_=\"carousel_items\").find(\"article\")[\"style\"]\n featured_image_url = \"https://www.jpl.nasa.gov\"+ img_results.split(\"'\")[1]\n\n mars_data[\"img_link\"] = featured_image_url\n\n # print(mars_data)\n return mars_data\n\n# if __name__ == \"__main__\":\n# scrape()","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"547034468","text":"# -*- coding: utf-8 -*-\n\nimport base64\nimport hmac\nimport hashlib\nimport json\n\nimport urllib\nimport datetime\nimport requests\n#import urlparse # urllib.parse in python 3\n\n# timeout in 5 seconds:\nTIMEOUT = 5\n\n#各种请求,获取数据方式\ndef http_get_request(url, params, add_to_headers=None):\n headers = {\n \"Content-type\": \"application/x-www-form-urlencoded\",\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'\n }\n if add_to_headers:\n headers.update(add_to_headers)\n postdata = urllib.parse.urlencode(params)\n try:\n response = requests.get(url, postdata, headers=headers, timeout=TIMEOUT)\n if response.status_code == 200:\n return response.json()\n else:\n return {\"status\":\"fail\"}\n except Exception as e:\n print(\"httpGet failed, detail is:%s\" %e)\n return {\"status\":\"fail\",\"msg\": \"%s\"%e}\n\ndef http_post_request(url, params, add_to_headers=None):\n headers = {\n \"Accept\": \"application/json\",\n 'Content-Type': 'application/json',\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'\n }\n if add_to_headers:\n 
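The permutation snippet above appears to assign to the builtin name max instead of maxs and appends every candidate inside the loop, so it ends up printing all permutations rather than only the largest concatenation. A compact, correct variant of the same brute-force idea, on a hypothetical input list:

from itertools import permutations

strs = ["3", "30", "34", "5", "9"]
print(max(int("".join(p)) for p in permutations(strs)))   # 9534330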
headers.update(add_to_headers)\n postdata = json.dumps(params)\n try:\n response = requests.post(url, postdata, headers=headers, timeout=TIMEOUT)\n if response.status_code == 200:\n return response.json()\n else:\n return response.json()\n except Exception as e:\n print(\"httpPost failed, detail is:%s\" % e)\n return {\"status\":\"fail\",\"msg\": \"%s\"%e}\n\n\ndef api_key_get(url, request_path, params, ACCESS_KEY, SECRET_KEY):\n method = 'GET'\n timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')\n params.update({'AccessKeyId': ACCESS_KEY,\n 'SignatureMethod': 'HmacSHA256',\n 'SignatureVersion': '2',\n 'Timestamp': timestamp})\n\n host_name = host_url = url\n #host_name = urlparse.urlparse(host_url).hostname\n host_name = urllib.parse.urlparse(host_url).hostname\n host_name = host_name.lower()\n\n params['Signature'] = createSign(params, method, host_name, request_path, SECRET_KEY)\n url = host_url + request_path\n return http_get_request(url, params)\n\n\ndef api_key_post(url, request_path, params, ACCESS_KEY, SECRET_KEY):\n method = 'POST'\n timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')\n params_to_sign = {'AccessKeyId': ACCESS_KEY,\n 'SignatureMethod': 'HmacSHA256',\n 'SignatureVersion': '2',\n 'Timestamp': timestamp}\n\n host_url = url\n #host_name = urlparse.urlparse(host_url).hostname\n host_name = urllib.parse.urlparse(host_url).hostname\n host_name = host_name.lower()\n params_to_sign['Signature'] = createSign(params_to_sign, method, host_name, request_path, SECRET_KEY)\n url = host_url + request_path + '?' + urllib.parse.urlencode(params_to_sign)\n return http_post_request(url, params)\n\n\ndef createSign(pParams, method, host_url, request_path, secret_key):\n sorted_params = sorted(pParams.items(), key=lambda d: d[0], reverse=False)\n encode_params = urllib.parse.urlencode(sorted_params)\n payload = [method, host_url, request_path, encode_params]\n payload = '\\n'.join(payload)\n payload = payload.encode(encoding='UTF8')\n secret_key = secret_key.encode(encoding='UTF8')\n digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest()\n signature = base64.b64encode(digest)\n signature = signature.decode()\n return signature\n","sub_path":"src/core/coin/lib/huobipro_api/HuobiDMUtil.py","file_name":"HuobiDMUtil.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"164134543","text":"\nimport numpy as np\nimport os\nimport graphcnn_option\n\n\nfilename = './hier_rootstr'\nfr_rootstr = open(filename, 'w')\nfilename = './hier_rootlist'\nfr_rootlist = open(filename, 'w')\n\n\nTHRESHOLD1 = 1200\nTHRESHOLD2 = 900\nTHRESHOLD3 = 900\nTHRESHOLD4 = 600\n# SUM = 0\nROOT = 2143406 # the root of subgraph\n\nDATA_PATH = graphcnn_option.DATA_PATH # Path to data directory\nTRAIN_DATA_DIR = graphcnn_option.TRAIN_DATA_DIR\n\ndef generate_labels_list_per_example(data_dir = DATA_PATH):\n \"\"\" get example(graph)-labels file:\n 1 3 4\n 6 9\n ...\n\n \"\"\"\n filename = os.path.join(data_dir, 'data.train')\n fr = open(filename,'r')\n graphlines = fr.readlines()\n fr.close()\n filename = os.path.join(data_dir, 'example-labels')\n fr = open(filename, 'w')\n index = 1\n for line in graphlines:\n if index % 4 == 0:\n line = line.strip() # remove the '\\n',' ' on the head and end\n print(line, file=fr)\n index = index + 1\n fr.close()\n\ndef group_labels_by_examples(data_dir = DATA_PATH):\n '''According to the examples(graphs), labels can be divided into 
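createSign above sorts the query parameters, joins method, host, path and the URL-encoded query with newlines, and signs the result with HMAC-SHA256 before base64-encoding it. A self-contained illustration with a hypothetical key, host and path (not real credentials or a real request):

import base64, hashlib, hmac
import urllib.parse

params = {"AccessKeyId": "demo-key", "SignatureMethod": "HmacSHA256", "Timestamp": "2020-01-01T00:00:00"}
encoded = urllib.parse.urlencode(sorted(params.items()))
payload = "\n".join(["GET", "api.example.com", "/v1/demo/path", encoded]).encode("utf8")
digest = hmac.new("demo-secret".encode("utf8"), payload, digestmod=hashlib.sha256).digest()
print(base64.b64encode(digest).decode())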
different groups.\n examples for each group are disjoint from other groups.\n\n Return:\n label groups list and example groups list\n '''\n\n label_groups_list = []\n example_groups_list = []\n\n # example-labels file\n filename = '../data/example-labels'\n fr = open(filename, 'r')\n example_labels_lines = fr.readlines()\n fr.close()\n examples_number = len(example_labels_lines)\n labels_number = 36504\n examples_flag = np.zeros([examples_number],dtype=int)\n for label in range(0,labels_number):\n # step1\n flag = 0\n for group in label_groups_list:\n if label in group:\n flag = 1\n break\n if flag == 1:\n continue\n\n # step2\n label_group = []\n example_group = []\n label_group.append(label)\n count = 0\n while count < len(label_group):\n label_count = label_group[count]\n count += 1\n # step3\n for i in range(0,examples_number):\n if examples_flag[i] == 0:\n line = example_labels_lines[i]\n line = line.strip()\n linelist = line.split(' ')\n linelist = [int(j) for j in linelist]\n if label_count in linelist:\n examples_flag[i] = 1\n example_group.append(i)\n for j in linelist:\n if j not in label_group:\n label_group.append(j)\n label_groups_list.append(label_group)\n example_groups_list.append(example_group)\n\n filename = '../data/label_groups'\n fr = open(filename, 'w')\n for list_i in label_groups_list:\n for i in list_i:\n print(i,end=' ',file=fr)\n print('',file=fr)\n fr.close()\n\n filename = '../data/example_groups'\n fr = open(filename, 'w')\n for list_i in example_groups_list:\n for i in list_i:\n print(i, end=' ', file=fr)\n print('', file=fr)\n fr.close()\n\n# 生成原始的样本标签\ndef generate_example_labels_orig():\n\n filename = '../data/remap'\n remap = np.loadtxt(filename, dtype=int)\n filename = '../data/example_labels_remap'\n fr = open(filename, 'r')\n lines = fr.readlines()\n fr.close()\n filename = '../data/example_labels_orig'\n fr = open(filename, 'w')\n for line in lines:\n line = line.strip()\n linelist = line.split(' ')\n linelist = [int(j) for j in linelist]\n labels_remap = remap[linelist, 0]\n for i in labels_remap:\n print(i, end=' ', file=fr)\n print('', file=fr)\n fr.close()\n\n# 将hierarchy的父子关系合并,统计每个Node的所有直接父亲和孩子\ndef generate_hier_dict_with_parent_child():\n\n filename = '../data/lshtc/hierarchyWikipediaMedium.txt'\n parent_child_array = np.loadtxt(filename,dtype=int)\n\n hier_dict = {}\n for i in range(0,np.size(parent_child_array,0)):\n parent = parent_child_array[i][0]\n child = parent_child_array[i][1]\n if parent in hier_dict.keys():\n hier_dict[parent][1].append(child)\n else:\n parent_list = []\n child_list = [child]\n hier_dict[parent] = [parent_list, child_list]\n if child in hier_dict.keys():\n hier_dict[child][0].append(parent)\n else:\n parent_list = [parent]\n child_list = []\n hier_dict[child] = [parent_list, child_list]\n filename = '../data/hier_relation'\n fr = open(filename,'w')\n for k,v in hier_dict.items():\n print(k,file=fr)\n for hier_list in v:\n for label in hier_list:\n print(label,end=' ',file=fr)\n print('',file=fr)\n fr.close()\n sum=0\n for k, v in hier_dict.items():\n if len(v[0]) != 0:\n for i in range(0, np.size(parent_child_array, 0)):\n if parent_child_array[i][0] in v[0] and parent_child_array[i][1] in v[0]:\n if len(v[1])!=0:\n print(True)\n return\n\n print(sum)\n return hier_dict\n\n# 生成扩展后的样本标签\ndef generate_example_labels_orig_expand():\n filename = '../data/example_labels_orig'\n fr = open(filename, 'r')\n lines = fr.readlines()\n fr.close()\n hier_dict = generate_hier_dict_with_parent_child()\n filename = 
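generate_hier_dict_with_parent_child above turns the hierarchy edge list into a dict mapping each label to [parent_list, child_list]. A toy illustration of that construction with a hypothetical edge list in place of np.loadtxt on hierarchyWikipediaMedium.txt:

edges = [[1, 2], [1, 3], [2, 4]]          # each row: parent child
hier_dict = {}
for parent, child in edges:
    hier_dict.setdefault(parent, [[], []])[1].append(child)   # record child under parent
    hier_dict.setdefault(child, [[], []])[0].append(parent)   # record parent under child
print(hier_dict)   # {1: [[], [2, 3]], 2: [[1], [4]], 3: [[1], []], 4: [[2], []]}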
'../data/example_labels_orig_expand'\n fr = open(filename, 'w')\n for line in lines:\n line = line.strip()\n linelist = line.split(' ')\n linelist = [int(j) for j in linelist]\n count = 0\n while count < len(linelist):\n label_count = linelist[count]\n count += 1\n parent_list = hier_dict[label_count][0]\n for i in parent_list:\n if i not in linelist:\n linelist.append(i)\n for i in linelist:\n print(i, end=' ', file=fr)\n print('', file=fr)\n fr.close()\n\n# 根据hierarchy生成最顶层的根节点列表(可能不唯一)\n# root: 2143406\ndef generate_hier0_data(): # 2143406\n ''' hier0_remap,hier0_labels,hier0_graphs_index,hier0_graphs'''\n filename = '../data/lshtc/hier_relation'\n fr = open(filename,'r')\n lines = fr.readlines()\n fr.close()\n hier_dict = {}\n index = 0\n for line in lines:\n index_mod = index % 3\n if index_mod == 0:\n line = line.strip()\n label = int(line)\n elif index_mod == 1:\n line = line.strip()\n if len(line)==0:\n parent_list = []\n else:\n linelist = line.split(' ')\n parent_list = [int(j) for j in linelist]\n elif index_mod == 2:\n line = line.strip()\n if len(line) == 0:\n child_list = []\n else:\n linelist = line.split(' ')\n child_list = [int(j) for j in linelist]\n hier_dict[label] = [parent_list,child_list]\n index +=1\n\n hier0_remap = []\n # find root nodes:(who has no parent)\n for k in hier_dict.keys():\n if len(hier_dict[k][0])==0:\n hier0_remap.append(k)\n\n hier0_remap_len = len(hier0_remap)\n filename = '../data/lshtc/hier0_remap'\n fr = open(filename,'w')\n for i in range(0,hier0_remap_len):\n print('%d %d' % (hier0_remap[i],i), file=fr)\n fr.close()\n\n# 生成root为根的子树的叶子节点列表\ndef _compute_leaf_node_of_sub_tree(hier_dict,root,fr):\n hier_remap = hier_dict[root][1]\n hier_remap_len = len(hier_remap)\n if hier_remap_len == 0: # leaf node\n print('%d %d'%(root,root),file=fr)\n return [root]\n leaf_root = []\n for label in hier_remap:\n leaf_list = _compute_leaf_node_of_sub_tree(hier_dict,label,fr)\n for one in leaf_list:\n if one not in leaf_root:\n leaf_root.append(one)\n print('%d' % (root), end=' ', file=fr)\n for one in leaf_root:\n print('%d' % (one), end=' ', file=fr)\n print('',file=fr)\n return leaf_root\n\n\ndef _subfunc1(hier_remap, root, level):\n hier_remap_len = len(hier_remap)\n filename = '../data/lshtc/hier%d_%d_remap' % (level, root)\n fr = open(filename, 'w')\n for i in range(0, hier_remap_len):\n print('%d %d' % (hier_remap[i], i), file=fr)\n fr.close()\n filename = '../data/lshtc/hier%d_%d_labels' % (level, root)\n fr_label = open(filename, 'w')\n filename = '../data/lshtc/hier%d_%d_graphs_index' % (level, root)\n fr_graph = open(filename, 'w')\n filename = '../data/lshtc/example_labels_orig_expand'\n fr = open(filename, 'r')\n lines = fr.readlines()\n fr.close()\n for i in range(0, len(lines)):\n line = lines[i]\n line = line.strip()\n linelist = line.split(' ')\n linelist = [int(k) for k in linelist]\n flag = 0\n for j in range(0, len(hier_remap)):\n if hier_remap[j] in linelist:\n flag = 1\n print(j, end=' ', file=fr_label)\n if flag == 1:\n print(i, file=fr_graph)\n print('', file=fr_label)\n fr_graph.close()\n fr_label.close()\n\ndef _generate_hier_n_data_subfunc(hier_dict,hier_leafList_dict,root,level,subtrees_used):\n subtrees_used.append(root)\n root_leaf_list = hier_leafList_dict[root]\n if len(root_leaf_list) == 1 and root_leaf_list[0] == root: # leaf node\n return\n if len(root_leaf_list) < THRESHOLD1:\n _subfunc1(root_leaf_list, root, level)\n return\n hier_remap = hier_dict[root][1]\n _subfunc1(hier_remap,root,level)\n for label in hier_remap:\n if 
label not in subtrees_used:\n _generate_hier_n_data_subfunc(hier_dict,hier_leafList_dict,label,level+1,subtrees_used)\n\ndef generate_hier_n_data_root2leaf(): # 2143406\n '''\n hierN_XXX_remap\n hierN_XXX_labels\n hierN_XXX_graphs_index\n hierN_XXX_graphs\n '''\n\n filename = '../data/lshtc/hier_relation'\n fr = open(filename,'r')\n lines = fr.readlines()\n fr.close()\n hier_dict = {}\n index = 0\n for line in lines:\n index_mod = index % 3\n if index_mod == 0:\n line = line.strip()\n label = int(line)\n elif index_mod == 1:\n line = line.strip()\n if len(line)==0:\n parent_list = []\n else:\n linelist = line.split(' ')\n parent_list = [int(j) for j in linelist]\n elif index_mod == 2:\n line = line.strip()\n if len(line) == 0:\n child_list = []\n else:\n linelist = line.split(' ')\n child_list = [int(j) for j in linelist]\n hier_dict[label] = [parent_list,child_list]\n index +=1\n\n root = 2143406 # the root of subgraph\n\n # filename = '../data/lshtc/hier_relation_leafList'\n # fr = open(filename,'w')\n # print(len(_compute_leaf_node_of_sub_tree(hier_dict,root,fr)))\n # fr.close()\n\n filename = '../data/lshtc/hier_relation_leafList'\n fr = open(filename,'r')\n hier_leafList = fr.readlines()\n fr.close()\n hier_leafList_dict = {}\n for line in hier_leafList:\n line = line.strip()\n linelist = line.split(' ')\n linelist = [int(j) for j in linelist]\n hier_leafList_dict[linelist[0]] = linelist[1:]\n subtrees_used = []\n _generate_hier_n_data_subfunc(hier_dict,hier_leafList_dict,root,1,subtrees_used)\n\n\n\ndef _subfunc1_compute_leaf_node_of_sub_tree(hier_dict,root,hier_leafList_dict):\n '''计算以root为根节点的树的叶节点列表'''\n if root in hier_leafList_dict.keys():\n return\n hier_remap = hier_dict[root][1]\n hier_remap_len = len(hier_remap)\n if hier_remap_len == 0: # leaf node\n hier_leafList_dict[root]=[root]\n return\n leaf_root = []\n for label in hier_remap:\n _subfunc1_compute_leaf_node_of_sub_tree(hier_dict,label,hier_leafList_dict)\n leaf_list = hier_leafList_dict[label]\n for one in leaf_list:\n if one not in leaf_root:\n leaf_root.append(one)\n hier_leafList_dict[root] = leaf_root\n return\n\ndef _subfunc2_generate_hier_remap(hier_leafList_dict, root_list):\n hier_remap = []\n root_str = '_'\n for root in root_list:\n root_str += '%d_' % root\n root_leaf = hier_leafList_dict[root]\n for one in root_leaf:\n if one not in hier_remap:\n hier_remap.append(one)\n hier_remap_len = len(hier_remap)\n if len(root_list) > 3:\n filename = '../data/lshtc/hier_%d_%d_others_'%(root_list[0],root_list[1])+'rootstr'\n fr = open(filename, 'w')\n print(root_str,file=fr)\n fr.close()\n root_str = '_%d_%d_others_'%(root_list[0],root_list[1])\n # filename = '../data/lshtc/hier_%d_%d_others_'%(root_list[0],root_list[1])+'remap'\n filename = '../data/lshtc/hier'+root_str+'remap'\n fr = open(filename, 'w')\n for i in range(0, hier_remap_len):\n print('%d %d' % (hier_remap[i], i), file=fr)\n fr.close()\n filename = '../data/lshtc/hier'+root_str+'labels'\n fr_label = open(filename, 'w')\n filename = '../data/lshtc/hier'+root_str+'graphs_index'\n fr_graph = open(filename, 'w')\n filename = '../data/lshtc/example_labels_orig_expand'\n fr = open(filename, 'r')\n lines = fr.readlines()\n fr.close()\n for i in range(0, len(lines)):\n line = lines[i]\n line = line.strip()\n linelist = line.split(' ')\n linelist = [int(k) for k in linelist]\n flag = 0\n for j in range(0, len(hier_remap)):\n if hier_remap[j] in linelist:\n flag = 1\n print(j, end=' ', file=fr_label)\n if flag == 1:\n print(i, file=fr_graph)\n print('', 
file=fr_label)\n fr_graph.close()\n fr_label.close()\n\n\n# def _subfunc2_generate_hier_remap(hier_leafList_dict, root_list):\n# hier_remap = []\n# root_str = '_'\n# for root in root_list:\n# print(root,end=' ',file=fr_rootlist)\n# root_str += '%d_' % root\n# root_leaf = hier_leafList_dict[root]\n# for one in root_leaf:\n# if one not in hier_remap:\n# hier_remap.append(one)\n# hier_remap_len = len(hier_remap)\n# if len(root_list) > 3:\n# root_str = '_%d_%d_others_'%(root_list[0],root_list[1])\n# print(root_str,file = fr_rootstr)\n# print('',file = fr_rootlist)\n#\n\n\ndef _subfunc4(hier_dict,root,hier_leafList_dict,level):\n if len(hier_dict[root][1])==0:\n return\n leafList = hier_leafList_dict[root]\n print('%d %d'%(root,len(leafList)))\n if len(leafList) <=500:\n _subfunc5(root,leafList,level)\n return\n _subfunc5(root, hier_dict[root][1], level)\n\n for one in hier_dict[root][1]:\n _subfunc4(hier_dict,one,hier_leafList_dict,level+1)\n\ndef _subfunc5(root, leafList, level):\n hier_remap = leafList\n root_str = '_%d_%d_'%(level,root)\n\n hier_remap_len = len(hier_remap)\n\n filename = '../data/lshtc/hier'+root_str+'remap'\n fr = open(filename, 'w')\n for i in range(0, hier_remap_len):\n print('%d %d' % (hier_remap[i], i), file=fr)\n fr.close()\n filename = '../data/lshtc/hier'+root_str+'labels'\n fr_label = open(filename, 'w')\n filename = '../data/lshtc/hier'+root_str+'graphs_index'\n fr_graph = open(filename, 'w')\n filename = '../data/lshtc/example_labels_orig_expand'\n fr = open(filename, 'r')\n lines = fr.readlines()\n fr.close()\n for i in range(0, len(lines)):\n line = lines[i]\n line = line.strip()\n linelist = line.split(' ')\n linelist = [int(k) for k in linelist]\n flag = 0\n for j in range(0, len(hier_remap)):\n if hier_remap[j] in linelist:\n flag = 1\n print(j, end=' ', file=fr_label)\n if flag == 1:\n print(i, file=fr_graph)\n print('', file=fr_label)\n fr_graph.close()\n fr_label.close()\n\ndef _subfunc3_generate_hier_n_data_leaf2root(hier_dict,hier_leafList_dict,root):\n child_list = hier_dict[root][1]\n for one in child_list:\n if len(hier_leafList_dict[one]) > THRESHOLD1:\n _subfunc3_generate_hier_n_data_leaf2root(hier_dict,hier_leafList_dict,one)\n # hier_leafList_dict.clear()\n # _subfunc1_compute_leaf_node_of_sub_tree(hier_dict, ROOT, hier_leafList_dict)\n for one in child_list:\n if len(hier_leafList_dict[one]) >= THRESHOLD2:\n _subfunc2_generate_hier_remap(hier_leafList_dict,[one])\n hier_leafList_dict[one] = [one]\n one_child_list = hier_dict[one][1]\n hier_dict[one][1] = []\n for one_child in one_child_list:\n if one in hier_dict[one_child][0]:\n hier_dict[one_child][0].remove(one)\n hier_leafList_dict.clear()\n _subfunc1_compute_leaf_node_of_sub_tree(hier_dict, ROOT, hier_leafList_dict)\n\n if len(hier_leafList_dict[root]) > THRESHOLD1:\n for one in child_list:\n if len(hier_leafList_dict[one]) >= THRESHOLD3:\n _subfunc2_generate_hier_remap(hier_leafList_dict, [one])\n hier_leafList_dict[one] = [one]\n one_child_list = hier_dict[one][1]\n hier_dict[one][1] = []\n for one_child in one_child_list:\n if one in hier_dict[one_child][0]:\n hier_dict[one_child][0].remove(one)\n hier_leafList_dict.clear()\n _subfunc1_compute_leaf_node_of_sub_tree(hier_dict, ROOT, hier_leafList_dict)\n # if flag == 1:\n # tmp_root_list = []\n # for one in child_list:\n # father = hier_dict[one][0]\n # for two in father:\n # if two not in tmp_root_list:\n # tmp_root_list.append(two)\n # for tmp_root in tmp_root_list:\n # tmp_child_list = hier_dict[tmp_root][1]\n # tmp_root_leaf = []\n # 
for one in tmp_child_list:\n # for two in hier_leafList_dict[one]:\n # if two not in tmp_root_leaf:\n # tmp_root_leaf.append(two)\n # hier_leafList_dict[tmp_root] = tmp_root_leaf\n if len(hier_leafList_dict[root]) > THRESHOLD1:\n child_not_zero = []\n for one in child_list:\n if len(hier_leafList_dict[one]) != 1:\n child_not_zero.append(one)\n while len(child_not_zero)>0:\n root_list = []\n root_list_leaf = []\n for one in child_not_zero:\n leaf_sum = len(root_list_leaf)\n leaf_list = hier_leafList_dict[one]\n for two in leaf_list:\n if two not in root_list_leaf:\n leaf_sum +=1\n if leaf_sum <= THRESHOLD1: # ?????????????????????????\n root_list.append(one)\n for two in leaf_list:\n if two not in root_list_leaf:\n root_list_leaf.append(two)\n if len(root_list_leaf) < THRESHOLD4 and len(root_list)==len(child_not_zero):# ????????\n break\n _subfunc2_generate_hier_remap(hier_leafList_dict, root_list)\n for one in root_list:\n child_not_zero.remove(one)\n hier_leafList_dict[one] = [one]\n one_child_list = hier_dict[one][1]\n hier_dict[one][1] = []\n for one_child in one_child_list:\n if one in hier_dict[one_child][0]:\n hier_dict[one_child][0].remove(one)\n hier_leafList_dict.clear()\n _subfunc1_compute_leaf_node_of_sub_tree(hier_dict, ROOT, hier_leafList_dict)\n # print(len(hier_leafList_dict[root]))\n\ndef generate_hier_n_data_leaf2root():\n '''\n hier_XXX_remap\n hier_XXX_labels\n hier_XXX_graphs_index\n hier_XXX_graphs\n '''\n\n filename = '../data/lshtc/hier_relation'\n fr = open(filename, 'r')\n lines = fr.readlines()\n fr.close()\n hier_dict = {}\n index = 0\n for line in lines:\n index_mod = index % 3\n if index_mod == 0:\n line = line.strip()\n label = int(line)\n elif index_mod == 1:\n line = line.strip()\n if len(line) == 0:\n parent_list = []\n else:\n linelist = line.split(' ')\n parent_list = [int(j) for j in linelist]\n elif index_mod == 2:\n line = line.strip()\n if len(line) == 0:\n child_list = []\n else:\n linelist = line.split(' ')\n child_list = [int(j) for j in linelist]\n hier_dict[label] = [parent_list, child_list]\n index += 1\n root = ROOT\n hier_leafList_dict = {}\n # _subfunc1_compute_leaf_node_of_sub_tree(hier_dict,root,hier_leafList_dict)\n\n filename = '../data/lshtc/hier_2143406_remap'\n root_remap = np.loadtxt(filename,dtype=int)\n root_labels = root_remap[:,0]\n for one in root_labels:\n one_child_list = hier_dict[one][1]\n hier_dict[one][1] = []\n for one_child in one_child_list:\n if one in hier_dict[one_child][0]:\n hier_dict[one_child][0].remove(one)\n # hier_leafList_dict.clear()\n _subfunc1_compute_leaf_node_of_sub_tree(hier_dict, ROOT, hier_leafList_dict)\n\n _subfunc4(hier_dict,ROOT,hier_leafList_dict,0)\n\n\n\n\n\n\n\n\n\n\n\ndef main(argv=None):\n generate_hier_n_data_leaf2root()\nif __name__ == '__main__':\n main()\n","sub_path":"GraphCNN/utils/lshtc_utils2.py","file_name":"lshtc_utils2.py","file_ext":"py","file_size_in_byte":21817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"419406936","text":"import pygame as pyg\nimport time\nimport datetime\n\n\ndef playmus(file,stopper):\n pyg.mixer.init()\n pyg.mixer.music.load(file)\n pyg.mixer.music.play()\n while True:\n stop=input()\n if stop==stopper:\n pyg.mixer.music.stop()\n break\n\ndef log_req(string):\n with open(\"my_logs.txt\",\"a\") as f:\n f.write(f\"{string} at {datetime.datetime.now()} \\n \" )\n\n\n\nif __name__==\"__main__\":\n\n init_water=time.time()\n init_eyes = time.time()\n init_exercise = time.time()\n\n 
water_after_every=35*60\n eyes_after_every=30*60\n exercise_after_every=45*60\n\n\n print(\"I AM HEALTH MANEGER PROG , I HELP U BY MANAGING YOUR HEALTH\")\n while True:\n\n if time.time()-init_water>water_after_every:\n print(\"this is the time to drink water , enter 'drank' after drinking\")\n playmus(\"Guitar Sikhda-(SwagyJatt.CoM).mp3\",\"drank\")\n log_req(\"Drank water\")\n init_water=time.time()\n\n if time.time() - init_eyes > eyes_after_every:\n print(\"this is the time for your eyes exercise , enter 'done' after doing it\")\n playmus(\"Backbone-Hardy-Sandhu-(DesiTrack.Com).mp3\", \"done\")\n log_req(\"eyes exercise\")\n init_eyes = time.time()\n\n if time.time() - init_exercise > exercise_after_every:\n print(\"this is the time for your physical exercise , enter 'doneexer' after doing it\")\n playmus(\"Bom_Diggy_Diggy__(VIDEO)___Zack_Knight___Jasmin_Walia___Sonu_Ke_Titu_Ki_Swee.mp3\", \"doneexer\")\n log_req(\"physical exercise done\")\n init_exercise = time.time()\n","sub_path":"healthy programmer.py","file_name":"healthy programmer.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"173782532","text":"import random\nimport torch\nimport numpy as np\nimport torch.utils.data as data\nfrom pathlib import Path\nfrom PIL import Image\n\nfrom src.utils import LMDB, IMG, FPHA, DATA_DIR\n\nclass FPHA_Hand_Flip(data.Dataset):\n \"\"\" FPHA image GT, hand keypoint GT each batch is flipped \n the same as implemented by a custom collate_fn \"\"\"\n def __init__(self, cfg, split_set=None):\n super().__init__()\n self.split_set = split_set\n self.img_dir = cfg['img_dir']\n self.xyz_gt_env = None\n self.shape = (int(cfg['img_size']), int(cfg['img_size']))\n keys_path = Path(DATA_DIR)/(self.split_set + '_keys_cache.p')\n self.keys = LMDB.get_keys(keys_path)\n \n if cfg['len'] == 'max':\n self.num_data = len(self.keys)\n else:\n self.num_data = int(cfg['len'])\n\n self.is_aug = cfg['aug']\n if self.is_aug:\n self.jitter = float(cfg['jitter'])\n self.hue = float(cfg['hue'])\n self.sat = float(cfg['sat'])\n self.exp = float(cfg['exp'])\n self.rot = float(cfg['rot'])\n \n def __init_db(self):\n # necessary for loading env into dataloader\n # https://github.com/chainer/chainermn/issues/129\n pth = str(Path(DATA_DIR)/(self.split_set + '_xyz_gt.lmdb'))\n self.xyz_gt_env = LMDB.get_env(pth)\n \n def aug(self, img, uvd_gt):\n # Image augmentation\n # Rotate\n rot = random.uniform(-self.rot, self.rot)\n new_img = img.rotate(rot)\n # Translate\n new_img, ofs_info = IMG.jitter_img(new_img, self.jitter, self.shape)\n # Distort HSV\n new_img = IMG.distort_image_HSV(new_img, self.hue, self.sat, self.exp)\n\n # Point augmentation\n # Rotate\n new_uvd_gt = uvd_gt.copy()\n new_uvd_gt = IMG.scale_points_WH(new_uvd_gt,\n (1,1), \n img.size)\n new_uvd_gt[:, :2] = IMG.rotate_points(new_uvd_gt[:, :2],\n rot,img.size[0]/2,\n img.size[1]/2,\n img.size[0],\n img.size[1])\n new_uvd_gt = IMG.scale_points_WH(new_uvd_gt,\n img.size,\n (1,1))\n # Translate\n new_uvd_gt = IMG.jitter_points(new_uvd_gt, ofs_info)\n\n new_uvd_gt = new_uvd_gt.astype('float32')\n return new_img, new_uvd_gt\n\n def __getitem__(self, index):\n \"\"\"\n Out:\n img : Input image\n uvd_gt : Hand keypoints (21, 3)\n \"\"\" \n if self.xyz_gt_env is None:\n self.__init_db() \n \n key = self.keys[index]\n img = Image.open(Path(DATA_DIR)/self.img_dir/key)\n xyz_gt = LMDB.read_lmdb_env(key, self.xyz_gt_env, 'float32', (21, 3))\n uvd_gt = 
FPHA.xyz2uvd_color(xyz_gt)\n uvd_gt = IMG.scale_points_WH(uvd_gt, \n (FPHA.ORI_WIDTH, FPHA.ORI_HEIGHT), (1,1))\n uvd_gt[..., 2] /= FPHA.REF_DEPTH\n\n if self.is_aug:\n img, uvd_gt = self.aug(img, uvd_gt)\n else:\n img = img.resize(self.shape)\n \n return (img, uvd_gt)\n\n def __len__(self):\n return self.num_data","sub_path":"old_stuff/code/aaron-cv/src/datasets/fpha_hand_flip.py","file_name":"fpha_hand_flip.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"139307913","text":"from features_repository import FeaturesRepository\nfrom files_scanner import TargetFile\nfrom file_features import FileFeatures\n#Google Cloud client library\nfrom google.cloud import vision\nfrom google.cloud.vision import types\n\nimport exifread\nimport os\nimport datetime\n\nclass FeaturesExtractor:\n def __init__(self, features_repo: FeaturesRepository):\n self.__featuresRepo = features_repo\n\n def extractFeatures(self, targetFiles: [TargetFile]):\n featuresData = list(map(lambda file: self.__extractFeaturesFromFile(file), targetFiles))\n return featuresData\n\n def __extractFeaturesFromFile(self, targetFile: TargetFile):\n #Google Cloud client instance\n googleClient = vision.ImageAnnotatorClient()\n previousFeatures = self.__featuresRepo.getFeatureData(targetFile.fileHash)\n if previousFeatures:\n return FileFeatures.from_dict(previousFeatures)\n\n with open(targetFile.filePath, 'rb') as fileHandle:\n #Load file in memory\n content = fileHandle.read()\n exifData = exifread.process_file(fileHandle)\n\n #Content convert\n img = types.Image(content=content)\n\n date = os.path.getmtime(targetFile.filePath)\n (latitude, longitude) = self._get_exif_location(exifData)\n\n if latitude == None:\n latitude = 0\n\n if longitude == None:\n longitude = 0\n\n dateObject = self._get_if_exist(exifData, \"EXIF DateTimeDigitized\")\n if dateObject:\n dateString = dateObject.values\n date = datetime.datetime.strptime(dateString, \"%Y:%m:%d %H:%M:%S\")\n\n #Label detection\n response = googleClient.label_detection(image=img)\n labels = response.label_annotations\n #Transform labels\n labs = \"\"\n\n #Emotions detection\n response = googleClient.face_detection(image=img)\n faces = response.face_annotations\n #Transform faces\n facs = \"\"\n\n #Color detection\n response = googleClient.image_properties(image=img)\n colors = response.image_properties_annotation.dominant_colors.colors\n #for color in colors:\n r = colors[0].color.red\n g = colors[0].color.green\n b = colors[0].color.blue\n domColor = [str(r), str(g), str(b)]\n\n #Text detection\n response = googleClient.text_detection(image=img)\n jText = response.text_annotations\n if len(jText) > 0:\n text = jText[0].description\n else:\n text = \"\"\n\n features = FileFeatures(targetFile.fileHash, latitude, longitude, date, labs, facs, domColor, text)\n self.__featuresRepo.setFeatureData(targetFile.fileHash, features)\n\n return features\n\n # Taken from https://gist.github.com/snakeye/fdc372dbf11370fe29eb\n def _get_if_exist(self, data, key):\n if key in data:\n return data[key]\n\n return None\n\n def _convert_to_degress(self, value):\n \"\"\"\n Helper function to convert the GPS coordinates stored in the EXIF to degress in float format\n :param value:\n :type value: exifread.utils.Ratio\n :rtype: float\n \"\"\"\n d = float(value.values[0].num) / float(value.values[0].den)\n m = float(value.values[1].num) / float(value.values[1].den)\n s = float(value.values[2].num) / 
float(value.values[2].den)\n\n return d + (m / 60.0) + (s / 3600.0)\n\n def _get_exif_location(self, exif_data):\n \"\"\"\n Returns the latitude and longitude, if available, from the provided exif_data (obtained through get_exif_data above)\n \"\"\"\n lat = None\n lon = None\n\n gps_latitude = self._get_if_exist(exif_data, 'GPS GPSLatitude')\n gps_latitude_ref = self._get_if_exist(exif_data, 'GPS GPSLatitudeRef')\n gps_longitude = self._get_if_exist(exif_data, 'GPS GPSLongitude')\n gps_longitude_ref = self._get_if_exist(exif_data, 'GPS GPSLongitudeRef')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n lat = self._convert_to_degress(gps_latitude)\n if gps_latitude_ref.values[0] != 'N':\n lat = 0 - lat\n\n lon = self._convert_to_degress(gps_longitude)\n if gps_longitude_ref.values[0] != 'E':\n lon = 0 - lon\n\n return lat, lon\n","sub_path":"src/features_extractor.py","file_name":"features_extractor.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"290771712","text":"from collections import Counter\n\n\ndef solution(tickets):\n # 딕셔너리 생성 {시작점 : [끝점]}\n routes = dict()\n for (start, end) in tickets:\n routes[start] = routes.get(start, []) + [end]\n # get(key) -> key로 value 얻기\n # key에 해당하는 value가 없을 때는 디폴트 값을 정해둔다. get(key, default)\n\n # 시작점 : [끝점] 역순 정렬\n for route in routes.keys():\n routes[route].sort(reverse=True)\n\n # DFS 알고리즘으로 path 만들어줌\n st = [\"ICN\"]\n path = []\n\n while st:\n top = st[-1]\n\n # 루트에 존재하지 않거나 top-key에 대한 값들의 길이가 0일 때 path에 추가\n if top not in routes or len(routes[top]) == 0:\n # 원소가 존재하지 않기 때문에 path에 넣어준다 -> 이때 역순으로 들어가게 됨\n path.append(st.pop())\n\n # 루트에 존재하고 top-key에 대한 값들의 길이가 1이상일때(=존재할때)\n else:\n # key에 대한 value리스트의 맨 끝을 st에 넣기\n st.append(routes[top][-1])\n routes[top] = routes[top][:-1]\n return path[::-1]\n\n\ndef my_failed_solution(tickets):\n '''\n 마지막 목적지를 지정해놓고 풀었더니 변수가 너무 많은 것같다.\n 테스트 1, 2에서 런타임 에러가 발생\n '''\n\n\n answer = []\n depart_ticket = []\n\n tickets.sort()\n\n # 첫 시작이 ICN인 티켓 찾기\n for ticket in tickets:\n if ticket[0] == \"ICN\":\n depart_ticket.append(ticket)\n\n # 출발지점\n answer.append(depart_ticket[0][0])\n answer.append(depart_ticket[0][1])\n tickets.remove(depart_ticket[0])\n\n # 도착지점\n odd_num_city = [k for k, v in Counter(list(y for x in tickets for y in x)).items() if v % 2 == 1]\n print(odd_num_city)\n if len(odd_num_city) > 1 and 'ICN' in odd_num_city:\n odd_num_city.remove('ICN')\n destination = odd_num_city[0]\n\n while tickets:\n able_tickets = []\n for ticket in tickets:\n if ticket[0] == answer[-1]:\n able_tickets.append(ticket)\n\n for able_ticket in able_tickets:\n if able_ticket[0][1] == destination:\n able_tickets.remove(able_ticket)\n\n answer.append(able_tickets[0][1])\n tickets.remove(able_tickets[0])\n\n return answer\n\n\ntickets = [[\"ICN\", \"JFK\"], [\"HND\", \"IAD\"], [\"JFK\", \"HND\"]]\ntickets2 = [[\"ICN\", \"SFO\"], [\"ICN\", \"ATL\"], [\"SFO\", \"ATL\"], [\"ATL\", \"ICN\"], [\"ATL\", \"SFO\"]]\ntickets3 = [[\"ICN\",\"BOO\"],[\"ICN\",\"COO\"],[\"COO\",\"ICN\"]]\n\n\nprint(solution(tickets3))","sub_path":"programers/여행경로/여행경로.py","file_name":"여행경로.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"424150366","text":"import logging\nfrom casexml.apps.case.mock import CaseBlock\nfrom corehq.apps.commtrack.models import Product, CommtrackConfig,\\\n CommtrackActionConfig, 
SupplyPointType, SupplyPointProductCase, SupplyPointCase\nfrom corehq.apps.commtrack import const\nfrom casexml.apps.case.xml import V2\nimport uuid\nfrom corehq.apps.hqcase.utils import submit_case_blocks\nfrom xml.etree import ElementTree\nfrom corehq.apps.users.cases import get_owner_id, reconcile_ownership\n\n\"\"\"\nhelper code to populate the various commtrack models, for ease of\ndevelopment/testing, before we have proper UIs and imports\n\"\"\"\n\ndef make_product(domain, name, code):\n p = Product()\n p.domain = domain\n p.name = name\n p.code = code.lower()\n p.save()\n return p\n\ndef make_supply_point(domain, location, owner_id=None):\n return SupplyPointCase.create_from_location(domain, location, owner_id)\n\ndef make_supply_point_product(supply_point_case, product_uuid, owner_id=None):\n domain = supply_point_case.domain\n id = uuid.uuid4().hex\n user_id = const.get_commtrack_user_id(domain)\n owner_id = owner_id or get_owner_id(supply_point_case) or user_id\n username = const.COMMTRACK_USERNAME\n product_name = Product.get(product_uuid).name\n caseblock = CaseBlock(\n case_id=id,\n create=True,\n version=V2,\n case_name=product_name,\n user_id=user_id,\n owner_id=owner_id,\n case_type=const.SUPPLY_POINT_PRODUCT_CASE_TYPE,\n update={\n \"product\": product_uuid\n },\n index={\n const.PARENT_CASE_REF: (const.SUPPLY_POINT_CASE_TYPE,\n supply_point_case._id),\n }\n )\n casexml = ElementTree.tostring(caseblock.as_xml())\n submit_case_blocks(casexml, domain, username, user_id,\n xmlns=const.COMMTRACK_SUPPLY_POINT_PRODUCT_XMLNS)\n sppc = SupplyPointProductCase.get(id)\n sppc.bind_to_location(supply_point_case.location)\n sppc.save()\n return sppc\n\ndef make_psi_config(domain):\n c = CommtrackConfig(\n domain=domain,\n multiaction_enabled=True,\n multiaction_keyword='s',\n actions=[\n CommtrackActionConfig(\n action_type='stockedoutfor',\n keyword='d',\n caption='Stock-out Days'\n ),\n CommtrackActionConfig(\n action_type='receipts',\n keyword='r',\n caption='Other Receipts'\n ),\n CommtrackActionConfig(\n action_type='stockonhand',\n keyword='b',\n caption='Balance'\n ),\n CommtrackActionConfig(\n action_type='receipts',\n name='sales',\n keyword='p',\n caption='Placements'\n ),\n ],\n supply_point_types=[\n SupplyPointType(name='CHC', categories=['Public']),\n SupplyPointType(name='PHC', categories=['Public']),\n SupplyPointType(name='SC', categories=['Public']),\n SupplyPointType(name='MBBS', categories=['Private']),\n SupplyPointType(name='Pediatrician', categories=['Private']),\n SupplyPointType(name='AYUSH', categories=['Private']),\n SupplyPointType(name='Medical Store / Chemist', categories=['Traditional']),\n SupplyPointType(name='RP', categories=['Traditional']),\n SupplyPointType(name='Asha', categories=['Frontline Workers']),\n SupplyPointType(name='AWW', categories=['Public', 'Frontline Workers']),\n SupplyPointType(name='NGO', categories=['Non-traditional']),\n SupplyPointType(name='CBO', categories=['Non-traditional']),\n SupplyPointType(name='SHG', categories=['Non-traditional']),\n SupplyPointType(name='Pan Store', categories=['Traditional']),\n SupplyPointType(name='General Store', categories=['Traditional']),\n ]\n )\n c.save()\n return c\n","sub_path":"corehq/apps/commtrack/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"608243313","text":"import subprocess\nimport time\n\n\ndef capture_traffic(interface):\n\twhile 
True:\n\t\tfile_name = \"pcap/\"+str(int(time.time()*1000))+\".pcap\"\n\t\tpcap_cmd = [\"sudo\",\"tcpdump\", \"-i\", interface, \"-s\", \"0\", \"-w\",file_name ,\"-c\",\"5000\"]\n\t\t# pcap_cmd = [\"sudo\",\"tcpdump\", \"-i\", interface, \"-s\", \"0\", \"-w\",\"-\" ,\"-c\",\"5000\"]\n\t\tprocess = subprocess.Popen(pcap_cmd,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)\n\t\tprocess.wait()","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"10583970","text":"# coding=utf-8\nimport configparser\nimport os\nfrom utils.BaseUtil import *\nimport logging\n\n\n@singleton\nclass ConfigureUtils(object):\n def __init__(self, model=None):\n self.model = model\n self.verification()\n logging.info(\"初始化配置工具类\")\n logging.info(\"配置工具类模式: {}\".format(self.model))\n\n self.public = \"public\"\n\n configure_path = os.path.dirname(os.path.realpath(__file__)) + \"/configure.conf\"\n self.config_parser = configparser.ConfigParser()\n self.config_parser.read(configure_path, encoding='utf-8')\n\n def get(self, option):\n # 判断该配置是否为本地配置\n local_configure = self.config_parser.has_option(self.model, option)\n if local_configure:\n return self.config_parser.get(self.model, option)\n else:\n return self.config_parser.get(self.public, option)\n\n def getint(self, option):\n # 判断该配置是否为本地配置\n local_configure = self.config_parser.has_option(self.model, option)\n if local_configure:\n return self.config_parser.getint(self.model, option)\n else:\n return self.config_parser.getint(self.public, option)\n\n def verification(self):\n if not self.model:\n print(\"配置工具未初始化\")\n raise Exception(\"配置工具未初始化\")\n\n\"\"\"该方法请勿轻易调用,该方法应该在服务入口处调用\"\"\"\ndef init_configure_utils(model=\"test\"):\n if model:\n configure_utils = ConfigureUtils(model)\n else:\n configure_utils = ConfigureUtils()\n return configure_utils\n\ndef get_configure_utils():\n configure_utils = ConfigureUtils()\n return configure_utils\n","sub_path":"predict_user_attribute/old/gender/configure/ConfigureUtils.py","file_name":"ConfigureUtils.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"18737039","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# 引入必要的module\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport pickle\nimport time\nimport os\nimport math\nimport urllib\nimport serial\nimport socket\nimport threading\nimport pymysql\nimport datetime\nfrom collections import deque\nflag = 0 # for initial system\nflag_database = 0 # for database thread\nall_sensors_data = {'yaw_one': 0, 'pitch_one': 2.3, 'roll_one': 0, 'yaw_two': 0, 'pitch_two': 2.91, 'roll_two': 0, 'height': 0, 'predictions': 2}\nthreadLock = threading.Lock()\nsensors_data = {'yaw_one': 0, 'pitch_one': 0, 'roll_one': 0, 'yaw_two': 0, 'pitch_two': 0, 'roll_two': 0, 'height': 0, 'predictions': 0}\n# contrast with twice web_sensors_data\nweb_sensors_data = {'yaw_one': 0, 'pitch_one': 0, 'roll_one': 0, 'yaw_two': 0, 'pitch_two': 0, 'roll_two': 0, 'height': 0, 'predictions': 0}\nlast_web_sensors_data = {'yaw_one': 0, 'pitch_one': 0, 'roll_one': 0, 'yaw_two': 0, 'pitch_two': 0, 'roll_two': 0, 'height': 0, 'predictions': 0}\nweb_sensors_data_stack = deque()\nnum = 0 # valid data sets number\n# contrast with twice sensor_data [pitch_one, pitch_two, height]\ncurrent_sensor_data = [0, 0, 
0]\nlast_sensor_data = [0, 0, 0]\n\n# set predictions_data = 5, represent disconnect\npredictions_data = 5\n\n# need a global variable to add a new feature run\nvelocity = 0\n\n# cmp comprise two dict\ndef cmp_dict(src_data, dst_data):\n sum_v_d = 0\n sum_v_d1 = 0\n for value in src_data.values():\n sum_v_d = value + sum_v_d\n for value in dst_data.values():\n sum_v_d1 = value + sum_v_d1\n if (sum_v_d == sum_v_d1):\n return 0\n return 1\n\n\n# this method is used to obtain type int data for each data\ndef slice_data(r):\n global velocity\n global all_sensors_data\n sensor_data = []\n if len(r) < 10:\n return [0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n yaw_str_one = ''.join(r[0:2])\n yaw_int_one = int(yaw_str_one, 16)\n yaw_one_sign = int(r[2], 16)\n if yaw_one_sign == 0:\n yaw_int_one = yaw_int_one\n else:\n yaw_int_one = -yaw_int_one\n yaw_int_one = yaw_int_one / 100\n sensors_data['yaw_one'] = yaw_int_one\n velocity = yaw_int_one\n \n pitch_str_one = ''.join(r[3:5])\n pitch_int_one = int(pitch_str_one, 16)\n pitch_one_sign = int(r[5], 16)\n if pitch_one_sign == 0:\n pitch_int_one = pitch_int_one\n else:\n pitch_int_one = -pitch_int_one\n pitch_int_one = pitch_int_one / 100\n sensor_data.append(pitch_int_one)\n sensors_data['pitch_one'] = pitch_int_one\n\n roll_str_one = ''.join(r[6:8])\n roll_int_one = int(roll_str_one, 16)\n roll_one_sign = int(r[8], 16)\n if roll_one_sign == 0:\n roll_int_one = roll_int_one\n else:\n roll_int_one = -roll_int_one\n roll_int_one = roll_int_one / 100\n sensors_data['roll_one'] = roll_int_one\n\n yaw_str_two = ''.join(r[9:11])\n yaw_int_two = int(yaw_str_two, 16)\n yaw_two_sign = int(r[11], 16)\n if yaw_two_sign == 0:\n yaw_int_two = yaw_int_two\n else:\n yaw_int_two = -yaw_int_two\n yaw_int_two = yaw_int_two / 100\n sensors_data['yaw_two'] = yaw_int_two\n\n pitch_str_two = ''.join(r[12:14])\n pitch_int_two = int(pitch_str_two, 16)\n pitch_two_sign = int(r[14], 16)\n if pitch_two_sign == 0:\n pitch_int_two = pitch_int_two\n else:\n pitch_int_two = -pitch_int_two\n pitch_int_two = pitch_int_two / 100\n sensor_data.append(pitch_int_two)\n sensors_data['pitch_two'] = pitch_int_two\n\n roll_str_two = ''.join(r[15:17])\n roll_int_two = int(roll_str_two, 16)\n roll_int_sign = int(r[17], 16)\n if roll_int_sign == 0:\n roll_int_two = roll_int_two\n else:\n roll_int_two = -roll_int_two\n roll_int_two = roll_int_two / 100\n sensors_data['roll_two'] = roll_int_two\n\n height_str = ''.join(r[18:20])\n height_int = int(height_str, 16)\n height_int_sign = int(r[20], 16)\n if height_int_sign == 0:\n height_int = height_int\n else:\n height_int = -height_int\n sensor_data.append(height_int)\n sensors_data['height'] = height_int\n\n threadLock.acquire()\n all_sensors_data = sensors_data.copy()\n threadLock.release()\n return sensor_data\n\n\n# angle1 is 0~180, -180~-0, angle2 is 0~180, -180~-0\ndef offset_angle(angle1, angle2):\n if (math.fabs(angle1 - angle2) > 180):\n angle = 360 - (math.fabs(angle1) + math.fabs(angle2))\n else:\n angle = math.fabs(angle1) - math.fabs(angle2)\n return math.fabs(angle)\n\n\n# Data validation: floating between two sets of data does not exceed 10%, if it is right, return true, else return false\ndef data_validation(sensor_data):\n global current_sensor_data\n global last_sensor_data\n current_sensor_data = sensor_data\n # assign an initial value to last_sensor_data, when it is the first\n if (last_sensor_data[0] == 0 and last_sensor_data[1] == 0 and last_sensor_data[2] == 0):\n last_sensor_data = current_sensor_data\n return False\n # validate data in 
the range of 10%\n if (offset_angle(last_sensor_data[0], current_sensor_data[0]) > 10):\n last_sensor_data = current_sensor_data\n return False\n if (offset_angle(last_sensor_data[1], current_sensor_data[1]) > 10):\n last_sensor_data = current_sensor_data\n return False\n # height need particularly handle, as its range isn't 0~180,-0~-180\n if (math.fabs(last_sensor_data[2]) - 30 > math.fabs(current_sensor_data[2]) or math.fabs(\n current_sensor_data[2]) > math.fabs(last_sensor_data[2]) + 30):\n last_sensor_data = current_sensor_data\n return False\n return True\n\n\n# initial system, command dog to stand up until it return 10 sets valid data\ndef initial_system(sensor_data):\n global num\n global last_sensor_data\n\n last_sensor_data = current_sensor_data\n\n # Data validation: floating between two sets of data does not exceed 10%, if it is right, return true, else return false\n def data_validation(sensor_data):\n global current_sensor_data\n global last_sensor_data\n current_sensor_data = sensor_data\n # validate data in the range of 10%\n if (offset_angle(last_sensor_data[0], current_sensor_data[0]) > 10):\n last_sensor_data = current_sensor_data\n return False\n if (offset_angle(last_sensor_data[1], current_sensor_data[1]) > 10):\n last_sensor_data = current_sensor_data\n return False\n # height need particularly handle, as its range isn't 0~180,-0~-180\n if (math.fabs(last_sensor_data[2]) - 30 > math.fabs(current_sensor_data[2]) or math.fabs(\n current_sensor_data[2]) > math.fabs(last_sensor_data[2]) + 30):\n last_sensor_data = current_sensor_data\n return False\n return True\n\n if (data_validation(sensor_data)):\n num = num + 1\n else:\n num = 0\n # finish initial system, predictions is 3, or predictions is 4\n global predictions_data\n if (num > 5):\n predictions_data = 3\n return True\n else:\n predictions_data = 4\n return False\n\ndef predictions_decision_tree(sensor_data, velocity):\n global all_sensors_data\n global web_sensors_data\n global predictions_data\n global flag\n flag = 1\n if (flag == 0):\n if (initial_system(sensor_data)):\n flag = 1\n print('initial finished')\n print(current_sensor_data)\n else:\n if (math.fabs(velocity) > 1) :\n print('run')\n # run\n predictions_data = 6\n else :\n # initial sensor datas should be changed by the real deployed environment, so the initial_sensor_data is [-170, -170, 300]. 
Especially, the inital value of height is invariable.\n # initial_sensor_data = [-173.0, -5.12, 300]\n initial_sensor_data = [-170, -1.0, 300] \n # height need particularly handle, as its range isn't 0~180,-0~-180\n if (offset_angle(sensor_data[0], initial_sensor_data[0]) > 30):\n print('down1')\n # down\n predictions_data = 2\n else:\n if (((math.fabs(sensor_data[2]) < 100))):\n \n print('lay')\n # lay\n predictions_data = 0\n\n else:\n # up\n print('up')\n predictions_data = 1\n\n\n threadLock.acquire()\n all_sensors_data['predictions'] = predictions_data\n threadLock.release()\n web_sensors_data = all_sensors_data.copy()\n\n threadLock.acquire()\n all_sensors_data['predictions'] = predictions_data\n threadLock.release()\n web_sensors_data = all_sensors_data.copy()\n\t\ndef weblink(c, addr):\n try:\n a = pickle.dumps(web_sensors_data)\n c.send(a)\n if len(web_sensors_data_stack) > 3:\n web_sensors_data_stack.pop()\n c.close()\n except EOFError:\n print('eoferror')\n\n\ndef web_server():\n # client server\n server = socket.socket()\n host_server = '0.0.0.0'\n port_server = 12342\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind((host_server, port_server))\n server.listen(5)\n\n while True:\n c, addr = server.accept()\n web_t = threading.Thread(target=weblink, args=(c, addr))\n web_t.start()\n server.close()\n\n\ndef tcplink(sock, addr):\n print('Accept new connection from %s:%s...' % addr)\n global predictions_data\n # set predictions_data = 4, represent connect\n predictions_data = 4\n # in order to initial system every time, when a new connection is constructed, so set flag = 0\n global flag\n flag = 0\n\n while True:\n client_data = sock.recv(1024)\n if not client_data:\n print('disconnect')\n\n # set predictions_data = 5, represent disconnect\n predictions_data = 5\n break\n try:\n pickle_data = pickle.loads(client_data)\n except:\n print('pickle.loads error')\n continue\n sensor_data = slice_data(pickle_data)\n web_data = predictions_decision_tree(sensor_data, velocity)\n web_sensors_data_stack.appendleft(web_data)\n sock.close()\n print('Connection from %s:%s closed.' 
% addr)\n\n\ndef tcp_server():\n s = socket.socket()\n host = '0.0.0.0'\n port = 12343\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind((host, port))\n s.listen(5)\n\n while True:\n sock, addr = s.accept()\n print('tcp client addr: ', addr)\n t = threading.Thread(target=tcplink, args=(sock, addr))\n t.start()\n\n\n\ndef diff_web_sensors_data():\n global flag_database\n last_web_sensors_data = web_sensors_data\n while (True):\n if (cmp_dict(web_sensors_data, last_web_sensors_data) == 0):\n flag_database = 0\n else:\n flag_database = 1\n last_web_sensors_data = web_sensors_data\n time.sleep(0.5)\n\n\ndef mysql_server():\n # open database\n db = pymysql.connect(\"localhost\", \"root\", \"123\", \"dog_project\")\n\n # use function cursor() build a object cursor\n cursor = db.cursor()\n while True:\n\n while (flag_database):\n dt = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n # SQL insert sentence\n sql = \"INSERT INTO dog_tbl (dog_name, yaw_one, \\\n yaw_two, pitch_one, pitch_two, roll_one, \\\n roll_two, height, time) \\\n VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')\" % \\\n ('p001', web_sensors_data['yaw_one'], web_sensors_data['yaw_two'], web_sensors_data['pitch_one'],\n web_sensors_data['pitch_two'], web_sensors_data['roll_one'], web_sensors_data['roll_two'],\n web_sensors_data['height'], dt)\n\n try:\n # execute sql sentence\n cursor.execute(sql)\n\n db.commit()\n except:\n # if occur errors, then rollback\n db.rollback()\n time.sleep(1)\n\n db.close()\n\n\n# as a tcp server receive the data of dog police pose, also as a web client, send the results of pose\ndef main():\n try:\n t1 = threading.Thread(target=tcp_server)\n t2 = threading.Thread(target=web_server)\n t3 = threading.Thread(target=mysql_server)\n t1.start()\n t2.start()\n t3.start()\n diff_web_sensors_data()\n except:\n print('Error: unable to start thread')\n while 1:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dogs_server_blackant_new.py","file_name":"dogs_server_blackant_new.py","file_ext":"py","file_size_in_byte":12649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"404794193","text":"\"\"\"\nAuthor: Edward\n10/29/20\nSaw this and thought it should be in it's own file.\n\"\"\"\nimport unittest\nfrom main import marriedPercentage\nfrom main import negative\n\n#printWhole(text_file)\n#sprintOneTestFunctionOne()\nclass TestStringMethods(unittest.TestCase):\n # test function to test equality of two value\n def test_negative(self):\n g = float(8/9)\n firstValue = g\n secondValue = marriedPercentage()\n # error message in case if test case got failed\n message = \"First value and second value are not equal !\"\n # assertEqual() to check equality of first & second value\n self.assertEqual(firstValue, secondValue, message)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"SSW555Project02FINALVERSION/test_mainy.py","file_name":"test_mainy.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"211743013","text":"import torch\nimport torch.nn as nn\nfrom einops import rearrange, repeat, reduce\n\nfrom .modules import Encoder, Aligner, Fusion, Decoder, ChannelAttention\n\n\nclass DeepBurstSR(nn.Module):\n def __init__(self, config=None):\n super(DeepBurstSR, self).__init__()\n self.config = config \n self.encode = Encoder(in_ch=4, hidden_ch=64, out_ch=256)\n self.align = Aligner(in_ch=256, 
hidden_ch=64)\n self.fuse = Fusion(in_ch=64, hidden_ch=128, out_ch=256)\n self.decode = Decoder(in_ch=256, out_ch=3, scale=4)\n self.softmax = ChannelAttention()\n \n def forward(self, x, flow):\n batch_size = x.shape[0]\n burst_size = x.shape[1]\n\n x = rearrange(x, 'b k c h w -> (b k) c h w')\n flow = rearrange(flow, 'b k c h w -> (b k) c h w')\n x = self.encode(x)\n x_aligned = self.align(x, flow)\n temp = rearrange(x_aligned, '(b k) c h w -> b k c h w', k=burst_size)\n x_base = repeat(temp[:,0,:,:,:], 'b c h w -> b k c h w', k=burst_size)\n x_base = rearrange(x_base, 'b k c h w -> (b k) c h w')\n w = self.fuse(x_base, x_aligned, torch.remainder(flow, 1))\n\n w = rearrange(w, '(b k) c h w -> b k c h w', k=burst_size)\n x = rearrange(x, '(b k) c h w -> b k c h w', k=burst_size)\n w = self.softmax(w)\n x = x * w\n x = reduce(x, 'b k c h w -> b c h w', 'sum')\n x = self.decode(x)\n return x\n","sub_path":"models/dbsr.py","file_name":"dbsr.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"455822321","text":"import net.base as base\nimport caffe\n\ndef conv_relu(bottom, num_filters, kh, kw, ph=0, pw=0):\n conv1 = base.conv_wh(bottom, num_filters, \n kh, kw, pad_h=ph, pad_w=pw)\n relu1 = base.relu(conv1, in_place=True)\n return relu1\n\ndef inc(bottom, num_filters):\n relu1 = conv_relu(bottom, num_filters, 3, 1, ph=1) \n relu2 = conv_relu(relu1, num_filters, 1, 3, pw=1)\n\n return relu2\n\ndef get_net(data, label, num_classes):\n\n net = caffe.NetSpec()\n n.data = data\n\n n.inc1 = inc(n.data, 16)\n n.pool1 = base.pool(n.inc1, 3, 2, method='max')\n\n n.inc2 = inc(n.pool1, 32)\n n.pool2 = base.pool(n.inc2, 3, 2, method='max')\n\n n.inc3 = inc(n.pool2, 64)\n n.inc4 = inc(n.inc3, 64)\n n.pool4 = base.pool(n.inc4, method='ave', is_global=True)\n n.fc = base.ip(n.pool4, num_classes)\n\n if label is None:\n n.prob = base.softmax(n.fc)\n else:\n n.label = label\n n.loss = base.softmax_with_loss(n.fc, n.label)\n n.accuracy = base.accuracy(n.fc, n.label)\n return n\n","sub_path":"net/examples/incccc.py","file_name":"incccc.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"511369249","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .forms import UserForm\nimport re\nimport sqlite3\nfrom fractions import Fraction\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pylab\nfrom pylab import *\nimport PIL, PIL.Image\nfrom io import StringIO,BytesIO\n\n\n# Create your views here.\ndef index(request):\n if request.method==\"POST\":\n eq=request.POST.get(\"equation\").replace(\" \",\"\").split('=')\n ls=eq[0]\n rs=eq[1]\n t_start=request.POST.get(\"t_start\")\n t_stop=request.POST.get(\"t_stop\")\n t_step=request.POST.get(\"t_step\")\n \n #Регулярным выражением разбираем строку на коэффициенты и элементы\n pattern=r\"([\\d\\/\\d]+)(\\w+(\\(\\w\\))?)\"\n ls_matches=re.findall(pattern,ls)\n rs_matches=re.findall(pattern,rs)\n \n #Получаем коэффициенты при элементах и сами элементы в виде списка\n l_k=[]\n l_el=[]\n for i in ls_matches:\n l_k.append(list(i)[0])\n l_el.append(list(i)[1])\n r_k=[]\n r_el=[]\n for i in rs_matches:\n r_k.append(list(i)[0])\n r_el.append(list(i)[1])\n \n #Получаем значения энтальпий и энтропий для каждого елемента\n\n #Подключение к БД\n conn = sqlite3.connect('tab2.sqlite')\n cursor = conn.cursor()\n l_h=[]\n for i in l_el:\n 
cursor.execute(\"SELECT H_298_kJ_mol FROM chem WHERE Formula= :lim\",{\"lim\":i})\n # Получаем результат сделанного запроса\n l_h.append(cursor.fetchall())\n conn.close()\n #Подключение к БД\n conn = sqlite3.connect('tab2.sqlite')\n cursor = conn.cursor()\n l_s=[]\n for i in l_el:\n cursor.execute(\"SELECT S_298_J_mol_K FROM chem WHERE Formula= :lim\",{\"lim\":i})\n l_s.append(cursor.fetchall())\n conn.close()\n #Подключение к БД\n conn = sqlite3.connect('tab2.sqlite')\n cursor = conn.cursor()\n r_h=[]\n for i in r_el:\n cursor.execute(\"SELECT H_298_kJ_mol FROM chem WHERE Formula= :lim\",{\"lim\":i})\n r_h.append(cursor.fetchall())\n conn.close()\n #Подключение к БД\n conn = sqlite3.connect('tab2.sqlite')\n cursor = conn.cursor()\n r_s=[]\n for i in r_el:\n cursor.execute(\"SELECT S_298_J_mol_K FROM chem WHERE Formula= :lim\",{\"lim\":i})\n r_s.append(cursor.fetchall())\n conn.close()\n \n ll_h=[]\n for i in l_h:\n for x in i:\n ll_h.append(list(x)[0])\n ll_s=[]\n for i in l_s:\n for x in i:\n ll_s.append(list(x)[0]) \n rr_h=[]\n for i in r_h:\n for x in i:\n rr_h.append(list(x)[0]) \n rr_s=[]\n for i in r_s:\n for x in i:\n rr_s.append(list(x)[0])\n\n ll_k=[]\n for i in l_k:\n ll_k.append(float(Fraction(i)))\n\n rr_k=[]\n for i in r_k:\n rr_k.append(float(Fraction(i)))\n\n #Рассчитываем общую энтальпию и энтропию реакции\n tot_h=sum([x * y for x, y in zip(rr_h, rr_k)])-sum([x * y for x, y in zip(ll_h, ll_k)])\n tot_s=sum([x * y for x, y in zip(rr_s, rr_k)])-sum([x * y for x, y in zip(ll_s, ll_k)])\n G=[]\n for i in range(int(t_start),int(t_stop),int(t_step)):\n G.append((tot_h*1000)-((tot_s*i)))\n t_o=((tot_h*1000)/(tot_s))\n # Construct the graph\n plot(range(int(t_start),int(t_stop),int(t_step)), G)\n xlabel('Temperature,[K]')\n ylabel('Gibbs energy, [J]')\n title('Gibbs energy for reaction: {}=>{} \\n G={},kJ - {},J/K * T,K \\n Reaction starts after {},K'.format(ls,rs,round(tot_h,2),round(tot_s,2),round(t_o,2)))\n grid(True)\n\n # Store image in a string buffer\n buffer = BytesIO()\n canvas = pylab.get_current_fig_manager().canvas\n canvas.draw()\n pilImage = PIL.Image.frombytes(\"RGB\", canvas.get_width_height(), canvas.tostring_rgb())\n pilImage.save(buffer, \"PNG\")\n pylab.close()\n \n # Send buffer in a http response the the browser with the mime type image/png set\n return HttpResponse(buffer.getvalue(), \"image/png\")\n else:\n userform=UserForm()\n return render(request,\"index.html\",{\"form\":userform})\n","sub_path":"calculation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"183757511","text":"import pandas as pd\nfrom django.shortcuts import render\nfrom user_form import classification\n\n# Create your views here.\n\ndef Reccommendation(model, pred_acc):\n\tfeedback_data = pd.read_csv(\"feedback.csv\", error_bad_lines = False)\n\tunpredected_data = feedback_data.tail(1)\n\tfeedback_data = feedback_data.iloc[:-1, :]\n\tfeedback_data.to_csv('feedback.csv', index=False)\n\t\n\tunpredected_data['isLockerRecommended'] = pred_acc[0]\n\tif pred_acc[0] == 0:\n\t\tunpredected_data['isLockerUsed'] = 0\n\twith open('feedback.csv', 'a') as f:\n\t\tunpredected_data.to_csv(f, header=False, index=False)\n\t\t\n\tif pred_acc[0] == 1:\n\t\tcontent = {\n\t\t\t'model': model,\n\t\t\t'prediction': \"Locker is Recommended\",\n\t\t\t'predictionBool' : pred_acc[0], \n\t\t\t'accuracy': pred_acc[1]\n\t\t}\n\telse:\n\t\t\n\t\tcontent = {\n\t\t\t'model': 
model,\n\t\t\t'prediction': \"Locker is Not Recommended\",\n\t\t\t'predictionBool' : pred_acc[0],\n\t\t\t'accuracy': pred_acc[1]\n\t\t}\n\n\treturn content\n\ndef Classify(request):\n\t\n\tuser_data = {}\n\tuser_data['Transaction_ID'] = int(request.POST.get('Transaction_ID'))\n\tuser_data['Address(Home/Office)'] = int(request.POST.get('Address'))\n\tuser_data['Item_Weight'] = float(request.POST.get('Item_Weight'))\n\tuser_data['Item_Length'] = float(request.POST.get('Item_Length'))\n\tuser_data['Item_Breadth'] = float(request.POST.get('Item_Breadth'))\n\tuser_data['Item_Height'] = float(request.POST.get('Item_Height'))\n\tuser_data['Item_Price'] = float(request.POST.get('Item_Price'))\n\tuser_data['Hazardous'] = int(request.POST.get('Hazardous'))\n\tuser_data['FulfilledByAmazon'] = int(request.POST.get('FulfilledByAmazon'))\n\tuser_data['Subscribed'] = int(request.POST.get('Subscribed'))\n\tuser_data['ReleaseDate'] = int(request.POST.get('ReleaseDate'))\n\t\n\twith open('feedback.csv', 'a') as feedback_data:\n\t\tpd.DataFrame(user_data, index=[0]).to_csv(feedback_data, header=False, index=False)\n \n\tif request.POST.get('Models') == 'Bayes':\n\t\tpred_acc = classification.Gaussian(request)\n\t\tcontent = Reccommendation(\"Gaussian Naive Bayes Model\",pred_acc)\n\t\treturn render(request, 'hello.html',content)\n\telif request.POST.get('Models') == 'Logistic':\n\t\tpred_acc = classification.Logistic_Regression(request)\n\t\tcontent = Reccommendation(\"Logistic Regression Model\", pred_acc)\n\t\treturn render(request, 'hello.html',content)\n\telif request.POST.get('Models') == 'Rfc':\n\t\tpred_acc = classification.rfc(request)\n\t\tcontent = Reccommendation(\"Random Forest Classifier Model\", pred_acc)\n\t\treturn render(request, 'hello.html',content)\n\telif request.POST.get('Models') == 'Knn':\n\t\tpred_acc = classification.knn(request)\n\t\tcontent = Reccommendation(\"K-Nearest Neighbour Model\", pred_acc)\n\t\treturn render(request, 'hello.html',content)\n\telif request.POST.get('Models') == 'Tree':\n\t\tpred_acc = classification.decision_tree(request)\n\t\tcontent = Reccommendation(\"Decision Tree Model\", pred_acc)\n\t\treturn render(request, 'hello.html',content)\n\ndef SaveFeedback(request):\n\t\n\tfeedback_data = pd.read_csv(\"feedback.csv\", error_bad_lines = False)\n\tunfed_data = feedback_data.tail(n=1)\n\tunfed_data['isLockerUsed'] = request.POST.get('feedback')\n\tfeedback_data = feedback_data.iloc[:-1]\n\tfeedback_data.to_csv('feedback.csv', index=False)\n\t\n\twith open('feedback.csv', 'a') as f:\n\t\tunfed_data.to_csv(f, header=False, index=False)\n\t\t\n\treturn render(request, 'final.html')\n\t\ndef homepage(request):\n\treturn render(request, 'form.html')\n","sub_path":"AmazonLocker/ACMS_Form/class_form/user_form/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"276097295","text":"#!/usr/bin/env python\n# 6 July 2019 Kevin Walchko, MIT License\n# Grabs IMU data and stores it to a bag file\n\nfrom nxp_imu import IMU\nimport time\nfrom the_collector import BagIt, Pickle\nfrom collections import namedtuple\n\n\"\"\"\nnxp_imu\naccel/mag - 0x1f\ngyro - 0x21\npi@r2d2 nxp $ sudo i2cdetect -y 1\n 0 1 2 3 4 5 6 7 8 9 a b c d e f\n00: -- -- -- -- -- -- -- -- -- -- -- -- --\n10: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 1f\n20: -- 21 -- -- -- -- -- -- -- -- -- -- -- -- -- --\n30: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n40: -- -- -- -- -- -- -- 
-- -- -- -- -- -- -- -- --\n50: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n60: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --\n70: -- -- -- -- -- -- -- --\n\"\"\"\n\nclass Rate(object):\n \"\"\"\n Uses sleep to keep a desired message/sample rate.\n \"\"\"\n def __init__(self, hertz):\n self.last_time = time.time()\n self.dt = 1/hertz\n\n def sleep(self):\n \"\"\"\n This uses sleep to delay the function. If your loop is faster than your\n desired Hertz, then this will calculate the time difference so sleep\n keeps you close to you desired hertz. If your loop takes longer than\n your desired hertz, then it doesn't sleep.\n \"\"\"\n now = time.time()\n diff = now - self.last_time\n if diff < self.dt:\n new_sleep = self.dt - diff\n time.sleep(new_sleep)\n\n # now that we hav slept a while, set the current time\n # as the last time\n self.last_time = time.time()\n\nData = namedtuple('Data', 'data timestamp')\n\nif __name__ == \"__main__\":\n bag = BagIt(Pickle)\n\n # Open the NXP IMU with:\n # accel: 2 g's\n # gyros: 250 degrees per sec\n imu = IMU(gs=2, dps=250)\n\n rate = Rate(200)\n\n try:\n for i in range(10000):\n # while True:\n a, m, g = imu.get()\n ts = time.time()\n bag.push('accel', Data(a,ts))\n bag.push('mag', Data(m,ts))\n bag.push('gyro', Data(g,ts))\n\n # pts = lidar.read()\n # bag.push('lidar', pts)\n\n # print(\"[{}] {}\".format(i, g))\n # print(type(g))\n if i%20 == 0:\n print(i)\n\n rate.sleep()\n\n except Exception as e:\n print(e)\n except KeyboardInterrupt:\n pass\n\n bag.write('data-still', timestamp=False)\n # bag.write('data', timestamp=False)\n # lidar.close()\n print('Done ...')\n","sub_path":"calibration/grab.py","file_name":"grab.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"49704009","text":"#!/usr/local/bin/python\r\nimport copy\r\n\r\n\"\"\"This file holds the ASA Classes to include\r\n ASAobject\r\n The big class that holds all of the other ASA objects\r\n This class will have lists(arrays) of each kind of sub-object.\r\n so one list of Network Objects, one list of Service objects, one list of ACL's\r\n \r\n Network Objects\r\n ASA Network Object Hosts have a single IP Address\r\n ASA Network Object Networks have a network assigned (ex. /24)\r\n Service Objects\r\n ASA TCP and UDP ports\r\n the working version is \"ASAclassesMonth\" while the backup versions are ASAclassesYYYYmmmDD \r\n\"\"\"\r\n#copyright (c)2017 Douglas J. Sheehan, Doug Sheehan IT Consulting. Free to use. 
Credit where due please.\r\n \r\nclass ASAobject():\r\n \"\"\" this is the big class\r\n Service Objects and Network Object groups are special cases of the network object\r\n ASAobject will have an array(list) of network objects called networkobjarray(element starts with )\r\n ASAobject will have an array(list) of network group objects called netGroupobjarray(element starts with object-group network )\r\n ASAobject will have an array(list) of service/port objects called serviceobjarray (element starts with )\r\n ASAobject will have an array(list) of service/port group objects called svcGroupobjarray (element starts with object-group service)\r\n ASAobject will have an array(list) of ACL objects called aclobjarray\r\n \r\n \r\n \r\n object-group network (starts with )\r\n will have a method to load array\r\n will have a method to sort array\r\n \r\n \"\"\"\r\n ASAnumber=0 #this is a class attribute, shared by all instances, increments each time a new ASAobject is created.\r\n \r\n def __init__ (self, name, asafilename=\"DC2-ASA-01_2017apr06th-1000hrs.txt\"): \r\n \r\n \"\"\" init docstring \"\"\"\r\n # asafilename=\"shortASA-NetObjTest.txt\" default filename added for testing\r\n self.networkobjarray=[] #instance list of complete network objects\r\n self.netobjgrouparray=[] #instance list of complete network object groups\r\n self.serviceobjarray=[] #instance list of complete service(port) objects\r\n self.aclobjarray=[] #instance list of complete ACL objects\r\n self.sortednetworkobjarray=[] #instance list of complete network objects\r\n self.sortedNetObjGrouparray=[] #instance list of complete network objects\r\n self.sortedServiceObjarray=[] #instance list of complete network objects\r\n self.sortedACLobjarray=[] #instance list of complete network objects\r\n self.name=name\r\n self.asafilename = asafilename\r\n return #end init\r\n \r\n \r\n def loadarray (self):\r\n \"\"\" vars\r\n asafilename\r\n\r\n datasource\r\n dataline\r\n tempstring\r\n objindex\r\n objname\r\n objtype\r\n \"\"\" \r\n paramlist=[] #parameter list\r\n with open(self.asafilename, 'r') as datasource:\r\n \r\n for dataline in datasource: #each iteration reads a line from file\r\n tempstring=dataline\r\n while \"object network \" in tempstring: #load up parameter list \r\n #[0:15] is slice that contains obj network\r\n objindex=0\r\n typestart=tempstring.find(\"object network \")\r\n objname=tempstring[typestart+15:-1]#-1 to remove the newline\r\n #print(\"object name is \", objname)\r\n objtype=tempstring[typestart:typestart+15]\r\n #print(\"object type is \", objtype)\r\n tempstring=datasource.readline()\r\n while tempstring.startswith(\" \"):\r\n tempstring=tempstring.strip() #remove leading/trailing spaces\r\n if (len(tempstring)>1) and (len(paramlist)>objindex):\r\n paramlist[objindex]=tempstring #add tempstring to the the list\r\n else:\r\n paramlist.append(tempstring)\r\n \r\n objindex+=1 #increment param list index\r\n tempstring=datasource.readline()\r\n #end while - load up param list\r\n #now create object and add to networkobjarray\r\n\r\n tempnetobj=NetworkObject(objname, objtype, paramlist)\r\n paramlist.clear() #\r\n tempnetobj.printobj() \r\n self.networkobjarray.append(tempnetobj)\r\n \r\n # end while-create network object\r\n \r\n while \"object-group network \" in tempstring: #load up parameter list \r\n #[0:20] is slice that contains obj network\r\n objindex=0\r\n typestart=tempstring.find(\"object-group network \")\r\n objname=tempstring[typestart+20:-1]#-1 to remove the newline\r\n 
#print(\"object name is \", objname)\r\n objtype=tempstring[typestart:typestart+20]\r\n #print(\"object type is \", objtype)\r\n tempstring=datasource.readline()\r\n while tempstring.startswith(\" \"):\r\n tempstring=tempstring.strip() #remove leading/trailing spaces\r\n if (len(tempstring)>1) and (len(paramlist)>objindex):\r\n paramlist[objindex]=tempstring #add tempstring to the the list\r\n else:\r\n paramlist.append(tempstring)\r\n \r\n objindex+=1 #increment param list index\r\n tempstring=datasource.readline()\r\n #end while - load up param list\r\n #now create object and add to networkobjarray\r\n\r\n tempnetobj=NetworkObject(objname, objtype, paramlist)\r\n \r\n tempnetobj.printobj() \r\n self.networkobjarray.append(tempnetobj)\r\n paramlist.clear() #\r\n # end while-create network object group\r\n \r\n while \"object-group service \" in tempstring: #load up parameter list \r\n #[0:20] is slice that contains object-group service\r\n objindex=0\r\n typestart=tempstring.find(\"object-group service \")\r\n objname=tempstring[typestart+20:-1]#-1 to remove the newline\r\n #print(\"object name is \", objname)\r\n objtype=tempstring[typestart:typestart+20]\r\n #print(\"object type is \", objtype)\r\n tempstring=datasource.readline()\r\n while tempstring.startswith(\" \"):\r\n tempstring=tempstring.strip() #remove leading/trailing spaces\r\n if (len(tempstring)>1) and (len(paramlist)>objindex):\r\n paramlist[objindex]=tempstring #add tempstring to the the list\r\n else:\r\n paramlist.append(tempstring)\r\n \r\n objindex+=1 #increment param list index\r\n tempstring=datasource.readline()\r\n #end while - load up param list\r\n #now create object and add to networkobjarray\r\n\r\n tempnetobj=NetworkObject(objname, objtype, paramlist)\r\n paramlist.clear() #\r\n tempnetobj.printobj() \r\n self.serviceobjarray.append(tempnetobj)\r\n # end while-create object-group service\r\n \r\n if \"access-list \" in tempstring:\r\n #[0:11] is slice that contains access-list \r\n objindex=0\r\n typestart=tempstring.find(\"access-list \")\r\n objstring=tempstring[:-1]#-1 to remove the newline\r\n print(\"acl name is \", objstring)\r\n\r\n tempstring=datasource.readline()\r\n \r\n #now create object and add to acl object array\r\n tempaclobj=ACLObject(objstring) #\r\n self.aclobjarray.append(tempaclobj)\r\n # end while-create acl object\r\n return() #end load array\r\n\r\n\r\n \r\n def wastefullsort(self, networkobjarray, sortednetworkobjarray):\r\n #sort network objects by name\r\n #print(\"Wasteful Sort Network Objects by Name\")\r\n \"\"\"\r\n if element is first element, append to sortednetworkobjarray (position 0)\r\n if element is greater than last element in sortednetworkobjarray, append to list\r\n if element is less than last element, count backwards until it is greater and insert\r\n #A>proceed to while loop\")\r\n \r\n while (testobject.name < sortednetworkobjarray[count-1].name) and (count>0):\r\n count-=1 #decrement counter same as count = count-1\r\n \r\n #end while\r\n #print(testobject.name, \" < \", sortednetworkobjarray[count-1].name)\r\n (testobject.name).strip() #remove leading/trailing spaces\r\n sortednetworkobjarray.insert(count,testobject)\r\n #end if-find sort position \r\n # print(\"testobject \", testobject.name, id(testobject))\r\n #print(\"testobject \", testobject.objtype)\r\n #print(\"sort index \", sortindex, sortednetworkobjarray[sortindex].name)\r\n #print(\"\\n *************************************\\n\")\r\n sortindex +=1 #increment index\r\n \r\n #end for loop \r\n 
#print(\"end wastefull sort\")\r\n return() #end Wastefull Sort\r\n \r\n\r\n \r\n def printarray(self, testarray):\r\n \"\"\" prints networkobjarray or sortednetworkobjarray\"\"\"\r\n print(\"inside printarray\")\r\n for testobject in testarray:\r\n print (testobject.name)\r\n \r\n return #end print array \r\n\r\n \r\n\r\n #end class ASAobject\r\n \r\n# ************* begin Network object class *************\r\nclass NetworkObject():\r\n \"\"\" ASA Network Object will have a name, type, description, ip address or network\"\"\"\r\n \"\"\"\r\n Variables declared at the class level are not default values\r\n name = string\r\n type = string (object network, object-group network, object service, object-group service)\r\n ipv4 = ip address(host) or network\r\n description = string\r\n paramlist should be ipv4 then description\r\n methods\r\n init(name,host,description)\r\n \"\"\" \r\n \r\n\r\n def __init__(self, name, objtype, paramlist):\r\n self.name=name\r\n self.objtype=objtype\r\n self.paramlist = list(paramlist) #copy the list\r\n #print(\"new network object, \", name)\r\n #for field in self.paramlist:\r\n #print( \"paramlist \", field)\r\n return #end init\r\n \r\n def printobj(self):\r\n print(\"obj name \", self.name)\r\n print(\"obj type \", self.objtype)\r\n for field in self.paramlist:\r\n print( \" \", field)\r\n return (self.name)\r\n \r\n def onelist(self):\r\n \"\"\"take the name,type, and param list and form into a single list\"\"\"\r\n \r\n newlist=[]\r\n if len(self.name)<38:\r\n newlist.append(self.name.ljust(38))\r\n else:\r\n newlist.append(self.name)\r\n \r\n newlist.append(self.objtype.ljust(38))\r\n leftstring,rightstring=\"init\",\"init\"\r\n \r\n for element in self.paramlist:\r\n leftstring=str(element)\r\n \r\n while rightstring: #keep repeating until rightstring is empty\r\n leftstring,rightstring=self.set38chars(leftstring)\r\n #print(leftstring, \"left string ** right string \", rightstring)\r\n #print()\r\n newlist.append(leftstring) #write it to the list\r\n leftstring=str(rightstring) #move the leftovers and rerun\r\n #end while\r\n rightstring=\"init\" #re-enable rightstring for next element\r\n #print(\"onelist newlist \", newlist)\r\n newlist.append(\"\\n\") #add a blank line to the end of the list\r\n return(newlist) #return a list of strings of the name, type, and details of network objects\r\n\r\n def set38chars(self, teststring):\r\n \"\"\"this function will adjust to 38 chars for print and file output\r\n teststring may be greater or less than 38 chars\r\n shortstring is the new string. extrastring is the leftovers\r\n realisticly, only description will be longer than 38 chars\r\n leftstring is trimmed, rightstring is extra\"\"\"\r\n leftstring=teststring.strip() \r\n stringlen=len(leftstring)\r\n rightstring = \"eggs\"\r\n #print(\"Length of teststring is \", stringlen) \r\n if stringlen<=38:\r\n #print(\"string is less than than 38\")\r\n leftstring=leftstring.ljust(38)\r\n rightstring=\"\" #null out rightstring\r\n #alternate method for setting 38 chars\r\n #teststring=('{:^38}').format(teststring)#set width=38\r\n \r\n elif stringlen>38 and leftstring.startswith(\"description \"):#will not execute if desc less than 38\r\n #check if starts with description (descriptions will have spaces)\r\n #print(\"starts with description\")\r\n rightstring=leftstring[12:] \r\n leftstring=leftstring[:12]\r\n \r\n else:#string is greater than 38. 
find the last space before 38 and trim.\r\n #otherwise trim at 38\r\n #print(\"string is greater than 38\")\r\n rightspace=leftstring.rfind(\" \",0,38)\r\n if rightspace==-1: #rfind returns -1 if \" \" is not in string\r\n print(\"rightspace true, no spaces found \")\r\n rightspace=38\r\n \r\n rightstring=leftstring[rightspace:]\r\n leftstring=leftstring[:rightspace]\r\n \r\n \r\n print(leftstring, \" << 38char left string ** remainder right string >> \", rightstring)\r\n #print(\"Length of adjusted teststring is \", len(leftstring)) \r\n return(leftstring, rightstring)\r\n \r\n##************* end network object class *************\r\n \r\n\r\nclass ACLObject():\r\n \"\"\" ASA ACL Object is sufficiently different to need a new class.\r\n will have a full-string, name, index, type (permit/deny) TCP/UDP, source, destination, ports and parameter list\r\n Variables declared at the class level are not default values\r\n name = string\r\n aclstring = string (complete acl string)\r\n seq = int >sequence number - ACLs are evaluated in order\r\n could add source, dest, sourceport and destport in the future\r\n \r\n \"\"\" \r\n def __init__(self, aclstring):\r\n self.aclstring=aclstring\r\n #self.objtype=objtype\r\n #self.paramlist = list(paramlist) #copy the list\r\n typestart=aclstring.find(\"access-list \")\r\n typeend=aclstring.find(\" extended \")\r\n self.name=aclstring[typestart+11:typeend]#\r\n self.seq=0\r\n pass\r\n return #end init\r\n \r\n \r\n \r\n \r\n##************* end ACL object class *************\r\n \r\n\"\"\" \r\n Access list Object \r\n --------------------------------------------------------------------------------------------------------------\r\n \"access-list\"+ space + ACL name (interface name could be very long)+ space + \"extended\" (or \"remark\") + space \r\n --------------------------------------------------------------------------------------------------------------\r\n ---------------------------------------------------------\r\n + \"permit/deny\" + space + \"tcp/udp/icmp/protocol\" || or port object group + space \r\n --------------------------------------------------------- \r\n ---------------------------------------------------------\r\n + source + space + destination + space + ports\r\n --------------------------------------------------------- \r\n --------------------------------------------------------------------------------------------------------\r\n source>> \"any\"|| or \"host\" + space + IP Address || or network object group \"object-group\"//\r\n network object \"object\"\r\n --------------------------------------------------------------------------------------------------------\r\n --------------------------------------------------------------------------------------------------------\r\n destination>> \"any\" || or \"host\" + space + IP Address (#.#.#.#) || \r\n or network object group \"object-group\"// network object \"object\"\r\n --------------------------------------------------------------------------------------------------------\r\n --------------------------------------------------------------------------------------------------------\r\n ports >> \"any\"|| or \"host\" + space + IP Address || or network object group \"object-group\"//\r\n network object \"object\" \r\n ----------------------------------------------------------------------------------------------------\r\n\"\"\" \r\n##************* end ACL object description *************\r\n \r\nif __name__==\"__main__\":\r\n templist=[]\r\n datalist=[]\r\n remarklist=[]\r\n def writedata 
(writelist):\r\n \"\"\"write to a file. asa_outfile is filename\r\n writelist is list of lines to be appended to file.\r\n \"\"\"\r\n asa_outfile=\"PARSED-DC2-ASA-01_2017apr20th-1550hrs.txt\"\r\n fileout=open(asa_outfile, 'a')#open filename, append to end\r\n fileout.write(asa_outfile + \"\\n\"+\"\\n\")\r\n for element in writelist:\r\n #print(\"writelist element \", element, \"type \", type(element))\r\n fileout.write(element+ \"\\n\")\r\n #fileout.write(\"\\n\")\r\n \r\n \r\n fileout.close() #close the file \r\n return \r\n \r\n \r\n print(\"running __main__ ASAclassesFeb\")\r\n asa1obj=ASAobject(\"ASA1\",\"DC2-ASA-01_2017apr20th-1550hrs.txt\") #create the ASAobject\r\n asa1obj.loadarray() #execute the load array function \r\n \r\n asa1obj.wastefullsort(asa1obj.networkobjarray,asa1obj.sortednetworkobjarray)\r\n asa1obj.wastefullsort(asa1obj.netobjgrouparray,asa1obj.sortedNetObjGrouparray) \r\n asa1obj.wastefullsort(asa1obj.serviceobjarray,asa1obj.sortedServiceObjarray)\r\n \r\n asa1obj.printarray(asa1obj.aclobjarray)\r\n \r\n # this part creates a single list from the sorted arrays - for writing to a file\r\n for element in asa1obj.sortednetworkobjarray: #element is a network object\r\n templist.append(\" **** this is the sorted Network Object Array ***** \")\r\n templist=element.onelist()\r\n for inside in templist: #inside will be a string\r\n datalist.append(inside)\r\n\r\n \r\n templist.append(\" **** this is the sorted Network Object Groups ***** \")\r\n for element in asa1obj.sortedNetObjGrouparray: #element is a network object\r\n templist=element.onelist()\r\n for inside in templist: #inside will be a string\r\n #print(\"inside element \", inside, \" type \", type(inside))\r\n datalist.append(inside)\r\n\r\n \r\n templist.append(\" **** this is the sorted Service Object Array ***** \")\r\n for element in asa1obj.sortedServiceObjarray: #element is a network object\r\n templist=element.onelist()\r\n for inside in templist: #inside will be a string\r\n #print(\"inside element \", inside, \" type \", type(inside))\r\n datalist.append(inside)\r\n\r\n \r\n templist.append(\" **** this is the un-sorted ACL Object Array ***** \")\r\n for inside in asa1obj.aclobjarray: #element is a ACL object\r\n #ACL obj array is already a single list of strings\r\n checkstring=inside.aclstring\r\n if checkstring.find(\"remark\")==-1: #don't write the remarks in the ACL list\r\n datalist.append(inside.aclstring)\r\n else: #put remarks in a remarks list\r\n remarklist.append(inside.aclstring)\r\n writedata(datalist)\r\n writedata(remarklist)#put remarks at end of file\r\n \r\n #end test main ASAclasses","sub_path":"ASAclasses.py","file_name":"ASAclasses.py","file_ext":"py","file_size_in_byte":21449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"218692981","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pyarrow as pa\nimport pyarrow.parquet as pq\n\n\ndef preprocess_weather_data(file):\n \"\"\"\n This function is used to process weather json file,\n parse nested json data, and load it to Panda DataFrame\n\n input: json file\n output: Panda DataFrame\n \"\"\"\n # Load json file\n weather_data = pd.read_json(file)\n\n # Select data and load it to Panda DataFrame\n columns = ['datetime', 'external temp']\n temp_list = []\n for x in weather_data.loc[0].forecast[0]['list']:\n temp_list.append({'datetime': x['dt'], 'external temp': x['main']['temp']})\n weather_df = pd.DataFrame(temp_list)\n\n # Convert datetime 
to Panda format\n weather_df['datetime'] = pd.to_datetime(weather_df['datetime'])\n weather_df['datetime'] = weather_df['datetime'].values.astype(' 'foo'\n kwargs['unmerged_name'] = 'z'\n\n Mocks = collections.namedtuple('Mocks', [\n 'gs_context',\n 'run_command',\n 'uncompress_file',\n 'compress_file',\n 'upload',\n ])\n\n def MockList(*_args, **_kwargs):\n MockGsFile = collections.namedtuple('MockGsFile',\n ['url', 'creation_time'])\n num_files = 7\n results = []\n for i in range(1, num_files+1):\n now = datetime.datetime(year=1990, month=1, day=1+i)\n url = os.path.join(afdo.GSURL_BASE_BENCH, 'foo-%d%s%s' %\n (i, afdo.AFDO_SUFFIX, afdo.COMPRESSION_SUFFIX))\n results.append(MockGsFile(url=url, creation_time=now))\n\n return results\n\n mock_gs = mock.Mock()\n mock_gs.List = MockList\n run_command = self.PatchObject(cros_build_lib, 'RunCommand')\n uncompress_file = self.PatchObject(cros_build_lib, 'UncompressFile')\n compress_file = self.PatchObject(cros_build_lib, 'CompressFile')\n upload = self.PatchObject(afdo, 'GSUploadIfNotPresent')\n upload.return_value = upload_ok\n merged_name, uploaded = afdo.CreateAndUploadMergedAFDOProfile(mock_gs,\n '/buildroot',\n **kwargs)\n return merged_name, uploaded, Mocks(\n gs_context=mock_gs,\n run_command=run_command,\n uncompress_file=uncompress_file,\n compress_file=compress_file,\n upload=upload\n )\n\n def testCreateAndUploadMergedAFDOProfileWorksInTheHappyCase(self):\n merged_name, uploaded, mocks = \\\n self.runCreateAndUploadMergedAFDOProfileOnce(recent_to_merge=5)\n\n self.assertTrue(uploaded)\n # Note that we always return the *basename*\n self.assertEqual(merged_name, 'foo-7-merged' + afdo.AFDO_SUFFIX)\n\n self.assertTrue(uploaded)\n mocks.run_command.assert_called_once()\n\n # Note that these should all be in-chroot names.\n expected_ordered_args = ['llvm-profdata', 'merge', '-sample']\n expected_unordered_args = [\n '-output=/tmp/foo-7-merged' + afdo.AFDO_SUFFIX,\n '/tmp/foo-3' + afdo.AFDO_SUFFIX,\n '/tmp/foo-4' + afdo.AFDO_SUFFIX,\n '/tmp/foo-5' + afdo.AFDO_SUFFIX,\n '/tmp/foo-6' + afdo.AFDO_SUFFIX,\n '/tmp/foo-7' + afdo.AFDO_SUFFIX,\n ]\n\n args = mocks.run_command.call_args[0][0]\n ordered_args = args[:len(expected_ordered_args)]\n self.assertEqual(ordered_args, expected_ordered_args)\n\n unordered_args = args[len(expected_ordered_args):]\n self.assertItemsEqual(unordered_args, expected_unordered_args)\n self.assertEqual(mocks.gs_context.Copy.call_count, 5)\n\n self.assertEqual(mocks.uncompress_file.call_count, 5)\n\n def call_for(n):\n basis = '/buildroot/chroot/tmp/foo-%d%s' % (n, afdo.AFDO_SUFFIX)\n return mock.call(basis + afdo.COMPRESSION_SUFFIX, basis)\n\n\n mocks.uncompress_file.assert_has_calls(\n any_order=True, calls=[call_for(x) for x in range(3, 8)])\n\n compressed_target = '/buildroot/chroot/tmp/foo-7-merged%s%s' % \\\n (afdo.AFDO_SUFFIX, afdo.COMPRESSION_SUFFIX)\n mocks.compress_file.assert_called_once()\n args = mocks.compress_file.call_args[0]\n self.assertEqual(args, (\n compressed_target[:-len(afdo.COMPRESSION_SUFFIX)],\n compressed_target,\n ))\n\n mocks.upload.assert_called_once()\n args = mocks.upload.call_args[0]\n\n self.assertEqual(args, (\n mocks.gs_context,\n compressed_target,\n '%s/foo-7-merged%s%s' %\n (afdo.GSURL_BASE_BENCH, afdo.AFDO_SUFFIX, afdo.COMPRESSION_SUFFIX),\n ))\n\n def testCreateAndUploadMergedAFDOProfileSucceedsIfUploadFails(self):\n merged_name, uploaded, _ = \\\n self.runCreateAndUploadMergedAFDOProfileOnce(upload_ok=False)\n self.assertIsNotNone(merged_name)\n self.assertFalse(uploaded)\n\n 
def testMergeIsOKIfWeFindFewerProfilesThanWeWant(self):\n merged_name, uploaded, mocks = \\\n self.runCreateAndUploadMergedAFDOProfileOnce(recent_to_merge=1000,\n max_age_days=1000)\n self.assertTrue(uploaded)\n self.assertIsNotNone(merged_name)\n self.assertEqual(mocks.gs_context.Copy.call_count, 7)\n\n def testNoProfileIsGeneratedIfNoFilesBeforeMergedNameExist(self):\n merged_name, uploaded, _ = \\\n self.runCreateAndUploadMergedAFDOProfileOnce(\n unmerged_name='foo-0' + afdo.AFDO_SUFFIX)\n self.assertIsNone(merged_name)\n self.assertFalse(uploaded)\n\n merged_name, uploaded, _ = \\\n self.runCreateAndUploadMergedAFDOProfileOnce(\n unmerged_name='foo-1' + afdo.AFDO_SUFFIX)\n self.assertIsNone(merged_name)\n self.assertFalse(uploaded)\n\n merged_name, uploaded, _ = \\\n self.runCreateAndUploadMergedAFDOProfileOnce(\n unmerged_name='foo-2' + afdo.AFDO_SUFFIX)\n self.assertIsNotNone(merged_name)\n self.assertTrue(uploaded)\n\n def testNoFilesAfterUnmergedNameAreIncluded(self):\n max_name = 'foo-3' + afdo.AFDO_SUFFIX\n merged_name, uploaded, mocks = \\\n self.runCreateAndUploadMergedAFDOProfileOnce(unmerged_name=max_name)\n\n self.assertEqual('foo-3-merged' + afdo.AFDO_SUFFIX, merged_name)\n self.assertTrue(uploaded)\n\n # Note that these should all be in-chroot names.\n expected_ordered_args = ['llvm-profdata', 'merge', '-sample']\n expected_unordered_args = [\n '-output=/tmp/foo-3-merged' + afdo.AFDO_SUFFIX,\n '/tmp/foo-1' + afdo.AFDO_SUFFIX,\n '/tmp/foo-2' + afdo.AFDO_SUFFIX,\n '/tmp/foo-3' + afdo.AFDO_SUFFIX,\n ]\n\n args = mocks.run_command.call_args[0][0]\n ordered_args = args[:len(expected_ordered_args)]\n self.assertEqual(ordered_args, expected_ordered_args)\n\n unordered_args = args[len(expected_ordered_args):]\n self.assertItemsEqual(unordered_args, expected_unordered_args)\n\n self.assertEqual(mocks.gs_context.Copy.call_count, 3)\n self.assertEqual(mocks.uncompress_file.call_count, 3)\n\n\n def testMergeDoesntHappenIfNoProfilesAreMerged(self):\n runs = [\n self.runCreateAndUploadMergedAFDOProfileOnce(recent_to_merge=1),\n self.runCreateAndUploadMergedAFDOProfileOnce(max_age_days=0),\n ]\n\n for merged_name, uploaded, mocks in runs:\n self.assertIsNone(merged_name)\n self.assertFalse(uploaded)\n mocks.gs_context.Copy.assert_not_called()\n mocks.run_command.assert_not_called()\n mocks.uncompress_file.assert_not_called()\n mocks.compress_file.assert_not_called()\n mocks.upload.assert_not_called()\n\n\n def testFindLatestProfile(self):\n versions = [[1, 0, 0, 0], [1, 2, 3, 4], [2, 2, 2, 2]]\n self.assertEqual(afdo.FindLatestProfile([0, 0, 0, 0], versions), None)\n self.assertEqual(afdo.FindLatestProfile([1, 0, 0, 0], versions),\n [1, 0, 0, 0])\n self.assertEqual(afdo.FindLatestProfile([1, 2, 0, 0], versions),\n [1, 0, 0, 0])\n self.assertEqual(afdo.FindLatestProfile([9, 9, 9, 9], versions),\n [2, 2, 2, 2])\n\n def testPatchKernelEbuild(self):\n before = [\n 'The following line contains the version:',\n 'AFDO_PROFILE_VERSION=\"R63-9901.21-1506581597\"',\n 'It should be changed.'\n ]\n after = [\n 'The following line contains the version:',\n 'AFDO_PROFILE_VERSION=\"R12-3456.78-9876543210\"',\n 'It should be changed.'\n ]\n tf = os.path.join(self.tempdir, 'test.ebuild')\n osutils.WriteFile(tf, '\\n'.join(before))\n afdo.PatchKernelEbuild(tf, [12, 3456, 78, 9876543210])\n x = osutils.ReadFile(tf).splitlines()\n self.assertEqual(after, x)\n\n def testGetAvailableKernelProfiles(self):\n def MockGsList(path):\n unused = {'content_length':None,\n 'creation_time':None,\n 'generation':None,\n 
'metageneration':None}\n path = path.replace('*', '%s')\n return [\n gs.GSListResult(\n url=(path % ('4.4', 'R63-9901.21-1506581597')), **unused),\n gs.GSListResult(\n url=(path % ('3.8', 'R61-9765.70-1506575230')), **unused),\n ]\n\n self.PatchObject(gs.GSContext, 'List',\n lambda _, path, **kwargs: MockGsList(path))\n profiles = afdo.GetAvailableKernelProfiles()\n self.assertIn([63, 9901, 21, 1506581597], profiles['4.4'])\n self.assertIn([61, 9765, 70, 1506575230], profiles['3.8'])\n\n def testFindKernelEbuilds(self):\n ebuilds = [(os.path.basename(ebuild[0]), ebuild[1])\n for ebuild in afdo.FindKernelEbuilds()]\n self.assertIn(('chromeos-kernel-4_4-9999.ebuild', '4.4'), ebuilds)\n self.assertIn(('chromeos-kernel-3_8-9999.ebuild', '3.8'), ebuilds)\n\n def testProfileAge(self):\n self.assertEqual(\n 0,\n afdo.ProfileAge([0, 0, 0, int(time.time())])\n )\n self.assertEqual(\n 1,\n afdo.ProfileAge([0, 0, 0, int(time.time() - 86400)])\n )\n\n def testGetCWPProfile(self):\n profiles = ['R62-3202.43-320243.afdo.xz',\n 'R63-3223.0-233200.afdo.xz',\n 'R63-3239.20-323920.afdo.xz',\n 'R63-3239.42-323942.afdo.xz',\n 'R63-3239.50-323950.afdo.xz',\n 'R63-3239.50-323999.afdo.xz',\n 'R64-3280.5-328005.afdo.xz',\n 'R64-3282.41-328241.afdo.xz',\n 'R65-3299.0-329900.afdo.xz']\n\n def MockGsList(path):\n unused = {'content_length':None,\n 'creation_time':None,\n 'generation':None,\n 'metageneration':None}\n return [gs.GSListResult(url=os.path.join(path, f),\n **unused) for f in profiles]\n\n self.PatchObject(gs.GSContext, 'List',\n lambda _, path, **kwargs: MockGsList(path))\n\n def _test(version, idx):\n unused = {'pv':None,\n 'package':None,\n 'version_no_rev':None,\n 'rev':None,\n 'category':None,\n 'cpv': None,\n 'cp': None,\n 'cpf': None}\n cpv = portage_util.CPV(version=version, **unused)\n profile = afdo.GetCWPProfile(cpv, 'silvermont', 'unused', gs.GSContext())\n # Expect the most recent profile on the same branch.\n self.assertEqual(profile, profiles[idx][:-3])\n\n _test('66.0.3300.0_rc-r1', 8)\n _test('65.0.3283.0_rc-r1', 7)\n _test('65.0.3283.1_rc-r1', 7)\n _test('64.0.3282.42_rc-r1', 7)\n _test('64.0.3282.40_rc-r1', 6)\n _test('63.0.3239.30_rc-r1', 2)\n _test('63.0.3239.42_rc-r0', 2)\n _test('63.0.3239.10_rc-r1', 1)\n\n def testCWPProfileToVersionTuple(self):\n self.assertEqual(\n afdo.CWPProfileToVersionTuple('gs://chromeos-prebuilt/afdo-job/cwp/'\n 'chrome/R66-3325.65-1519321598.afdo.xz'),\n [66, 3325, 65, 1519321598])\n self.assertEqual(\n afdo.CWPProfileToVersionTuple('R66-3325.65-1519321598.afdo.xz'),\n [66, 3325, 65, 1519321598])\n\n def testPatchChromeEbuildAFDOFile(self):\n before = [\n 'The following line contains the version:',\n 'AFDO_FILE[\"benchmark\"]=\"chromeos-chrome-amd64-67.0.3379.0_rc-r1.afdo\"',\n 'AFDO_FILE[\"silvermont\"]=\"R67-3359.31-1522059092.afdo\"',\n 'AFDO_FILE[\"airmont\"]=\"airmont_before.afdo\"',\n 'AFDO_FILE[\"haswell\"]=\"haswell_before.afdo\"',\n 'AFDO_FILE[\"broadwell\"]=\"broadwell_before.afdo\"',\n 'It should be changed.'\n ]\n after = [\n 'The following line contains the version:',\n 'AFDO_FILE[\"benchmark\"]=\"chromeos-chrome-amd64-67.0.3388.0_rc-r1.afdo\"',\n 'AFDO_FILE[\"silvermont\"]=\"R67-3360.42-153456789.afdo\"',\n 'AFDO_FILE[\"airmont\"]=\"airmont_after.afdo\"',\n 'AFDO_FILE[\"haswell\"]=\"haswell_after.afdo\"',\n 'AFDO_FILE[\"broadwell\"]=\"broadwell_after.afdo\"',\n 'It should be changed.'\n ]\n\n self.PatchObject(path_util, 'FromChrootPath', lambda x: x)\n\n tf = os.path.join(self.tempdir, 'test.ebuild')\n osutils.WriteFile(tf, 
'\\n'.join(before))\n afdo.PatchChromeEbuildAFDOFile(\n tf,\n {'benchmark': 'chromeos-chrome-amd64-67.0.3388.0_rc-r1.afdo',\n 'haswell': 'haswell_after.afdo',\n 'broadwell': 'broadwell_after.afdo',\n 'airmont': 'airmont_after.afdo',\n 'silvermont': 'R67-3360.42-153456789.afdo'})\n x = osutils.ReadFile(tf).splitlines()\n self.assertEqual(after, x)\n","sub_path":"src/third_party/chromite/cbuildbot/afdo_unittest.py","file_name":"afdo_unittest.py","file_ext":"py","file_size_in_byte":13147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"189580869","text":"import numpy as np\nimport matplotlib.pyplot as plt \nimport sys\nimport random\nfrom skimage import color,io\n\n\ndef smoothing_func(alpha,m,n):\n kernel = np.zeros((m,))\n for i in range(0,m+1):\n \n if(i(m-int(alpha*m))):\n print(i)\n kernel[i-1] = np.exp(-1/(1-((2*i/alpha)-1)**2))\n else:\n kernel[i-1] = 1\n print(kernel)\n\ndef rpn(img,output_shape):\n m,n=img.shape\n output_img = np.zeros(output_shape)\n output_img.fill(img.mean())\n out_h = output_shape[0]\n out_w = output_shape[1]\n new_img = img\n output_img[out_h//2 - m//2:out_h//2+m//2,out_w//2 - n//2:out_w//2+n//2] = new_img\n random_phase_ = random_phase1(out_h,out_w)\n fft = np.fft.fft2(output_img)\n k_shift = np.fft.fftshift(fft)\n k_shift = np.real(k_shift)*np.exp(1j*random_phase_)\n orig_img = np.fft.ifft2(np.fft.ifftshift(k_shift))\n # fft_mag = np.real(periodic_component)\n # fft_phase = np.imag(periodic_component)\n\n # random_uniform_phase = random_phase1(m,n)\n # fft_phase_2 = fft_phase+random_uniform_phase\n # modified_period_comp = fft_mag*np.exp(1j*random_uniform_phase)\n # output_img[out_h//2 - m//2:out_h//2+m//2,out_w//2 - n//2:out_w//2+n//2] = modified_period_comp\n plt.imshow(np.absolute(orig_img),cmap='gray')\n plt.imsave(\"output.jpg\",np.absolute(orig_img),cmap='gray')\n plt.show()\n \n\ndef random_phase1(shapex,shapey):\n mat1 = np.random.uniform(low=0,high=np.pi,size=(shapex//2,shapey//2))\n mat2 = np.random.uniform(low=0,high=np.pi,size=(shapex//2,shapey//2))\n mat_flip = np.hstack([mat2,mat1])\n mat_flip2 = np.hstack([np.flip(mat1),np.flip(mat2)])\n phase = np.vstack([mat_flip2,mat_flip])\n phase[int(phase.shape[0]//2),int(phase.shape[1]//2)]=0\n return phase\n\ndef random_phase(shapex,shapey):\n half_x = shapex//2\n half_y = shapey//2\n x_even = shapex%2\n y_even = shapey%2\n sx = int((shapex/2)+1)\n output_img = np.zeros((shapex,shapey),dtype=complex)\n invN = float(1/float(shapex*shapey))\n sign=None\n for y in range(0,shapey):\n for x in range(0,shapex):\n if(((x==0) | ((x_even==0) & (x==half_x))) & ((y==0) | ((y_even==0) & (y==half_x)))):\n if((x==0)&(y==0)):\n sign=1\n else:\n num = random.uniform(0,1)\n if(num<0.5):\n sign=1\n else:\n sign = -1\n output_img[y][x] = sign*invN + 0j\n elif(((x==0) | ((x_even==0) & (x==half_x)))& (y>half_y)):\n addres_pixel =(shapey-y) +x \n output_img[y][x] = output_img[addres_pixel][y]\n else:\n theta = (2*random.uniform(0,1)-1)*np.pi \n costheta = float(np.cos(theta))\n sintheta = float(np.sin(theta))\n output_img[y][x] = (costheta*invN) + (1j*sintheta*invN)\n return output_img\n \n\n\nif __name__ == \"__main__\":\n img_path = sys.argv[1]\n img = color.rgb2gray(io.imread(img_path))\n rpn(img,(1000,1000))","sub_path":"random_phase_noise.py","file_name":"random_phase_noise.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"330826726","text":"#!usr/bin/env python\n#-*- 
coding:utf-8 _*-\n\"\"\"\n@python: v3.7\n@author:thinking\n@file: lagouJob.py\n@time: 2018/10/07 21:46\n\"\"\"\nimport csv\nimport requests\nfrom lxml import etree\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.remote.webelement import WebElement\nimport time\nfrom pyquery import PyQuery as pq\n\ndef parse_page(browser, writer):\n doc = browser.page_source\n selector = etree.HTML(doc)\n joblist = selector.xpath('//li[@class=\"con_list_item default_list\"]')\n for list in joblist:\n # print(list)\n comname = list.xpath('div//div[@class=\"company_name\"]/a/text()')[0]\n comurl = list.xpath('div//div[@class=\"company_name\"]/a/@href')[0]\n jobname = list.xpath('div//h3/text()')[0]\n joburl = list.xpath('div//a[@class=\"position_link\"]/@href')[0]\n comdescr = list.xpath('div//div[@class=\"industry\"]/text()')[0].replace(\"\\n\",\"\").replace(\" \",\"\")\n jobsalary = list.xpath('div//div[@class=\"li_b_l\"]/span[@class=\"money\"]/text()')[0].replace(\"\\n\",\"\").replace(\" \",\"\")\n requires = list.xpath('div//div[@class=\"li_b_l\"]/text()')\n jobrequ = \"\"\n for word in requires:\n word = word.replace(\"\\n\",\"\").replace(\" \",\"\")\n if word !=\"\":\n jobrequ = jobrequ + word\n jobtime = list.xpath('div//span[@class=\"format-time\"]/text()')[0]\n joblist = [comname,comurl,jobname,joburl, comdescr, jobsalary, jobrequ, jobtime]\n writer.writerow(joblist)\n print(comname,comurl,jobname,joburl, comdescr, jobsalary, jobrequ, jobtime)\nif __name__==\"__main__\":\n file = open('social jobs of lagou.csv', 'a', newline='', encoding=\"UTF8\")\n writer = csv.writer(file)\n writer.writerow([\"公司\",\"公司介绍\", \"职位名称\",\"职位网址\",\"公司标签\", \"薪水\", \"工作经验\",\"发布时间\"])\n options = webdriver.ChromeOptions()\n options.add_argument(\"headless\")\n browser = webdriver.Chrome('D:\\Program\\chromedriver\\chromedriver.exe', options=options)\n wait = WebDriverWait(browser, 2)\n url = \"https://www.lagou.com/jobs/list_python?px=default&city=深圳#filterBox\"\n browser.get(url)\n input = wait.until(ec.presence_of_element_located((By.CSS_SELECTOR,'#keyword')))\n submit = wait.until(ec.element_to_be_clickable((By.CSS_SELECTOR,'#submit')))\n input.clear()\n input.send_keys('python')\n submit.click()\n time.sleep(2)\n doc = browser.page_source\n selector = etree.HTML(doc)\n num = selector.xpath('//ul[@class=\"order\"]//span[@class=\"span totalNum\"]/text()')[0]\n num = int(num)\n # wait.until(ec.element_to_be_clickable((By.CSS_SELECTOR, '#s_position_list > div.item_con_pager > div > span.pager_next')))\n for i in range(num):\n parse_page(browser, writer)\n print(\"\".center(40,\"-\"))\n clickpage = wait.until(ec.element_to_be_clickable((By.CSS_SELECTOR,'#order > li > div.item.page > div.next_disabled.next')))\n clickpage.click()\n time.sleep(2)\n file.close()\n\n","sub_path":"lite/lagouJob.py","file_name":"lagouJob.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"542505468","text":"# -*- coding: utf-8 -*-\n\nfrom plugin import Plugin\n\nfrom flask import jsonify, abort\n\nclass IntentsPlugin(Plugin):\n name = \"intents\"\n\n def __init__(self, *args, **kwargs):\n super(Plugin, self).__init__(*args, **kwargs)\n\n def on_get_all(self, request):\n intents = self.system['mongo'].db.intents\n 
output = []\n for intent in intents.find():\n output.append({'id': intent['id'], 'lang': intent['lang'], 'name': intent['name']})\n return jsonify(output), 200, {\"X-Total-Count\": str(len(output)), \n \"Content-Type\": \"application/json; charset=utf-8\",\n \"Access-Control-Expose-Headers\": \"X-Total-Count\"}\n\n def on_get(self, id, request):\n intents = self.system['mongo'].db.intents\n intent = intents.find_one({'id' : id})\n if intent:\n output = {'id': intent['id'], \n 'lang': intent['lang'], \n 'name': intent['name'],\n 'says': intent['says'], \n 'action': intent['action'],\n 'slot': intent['slot'], \n 'responses': intent['responses']\n }\n else:\n abort(404)\n return jsonify(output)\n\n def on_create(self, request):\n intents = self.system['mongo'].db.intents\n id = request.json['name'] + '.' + request.json['lang']\n intent_id = intents.insert({'id': id, \n 'name': request.json['name'], \n 'lang': request.json['lang'], \n 'says': request.json['says'], \n 'action': request.json['action'], \n 'slot': request.json['slot'], \n 'responses': request.json['responses']})\n new_intent = intents.find_one({'_id': intent_id })\n output = {'id' : new_intent['id']}\n return jsonify(output)\n\n def on_update(self, id, request):\n intents = self.system['mongo'].db.intents\n intent = intents.find_one({'id' : id})\n intents.update_one({\n '_id': intent['_id']\n },{\n '$set': {\n 'says': request.json['says'],\n 'action': request.json['action'],\n 'slot': request.json['slot'], \n 'responses': request.json['responses']\n }\n }, upsert=False)\n output = {'id' : id}\n return jsonify(output)\n\n def on_delete(self, id, request):\n intents = self.system['mongo'].db.intents\n intent = intents.delete_one({'id' : id})\n if intent:\n output = {'id' : id}\n else:\n abort(404)\n return jsonify(output)\n","sub_path":"docker-images/bot/plugins/intents.py","file_name":"intents.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"422924097","text":"from django.contrib import admin\n# 支出\nfrom django.core.paginator import Paginator\n\nfrom apps.bill.models import Excome, Income\n\n\n# Register your models here.\n\n\n@admin.register(Excome)\nclass ExcomeAdmin(admin.ModelAdmin):\n admin.site.site_title = \"科研皮肤管理后台管理\"\n admin.site.site_header = \"科研皮肤管理后台\"\n # 列表\n list_display = ['id', 'p_name', 'pub_time', 'ex_info', 'excome_kpi', 'ex_pay_method', 'news_time', 'update_time',\n 'cc', 'upload_img'\n ]\n # 点击跳转\n list_display_links = ['p_name', ]\n # 搜索\n search_fields = ['pub_time', 'p_name', ]\n # 过滤器\n list_filter = ['pub_time', 'p_name', 'ex_pay_method', ]\n # ordering设置默认排序字段,负号表示降序排序\n ordering = ('-pub_time',)\n # 详细时间分层筛选\n date_hierarchy = 'pub_time'\n fieldsets = [(\"项目类型明细修改\", {\n \"fields\": ['p_name', 'pub_time', 'ex_info', 'ex_money', 'ex_pay_method', 'cc', 'upload_img']\n\n }), ]\n readonly_fields = ('news_time', 'update_time',)\n # 分页\n list_per_page = 20\n paginator = Paginator\n list_max_show_all = 20000\n save_as_continue = False\n save_on_top = False\n\n\n# 收入\n@admin.register(Income)\nclass IncomeAdmin(admin.ModelAdmin):\n # 列表\n list_display = ['id', 'pub_time', 'p_name', 'income_kpi', 'in_info', 'in_pay_method', 'news_time', 'update_time',\n 'cc'\n ]\n # 点击跳转\n list_display_links = ['p_name', ]\n # 搜索\n search_fields = ['pub_time', 'p_name', ]\n # 过滤器\n list_filter = ['pub_time', 'p_name', 'in_pay_method', ]\n # ordering设置默认排序字段,负号表示降序排序\n ordering = ('-pub_time',)\n # 详细时间分层筛选\n date_hierarchy = 
'pub_time'\n fieldsets = [(\"项目类型明细修改\", {\n \"fields\": ['pub_time', 'p_name', 'in_moneys', 'in_info', 'in_pay_method', 'news_time', 'update_time',\n 'cc']\n }), ]\n readonly_fields = ('news_time', 'update_time',)\n # 分页\n list_per_page = 20\n paginator = Paginator\n list_max_show_all = 20000\n\n save_as_continue = False\n save_on_top = False\n","sub_path":"apps/bill/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"489379000","text":"import sys\nCHARACTER_SIZE=127\nclass Node:\n def __init__(self):\n self.next=[None for i in range(CHARACTER_SIZE)]\n self.msg=None\n self.failed=None\n return\n\n def go(self,ch,createIfNotExist=False):\n if createIfNotExist and self.next[ord(ch)] == None:\n self.next[ord(ch)]=Node() \n return self.next[ord(ch)]\n\nclass Trie: \n def __init__(self):\n self.root=Node()\n return \n\n def insert(self,str,msg): \n cur_node=self.root \n for ch in str:\n cur_node = cur_node.go(ch,True)\n cur_node.msg = msg\n \n # Find first string in trie ,then return msg. else return None\n def findUntilEndpoint(self,str):\n try:\n cur_node=self.root\n for ch in str:\n cur_node = cur_node.go(ch)\n if cur_node == None:\n return None \n if cur_node.msg != None:\n return cur_node.msg\n return None\n except: # not ascii character would raise exception\n return None\n\n def getPrefixCategory(self,str):\n cur_node=self.root\n for ch in str:\n cur_node=cur_node.go(ch)\n if cur_node==None:\n return None\n if cur_node.msg!=None:\n return cur_node.msg\n print(\"[*]unkown reason:%s \" %(str)) # url is prefix of some entrylist\n return None\n \n","sub_path":"URLKeyword/TrieTree.py","file_name":"TrieTree.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"42811642","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom EdfFile import EdfFile\nfrom TiffI0 import TiffIO\nimport numpy as np\nimport matplotlib.image as mpimg\nimport pydicom as dicom\nimport scipy.io as sio\n\nimport ImageProcessing as IP\n\n\nclass STLReader():\n\n def __init__(self,filename):\n\n self.fileName = filename\n\n def data(self):\n data = IP.importSTL(self.fileName)\n return data\n\nclass MatReader():\n def __init__(self,filename):\n self.fileName = filename\n self.info = {}\n\n self.seriesDescription = []\n self.seriesImageSize = {}\n\n def getListScan(self):\n self.dicmat = sio.loadmat(self.fileName)\n return self.dicmat.keys()\n\nclass DicomReader():\n def __init__(self,filesname):\n self.filesName = filesname\n self.info = {}\n\n self.seriesDescription = []\n self.seriesImageSize = {}\n self.pixel_size = [1,1,1]\n\n def getListScan(self):\n\n self.filesName.sort()\n if len(self.filesName) > 1:\n fieldsToTest = ['SeriesDescriptiilon','PixelSpacing', 'Columns','Rows']\n for fileName in self.filesName:\n image=dicom.read_file(fileName,stop_before_pixels=True, force = True )\n self.patientName = str(image.PatientName)\n if all(keyTT in image for keyTT in fieldsToTest):\n if not image.SeriesDescription in self.seriesDescription:\n self.seriesDescription.append(image.SeriesDescription)\n try:\n self.pixel_size[0] = abs(float(image.SpacingBetweenSlices))\n except:\n try:\n self.pixel_size[0] = abs(float(image.SliceThickness))\n except:\n self.pixel_size[0] = abs(float(image.PixelSpacing[0]))\n \n self.pixel_size[1] = abs(float(image.PixelSpacing[0]))\n self.pixel_size[2] = 
abs(float(image.PixelSpacing[1]))\n self.seriesImageSize[image.SeriesDescription] = [1,int(image.Rows),int(image.Columns)]\n else:\n self.seriesImageSize[image.SeriesDescription][0] +=1\n\n\n else:\n self.seriesDescription = []\n image=dicom.read_file(self.filesName[0], force = True)\n self.pixel_size[0] = abs(float(image.SpacingBetweenSlices))\n self.pixel_size[1] = abs(float(image.PixelSpacing[0]))\n self.pixel_size[2] = abs(float(image.PixelSpacing[1]))\n self.SerieDescription = image.SeriesDescription\n self.b = image.RescaleIntercept \n self.a = image.RescaleSlope\n self.data = self.a*image.pixel_array+self.b\n self.patientName = str(image.PatientName)\n return self.seriesDescription\n\n \n def importScan(self,scanToImport):\n self.scanToImport = scanToImport\n self.inputData = {}\n\n for scan in self.scanToImport:\n arrayToImport = np.zeros((self.seriesImageSize[scan][0],self.seriesImageSize[scan][1],self.seriesImageSize[scan][2]), np.float32)\n self.inputData[scan] = arrayToImport\n\n for fileName in self.filesName:\n image=dicom.read_file(fileName,stop_before_pixels=True, force = True)\n \n if not image.SeriesDescription in self.info:\n self.info[image.SeriesDescription] = image\n\n if image.SeriesDescription in self.scanToImport:\n image=dicom.read_file(fileName, force = True)\n b = image.RescaleIntercept \n a = image.RescaleSlope\n self.inputData[image.SeriesDescription][image.InstanceNumber-1,:,:] = ( a * image.pixel_array) + b\n\n\n for scan in self.scanToImport:\n while np.all(self.inputData[scan][0]==0):\n self.inputData[scan]=np.delete(self.inputData[scan],0,0)\n while np.all(self.inputData[scan][self.inputData[scan].shape[0]-1]==0) and self.inputData[scan].shape[0]>1:\n self.inputData[scan]=np.delete(self.inputData[scan],-1,0)\n return self.inputData\n\n\nclass ImageReader(object) :\n\n def __init__(self,filename,access=None,format='edf') :\n self.fileName = filename\n self.File = 0\n self.width=0\n self.height=0\n self.data=None\n self.slice=None\n self.File = open(self.fileName, access)\n self.dtype=np.float32\n\n def getData(self) :\n\n if(self.fileName.endswith('.raw') or self.fileName.endswith('.RAW') or self.fileName.endswith('.img')) :\n File = open(str(self.fileName),\"rb\")\n size = os.path.getsize(str(self.fileName)) / 4.\n self.width=int(size**.5)\n self.height=self.width\n self.currentSlice = np.fromfile(File,dtype=' %(max_pop)s',\n 'parameters': {'max_pop': 5000000},\n }\n expected_columns = {'id', 'name', 'countrycode', 'district', 'population'}\n data_source = PostgresDataSource(**data_source_spec)\n df = postgres_connector.get_df(data_source)\n\n assert not df.empty\n assert set(df.columns) == expected_columns\n assert df.shape == (24, 5)\n\n\ndef test_get_df_array_interpolation(postgres_connector):\n data_source_spec = {\n 'domain': 'Postgres test',\n 'type': 'external_database',\n 'name': 'Some Postgres provider',\n 'database': 'postgres_db',\n 'query': 'SELECT * FROM City WHERE id in %(ids)s',\n 'parameters': {'ids': [1, 2]},\n }\n data_source = PostgresDataSource(**data_source_spec)\n df = postgres_connector.get_df(data_source)\n assert not df.empty\n assert df.shape == (2, 5)\n\n\ndef test_get_form_empty_query(postgres_connector):\n \"\"\"It should give suggestions of the databases without changing the rest\"\"\"\n current_config = {}\n form = PostgresDataSource.get_form(postgres_connector, current_config)\n assert form['properties']['database'] == {'$ref': '#/definitions/database'}\n assert form['definitions']['database'] == {\n 'title': 'database',\n 
'description': 'An enumeration.',\n 'type': 'string',\n 'enum': ['postgres', 'postgres_db'],\n }\n\n\ndef test_get_form_query_with_good_database(postgres_connector, mocker):\n \"\"\"It should give suggestions of the collections\"\"\"\n current_config = {'database': 'postgres_db'}\n form = PostgresDataSource.get_form(postgres_connector, current_config)\n assert form['properties']['database'] == {'$ref': '#/definitions/database'}\n assert form['definitions']['database'] == {\n 'title': 'database',\n 'description': 'An enumeration.',\n 'type': 'string',\n 'enum': ['postgres', 'postgres_db'],\n }\n assert form['properties']['table'] == {'$ref': '#/definitions/table'}\n assert form['definitions']['table'] == {\n 'title': 'table',\n 'description': 'An enumeration.',\n 'type': 'string',\n 'enum': ['city', 'country', 'countrylanguage'],\n }\n\n\ndef test_get_form_connection_fails(mocker, postgres_connector):\n \"\"\"It should return a form even if connect fails\"\"\"\n mocker.patch.object(pgsql, 'connect').side_effect = IOError\n form = PostgresDataSource.get_form(postgres_connector, current_config={})\n assert 'table' in form['properties']\n","sub_path":"tests/postgres/test_postgres.py","file_name":"test_postgres.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"486755420","text":"def build(bld):\n bld.headers = []\n bld.recurse('jsonconfig')\n\n bld.program(\n features='gtest',\n source='jsonconfig_test.cpp',\n target='jsonconfig_test',\n use='PFICOMMON jsonconfig'\n )\n headers = ['jsonconfig.hpp']\n bld.headers.extend(headers)\n\n bld.install_files('${PREFIX}/include',\n list(set(bld.headers)),\n relative_trick=True)\n","sub_path":"src/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"63765518","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"docker-cp\",\n version=\"0.1.0a1\",\n author=\"Juliano Luiz Fernandes\",\n author_email=\"julianofernandes@gmail.com\",\n description=\"Copy files from or to a Docker container\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/julianolf/docker-cp\",\n packages=setuptools.find_packages(exclude=[\"tests.*\", \"tests\"]),\n classifiers=[\n \"Programming Language :: Python :: 3 :: Only\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 2 - Pre-Alpha\",\n \"Environment :: Console\",\n ],\n keywords=\"docker cli copy\",\n install_requires=[\"docker>=4.0.2\", \"docopt>=0.6.2\", \"schema>=0.7.0\"],\n python_requires=\">=3.6\",\n entry_points={\"console_scripts\": [\"docker-cp=docker_cp.cli:main\"]},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"222601504","text":"n = int(input())\n\ns = [int(i) for i in input().split(' ')]\n\n\n\ndef main(s,l):\n for x in l:\n if x == -1:\n continue\n else:\n print(s.index(x)+1, end=\" \")\n i = s.index(x)\n\n s[i] = -1\n\nmain(s,sorted(s))\n","sub_path":"2021-10/2021.01.18_Lipetsk/H.py","file_name":"H.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} 
+{"seq_id":"515167558","text":"import forca\r\nimport adivinhacao\r\n\r\ndef escolhe_jogo():\r\n print(\"**********************************\")\r\n print(\"*********Escolha se Jogo!*********\")\r\n print(\"**********************************\")\r\n\r\n print(\"1 - Forca, 2 - Adivinhação\")\r\n\r\n jogo = int(input(\"Qual Jogo? \"))\r\n\r\n if(jogo == 1):\r\n print(\"Jogando Forca\")\r\n forca.jogo_forca()\r\n elif(jogo == 2):\r\n print(\"Jogando Adivinhação\")\r\n adivinhacao.jogo_adivinhacao()\r\n\r\nif(__name__ == \"__main__\"):\r\n escolhe_jogo()\r\n\r\n","sub_path":"jogos.py","file_name":"jogos.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"502153576","text":"import os\nimport glob\nimport psycopg2\nimport pandas as pd\nfrom sql_queries import *\n\n\ndef process_song_file(cur, filepath):\n \n \"\"\"\n Description: This function is responsible for reading the json file, passing it in a Dataframe\n filtering the relavant fields and inserting them in songs & artists tables.\n\n Arguments:\n cur: the cursor object.\n filepath: song data file path.\n\n Returns:\n None\n \"\"\"\n \n \n # open song file\n df = pd.read_json(filepath,typ='series')\n\n # insert song record\n song_data = [df.values[6],df.values[7] ,df.values[1], df.values[9],df.values[8]]\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = [df.values[1],df.values[5],df.values[4],df.values[2],df.values[3]]\n cur.execute(artist_table_insert, artist_data)\n\n\ndef process_log_file(cur, filepath):\n \n \"\"\"\n Description: This function is responsible for reading the json file, passing it in a Dataframe\n filtering the relavant fields and inserting them in time & users tables.\n - filter on page = \"NextSong\"\n - convert timestamp to datetime\n \n For each row of the file, the function calls a SELECT function to match a song, artist, length\n with a record in songs & artists so to retrieve user_id & artist_id. 
Then calls a fucntion to\n insert values into songplays.\n Arguments:\n cur: the cursor object.\n filepath: log data file path.\n\n Returns:\n None\n \"\"\"\n \n \n # open log file\n df = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action\n df = df[df.page == \"NextSong\"]\n\n # convert timestamp column to datetime\n t = pd.to_datetime(df['ts'],unit=\"ms\")\n \n # insert time data records\n time_data = [df['ts'],t.dt.hour,t.dt.day,t.dt.week,t.dt.month,t.dt.year,t.dt.weekday]\n column_labels = [\"timestamp\", \"hour\", \"day\",\"week\", \"month\", \"year\", \"weekday\"]\n time_dict = dict(zip(column_labels,time_data))\n time_df = pd.DataFrame.from_dict(time_dict)\n\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n # load user table\n user_df = df[[\"userId\",\"firstName\",\"lastName\",\"gender\",\"level\"]]\n\n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (row.ts,row.userId,row.level,songid,artistid,row.sessionId,row.location,row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)\n\n\ndef process_data(cur, conn, filepath, func):\n \"\"\"\n Description: This function is responsible for listing the files in a directory,\n and then executing the ingest process for each file according to the function\n that performs the transformation to save it to the database.\n\n Arguments:\n cur: the cursor object.\n conn: connection to the database.\n filepath: log data or song data file path.\n func: function that transforms the data and inserts it into the database.\n\n Returns:\n None\n \n \"\"\"\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))\n\n\ndef main():\n \"\"\"\n Description: This function is the main function of this file.\n It creates the connection to the DB and its cursor.\n Then calls the respective process_data functions to\n execute the complete etl pipeline.\n\n Arguments:\n None\n\n Returns:\n None\n \"\"\"\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"576211036","text":"\"\"\"myproject URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. 
Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.urls import path\r\nfrom todo_list.views import index,addTodo,completeTodo,deleteCompleted,deleteAll\r\nurlpatterns = [\r\n path('',index,name='index'),\r\n path('add',addTodo,name='add'),\r\n path('complete/',completeTodo,name='completed'),\r\n path('deletecomplete',deleteCompleted,name='deletecomplete'),\r\n path('deleteall',deleteAll,name='deleteall')\r\n]\r\n","sub_path":"src/myproject/todo_list/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"14920","text":"import termios, sys, os\nimport serial\nimport time\nimport struct\n\nfrom UDP_SOCK import UDP\n\n# set up serial port\nserialPortString = '/dev/ttyACM0'\nser = serial.Serial(serialPortString, 9600)\nser.open()\nser.write(chr(0xa1))\nser.timeout = 0.1\n\nmovingForward = 1\nspeedValue = 75\n\nauto = False\nlastCode = 0x00\nlastTimeStamp = 0\n\n#socket for serial communication\ns = UDP(5000)\n\n#******************************************************************************\n# Functions for creating packets and retrieving data from packets\n#******************************************************************************\n\n#get a time stamp\ndef genTimeStamp():\n\treturn int(time.time())\n\n#gen a check sum for the msg\ndef genCheckSum(msg):\n\tchkSum = 0x00\n\n\tfor char in msg:\n\t\tchkSum ^= ord(char)\n\n\treturn chr(chkSum)\n\n#encode an int\ndef encodeInt(num):\n\treturn struct.pack(\"!i\", int(num))\n\n#create a move msg\ndef createMoveMsg(code):\n\tmsg = chr(code)+encodeInt(genTimeStamp())\n\t# append the checksum\n\tmsg += genCheckSum(msg)\n\n\treturn msg\n\n#get the timestamp from a move msg\ndef getTimeStamp(msg):\n\treturn struct.unpack(\"!i\", msg[1:5])[0]\n\n# verify the msg checksum\ndef verifyMsg(msg):\n\tchckSum = msg[len(msg)-1]\n\n\tif(genCheckSum(msg[0:len(msg)-1]) == chckSum):\n\t\treturn True\n\n\treturn False\n\n#create a msg containing data on what bump sensors have been hit\ndef createBumpMsg(snsrInfo):\n\tmsg = chr(0xF7) + chr(snsrInfo)\n\tmsg += genCheckSum(msg)\n\n\treturn msg\n\n#get the info on the current state of the bump sensors\ndef getBumpInfo(msg):\n\treturn msg[1:2]\n\n#create a packet that sends the current set speed of the left and right track\ndef createTrackSpeedMsg(lTrack, rTrack):\n\tmsg = chr(0xF8) + chr(lTrack) + chr(rTrack)\n\tmsg += genCheckSum(msg)\n\n\treturn msg\n\n#get the current set speed of the left and right tracks from the message\ndef getTrackSpeed(msg):\n\tspd = []\n\tspd[0] = ord(msg[1:2])\n\tspd[1] = ord(msg[2:3])\n\n\treturn spd\n\n#creare a msg to send back the rot of the arm mortars\n#NOTE: packet size now 6 instead of 4 bytes\ndef createArmRotMsg(code, armRot):\n\tmsg = chr(code) + encodeInt(armRot)\n\tmsg += genCheckSum(msg)\n\n\treturn msg\n\n#get the rot of the arm motors from\ndef getArmRot(msg):\n\treturn struct.unpack(\"!i\", msg[1:5])[0]\n\n# Park - Stops all motors\ndef park():\n\tser.write(chr(0xff) + chr(0x00) + chr(123)) #Left Drive?\n\tser.write(chr(0xff) + chr(0x01) + chr(123)) #Right 
Drive?\n\tser.write(chr(0xff) + chr(0x02) + chr(123)) #Left Actuator\n\tser.write(chr(0xff) + chr(0x03) + chr(123)) #Right Actuator\n\tser.write(chr(0xff) + chr(0x04) + chr(123)) #Bucket Actuator\n\tser.write(chr(0xff) + chr(0x05) + chr(123)) #Winch Drive\n\n# Move Forward\ndef func_moveForward():\n#\tpark()\n\tglobal speedValue\n\tglobal movingForward\n\tmovingForward = 1\n\tspeedValue = 77\n\tmove()\n\n# Rotate Left\ndef func_rotateLeft():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x00) + chr(200))\n\tser.write(chr(0xff) + chr(0x01) + chr(55))\n\n# Curve Left Forward\ndef func_curveLeftForward():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x00) + chr(75))\n\tser.write(chr(0xff) + chr(0x01) + chr(27))\n\n# Curve Left Reverse\ndef func_curveLeftReverse():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x00) + chr(154))\n\tser.write(chr(0xff) + chr(0x01) + chr(179))\n\n# Curve Right Reverse\ndef func_curveRightReverse():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x01) + chr(154))\n\tser.write(chr(0xff) + chr(0x00) + chr(179))\n\n# Reverse\ndef func_reverse():\n#\tpark()\n\tglobal speedValue\n\tglobal movingForward\n\tmovingForward = 0\n\tspeedValue = 177\n\tmove()\n\n# Rotate Right\ndef func_rotateRight():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x00) + chr(55))\n\tser.write(chr(0xff) + chr(0x01) + chr(200))\n\n# Curve Right Forward\ndef func_curveRightForward():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x00) + chr(75))\n\tser.write(chr(0xff) + chr(0x01) + chr(27))\n\n# Move (this is called by speedUp and slowDown)\ndef move():\n\tglobal speedValue\n\tser.write(chr(0xff) + chr(0x00) + chr(speedValue))\n\tser.write(chr(0xff) + chr(0x01) + chr(speedValue))\n\n# Slow Down\ndef func_slowDown():\n\tglobal speedValue\n\tif (movingForward == 1):\n\t\tif ( speedValue <= 50):\n\t\t\tspeedValue = speedValue + 25\n\telse:\n\t\tif (speedValue >= 204):\n\t\t\tspeedValue = speedValue - 25\n\n# Speed Up\ndef func_speedUp():\n\tglobal speedValue\n\tif (movingForward == 1):\n\t\tif (speedValue >= 25):\n\t\t\tspeedValue = speedValue - 25\n\telse:\n\t\tif (speedValue <= 229):\n\t\t\tspeedValue = speedValue + 25\n\n# Dig\n#def func_dig():\n#\tpark()\n#\tarms.write('e')\n#\tout = ''\n#\ttime.sleep(0.5)\n#\twhile (arms.inWaiting() > 0):\n#\t\tout += arms.read(1)\n#\t\t#print out\n#\tarms.write('e')\n#\twhile (arms.read() != 'p'):\n#\t\ttime.sleep(.01)\n\n# Take a dump\n#def func_dump():\n#\tpark()\n#\tarms.write('d')\n#\tout = ''\n#\ttime.sleep(0.5)\n#\twhile (arms.inWaiting() > 0):\n#\t\tout += arms.read(1)\n\t\t#print out\n#\tarms.write('d')\n#\twhile (arms.read() != 'p'):\n#\t\ttime.sleep(.01)\n\n#Bring main arms up\ndef func_mainArmsUp():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x02) + chr(254)) #Left Actuator\n\tser.write(chr(0xff) + chr(0x03) + chr(254)) #Right Actuator\n\n#Bring main arms down\ndef func_mainArmsDown():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x02) + chr(0)) #Left Actuator\n\tser.write(chr(0xff) + chr(0x03) + chr(0)) #Right Actuator\n\n#Left Actuator Up\ndef func_leftActuatorUp():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x02) + chr(254))\n\n#Left Actuator Down\ndef func_leftActuatorDown():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x02) + chr(0))\n\n#Right Actuator Up\ndef func_rightActuatorUp():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x03) + chr(254))\n\n#Right Actuator Down\ndef func_rightActuatorDown():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x03) + chr(0))\n\n#Scondary Actuator Up\ndef func_bucketActuatorUp():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x04) + chr(254))\n\n#Scondary Actuator down\ndef 
func_bucketActuatorDown():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x04) + chr(0))\n\n#forward winch\ndef func_forwardWinch():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x05) + chr(254))\n\n#reverse winch\ndef func_reverseWinch():\n#\tpark()\n\tser.write(chr(0xff) + chr(0x05) + chr(0))\n\n####################################################################################################\n#misc functions\n####################################################################################################\n\n\n####################################################################################################\n#Interpreter Function\n####################################################################################################\n\ndef intrp(addr, msg):\n\n# while len(msg) > 0:\n\tif(verifyMsg(msg)):\n\t\tif(ord(msg[0]) == 0x00):\n\t\t\t#Move forward\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_moveForward()\n\t\t\ttime.sleep(.1)\n\t\t\tpark()\n\t\t\tprint(\"Move Forward\")\n\n\t\t\t#clear the buffer of all codes with same time stamp\n# while len(msg) > 0 and lastTimeStamp >= getTimeStamp(msg) and lastCode == msg[0]:\n# for x in range(0, 5) and len(msg) > 0:\n\t\t\t\t\t#clear the packet\n# msg.pop[0]\n\n\t\telif(ord(msg[0]) == 0x01):\n\t\t\t#Move backward\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_reverse()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Move Backward\")\n\n\t\telif(ord(msg[0]) == 0x02):\n\t\t\t#Rot Left\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_rotateLeft()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Rot Left\")\n\n\t\telif(ord(msg[0]) == 0x03):\n\t\t\t#Rot Right\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_rotateRight()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Rot Right\")\n\n\t\telif(ord(msg[0]) == 0x04):\n\t\t\t#Curve Forward Left\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_curveLeftForward()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Forward Left\")\n\n\t\telif(ord(msg[0]) == 0x05):\n\t\t\t#Curve Backward Left\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_curveLeftReverse()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Backward Left\")\n\n\t\telif(ord(msg[0]) == 0x06):\n\t\t\t#Curve Forward Right\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_curveRightForward()\n\t\t\ttime.sleep(.1)\n\t\t\tpark()\n\t\t\tprint(\"Forward Right\")\n\n\t\telif(ord(msg[0]) == 0x07):\n\t\t\t#Curve Backward Right\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_curveRightReverse()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Backward Right\")\n\n\t\telif(ord(msg[0]) == 0x08):\n\t\t\t#Increase Speed\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_speedUp()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Increase Speed\")\n\n\t\telif(ord(msg[0]) == 0x09):\n\t\t\t#Decrease Speed\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_slowDown()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Decrease Speed\")\n\n\t\telif(ord(msg[0]) == 0x0A):\n\t\t\t#Dig\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n#\t\t\tfunc_dig()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n#\t\t\tprint(\"Dig\")\n\n\t\telif(ord(msg[0]) == 0x0B):\n\t\t\t#Dump\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = 
getTimeStamp(msg)\n#\t\t\tfunc_dump()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n#\t\t\tprint(\"Dump\")\n\n\t\telif(ord(msg[0]) == 0x0C):\n\t\t\t#Stop Movement\n\t\t\tlastTimeStamp = 0 #reset the time stamp\n\t\t\tpark()\n#\t\t\tprint(\"Stop\")\n\n\t\telif(ord(msg[0]) == 0x0D):\n\t\t\t#Main Arm Up\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_mainArmsUp()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Main Arm Up\")\n\n\t\telif(ord(msg[0]) == 0x0E):\n\t\t\t#Main Arm Down\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_mainArmsDown()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Main Arm Down\")\n\n\t\telif(ord(msg[0]) == 0x0F):\n\t\t\t#Bucket Actuator Out\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_bucketActuatorUp()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Bucket Actuator Out\")\n\n\t\telif(ord(msg[0]) == 0x10):\n\t\t\t#Bucket Actuator In\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_bucketActuatorDown()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Bucket Actuator In\")\n\n\t\telif(ord(msg[0]) == 0x11):\n\t\t\t#L Actuator Up\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_leftActuatorUp()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"L Actuator Up\")\n\n\t\telif(ord(msg[0]) == 0x12):\n\t\t\t#R Actuator Up\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_rightActuatorUp()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"R Actuator Up\")\n\n\t\telif(ord(msg[0]) == 0x13):\n\t\t\t#L Actuator Down\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_leftActuatorDown()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"L Actuator Down\")\n\n\t\telif(ord(msg[0]) == 0x14):\n\t\t\t#R Actuator Down\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_rightActuatorDown()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"R Actuator Down\")\n\n\t\telif(ord(msg[0]) == 0x15):\n\t\t\t#Winch Forward\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_forwardWinch()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Winch Forward\")\n\n\t\telif(ord(msg[0]) == 0x16):\n\t\t\t#Winch Reverse\n\t\t\tlastCode = msg[0]\n\t\t\tlastTimeStamp = getTimeStamp(msg)\n\t\t\tfunc_reverseWinch()\n\t\t\ttime.sleep(.01)\n\t\t\tpark()\n\t\t\tprint(\"Winch Reverse\")\n\n\t\telif(ord(msg[0]) == 0xEF):\n\t\t\t#Resume\n\t\t\tpass\n\n\t\telif(ord(msg[0]) == 0xF0):\n\t\t\t#Start\n\t\t\tpass\n\n\t\telif(ord(msg[0]) == 0xF1):\n\t\t\t#Stop\n\t\t\tpass\n\n\t\telif(ord(msg[0]) == 0xF2):\n\t\t\t#Manual Control\n\t\t\tpass\n\t\t\tauto = False\n\t\t\t#send the confirm a few times to assure receiving\n\t\t\t#for x in range(0, 5):\n\t\t\t# s.send(0xF2,addr)\n\nif __name__ == '__main__':\n\ts.startReceive(intrp)\n\twhile 1:\n\t\ttime.sleep(5)\n\t\tpass\n","sub_path":"newVersion/RobotCtrlSvr.py","file_name":"RobotCtrlSvr.py","file_ext":"py","file_size_in_byte":11321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"418736288","text":"from django.shortcuts import render\r\nfrom .backend.status import Status\r\nfrom .backend.status2 import Status2\r\nfrom django.http import HttpResponse\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom .backend.backendInterface import Tree\r\nfrom .backend.backendInterface import Two\r\nimport json\r\n\r\nob = []\r\nob2 = []\r\n# Create your views 
here.\r\ndef home(request):\r\n return render(request, 'filesystem/home.html')\r\n\r\n@csrf_exempt\r\ndef tree(request, choice):\r\n global ob\r\n ob = Tree()\r\n Status.choice = choice\r\n Status.flag = 0\r\n Status.path = ['root']\r\n Status.filelis = []\r\n dictionary = {'filelis' : Status.filelis, 'path' : Status.path, 'flag' : Status.flag}\r\n return render(request, 'filesystem/tree.html', {\"dictionary\" : dictionary})\r\n\r\n@csrf_exempt\r\ndef single(request, choice):\r\n global ob\r\n ob = Tree()\r\n Status.choice = choice\r\n Status.flag = 0\r\n Status.path = ['root']\r\n Status.filelis = []\r\n dictionary = {'filelis' : Status.filelis, 'path' : Status.path, 'flag' : Status.flag}\r\n return render(request, 'filesystem/single.html', {\"dictionary\" : dictionary})\r\n\r\n@csrf_exempt\r\ndef two(request, choice):\r\n global ob2\r\n ob2 = Two()\r\n Status2.choice = choice\r\n Status2.flag = 0\r\n Status2.flag1 = 0\r\n Status2.flag2 = 0\r\n Status2.path = ['root']\r\n Status2.filelis = []\r\n dictionary = {'filelis' : Status2.filelis, 'path' : Status2.path, 'flag' : Status2.flag, 'flag1' : Status2.flag1, 'flag2' : Status2.flag2}\r\n return render(request, 'filesystem/two.html', {\"dictionary\" : dictionary})\r\n\r\n@csrf_exempt\r\ndef process(request):\r\n path = request.POST.getlist('path[]')\r\n command = request.POST.get('command')\r\n ob.passCmd(path, command)\r\n dictionary = {'filelis' : Status.filelis, 'path' : Status.path, 'flag' : Status.flag}\r\n return HttpResponse(json.dumps({\"dictionary\" : dictionary}), content_type=\"application/json\")\r\n\r\n@csrf_exempt\r\ndef process2(request):\r\n path = request.POST.getlist('path[]')\r\n command = request.POST.get('command')\r\n ob2.passCmd(path, command)\r\n dictionary = {'filelis' : Status2.filelis, 'path' : Status2.path, 'flag' : Status2.flag, 'flag1' : Status2.flag1, 'flag2' : Status2.flag2}\r\n return HttpResponse(json.dumps({\"dictionary\" : dictionary}), content_type=\"application/json\")\r\n","sub_path":"ossim/filesystem/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"14706622","text":"from Framework.utility.Constants import get_XPATH, get_projectLogger\nfrom Framework.utility.SeleniumWebScraper import SWS, Attr\n\n\n# Project constants\nlogger = get_projectLogger()\nXPATH = get_XPATH()\n\n\ndef multi_villages_status(sws: SWS):\n \"\"\"\n Checks if the user has multiple villages.\n\n Parameters:\n - sws (SWS): Selenium Web Scraper.\n\n Returns:\n - True if the account has multiple villages, False otherwise.\n \"\"\"\n return sws.isVisible(XPATH.ALL_VILLAGES_LINKS)\n\n\ndef get_all_villages_name(sws: SWS):\n \"\"\"\n Gets the name of all villages:\n\n Parameters:\n - sws (SWS): Selenium Web Scraper.\n\n Returns:\n - [str] if operation is successful, None otherwise.\n \"\"\"\n ret = None\n if multi_villages_status(sws):\n villageNames = sws.getElementsAttribute(XPATH.ALL_VILLAGES_LINKS, Attr.TEXT)\n if villageNames:\n ret = villageNames\n else:\n logger.error('In get_all_villages_name: Failed to extract villages name')\n else:\n logger.warning('In get_all_villages_name: multi_villages_status() failed')\n return ret\n\n\ndef get_current_village(sws: SWS):\n \"\"\"\n Gets the currently selected village.\n\n Parameters:\n - sws (SWS): Selenium Web Scraper.\n\n Returns:\n - str if operation was successful, None otherwise.\n \"\"\"\n ret = None\n if multi_villages_status(sws):\n selectedVillage = 
sws.getElementAttribute(XPATH.SELECTED_VILLAGE, Attr.TEXT)\n if selectedVillage:\n ret = selectedVillage\n else:\n logger.error('In get_current_village: Failed to get selected village')\n else:\n logger.warning('In get_current_village: multi_villages_status() failed')\n return ret\n\n\ndef select_village(sws: SWS, villageName: str):\n \"\"\"\n Attempts to select a village.\n\n Parameters:\n - sws (SWS): Selenium Web Scraper.\n - villageName (str): Desired village.\n\n Returns:\n - True if operation was successful, None otherwise.\n \"\"\"\n ret = False\n if multi_villages_status(sws):\n if villageName in get_all_villages_name(sws):\n if sws.clickElement(XPATH.SELECT_VILLAGE % villageName, refresh=True):\n if get_current_village(sws) == villageName:\n ret = True\n else:\n logger.error(f'In select_village: Operation failed {villageName} was not selected')\n else:\n logger.error(f'In select_village: Failed to click on village option')\n else:\n logger.warning(f'In select_village: {villageName} not in villages')\n else:\n logger.warning('In select_village: multi_villages_status() failed')\n return ret\n\n\ndef village_send_goods(sws: SWS, villageName: str, ammount: list):\n \"\"\"\n Sends goods to desired village.\n\n Parameters:\n - sws (SWS): Selenium Web Scraper.\n - villageName (str): Desired village.\n - ammount (list): Contains 4 integers denoting how much resources to send.\n\n Returns:\n - True if operation was successful, None otherwise.\n \"\"\"\n return True\n\n\ndef village_send_troops(sws: SWS, villageName: str):\n \"\"\"\n Will send goods to desired village.\n\n Parameters:\n - sws (SWS): Selenium Web Scraper.\n - villageName (str): Desired village.\n - ammount (list): Contains 4 integers denoting how much resources to send.\n\n Returns:\n - True if operation was successful, None otherwise.\n \"\"\"\n return True\n","sub_path":"Framework/screen/MultiVillage.py","file_name":"MultiVillage.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"403810167","text":"def primes1(n):\n \"\"\"\n This method is taken from a StackOverflow question about calculation of primes performance.\n Once I saw how the sieve of Eratosthenes worked in O(n) time I was hoping to find something\n a litte more performant, so I put this method in here as a comparison. This method is from\n http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n/3035188#3035188\n It turns out that this implementation is not Python3 compatible and runs a good 2-3 times faster\n than my sieve function\n \"\"\"\n\n \"\"\" Returns a list of primes < n \"\"\"\n sieve = [True] * (n/2)\n for i in xrange(3, int(n**0.5)+1, 2):\n if sieve[i / 2]:\n sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1)\n return [2] + [2*i+1 for i in xrange(1, n/2) if sieve[i]]\n\n\ndef sieve_of_eratosthenes(n):\n \"\"\"\n sieve of eratosthenes\n\n I created this algorithm from the description in Wikipeadia http://en.wikipedia.org/wiki/Sieve_of_Eratosthenes\n\n given a number N find all primes less than N\n We can do this by setting up a list of numbers up to N, then starting with the number 2, eliminate the multiples of\n that value. 
The next value to check for multiples is the next value of the list that hasn't been eliminated\n \"\"\"\n import math\n\n np1 = n + 1\n p = [True] * np1\n\n # according to the Wikipeadia article on the sieve of Eratosthenes we can optimize by only going to SQRT(N)\n # we essentially truncate the floating point value then add 1\n max_value = int(math.sqrt(n)) + 1\n\n # using slices since I just learned how they work to mark the first two positions (0, 1) as not prime (FALSE)\n p[:2] = [False] * 2\n\n # this is the cool part. Starting at 2, mark all items in positions that are multiple as not prime\n # what's cool about is the combination of the optimization of starting with the square of the value (2*2, 3*3, 5*5)\n # combined with slicing using a step size - it makes for a really compact expression\n for i in range(2, max_value):\n if p[i]:\n p[i*i:np1:i] = [False] * len(range(i*i, np1, i))\n\n # now we can iterate the list and print the position value of those marked as prime (True)\n # for i in range(2, np1):\n # if p[i]:\n # print(i)\n\n return [i for i in range(2, np1) if p[i]]\n\n # So how can I build a tree in Python? I'm going to try to do this with tuples, where the items in the tuple\n # are either a factor or another tuple. That would make the leaves of the tree just the factor, but it doesn't\n # seem to be very efficient tree because we have to walk the entire tree looking for the leaves.\n\n\n# This is my recursion function to walk the tuple tree and harvest the factors\ndef get_factors(factor_tree):\n factor_list = []\n for eachItem in factor_tree:\n if type(eachItem) is tuple:\n factor_list.extend(get_factors(eachItem))\n else:\n factor_list.extend([eachItem])\n return factor_list\n\n\ndef factor(n, p):\n \"\"\"\n Create the tuple tree - I think there's a recursive way to do this as well\n given a number n, first check to see if it is in the list of primes\n if it is not prime, check to see if it is divisible by any of the primes using modulo operator\n if n is found to be evenly divisible, return that prime and the factor as a tuple\n if no prime is found, return n\n \"\"\"\n if n not in p:\n for prime in p:\n if n % prime == 0:\n return prime, factor(int(n/prime), p)\n\n return n\n\n# ---------------------------------------------------------------------------------------\n#\n# Prime factorization\n#\n# ---------------------------------------------------------------------------------------\n\n# This is the number we want to factor\nnumber = 1234567890\n\nprint(\"Calculating the sieve of Eratosthenes\")\n# primes = sieveOfEratosthenes(number)\nprimes = primes1(number)\n\nprint(\"factoring...\")\n# This is the factor tree of our number\nfTree = factor(number, primes)\nprint (fTree)\n\nprint(\"Harvesting...\")\n# Now traverse the tree to harvest the factors\nfactors = get_factors(fTree)\n\nprint (factors)\n\nuniqueFactors = set()\n# Count the number of unique prime factors\nfor factor in factors:\n uniqueFactors.add(factor)\n\n\nprint (\"Unique factors {}\".format(len(uniqueFactors)))\n\n","sub_path":"sieve.py","file_name":"sieve.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"221048247","text":"#looping through numerical lists\nmultiples=[]\nfor value in range(1,31):\n value=value*3\n multiples.append(value)\nprint(multiples)\n\n\n #4-4 one to one million\nvalues = list(range(1,1000001)) # i donno whats the problem\nfor i in values:\n 
i=int(i)\r\nprint(max(values))\r\nprint(min(values))\r\nprint(sum(values))","sub_path":"python_basics/multiples_of_three.py","file_name":"multiples_of_three.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"175742208","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\nimport os\n\n\nAUTHOR = u'Rodrigo Guzman'\nSITENAME = u''\nSITEURL = ''\n\nPATH = 'content'\n\nTIMEZONE = 'US/Eastern'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n\n# Blogroll\n# LINKS = (('Pelican', 'http://getpelican.com/'),\n# ('Python.org', 'http://python.org/'),\n# ('Jinja2', 'http://jinja.pocoo.org/'),\n# ('You can modify those links in your config file', '#'),)\n\n# Social widget\n# SOCIAL = (('You can add links in your config file', '#'),\n# ('Another social link', '#'),)\nDEFAULT_PAGINATION = False\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\nDEFAULT_DATE = 'fs'\nDISPLAY_PAGES_ON_MENU = False\nDISPLAY_CATEGORIES_ON_MENU = False\n\n# theme and theme-specific settings\nTHEME = 'scratchpad-theme'\nCSS_OVERRIDE = 'css/overrides.css'\nCOLOR_SCHEME_CSS = 'github_jekyll.css'\nHEADER_COLOR = 'black'\nHEADER_COVER = ''\n\nARTICLE_PATHS = ['articles']\nPAGE_PATHS = ['pages']\nSTATIC_PATHS = ['images', 'css']\n\nPAGE_URL = '{slug}/'\nPAGE_SAVE_AS = '{slug}/index.html'\n\n\n# override the footer\nFOOTER_INCLUDE = 'custom_footer.html'\nIGNORE_FILES = [FOOTER_INCLUDE]\nEXTRA_TEMPLATES_PATHS = [os.path.join(os.path.dirname(__file__), 'templates')]\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"183264017","text":"# Import libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\nfrom datetime import timedelta\r\nimport seaborn as sn\r\nfrom IPython.display import display\r\nimport statsmodels.api as sm\r\nfrom statsmodels.formula.api import ols\r\n\r\n'''-> to display all shrunken columns'''\r\npd.set_option('display.max_columns', 30)\r\ndf = pd.read_excel(\"InternetData.xlsx\")\r\n#display(df.head())\r\n'''Understanding the data'''\r\n# describe the structure of data\r\ndf.info()\r\n\r\n#display the summary or descriptive statistics of the data\r\nprint(df.describe().transpose())\r\n #2: Do the unique page views depend on the visits?\r\n\r\nmod_uniq_pg = ols('Uniquepageviews ~ Visits',\r\n data=df).fit()\r\n \r\naov_table = sm.stats.anova_lm(mod_uniq_pg, typ=2)\r\ndisplay(pd.DataFrame(aov_table))\r\n#3: Exit page analysis\r\n\r\nmod_exits = ols('Exits ~ Timeinpage+Continent+Sourcegroup+Bounces+Uniquepageviews+Visits',\r\n data=df).fit()\r\n \r\naov_table1 = sm.stats.anova_lm(mod_exits, typ=2)\r\ndisplay(pd.DataFrame(aov_table1))\r\n\r\n#4: Time on page depends on?\r\n\r\nmod_time = ols('Timeinpage ~ Exits+Continent+Sourcegroup+Bounces+Uniquepageviews+Visits',\r\n data=df).fit()\r\n \r\naov_table2 = sm.stats.anova_lm(mod_time, typ=2)\r\n#display(pd.DataFrame(aov_table2))\r\n\r\n#5: Bounce rate\r\n\r\n'''-> Fitting a Linear Regression model '''\r\n##X = df['Insured'].values.reshape(-1,1)\r\n##y = df['Payment'].values.reshape(-1,1)\r\n##reg = LinearRegression()\r\n##reg.fit(X, 
y)\r\n''' variance inflation factor'''\r\n##vif1 = pd.DataFrame()\r\n##vif1[\"VIF Factor\"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]\r\n##vif1[\"features\"] = X.columns\r\n##vif1.round(1)\r\n\r\n\r\ny = df['Bounces']\r\nX = np.column_stack((df['Visits'],df['Timeinpage'], df['Exits'],df['Uniquepageviews']))\r\nX2 = sm.add_constant(X)\r\n\r\ndef regression(y,X2):\r\n est = sm.OLS(y, X2)\r\n est2 = est.fit()\r\n display(est2.summary())\r\n #df.plot()\r\n #plt.show()\r\n\r\nregression(y,X2)\r\n\r\n","sub_path":"internet.py","file_name":"internet.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"197381037","text":"## Toggle an LED when the GUI button is pressed ##\n##https://core-electronics.com.au/tutorials/raspberry-pi-workshop-for-beginners.html#ch4\n##https://medium.com/@pranav.keyboard/installing-opencv-for-python-on-windows-using-anaconda-or-winpython-f24dd5c895eb\n##https://learn.sparkfun.com/tutorials/how-to-run-a-raspberry-pi-program-on-startup/all\nfrom tkinter import *\nimport tkinter.font\n\n# from gpiozero import LED\n# import RPi.GPIO\n# RPi.GPIO.setmode(RPi.GPIO.BCM)\n#\n# ### HARDWARE DEFINITIONS ###\n# led=LED(14)\n\n### GUI DEFINITIONS ###\nwin = Tk()\nwin.title(\"LED Toggler\")\nmyFont = tkinter.font.Font(family = 'Helvetica', size = 12, weight = \"bold\")\nglobal led\nled = False\n\n\n### Event Functions ###\ndef ledToggle():\n global led\n if led:\n print(\"LED OFF\")\n led = False\n\n else:\n print(\"LED ON\")\n led = True\n\n # if led.is_lit:\n # led.off()\n # ledButton[\"text\"]=\"Turn LED on\" # Change only the button text property\n # else:\n # led.on()\n # ledButton[\"text\"]=\"Turn LED off\"\n\ndef close():\n #RPi.GPIO.cleanup()\n print(\"Window closed\")\n win.destroy()\n\n\n\n### WIDGETS ###\n\n# Button, triggers the connected command when it is pressed\nledButton = Button(win, text='Turn LED on', font=myFont, command=ledToggle, bg='bisque2', height=1, width=24)\nledButton.grid(row=0,column=1)\n\nexitButton = Button(win, text='Exit', font=myFont, command=close, bg='red', height=1, width=6)\nexitButton.grid(row=2, column=1)\n\nwin.protocol(\"WM_DELETE_WINDOW\", close) # cleanup GPIO when user closes window\n\nwin.mainloop() # Loops forever\n","sub_path":"closingWIn.py","file_name":"closingWIn.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"151270951","text":"from django.shortcuts import render,redirect\nfrom django.http import JsonResponse\nfrom sh_user import user_decorate\nfrom cart.models import *\nfrom sh_goods.models import *\nimport redis\nfrom sh_user import user_decorate\nfrom django.core.cache import cache\nfrom django_redis import get_redis_connection\n\n@user_decorate.login\ndef cart(request):\n good_list = []\n count_list = []\n uid=request.session['user_id']\n conn=get_redis_connection('default')\n #Fetch the current user's cart from the database\n cart_key='cart_%d'%uid\n goodsid=conn.hkeys(cart_key)\n #Convert bytes to str\n for i in range(len(goodsid)):\n goodsid[i]=goodsid[i].decode()\n for goodid in goodsid:\n good_list.append(GoodsInfo.objects.get(id=int(goodid)))\n count_list.append(conn.hget(cart_key,goodid).decode())\n ziped=list(zip(good_list,count_list))\n dic=dict((a,b) for a,b in ziped)\n context={\n 'title':'购物车',\n 'page_name':1,\n 'good_list':good_list,\n 'count_list':count_list,\n 'dic':dic\n }\n return 
render(request,'sh_cart/cart.html',context)\n@user_decorate.login\ndef edit(request,gid,count):\n try:\n good_list = []\n count_list = []\n uid = request.session['user_id']\n conn = get_redis_connection('default')\n # Fetch the current user's cart from the database\n cart_key = 'cart_%d' % uid\n goodsid = conn.hkeys(cart_key)\n # Convert bytes to str\n for i in range(len(goodsid)):\n goodsid[i] = goodsid[i].decode()\n for goodid in goodsid:\n good_list.append(GoodsInfo.objects.get(id=int(goodid)))\n count_list.append(conn.hget(cart_key, goodid).decode())\n ziped = list(zip(good_list, count_list))\n dic = dict((a, b) for a, b in ziped)\n for key, value in dic.items():\n if key.gstorage < int(value):\n return JsonResponse({'ok': 1, 'message': '该商品库存不足'})\n #Update the quantity in the cart cache\n count1=conn.hget(cart_key,gid).decode()\n conn.hset(cart_key,gid,count)\n data={'ok':0}\n except Exception as e:\n data={'ok': count1,'e':e}\n return JsonResponse(data)\n@user_decorate.login\ndef delete(request,gid):\n uid = request.session['user_id']\n conn = get_redis_connection('default')\n # Fetch the current user's cart from the database\n cart_key = 'cart_%d' % uid\n #Remove the item from the cart cache\n conn.hdel(cart_key,gid)\n cart_count = conn.hlen(cart_key)\n if request.is_ajax():\n return JsonResponse({'count':cart_count,'ok':1})\n else:\n return redirect('/cart')\n@user_decorate.login\ndef add(request,gid,count):\n gid=int(gid)\n count=int(count)\n conn=get_redis_connection('default')\n uid = request.session['user_id']\n cart_key='cart_%d'%uid\n cart_count=conn.hget(cart_key,gid)\n if cart_count:\n count+=int(cart_count)\n conn.hset(cart_key,gid,count)\n cart_count=conn.hlen(cart_key)\n if request.is_ajax():\n return JsonResponse({'count':cart_count})\n else:\n return redirect('/cart')\n# Create your views here.\n","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"223854314","text":"##\n## Print the unique values of column _c4\n## of table tbl1 in uppercase\n## \nimport pandas as pd\nimport numpy as np\ntbl1 = pd.read_csv(\"tbl1.tsv\",\n\tsep = '\\t', \n\tthousands = None, \n\tdecimal = '.')\nx=pd.unique(tbl1['_c4'].str.upper()).tolist()\nprint(sorted(x))\n","sub_path":"q04.py","file_name":"q04.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"293070273","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 2 20:57:07 2019\r\n\r\n@author: mayur.v\r\n\"\"\"\r\n\r\nimport nltk\r\nimport re\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom nltk.tokenize import sent_tokenize\r\nfrom nltk.chunk import conlltags2tree, tree2conlltags\r\nfrom nltk import word_tokenize, pos_tag, ne_chunk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tree import Tree\r\nfrom nltk import everygrams\r\nfrom fuzzywuzzy import fuzz\r\nfrom fuzzywuzzy import process \r\nimport sys\r\n\r\n## Read Input data files\r\ndf = pd.read_csv(sys.argv[1], delimiter = ',',encoding = 'latin1',error_bad_lines=False)\r\n#df = pd.read_csv(\"C:/Users/mayur.v/Desktop/BHGE/Issue Trending/Issue Trending/EBS_PGT25.csv\",delimiter = ',',encoding = 'latin1')\r\n\r\ndf1 = pd.read_csv(sys.argv[2], delimiter = ',',encoding = 'latin1',error_bad_lines=False)\r\n#df1 = pd.read_csv(\"C:/Users/mayur.v/Desktop/BHGE/Issue Trending/Issue Trending/Instrument List.csv\",delimiter = ',',encoding = 'latin1')\r\n\r\ndf2 = pd.read_csv(sys.argv[3], delimiter = ',',encoding = 'latin1',error_bad_lines=False)\r\n#df2 = 
pd.read_csv(\"C:/Users/mayur.v/Desktop/BHGE/Issue Trending/Issue Trending/Equipment List.csv\",delimiter = ',',encoding = 'latin1')\r\n\r\n\r\n## Unique list function\r\n\r\ndef unique(list1): \r\n \r\n # intilize a null list \r\n unique_list = [] \r\n \r\n # traverse for all elements \r\n for x in list1: \r\n # check if exists in unique_list or not \r\n if x not in unique_list: \r\n unique_list.append(x) \r\n # print list \r\n for x in unique_list: \r\n return(x)\r\n\r\n##### Processing on EBS Input Data file to extract dump of unique keywords\r\n \r\niob_tagged = []\r\nfor i in range(0,len(df['PMHD_TA004_SYS_T_DES'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df['PMHD_TA004_SYS_T_DES'][i])))\r\n iob_tagged.append(tree2conlltags(ne_tree))\r\n \r\n\r\ns1 = []\r\nfor i in range(0,len(iob_tagged)):\r\n s1.append([i[0] for i in iob_tagged[i]])\r\n \r\ns2 = []\r\nfor i in range(0,len(iob_tagged)):\r\n s2.append([i[1] for i in iob_tagged[i]])\r\n \r\ns3 = []\r\nfor i in range(0,len(iob_tagged)):\r\n s3.append([i[2] for i in iob_tagged[i]])\r\n\r\ns01 = []\r\nfor i in range(0,len(s1)):\r\n s01.append(set(s1[i]))\r\n \r\ns11 = []\r\nfor i in range(0,len(s2)):\r\n s11.append(set(s2[i]))\r\n\r\ns22 = []\r\nfor i in range(0,len(s3)):\r\n s22.append(set(s3[i]))\r\n\r\ns_l01 = unique(s01)\r\ns_l1 = unique(s11)\r\ns_l2 = unique(s22)\r\n\r\n\r\nfin_pos = []\r\nfor i in range(0,len(iob_tagged)): \r\n #print(i)\r\n if len(iob_tagged[i])==0:\r\n fin_pos.append('')\r\n else:\r\n fin_pos.append(list(zip(*iob_tagged[i]))[0])\r\n \r\nunique_data = [list(x) for x in set(tuple(x) for x in fin_pos)]\r\n \r\nsa = ' '.join(str(r) for v in unique_data for r in v)\r\nsa = set(sa.split(' '))\r\n\r\n#### Processing on EBS Input Data file to extract dump of unique keywords\r\n\r\niob_tagged1 = []\r\nfor i in range(0,len(df['PMHD_TA005_GRP_T_DES'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df['PMHD_TA005_GRP_T_DES'][i])))\r\n iob_tagged1.append(tree2conlltags(ne_tree))\r\n \r\n\r\nr1 = []\r\nfor i in range(0,len(iob_tagged1)):\r\n r1.append([i[0] for i in iob_tagged1[i]])\r\n \r\nr2 = []\r\nfor i in range(0,len(iob_tagged1)):\r\n r2.append([i[1] for i in iob_tagged1[i]])\r\n \r\nr3 = []\r\nfor i in range(0,len(iob_tagged1)):\r\n r3.append([i[2] for i in iob_tagged1[i]])\r\n\r\nr01 = []\r\nfor i in range(0,len(r1)):\r\n r01.append(set(r1[i]))\r\n \r\nr11 = []\r\nfor i in range(0,len(r2)):\r\n r11.append(set(r2[i]))\r\n\r\nr22 = []\r\nfor i in range(0,len(r3)):\r\n r22.append(set(r3[i]))\r\n\r\nr_l01 = unique(r01) \r\nr_l1 = unique(r11)\r\nr_l2 = unique(r22)\r\n\r\nfin_pos1 = []\r\nfor i in range(0,len(iob_tagged1)): \r\n #print(i)\r\n if len(iob_tagged1[i])==0:\r\n fin_pos1.append('')\r\n else:\r\n fin_pos1.append(list(zip(*iob_tagged1[i]))[0])\r\n \r\n\r\nunique_data1 = [list(x) for x in set(tuple(x) for x in fin_pos1)]\r\n\r\nsa1 = ' '.join(str(r) for v in unique_data1 for r in v)\r\nsa1 = set(sa1.split(' ')) \r\n\r\n##### Processing on Instrument list Input Data file to extract dump of unique keywords\r\n\r\niob_tagged2 = []\r\nfor i in range(0,len(df1['System code'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df1['System code'][i])))\r\n iob_tagged2.append(tree2conlltags(ne_tree))\r\n\r\n \r\nt1 = []\r\nfor i in range(0,len(iob_tagged2)):\r\n t1.append([i[0] for i in iob_tagged2[i]])\r\n \r\nt2 = []\r\nfor i in range(0,len(iob_tagged2)):\r\n t2.append([i[1] for i in iob_tagged2[i]])\r\n \r\nt3 = []\r\nfor i in range(0,len(iob_tagged2)):\r\n t3.append([i[2] for i 
in iob_tagged2[i]])\r\n \r\nt01 = []\r\nfor i in range(0,len(t1)):\r\n t01.append(set(t1[i]))\r\n \r\nt11 = []\r\nfor i in range(0,len(t2)):\r\n t11.append(set(t2[i]))\r\n\r\nt22 = []\r\nfor i in range(0,len(t3)):\r\n t22.append(set(t3[i]))\r\n\r\nt_l01 = unique(t01) \r\nt_l1 = unique(t11)\r\nt_l2 = unique(t22)\r\n\r\nfin_pos2 = []\r\nfor i in range(0,len(iob_tagged2)): \r\n #print(i)\r\n if len(iob_tagged2[i])==0:\r\n fin_pos2.append('')\r\n else:\r\n fin_pos2.append(list(zip(*iob_tagged2[i]))[0])\r\n \r\nunique_data2 = [list(x) for x in set(tuple(x) for x in fin_pos2)]\r\n\r\nsa2 = ' '.join(str(r) for v in unique_data2 for r in v)\r\nsa2 = set(sa2.split(' ')) \r\n\r\n#### Processing on Instrument list Input Data file to extract dump of unique keywords\r\n\r\niob_tagged3 = []\r\nfor i in range(0,len(df1['Type desc.'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df1['Type desc.'][i])))\r\n iob_tagged3.append(tree2conlltags(ne_tree))\r\n\r\n \r\nu1 = []\r\nfor i in range(0,len(iob_tagged3)):\r\n u1.append([i[0] for i in iob_tagged3[i]])\r\n \r\nu2 = []\r\nfor i in range(0,len(iob_tagged3)):\r\n u2.append([i[1] for i in iob_tagged3[i]])\r\n \r\nu3 = []\r\nfor i in range(0,len(iob_tagged3)):\r\n u3.append([i[2] for i in iob_tagged3[i]])\r\n \r\nu01 = []\r\nfor i in range(0,len(u1)):\r\n u01.append(set(u1[i]))\r\n \r\nu11 = []\r\nfor i in range(0,len(u2)):\r\n u11.append(set(u2[i]))\r\n\r\nu22 = []\r\nfor i in range(0,len(u3)):\r\n u22.append(set(u3[i]))\r\n\r\nu_l01 = unique(u01) \r\nu_l1 = unique(u11)\r\nu_l2 = unique(u22)\r\n\r\nfin_pos3 = []\r\nfor i in range(0,len(iob_tagged3)): \r\n #print(i)\r\n if len(iob_tagged3[i])==0:\r\n fin_pos3.append('')\r\n else:\r\n fin_pos3.append(list(zip(*iob_tagged3[i]))[0])\r\n \r\nunique_data3 = [list(x) for x in set(tuple(x) for x in fin_pos3)]\r\n\r\nsa3 = ' '.join(str(r) for v in unique_data3 for r in v)\r\nsa3 = set(sa3.split(' ')) \r\n \r\n#### Processing on Instrument list Input Data file to extract dump of unique keywords\r\n\r\niob_tagged4 = []\r\nfor i in range(0,len(df1['Service'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df1['Service'][i])))\r\n iob_tagged4.append(tree2conlltags(ne_tree))\r\n\r\n \r\nv1 = []\r\nfor i in range(0,len(iob_tagged4)):\r\n v1.append([i[0] for i in iob_tagged4[i]])\r\n \r\nv2 = []\r\nfor i in range(0,len(iob_tagged4)):\r\n v2.append([i[1] for i in iob_tagged4[i]])\r\n \r\nv3 = []\r\nfor i in range(0,len(iob_tagged4)):\r\n v3.append([i[2] for i in iob_tagged4[i]])\r\n \r\nv01 = []\r\nfor i in range(0,len(v1)):\r\n v01.append(set(v1[i]))\r\n \r\nv11 = []\r\nfor i in range(0,len(v2)):\r\n v11.append(set(v2[i]))\r\n\r\nv22 = []\r\nfor i in range(0,len(v3)):\r\n v22.append(set(v3[i]))\r\n\r\nv_l01 = unique(v01) \r\nv_l1 = unique(v11)\r\nv_l2 = unique(v22)\r\n\r\nfin_pos4 = []\r\nfor i in range(0,len(iob_tagged4)): \r\n #print(i)\r\n if len(iob_tagged4[i])==0:\r\n fin_pos4.append('')\r\n else:\r\n fin_pos4.append(list(zip(*iob_tagged4[i]))[0])\r\n \r\nunique_data4 = [list(x) for x in set(tuple(x) for x in fin_pos4)]\r\n\r\nsa4 = ' '.join(str(r) for v in unique_data4 for r in v)\r\nsa4 = set(sa4.split(' ')) \r\n \r\n#### Processing on Equipment list Input Data file to extract dump of unique keywords\r\n\r\n\r\ndf2['Description'] = df2['Description'].replace(np.nan, 'NA', regex=True)\r\niob_tagged5 = []\r\nfor i in range(0,len(df2['Description'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df2['Description'][i])))\r\n iob_tagged5.append(tree2conlltags(ne_tree))\r\n\r\n 
\r\na1 = []\r\nfor i in range(0,len(iob_tagged5)):\r\n a1.append([i[0] for i in iob_tagged5[i]])\r\n \r\na2 = []\r\nfor i in range(0,len(iob_tagged5)):\r\n a2.append([i[1] for i in iob_tagged5[i]])\r\n \r\na3 = []\r\nfor i in range(0,len(iob_tagged5)):\r\n a3.append([i[2] for i in iob_tagged5[i]])\r\n \r\na01 = []\r\nfor i in range(0,len(a1)):\r\n a01.append(set(a1[i]))\r\n \r\na11 = []\r\nfor i in range(0,len(a2)):\r\n a11.append(set(a2[i]))\r\n\r\na22 = []\r\nfor i in range(0,len(a3)):\r\n a22.append(set(a3[i]))\r\n\r\na_l01 = unique(a01) \r\na_l1 = unique(a11)\r\na_l2 = unique(a22)\r\n\r\nfin_pos5 = []\r\nfor i in range(0,len(iob_tagged5)): \r\n #print(i)\r\n if len(iob_tagged5[i])==0:\r\n fin_pos5.append('')\r\n else:\r\n fin_pos5.append(list(zip(*iob_tagged5[i]))[0])\r\n\r\nunique_data5 = [list(x) for x in set(tuple(x) for x in fin_pos5)]\r\n\r\nsa5 = ' '.join(str(r) for v in unique_data5 for r in v)\r\nsa5 = set(sa5.split(' ')) \r\n\r\n#### Processing on Eqipment list Input Data file to extract dump of unique keywords\r\n\r\ndf2['Type'] = df2['Type'].replace(np.nan, 'NA', regex=True)\r\niob_tagged6 = []\r\nfor i in range(0,len(df2['Type'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df2['Type'][i])))\r\n iob_tagged6.append(tree2conlltags(ne_tree))\r\n\r\n \r\nb1 = []\r\nfor i in range(0,len(iob_tagged6)):\r\n b1.append([i[0] for i in iob_tagged6[i]])\r\n \r\nb2 = []\r\nfor i in range(0,len(iob_tagged6)):\r\n b2.append([i[1] for i in iob_tagged6[i]])\r\n \r\nb3 = []\r\nfor i in range(0,len(iob_tagged6)):\r\n b3.append([i[2] for i in iob_tagged6[i]])\r\n \r\nb01 = []\r\nfor i in range(0,len(b1)):\r\n b01.append(set(b1[i]))\r\n \r\nb11 = []\r\nfor i in range(0,len(b2)):\r\n b11.append(set(b2[i]))\r\n\r\nb22 = []\r\nfor i in range(0,len(b3)):\r\n b22.append(set(b3[i]))\r\n\r\nb_l01 = unique(b01) \r\nb_l1 = unique(b11)\r\nb_l2 = unique(b22)\r\n\r\n\r\nfin_pos6 = []\r\nfor i in range(0,len(iob_tagged6)): \r\n #print(i)\r\n if len(iob_tagged6[i])==0:\r\n fin_pos6.append('')\r\n else:\r\n fin_pos6.append(list(zip(*iob_tagged6[i]))[0])\r\n\r\nunique_data6 = [list(x) for x in set(tuple(x) for x in fin_pos6)]\r\n\r\nsa6 = ' '.join(str(r) for v in unique_data6 for r in v)\r\nsa6 = set(sa6.split(' ')) \r\n\r\n#### Processing on Eqipment list Input Data file to extract dump of unique keywords\r\n\r\ndf2['Sys. Name'] = df2['Sys. Name'].replace(np.nan, 'NA', regex=True)\r\niob_tagged7 = []\r\nfor i in range(0,len(df2['Sys. Name'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df2['Sys. 
Name'][i])))\r\n iob_tagged7.append(tree2conlltags(ne_tree))\r\n\r\n \r\nc1 = []\r\nfor i in range(0,len(iob_tagged7)):\r\n c1.append([i[0] for i in iob_tagged7[i]])\r\n \r\nc2 = []\r\nfor i in range(0,len(iob_tagged7)):\r\n c2.append([i[1] for i in iob_tagged7[i]])\r\n \r\nc3 = []\r\nfor i in range(0,len(iob_tagged7)):\r\n c3.append([i[2] for i in iob_tagged7[i]])\r\n \r\nc01 = []\r\nfor i in range(0,len(c1)):\r\n c01.append(set(c1[i]))\r\n \r\nc11 = []\r\nfor i in range(0,len(c2)):\r\n c11.append(set(c2[i]))\r\n\r\nc22 = []\r\nfor i in range(0,len(c3)):\r\n c22.append(set(c3[i]))\r\n\r\nc_l01 = unique(c01) \r\nc_l1 = unique(c11)\r\nc_l2 = unique(c22)\r\n\r\nfin_pos7 = []\r\nfor i in range(0,len(iob_tagged7)): \r\n #print(i)\r\n if len(iob_tagged7[i])==0:\r\n fin_pos7.append('')\r\n else:\r\n fin_pos7.append(list(zip(*iob_tagged7[i]))[0])\r\n\r\nunique_data7 = [list(x) for x in set(tuple(x) for x in fin_pos7)]\r\n\r\nsa7 = ' '.join(str(r) for v in unique_data7 for r in v)\r\nsa7 = set(sa7.split(' ')) \r\nsa7 = tuple(sa7)\r\n\r\n### keywords extracted from from all input files.\r\n\r\nfrom itertools import chain\r\nkey_dump = set(chain(sa,sa1,sa2,sa3,sa4,sa5,sa6,sa7))\r\nkey_dump = tuple(key_dump)\r\nkey_dump = [w.lower() for w in key_dump]\r\n\r\n##### Processing on BHGE main Input Data file to extract Named Entities\r\n\r\n#df_fin = pd.read_csv(\"C:/Users/mayur.v/Desktop/BHGE/BHGE_Dataset.csv\",delimiter = ',',encoding = 'latin1')\r\ndf_fin = pd.read_csv(sys.argv[4],delimiter = ',',encoding = 'latin1')\r\n\r\n\r\ndef pre_process(text):\r\n \r\n #remove tags\r\n text=re.sub(\"(\\\\d|\\\\W)+\",\" \",text)\r\n \r\n #text = re.sub(r'\\b[A-Z]+\\b', '', text)\r\n\r\n text = ' '.join( [w for w in text.split() if len(w)>2] )\r\n # remove special characters and digits\r\n \r\n return text\r\n \r\ndf_fin['Problem Description'] = df_fin['Problem Description'].apply(lambda x:pre_process(x))\r\n\r\nlist_pos = []\r\nfor i in range(0,len(df_fin['Problem Description'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df_fin['Problem Description'][i])))\r\n list_pos.append(tree2conlltags(ne_tree))\r\n\r\n\r\npos_1 = []\r\nfor i in range(0,len(list_pos)):\r\n #print(i)\r\n pos_1.append(list(filter(lambda x: (x[1] == 'NNP' or x[1] == 'VBG' or x[1] == 'NNS') and (x[2] == 'O' or x[2] == 'I-PERSON' or x[2] == 'B-PERSON'), list_pos[i])))\r\n\r\n \r\npos = []\r\nfor i in range(0,len(list_pos)):\r\n #print(i)\r\n pos.append(list(filter(lambda x: (x[1] == 'NNP' or x[1] == 'NN' or x[1] == 'VBP' or x[1] == 'CC' or x[1] == 'NNS' or x[1] == 'VBG' or x[1] == 'VBP' or x[1] == 'DT' or x[1] == 'JJ' or x[1] == 'IN') and\r\n (x[2] == 'O' or x[2] == 'B-ORGANIZATION' or x[2] == 'B-GPE' or x[2] == 'B-GSP' or x[2] == 'B-GPE' or x[2] == 'I-PERSON' or x[2] == 'B-PERSON'),\r\n list_pos[i])))\r\n \r\nfin_pos_df = []\r\nfor i in range(0,len(pos)):\r\n #print(i)\r\n if len(pos[i])==0:\r\n fin_pos_df.append('')\r\n else:\r\n fin_pos_df.append(list(zip(*pos[i]))[0])\r\n \r\n\r\nds = [[x.lower() for x in element] for element in fin_pos_df]\r\n\r\nfin_key_pos = []\r\nfor i in range(0,len(ds)):\r\n #print(i)\r\n fin_key_pos.append(list(set(ds[i])&set(key_dump)))\r\n\r\n\r\nfin_key_pos_f1 = []\r\nfor i in range(0,len(ds)):\r\n #print(i)\r\n fin_key_pos_f1.append(list(set(fin_key_pos[i]+ds[i])))\r\n\r\n\r\nFin_Named_Entity_list_11 = [] \r\nfor i in range(0,len(fin_key_pos_f1)):\r\n #print(i)\r\n Fin_Named_Entity_list_11.append(' '.join(fin_key_pos_f1[i]))\r\n \r\n# Remove Stopword \r\nstop_words = 
set(stopwords.words('english')) \r\n\r\nword_tok = [] \r\nfor i in range(0,len(Fin_Named_Entity_list_11)):\r\n #print(i)\r\n word_tok.append(word_tokenize(Fin_Named_Entity_list_11[i]))\r\n \r\n# Domain specific stopwords\r\nstop = ['and','number','all','look','note','like','attache','consist','once','suitable','enclosure','some','request','kindly','kind','please','are','eps','from','with',\r\n 'uld','used','additional','iecx','upon','able','oct','order','pgt','details','bhge','while','both','ptg','furthermore','current','gtg','fuond','allowing','tomorrow','observations','iecex','doesnt','does','cpy','list','attach','reach','withouts','fact','expected',\r\n 'or','the','for','have','way','due','therefore','true','matching','between','equal','valid','validity','arrangement','proven','sep','aplng','further',\r\n 'cant','running','form','types','site','found','other','attempts','several','despite','believe','above','into','hence','kindly','this','name','sac','dear',\r\n 'providing','need','pre','that','after','but','provide','ready','days','per','reply','From','Considering','considering','allow',\r\n 'approval','confirm','otherwise','around','differences','among','view','information','customer','total','desired','against','according','present','your','house','anything','following','requriment','taken',\r\n 'april','about','regards','actual','regarding','today','year','inhibit','similar','different','updated','first','attachment','next','some',\r\n 'need','during','mkvie','purpose','putting', 'bring','thanks','such','various','able','reference','readings','permited','using','call','could','required','neither','mismatch','comparing','require','every','projects','documents','possible','particular','section','thing','improve','because','top','items','item','detail','since','any','advise','want','info','email','som','below','further','see','new','use','page','same','each','attached','refer','check','without','pictures','feedback',\r\n 'ourselves', 'hers', 'between', 'yourself','followings', 'but', 'again', 'there', 'about', 'once', 'during','understand',\r\n 'out', 'very', 'having', 'with', 'they', 'own', 'an', 'be', 'some', 'for', 'do', 'its', 'yours',\r\n 'such', 'into', 'of', 'most', 'itself', 'other', 'off', 'is', 's', 'am', 'or', 'who', 'as', 'from',\r\n 'him', 'each', 'the', 'themselves', 'until', 'below', 'are', 'we', 'these', 'your', 'his', 'through',\r\n 'don', 'nor', 'me', 'were', 'her', 'more', 'himself', 'this', 'down', 'should', 'our', 'their',\r\n 'while', 'above', 'both', 'up', 'to', 'ours', 'had', 'she', 'all', 'no', 'when', 'at', 'any', \r\n 'before', 'them', 'same', 'and', 'been', 'have', 'in', 'will', 'on', 'does', 'yourselves', 'then',\r\n 'that', 'because', 'what', 'over', 'why', 'so', 'can', 'did', 'not', 'now', 'under', 'he', 'you',\r\n 'herself', 'has', 'just', 'where', 'too', 'only', 'myself', 'which', 'those', 'i', 'after', 'few',\r\n 'whom', 't', 'being', 'if', 'theirs', 'my', 'against', 'a', 'by', 'doing', 'it', 'how', 'further',\r\n 'was', 'here', 'than']\r\n\r\n\r\nner_sentence = []\r\nfor i in range(0,len(word_tok)): \r\n ner_sentence.append([i for i in word_tok[i] if i not in stop])\r\n\r\nner_sentence1 = [] \r\nfor i in range(0,len(ner_sentence)):\r\n #print(i)\r\n ner_sentence1.append(' '.join(ner_sentence[i]))\r\n\r\n \r\nDF_NER = pd.DataFrame(ner_sentence1)\r\ndf_fin['Named_Entity'] = DF_NER\r\n\r\n##### Processing on BHGE main Input Data file to extract dump of unique keywords\r\n\r\n#df_fin = 
pd.read_csv(\"C:/Users/mayur.v/Desktop/BHGE/BHGE_Dataset.csv\",delimiter = ',',encoding = 'latin1')\r\n#df_fin = pd.read_csv(\"C:/Users/mayur.v/Desktop/BHGE/BHGE_Dataset.csv\",delimiter = ',',encoding = 'latin1')\r\n\r\ndf_fin['Expected Deliverable and proposed solution'] = df_fin['Expected Deliverable and proposed solution'].replace(np.nan, 'NA', regex=True)\r\n\r\ndf_fin['Expected Deliverable and proposed solution'] = df_fin['Expected Deliverable and proposed solution'].apply(lambda x:pre_process(x))\r\n\r\n\r\n### extarct named entities\r\n\r\nlist_pos_e1 = []\r\nfor i in range(0,len(df_fin['Expected Deliverable and proposed solution'])):\r\n #print(i)\r\n ne_tree = ne_chunk(pos_tag(word_tokenize(df_fin['Expected Deliverable and proposed solution'][i])))\r\n list_pos_e1.append(tree2conlltags(ne_tree))\r\n\r\n\r\npos_e1 = []\r\nfor i in range(0,len(list_pos_e1)):\r\n #print(i)\r\n pos_e1.append(list(filter(lambda x: (x[1] == 'NNP' or x[1] == 'NN' or x[1] == 'VBP' or x[1] == 'CC' or x[1] == 'NNS' or x[1] == 'VBG' or x[1] == 'VBP' or x[1] == 'DT' or x[1] == 'JJ' or x[1] == 'IN') and\r\n (x[2] == 'O' or x[2] == 'B-ORGANIZATION' or x[2] == 'B-GPE' or x[2] == 'B-GSP' or x[2] == 'B-GPE' or x[2] == 'I-PERSON' or x[2] == 'B-PERSON'),\r\n list_pos_e1[i])))\r\n\r\n \r\nfin_pos_df_e1 = []\r\nfor i in range(0,len(pos_e1)):\r\n #print(i)\r\n if len(pos_e1[i])==0:\r\n fin_pos_df_e1.append('')\r\n else:\r\n fin_pos_df_e1.append(list(zip(*pos_e1[i]))[0])\r\n \r\n\r\nds = [[x.lower() for x in element] for element in fin_pos_df_e1]\r\n\r\nfin_key_pos = []\r\nfor i in range(0,len(ds)):\r\n #print(i)\r\n fin_key_pos.append(list(set(ds[i])&set(key_dump)))\r\n\r\n\r\nfin_key_pos_ee1 = []\r\nfor i in range(0,len(ds)):\r\n #print(i)\r\n #fin_key_pos_f1.append(list(set(fin_key_pos[i]+ds[1])))\r\n fin_key_pos_ee1.append(list(set(fin_key_pos[i]+ds[i])))\r\n\r\nFin_Named_Entity_list_e1 = [] \r\nfor i in range(0,len(fin_key_pos_ee1)):\r\n #print(i)\r\n Fin_Named_Entity_list_e1.append(' '.join(fin_key_pos_ee1[i]))\r\n \r\nword_tok_e1 = [] \r\nfor i in range(0,len(Fin_Named_Entity_list_e1)):\r\n #print(i)\r\n word_tok_e1.append(word_tokenize(Fin_Named_Entity_list_e1[i]))\r\n \r\n\r\nner_sentence_e1 = []\r\nfor i in range(0,len(word_tok_e1)): \r\n ner_sentence_e1.append([i for i in word_tok_e1[i] if i not in stop])\r\n\r\n \r\nner_sentence1_e1 = [] \r\nfor i in range(0,len(ner_sentence_e1)):\r\n #print(i)\r\n ner_sentence1_e1.append(','.join(ner_sentence_e1[i]))\r\n\r\n \r\nDF_NER1 = pd.DataFrame(ner_sentence1_e1)\r\ndf_fin['Named_Entity_Expected Deliverable_and proposed solution'] = DF_NER1\r\n#df_fin.to_csv('C:/Users/mayur.v/Desktop/BHGE/Named_Entity_Model_New_v3.2.csv')\r\n\r\n## Maaping of extracted entities with EBS list\r\n#sys.argv[0]\r\n#df3 = pd.read_csv(\"C:/Users/mayur.v/Desktop/BHGE/Issue Trending/Issue Trending/EBS_PGT25.csv\",delimiter = ',',encoding = 'latin1')\r\n\r\n## pre-processing function\r\ndef pre_process1(text):\r\n \r\n #remove tags\r\n text=re.sub(\"(\\\\d|\\\\W)+\",\" \",text)\r\n \r\n text = re.sub(r'(|)', '', text)\r\n\r\n #text = ' '.join( [w for w in text.split() if len(w)>2] )\r\n\r\n return text\r\n \r\ndf['PMHD_TA005_GRP_T_DES'] = df['PMHD_TA005_GRP_T_DES'].apply(lambda x:pre_process(x))\r\n\r\n \r\ncompo_list = []\r\nfor i in range(0,len(df['PMHD_TA005_GRP_T_DES'])):\r\n #print(i)\r\n compo_list.append(df['PMHD_TA005_GRP_T_DES'][i])\r\n \r\ncompo_list = [x.lower() for x in compo_list]\r\n\r\nmapped_entities = [] \r\nfor i in range(0,len(df_fin['Named_Entity'])):\r\n #print(i)\r\n 
mapped_entities.append(set(process.extract(df_fin['Named_Entity'][i], compo_list, limit=5)))\r\n\r\nmapped_entities_list = list(mapped_entities)\r\n\r\nextracted_ner_entities = []\r\nfor i in range(0,len(mapped_entities_list)):\r\n #print(i)\r\n extracted_ner_entities.append(''.join(str(mapped_entities_list[i])))\r\n\r\n \r\nextracted_ner = pd.DataFrame(extracted_ner_entities)\r\ndf_fin['Named_Entity_EBS_Mapping'] = extracted_ner\r\ndf_fin.to_csv(sys.argv[4])\r\n","sub_path":"fin_ner.py","file_name":"fin_ner.py","file_ext":"py","file_size_in_byte":21813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"538276001","text":"\"\"\" Run the program with a GUI. \"\"\"\r\n\r\nimport docx_to_xlsx\r\nimport os\r\nimport subprocess\r\nimport re\r\nimport time\r\nimport tkinter as tk\r\nfrom tkinter import filedialog, Frame, BOTH, Button, RIGHT, RAISED,\\\r\n LEFT\r\n\r\n\r\nclass TrendProg(Frame):\r\n\r\n def __init__(self, parent):\r\n Frame.__init__(self, parent, background='white')\r\n # saved reference to parent widget. \"Tk root window\"\r\n self.parent = parent\r\n self._workbook = None\r\n self._file_path = None\r\n self._folder_path = None\r\n\r\n # set properties of buttons\r\n self.frame_1 = Frame(self, relief=RAISED)\r\n self.run_button = Button(self, text='Run', width=10,\r\n command=self.run_program)\r\n self.file_button = Button(self.frame_1, text='Select File',\r\n width=15, command=self.get_file)\r\n self.folder_button = Button(self.frame_1, text='Select Folder',\r\n width=15, command=self.get_folder)\r\n self.close_button = Button(self, text='Close', width=10,\r\n command=self.quit)\r\n self.init_gui()\r\n\r\n def init_gui(self):\r\n self.parent.title('Trending Analysis')\r\n # fill frame to take up whole of root window\r\n self.pack(fill=BOTH, expand=True)\r\n self.frame_1.pack(fill=BOTH, expand=True)\r\n\r\n # put buttons on GUI\r\n self.folder_button.pack(side=RIGHT, padx=5)\r\n self.file_button.pack(side=LEFT, padx=5, pady=5)\r\n self.close_button.pack(side=RIGHT, padx=5, pady=5)\r\n self.run_button.pack(side=RIGHT, pady=5)\r\n\r\n def get_file(self):\r\n self._file_path = filedialog.askopenfilename()\r\n if self._file_path != '':\r\n self.file_button.config(text='File Selected!')\r\n self.file_button.pack(fill=BOTH, expand=True, padx=5, pady=5)\r\n self.folder_button.destroy()\r\n\r\n def get_folder(self):\r\n self._folder_path = filedialog.askdirectory()\r\n if self._folder_path != '':\r\n self.folder_button.config(text='Folder Selected!')\r\n self.folder_button.pack(fill=BOTH, expand=True, padx=5, pady=5)\r\n self.file_button.destroy()\r\n\r\n def run_program(self):\r\n workbook = 'Draft_Detail_Findings.xlsx'\r\n worksheet = 'Template'\r\n # user selected one CAPA\r\n print('=' * 75)\r\n if self._folder_path == '' or self._folder_path is None:\r\n self._file_path = self.convert_to_docx(self._file_path)\r\n docx_to_xlsx.main(self._file_path, workbook, worksheet)\r\n print('=' * 75)\r\n # user selected a folder of CAPA's\r\n elif self._file_path == '' or self._file_path is None:\r\n for f in os.listdir(self._folder_path):\r\n # get full path name\r\n file_name = str(self._folder_path + '/' + f)\r\n file_name = self.convert_to_docx(file_name)\r\n docx_to_xlsx.main(file_name, workbook, worksheet)\r\n print('=' * 75)\r\n\r\n # get ready to end the program\r\n # pd = project_data.TrendData(workbook, worksheet)\r\n print('Done.')\r\n self.frame_1.destroy()\r\n self.run_button.destroy()\r\n 
self.close_button.config(text='Done.')\r\n self.close_button.pack(fill=BOTH, expand=True, padx=10, pady=10)\r\n\r\n @classmethod\r\n def convert_to_docx(cls, file_selected):\r\n \"\"\" Check that file(s) selected is .docx NOT .doc and convert if needed. \"\"\"\r\n if str(file_selected).endswith('.docx'):\r\n return file_selected\r\n else:\r\n new_file_name = re.sub('.doc', '.docx', file_selected)\r\n # full path to wordconv.exe\r\n word_conv = r'C:\\Program Files (x86)\\Microsoft Office\\Office12\\wordconv.exe'\r\n commands = ['wordconv.exe', '-oice', '-nme', file_selected, new_file_name]\r\n try:\r\n print('CONVERTING {}'.format(file_selected))\r\n subprocess.Popen(commands, executable=word_conv)\r\n # wait for converted file to be created\r\n while not os.path.exists(new_file_name):\r\n time.sleep(1.5)\r\n print('REMOVING old .doc file ...')\r\n os.remove(file_selected)\r\n return new_file_name\r\n except OSError:\r\n print('FAILED to convert file. Check to see if it exists.')\r\n\r\n\r\ndef main():\r\n \"\"\" Run the gui and program. \"\"\"\r\n root = tk.Tk()\r\n root.geometry(\"250x100+300+300\")\r\n TrendProg(root)\r\n root.mainloop()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"trend.py","file_name":"trend.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"501370466","text":"# Code from https://github.com/rusty1s/pytorch_geometric\nimport torch\nfrom torch import Tensor\nfrom copy import copy\nimport numpy as np\nimport random\nfrom typing import Optional, Union, Tuple\nfrom torch_sparse_reimpl import coalesce\n\n\ndef to_undirected(edge_index: Tensor, edge_attr: Optional[Tensor] = None,\n num_nodes: Optional[int] = None,\n reduce: str = \"add\") -> Union[Tensor, Tuple[Tensor, Tensor]]:\n r\"\"\"Converts the graph given by :attr:`edge_index` to an undirected graph,\n so that :math:`(j,i) \\in \\mathcal{E}` for every edge :math:`(i,j) \\in\n \\mathcal{E}`.\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor, optional): Edge weights or multi-dimensional\n edge features. (default: :obj:`None`)\n num_nodes (int, optional): The number of nodes, *i.e.*\n :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)\n reduce (string, optional): The reduce operation to use for merging edge\n features. 
(default: :obj:`\"add\"`)\n :rtype: (:class:`LongTensor`, :class:`Tensor`) if :obj:`edge_attr` is not\n :obj:`None`, otherwise :class:`LongTensor`\n \"\"\"\n # Maintain backward compatibility to `to_undirected(edge_index, num_nodes)`\n if isinstance(edge_attr, int):\n edge_attr = None\n num_nodes = edge_attr\n\n num_nodes = maybe_num_nodes(edge_index, num_nodes)\n\n row, col = edge_index\n row, col = torch.cat([row, col], dim=0), torch.cat([col, row], dim=0)\n edge_index = torch.stack([row, col], dim=0)\n if edge_attr is not None:\n edge_attr = torch.cat([edge_attr, edge_attr], dim=0)\n edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,\n num_nodes, reduce)\n\n if edge_attr is None:\n return edge_index\n else:\n return edge_index, edge_attr\n\n\ndef sample(high: int, size: int, device=None):\n size = min(high, size)\n return torch.tensor(random.sample(range(high), size), device=device)\n\n\n@torch.jit._overload\ndef maybe_num_nodes(edge_index, num_nodes=None):\n # type: (Tensor, Optional[int]) -> int\n pass\n\n\n@torch.jit._overload\ndef maybe_num_nodes(edge_index, num_nodes=None):\n # type: (SparseTensor, Optional[int]) -> int\n pass\n\n\ndef maybe_num_nodes(edge_index, num_nodes=None):\n if num_nodes is not None:\n return num_nodes\n elif isinstance(edge_index, Tensor):\n return int(edge_index.max()) + 1\n else:\n return max(edge_index.size(0), edge_index.size(1))\n\n\ndef maybe_num_nodes_dict(edge_index_dict, num_nodes_dict=None):\n num_nodes_dict = {} if num_nodes_dict is None else copy(num_nodes_dict)\n\n found_types = list(num_nodes_dict.keys())\n\n for keys, edge_index in edge_index_dict.items():\n\n key = keys[0]\n if key not in found_types:\n N = int(edge_index[0].max() + 1)\n num_nodes_dict[key] = max(N, num_nodes_dict.get(key, N))\n\n key = keys[-1]\n if key not in found_types:\n N = int(edge_index[1].max() + 1)\n num_nodes_dict[key] = max(N, num_nodes_dict.get(key, N))\n\n return num_nodes_dict\n\n\ndef negative_sampling(edge_index, num_nodes=None, num_neg_samples=None,\n method=\"sparse\", force_undirected=False):\n r\"\"\"Samples random negative edges of a graph given by :attr:`edge_index`.\n\n Args:\n edge_index (LongTensor): The edge indices.\n num_nodes (int, optional): The number of nodes, *i.e.*\n :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)\n num_neg_samples (int, optional): The (approximate) number of negative\n samples to return. If set to :obj:`None`, will try to return a\n negative edge for every positive edge. (default: :obj:`None`)\n method (string, optional): The method to use for negative sampling,\n *i.e.*, :obj:`\"sparse\"` or :obj:`\"dense\"`.\n This is a memory/runtime trade-off.\n :obj:`\"sparse\"` will work on any graph of any size, while\n :obj:`\"dense\"` can perform faster true-negative checks.\n (default: :obj:`\"sparse\"`)\n force_undirected (bool, optional): If set to :obj:`True`, sampled\n negative edges will be undirected. (default: :obj:`False`)\n\n :rtype: LongTensor\n \"\"\"\n\n num_nodes = maybe_num_nodes(edge_index, num_nodes)\n num_neg_samples = num_neg_samples or edge_index.size(1)\n\n # Handle '|V|^2 - |E| < |E|'.\n size = num_nodes * num_nodes\n num_neg_samples = min(num_neg_samples, size - edge_index.size(1))\n\n row, col = edge_index\n\n if force_undirected:\n num_neg_samples = num_neg_samples // 2\n\n # Upper triangle indices: N + ... 
+ 1 = N (N + 1) / 2\n size = (num_nodes * (num_nodes + 1)) // 2\n\n # Remove edges in the lower triangle matrix.\n mask = row <= col\n row, col = row[mask], col[mask]\n\n # idx = N * i + j - i * (i+1) / 2\n idx = row * num_nodes + col - row * (row + 1) // 2\n else:\n idx = row * num_nodes + col\n\n # Percentage of edges to oversample so that we are save to only sample once\n # (in most cases).\n alpha = abs(1 / (1 - 1.1 * (edge_index.size(1) / size)))\n\n if method == 'dense':\n mask = edge_index.new_ones(size, dtype=torch.bool)\n mask[idx] = False\n mask = mask.view(-1)\n\n perm = sample(size, int(alpha * num_neg_samples),\n device=edge_index.device)\n perm = perm[mask[perm]][:num_neg_samples]\n\n else:\n perm = sample(size, int(alpha * num_neg_samples))\n mask = torch.from_numpy(np.isin(perm, idx.to('cpu'))).to(torch.bool)\n perm = perm[~mask][:num_neg_samples].to(edge_index.device)\n\n if force_undirected:\n # (-sqrt((2 * N + 1)^2 - 8 * perm) + 2 * N + 1) / 2\n row = torch.floor((-torch.sqrt((2. * num_nodes + 1.)**2 - 8. * perm) +\n 2 * num_nodes + 1) / 2)\n col = perm - row * (2 * num_nodes - row - 1) // 2\n neg_edge_index = torch.stack([row, col], dim=0).long()\n neg_edge_index = to_undirected(neg_edge_index)\n else:\n row = perm // num_nodes\n col = perm % num_nodes\n neg_edge_index = torch.stack([row, col], dim=0).long()\n\n return neg_edge_index\n\n\ndef add_self_loops(edge_index, edge_weight: Optional[torch.Tensor] = None,\n fill_value: float = 1., num_nodes: Optional[int] = None):\n r\"\"\"Adds a self-loop :math:`(i,i) \\in \\mathcal{E}` to every node\n :math:`i \\in \\mathcal{V}` in the graph given by :attr:`edge_index`.\n In case the graph is weighted, self-loops will be added with edge weights\n denoted by :obj:`fill_value`.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_weight (Tensor, optional): One-dimensional edge weights.\n (default: :obj:`None`)\n fill_value (float, optional): If :obj:`edge_weight` is not :obj:`None`,\n will add self-loops with edge weights of :obj:`fill_value` to the\n graph. (default: :obj:`1.`)\n num_nodes (int, optional): The number of nodes, *i.e.*\n :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)\n\n :rtype: (:class:`LongTensor`, :class:`Tensor`)\n \"\"\"\n N = maybe_num_nodes(edge_index, num_nodes)\n\n loop_index = torch.arange(0, N, dtype=torch.long, device=edge_index.device)\n loop_index = loop_index.unsqueeze(0).repeat(2, 1)\n\n if edge_weight is not None:\n assert edge_weight.numel() == edge_index.size(1)\n loop_weight = edge_weight.new_full((N, ), fill_value)\n edge_weight = torch.cat([edge_weight, loop_weight], dim=0)\n\n edge_index = torch.cat([edge_index, loop_index], dim=1)\n\n return edge_index, edge_weight\n\n\ndef remove_self_loops(edge_index, edge_attr: Optional[torch.Tensor] = None):\n r\"\"\"Removes every self-loop in the graph given by :attr:`edge_index`, so\n that :math:`(i,i) \\not\\in \\mathcal{E}` for every :math:`i \\in \\mathcal{V}`.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor, optional): Edge weights or multi-dimensional\n edge features. 
(default: :obj:`None`)\n\n :rtype: (:class:`LongTensor`, :class:`Tensor`)\n \"\"\"\n mask = edge_index[0] != edge_index[1]\n edge_index = edge_index[:, mask]\n if edge_attr is None:\n return edge_index, None\n else:\n return edge_index, edge_attr[mask]\n\nclass InnerProductDecoder(torch.nn.Module):\n def forward(self, z, edge_index, sigmoid=True):\n r\"\"\"Decodes the latent variables :obj:`z` into edge probabilities for\n the given node-pairs :obj:`edge_index`.\n\n Args:\n z (Tensor): The latent space :math:`\\mathbf{Z}`.\n sigmoid (bool, optional): If set to :obj:`False`, does not apply\n the logistic sigmoid function to the output.\n (default: :obj:`True`)\n \"\"\"\n value = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=1)\n return torch.sigmoid(value) if sigmoid else value\n\n def forward_all(self, z, sigmoid=True):\n r\"\"\"Decodes the latent variables :obj:`z` into a probabilistic dense\n adjacency matrix.\n\n Args:\n z (Tensor): The latent space :math:`\\mathbf{Z}`.\n sigmoid (bool, optional): If set to :obj:`False`, does not apply\n the logistic sigmoid function to the output.\n (default: :obj:`True`)\n \"\"\"\n adj = torch.matmul(z, z.t())\n return torch.sigmoid(adj) if sigmoid else adj\n\n\nclass GAE(torch.nn.Module):\n r\"\"\"The Graph Auto-Encoder model from the\n `\"Variational Graph Auto-Encoders\" `_\n paper based on user-defined encoder and decoder models.\n\n Args:\n encoder (Module): The encoder module.\n decoder (Module, optional): The decoder module. If set to :obj:`None`,\n will default to the\n :class:`torch_geometric.nn.models.InnerProductDecoder`.\n (default: :obj:`None`)\n \"\"\"\n def __init__(self, encoder, decoder=None):\n super(GAE, self).__init__()\n self.encoder = encoder\n self.decoder = InnerProductDecoder() if decoder is None else decoder\n GAE.reset_parameters(self)\n\n\n def encode(self, *args, **kwargs):\n r\"\"\"Runs the encoder and computes node-wise latent variables.\"\"\"\n return self.encoder(*args, **kwargs)\n\n\n def decode(self, *args, **kwargs):\n r\"\"\"Runs the decoder and computes edge probabilities.\"\"\"\n return self.decoder(*args, **kwargs)\n\n\n def recon_loss(self, z, pos_edge_index, neg_edge_index=None):\n r\"\"\"Given latent variables :obj:`z`, computes the binary cross\n entropy loss for positive edges :obj:`pos_edge_index` and negative\n sampled edges.\n\n Args:\n z (Tensor): The latent space :math:`\\mathbf{Z}`.\n pos_edge_index (LongTensor): The positive edges to train against.\n neg_edge_index (LongTensor, optional): The negative edges to train\n against. If not given, uses negative sampling to calculate\n negative edges. 
(default: :obj:`None`)\n \"\"\"\n pos_loss = -torch.log(\n self.decoder(z, pos_edge_index, sigmoid=True) + 1e-15).mean()\n\n # Do not include self-loops in negative samples\n pos_edge_index, _ = remove_self_loops(pos_edge_index)\n pos_edge_index, _ = add_self_loops(pos_edge_index)\n if neg_edge_index is None:\n neg_edge_index = negative_sampling(pos_edge_index, z.size(0))\n neg_loss = -torch.log(1 -\n self.decoder(z, neg_edge_index, sigmoid=True) +\n 1e-15).mean()\n\n return pos_loss + neg_loss\n\n","sub_path":"deeply/torch_reimpl.py","file_name":"torch_reimpl.py","file_ext":"py","file_size_in_byte":11784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"218134741","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nimport csv\n\ndef winner(d):\n wait = WebDriverWait(d, timeout)\n playButton = wait.until(ec.visibility_of_element_located((By.XPATH, \"//*[contains(text(), 'Play (music off)')]\")))\n playButton.click()\n goTo = wait.until(ec.visibility_of_element_located((By.XPATH, \"//*[contains(text(), ' Go to turn...')]\")))\n goTo.click()\n alert = d.switch_to_alert()\n alert.send_keys('10000')\n alert.accept()\n try: \n battleHistory = d.find_elements_by_css_selector(\"div.battle-history\")\n winner = battleHistory[-1].find_element_by_tag_name(\"strong\").text\n player = d.find_element_by_css_selector(\"a.subtle\").text\n if player == winner: \n return 1\n else: \n return 0\n except (NoSuchElementException, TimeoutException) as exception:\n return 1\n\ndef teams(d):\n wait = WebDriverWait(d, timeout)\n wait.until(ec.visibility_of_element_located((By.CSS_SELECTOR, \"div.battle-history\")))\n t = d.find_elements_by_css_selector(\"div.battle-history\")\n playerTeam = t[0].find_element_by_tag_name(\"em\").text\n oppTeam = t[1].find_element_by_tag_name(\"em\").text\n return (playerTeam, oppTeam)\n\nopts = Options()\ndriver = webdriver.Chrome(options=opts)\ndriver.get('https://replay.pokemonshowdown.com')\ntimeout = 10\nwait = WebDriverWait(driver, timeout)\ngenInput = wait.until(ec.visibility_of_element_located((By.NAME, \"format\")))\ngenInput.send_keys(\"gen7ou\")\ngenSearch = wait.until(ec.visibility_of_element_located((By.XPATH, \"//*[contains(text(), 'Search by format')]\")))\ngenSearch.click()\n\ngamesList = wait.until(ec.visibility_of_element_located((By.CSS_SELECTOR, \"ul + ul.linklist\")))\n_wait = WebDriverWait(gamesList, timeout)\n_wait.until(ec.visibility_of_element_located((By.TAG_NAME, \"li\")))\ngames = gamesList.find_elements_by_tag_name(\"li\")\n\nwhile len(games) < 1000: \n more = wait.until(ec.visibility_of_element_located((By.XPATH, \"//*[contains(text(), 'More')]\")))\n more.click()\n gamesList = wait.until(ec.visibility_of_element_located((By.CSS_SELECTOR, \"ul + ul.linklist\")))\n _wait = WebDriverWait(gamesList, timeout)\n _wait.until(ec.visibility_of_element_located((By.TAG_NAME, \"li\")))\n games = gamesList.find_elements_by_tag_name(\"li\")\n\ngameURLS = list(map(lambda x: x.find_element_by_tag_name(\"a\").get_attribute('href'), games))\n\n\nwith open('train.csv', mode='w') as replays:\n replay_writer = csv.writer(replays, delimiter=',', quotechar='\"', 
quoting=csv.QUOTE_MINIMAL)\n\n for game in gameURLS:\n driver.get(game)\n victor = str(winner(driver))\n playerTeam, oppTeam = teams(driver)\n replay_writer.writerow([victor, playerTeam + \" | \" + oppTeam])\n\n\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"214423559","text":"from app.Utils import RespUtil, ErrorUtil\n\nfrom app.Utils.Exceptions.AuthNoneError import AuthNoneError\nfrom app.Utils.Exceptions.BodyFormKeyError import BodyFormKeyError\nfrom app.Utils.Exceptions.BodyRawJsonError import BodyRawJsonError\nfrom app.Utils.Exceptions.QueryError import QueryError\n\ndef register_global_error_handler(error: TypeError):\n \n if isinstance(error, AuthNoneError): # 没有认证头\n return RespUtil.jsonRet(\n dict=ErrorUtil.getErrorMessageJson(error=error, title=\"Auth Token Error\"),\n code=ErrorUtil.UnAuthorized\n )\n elif isinstance(error, BodyFormKeyError): # Body form 参数错误\n return RespUtil.jsonRet(\n dict=ErrorUtil.getErrorMessageJson(error=error, title=\"Body Form Error\"),\n code=ErrorUtil.BadRequest\n )\n elif isinstance(error, BodyRawJsonError): # Body json 参数错误\n return RespUtil.jsonRet(\n dict=ErrorUtil.getErrorMessageJson(error=error, title=\"Body Json Error\"),\n code=ErrorUtil.BadRequest\n )\n elif isinstance(error, QueryError): # 查询参数错误\n return RespUtil.jsonRet(\n dict=ErrorUtil.getErrorMessageJson(error=error, title=\"Query Param Error\"),\n code=ErrorUtil.BadRequest\n )\n else:\n return None","sub_path":"app/Routes/GlobalErrorHandle.py","file_name":"GlobalErrorHandle.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"643036014","text":"#!/usr/bin/env python\nimport time\nimport rospy\nimport csv \nimport sys\nfrom math import pow, sqrt\nfrom diagnostic_msgs.msg import DiagnosticArray\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import Imu\n\nclass Log():\n \"\"\"docstring for Log\"\"\"\n def __init__(self):\n self._sub = rospy.Subscriber(\"/diagnostics\", DiagnosticArray, self.diagnosticsCallback)\n self._sub = rospy.Subscriber(\"/odom\", Odometry, self.odomCallback)\n self._sub = rospy.Subscriber(\"/imu_data\", Imu, self.imuCallback)\n\n self.dict_log = dict()\n self.datas_log = []\n self.time_init = [0, 0, 0]\n self.time_now = [0, 0, 0]\n self.time_use = [0, 0, 0]\n self.time_log = 0\n self.file_name = ''\n self.time_now_name = ''\n self.args_num = len(sys.argv)\n self.ROCS_writer = False\n self.IMU_writer = False\n self.ODOM_writer = False\n self.distance = 0\n self.pose_x_last = 0\n self.pose_y_last = 0\n\n def diagnosticsCallback(self, msg):\n if self.ROCS_writer:\n self.time_log = self.time_now[0] - self.time_init[0]\n if msg.status[0].name == 'hongfu_bms_status_node: BMS':\n self.time_now[0] = msg.header.stamp.secs\n if self.time_init[0] == 0 or self.time_log >= 600:\n ROCS = msg.status[0].values[0].value\n dict_ROCS = {'ROCS':ROCS, 'time1':self.time_use[0]}\n print(dict_ROCS)\n # dict_log.update(xx)\n # self.datas_log.append(self.dict_log)\n self.logWriter('ROCS', 'ab+', 'time1', dict_ROCS)\n self.time_init[0] = self.time_now[0]\n self.time_use[0] += 10\n\n def imuCallback(self, msg):\n # rospy.loginfo('imu1')\n if self.IMU_writer:\n self.time_log = self.time_now[1] - self.time_init[1]\n self.time_now[1] = msg.header.stamp.secs\n if self.time_init[1] == 0 or self.time_log >= 1800:\n x = msg.orientation.x\n y = 
msg.orientation.y\n z = msg.orientation.z\n w = msg.orientation.w\n orientation = 'x:' + str(x) + ' y:' + str(y) + ' z:' + str(z) +' w:' + str(w)\n dict_orientation = {'orientation':orientation, 'time2':self.time_use[1]}\n # self.dict_log.update(dict_orientation)\n print(self.dict_log)\n self.logWriter('orientation', 'ab+', 'time2', dict_orientation)\n self.time_init[1] = self.time_now[1]\n self.time_use[1] += 30\n\n def odomCallback(self, msg):\n if self.ODOM_writer:\n self.pose_x_now = msg.pose.pose.position.x\n self.pose_y_now = msg.pose.pose.position.y\n if self.pose_x_last == 0:\n self.pose_x_last = msg.pose.pose.position.x\n self.pose_y_last = msg.pose.pose.position.y\n distance_last = sqrt(pow(self.pose_x_now - self.pose_x_last, 2) + pow(self.pose_y_now - self.pose_y_last, 2))\n self.distance = self.distance + distance_last\n self.time_log = self.time_now[2] - self.time_init[2]\n self.time_now[2] = msg.header.stamp.secs \n if self.time_init[2] == 0 or self.time_log >= 120:\n dict_odom = {'odom':self.distance, 'time3':self.time_use[2]}\n # self.dict_log.update(dict_odom)\n print(self.dict_log)\n self.logWriter('odom', 'wb+', 'time3', dict_odom)\n self.time_init[2] = self.time_now[2]\n self.time_use[2] += 2\n\n def logWriter(self, status, file_option, time_option, dict_option):\n # rospy.loginfo('writer1')\n if self.file_name == '':\n # rospy.loginfo('writer2')\n self.time_now_name = time.strftime(\"%Y%m%d%H%M\", time.localtime())\n self.file_name = 'caster-test-log-' + status + self.time_now_name + '.csv'\n with open('/home/caster/Documents/caster-tests/logs/'+self.file_name, file_option) as f:\n writer = csv.DictWriter(f, [status, time_option])\n rospy.loginfo('write' + status) \n # if csv.Sniffer().has_header(f.read(1024)):\n if f.read(1024) == '':\n writer.writeheader()\n # for row in dict_log:\n writer.writerow(dict_option)\n # writer.writerows(self.dict_log)\n\n def getArgs(self):\n args_number = len(sys.argv)\n args_list = sys.argv\n if args_number != 1:\n for index in range(args_number):\n if args_list[index] == 'ROCS':\n self.ROCS_writer = True\n elif args_list[index] == 'IMU':\n self.IMU_writer = True\n elif args_list[index] == 'ODOM': \n self.ODOM_writer = True\n else:\n rospy.logerr('please input args')\n rospy.signal_shutdown('please input args')\n\ndef main():\n rospy.init_node('caster_test_log')\n Logs = Log()\n Logs.getArgs()\n rospy.spin()\n\nif __name__ == \"__main__\":\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n\n","sub_path":"caster_app/script/caster_log.py","file_name":"caster_log.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"462111955","text":"# recon range: [-1,1], need * detector radius\nimport numpy as np\nimport scipy, h5py\nimport scipy.stats as stats\nimport os,sys\nimport tables\nimport scipy.io as scio\n#import matplotlib.pyplot as plt\nimport uproot, argparse\nfrom scipy.optimize import minimize\nfrom scipy import interpolate\n#from numpy.polynomial import legendre as LG\nfrom numba import jit\nfrom scipy import special\nfrom scipy.linalg import norm\nfrom scipy.stats import norm as normpdf\nfrom scipy.spatial import distance\nimport warnings\nwarnings.filterwarnings('ignore')\nsys.stdout.flush()\nnp.set_printoptions(precision=3, suppress=True)\n\nGain = np.loadtxt('/mnt/stage/PMTGainCalib_Run0257toRun0271.txt',\\\n skiprows=0, usecols=np.hstack((np.arange(0,8), np.arange(9,14))))\n\n# boundaries\nshell = 
17\n\n@jit(nopython=True)\ndef legval(x, c):\n \"\"\"\n stole from the numerical part of numpy.polynomial.legendre\n\n \"\"\"\n if len(c) == 1:\n return c[0]\n elif len(c) == 2:\n c0 = c[0]\n c1 = c[1]\n else:\n nd = len(c)\n c0 = c[-2]\n c1 = c[-1]\n for i in range(3, len(c) + 1):\n tmp = c0\n nd = nd - 1\n c0 = c[-i] - (c1*(nd - 1))/nd\n c1 = tmp + (c1*x*(2*nd - 1))/nd\n return c0 + c1*x\n\ndef readtpl():\n # Read MC grid recon result\n h = tables.open_file(\"../MC/template.h5\")\n tp = h.root.template[:]\n bins = np.vstack((h.root.x[:], h.root.y[:], h.root.z[:])).T\n h.close()\n return tp, bins\n\ndef load_coeff():\n # spherical harmonics coefficients for time and PEmake \n h = tables.open_file('../calib_JUNO/PE_coeff_1t_20_20.h5','r')\n coeff_pe = h.root.coeff_L[:]\n h.close()\n cut_pe, fitcut_pe = coeff_pe.shape\n\n h = tables.open_file('../calib_JUNO/Time_coeff_1t_18_20.h5','r')\n coeff_time = h.root.coeff_L[:]\n h.close()\n cut_time, fitcut_time = coeff_time.shape\n return coeff_pe, coeff_time, cut_pe, fitcut_pe, cut_time, fitcut_time\n\ndef LoadBase():\n '''\n # to vanish the PMT difference, just a easy script\n # output: relative different bias\n '''\n path = '../calib_JUNO/base.h5'\n h1 = tables.open_file(path)\n base = h1.root.base[:]\n h1.close()\n return base\n\ndef r2c(c):\n v = np.zeros(3)\n v[2] = c[0] * np.cos(c[1]) #z\n rho = c[0] * np.sin(c[1])\n v[0] = rho * np.cos(c[2]) #x\n v[1] = rho * np.sin(c[2]) #y\n return v\n\ndef c2r(c):\n v = np.zeros(3)\n v[0] = norm(c)\n v[1] = np.arccos(c[2]/(v[0]+1e-6))\n #v[2] = np.arctan(c[1]/(c[0]+1e-6)) + (c[0]<0)*np.pi\n v[2] = np.arctan2(c[1],c[0])\n return v\ndef ReadPMT():\n A = np.loadtxt('/junofs/users/junoprotondecay/xubd/harvest/data/geo.csv')\n x = 17.5 * np.sin(A[:,1]/180*np.pi) * np.cos(A[:,2]/180*np.pi)\n y = 17.5 * np.sin(A[:,1]/180*np.pi) * np.sin(A[:,2]/180*np.pi)\n z = 17.5 * np.cos(A[:,1]/180*np.pi)\n \n Gdata = np.loadtxt('/cvmfs/juno.ihep.ac.cn/sl6_amd64_gcc830/Pre-Release/J20v1r0-Pre2/data/Simulation/ElecSim/pmtdata.txt',dtype=bytes).astype('str')\n G = np.setdiff1d(Gdata[:,0].astype('int'),A[:,0])\n \n GG = Gdata[:,0].astype('int')\n id1 = np.setdiff1d(GG,A[:,0])\n \n Gtype = Gdata[GG!=id1,1]\n GGain = Gdata[GG!=id1,2].astype('float')\n Gain = np.zeros_like(GGain)\n #for name in np.unique(Gtype):\n # Gain[Gtype==name] = np.mean(S[Gtype==name])/np.mean(GGain[Gtype==name])*GGain[Gtype==name]\n \n PMT_pos = np.vstack((A[:,0],x,y,z,Gain))\n return PMT_pos.T, Gtype\n\ndef Likelihood(vertex, *args):\n '''\n vertex[1]: r\n vertex[2]: theta\n vertex[3]: phi\n '''\n coeff_time, coeff_pe, PMT_pos, fired_PMT, time_array, pe_array, cut_time, cut_pe= args\n z, x = Calc_basis(vertex, PMT_pos, np.max((cut_time, cut_pe)))\n L1, E = Likelihood_PE(z, x, coeff_pe, pe_array, cut_pe)\n L2 = Likelihood_Time(z, x, vertex[4], coeff_time, fired_PMT, time_array, cut_time)\n return L1 + L2\n\ndef Calc_basis(vertex, PMT_pos, cut): \n # boundary\n v = r2c(vertex[1:4])\n z = norm(v)\n if z > 1-1e-3:\n z = 1-1e-3\n # calculate cos theta\n cos_theta = np.dot(v, PMT_pos.T) / (norm(v)*norm(PMT_pos,axis=1))\n ### Notice: Here may not continuous! 
###\n cos_theta[np.isnan(cos_theta)] = 1 # for v in detector center \n \n # Generate Legendre basis\n # x = legval(cos_theta, np.diag((np.ones(cut)))).T \n x = legval(cos_theta, np.eye(cut).reshape((cut,cut,1))).T\n return z, x\n \ndef Likelihood_PE(z, x, coeff, pe_array, cut):\n # Recover coefficient\n k = legval(z, coeff_pe.T)\n # Recover expect\n expect = np.exp(np.dot(x,k))/2000*np.exp(base)\n # Energy fit \n # nml = np.sum(expect)/np.sum(pe_array)\n # expect = expect/nml\n # k[0] = k[0] - np.log(nml) # 0-th\n\n # Poisson likelihood\n # p(q|lambda) = sum_n p(q|n)p(n|lambda)\n # = sum_n Gaussian(q, n, sigma_n) * exp(-expect) * expect^n / n!\n # int p(q|lambda) dq = sum_n exp(-expect) * expect^n / n! = 1\n a0 = expect ** pe_array\n a2 = np.exp(-expect)\n\n # -ln Likelihood\n L = - np.sum(np.sum(np.log(a0*a2)))\n # avoid inf (very impossible vertex) \n if(np.isinf(L) or L>1e20):\n L = 1e20\n return L, k[0]\n\ndef Likelihood_Time(z, x, T0, coeff, fired_PMT, time_array, cut):\n x = x[fired_PMT][:,:cut]\n \n # Recover coefficient\n k = np.atleast_2d(legval(z, coeff_time.T)).T\n k[0,0] = T0\n \n # Recover expect\n T_i = np.dot(x, k)\n \n # Likelihood\n L = - np.nansum(Likelihood_quantile(time_array, T_i[:,0], 0.1, 2.6))\n return L\n\ndef Likelihood_quantile(y, T_i, tau, ts):\n # less = T_i[y=T_i] - T_i[y>=T_i] \n # R = (1-tau)*np.sum(less) + tau*np.sum(more)\n \n # since lucy ddm is not sparse, use PE as weight\n L = (T_i-y) * (y=T_i) * tau\n #nml = tau*(1-tau)/ts\n #L_norm = np.exp(-np.atleast_2d(L).T) * nml / ts\n #L = np.sum(np.log(L_norm), axis=1)\n L0 = - L/ts\n return L0\n\ndef recon(fid, fout):\n\n '''\n reconstruction\n\n fid: root reference file convert to .h5\n fout: output file\n '''\n event_count = 0\n # Create the output file and the group\n print(fid) # filename\n class ReconData(tables.IsDescription):\n EventID = tables.Int64Col(pos=0) # EventNo\n # inner recon\n E_sph_in = tables.Float16Col(pos=1) # Energy\n x_sph_in = tables.Float16Col(pos=2) # x position\n y_sph_in = tables.Float16Col(pos=3) # y position\n z_sph_in = tables.Float16Col(pos=4) # z position\n t0_in = tables.Float16Col(pos=5) # time offset\n success_in = tables.Int64Col(pos=6) # recon status \n Likelihood_in = tables.Float16Col(pos=7)\n \n # outer recon\n E_sph_out = tables.Float16Col(pos=8) # Energy\n x_sph_out = tables.Float16Col(pos=9) # x position\n y_sph_out = tables.Float16Col(pos=10) # y position\n z_sph_out = tables.Float16Col(pos=11) # z position\n t0_out = tables.Float16Col(pos=12) # time offset\n success_out = tables.Int64Col(pos=13) # recon status \n Likelihood_out = tables.Float16Col(pos=14)\n \n # Create the output file and the group\n h5file = tables.open_file(fout, mode=\"w\", title=\"OneTonDetector\",\n filters = tables.Filters(complevel=9))\n group = \"/\"\n # Create tables\n ReconTable = h5file.create_table(group, \"Recon\", ReconData, \"Recon\")\n recondata = ReconTable.row\n # Loop for event\n\n h1 = tables.open_file(fid,'r')\n\n truthtable = h1.root.SimEvent.SimCDHit\n EventID = truthtable[:]['event_id']\n ChannelID = truthtable[:]['pmt_id']\n HitPosInWindow = truthtable[:]['hit_time']\n h1.close()\n diff = np.min(idx[idx>100000]) - (np.max(idx[idx<100000])+1)\n size = PMT_pos[:,0].shape[0] \n for EventNo in np.unique(EventID):\n fired_PMT = np.zeros(0) # Hit PMT (PMT Seq can be repeated)\n time_array = np.zeros(0, dtype=int) # Time info (Hit number)\n\n pe_array = np.zeros(size) \n x = np.zeros(np.int(max_idx))\n Q = np.bincount(ChannelID[EventID==EventNo])\n x[0:Q.shape[0]] = Q\n\n 
pe_array += x[idx.astype('int')]\n\n fired_PMT = ChannelID[EventID==EventNo].astype('int')\n \n fired_PMT[fired_PMT>100000] = fired_PMT[fired_PMT>100000] - diff\n\n time_array = HitPosInWindow[EventID==EventNo]\n # For hit info\n # pe_array, cid = np.histogram(chl, bins=np.arange(31)) \n # For very rough estimate\n # pe_array = np.round(pe_array)\n\n # calculate pdf template\n '''\n ## DO NOT USE IN LUCY DDM \n N0 = np.atleast_2d(np.round(PE/Gain)).T \\\n - np.atleast_2d(np.arange(-3,3)) # range: -10:10\n sigma_array = sigma/Gain*np.sqrt(N0)\n pdf_weight = normpdf.pdf(np.atleast_2d(PE/Gain).T,\\\n N0, \\\n np.atleast_2d(sigma_array)+1e-6 \\\n )\n pdf_weight[N0<0] = 0\n N0[N0<0] = 0\n '''\n\n if np.sum(pe_array)!=0:\n # Constraints (log scale)\n x0 = c2r(1.5 * np.sum(np.atleast_2d(pe_array).T*PMT_pos,axis=0)/np.sum(pe_array))\n \n E_min = -20\n E_max = 10\n \n # inner recon\n # initial value\n # Energy recon will be removed later\n x0_in = np.zeros((1,5))\n x0_in[0][0] = 7.6 - np.log(2000) + np.log(np.sum(pe_array)/2500)\n x0_in[0][4] = np.quantile(time_array,0.1)\n x0_in[0][1:4] = x0\n print(x0_in)\n result_in = minimize(Likelihood, x0_in, method='SLSQP',bounds=((E_min, E_max), (-1, 1), (None, None), (None, None), (None, None)), args = (coeff_time, coeff_pe,\n PMT_pos, fired_PMT, time_array, pe_array, cut_time, cut_pe), tol=1e-12)\n #z, x = Calc_basis(result_in.x, PMT_pos, cut_pe)\n #L, E_in = Likelihood_PE(z, x, coeff_pe, pe_array, cut_pe)\n\n # xyz coordinate\n in2 = r2c(result_in.x[1:4])*shell\n recondata['x_sph_in'] = in2[0]\n recondata['y_sph_in'] = in2[1]\n recondata['z_sph_in'] = in2[2]\n recondata['success_in'] = result_in.success\n recondata['Likelihood_in'] = result_in.fun\n base_in = legval(result_in.x[0], coeff_pe.T)\n print(f'inner: {np.exp(result_in.x[0] - base_in[0] + np.log(2000))}')\n recondata['E_sph_in'] = 2000*np.exp(result_in.x[0] - base_in[0])\n\n print('inner')\n print(f'Template likelihood: {-np.max(result_in.fun)}')\n print('%d vertex: [%+.2f, %+.2f, %+.2f] radius: %+.2f, Likelihood: %+.6f' % (event_count, in2[0], in2[1], in2[2], norm(in2), result_in.fun))\n \n else:\n recondata['x_sph_in'] = 0\n recondata['y_sph_in'] = 0\n recondata['z_sph_in'] = 0\n recondata['E_sph_in'] = 0\n recondata['success_in'] = 0\n recondata['Likelihood_in'] = 0\n \n recondata['x_sph_out'] = 0\n recondata['y_sph_out'] = 0\n recondata['z_sph_out'] = 0\n recondata['E_sph_out'] = 0\n recondata['success_out'] = 0\n recondata['Likelihood_out'] = 0\n print('empty event!')\n print('-'*60)\n recondata.append()\n event_count = event_count + 1\n sys.stdout.flush()\n\n # Flush into the output file\n ReconTable.flush()\n h5file.close()\n\n# Automatically add multiple root files created a program with max tree size limitation.\n\nif len(sys.argv)!=3:\n print(\"Wront arguments!\")\n print(\"Usage: python Recon.py MCFileName[.root] outputFileName[.h5]\")\n sys.exit(1)\n\n# Read PMT position\nPMT_pos, Gtype = ReadPMT()\nmax_idx = np.max(PMT_pos[:,0]+1)\nidx = PMT_pos[:,0]\nPMT_pos = PMT_pos[:,1:4]\nbase = np.log(LoadBase())\n# Reconstruction\nfid = sys.argv[1] # input file .h5\nfout = sys.argv[2] # output file .h5\ncoeff_pe, coeff_time, cut_pe, fitcut_pe, cut_time, fitcut_time\\\n = load_coeff()\n#tp, bins = readtpl()\n#args = PMT_pos, tp, bins\n\nrecon(fid, fout)\n","sub_path":"Recon1tonSim/Recon_test.py","file_name":"Recon_test.py","file_ext":"py","file_size_in_byte":12113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"333703327","text":"import 
csv\nimport json\nfrom operator import itemgetter, attrgetter, methodcaller\nfrom column_dictionary import column_dic\nfrom liquidation import liquid_test\n\n\n'''\nthis module removes and LP returns data where there was no info within there\nfirst year of the fund close\n'''\ntester = {'tester': 5}\ndef filter_early(fund_data):\n dic = column_dic()\n remove_data = []\n\n\n for key, val in fund_data.iteritems():\n if len(val) == 0:\n remove_data.append(key)\n\n for ids in remove_data:\n del fdata[ids]\n approved_returns = dict()\n fdata = fund_data\n for key, val in fdata.iteritems():\n\n returns_len = len(val)\n early_return = val[-1]\n t_from_close = int(early_return[dic['Close to Report Q']])\n if t_from_close <= 8:\n approved_returns[key] = val\n else:\n pass\n return approved_returns\n","sub_path":"Fund Index Reduex/missingearly.py","file_name":"missingearly.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"570873682","text":"# Imports\nimport sys\nsys.path.append('..')\nsys.path.append('../Biblioteca')\nfrom Tablero import Tablero\nfrom Pieza import Pieza\nfrom Arbitro_Abstracto import Arbitro_Abstracto\n\n# Clase Arbitro de conecta 4\nclass Arbitro_Conecta_4(Arbitro_Abstracto):\n def __init__(self, _jugador1, _jugador2):\n \"\"\"\n Se inicializa el árbitro con los jugadores que recibe como parámetro.\n Se instancia el tablero con el que se trabajará.\n Se establece el turno que empieza (jugador 1) \n\n Parámetros:\n jugador1 -- Primer jugador (contiene el código para enviar mensajes)\n jugador2 -- Segundo jugador (contiene el código para enviar mensajes)\n \"\"\"\n super().__init__(_jugador1, _jugador2)\n self.tablero = Tablero(6,7) # Tablero en el que se trabaja\n\n def turnoActual(self):\n \"\"\"\n Se devuelve el turno actual,para ello, se llama a la función turnoActual del padre (Arbitro_Abstracto)\n \"\"\"\n super().turnoActual()\n\n def cambiarTurno(self):\n \"\"\"\n Se cambia el turno del jugador,para ello, se llama a la función cambiarTurno del padre (Arbitro_Abstracto)\n \"\"\"\n super().cambiarTurno()\n\n def dibujarTablero(self):\n \"\"\"\n Se devuelve la representación del tablero actual\n \n Return:\n self.tablero.tableroToArray() -- Representación del tablero\n \"\"\"\n return self.tablero.tableroToArray()\n\n def realizarMovimiento(self, mov):\n \"\"\"\n Se comprueba si el movimiento pasado por parámetro es correcto.\n Si lo es, se coloca la ficha en la posición indicada por el movimiento.\n Se cambia de turno.\n\n Parámetros:\n mov -- Coordenada de la columna con el destino del movimiento\n \"\"\"\n tab = self.tablero.getTablero()\n\n # Movimiento correcto y actualizado en el tablero\n if (self.comprobarMovimiento(mov)):\n for i in tab.getTamX()-1:\n if(tab[i][mov]==None):\n self.tablero.setPieza(Pieza(self.turno), i,mov)\n return True, self.tablero.getTablero()\n \n # Movimiento incorrecto, vuelve a solicitar el movimiento al jugador.\n else:\n return False,0\n\n def comprobarMovimiento(self, mov):\n \"\"\"\n Se comprueba si el movimiento pasado por parámetros es válido.\n Para ello tiene en cuenta el jugador que ha enviado el movimiento \n y el estado del tablero.\n\n Parámetros:\n mov -- y del destino del movimiento\n\n Return: \n bool -- booleano que indica si es correcto o no\n \"\"\"\n tab = self.tablero.getTablero()\n\n # Si el movimiento es correcto\n if (mov < tab.getTamX() and mov > 0 and tab[tab.getTamY()-1][mov] == None):\n return True # Movimiento 
correcto\n\n else: # Movimiento incorrecto\n return False\n\n def esFin(self):\n \"\"\"\n Se obtiene el tablero y busca jugadas ganadoras o si el tablero está lleno.\n\n Return:\n int -- Entero con el turno del ganador o 0 si es empate \n \"\"\"\n tab = self.tablero.getTablero()\n #Valor que se devolverá en el return(con el turno del ganador o 0 si es empate)\n turno=-1\n \n if(self.tablero.estaLleno()):\n return 0\n \n for i in tab.getTamX()-4: #TODO:El -4 es para que al hacer +3 no salte el error de irse de rango\n for j in tab.getTamY()-4:\n if (tab[i][j]==tab[i+1][j+1]==tab[i+2][j+2]==tab[i+3][j+3]==self.turno):\n turno=self.turno\n \n elif(tab[i][j]==tab[i][j+1]==tab[i][j+2]==tab[i][j+3]==self.turno):\n turno=self.turno\n \n elif(tab[i][j]==tab[i+1][j]==tab[i+2][j]==tab[i+3][j]==self.turno):\n turno=self.turno\n \n return turno\n","sub_path":"src/Servidor/Arbitro_Conecta_4.py","file_name":"Arbitro_Conecta_4.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"426985650","text":"# -*- coding: utf-8 -*-\n\nimport os\n\nfrom dal.identifier import Identifier\nfrom models.song import Song\n\n\nclass Collector():\n FORMATS = ['mp3', 'flac', 'aac', 'opus', 'ogg', 'oga', 'ape', 'mpc', 'wav', 'pcm', 'aiff', 'm4a']\n\n def __init__(self):\n self.identifier = Identifier()\n\n def search_dir(self, work_queue, path_obj):\n found = []\n directory = os.walk(path_obj.path)\n for root, dirs, files in directory:\n for file in files:\n if file.split('.').pop() in self.FORMATS:\n song = Song(os.path.join(root, file))\n self.identifier.identify(song)\n found.append(song)\n work_queue.put(found)\n","sub_path":"dal/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"512532430","text":"import time\n\nimport datetime\n\nclass tweet():\n\n _author = \"\"\n\n _text = \"\"\n\n _age=datetime.datetime.now()\n\n def __init__(self, author, text):\n\n self._author=author\n\n self._text=text\n\n self._age=datetime.datetime.now()\n\n def get_author(self):\n\n return self._author\n\n def get_text(self):\n\n return self._text\n\n def get_age(self):\n\n now=datetime.datetime.now()\n time = now-self._age\n time = time.seconds\n seconds = \"\"\n \n if ( time > 60):\n\n time = time/60\n\n seconds = str(int(time)) + \"m\"\n\n if ( time > 60):\n\n seconds = str(time * 1 / 60) + \"H\"\n\n else:\n\n seconds = str(time) + \"s\"\n\n return seconds\n","sub_path":"tweetmanager/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"23074137","text":"import cv2\n\nfrom scipy import signal\nfrom scipy import misc\nfrom scipy.ndimage import gaussian_filter\n\nfrom pylab import *\n\n# Project 5: Eduardo Gonzalez\n\n# Outputs a standard Gaussian Kernel\ndef gaussianKernel(size):\n\n x, y = mgrid[-size:size+1, -size:size+1]\n g = exp(-(x**2/float(size)+y**2/float(size)))\n return g / g.sum()\n\ndef imageCorners(image):\n\n size = 3\n\n y, x = mgrid[-size:size+1, -size:size+1]\n\n # We create a gaussianX and a gaussianY\n gaussianX = - x * exp(-(x**2/float((0.5*size)**2)+y**2/float((0.5*size)**2)))\n gaussianY = - y * exp(-(x**2/float((0.5*size)**2)+y**2/float((0.5*size)**2)))\n\n imageX = signal.convolve(im, gaussianX, mode='same')\n imageY = signal.convolve(im, gaussianY, mode='same')\n\n # 
Gaussian filter to blur the image\n gaussian = gaussianKernel(3)\n\n # We combine the possible outcomes\n finalXX = signal.convolve(imageX*imageX, gaussian, mode='same')\n finalXY = signal.convolve(imageX*imageY, gaussian, mode='same')\n finalYY = signal.convolve(imageY*imageY, gaussian, mode='same')\n\n aux = finalXX * finalYY - finalXY**2\n\n return aux / (finalXX + finalYY)\n\ndef getPoints(im):\n\n # We find the coordanates for the top corner above 0.1 (Threshold)\n # Modify the Threshold to find more/less features\n corner = (im > max(im.ravel()) * 0.2).nonzero()\n arrayCoordinates = [(corner[0][k], corner[1][k]) for k in range(len(corner[0]))]\n\n # We fill an array with all the values we found\n val = argsort([im[j[0]][j[1]] for j in arrayCoordinates])\n\n # We create an array to store all, this also contains the min number of pixels\n # between points (I decided 10 pixels)\n pointsArray = zeros(im.shape)\n pointsArray[10:-10, 10:-10] = 1\n\n final = []\n\n for i in val:\n if pointsArray[arrayCoordinates[i][0]][arrayCoordinates[i][1]] == 1:\n final.append(arrayCoordinates[i]) # if == 1, append\n pointsArray[(arrayCoordinates[i][0]-10):(arrayCoordinates[i][0]+10), (arrayCoordinates[i][1]-10):(arrayCoordinates[i][1]+10)] = 0\n\n return final\n\n\n\n\n\n\n\n# Read the image\nim = misc.imread('../data/Notre Dame/4191453057_c86028ce1f_o.jpg',flatten=1)\nimDisplay = misc.imread('../data/Notre Dame/4191453057_c86028ce1f_o.jpg')\n\n#im = misc.imread('../data/Notre Dame/921919841_a30df938f2_o.jpg',flatten=1)\n#imDisplay = misc.imread('../data/Notre Dame/921919841_a30df938f2_o.jpg')\n\n# Get the points from the data that we got from the corners\npoints = getPoints(imageCorners(im))\n\n# Plot\nplt.gray()\nplt.imshow(imDisplay)\nplot([p[1] for p in points],[p[0] for p in points],'o')\nplt.axis('off')\nplt.show()\n","sub_path":"Project5Eduardo_Gonzalez/code/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"584276758","text":"# coding: utf-8\n\n\nimport os\nimport shutil\nimport scipy.io\nimport warnings\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom tqdm import tqdm\nimport mpl_toolkits\nimport mpl_toolkits.axes_grid1\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport torch\nimport torchvision\nimport pytorch_ssim\nfrom utils import normalize\n\n\nwarnings.simplefilter('ignore')\ndevice = 'cpu'\nplt.rcParams['image.cmap'] = 'gray'\n\n\ndef compare_sam(x, y):\n x_sqrt = np.linalg.norm(x, axis=-1)\n y_sqrt = np.linalg.norm(y, axis=-1)\n xy = (x * y).sum(axis=-1)\n metrics = xy / (x_sqrt * y_sqrt + 1e-6)\n angle = np.arccos(metrics)\n return angle.mean()\n\n\nclass RMSEMetrics(torch.nn.Module):\n\n def __init__(self):\n super(RMSEMetrics, self).__init__()\n self.criterion = torch.nn.MSELoss().eval()\n\n def forward(self, x, y):\n return torch.sqrt(self.criterion(x, y))\n\n\nclass PSNRMetrics(torch.nn.Module):\n\n def __init__(self):\n super(PSNRMetrics, self).__init__()\n self.criterion = torch.nn.MSELoss().eval()\n\n def forward(self, x, y):\n return 10. * torch.log10(1. 
/ self.criterion(x, y))\n\n\nclass SAMMetrics(torch.nn.Module):\n\n def forward(self, x, y):\n x_sqrt = torch.norm(x, dim=1)\n y_sqrt = torch.norm(y, dim=1)\n xy = torch.sum(x * y, dim=1)\n metrics = xy / (x_sqrt * y_sqrt + 1e-6)\n angle = torch.acos(metrics)\n return torch.mean(angle)\n\n\nclass Evaluater(object):\n\n def __init__(self, data_name, save_img_path='output_img', save_mat_path='output_mat', save_csv_path='output_csv', filter_path=None, **kwargs):\n self.data_name = data_name\n self.save_alls_path = save_img_path\n self.save_mat_path = save_mat_path\n self.save_csv_path = save_csv_path\n self.output_ch = {'CAVE': (26, 16, 9), 'Harvard': (21, 11, 12), 'ICVL': (26, 16, 9)}\n shape = kwargs.get('shape')\n if shape is None:\n shape = (512, 512, 31)\n self.zeros = torch.zeros(shape)\n self.ones = torch.ones(shape)\n os.makedirs(self.save_alls_path, exist_ok=True)\n os.makedirs(save_mat_path, exist_ok=True)\n\n def _plot_img(self, ax, img, title='None', colorbar=False):\n if colorbar is not False:\n divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)\n cax = divider.append_axes('right', '5%', pad='3%')\n im = ax.imshow(img, cmap='jet')\n plt.colorbar(im, cax=cax)\n else:\n im = ax.imshow(img)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title(title)\n return self\n\n def _save_all(self, i, inputs, outputs, labels):\n save_alls_path = 'save_all'\n _, c, h, w = outputs.size()\n diff = torch.abs(outputs - labels).squeeze().numpy()\n diff = diff.transpose(1, 2, 0).mean(axis=-1)\n diff = normalize(diff)\n inputs = normalize(inputs.squeeze().numpy())\n outputs = outputs.squeeze().numpy().transpose(1, 2, 0)\n outputs = normalize(outputs[:, :, self.output_ch[self.data_name]])\n labels = labels.squeeze().numpy().transpose(1, 2, 0)\n labels = normalize(labels[:, :, self.output_ch[self.data_name]])\n fig_num = 4\n plt.figure(figsize=(16, 9))\n ax = plt.subplot(1, 4, 1)\n if inputs.shape[0] == 32:\n inputs = inputs[0]\n ax.imshow(inputs)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title('input')\n figs = [outputs, labels]\n titles = ['output', 'label']\n for j, (fig, title) in enumerate(zip(figs, titles)):\n ax = plt.subplot(1, fig_num, j + 2)\n self._plot_img(ax, fig, title)\n ax = plt.subplot(1, fig_num, fig_num)\n self._plot_img(ax, diff, title='diff', colorbar=True)\n plt.tight_layout()\n plt.savefig(os.path.join(self.save_alls_path, f'output_alls_{i}.png'), bbox_inches='tight')\n plt.close()\n return self\n\n def _save_mat(self, i, idx, output):\n output_mat = output.squeeze().to('cpu').detach().numpy().copy()\n output_mat = output_mat.transpose(1, 2, 0)\n scipy.io.savemat(os.path.join(self.save_mat_path, f'{i:05d}.mat'), {'data': output_mat, 'idx': idx})\n return self\n\n def _save_csv(self, output_evaluate, header):\n header.append('Time')\n output_evaluate_np = np.array(output_evaluate, dtype=np.float32)\n means = list(np.mean(output_evaluate_np, axis=0))\n output_evaluate.append(means)\n output_evaluate_csv = pd.DataFrame(output_evaluate)\n output_evaluate_csv.to_csv(self.save_csv_path, header=header)\n print(means)\n return self\n\n def _step_show(self, pbar, *args, **kwargs):\n if device == 'cuda':\n kwargs['Allocate'] = f'{torch.cuda.memory_allocated(0) / 1024 ** 3:.3f}GB'\n kwargs['Cache'] = f'{torch.cuda.memory_cached(0) / 1024 ** 3:.3f}GB'\n pbar.set_postfix(kwargs)\n return self\n\n\nclass ReconstEvaluater(Evaluater):\n\n def metrics(self, model, dataset, evaluate_fn, header=None, hcr=False, mode='normal'):\n model.eval()\n output_evaluate = []\n # _, columns = 
os.popen('stty size', 'r').read().split()\n # columns = int(columns) // 2\n columns = 200\n with torch.no_grad():\n # with tqdm(dataset, desc=desc_str, ncols=columns, unit='step', ascii=True) as pbar:\n with tqdm(dataset, ncols=columns, ascii=True) as pbar:\n for i, (idx, inputs, labels) in enumerate(pbar):\n evaluate_list = []\n inputs = inputs.unsqueeze(0).to(device)\n labels = labels.unsqueeze(0).to(device)\n if hcr is True:\n start_time = time()\n _, _, output = model(inputs)\n finish_time = time() - start_time\n elif mode == 'ista':\n start_time = time()\n output, _ = model(inputs)\n finish_time = time()\n else:\n start_time = time()\n output = model(inputs)\n finish_time = time() - start_time\n metrics_output = torch.clamp(output, min=0., max=1.)\n metrics_labels = torch.clamp(labels, min=0., max=1.)\n for metrics_func in evaluate_fn:\n metrics = metrics_func(metrics_output, metrics_labels)\n evaluate_list.append(f'{metrics.item():.7f}')\n evaluate_list.append(f'{finish_time:.5f}')\n output_evaluate.append(evaluate_list)\n show_evaluate = np.mean(np.array(output_evaluate, dtype=np.float32), axis=0)\n self._step_show(pbar, Metrics=show_evaluate)\n del show_evaluate\n self._save_all(i, inputs, output, labels)\n self._save_mat(i, idx, output)\n self._save_csv(output_evaluate, header)\n return self\n\n\nclass ReconstEvaluater_skimage(Evaluater):\n\n def metrics(self, model, dataset, evaluate_fn, header=None, hcr=False):\n model.eval()\n output_evaluate = []\n # _, columns = os.popen('stty size', 'r').read().split()\n # columns = int(columns) // 2\n columns = 200\n with torch.no_grad():\n # with tqdm(dataset, desc=desc_str, ncols=columns, unit='step', ascii=True) as pbar:\n with tqdm(dataset, ncols=columns, ascii=True) as pbar:\n for i, (idx, inputs, labels) in enumerate(pbar):\n evaluate_list = []\n inputs = inputs.unsqueeze(0).to(device)\n labels = labels.unsqueeze(0).to(device)\n if hcr is True:\n start_time = time()\n _, _, output = model(inputs)\n finish_time = time() - start_time\n else:\n start_time = time()\n output = model(inputs)\n finish_time = time() - start_time\n metrics_output = np.clip(output.squeeze().numpy().transpose(1, 2, 0), 0., 1.)\n metrics_labels = np.clip(labels.squeeze().numpy().transpose(1, 2, 0), 0., 1.)\n for metrics_func in evaluate_fn:\n metrics = metrics_func(metrics_output, metrics_labels)\n evaluate_list.append(f'{metrics.item():.7f}')\n # evaluate_list.append(f'{output_time:.5f}')\n evaluate_list.append(f'{finish_time:.5f}')\n output_evaluate.append(evaluate_list)\n show_evaluate = np.mean(np.mean(output_evaluate, dtype=np.float32), axis=0)\n self._step_show(pbar, Metrics=show_evaluate)\n del show_evaluate\n # self._save_all(i, inputs, output, labels)\n # self._save_mat(i, idx, output)\n self._save_csv(output_evaluate, header)\n\n return self\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":9155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"85310932","text":"import os\nimport sys\n\ndef fib(n): # write Fibonacci series up to n\n a, b = 0, 1\n while a < n:\n print(a, end=' ')\n a, b = b, a+b\n print()\n return a\n\ndef test_print():\n print('test call this function get_TESTER_data')\n #path='/home/evan/mp4_to_png/CONVERT_VIDEO_PIC/TESTER/ALL_TEST_DATA/'\n #path='/home/evan/mp4_to_png/CONVERT_VIDEO_PIC/TESTER_0430/ALL_TEST_DATA_0430/'\n path='/home/evan/mp4_to_png/CONVERT_VIDEO_PIC/TESTER_0430/ALL_TEST_WITH_BOX/'\n root_dir=[name for name in 
os.listdir(path)]\n root_dir.sort()\n x=[]\n y=[]\n #for index_sub1,sub1_dir in enumerate(root_dir):\n # print (sub1_dir)\n return root_dir\n\n\ndef get_TESTER_data():\n print('test call this function get_TESTER_data')\n #path='/home/evan/mp4_to_png/CONVERT_VIDEO_PIC/TESTER/ALL_TEST_DATA/'\n #path='/home/evan/mp4_to_png/CONVERT_VIDEO_PIC/TESTER_0430/ALL_TEST_DATA_0430/'\n path='/home/evan/mp4_to_png/CONVERT_VIDEO_PIC/TESTER_0430/ALL_TEST_WITH_BOX/'\n root_dir=[name for name in os.listdir(path)]\n\n #root_dir.sort()\n root_dir.sort(key=lambda x: int(''.join(filter(str.isdigit, x))))\n x=[]\n y=[]\n for index_sub1,sub1_dir in enumerate(root_dir):\n sub1_dir=path+sub1_dir\n if (os.path.isdir(sub1_dir) and sub1_dir != (path+'__pycache__')):\n sub1_dir_name=[name for name in os.listdir(sub1_dir)]\n sub1_dir_name.sort()\n for index_sub2,sub2_dir in enumerate(sub1_dir_name):\n #print(sub2_dir,'meter=',index_sub2)\n sub2_dir=sub1_dir+'/'+sub2_dir\n sub2_dir_name=[name for name in os.listdir(sub2_dir)]\n sub2_dir_name.sort()\n for index_sub3,sub3_file in enumerate(sub2_dir_name):\n image_path=sub2_dir+'/'+sub3_file\n #print('image_path=',image_path,'ground true=',index_sub3)\n x.append(image_path)\n y.append(index_sub3) #ground true\n return x,y\n","sub_path":"gen_statistic_0516.py","file_name":"gen_statistic_0516.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"247800649","text":"\"\"\"Update the firewall for a Virtual Network.\"\"\"\n# :license: MIT, see LICENSE for more details.\n\nimport click\n\nimport cloudistics\nfrom cloudistics.cli import environment\nfrom cloudistics.cli import exceptions\nfrom cloudistics.cli import formatting\nfrom cloudistics.cli import helpers\n\n\n@click.command()\n@click.option('--wait',\n type=click.INT,\n help=\"Wait until the vnet is finished updating for up to N seconds before returning\")\n@click.argument('identifier')\n@click.argument('firewall_profile_id', required=False)\n@environment.pass_env\ndef cli(env, identifier, firewall_profile_id, wait):\n \"\"\"Update the firewall a Virtual Network.\"\"\"\n\n if not (env.skip_confirmations or formatting.confirm(\"This action will update the firewall for VNET. 
Continue?\")):\n raise exceptions.CLIAbort('Aborting VNET firewall update.')\n\n mgr = cloudistics.VNetsManager(env.client)\n\n # Build and setup our results table\n table = formatting.KeyValueTable(['name', 'value'])\n table.align['name'] = 'r'\n table.align['value'] = 'l'\n table.add_row(['action', 'firewall'])\n table.add_row(['identifier', identifier])\n table.add_row(['firewall-profile', firewall_profile_id])\n\n try:\n result = mgr.firewall_profile_update(identifier, firewall_profile_id)\n if 'message' in result:\n table.add_row(['status', result['message']])\n else:\n # Wait for the action to complete (if wait is set)\n helpers.wait_for_action(env, result, table, wait)\n\n # Return our results\n env.fout(table)\n except ValueError as ex:\n env.out(str(ex))\n raise exceptions.CLIHalt(code=1)\n","sub_path":"cloudistics/cli/vnets/firewall.py","file_name":"firewall.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"117141670","text":"import contextlib\r\nimport hashlib\r\nimport random\r\nimport sys\r\nimport os\r\nimport futurist\r\nfrom oslo_utils import uuidutils\r\nfrom taskflow import engines\r\nfrom taskflow import task\r\nfrom taskflow.patterns import graph_flow as gf\r\nfrom taskflow.patterns import linear_flow as lf\r\nfrom taskflow.persistence import models\r\n\r\ntop_dir = os.path.join(os.path.dirname(__file__), os.pardir)\r\ntop_dir = os.path.abspath(top_dir)\r\nsys.path.append(top_dir)\r\nfrom utils import example_utils as eu\r\n\r\n\r\n# 打印信息\r\nclass PrintText(task.Task):\r\n def __init__(self, print_what):\r\n content_hash = hashlib.md5(print_what.encode('utf-8')).hexdigest() # 字符串的hash\r\n super().__init__(name=\"Print: \" + content_hash)\r\n self._text = print_what\r\n\r\n def execute(self):\r\n eu.print_wrapped(self._text)\r\n\r\n\r\n# 定义虚拟机\r\nclass DefineVMSpec(task.Task):\r\n def __init__(self, name):\r\n super().__init__(provides='vm_spec', name=name)\r\n\r\n def execute(self): # '' --> vm_spec\r\n return {'type': 'kvm', 'disks': 3, 'vcpu': 2, 'ips': 2, 'volumes': 3}\r\n\r\n\r\n# 映射Map:磁盘镜像url : 保存到本地path\r\nclass LocateImages(task.Task):\r\n def __init__(self, name):\r\n super().__init__(provides='image_locations', name=name)\r\n\r\n def execute(self, vm_spec): # 'vm_spec' --> 'image_locations'\r\n image_locations = {}\r\n for i in range(0, vm_spec['disks']):\r\n disk_url = \"http://www.yahoo.com/images/{index}\".format(index=i)\r\n image_locations[disk_url] = \"/tmp/{index}.img\".format(index=i)\r\n return image_locations\r\n\r\n\r\n# 下载磁盘镜像到本地\r\nclass DownloadImages(task.Task):\r\n def __init__(self, name):\r\n super().__init__(provides='download_paths', name=name)\r\n\r\n def execute(self, image_locations): # 'image_locations' --> 'download_paths'\r\n for src, loc in image_locations.items():\r\n print(\"Downloading Disk Image from '{0}' => '{1}'\".format(src, loc))\r\n return sorted(image_locations.values())\r\n\r\n\r\n# 生成虚拟机网络配置文件\r\nclass CreateNetworkTpl(task.Task):\r\n SYSCONFIG_CONTENTS = \"\"\"DEVICE=eth{}\r\nBOOTPROTO=static\r\nIPADDR={}\r\nONBOOT=yes\r\n\"\"\"\r\n\r\n def __init__(self, name):\r\n super().__init__(provides='network_settings', name=name)\r\n\r\n def execute(self, ips): # 'ips' --> 'network_settings'\r\n settings = []\r\n for i, ip in enumerate(ips):\r\n settings.append(self.SYSCONFIG_CONTENTS.format(i, ip))\r\n return settings\r\n\r\n\r\n# 为虚拟机分配ip\r\nclass AllocateIP(task.Task):\r\n def __init__(self, name):\r\n 
super().__init__(provides='ips', name=name)\r\n\r\n def execute(self, vm_spec): # 'vm_spec' --> 'ips'\r\n ips = []\r\n for _i in range(0, vm_spec.get('ips', 0)):\r\n ips.append(\"192.168.0.{}\".format(random.randint(1, 254)))\r\n return ips\r\n\r\n\r\n# 网络配置文件写入下载的镜像文件\r\nclass WriteNetworkSettings(task.Task):\r\n def execute(self, download_paths, network_settings): # 'download_paths', 'network_settings' --> .\r\n for j, path in enumerate(download_paths):\r\n print(\"Mounting Image '{}' => '/tmp/{}'\".format(path, j))\r\n for i, setting in enumerate(network_settings):\r\n filename = \"/tmp/{}/etc/sysconfig/network-scripts/ifcfg-eth{}\".format(j, i)\r\n print(\"Writing to %s\" % filename)\r\n print(setting)\r\n\r\n\r\n# 启动虚拟机\r\nclass BootVM(task.Task):\r\n def execute(self, vm_spec): # 'vm_spec' --> .\r\n print(\"Starting vm!\")\r\n print(\"Created: %s\" % vm_spec)\r\n\r\n\r\n# 为虚拟机分配卷\r\nclass AllocateVolumes(task.Task):\r\n default_provides = 'volumes'\r\n\r\n def execute(self, vm_spec): # 'vm_spec' --> 'volumes'\r\n volumes = []\r\n for i in range(0, vm_spec['volumes']):\r\n volumes.append(\"/dev/vda{}\".format(i + 1))\r\n print(\"Allocated volume '{}'\".format(volumes[-1]))\r\n return volumes\r\n\r\n\r\n# 格式化分配的卷\r\nclass FormatVolumes(task.Task):\r\n def execute(self, volumes): # 'volumes' --> .\r\n for v in volumes:\r\n print(\"Formatting volume '{}'\".format(v))\r\n print(\"Formatted volume '{}'\".format(v))\r\n\r\n\r\n# 创建流实例\r\ndef create_flow():\r\n flow = lf.Flow(\"root\").add(\r\n PrintText(\"Starting vm creation.\"),\r\n lf.Flow('vm-maker').add(\r\n DefineVMSpec(\"define_spec\"),\r\n gf.Flow(\"img-maker\").add(\r\n LocateImages(\"locate_images\"),\r\n DownloadImages(\"download_images\"),\r\n ),\r\n gf.Flow(\"net-maker\").add(\r\n AllocateIP(\"get_my_ips\"),\r\n CreateNetworkTpl(\"fetch_net_settings\"),\r\n WriteNetworkSettings(\"write_net_settings\"),\r\n ),\r\n gf.Flow(\"volume-maker\").add(\r\n AllocateVolumes(\"allocate_my_volumes\"),\r\n FormatVolumes(\"volume_formatter\"),\r\n ),\r\n BootVM(\"boot-it\"),\r\n ),\r\n PrintText(\"Finished vm create.\"),\r\n PrintText(\"Instance is running!\")\r\n )\r\n return flow\r\n\r\n\r\neu.print_wrapped(\"Initializing\")\r\n\r\nwith eu.get_backend() as backend:\r\n book = models.LogBook(\"vm-boot\") # 创建Book\r\n assert uuidutils.is_uuid_like(book.uuid)\r\n\r\n with contextlib.closing(backend.get_connection()) as conn:\r\n conn.save_logbook(book) # 保存Book\r\n\r\n executor = futurist.GreenThreadPoolExecutor(max_workers=5)\r\n engine = engines.load_from_factory(create_flow, backend=backend, book=book, engine='parallel', executor=executor)\r\n print('book.uuid={}, engine.storage.flow_uuid={}'.format(book.uuid, engine.storage.flow_uuid))\r\n\r\n eu.print_wrapped('Running')\r\n engine.run()\r\n","sub_path":"python/taskflow/my-example/resume_vm_boot.py","file_name":"resume_vm_boot.py","file_ext":"py","file_size_in_byte":5739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"468631003","text":"from art.estimators.classification import TensorFlowV2Classifier, PyTorchClassifier\nfrom typing import Union, Tuple\nimport numpy as np\nimport tensorflow as tf\nimport torch\n\n\nclass Classifier:\n \"\"\"Classifier base class.\"\"\"\n\n def __init__(\n self,\n classifier: Union[tf.keras.Model, torch.nn.Module],\n nb_classes: int,\n input_shape: Tuple[int, ...],\n ):\n \"\"\"Initializes a Classifier class.\n\n :param classifier: The classifier. 
Either a Pytorch or Tensorflow classifier.\n :param nb_classes: Number of classes that were used to train the classifier.\n :param input_shape: Input shape of a data point of the classifier.\n \"\"\"\n self.art_classifier = self._to_art_classifier(\n classifier, nb_classes, input_shape\n )\n\n def predict(self, x: np.ndarray):\n \"\"\"Predicts labels for given data.\n\n :param x: Data which labels should be predicted for.\n :return: Predicted labels.\n \"\"\"\n return self.art_classifier.predict(x)\n\n @staticmethod\n def _to_art_classifier(\n classifier: Union[tf.keras.Model, torch.nn.Module],\n nb_classes: int,\n input_shape: Tuple[int, ...],\n ) -> Union[TensorFlowV2Classifier, PyTorchClassifier]:\n \"\"\"Converts a classifier to an ART classifier.\n\n :param classifier: Classifier to be converted. Either a Pytorch or Tensorflow classifier.\n :param nb_classes: Number of classes that were used to train the classifier.\n :param input_shape: Input shape of a data point of the classifier.\n :return: Given classifier converted to an ART classifier.\n :raises TypeError: If the given classifier is of an invalid type.\n \"\"\"\n if isinstance(classifier, torch.nn.Module):\n return PyTorchClassifier(\n model=classifier,\n loss=None,\n nb_classes=nb_classes,\n input_shape=input_shape,\n )\n if isinstance(classifier, tf.keras.Model):\n return TensorFlowV2Classifier(\n model=classifier, nb_classes=nb_classes, input_shape=input_shape,\n )\n else:\n raise TypeError(\n f\"Expected classifier to be an instance of {str(torch.nn.Module)} or {str(tf.keras.Model)}, received {str(type(classifier))} instead.\"\n )\n","sub_path":"privacy_evaluator/classifiers/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"236274083","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 20 21:51:52 2016\n\n@author: lixin\n\"\"\"\n\n\nfrom pandas import Series, DataFrame\nimport pandas as pd\n\nobj=Series([4,5,6,7])\nobj2=Series([4,5,6,7,], index=['a','b','c','d'])\n\n\n# create series form the dict of python\nsdata={'ohio':3500, 'texas':71000, 'oregon':16000}\nobj3=Series(sdata)\n\nstates=['califonia', 'ohio','oregon','texas']\nobj4=Series(sdata,index=states)\n\nobj4.name='population'\nobj4.index.name='state'\n\n# construct a dataframe form disctionary\ndata={'state':['ohio','ohio','ohio','Nevada','Nevada'],\n 'year':[2000,2001,2002,2001,2002],\n 'pop': [1.5,1.7,3.6,2.4,2.9]}\n\nframe=DataFrame(data)\n\nframe2=DataFrame(data, columns=['year','state','pop','debt'],\n index=['one','two','three','four','five'])\n\n","sub_path":"Python4DataAnalysis/ch5/ch5_pandas.py","file_name":"ch5_pandas.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"389730846","text":"## import some useful tools\nimport os\n# import numpy as np\n# import pandas as pd\n# import matplotlib.pyplot as plt\n# import datetime as dt\n\n## import torch module\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms, utils\n\n# import our model and dataloader\nimport sys\nsys.path.append(\"..\")\nfrom tools.args_tools import args, createfolder\nfrom tools.dataset_CNN import ToTensor, Normalize, TyDataset\n\nfrom CNN2D import CNN2D\n\n\ndef BMSE(outputs, labels):\n BMSE = 0\n outputs_size = 
outputs.shape[0]*outputs.shape[1]\n BMSE += torch.sum(1*(outputs[2>outputs]-labels[2>outputs])**2)\n BMSE += torch.sum(2*(outputs[5>outputs]-labels[5>outputs])**2) - torch.sum(2*(outputs[2>outputs]-labels[2>outputs])**2)\n BMSE += torch.sum(5*(outputs[10>outputs]-labels[10>outputs])**2) - torch.sum(5*(outputs[5>outputs]-labels[5>outputs])**2)\n BMSE += torch.sum(10*(outputs[30>outputs]-labels[30>outputs])**2) - torch.sum(10*(outputs[10>outputs]-labels[10>outputs])**2)\n BMSE += torch.sum(30*(outputs[outputs>=30]-labels[outputs>=30])**2)\n\n return BMSE/outputs_size\n\ndef BMAE(outputs, labels):\n BMAE = 0\n outputs_size = outputs.shape[0]*outputs.shape[1]\n BMAE += torch.sum(1*torch.abs(outputs[2>outputs]-labels[2>outputs]))\n BMAE += torch.sum(2*torch.abs(outputs[5>outputs]-labels[5>outputs])) - torch.sum(2*torch.abs(outputs[2>outputs]-labels[2>outputs]))\n BMAE += torch.sum(5*torch.abs(outputs[10>outputs]-labels[10>outputs])) - torch.sum(5*torch.abs(outputs[5>outputs]-labels[5>outputs]))\n BMAE += torch.sum(10*torch.abs(outputs[30>outputs]-labels[30>outputs])) - torch.sum(10*torch.abs(outputs[10>outputs]-labels[10>outputs]))\n BMAE += torch.sum(30*torch.abs(outputs[outputs>=30]-labels[outputs>=30]))\n\n return BMAE/outputs_size\n\n\ndef train(net, train_loader, test_loader, results_file, max_epochs=50, loss_function=BMSE,\n optimizer=optim.Adam, device=args.device):\n f = open(results_file,\"w\")\n criterion = loss_function\n optimizer = optimizer(net.parameters())\n total_step = len(train_loader)\n\n for epoch in range(max_epochs):\n # Training\n for i, data in enumerate(train_loader,0):\n inputs, labels = data[\"RAD\"].to(device, dtype=torch.float), data[\"QPE\"].to(device, dtype=torch.float)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n outputs = net(inputs)\n labels = labels.view(labels.shape[0], labels.shape[1]*labels.shape[2])\n\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n if (i+1) % 200 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:>.3f}'\n .format(epoch+1, max_epochs, i+1, total_step, loss.item()))\n f.writelines('Epoch [{}/{}], Step [{}/{}], Loss: {:>.3f}\\n'\n .format(epoch+1, max_epochs, i+1, total_step, loss.item()))\n\n test_loss = test(net, test_loader=test_loader, loss_function=criterion, device=device)\n print(\"Epoch [{}/{}], Test Loss:{:>.3f}\\n\".format(epoch+1, max_epochs, loss))\n f.writelines(\"Epoch [{}/{}], Test Loss:{:>.3f}\\n\".format(epoch+1, max_epochs, loss))\n\n total_params = sum(p.numel() for p in Net.parameters())\n print(\"Total_params: {:d}\".format(total_params))\n f.writelines(\"Total_params: {:d}\".format(total_params))\n f.close()\n\ndef test(net,test_loader,loss_function=nn.MSELoss(),device=args.device):\n net.eval()\n criterion = loss_function\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(test_loader,0):\n inputs, labels = data[\"RAD\"].to(device, dtype=torch.float), data[\"QPE\"].to(device, dtype=torch.float)\n outputs = net(inputs)\n\n labels = labels.view(labels.shape[0], labels.shape[1]*labels.shape[2])\n\n loss += criterion(outputs, labels)\n loss = loss/(i+1)\n\n return loss\n\n# Run exp1\ndef run(results_file, x_tsteps=6, forecast_t=1, loss_function=\"BMSE\", max_epochs=50, device=args.device):\n\n if loss_function == \"BMSE\":\n loss_function = BMSE\n elif loss_function == \"BMAE\":\n loss_function = BMAE\n\n mean = [12.834] * x_tsteps\n std = [14.14] * x_tsteps\n transfrom = transforms.Compose([ToTensor(),Normalize(mean=mean, std=std)])\n\n train_dataset = 
TyDataset(ty_list_file=\"../../ty_list.xlsx\",\n root_dir=\"../../01_TY_database/02_wrangled_data_Taipei\",\n x_tsteps=x_tsteps,\n forecast_t=forecast_t,\n train=True,\n transform = transfrom)\n\n test_dataset = TyDataset(ty_list_file=\"../../ty_list.xlsx\",\n root_dir=\"../../01_TY_database/02_wrangled_data_Taipei\",\n x_tsteps=x_tsteps,\n forecast_t=forecast_t,\n train=False,\n transform = transfrom)\n\n params = {\"batch_size\":4, \"shuffle\":True, \"num_workers\":1}\n train_generator = DataLoader(train_dataset, **params)\n test_generator = DataLoader(test_dataset, **params)\n\n # Make CNN2D Net\n Net = CNN2D(n_input=x_tsteps,n_hidden=[x_tsteps+1,x_tsteps+2,x_tsteps+3],kernel_size=[3,3,3],n_hid_layers=3,\n n_fully=72*72*(x_tsteps+3),n_fully_layers=1,n_out_layer=72*72,batch_norm=True).to(device)\n # print(Net)\n # Train process\n info = \"| Forecast time: {:02d}, Inputs time steps: {:02d} |\".format(forecast_t,x_tsteps)\n print(\"=\"*len(info))\n print(info)\n print(\"=\"*len(info))\n\n train(net=Net, train_loader=train_generator, test_loader=test_generator, results_file=results_file,\n max_epochs=max_epochs, loss_function=loss_function, device=device)\n total_params = torch.sum(p.numel() for p in Net.parameters())\n print(total_params)\n torch.save(Net.state_dict(), results_file[:-4]+'.ckpt')\n\n\n##--------------------main function--------------------##\ndevice = \"cuda:2\"\n\ndef main():\n results_dir = \"../01_results/CNN2D\"\n createfolder(results_dir)\n\n for forecast_t in range(1,4):\n for x_tsteps in range(3,10):\n results_file = os.path.join(results_dir,\"BMSE_f.{:02d}_x.{:02d}.txt\".format(forecast_t,x_tsteps))\n\n run(results_file=results_file,x_tsteps=x_tsteps,forecast_t=forecast_t,loss_function=\"BMSE\",max_epochs=1,device=device)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"models_taipei_I60_F60/cnn3D/CNN3D_run.py","file_name":"CNN3D_run.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"393095959","text":"from gensim.models import word2vec\nimport time\n\nsentences = word2vec.Text8Corpus(\"wakati.txt\")\n\nstart = time.time()\nmodel_100_5 = word2vec.Word2Vec(sentences, size = 100, window=5)\nmodel_100_5.save(\"model_100_5.model\")\nend = time.time()\nprint(\"model_100_5 finished in\", end-start)\ndel model_100_5\n\nstart = time.time()\nmodel_100_10 = word2vec.Word2Vec(sentences, size = 100, window=10)\nmodel_100_10.save(\"model_100_10.model\")\nend = time.time()\nprint(\"model_100_10 finished in\", end-start)\ndel model_100_10\n\nstart = time.time()\nmodel_300_5 = word2vec.Word2Vec(sentences, size = 300, window=5)\nmodel_300_5.save(\"model_300_5.model\")\nend = time.time()\nprint(\"model_300_5 finished in\", end-start)\ndel model_300_5\n\nstart = time.time()\nmodel_300_10 = word2vec.Word2Vec(sentences, size = 300, window=10)\nmodel_300_10.save(\"model_300_10.model\")\nend = time.time()\nprint(\"model_300_10 finished in\", end-start)\ndel model_300_10\n\nstart = time.time()\nmodel_500_5 = word2vec.Word2Vec(sentences, size = 500, window=5)\nmodel_500_5.save(\"model_500_5.model\")\nend = time.time()\nprint(\"model_500_5 finished in\", end-start)\ndel model_500_5\n\nstart = time.time()\nmodel_500_10 = word2vec.Word2Vec(sentences, size = 500, window=10)\nmodel_500_10.save(\"model_500_10.model\")\nend = time.time()\nprint(\"model_500_10 finished in\", end-start)\ndel model_500_10\n\nstart = time.time()\nmodel_1000_5 = word2vec.Word2Vec(sentences, size = 1000, 
window=5)\nmodel_1000_5.save(\"model_1000_5.model\")\nend = time.time()\nprint(\"model_1000_5 finished in\", end-start)\ndel model_1000_5\n\nstart = time.time()\nmodel_1000_10 = word2vec.Word2Vec(sentences, size = 1000, window=10)\nmodel_1000_10.save(\"model_1000_10.model\")\nend = time.time()\nprint(\"model_1000_10 finished in\", end-start)\ndel model_1000_10\n","sub_path":"word2vec/make_model.py","file_name":"make_model.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"609958113","text":"\"\"\"\nPavlo Smemchyshyn\n23/02/2020\n\"\"\"\n\n\nfrom notebook import Notebook, Note\n\n\ndef clas_note():\n \"\"\"\n A function for analysing instance of Note\n class\n \"\"\"\n note = Note(\"hello world\", tags=\"first note\")\n print(\"Object note is a representitive of class: \", type(note), \"\\n\")\n print(\"All attributes and their values of a Note object:\", note.__dict__, \"\\n\")\n all_attr, built_in_attr, created_attr = all_object_attr(note)\n print(\"All attributes of Note object:\", all_attr, \"\\n\")\n print(\"Built-in attributes:\", built_in_attr, \"\\n\")\n print(\"Created attributes:\", created_attr, \"\\n\")\n\n\ndef clas_notebook():\n \"\"\"\n A function for analysing instance of Notebook\n class\n \"\"\"\n notebook = Notebook()\n print(\"Object note is a representitive of class: \", type(notebook), \"\\n\")\n print(\"All attributes and their values of a Notebook object:\", notebook.__dict__, \"\\n\")\n all_attr, built_in_attr, created_attr = all_object_attr(notebook)\n print(\"All attributes of Notebook object:\", all_attr, \"\\n\")\n print(\"Built-in attributes:\", built_in_attr, \"\\n\")\n print(\"Created attributes:\", created_attr, \"\\n\")\n\n\ndef all_object_attr(objectt):\n \"\"\"\n A function for getting all\n attributes of an object\n \"\"\"\n all_attr = dir(objectt)\n built_ins = []\n created_attr = []\n for attr in all_attr:\n if \"__\" in attr:\n built_ins.append(attr)\n else:\n created_attr.append(attr)\n return all_attr, built_ins, created_attr\n\n\nif __name__ == \"__main__\":\n clas_note()\n print(\"_\"*100)\n clas_notebook()\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"95801290","text":"from conan.packager import ConanMultiPackager\nimport platform\n\nif __name__ == \"__main__\":\n builder = ConanMultiPackager(username = \"memsharded\", args = \"--build missing\")\n\n if platform.system() == \"Windows\":\n for compiler_version in [\"11\", \"12\", \"14\"]:\n for compiler_runtime in [\"MT\", \"MD\"]:\n for build_type in [\"Release\", \"Debug\"]:\n for arch in [\"x86\", \"x86_64\"]:\n for shared in [\"True\", \"False\"]:\n if build_type == \"Debug\" and compiler_runtime == \"MT\" and shared == \"True\":\n # NB: \"Debug Assertion Failed!\" in protoc.exe, at C++ Runtime \"debug_heap.cpp\" or \"dbgheap.cpp\"\n # \"arch\": [\"x86\", \"x86_64\"]\n # \"compiler.version\": [\"11\", 12\", \"14\"]\n continue\n\n settings = {}\n settings[\"compiler\"] = \"Visual Studio\"\n settings[\"compiler.version\"] = compiler_version\n settings[\"build_type\"] = build_type\n settings[\"compiler.runtime\"] = \"%s%s\" % (compiler_runtime, \"d\" if build_type == \"Debug\" else \"\")\n settings[\"arch\"] = arch\n\n options = {}\n options[\"Protobuf:shared\"] = shared\n\n builder.add(settings, options)\n else:\n for build_type in [\"Release\", 
\"Debug\"]:\n for shared in [\"True\", \"False\"]:\n settings = {}\n settings[\"build_type\"] = build_type\n\n options = {}\n options[\"Protobuf:shared\"] = shared\n\n builder.add(settings, options)\n\n builder.run()\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"625265076","text":"import unittest\n\nfrom tilequeue.queue import SqsQueue\nfrom ModestMaps.Core import Coordinate\nfrom mock import MagicMock\nfrom boto.sqs.message import RawMessage\nfrom tilequeue.tile import serialize_coord\n\n\nclass TestQueue(unittest.TestCase):\n def setUp(self):\n self.message = None\n self.mockQueue = MagicMock()\n self.mockQueue.write = self.fake_write\n self.mockQueue.write_batch = self.fake_write_batch\n self.mockRedis = MagicMock()\n self.sqs = SqsQueue(self.mockQueue, self.mockRedis)\n self.values = []\n self.key_name = None\n self.coords = None\n\n def fake_write(self, message):\n self.message = message\n\n def fake_write_batch(self, message_tuples):\n self.message_tuples = message_tuples\n\n def fake_sadd(self, name, *value):\n self.key_name = name\n if isinstance(value, (list, tuple)):\n for val in value:\n self.values.append(val)\n else:\n self.values.append(value)\n\n def test_enqueue_should_check_if_pending_work(self):\n from tilequeue.tile import coord_marshall_int\n coord = Coordinate(row=1, column=1, zoom=1)\n self.sqs.enqueue(coord)\n exp_value = coord_marshall_int(coord)\n self.mockRedis.sismember.assert_called_once_with(self.sqs.inflight_key,\n exp_value)\n\n def test_enqueue_batch_adds_tiles(self):\n coords = [Coordinate(row=1, column=1, zoom=1),\n Coordinate(row=2, column=2, zoom=2)]\n mock = MagicMock()\n mock.side_effect = [False, False]\n self.mockRedis.sismember = mock\n self.sqs.enqueue_batch(coords)\n self.assertEqual(2, len(self.message_tuples))\n self.assertEqual(self.message_tuples[0][1], \"1/1/1\")\n self.assertEqual(self.message_tuples[1][1], \"2/2/2\")\n\n def test_enqueue_batch_does_not_add_redundant_tile_in_flight(self):\n coords = [Coordinate(row=1, column=1, zoom=1),\n Coordinate(row=2, column=2, zoom=2)]\n mock = MagicMock()\n mock.side_effect = [True, False]\n self.mockRedis.sismember = mock\n self.sqs.enqueue_batch(coords)\n self.assertEqual(1, len(self.message_tuples))\n self.assertEqual(self.message_tuples[0][1], \"2/2/2\")\n\n def test_enqueue_should_write_message_to_queue(self):\n self.mockRedis.sismember = MagicMock(return_value=False)\n coord = Coordinate(row=1, column=1, zoom=1)\n self.sqs.enqueue(coord)\n self.assertIsNotNone(self.message)\n self.assertEqual(\"1/1/1\", self.message.get_body())\n\n def test_enqueue_should_not_write_message_to_queue(self):\n self.mockRedis.sismember = MagicMock(return_value=True)\n coord = Coordinate(row=1, column=1, zoom=1)\n self.sqs.enqueue(coord)\n self.assertEqual(None, self.message)\n\n def test_enqueue_adds_tile_as_in_flight(self):\n self.mockRedis.sismember = MagicMock(return_value=False)\n mock = MagicMock()\n self.mockRedis.sadd = mock\n coord = Coordinate(row=1, column=1, zoom=1)\n self.sqs.enqueue(coord)\n from tilequeue.tile import coord_marshall_int\n exp_value = coord_marshall_int(coord)\n self.mockRedis.sadd.assert_called_once_with(self.sqs.inflight_key,\n exp_value)\n\n def test_enqueue_batch_adds_tiles_as_in_flight(self):\n from tilequeue.tile import coord_marshall_int\n coords = [Coordinate(row=1, column=1, zoom=1),\n Coordinate(row=2, column=2, zoom=2)]\n mock = 
MagicMock()\n mock.side_effect = [False, False]\n self.mockRedis.sismember = mock\n self.mockRedis.sadd = self.fake_sadd\n self.sqs.enqueue_batch(coords)\n self.assertEqual(self.key_name, self.sqs.inflight_key)\n exp_values = map(coord_marshall_int, coords)\n self.assertEqual(exp_values, self.values)\n\n def test_job_done_removes_tile_from_in_flight(self):\n from tilequeue.tile import CoordMessage\n coord = Coordinate(row=1, column=1, zoom=1)\n payload = serialize_coord(coord)\n message = RawMessage()\n message.set_body(payload)\n coord_message = CoordMessage(coord, message)\n self.sqs.job_done(coord_message)\n from tilequeue.tile import coord_marshall_int\n exp_value = coord_marshall_int(coord)\n self.mockRedis.srem.assert_called_once_with(self.sqs.inflight_key,\n exp_value)\n\n def test_clear_removes_in_flight(self):\n self.mockQueue.get_messages = MagicMock(return_value=[])\n self.sqs.clear()\n self.mockRedis.delete.assert_called_once_with(self.sqs.inflight_key)\n","sub_path":"tests/queue/test_sqs.py","file_name":"test_sqs.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"90352446","text":"#Author: Bibek Poudel | 2019\n#U00745313\n\n# Please enter multiline input\n#First line number of Vertices and press Enter\n#In the lines after First line, enter edges in the format 1,2 and press Enter\n#Only Input one edge in one line\n#Press Enter when done with Inputs\n\n#import statements\n#only used sys if single vertex is entered by user\nimport sys\n\n# Taking User Input\nlines = []\nprint(\"Enter input:\")\nwhile True:\n line = input()\n if line:\n lines.append(line)\n else:\n break\nsize = len(lines)\n\n#Number of Vertices stored in n_vertices\nn_vertices = int(lines[0])\nif(n_vertices) is 1:\n print(\"Graph has no edges\")\n sys.exit(0)\n\n#Storing Vertices in a list called vList\n#vList has 1 indexing\nvList =[]\nfor i in range(1,n_vertices+1):\n vList.append(i)\n\n#Storing Edges in a list called eList\n#eList has 1 indexing\neList=[]\nfor j in range(1,len(lines)):\n eList.append(tuple(int(e) for e in lines[j].split(\",\"))) \n\n#Preparing Adjacency list\n#adjList has 0 indexing\nadjList = [[] for vertex in vList]\nfor edge in eList:\n adjList[edge[0]-1].append(edge[1]-1)\n\n#Initializing Time, pre_visit, post_visit, visited\ntime =1\npre_visit =[ 0 for vertex in range(n_vertices)]\npost_visit=[ 0 for vertex in range(n_vertices)]\nvisited=[ False for vertex in range(n_vertices)]\n\n#current_path list is similar to a recursion stack, but here not implemented as a stack\ncurrent_path=[]\n\n#Initializing back_flag to detect back Edges\n#one_count variable is for counting longest path from vertex one\nback_flag= 0\none_count= 0\n\n#Explore function, takes vertex as input\n#called recursively\n\ndef explore(vertex):\n if visited[vertex]==False:\n\n visited[vertex]=True\n\n #Variable scope dosent allow access unless declared global here\n #These variables declared outside of the function\n\n global current_path\n global one_count\n global time\n\n #we have made sure in the function call that current_path does not have duplicates\n current_path.append(vertex)\n\n #adjacency list uses zero indexing so 0 here actually means 1\n #if current_path has 1 then count the distance to end of list\n if 0 in current_path:\n one_count = len(current_path)- current_path.index(0) -1\n\n #assign pre visit time and increment value\n pre_visit[vertex] = time\n time += 1\n\n #Detecting back edge\n #nbr stands for 
neighbour\n for nbr in adjList[vertex]:\n #back edge exists if neighbour has been visited and lies in the current path\n if visited[nbr] is True and nbr in current_path:\n\n #matching variable scope for back_flag\n #flag set to 1 if a cycle detected\n global back_flag\n back_flag=1\n\n #If the adjacency list of a vertex is empty we do not check for neighbors\n if adjList[vertex]:\n for nbr in adjList[vertex]:\n #The recursive call to neighbor\n explore(nbr)\n\n #If the index of current neighbour in adjacency list is equal to\n #lengh of adjacency list then it must be the last neighbor for that vertex\n #So this means end of current path for this vertex\n\n if adjList[vertex].index(nbr) is (len(adjList[vertex])-1):\n current_path=[]\n\n #Assign post visit time and increment\n post_visit[vertex] = time\n time += 1\n\n\n#The program does not have a main function this is the call to explore function\n#This program is basically a python script at this point\nfor vertex in range(n_vertices):\n\n #This condition is checked so that current_path list has no duplicate values\n if adjList[vertex]:\n explore(vertex)\n\n#Detecting the status of back_flag and performing corresponding action\nif back_flag==1:\n #if back_flag then it has cycle not DAG\n print(\"NO, Not a DAG\")\nelse:\n print(\"YES, it is a DAG\")\n\n #Displaying the result in desired format is a little tricky\n #The Linearized order is obtained in the reverse sorted order of post visit number\n lin = sorted((e,i) for i,e in enumerate(post_visit))\n lin.reverse()\n\n in_str = \"Linearized: \"\n for i in range(len(lin)):\n\n #converting back the zero indexing to 1 indexing to display result\n in_str += str(lin[i][1]+1) + \" \"\n print(in_str)\n\n #Also displaying the longest path from vertex 1\n print(\"Length of longest path from vertex 1 is: \" + str(one_count))\n\n#This print statement is just to make things look nice\nprint(\" \")\n\n#This script was written as part of homework submission for Algorithms/ Problem Solving class\n#By Bibek Poudel (bpoudel@memphis.edu)\n","sub_path":"Bibek_Poudel.py","file_name":"Bibek_Poudel.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"559532727","text":"# coding=utf-8\n\"\"\"Dibujo de rectangulos que representan cielo y pasto\"\"\"\n\nimport glfw\nfrom OpenGL.GL import *\nimport OpenGL.GL.shaders\nimport numpy as np\nimport grafica.easy_shaders as es\nimport grafica.basic_shapes as bs\nfrom grafica.gpu_shape import GPUShape, SIZE_IN_BYTES\n\n__author__ = \"Daniel Calderon\"\n__license__ = \"MIT\"\n\n\n# We will use 32 bits data, so floats and integers have 4 bytes\n# 1 byte = 8 bits\nSIZE_IN_BYTES = 4\n\n\n# A class to store the application control\nclass Controller:\n fillPolygon = True\n\n\n# we will use the global controller as communication with the callback function\ncontroller = Controller()\n\n\ndef on_key(window, key, scancode, action, mods):\n\n if action != glfw.PRESS:\n return\n \n global controller\n\n if key == glfw.KEY_SPACE:\n controller.fillPolygon = not controller.fillPolygon\n\n elif key == glfw.KEY_ESCAPE:\n glfw.set_window_should_close(window, True)\n\n else:\n print('Unknown key')\n\ndef createQuad():\n\n # Defining locations and colors for each vertex of the shape\n #####################################\n \n vertexData = np.array([\n # positions colors\n -0.5, -0.5, 0.0, 1.0, 0.0, 0.0,\n 0.5, -0.5, 0.0, 0.0, 1.0, 0.0,\n 0.5, 0.5, 0.0, 0.0, 0.0, 1.0,\n -0.5, 0.5, 0.0, 1.0, 1.0, 
1.0\n # It is important to use 32 bits data\n ], dtype = np.float32)\n\n # Defining connections among vertices\n # We have a triangle every 3 indices specified\n indices = np.array(\n [0, 1, 2,\n 2, 3, 0], dtype= np.uint32)\n\n size = len(indices)\n return bs.Shape(vertexData, indices)\n\n# Funcion para crear el cielo con \n# y0 la altura superior\n# yf la altura inferior\ndef createSky(y0, yf):\n # Defining locations and colors for each vertex of the shape\n #####################################\n color1 = [0.0, 0.6, 0.8]\n color0 = [0.7, 1.0, 1.0]\n\n vertexData = np.array([\n # positions colors\n -1.0, yf, 0.0, color1[0], color1[1], color1[2],\n 1.0, yf, 0.0, color1[0], color1[1], color1[2],\n 1.0, y0, 0.0, color0[0], color0[1], color0[2],\n -1.0, y0, 0.0, color0[0], color0[1], color0[2]\n # It is important to use 32 bits data\n ], dtype = np.float32)\n\n # Defining connections among vertices\n # We have a triangle every 3 indices specified\n indices = np.array(\n [0, 1, 2,\n 2, 3, 0], dtype= np.uint32)\n\n return bs.Shape(vertexData, indices)\n \n# Funcion para crear un rectangulo con \n# y0 la altura superior\n# yf la altura inferior\n# color0 un arreglo con el color rgb superior\n# color1 un arreglo con el color rgb inferior\ndef createRect(y0, yf, color0, color1):\n # Defining locations and colors for each vertex of the shape\n #####################################\n\n vertexData = np.array([\n # positions colors\n -1.0, yf, 0.0, color1[0], color1[1], color1[2],\n 1.0, yf, 0.0, color1[0], color1[1], color1[2],\n 1.0, y0, 0.0, color0[0], color0[1], color0[2],\n -1.0, y0, 0.0, color0[0], color0[1], color0[2]\n # It is important to use 32 bits data\n ], dtype = np.float32)\n\n # Defining connections among vertices\n # We have a triangle every 3 indices specified\n indices = np.array(\n [0, 1, 2,\n 2, 3, 0], dtype= np.uint32)\n\n size = len(indices)\n return bs.Shape(vertexData, indices)\n\n\nif __name__ == \"__main__\":\n\n # Initialize glfw\n if not glfw.init():\n glfw.set_window_should_close(window, True)\n\n width = 600\n height = 600\n\n window = glfw.create_window(width, height, \"Drawing a sky and grass\", None, None)\n\n if not window:\n glfw.terminate()\n glfw.set_window_should_close(window, True)\n\n glfw.make_context_current(window)\n\n # Connecting the callback function 'on_key' to handle keyboard events\n glfw.set_key_callback(window, on_key)\n \n # Creating our shader program and telling OpenGL to use it\n pipeline = es.SimpleShaderProgram()\n\n # Creating shapes on GPU memory\n\n # 1- Creamos la Figura del cielo en la GPU\n skyShape = createSky(1.0, -0.5) # Creamos los vertices e indices (guardandolos en un objeto shape)\n gpuSky = GPUShape().initBuffers() # Se le pide memoria a la GPU para guardar la figura\n pipeline.setupVAO(gpuSky) # Se le dice al pipeline como leer esta parte de la memoria \n gpuSky.fillBuffers(skyShape.vertices, skyShape.indices, GL_STATIC_DRAW) # Llenamos esta memoria de la GPU con los vertices e indices\n\n # 2- Creamos la Figura del pasto en la GPU\n grassShape = createRect(-0.5, -1.0, [0.0, 1.0, 0.0], [0.0, 0.6, 0.0]) # Creamos los vertices e indices (guardandolos en un objeto shape)\n gpuGrass = GPUShape().initBuffers() # Se le pide memoria a la GPU para guardar la figura\n pipeline.setupVAO(gpuGrass) # Se le dice al pipeline como leer esta parte de la memoria \n gpuGrass.fillBuffers(grassShape.vertices, grassShape.indices, GL_STATIC_DRAW) # Llenamos esta memoria de la GPU con los vertices e indices\n \n # Setting up the clear screen color\n 
glClearColor(0.15, 0.15, 0.15, 1.0)\n\n while not glfw.window_should_close(window):\n # Using GLFW to check for input events\n glfw.poll_events()\n\n # Filling or not the shapes depending on the controller state\n if (controller.fillPolygon):\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n else:\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n\n # Clearing the screen in both, color and depth\n glClear(GL_COLOR_BUFFER_BIT)\n\n # Se le dice a OpenGL que use el shaderProgram simple\n glUseProgram(pipeline.shaderProgram)\n pipeline.drawCall(gpuSky) # Se dibuja el cielo\n pipeline.drawCall(gpuGrass) # Se dibuja el pasto\n\n # Once the render is done, buffers are swapped, showing only the complete scene.\n glfw.swap_buffers(window)\n\n # freeing GPU memory\n gpuSky.clear()\n\n glfw.terminate()\n","sub_path":"p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"390573181","text":"import jwt\nfrom flask import request\nfrom flask_restful import Resource\n\nfrom app import db, Product, CartItem\nfrom app.endpoints.utils import check_user_session\n\n\nclass AddToCart(Resource):\n def post(self):\n try:\n user = check_user_session(request)\n\n product_id = request.args.get('product_id')\n quantity = request.args.get('quantity')\n\n product = Product.query.get(product_id)\n\n if product is None:\n raise Exception(\"Product is missing\")\n\n for item in user.cart_items:\n if product == item.product:\n return {'status': 'ok',\n 'message': 'This product is already in you shopping cart'}, 200\n\n cart_item = CartItem(quantity=quantity)\n cart_item.user = user\n cart_item.product = product\n\n user.cart_items.append(cart_item)\n\n db.session.commit()\n\n return {'status': 'ok',\n 'message': 'This product was added to you shopping cart'}, 201\n except jwt.ExpiredSignatureError as e:\n return {'status': 'fail',\n 'message': str(e)}, 401\n except Exception as e:\n return {'status': 'fail',\n 'message': str(e)}, 400\n","sub_path":"app/endpoints/product/add_to_cart.py","file_name":"add_to_cart.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"517421903","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport datetime\nimport netCDF4\nfrom matplotlib.colors import Normalize #for color bar \n\n\n#prepare data\nnx, ny = 720, 360\nwest, east, south, north = 0, 360, -90, 90\n#west, east, south, north = -180, 180, -90, 90\nlons = np.linspace( west+180./nx, east-180./nx, nx)\nlats = np.linspace( south+90./ny, north-90./ny, ny)\n#lons[lons > 180] -= 360 ## The west latitude must be negative\n\n# Show the map\nfig = plt.figure()\nax = plt.axes(projection=ccrs.PlateCarree(central_longitude=360.0))\nax.add_feature(cfeature.LAND)\nax.coastlines()\nax.gridlines()\n\n#read Crop_yield\nfilename = 'YLD_Ref.run01_O3_IRR_1st_2020.nc'\nncdata = netCDF4.Dataset(filename, 'r', format='NETCDF4')\nC = ncdata.variables['Crop_yield'][:]\n#C = np.ma.masked_where(C<-100, C)\n#C = np.log(C+1)\nvma = np.max(C)\nvmi = np.min(C)\nprint(vma,vmi)\n#C = np.ma.masked_where(C<0, C)\ncm = plt.cm.get_cmap('Greens')\n\n#show the data\n#m.imshow(C, cmap=cm, origin='lower', norm=Normalize(vmin=vmi, vmax=17500))\nim = ax.imshow(C, transform=ccrs.PlateCarree(), cmap=cm, origin='lower', norm=Normalize(vmin=vmi, 
vmax=17500))\n\n#ax.set_xlabel('longitude')\n#ax.set_ylabel('latitude')\n\nplt.colorbar(im, aspect=50,pad=0.08,orientation='horizontal')\n#m.colorbar(im, label='Crop_yield(kg/ha)', orientation='horizontal')\n\nax.set_title('Crop_yield[kg/ha]')\n","sub_path":"netCDF_cartopy.py","file_name":"netCDF_cartopy.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"459260125","text":"def waga(num):\n w = 0\n\n while num > 1:\n f = 2\n while f <= num:\n if num % f == 0:\n while num % f == 0:\n num //= f\n w += 1\n f += 1\n\n return w\n\n\ndef next(p):\n p[len(p) - 1] += 1\n for i in range(len(p) - 2, -1, -1):\n if p[i + 1] >= 3:\n p[i + 1] = 0\n p[i] += 1\n\n\ndef works(tab, podzial):\n s = [0, 0, 0]\n for i in range(len(tab)):\n s[podzial[i]] += tab[i]\n\n return s[0] == s[1] == s[2]\n\n\ndef zad2(tab):\n tab = [waga(n) for n in tab]\n podzial = [0 for _ in tab]\n\n while podzial[0] < 3:\n if works(tab, podzial):\n return True\n next(podzial)\n\n return False\n\n\nfrom random import shuffle\n\ndata = [64, 6, 30, 1, 1, 2, 2, 2]\nshuffle(data)\n\nprint(zad2(data))\n","sub_path":"zad02.py","file_name":"zad02.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"98800274","text":"import numpy as np\nimport pandas as pd\nimport random\nimport lightgbm as lgb\nfrom pyspark.sql.session import SparkSession\nfrom py_spark.common_func import write_row\nimport pprint\n\nN_FOLDS = 2\nMAX_EVALS = 2\nfile_path = \"C:/0-work document/py-test/00-data/HomeCredit/final_file.csv\"\nout_file = 'C:/0-work document/py-test/00-data/HomeCredit/random_search_trials.csv'\n\n# read data\nspark = SparkSession.builder.master('local').appName('test').getOrCreate()\napp_train = spark.read.csv(file_path, header=True, inferSchema=True).toPandas()\nspark.stop()\n# write head\nwrite_row(out_file, ['score', 'hyper_parameters', 'iteration'])\n# set dataset for lgb\ntrain_set = lgb.Dataset(data=app_train.iloc[:, 1:], label=app_train.iloc[:, 0])\n\nparam_grid = {\n 'boosting_type': ['gbdt', 'goss'],\n 'num_leaves': list(range(20, 150)),\n 'learning_rate': list(np.logspace(np.log10(0.005), np.log10(0.5), base=10, num=1000)),\n 'subsample_for_bin': list(range(20000, 300000, 20000)),\n 'min_child_samples': list(range(20, 500, 5)),\n 'reg_alpha': list(np.linspace(0, 1)),\n 'reg_lambda': list(np.linspace(0, 1)),\n 'colsample_bytree': list(np.linspace(0.6, 1, 10)),\n 'subsample': list(np.linspace(0.5, 1, 100)),\n 'is_unbalance': [True, False]\n}\n\n\ndef objective(hyper_parameters, iteration):\n if 'n_estimators' in hyper_parameters.keys():\n del hyper_parameters['n_estimators']\n # Perform n_folds cross validation\n cv_results = lgb.cv(hyper_parameters, train_set, num_boost_round=10000, nfold=N_FOLDS, early_stopping_rounds=100, metrics='auc', seed=42)\n # results to retun\n score = cv_results['auc-mean'][-1]\n estimators = len(cv_results['auc-mean'])\n hyper_parameters['n_estimators'] = estimators\n return [score, hyper_parameters, iteration]\n\n\ndef random_search(param_grid, max_evals=MAX_EVALS):\n results = pd.DataFrame(columns=['score', 'params', 'iteration'], index=list(range(max_evals)))\n for i in range(max_evals):\n # Choose random hyper_parameters\n hyper_parameters = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}\n hyper_parameters['subsample'] = 1.0 if hyper_parameters['boosting_type'] == 'goss' else 
hyper_parameters['subsample']\n # Evaluate randomly selected hyper_parameters\n eval_results = objective(hyper_parameters, i)\n results.loc[i, :] = eval_results\n write_row(out_file, eval_results)\n # Sort with best score on top\n results.sort_values('score', ascending=False, inplace=True)\n results.reset_index(inplace=True)\n return results\n\n\nrandom_results = random_search(param_grid)\nprint('The best validation score was {:.5f}'.format(random_results.loc[0, 'score']))\nprint('\\nThe best hyper_parameters were:')\npprint.pprint(random_results.loc[0, 'params'])\n","sub_path":"py_spark/random_search_pandas.py","file_name":"random_search_pandas.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"358181919","text":"__author__ = 'Guorong Xu'\n\nimport os\nfrom util import YamlFileMaker\nfrom util import QstatParser\nfrom cfnCluster import ConnectionManager\nimport sys\n\nworkspace = \"/shared/workspace/Pipelines/\"\n#log_dir = \"/shared/workspace/data_archive/DNASeq/{}/logs\"\nlog_dir = \"/shared/workspace/logs/DNASeq/{}\"\n\n## executing WGS pipeline with the specific yaml file\ndef execute(ssh_client, project_name, analysis_steps, s3_input_files_address,\n sample_list, group_name, s3_output_files_address, email):\n yaml_file = project_name + \".yaml\"\n\n global log_dir\n log_dir = log_dir.format(project_name)\n\n print(\"making the yaml file ...\")\n YamlFileMaker.make_yaml_file(yaml_file, project_name, analysis_steps, s3_input_files_address,\n sample_list, group_name, s3_output_files_address, \"hg19\", \"NA\")\n\n print(\"copying yaml files to remote master node...\")\n ConnectionManager.copy_file(ssh_client, yaml_file, workspace + \"yaml_examples\")\n os.remove(yaml_file)\n\n #if not email == \"\":\n\n print(\"executing pipeline...\")\n ConnectionManager.execute_command(ssh_client, \"qsub -o /dev/null -e /dev/null \" + workspace + \"scripts/run.sh \"\n + workspace + \"yaml_examples/\" + yaml_file + \" \" + log_dir + \" \" + \"WGSPipeline.py\")\n\n\n## checking your jobs status\ndef check_status(ssh_client, job_name):\n print(\"checking processing status\")\n qstat = ConnectionManager.execute_command(ssh_client, \"qstat\")\n\n job_ids = QstatParser.get_job_ids(qstat)\n job_details = [ConnectionManager.execute_command(ssh_client, \n \"qstat -j %s\" % x[0]) for x in job_ids]\n\n job_info = [job_ids[x] + [job_details[x]] for x in range(len(job_ids))]\n\n global log_dir\n logs = ConnectionManager.list_dir(ssh_client, log_dir)\n\n QstatParser.parse_qstat(job_info, job_name, logs)\n\n## checking your jobs status\ndef check_jobs_status(ssh_client):\n print(\"checking jobs status\")\n ConnectionManager.execute_command(ssh_client, \"qstat\")\n\n## checking your host status\ndef check_host_status(ssh_client):\n print(\"checking qhost status\")\n ConnectionManager.execute_command(ssh_client, \"qhost\")\n","sub_path":"src/cirrus_ngs/deprecated/dnaSeq/WGSPipelineManager.py","file_name":"WGSPipelineManager.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"443787371","text":"import magic\nimport requests\nimport os\n\n\nclass MissingAuthError(Exception):\n pass\n\n\nclass HttpError(Exception):\n def __init__(self, message, status_code):\n super(HttpError, self).__init__(message)\n\n self.status_code = status_code\n\n\nclass Api():\n def __init__(self, key=None, id_token=None, access_token=None):\n self.key 
= key\n self.id_token = id_token\n self.access_token = access_token\n self.apibase = 'https://api.kuvien.io'\n\n def get_auth_header(self):\n return {\n 'Authorization': \"Bearer {}\".format(self.id_token),\n 'AccessToken': self.access_token\n }\n\n def upload(self, f):\n if not self.key:\n raise MissingAuthError\n\n mime = magic.Magic(mime=True)\n if isinstance(f, str):\n is_fo = False\n else:\n is_fo = True\n\n filename = os.path.basename(f.name) if is_fo else f\n fo = f if is_fo else open(f, 'rb')\n mimetype = mime.from_buffer(fo.read(1024))\n\n form_data = {\n 'file': (filename, fo, mimetype)\n }\n\n headers = {\n 'x-app-key': self.key\n }\n\n resp = requests.post(\n '{}/image/upload'.format(self.apibase),\n files=form_data,\n headers=headers\n )\n\n if not resp.status_code == 200:\n raise HttpError(resp.json()['status'], resp.status_code)\n\n return resp.json()['file']['url']\n\n def domains(self):\n resp = requests.get('{}/domains'.format(self.apibase))\n print(resp.status_code)\n if not resp.status_code == 200:\n raise HttpError(resp.json()['status'], resp.status_code)\n\n return resp.json()['domains']\n\n def list_subdomains(self):\n if not self.id_token or not self.access_token:\n raise MissingAuthError\n\n resp = requests.get(\n '{}/user/domains'.format(self.apibase),\n headers=self.get_auth_header()\n )\n\n if not resp.status_code == 200:\n raise HttpError(resp.json()['status'], resp.status_code)\n\n return resp.json()['domains']\n\n def add_subdomain(self, subdomain, domain):\n if not self.id_token or not self.access_token:\n raise MissingAuthError\n\n payload = {\n 'domain': domain,\n 'subdomain': subdomain\n }\n\n resp = requests.post(\n '{}/domain/add'.format(self.apibase),\n json=payload,\n headers=self.get_auth_header()\n )\n\n if not resp.status_code == 200:\n raise HttpError(resp.json()['status'], resp.status_code)\n\n return resp.json()\n\n def remove_subdomain(self, domainkey):\n if not self.id_token or not self.access_token:\n raise MissingAuthError\n\n payload = {\n 'key': domainkey\n }\n\n resp = requests.post(\n '{}/user/domain/delete'.format(self.apibase),\n json=payload,\n headers=self.get_auth_header()\n )\n\n if not resp.status_code == 200:\n raise HttpError(resp.json()['status'], resp.status_code)\n\n return resp.json()\n\n def regenerate_domainkey(self, domainkey):\n if not self.id_token or not self.access_token:\n raise MissingAuthError\n\n payload = {\n 'key': domainkey\n }\n\n resp = requests.post(\n '{}/user/domain/regenerate'.format(self.apibase),\n json=payload,\n headers=self.get_auth_header()\n )\n\n if not resp.status_code == 200:\n raise HttpError(resp.json()['status'], resp.status_code)\n\n return resp.json()\n\n def list_images(self, page=0):\n if not self.id_token or not self.access_token:\n raise MissingAuthError\n\n if page == 0:\n endpoint = '/user/images'\n else:\n endpoint = '/user/images/{}'.format(page)\n\n resp = requests.get(\n '{}{}'.format(self.apibase, endpoint),\n headers=self.get_auth_header()\n )\n\n if not resp.status_code == 200:\n raise HttpError(resp.json()['status'], resp.status_code)\n\n return resp.json()['images']\n\n def delete_image(self, id):\n if not self.id_token or not self.access_token:\n raise MissingAuthError\n\n payload = {\n 'id': id\n }\n\n resp = request.get(\n '{}/user/image/delete'.format(self.apibase),\n json=payload,\n headers=self.get_auth_header()\n )\n\n if not resp.status_code == 200:\n raise HttpError(resp.json()['status'], resp.status_code)\n\n return resp.json()\n\n\nif __name__ == '__main__':\n 
pass\n","sub_path":"pykuvien/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"331399723","text":"base_uri = 'https://discordapp.com/api'\ntoken = 'MzU1MTAxMzU1NDgyMDIxODg5.DaapFg.d0Ygl-vmyc4Z2z0ChdYSz-b1jsg'\nauth = 'Bot ' + token\n\nfrom tornado import escape\nfrom tornado import gen\nfrom tornado import httpclient\nfrom tornado import httputil\nfrom tornado import ioloop\nfrom tornado import websocket\n\nimport functools\nimport json\nimport time\nimport requests\nimport asyncio\n\n\nclass WebSocketClient():\n def connect(self):\n res = requests.get(base_uri + '/gateway/bot', headers={'Authorization': auth})\n gateway = res.json()['url']\n\n ws_conn = websocket.websocket_connect(gateway)\n ws_conn.add_done_callback(self._connect_callback)\n\n def send(self, data):\n if not self._ws_connection:\n raise RuntimeError('Web socket connection is closed.')\n\n self._ws_connection.write_message(escape.utf8(json.dumps(data)))\n\n def close(self):\n if not self._ws_connection:\n raise RuntimeError('Web socket connection is already closed.')\n\n self._ws_connection.close()\n\n def _connect_callback(self, future):\n if future.exception() is None:\n self._ws_connection = future.result()\n self._on_connection_success()\n self._read_messages()\n else:\n self._on_connection_error(future.exception())\n\n @gen.coroutine\n def _read_messages(self):\n while True:\n msg = yield self._ws_connection.read_message()\n if msg is None:\n self._on_connection_close()\n break\n\n self._on_message(msg)\n\n def _on_message(self, msg):\n \"\"\"This is called when new message is available from the server.\n :param str msg: server message.\n \"\"\"\n\n pass\n\n def _on_connection_success(self):\n \"\"\"This is called on successful connection ot the server.\n \"\"\"\n\n pass\n\n def _on_connection_close(self):\n \"\"\"This is called when server closed the connection.\n \"\"\"\n pass\n\n def _on_connection_error(self, exception):\n \"\"\"This is called in case if connection to the server could\n not established.\n \"\"\"\n\n pass\n\n\nclass DiscordWS(WebSocketClient):\n heartbeat = None\n sequence_number = 0\n connection_status = 0\n\n def _on_message(self, msg):\n data = json.loads(msg)\n print(data)\n\n if data['op'] == 10:\n self.connection_status = 1\n self.heartbeat = data['d']['heartbeat_interval']\n self.send({\n 'op': 2,\n 'd': {\n \"token\": token,\n \"properties\": {\n \"$os\": \"linux\",\n \"$browser\": \"discord-interface\",\n \"$device\": \"discord-interface\"\n },\n \"compress\": False,\n \"large_threshold\": 150,\n \"presence\": {\n \"game\": {\n \"name\": \"Cards Against Humanity\",\n \"type\": 0\n },\n \"status\": \"online\",\n \"since\": 91879201,\n \"afk\": False\n }\n },\n })\n\n\n def _on_connection_success(self):\n print('Connected')\n\n def _on_connection_close(self):\n print('Connection closed')\n self.connection_status = 2\n\n def _on_connection_error(self, exception):\n print('Connection error: %s', exception)\n\n\n async def send_heartbeat(self):\n while self.connection_status == 0:\n await asyncio.sleep(2.5)\n\n while self.connection_status != 2:\n if self.connection_status == 1:\n print('Heartbeating')\n self.send({\n 'op': 1,\n 'd': None,\n })\n\n await asyncio.sleep(self.heartbeat / 1000)\n\n\ndef main():\n client = DiscordWS()\n client.connect()\n\n try:\n loop = asyncio.get_event_loop()\n loop.create_task(client.send_heartbeat())\n loop.run_forever()\n 
except KeyboardInterrupt:\n client.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"11767288","text":"# Find the sum of all the positive integers which cannot be written as the sum\n# of two abundant numbers.\n# We know that all integers greater than 28123 can be written as the sum of two abundant numbers.\n\nfrom itertools import chain\n\n# Function to check if a number is abundant\ndef isAbundant(x):\n # As we only need proper factors(i.e. not the numbers itself), we check for factors\n # between 1 and the number and later add '1' to the sum\n factors = set(chain.from_iterable([i, x//i] for i in range(2, int(x**0.5)+1) if not x%i))\n if sum(factors) + 1 > x:\n return True\n return False\n\n# Finding all abundants under 28124\nabundants = set([i for i in range(1, 28124) if isAbundant(i)])\n\n# Function to check if a number can be return as the sum of two abundant numbers\ndef isAbundable(x):\n if x < 24:\n return False\n for i in range(12, x):\n if i in abundants and x-i in abundants:\n return True\n return False\n\n# Storing the sum of non-abundant numbers\nanswer = 0\n\n# Searching for non-abundant numbers\nfor i in range(28124):\n if not isAbundable(i):\n answer += i\n\n# Output\nprint( \"The sum of all positive integers that are abundant is %d.\" % (answer) )","sub_path":"Python/Problem_023.py","file_name":"Problem_023.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"423494434","text":"#!/usr/bin/env python\n#----------------\n#Name: Dice_Roll_Gen\n#Author: Michael Mykolyshyn\n#Date of creation: April 11, 2016\n\"\"\"Purpose is to allow input of a number of an size of dice to then print a list of results\"\"\"\n#----------------\nfrom random import randrange\nmaster_counter = 1 #used for the loop of the program\n\nwhile master_counter == 1: #start a loop\n #Initalise Variables and moduals\n \"\"\"Use randrage to create random numbers (to simulate dice rolls). dice_roll_dict is a dictionary used to hold dice size and ammounts. dice_values is used to old lists of dice roll results.\"\"\"\n dice_roll_dict = {}\n dice_values = []\n\n #----------------\n #Define functions\n def dice_gen(dictionary):\n \"\"\"Takes and imput for side and number and then makes 2 lists (OR A DICTIONARY) that lists/links dice sides to dice number. It will return the lists/Dictonary to Main.\"\"\"\n \"\"\"counter1 is used to keep looping through dice attribute intake. number_of_dice is used to both start and iput loop and determine dice size. side is used to mark dice size. number is used to makr dice ammount. side_num_dict is used to hold side and number in a dictionary input structure.\"\"\"\n counter1 = 1\n number_of_dice = 1\n print(\"Please input the ammount and size of dice to be rolled. 
To end the input please select 0.\")\n while counter1 == 1:\n print(\"----\\nDice set #{}.\".format(number_of_dice))\n side = input(\"Sides of the Dice:\")\n number = input(\"Number of the Dice:\")\n if int(side) > 0 and int(number) > 0:\n side_num_dict = {side: number}\n dictionary.update(side_num_dict)\n number_of_dice += 1\n else:\n counter1 = 0\n return dictionary\n\n def num_gen(dictionary, list):\n \"\"\"takes input (lists or dictionary) and will create an operation that will generate numbers in that range (sides) a nuber of times (number) into a list to be displayed. This lists elemens will be inviduals list of each dice types outputs.\"\"\"\n \"\"\"element_list is a list used to hold values of dice resuts. count is used to loop through appending random dice rolls.\"\"\"\n for side, number in dictionary.items():\n element_list = []\n element_list.append(\"For D{}:\".format(side))\n count = int(number)\n while count > 0:\n element_list.append(randrange(1, int(side)+1))\n count -= 1\n list.append(element_list)\n return list\n\n def display_results(dictionary, list):\n \"\"\"takes a dictionary and list input and prints them in an approriate format for this script.\"\"\"\n print(\"----\\nYou selected the folliwng dice:\")\n for side, number in dictionary.items():\n print(number + \"D\" + side)\n print(\"----\\nThey outputted the following values:\")\n for output in list:\n print(output[0], output[1:])\n\n #----------------\n #Run script\n print(\"---Dice Roll Genderator---\\nInput values for dice saides and numbers and have a list of rolls displayed for you!\")\n dice_gen(dice_roll_dict)\n print(dice_roll_dict)\n num_gen(dice_roll_dict, dice_values)\n\n #----------------\n #Print results\n display_results(dice_roll_dict, dice_values)\n question = input(\"Another set of rolls? (n)\")\n if question == 'n':\n master_counter = 0\n#----------------\nprint('#----')\n#----------------\n#----------------\n#----------------\n#----------------\n#----------------\n#----------------\n#----------------\n#----------------\n#----------------\n#----------------\n#----------------\n","sub_path":"Dice_Roll_Gen.py","file_name":"Dice_Roll_Gen.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"539418006","text":"\"\"\"\nImplements a battleship strategy as follows:\nStart with the size 5 ship, and see every possible orientation\n Guess based on a probability distrib. that is the superposition of these possibilities\nOnce there is a hit, go into hunting mode:\n 1. Still generate superpositions, but prioritize positions that incorporate many hits\n 2. Only guess squares that are next to a hit square- that way you guess the edge last\n 3. Once it's sunk, go backward N squares to invalidate those squares\n 4. Remove that ship from the list and search for the next largest ship\nKeep track of shot count and hit count\nEnds when hit count = 2+3+3+4+5\n\nCitations:\n1. 
http://www.datagenetics.com/blog/december32011/\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nshowPlot = True\nxDim = 10\nyDim = 10\nD = xDim * yDim # input dimensionality: 10x10 grid\n\ndef generateProbabilities(length): \n prob = np.zeros(D, dtype = np.float64)\n\n #keep track of max number of hits\n #if the new number is higher, zero the array and then increment as usual\n #once you're finished, choose a random spot, and if nhits > 0, keep choosing until nextToHit = 1\n maxHits = 0\n \n #horz first\n for i in range(0, xDim - length + 1): #go from 0 to xDim-length, inclusive\n for j in range(0, yDim):\n action = j * xDim + i\n hits = isValidHorz(action, length)\n\n if(hits != -1):\n if(hits == maxHits):\n for i2 in range(0,length):\n prob[action] = prob[action] + 1\n action = right(action)\n elif(hits > maxHits):\n maxHits = hits\n prob = np.zeros(D, dtype = np.float64)\n for i2 in range(0,length):\n prob[action] = prob[action] + 1\n action = right(action)\n\n for i in range(0, xDim):\n for j in range(0, yDim - length + 1):\n action = j * xDim + i\n hits = isValidVert(action, length)\n \n if(hits != -1):\n if(hits == maxHits):\n for i2 in range(0,length):\n prob[action] = prob[action] + 1\n action = down(action)\n elif(hits > maxHits):\n maxHits = hits\n prob = np.zeros(D, dtype = np.float64)\n for i2 in range(0,length):\n prob[action] = prob[action] + 1\n action = down(action)\n \n return maxHits, prob/sum(prob)\n\n#if miss, return -1, if hit return # of hits, if neither return 0\ndef isValidHorz(action, length):\n isValid = 0\n for i in range(0,length):\n if(action == -1 or obs[action] == -1):\n return -1\n elif(obs[action] == 1):\n isValid = isValid + 1\n action = right(action)\n return isValid\n\ndef isValidVert(action, length):\n isValid = 0\n for i in range(0,length):\n if(action == -1 or obs[action] == -1):\n return -1\n elif(obs[action] == 1):\n isValid = isValid + 1\n action = down(action)\n return isValid\n\ndef isValidHorz2(action, length):\n isValid = 0\n for i in range(0,length):\n if(action == -1 or obs[action] == -1):\n return -1\n elif(obs[action] == 1):\n isValid = isValid + 1\n action = left(action)\n return isValid\n\ndef isValidVert2(action, length):\n isValid = 0\n for i in range(0,length):\n if(action == -1 or obs[action] == -1):\n return -1\n elif(obs[action] == 1):\n isValid = isValid + 1\n action = up(action)\n return isValid\n\n#action is the edge of the ship; length is the size\n#there should only be one that fits, so just check all four directions\n#this function requires that action is the edge of a sunk ship\ndef sunk(action, length):\n ships[length] = ships[length] - 1 #update the list of ships\n \n if(isValidHorz(action,length) == length): #all hits\n for i in range(0,length):\n obs[action] = -1 #invalidate the square\n action = right(action) #go to next square\n elif(isValidVert(action,length) == length):\n for i in range(0,length):\n obs[action] = -1 #invalidate the square\n action = down(action) #go to next square\n elif(isValidHorz2(action,length) == length):\n for i in range(0,length):\n obs[action] = -1 #invalidate the square\n action = left(action) #go to next square\n elif(isValidVert2(action,length) == length):\n for i in range(0,length):\n obs[action] = -1 #invalidate the square\n action = up(action) #go to next square\n \n #print(obs)\n\ndef nextToHit(action):\n nextToHit = False\n if((left(action) != -1 and obs[left(action)] == 1) or\n (right(action) != -1 and obs[right(action)] == 1) or\n (up(action) != -1 and 
obs[up(action)] == 1) or\n (down(action) != -1 and obs[down(action)] == 1)):\n nextToHit = True\n return nextToHit\n\ndef left(action):\n left = action - 1\n if(action % xDim == 0):\n left = -1\n return left\n\ndef right(action):\n right = action + 1\n if(action % xDim == xDim - 1):\n right = -1\n return right\n\ndef up(action):\n up = action - yDim\n if(action < xDim):\n up = -1\n return up\n\ndef down(action):\n down = action + yDim\n if(action >= yDim * xDim - xDim):\n down = -1\n return down\n\ndef chooseAction(p, isHit): #don't choose the same action twice\n #print('cA')\n a = np.random.choice(a=len(p), p=p)\n if(isHit):\n while(nextToHit(a) == False or obs[a] != 0):\n a = np.random.choice(a=len(p), p=p)\n else:\n while(obs[a] != 0):\n a = np.random.choice(a=len(p), p=p)\n return a\n\ndef largestShip(ships):\n for i in range(0, len(ships)-1):\n length = len(ships)-1 - i\n if(ships[length] > 0):\n return length\n #print(\"Error: no ships left\")\n return -1\n\nobs = np.zeros(D, dtype = np.int) #-1 = miss, 0 = unknown, 1 = hit\n\ndone = False\n\nturncount = 0\nhitcount = 0\n\nships = [0, 0, 1, 2, 1, 1];\nprint(\"Looking for size\", largestShip(ships), \"ship\")\n\nwhile(hitcount != 2+3+3+4+5):\n turncount = turncount + 1\n \n maxHits, p = generateProbabilities(largestShip(ships))\n \n if(showPlot):\n plt.imshow(p.reshape(xDim,yDim), cmap='hot', interpolation='nearest')\n plt.show()\n \n action = chooseAction(p, maxHits > 0)\n print(\"Turncount:\", turncount, \", x =\", action % 10 + 1, \", y =\", int(action / 10) + 1)\n \n result = input(\"Result? ('H', 'M', or 'S #')\") #H, S N, M\n while(result != 'M' and result != 'H' and result[0] != 'S'):\n result = input(\"Result? ('H', 'M', or 'S #')\") #H, S N, M\n \n if(result == \"M\"):\n obs[action] = -1\n elif(result == \"H\"):\n obs[action] = 1\n hitcount = hitcount + 1\n elif(result[0] == \"S\"):\n obs[action] = 1\n hitcount = hitcount + 1\n length = int(result[2])\n sunk(action, length)\n \n if(largestShip(ships) != -1):\n print(\"Looking for size\", largestShip(ships), \"ship\")\n\nprint(\"Done!\")\n","sub_path":"battleship-hardcode.py","file_name":"battleship-hardcode.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"291678120","text":"from pychess import *\n\nboard = Board()\np1, p2 = Player(True), Player(False)\ngame = Game(p1, p2, board)\ngame.run()\n\n# b = Board()\n# f = b.boxes[0][0]\n# print(b)\n# f.piece = None\n# print(b)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"545144319","text":"\"\"\"\nThis is the main source code for Bob, a bot made in Python similar to Alexa.\nIt can get information about people, places, things, or events from Wikipedia, play videos,\nget words from Jesus, and get the time. It can also tell jokes.\n\nBasic Alexa made with video \"Don't Buy Alexa! Build your own. Create Virtual Assistant with Python\". 
Link: https://www.youtube.com/watch?v=AWvsXxDtEkU\nJesus' words found here: https://www.christsbondservants.org/Church_Alive/wys-Ch%20The%20Complete%20Sayings%20of%20Jesus.ArthurHinds.pdf\n(From book \"The Complete Sayings of Jesus\" by Arthur Hinds.)\n\"\"\"\n\n\n#######################\n## Imports ##\n#######################\nimport speech_recognition as sr\nimport pyaudio\nimport pyttsx3\nimport pywhatkit\nimport datetime\nimport wikipedia\nimport pyjokes\nfrom jesuswords import fulllist\n\n\"\"\"\n1. Add already answered questions to a dataset\n a. Make Jesus topics individual files in a folder\n2. Add something like \"Should I save this?\"\n3. Query a database using the 5 W's and how\n4. When no one is around, be asleep, when someone is around, be awake\n5. Eventually make it a robot (hopefully)\n\nOther things\n\nLet the computer remove history from over 2 years ago automatically.\n\"\"\"\n\n\"\"\"\nNotes:\nFormat for text file:\n\nquestion: who is jesus' questionend\n\nanswer: Jesus (c. 4 BC – AD 30 / 33), also referred to as Jesus of Nazareth or Jesus Christ, was a first-century Jewish preacher and religious leader.' answerend\n\nquestion: what was the 1993 storm of the century' questionend\n\nanswer: The 1993 Storm of the Century (also known as the 93 Superstorm, The No Name Storm, or the Great Blizzard of '93/1993) was a large cyclonic storm that formed over the Gulf of Mexico on March 12, 1993.' answerend\n\nWill split the questions and answers into dictionaries like this:\n\naqdict = {\"who is jesus'\": \"Jesus (c. 4 BC – AD 30 / 33), also referred to as Jesus of Nazareth or Jesus Christ, was a first-century Jewish preacher and religious leader.'\"...}\nSo that it can be accessed by the program.\nkeylist = list(aqdict.list()))\nif question in keylist:\n talk(aqdict[question]) # or something like that\nelse:\n pass # will get it from the internet\n\"\"\"\n\nlistener = sr.Recognizer()\nengine = pyttsx3.init()\n#voices = engine.getProperty('voices')\n#engine.setProperty('voice', voices[1].id)\n######################\n## Functions ##\n######################\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n \ndef take_command(bobneeded = True):\n try:\n with sr.Microphone() as source:\n print('listening...')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if bobneeded:\n if 'bob' in command:\n command = command.replace('bob', '')\n print(command)\n return command\n else:\n talk(\"Would you like me to answer that?\")\n yn = take_command(False)\n if \"yes\" in yn.lower():\n print(command)\n return command\n else:\n talk(\"Ok.\")\n take_command(False)\n else:\n command = command.replace('bob', '')\n print(command)\n return command\n except:\n pass\n\ndef getansweredquestions(question):\n pass\n\ndef run_alexa():\n possibleverses = {}\n command = take_command()\n print(command)\n if 'play' in command:\n song = command.replace('play', '')\n print('playing ' + song)\n talk('playing ' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n print(time)\n talk(\"Current time is \" + time)\n elif 'who is' in command:\n person = command.replace('who is', '')\n if \"jesus\" in person.lower():\n info=wikipedia.summary(person, 2)\n else:\n info=wikipedia.summary(person, 1)\n with open(\"answeredquestions.txt\", \"a\", encoding=\"utf-8\") as aq:\n aq.write(\"question: \")\n aq.write(str(command.encode(\"utf-8\"))) # str\n aq.write(\"\\n\\n\")\n aq.write(\"answer: 
\")\n aq.write(str(info.encode(\"utf-8\"))) # str\n aq.write(\"\\n\")\n with open(\"answeredquestions.txt\", \"r\", encoding=\"utf-8\") as aq1:\n wo_b1 = aq1.read()\n wo_b = str(wo_b1.replace(\"b'\", \"\"))\n wo_b = str(wo_b.replace(\"\\\\xe2\\\\x80\\\\x93\", \"-\"))\n lastchar = len(wo_b1) - 1\n wo_b1 = list(wo_b1)\n wo_b1[lastchar] = \"\"\n wo_b1 = \"\".join(wo_b1)\n with open(\"answeredquestions.txt\", \"w\", encoding=\"utf-8\") as aq2:\n aq2.write(wo_b)\n print(type(wo_b))\n print(info)\n talk(info)\n elif 'what is' in command:\n thing = command.replace('what is', '')\n info = wikipedia.summary(thing, 1)\n with open(\"answeredquestions.txt\", \"a\", encoding=\"utf-8\") as aq:\n aq.write(str(command.encode(\"utf-8\")))\n aq.write(str(info.encode(\"utf-8\")))\n print(info)\n talk(info)\n elif 'who was' in command:\n person = command.replace('who was', '')\n if \"jesus\" in person.lower():\n info=wikipedia.summary(person, 2)\n else:\n info = wikipedia.summary(person, 1)\n with open(\"answeredquestions.txt\", \"a\", encoding=\"utf-8\") as aq:\n aq.write(str(command.encode(\"utf-8\")))\n aq.write(str(info.encode(\"utf-8\")))\n print(info)\n talk(info)\n elif 'what was' in command:\n thing = command.replace('what was', '')\n info = wikipedia.summary(thing, 1)\n with open(\"answeredquestions.txt\", \"a\", encoding=\"utf-8\") as aq:\n aq.write(str(command.encode(\"utf-8\")))\n aq.write(str(info.encode(\"utf-8\")))\n print(info)\n talk(info)\n elif 'tell me about' in command:\n thing = command.replace('tell me about', '')\n info = wikipedia.summary(thing, 5)\n print(info)\n talk(info)\n elif 'tell me a lot about' in command:\n thing = command.replace('tell me a lot about', '')\n info = wikipedia.summary(thing, 10)\n print(info)\n talk(info)\n elif \"tell me some information about\" in command:\n talk(\"Would you like it in sections? Yes or no.\")\n sectionsyn = take_command(False)\n if \"yes\" in sectionsyn.lower():\n def listsections():\n thing = command.replace('tell me some information about', '')\n talk(\"Sections are:\")\n print(wikipedia.page(thing).sections)\n talk(wikipedia.page(thing).sections)\n talk(\"Which section would you like to read?\")\n whichsection = take_command(False)\n if str(whichsection.strip()[0].upper() + whichsection[1:]) in wikipedia.page(thing).sections: # or simply .capitalize()\n print(whichsection.strip()[0].upper() + whichsection[1:])\n talk(wikipedia.page(thing).section(whichsection))\n else:\n talk(\"I didn't get that statement, or it is not a part of the sections. Please say it again.\")\n listsections()\n listsections()\n else:\n talk('How much?')\n howmuch = take_command(False)\n try:\n print(wikipedia.summary(thing, int(howmuch)))\n talk(wikipedia.summary(thing, int(howmuch)))\n except:\n talk(\"I didn't hear that well. 
Please say the command again.\")\n run_alexa()\n elif 'tell me a joke' in command:\n talk(pyjokes.get_joke())\n elif command == \" good night\":\n talk(\"Good night.\")\n quit()\n elif command == \" goodbye\" or command == \" bye\" or command == \"by\" or command == \" by\":\n talk(\"bye\")\n quit()\n elif 'what did jesus say about' in command:\n topicjesus = command.replace('what did jesus say about ', '')\n for i in range(0, len(fulllist) - 1):\n if topicjesus in fulllist[i]:\n possibleverses[fulllist[i]]=fulllist[i].lower().count(topicjesus.lower())\n allvals = list(possibleverses.values())\n allkeys = list(possibleverses.keys())\n \"\"\"try:\"\"\"\n largestmention = max(allvals)\n print(allkeys[allvals.index(largestmention)])\n talk(allkeys[allvals.index(largestmention)])\n talk(\"Would you like me to continue?\")\n yon=take_command(False)\n if \"yes\" in yon:\n newcontinue = allkeys[allvals.index(largestmention)]\n num=fulllist.index(newcontinue)\n print(num)\n howmanyverses=0\n for i in range(num, len(fulllist)-1):\n talk(fulllist[i])\n if howmanyverses==6:\n talk(\"Would you like to continue? Yes or no.\")\n continueyn = take_command(False)\n if \"yes\" in continueyn:\n howmanyverses=0\n continue\n else:\n talk(\"Ok.\")\n take_command()\n else:\n howmanyverses+=1\n \"\"\"print(allvals.index(largestmention-1))\n print(newcontinue)\n talk(newcontinue)\"\"\"\n elif \"no\" in yon:\n talk(\"Ok.\")\n take_command()\n \"\"\"except:\n talk(\"Jesus said nothing about \" + topicjesus)\"\"\"\n elif 'what did jesus say about being' in command:\n topicjesus = command.replace('what did jesus say about being ', '')\n talk(topicjesus)\n elif \"clear my history\" in command:\n with open(\"answeredquestions.txt\", \"w\", encoding=\"utf-8\") as aq2:\n aq2.write(\"\")\n talk(\"History cleared.\")\n else:\n talk(\"Please say the command again, if I didn't hear well, or try a different one, if I can't respond to it.\")\n run_alexa()\n\n######################\n## Output ##\n######################\n\n\ntalk('I\\'m Bob. 
What do you want from me?')\n\nwhile True:\n run_alexa()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"492795873","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('content', models.TextField()),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('last_modified', models.DateTimeField(auto_now=True)),\n ('is_approved', models.BooleanField(default=False)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='word_entry_comments', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Language',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('name', models.CharField(unique=True, max_length=70)),\n ('location', models.CharField(max_length=70)),\n ('is_tribal', models.BooleanField(default=True)),\n ],\n ),\n migrations.CreateModel(\n name='Picture',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('description', models.CharField(max_length=155)),\n ('file_name', models.CharField(max_length=255)),\n ('display_order', models.PositiveSmallIntegerField(default=0)),\n ],\n ),\n migrations.CreateModel(\n name='Tag',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('name', models.CharField(unique=True, max_length=200)),\n ('display', models.CharField(max_length=200)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('last_modified', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='WordClass',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('name', models.CharField(unique=True, max_length=255)),\n ('display_order', models.PositiveSmallIntegerField(default=0)),\n ],\n ),\n migrations.CreateModel(\n name='WordContent',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('content', models.TextField()),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('last_modified', models.DateTimeField(auto_now=True)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='word_entries_content')),\n ],\n ),\n migrations.CreateModel(\n name='WordEntry',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('relative_url', models.CharField(unique=True, max_length=155)),\n ('word', models.CharField(max_length=100)),\n ('short_description', models.CharField(max_length=155)),\n ('audio_file', models.CharField(blank=True, null=True, max_length=255)),\n ('phonetics', models.CharField(blank=True, null=True, max_length=255)),\n ('template', models.CharField(max_length=50, default='word-entry-default.html')),\n ('is_published', models.BooleanField(default=False)),\n ('access_count', models.BigIntegerField(default=0)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('last_modified', 
models.DateTimeField(auto_now=True)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='word_entries')),\n ('languages', models.ManyToManyField(to='dictionary.Language', related_name='word_entries')),\n ('tags', models.ManyToManyField(to='dictionary.Tag', related_name='tags')),\n ('word_classes', models.ManyToManyField(to='dictionary.WordClass', related_name='word_entries')),\n ('word_content', models.ForeignKey(to='dictionary.WordContent', related_name='word_entries', null=True)),\n ('words_related', models.ManyToManyField(to='dictionary.WordEntry', related_name='word_related_to')),\n ],\n ),\n migrations.AddField(\n model_name='picture',\n name='word_content',\n field=models.ForeignKey(to='dictionary.WordContent', related_name='pictures'),\n ),\n migrations.AddField(\n model_name='comment',\n name='word_entry',\n field=models.ForeignKey(to='dictionary.WordEntry', related_name='word_entry_comments'),\n ),\n ]\n","sub_path":"dictionary/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"319069829","text":"from django.urls import path\nfrom . import views\nurlpatterns = [\n path('', views.exam_admin_home, name='exam_admin_home'),\n path('add_question/', views.new_question, name='add_question'),\n path('view_questions/', views.view_question, name='view_question'),\n path('edit_questions/', views.edit_questions, name='edit_questions'),\n path('delete_questions/', views.delete_questions, name='delete_questions'),\n path('students/', views.students, name='student_list'),\n path('answerd_questions/',views.answerd_questions,name='answerd_questions'),\n]\n\n","sub_path":"admin_dash/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"471813081","text":"#!/usr/bin/python\n\nimport urwid\n\n\"\"\"\nNOTES\n-----\nThis module builds the widget to show the structure information for a table.\n\n\"\"\"\n\ndef show_table_structure(user_info, tablename):\n #build out the table that shows the table structure information\n #get table info\n table_info = user_info.db_obj.gettableinfo(user_info.db_conn, tablename)\n\n #building out list of column names\n table_col_names = []\n for x in table_info:\n table_col_names.append(urwid.Text(x[0]))\n\n #building out list of data types\n table_data_type = []\n for x in table_info:\n table_data_type.append(urwid.Text(x[1]))\n\n #building out list of character lengths\n table_char_length = []\n for x in table_info:\n if x[2] == None:\n table_char_length.append(urwid.Text(u\" \"))\n else:\n table_char_length.append(urwid.Text(str(x[2])))\n\n #building out list of null\n table_null = []\n for x in table_info:\n if x[3] == None: \n table_null.append(urwid.Text(u\" \"))\n else:\n table_null.append(urwid.Text(x[3]))\n\n #building out list of default values\n table_default = []\n for x in table_info:\n if x[4] == None: \n table_default.append(urwid.Text(u\" \"))\n else:\n table_default.append(urwid.Text(x[4]))\n\n #widgets for the table info table\n col_names = urwid.LineBox( urwid.Pile(table_col_names)\n , title=\"Name\", rline=' ', trcorner=u'\\u2500', brcorner=u'\\u2500')\n datatypes = urwid.LineBox( urwid.Pile(table_data_type)\n , title=\"Type\", rline=' ', trcorner=u'\\u2500', brcorner=u'\\u2500')\n charlengths = urwid.LineBox( urwid.Pile(table_char_length)\n , title=\"Length\", 
rline=' ', trcorner=u'\\u2500', brcorner=u'\\u2500')\n nulls =urwid.LineBox( urwid.Pile(table_null)\n , title=\"Is Null\", rline=' ', trcorner=u'\\u2500', brcorner=u'\\u2500')\n defaults = urwid.LineBox( urwid.Pile(table_default)\n , title=\"Default\")\n\n text_1 = urwid.Text(u\"Here is the table structure information. If the table does not look aligned, then please make your terminal wider.\")\n\n #main widget for view\n structure_view = urwid.Padding( urwid.Pile( [\n urwid.Divider(),\n text_1,\n urwid.Divider(),\n urwid.Text([u\"Structure for table: \", tablename]),\n urwid.Divider(),\n urwid.Columns([\n col_names,\n datatypes,\n charlengths,\n nulls,\n defaults\n ])\n ])\n , left=2, right=2)\n\n structure_view = urwid.WidgetPlaceholder(structure_view)\n\n #return the widget created that holds all the structure data\n return structure_view\n","sub_path":"viewmydb/TableStructure.py","file_name":"TableStructure.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"523636241","text":"#http://nullege.com/codes/search/openpyxl.workbook.Workbook.get_sheet_by_name\n\nimport openpyxl\n\nwb=openpyxl.load_workbook(\"CSVFile_11kb.xlsx\")\n\n#print(type(wb))\n'''\n#====================================\nsheet_names=wb.get_sheet_names() # print sheet names\n\nprint(type(sheet_names))\n\nfor name in sheet_names:\n print(name)\n#=====================================\n\n#Ex:-1\n#wb.get_sheet_by_name Exampls\n\nsheet_names=wb.get_sheet_names() # print sheet names\n\nprint(type(sheet_names))\n\nfor sheet_name in sheet_names:\n name=wb.get_sheet_by_name(sheet_name)\n print(\"name\",name)\n print(name.title)\n#=======================================\n'''\n\n\n\nshetnames=wb.get_sheet_by_name(\"Sheet\")\n\nprint(type(shetnames))\n\nrows=shetnames.max_row\ncolumns=shetnames.max_column\nprint(rows,columns)\n'''\nfor c in range(1,rows):\n d=shetnames.cell(row=1,column=c)\n print(d.value)\n\nprint(\"\\n\\n\")\n\nfor r in range(1,columns):\n d=shetnames.cell(row=r,column=3)\n print(d.value)\n'''\nfor r in range(1,rows+1):\n for c in range (1,columns+1):\n d=shetnames.cell(row=r,column=c)\n print('%-30s'%d.value,end=' ')\n print(' ')","sub_path":"Excel/examples/openexel_file.py","file_name":"openexel_file.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"556783911","text":"import math\nimport numpy as np\nimport chainer\nfrom src.function import conv_bn\nfrom src.function.spectral_norm_exact import spectral_norm_exact\nfrom src.function.spectral_norm import SpectralNormFunction\n\n\nclass Convolution2DBN(chainer.links.Convolution2D):\n \"\"\"This is a combined layer conv-2d + batch-norm.\n This also calculates its spectral norm in LMT-mode.\n\n \"\"\"\n\n def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,\n initialW=None, initial_bias=None,\n decay=.9, eps=2e-5, nobias=True, **kwargs):\n super(Convolution2DBN, self).__init__(in_channels=in_channels,\n out_channels=out_channels,\n ksize=ksize, stride=stride,\n pad=pad, nobias=True,\n initialW=initialW, initial_bias=initial_bias,\n **kwargs)\n self.lipschitz = None\n self.factor = None\n self.u = np.random.random((1, self.out_channels)).astype(np.float32) - .5\n self.register_persistent('u')\n\n self.avg_mean = np.zeros(out_channels, dtype=np.float32)\n self.register_persistent('avg_mean')\n self.avg_var = np.zeros(out_channels, dtype=np.float32)\n 
self.register_persistent('avg_var')\n self.decay = decay\n self.eps = eps\n self.u = np.random.random((1, self.out_channels)).astype(np.float32) - .5\n self.register_persistent('u')\n\n with self.init_scope():\n self.gamma = chainer.Parameter(np.ones((out_channels,), np.float32))\n self.beta = chainer.Parameter(np.zeros((out_channels,), np.float32))\n\n def __call__(self, x):\n x_in, t, l = x\n if chainer.config.train:\n self.lipschitz = None\n\n # convolution\n x = super(Convolution2DBN, self).__call__(x_in)\n\n if self.factor is None:\n #\n # calculation of \\|U\\|_2\n #\n k_out, k_in, k_h, k_w = self.W.shape\n s_h, s_w = self.stride\n p_h, p_w = self.pad\n # x_in's shape is (batchsize, k_in, h, w)\n assert x_in.shape[1] == k_in\n self.factor = math.ceil(min(k_h, x_in.shape[2] + p_h * 2 - k_h + 1) / s_h)\n self.factor *= math.ceil(min(k_w, x_in.shape[3] + p_w * 2 - k_w + 1) / s_w)\n self.factor = math.sqrt(self.factor)\n\n #\n # rescaling factor of Parseval networks\n # According to the author, this factor is not essential\n #\n self.parseval = 1 / math.sqrt(k_h * k_w)\n\n l = l * self.factor\n\n # rescaling of Parseval networks\n if getattr(chainer.config, 'parseval', False):\n x = x * self.parseval\n l = l * self.parseval\n\n # ensure that gamma >= 0\n gamma = chainer.functions.absolute(self.gamma)\n beta = self.beta\n\n # batch norm\n if chainer.config.train:\n func = conv_bn.BatchNormalizationFunction(\n self.eps, self.avg_mean, self.avg_var, self.decay, self.u)\n x, lipschitz = func(x, gamma, beta, self.W)\n\n self.avg_mean[:] = func.running_mean\n self.avg_var[:] = func.running_var\n else:\n mean = chainer.variable.Variable(self.avg_mean)\n var = chainer.variable.Variable(self.avg_var)\n x, lipschitz = conv_bn.fixed_batch_normalization(\n x, gamma, beta, mean, var, self.eps, self.W, self.u)\n\n if getattr(chainer.config, 'lmt', False) and getattr(chainer.config, 'exact', False):\n assert not chainer.config.train\n if self.lipschitz is None:\n W = (gamma.data / self.xp.sqrt(var.data + self.eps)\n ).reshape((self.W.shape[0], 1)) * self.W.data.reshape((self.W.shape[0], -1))\n self.lipschitz = spectral_norm_exact(W)\n l = l * self.lipschitz\n else:\n l = l * lipschitz\n\n return x, t, l\n","sub_path":"src/link/conv_bn.py","file_name":"conv_bn.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"287909956","text":"from . 
import *\nfrom app.irsystem.models.search import *\n\nimport pandas as pd\nimport numpy as np\nimport ssl\nimport requests\nimport json\n\nproject_name = \"Similar Singer\"\nnet_id = \"Alyssa Gao (ag2496), Celine Choo (cc972), Mahak Bindal (mb2359), Jerilyn Zheng (jjz67), Jasper Liang (jxl8)\"\n\nssl._create_default_https_context = ssl._create_unverified_context\n\ntf_idf = pd.read_csv(\"https://raw.githubusercontent.com/chynu/cs4300sp2021-ag2496-cc972-mb2359-jjz67-jxl8/master/data/processed/tfidf_mat_compressed.csv\")\nartist_details = pd.read_csv(\"removed_dups_new.csv\")\njaccard = pd.read_csv(\"https://raw.githubusercontent.com/chynu/cs4300sp2021-ag2496-cc972-mb2359-jjz67-jxl8/master/data/processed/jaccard.csv\",index_col=[0]).to_numpy()\n\nwith open(\"artist_descriptions.json\") as file:\n artist_album_descriptions = json.load(file)\n\nartist_names = [i.replace('\"', '') for i in tf_idf.values[:,0]]\nartist_name_to_index = {artist_names[i]: i for i in range(len(artist_names))}\nmatrix = tf_idf.to_numpy()[:,1:]\n\ndef get_artist_album_description(artist_name):\n \"\"\" Returns whole description of [artist_name], 'not found' if rating DNE.\n Description contains latest album ('album_title'), rating ('rating'), and review ('review')\n\n Parameters: {artist_name: String}\n Returns: Dict\n \"\"\"\n try:\n return artist_album_descriptions[artist_name]\n except:\n return {'album_title':'', 'rating':0, 'review':''}\n\ndef get_artist_id(artist_name):\n \"\"\" Returns id of [artist_name]'s profile.\n \n Parameters: {artist_name: String}\n Returns: String\n \"\"\"\n try:\n return artist_details['Artist ID'][artist_name_to_index[artist_name]]\n except:\n return \"no id\"\n \ndef get_artist_follower_count(artist_name):\n \"\"\" Returns follower count of [artist_name].\n \n Parameters: {artist_name: String}\n Returns: String\n \"\"\"\n return artist_details['followers'][artist_name_to_index[artist_name]]\n\ndef get_artist_description(artist_name):\n \"\"\" Returns description of [artist_name].\n \n Parameters: {artist_name: String}\n Returns: String\n \"\"\"\n try:\n followers = get_artist_follower_count(artist_name)\n return artist_name + \" has \" + str(followers) + \" followers on Spotify.\"\n except:\n return \"Couldn't find additional details on \" + artist_name + \". 
\"\n\ndef get_artist_genres(artist_name):\n \"\"\" Returns the genres of [artist_name].\n \n Parameters: {artist_name: String}\n Returns: List\n \"\"\"\n genres = artist_details['genres'][artist_name_to_index[artist_name]]\n return genres.translate(str.maketrans('','','[]\\'')).split(', ')\n\ndef get_artist_photo(artist_name):\n \"\"\" Returns url of [artist_name]'s photo.\n \n Parameters: {artist_name: String}\n Returns: String\n \"\"\"\n try:\n return artist_details['Image URL'][artist_name_to_index[artist_name]]\n except:\n return \"https://www.pngitem.com/pimgs/m/148-1487614_spotify-logo-small-spotify-logo-transparent-hd-png.png\"\n\ndef rocchio_update(query, query_obj, input_doc_mat=matrix, \\\n artist_name_to_index=artist_name_to_index,a=.3, b=.3, c=.3):\n \"\"\" Returns a vector representing the modified query vector.\n\n Note:\n Be sure to handle the cases where relevant and irrelevant are empty lists.\n \n Params: {query: Int (index of artist being queried for),\n query_obj: Dict (storing the names of relevant and irrelevant artists for query),\n input_doc_mat: Numpy Array,\n artist_name_to_index: Dict,\n a,b,c: floats (weighting of the original query, relevant artists,\n and irrelevant artists, respectively)}\n Returns: np.ndarray\n \"\"\"\n q = input_doc_mat[query]\n dimension = len(q)\n rel_d, irrel_d = np.zeros(dimension), np.zeros(dimension)\n relevant, irrelevant = query_obj['relevant_artists'], query_obj['irrelevant_artists']\n len_rel, len_irrel = len(relevant), len(irrelevant)\n \n for r in range(len_rel): # Get centroid of relevant artists\n artist = relevant[r]\n rel_d = rel_d + input_doc_mat[artist_name_to_index[artist]]\n \n for i in range(len_irrel): # Get centroid of irrelevant artists\n artist = irrelevant[i]\n irrel_d = irrel_d + input_doc_mat[artist_name_to_index[artist]]\n \n rocchio = a * q\n if len_rel > 0:\n rocchio += b * rel_d / len_rel\n if len_irrel > 0:\n rocchio -= c * irrel_d / len_irrel\n \n return np.clip(rocchio, 0, None)\n\ndef cosine_similarity(query_vec, tfidf_mat=matrix):\n \"\"\" Returns numpy array of each artist's cosine similarity score with [query_vec]\n \n Params: {query_vec: np.ndarray - (k,)\n tfidf_mat: np.ndarray - d x k (where d is number of documents/artists,\n and rows are normalized)\n Returns: np.ndarray\n \"\"\"\n scores = tfidf_mat.dot(query_vec)\n return scores\n\ndef get_filter_function(name, rel_artists, irrel_artists, avg_followers, percentage=0.2):\n \"\"\" Returns True if [name] has more followers than [percentage] * [avg_followers]\n and [name] is not part of user input ([rel_artists]).\n \n Parameters: {name: String\n rel_artists: List\n irrel_artists: List\n avg_followers: Float\n percentage: Float}\n Returns: Boolean\n \"\"\"\n follower_threshold = avg_followers * percentage\n return name not in rel_artists and name not in irrel_artists and get_artist_follower_count(name) > follower_threshold\n\ndef minmax_scale(vec):\n \"\"\" Returns min/max scale of [vec].\n \n Parameters: {vec: np.ndarray}\n Returns: np.ndarray\n \"\"\"\n min = np.min(vec)\n return (vec-min) / (np.max(vec) - min)\n\ndef get_rec_artists(liked_artists, ling_desc, disliked_artists, artist_name_to_index=artist_name_to_index):\n \"\"\" Returns list of recommended artists and their similarity scores \n that are similar to [query] and dissimilar to [disliked_artist].\n \n Parameters: {liked_artists: List (liked artist)\n ling_desc: String\n disliked_artists: List\n artist_name_to_index: Dict}\n Returns: List\n \"\"\"\n all_artists, set_liked, set_disliked 
= set(artist_name_to_index), set(liked_artists), set(disliked_artists)\n if (len(all_artists & set_liked) != len(set_liked)) or \\\n (disliked_artists and len(all_artists & set_disliked) != len(set_disliked)) or \\\n (disliked_artists and len(set_liked & set_disliked) > 0):\n return []\n\n idx = artist_name_to_index[liked_artists[0]]\n query_obj = {\n 'relevant_artists': liked_artists,\n 'irrelevant_artists': disliked_artists\n }\n query_vec = rocchio_update(idx, query_obj, c=0.8)\n \n cosine_scores = minmax_scale(cosine_similarity(query_vec))\n jaccard_scores = minmax_scale(rocchio_update(idx,query_obj,input_doc_mat=jaccard))\n ling_scores = ling_similarity(ling_desc)\n \n final_scores = cosine_scores + 2 * jaccard_scores + ling_scores\n final_scores /= 4 if ling_desc else 3\n \n sorted_indices = np.argsort(final_scores)\n rankings = [(artist_names[i], final_scores[i]) for i in sorted_indices[::-1]]\n \n average_followers = np.array([get_artist_follower_count(name) for name in query_obj['relevant_artists']]).mean()\n artist_ranking = list(filter(lambda x: get_filter_function(x[0], query_obj['relevant_artists'], query_obj['irrelevant_artists'], average_followers), rankings))\n return artist_ranking[:10]\n\ndef get_results(query, ling_desc, disliked_artist):\n \"\"\" Returns list of recommended artists who are similar to [query] and dissimilar\n to [disliked_artist] along with their similarity score, description and photo.\n \n Parameters: {query: String (liked artists)\n ling_desc: String\n disliked_artist: String}\n Returns: List\n \"\"\"\n data = []\n\n if not query:\n return data\n \n query = query.split(',')\n if disliked_artist:\n disliked_artist = disliked_artist.split(',')\n\n top_rec_artists = get_rec_artists(query, ling_desc, disliked_artist)\n\n if not top_rec_artists:\n return []\n\n query_genres = set([ i for artist in query for i in get_artist_genres(artist) ])\n\n for artist, score in top_rec_artists:\n genres = \", \".join(set(get_artist_genres(artist)) & query_genres)\n description = get_artist_album_description(artist)\n\n data.append({\n 'artist_name' : artist,\n 'sim_score' : round(score * 100, 2),\n 'artist_id' : get_artist_id(artist),\n 'common_genres' : genres,\n 'description' : get_artist_description(artist),\n 'follower_count': get_artist_follower_count(artist),\n 'img_url' : get_artist_photo(artist),\n 'rating' : description['rating'],\n 'album' : description['album_title'],\n 'review' : description['review']\n })\n return data \n\n@irsystem.route('/', methods=['GET'])\ndef search():\n \"\"\" Returns UI rendering of results. 
\"\"\"\n query = request.args.get('search')\n ling_desc = request.args.get('ling_desc')\n disliked_artist = request.args.get('disliked_artist')\n data = get_results(query, ling_desc, disliked_artist)\n all_artist_names = [s.replace('\\'', '').replace('\\\"', '') for s in artist_names]\n\n if ((not query) and (not ling_desc) and (not disliked_artist)): # empty query\n output_message = ''\n return render_template('search.html', name=project_name, netid=net_id, output_message=output_message, data=data,\\\n artist_names=all_artist_names)\n elif ((data == []) or (query == disliked_artist)): # query returned no results, or liked artist and disliked artist are the same\n output_message = ''\n return render_template('search.html', name=project_name, netid=net_id, output_message=output_message, data=data,\\\n artist_names=all_artist_names,\\\n query_info={\"artist_name\": query, \"ling_desc\": ling_desc, \"disliked_artist\": disliked_artist})\n else:\n output_message = \"Since you like \" + query + \"'s music, we recommend...\"\n return render_template('search_results.html', name=project_name, netid=net_id, output_message=output_message, data=data,\\\n artist_names=all_artist_names,\\\n query_info={\"artist_name\": query, \"ling_desc\": ling_desc, \"disliked_artist\": disliked_artist})\n","sub_path":"app/irsystem/controllers/search_controller.py","file_name":"search_controller.py","file_ext":"py","file_size_in_byte":10560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"320885459","text":"import sqlalchemy as db\r\nfrom datetime import datetime as dt\r\nfrom dateutil.relativedelta import relativedelta\r\nimport csv\r\n\r\ndef full_name(first, last):\r\n return first + ' ' + last\r\n\r\ndef age(dob):\r\n dob_format = dt.strptime(dob, '%Y-%m-%d')\r\n today = dt.today()\r\n age = relativedelta(today, dob_format)\r\n return age.years\r\ndef main():\r\n engine = sa.create_engine('sqlite:///customer.sqlite')\r\n connection = engine.connect()\r\n data = sa.MetaData()\r\n customer = sa.Table('customer', data, autoload=True, autoload_with=engine)\r\n query = sa.select([customer.columns.id,\r\n customer.columns.first_name,\r\n customer.columns.last_name,\r\n customer.columns.dob])\r\n proxy = connection.execute(query)\r\n table_data = proxy.fetchall()\r\n connection.close()\r\n with open('jdier3_assignment5.csv', 'w') as csv_file:\r\n writer = csv.writer(csv_file)\r\n writer.writerow(['Customer ID', 'Name', 'Age'])\r\n for x in table_data:\r\n writer.writerow([x[0], full_name(x[1], x[2]), age(x[3])])\r\n","sub_path":"jdier_sqlite.py","file_name":"jdier_sqlite.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"220866257","text":"import random\nimport time\n\ntry:\n #assert False\n import torch\n from transformers import GPT2LMHeadModel, GPT2Tokenizer\n # initialize tokenizer and model from pretrained GPT2 model\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n model = GPT2LMHeadModel.from_pretrained('gpt2')\n\n def predict(input):\n inputs = tokenizer.encode(input, return_tensors='pt')\n outputs = model.generate(inputs, max_length=200, do_sample=True)\n return tokenizer.decode(outputs[0], skip_special_tokens=True)\nexcept Exception as e:\n def predict(input):\n return (input + \"GPT2 is sorry, but does not want to say any more today.\"+str(random.random()) )\n\n#print(predict(\"\"\"Let me tell you a story:\"\"\"))\n\nclass Blockchain():\n def 
__init__(self):\n print(\"creating blockchain\")\n self.blocks=[\"Let me tell you a story:\"]\n self.setup_next()\n def next_block(self):\n m = max(self.votesto)\n nx = [i for i in range(3) if self.votesto[i]==m]\n #print(nx)\n self.blocks.append(self.opts[random.choice(nx)])\n self.setup_next()\n\n def setup_next(self):\n self.blocktime=time.time()\n last = self.blocks[-1]\n self.opts=[]\n for i in range(3):\n p = predict(last+\"\\n\"+\"\\n\")\n p = p[len(last):]\n while p[0]==\"\\n\":\n p=p[1:]\n l = p.find(\"\\n\")\n if l>0:\n p=p[:l]\n self.opts.append(p)\n\n self.votesby={}\n self.votesto=[0,0,0]\n\n def vote(self, name, target):\n \"\"\"Submit a vote. Returns false if invalid\"\"\"\n if target not in [0,1,2]:\n return False\n if name not in self.votesby:\n self.votesby[name]=target\n self.votesto[target]\n return True\n return False\n def getOpts(self):\n return [x for x in self.opts]\n def history(self, since):\n if time.time()-self.blocktime > 60:\n self.next_block()\n return list(self.blocks[since:])\n def __len__(self):\n return len(self.blocks)\n\n \n","sub_path":"server/app/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"50568855","text":"from django.urls import path\nfrom .views import BookInventoryListCreate, BookInventoryUpdate, BookInventoryGoogle, GoogleBooks, GoogleBooksDetails\nurlpatterns = [\n path('book/', BookInventoryListCreate.as_view()), \n path('book//', BookInventoryUpdate.as_view()), \n path('book//', BookInventoryUpdate.as_view()), \n path('book_google//', BookInventoryGoogle.as_view()), \n path('volumes/', GoogleBooks.as_view()), \n path('volumes//', GoogleBooksDetails.as_view()),\n\n]\n","sub_path":"book_inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"207394422","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom todos.models import Todo\nfrom todos.serializers import TodoSerializer\n\n\n@api_view(['GET', 'DELETE', 'PUT'])\ndef get_delete_update_todo(request, pk):\n try:\n todo = Todo.objects.get(pk=pk)\n except Todo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n todo = Todo.objects.get(pk=pk)\n serializer = TodoSerializer(todo)\n return Response(serializer.data)\n\n if request.method == 'PUT':\n serializer = TodoSerializer(todo, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_204_NO_CONTENT)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if request.method == 'DELETE':\n todo.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET', 'POST'])\ndef get_post_todos(request):\n if request.method == 'GET':\n todos = Todo.objects.all()\n serializer = TodoSerializer(todos, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n data = {\n 'title': request.data.get('title'),\n 'description': request.data.get('description'),\n 'entries': request.data.get('entries')\n }\n serializer = TodoSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, 
status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"todo_backend/todos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"64291306","text":"# Copyright 2016 Hewlett Packard Enterprise Development, LP\n#\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport functools\n\nfrom neutron_lib import constants\nfrom neutron_lib import exceptions as n_exc\nfrom oslo_db import exception as db_exc\nfrom oslo_log import helpers as log_helpers\nfrom oslo_utils import uuidutils\nfrom sqlalchemy.orm import exc\n\nfrom neutron.callbacks import events\nfrom neutron.callbacks import registry\nfrom neutron.callbacks import resources\nfrom neutron.common import _deprecate\nfrom neutron.db import _utils as db_utils\nfrom neutron.db import api as db_api\nfrom neutron.db import common_db_mixin\nfrom neutron.db.models import segment as segment_model\nfrom neutron.db import segments_db as db\nfrom neutron.extensions import segment as extension\nfrom neutron import manager\nfrom neutron.objects import network\nfrom neutron.services.segments import exceptions\n\n_deprecate._moved_global('SegmentHostMapping', new_module=segment_model)\n\n\nclass SegmentDbMixin(common_db_mixin.CommonDbMixin):\n \"\"\"Mixin class to add segment.\"\"\"\n\n @staticmethod\n def _make_segment_dict(segment_db, fields=None):\n res = {'id': segment_db['id'],\n 'network_id': segment_db['network_id'],\n 'name': segment_db['name'],\n 'description': segment_db['description'],\n db.PHYSICAL_NETWORK: segment_db[db.PHYSICAL_NETWORK],\n db.NETWORK_TYPE: segment_db[db.NETWORK_TYPE],\n db.SEGMENTATION_ID: segment_db[db.SEGMENTATION_ID],\n 'hosts': [mapping.host for mapping in\n segment_db.segment_host_mapping],\n 'segment_index': segment_db['segment_index']}\n return db_utils.resource_fields(res, fields)\n\n def _get_segment(self, context, segment_id):\n try:\n return self._get_by_id(\n context, segment_model.NetworkSegment, segment_id)\n except exc.NoResultFound:\n raise exceptions.SegmentNotFound(segment_id=segment_id)\n\n @log_helpers.log_method_call\n def create_segment(self, context, segment):\n \"\"\"Create a segment.\"\"\"\n segment = segment['segment']\n segment_id = segment.get('id') or uuidutils.generate_uuid()\n try:\n new_segment = self._create_segment_db(context, segment_id, segment)\n except db_exc.DBReferenceError:\n raise n_exc.NetworkNotFound(net_id=segment['network_id'])\n registry.notify(resources.SEGMENT, events.AFTER_CREATE, self,\n context=context, segment=new_segment)\n return self._make_segment_dict(new_segment)\n\n def _create_segment_db(self, context, segment_id, segment):\n with context.session.begin(subtransactions=True):\n network_id = segment['network_id']\n physical_network = segment[extension.PHYSICAL_NETWORK]\n if physical_network == constants.ATTR_NOT_SPECIFIED:\n physical_network = None\n network_type = segment[extension.NETWORK_TYPE]\n segmentation_id = 
segment[extension.SEGMENTATION_ID]\n if segmentation_id == constants.ATTR_NOT_SPECIFIED:\n segmentation_id = None\n name = segment['name']\n if name == constants.ATTR_NOT_SPECIFIED:\n name = None\n description = segment['description']\n if description == constants.ATTR_NOT_SPECIFIED:\n description = None\n args = {'id': segment_id,\n 'network_id': network_id,\n 'name': name,\n 'description': description,\n db.PHYSICAL_NETWORK: physical_network,\n db.NETWORK_TYPE: network_type,\n db.SEGMENTATION_ID: segmentation_id}\n # Calculate the index of segment\n segment_index = 0\n segments = self.get_segments(\n context,\n filters={'network_id': [network_id]},\n fields=['segment_index'],\n sorts=[('segment_index', True)])\n if segments:\n # NOTE(xiaohhui): The new index is the last index + 1, this\n # may cause discontinuous segment_index. But segment_index\n # can functionally work as the order index for segments.\n segment_index = (segments[-1].get('segment_index') + 1)\n args['segment_index'] = segment_index\n\n new_segment = segment_model.NetworkSegment(**args)\n context.session.add(new_segment)\n # Do some preliminary operations before committing the segment to\n # db\n registry.notify(resources.SEGMENT, events.PRECOMMIT_CREATE, self,\n context=context, segment=new_segment)\n return new_segment\n\n @log_helpers.log_method_call\n def update_segment(self, context, uuid, segment):\n \"\"\"Update an existing segment.\"\"\"\n segment = segment['segment']\n with context.session.begin(subtransactions=True):\n curr_segment = self._get_segment(context, uuid)\n curr_segment.update(segment)\n return self._make_segment_dict(curr_segment)\n\n @log_helpers.log_method_call\n def get_segment(self, context, uuid, fields=None):\n segment_db = self._get_segment(context, uuid)\n return self._make_segment_dict(segment_db, fields)\n\n @log_helpers.log_method_call\n def get_segments(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):\n marker_obj = self._get_marker_obj(context, 'segment', limit, marker)\n make_segment_dict = functools.partial(self._make_segment_dict)\n return self._get_collection(context,\n segment_model.NetworkSegment,\n make_segment_dict,\n filters=filters,\n fields=fields,\n sorts=sorts,\n limit=limit,\n marker_obj=marker_obj,\n page_reverse=page_reverse)\n\n @log_helpers.log_method_call\n def get_segments_count(self, context, filters=None):\n return self._get_collection_count(context,\n segment_model.NetworkSegment,\n filters=filters)\n\n @log_helpers.log_method_call\n def get_segments_by_hosts(self, context, hosts):\n if not hosts:\n return []\n segment_host_mapping = network.SegmentHostMapping.get_objects(\n context, host=hosts)\n return list({mapping.segment_id for mapping in segment_host_mapping})\n\n @log_helpers.log_method_call\n def delete_segment(self, context, uuid):\n \"\"\"Delete an existing segment.\"\"\"\n segment = self.get_segment(context, uuid)\n # Do some preliminary operations before deleting the segment\n registry.notify(resources.SEGMENT, events.BEFORE_DELETE,\n self.delete_segment, context=context,\n segment=segment)\n\n # Delete segment in DB\n with context.session.begin(subtransactions=True):\n query = self._model_query(context, segment_model.NetworkSegment)\n query = query.filter(segment_model.NetworkSegment.id == uuid)\n if 0 == query.delete():\n raise exceptions.SegmentNotFound(segment_id=uuid)\n # Do some preliminary operations before deleting segment in db\n registry.notify(resources.SEGMENT, 
events.PRECOMMIT_DELETE,\n self.delete_segment, context=context,\n segment=segment)\n\n registry.notify(resources.SEGMENT, events.AFTER_DELETE,\n self.delete_segment, context=context,\n segment=segment)\n\n\ndef update_segment_host_mapping(context, host, current_segment_ids):\n with context.session.begin(subtransactions=True):\n segment_host_mapping = network.SegmentHostMapping.get_objects(\n context, host=host)\n previous_segment_ids = {\n seg_host['segment_id'] for seg_host in segment_host_mapping}\n for segment_id in current_segment_ids - previous_segment_ids:\n network.SegmentHostMapping(\n context, segment_id=segment_id, host=host).create()\n stale_segment_ids = previous_segment_ids - current_segment_ids\n if stale_segment_ids:\n for entry in segment_host_mapping:\n if entry.segment_id in stale_segment_ids:\n entry.delete()\n\n\ndef get_hosts_mapped_with_segments(context):\n \"\"\"Get hosts that are mapped with segments.\n\n L2 providers can use this method to get an overview of SegmentHostMapping,\n and then delete the stale SegmentHostMapping.\n \"\"\"\n segment_host_mapping = network.SegmentHostMapping.get_objects(context)\n return {row.host for row in segment_host_mapping}\n\n\ndef _get_phys_nets(agent):\n configurations_dict = agent.get('configurations', {})\n mappings = configurations_dict.get('bridge_mappings', {})\n mappings.update(configurations_dict.get('interface_mappings', {}))\n mappings.update(configurations_dict.get('device_mappings', {}))\n return mappings.keys()\n\n\nreported_hosts = set()\n\n# NOTE: Module level variable of segments plugin. It should be removed once\n# segments becomes a default plugin.\nsegments_plugin = None\n\n\ndef get_segments_with_phys_nets(context, phys_nets):\n \"\"\"Get segments from physical networks.\n\n L2 providers usually have information of hostname and physical networks.\n They could use this method to get related segments and then update\n SegmentHostMapping.\n \"\"\"\n if not phys_nets:\n return []\n\n with context.session.begin(subtransactions=True):\n segments = context.session.query(segment_model.NetworkSegment).filter(\n segment_model.NetworkSegment.physical_network.in_(phys_nets))\n return segments\n\n\ndef map_segment_to_hosts(context, segment_id, hosts):\n \"\"\"Map segment to a collection of hosts.\"\"\"\n with db_api.autonested_transaction(context.session):\n for host in hosts:\n network.SegmentHostMapping(\n context, segment_id=segment_id, host=host).create()\n\n\ndef _update_segment_host_mapping_for_agent(resource, event, trigger,\n context, host, plugin, agent):\n check_segment_for_agent = getattr(plugin, 'check_segment_for_agent', None)\n if not check_segment_for_agent:\n return\n phys_nets = _get_phys_nets(agent)\n if not phys_nets:\n return\n start_flag = agent.get('start_flag', None)\n if host in reported_hosts and not start_flag:\n return\n reported_hosts.add(host)\n segments = get_segments_with_phys_nets(context, phys_nets)\n current_segment_ids = {\n segment['id'] for segment in segments\n if check_segment_for_agent(segment, agent)}\n update_segment_host_mapping(context, host, current_segment_ids)\n\n\ndef _add_segment_host_mapping_for_segment(resource, event, trigger,\n context, segment):\n if not context.session.is_active:\n # The session might be in partial rollback state, due to errors in\n # peer callback. 
In that case, there is no need to add the mapping.\n # Just return here.\n return\n\n if not segment.physical_network:\n return\n cp = manager.NeutronManager.get_plugin()\n check_segment_for_agent = getattr(cp, 'check_segment_for_agent', None)\n if not hasattr(cp, 'get_agents') or not check_segment_for_agent:\n # not an agent-supporting plugin\n registry.unsubscribe(_add_segment_host_mapping_for_segment,\n resources.SEGMENT, events.PRECOMMIT_CREATE)\n return\n hosts = {agent['host'] for agent in cp.get_agents(context)\n if check_segment_for_agent(segment, agent)}\n map_segment_to_hosts(context, segment.id, hosts)\n\n\ndef _delete_segments_for_network(resource, event, trigger,\n context, network_id):\n admin_ctx = context.elevated()\n global segments_plugin\n if not segments_plugin:\n segments_plugin = manager.NeutronManager.load_class_for_provider(\n 'neutron.service_plugins', 'segments')()\n segments = segments_plugin.get_segments(\n admin_ctx, filters={'network_id': [network_id]})\n for segment in segments:\n segments_plugin.delete_segment(admin_ctx, segment['id'])\n\n\ndef subscribe():\n registry.subscribe(_update_segment_host_mapping_for_agent,\n resources.AGENT,\n events.AFTER_CREATE)\n registry.subscribe(_update_segment_host_mapping_for_agent,\n resources.AGENT,\n events.AFTER_UPDATE)\n registry.subscribe(_add_segment_host_mapping_for_segment,\n resources.SEGMENT, events.PRECOMMIT_CREATE)\n registry.subscribe(_delete_segments_for_network,\n resources.NETWORK,\n events.PRECOMMIT_DELETE)\n\nsubscribe()\n\n\n_deprecate._MovedGlobals()\n","sub_path":"neutron/services/segments/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":13866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"51376980","text":"# 4. 
Sa se scrie o functie care primeste ca parametri doua liste a si b si returneaza:\n# \t(a intersectat cu b, a reunit cu b, a - b, b - a)\n\n\ndef set_operations(a, b):\n a = set(a)\n b = set(b)\n\n intersection = list(a.intersection(b))\n union = list(a.union(b))\n dif1 = list(a.difference(b))\n diff2 = list(b.difference(a))\n\n return intersection, union, dif1, diff2\n\n\nprint(set_operations([1, 2, 3, 4], [3, 5, 6, 7, 1]))\n","sub_path":"labs/lab2/problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"591136243","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom permission.logics import PermissionLogic\nfrom permission.utils.permissions import perm_to_permission\nfrom kawaz.core.utils.permission import get_full_permission_name\n\n\nclass StarPermissionLogic(PermissionLogic):\n def _has_perm_of_content_object(self, user_obj, perm, star):\n \"\"\"\n スター付加先のオブジェクトの公開状態をチェックし、内部公開であれば\n ユーザーにその記事の閲覧権限があるかどうかによりスターの閲覧権限を\n 規定する\n \"\"\"\n try:\n # 対象オブジェクトのパーミッションを取得\n perm = get_full_permission_name(perm, star.content_object)\n # 文字列 permission を実体に変換\n perm_to_permission(perm)\n # 指定されたパーミッションが存在するためチェックを行う\n return user_obj.has_perm(perm, obj=star.content_object)\n except ObjectDoesNotExist:\n # 指定されたパーミッションが存在しない。\n # Star自体に閲覧権限があるわけではないので、今場合は常にTrue\n return True\n\n def has_perm(self, user_obj, perm, obj=None):\n \"\"\"\n Starのパーミッションを処理する\n\n add - 全てのメンバーが持つ\n change - 誰も持たない\n delete - 所有者のみ持つ\n view - 付加対象の公開状態依存\n \"\"\"\n\n # filter interest permissions\n if perm not in ('stars.add_star',\n 'stars.change_star',\n 'stars.delete_star',\n 'stars.view_star'):\n return False\n if perm == 'stars.change_star':\n # nobody can change stars\n return False\n if obj is None:\n permissions = ('stars.add_star', 'stars.delete_star',)\n if perm in permissions:\n if user_obj.is_authenticated() and user_obj.is_member:\n # メンバーはスターの付加・削除が可能\n return True\n if perm == 'stars.view_star':\n # あらゆるユーザがスターを見る権利を持つ可能性がある\n return True\n return False\n # object permission\n if perm == 'stars.view_star':\n # 基本的に全てのスターは誰でも見ることができ、Starそのものには\n # 公開状態がないが、このチェックをしないと非公開オブジェクトの\n # StarがpublicなAPIで取れてしまって引用などが見られてしまう\n # 可能性があるので、content_objectが見れる場合のみ閲覧権限がある\n return self._has_perm_of_content_object(user_obj, 'view', obj)\n elif perm == 'stars.delete_star':\n if user_obj == obj.author:\n # 自分が付加したスターは削除可能\n return True\n elif self._has_perm_of_content_object(user_obj, 'change', obj):\n # 付加先のコンテンツを編集可能な権限を持っている場合も削除可能\n return True\n elif self._has_perm_of_content_object(user_obj, 'delete', obj):\n # 付加先のコンテンツを削除可能な権限を持っている場合も削除可能\n return True\n return False\n","sub_path":"src/kawaz/apps/stars/perms.py","file_name":"perms.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"637880616","text":"import re\n\n\ndef preprocess_info_dict(info_dict):\n for key, value in info_dict.items():\n if key == \"resource-id\":\n value_remove_app = value.split(\"/\")[1]\n replace_underscore = value_remove_app.replace(\"_\", \" \")\n info_dict[key] = replace_underscore\n\n\n if key == \"name\":\n replace_underscore = value.replace(\"_\", \" \")\n info_dict[key] = replace_underscore\n\n camel_case_value = process_camel_case(value)\n info_dict[key] = camel_case_value\n\n return info_dict\n\n\n\ndef process_camel_case(string):\n if len(string) ==0:\n sentence=\"\"\n else:\n splitted = 
re.sub('([A-Z][a-z]+)', r' \\1', re.sub('([A-Z]+)', r' \\1', string)).split()\n sentence = splitted[0]\n if len(splitted)>1:\n for i in [1,len(splitted)-1]:\n sentence = sentence + \" \" + splitted[i]\n return sentence","sub_path":"text_matching/text_utils.py","file_name":"text_utils.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"34576476","text":"import layer\n\napple = 100\napple_num = 2\norange = 150\norange_num = 3\ntax = 1.1\n\n#계층\nmul_apple_layer = layer.MulLayer()\nmul_orange_layer = layer.MulLayer()\nadd_fruits_layer = layer.AddLayer()\nmul_tax_layer = layer.MulLayer()\n\n#순전파\napple_price = mul_apple_layer.forward(apple, apple_num)\norange_price = mul_orange_layer.forward(orange, orange_num)\nall_price = add_fruits_layer.forward(apple_price, orange_price)\nprice = mul_tax_layer.forward(all_price, tax)\n\n#역전파\ndprice = 1\ndall_price, dtax = mul_tax_layer.backward(dprice)\ndapple_price, dorange_price = add_fruits_layer.backward(dall_price)\ndapple, dapple_num = mul_apple_layer.backward(dapple_price)\ndorange, dorange_num = mul_orange_layer.backward(dorange_price)\n\n#출력\nprint(\"순전파 :\"+str(price))\nprint(\"역전파 :\"+str(dapple_num)+'/'+str(dapple)+'/'+str(dorange_num)+'/'+str(dorange)+'/'+str(dtax))","sub_path":"fruit.py","file_name":"fruit.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"581243074","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport storage.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ShareFile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateTimeField(auto_now=True, verbose_name='date')),\n ('title', models.CharField(max_length=256, verbose_name='title')),\n ('description', models.TextField(verbose_name='description')),\n ('thefile', models.FileField(upload_to=storage.models._upload_to, max_length=128, verbose_name='the file')),\n ('digest', models.CharField(max_length=128, verbose_name='file didgest', db_index=True)),\n ('user', models.ForeignKey(verbose_name='user', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'storage file',\n 'verbose_name_plural': 'storage files',\n },\n ),\n ]\n","sub_path":"storage/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"459149064","text":"import json\n\nimport flask\nfrom flask import request\nfrom flask.ext import sqlalchemy\nfrom flask.ext import whooshalchemy\nfrom flask_recaptcha import ReCaptcha\nimport functools\nimport os\nimport sys\n\nfrom ufo.services import custom_exceptions\nfrom ufo.services import oauth\n\napp = flask.Flask(__name__, instance_relative_config=True)\n\napp.config.from_object('config.BaseConfiguration')\n\n# Register logging. 
Ensure INFO level is captured by Heroku's Logplex.\nimport logging\nstream_handler = logging.StreamHandler(sys.stdout)\napp.logger.addHandler(stream_handler)\napp.logger.setLevel(logging.INFO)\n\nif 'DATABASE_URL' in os.environ:\n app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']\n app.config['WHOOSH_BASE'] = os.environ['DATABASE_URL']\n\nRECAPTCHA_ENABLED_FOR_APP = False\nMAX_FAILED_LOGINS_BEFORE_RECAPTCHA = 10\nif 'RECAPTCHA_SITE_KEY' in os.environ and 'RECAPTCHA_SECRET_KEY' in os.environ:\n app.config['RECAPTCHA_SITE_KEY'] = os.environ['RECAPTCHA_SITE_KEY']\n app.config['RECAPTCHA_SECRET_KEY'] = os.environ['RECAPTCHA_SECRET_KEY']\n RECAPTCHA_ENABLED_FOR_APP = True\nelse:\n app.logger.error('No recaptcha site or secret key found. Please configure ' +\n 'RECAPTCHA_SITE_KEY and RECAPTCHA_SECRET_KEY in the ' +\n 'environment variables.')\n # RECAPTCHA_ENABLED_FOR_APP stays false\n\n# any instance-specific config the user wants to set, these override everything\napp.config.from_pyfile('application.cfg', silent=True)\n\ndb = sqlalchemy.SQLAlchemy(app)\n\n# Register the error handlers with the app.\nfrom ufo.services import error_handler\nerror_handler.init_error_handlers(app)\n\n# DB needs to be defined before this point\nfrom ufo.database import models\n\ntry:\n whooshalchemy.whoosh_index(app, models.User)\n whooshalchemy.whoosh_index(app, models.ProxyServer)\nexcept:\n app.logger.error('Whoosh indexing failed. Search may be broken as result. '\n 'Please redeploy to correct this.')\n\n# The headers and prefix listed below are to help guard against XSSI. The\n# prefix specifically causes us to escape out of any client that attempts to\n# execute the JSON as code. We don't use any callbacks or functions in our\n# returned JSON, but the prefix would catch it by causing execution to stop if\n# so. 
The prefix is supplied in the resource dictionaries so that it can be\n# stripped away on the client side when making AJAX calls.\nJSON_HEADERS = {'Content-Type': 'application/javascript; charset=utf-8'}\nXSSI_PREFIX = \")]}'\\n\"\n\nRECAPTCHA = ReCaptcha(app=app)\n\n@app.after_request\ndef checkCredentialChange(response):\n \"\"\"Save credentials if changed\"\"\"\n credentials = getattr(flask.g, '_credentials', None)\n if credentials is not None:\n config = get_user_config()\n json_credentials = credentials.to_json()\n if config.credentials != json_credentials:\n config.credentials = json_credentials\n config.save()\n\n return response\n\ndef get_user_config():\n \"\"\"Returns the current user-defined configuration from the database\"\"\"\n config = models.Config.query.get(0)\n if config is None:\n config = models.Config()\n config.id = 0\n\n config.save()\n\n return config\n\ndef setup_required(func):\n \"\"\"Decorator to handle routes that need setup to have been completed\n\n This decorator should be applied to nearly all routes\"\"\"\n @functools.wraps(func)\n def decorated_function(*args, **kwargs):\n config = get_user_config()\n if not config.isConfigured:\n raise custom_exceptions.SetupNeeded\n return func(*args, **kwargs)\n return decorated_function\n\ndef get_json_message(message_key):\n \"\"\"Get an i18n-ed message from the appropriate json file for the given key.\n\n Args:\n message_key: The message to get.\n\n Returns:\n A string the for the i18n-ed message or the key itself if an error occurs.\n \"\"\"\n file_path = (os.getcwd() + '/ufo/static/locales/' +\n flask.session['language_prefix'] + '/messages.json')\n try:\n with open(file_path) as json_file:\n messages = json.load(json_file)\n return messages[message_key]\n except:\n return message_key\n\ndef make_oauth_configration_resources_dict():\n \"\"\"Make the resources for the oauth configuration component.\n\n Returns:\n A dict of the resources for the oauth configuration component.\n \"\"\"\n config = get_user_config()\n return {\n 'config': config.to_dict(),\n 'oauth_url': oauth.getOauthFlow().step1_get_authorize_url(),\n }\n\n\nfrom ufo.services import key_distributor\nfrom ufo.handlers import routes\nfrom ufo.services import xsrf\nfrom ufo.services import resource_provider\n\n\n@app.before_first_request\ndef set_jinja_before_request():\n \"\"\"Set the global jinja environment vars.\"\"\"\n resource_provider.set_jinja_globals()\n\nDEFAULT_LANGUAGE_PREFIX = 'en'\nACCEPTABLE_LANGUAGE_PREFIXES = [\n 'en',\n 'es',\n 'fr',\n 'it',\n] # These aren't necessarily true, just something to test with.\n\n@app.before_request\ndef determine_language_prefix():\n \"\"\"Determine the language prefix using the language header.\"\"\"\n # TODO(eholder): Figure out a more appropriate way to map the header into\n # our set of prefixes. Since I don't know what those prefixes are yet, this\n # is intentionally very generic. 
I also need to decide if this should just be\n # done once as part of the login flow rather than checking every request.\n # Checking every request makes this easier to test and change though in the\n # meantime.\n languages_string = request.headers.get('Accept-Language')\n\n # If there is no header, use the default.\n if languages_string is None:\n flask.session['language_prefix'] = DEFAULT_LANGUAGE_PREFIX\n return\n\n languages = languages_string.split(',')\n if languages[0] in ACCEPTABLE_LANGUAGE_PREFIXES:\n flask.session['language_prefix'] = languages[0]\n return\n\n language_sections = languages[0].split(';')\n if language_sections[0] in ACCEPTABLE_LANGUAGE_PREFIXES:\n flask.session['language_prefix'] = language_sections[0]\n return\n\n language_subsections = language_sections[0].split('-')\n if language_subsections[0] in ACCEPTABLE_LANGUAGE_PREFIXES:\n flask.session['language_prefix'] = language_subsections[0]\n return\n\n flask.session['language_prefix'] = DEFAULT_LANGUAGE_PREFIX\n","sub_path":"ufo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"127434837","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views.generic import DetailView, ListView\nfrom dfirtrack_main.forms import ReportitemForm\nfrom dfirtrack_main.logger.default_logger import debug_logger\nfrom dfirtrack_main.models import Reportitem\n\nclass Reportitems(LoginRequiredMixin, ListView):\n login_url = '/login'\n model = Reportitem\n template_name = 'dfirtrack_main/reportitem/reportitems_list.html'\n def get_queryset(self):\n debug_logger(str(self.request.user), \" REPORTITEM_ENTERED\")\n return Reportitem.objects.order_by('reportitem_id')\n\nclass ReportitemsDetail(LoginRequiredMixin, DetailView):\n login_url = '/login'\n model = Reportitem\n template_name = 'dfirtrack_main/reportitem/reportitems_detail.html'\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n reportitem = self.object\n reportitem.logger(str(self.request.user), \" REPORTITEMDETAIL_ENTERED\")\n return context\n\n@login_required(login_url=\"/login\")\ndef reportitems_add(request):\n if request.method == 'POST':\n form = ReportitemForm(request.POST)\n if form.is_valid():\n reportitem = form.save(commit=False)\n reportitem.reportitem_created_by_user_id = request.user\n reportitem.reportitem_modified_by_user_id = request.user\n reportitem.save()\n reportitem.logger(str(request.user), \" REPORTITEM_ADD_EXECUTED\")\n messages.success(request, 'Reportitem added')\n return redirect('/systems/' + str(reportitem.system.system_id))\n else:\n if request.method == 'GET' and 'system' in request.GET:\n system = request.GET['system']\n form = ReportitemForm(initial={\n 'system': system,\n })\n else:\n form = ReportitemForm()\n debug_logger(str(request.user), \" REPORTITEM_ADD_ENTERED\")\n return render(request, 'dfirtrack_main/reportitem/reportitems_add.html', {'form': form})\n\n@login_required(login_url=\"/login\")\ndef reportitems_edit(request, pk):\n reportitem = get_object_or_404(Reportitem, pk=pk)\n if request.method == 'POST':\n form = ReportitemForm(request.POST, instance=reportitem)\n if form.is_valid():\n reportitem = form.save(commit=False)\n 
reportitem.reportitem_modified_by_user_id = request.user\n reportitem.save()\n reportitem.logger(str(request.user), \" REPORTITEM_EDIT_EXECUTED\")\n messages.success(request, 'Reportitem edited')\n return redirect('/systems/' + str(reportitem.system.system_id))\n else:\n form = ReportitemForm(instance=reportitem)\n reportitem.logger(str(request.user), \" REPORTITEM_EDIT_ENTERED\")\n return render(request, 'dfirtrack_main/reportitem/reportitems_edit.html', {'form': form})\n","sub_path":"dfirtrack_main/views/reportitems_views.py","file_name":"reportitems_views.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"187399236","text":"import requests\n\n\ndef rate_getter():\n url = \"http://api.exchangeratesapi.io/v1/latest?access_key=14ac6512ce2db0e6355ee053f6ad8827&base=EUR&symbols=USD,AMD,RUB\"\n response = requests.request(\"GET\", url)\n if not response.json()[\"success\"]:\n raise ConnectionError(f'Something went wrong :( Success: {response.json()[\"success\"]}')\n else:\n rates = response.json()[\"rates\"]\n rates[\"EUR\"] = 1\n return rates\n\n\nif __name__ == \"__main__\":\n rate_getter()","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"207122236","text":"\"\"\" Pre-merging utility for station data split into several files.\nExample: MOSCOW , 0-20000-0-27612 \n ['/raid60/scratch/leo/scratch/era5/odbs/3188/era5.3188.conv.C:4629', '/raid60/scratch/leo/scratch/era5/odbs/3188/era5.3188.conv.C:4567', '/raid60/scratch/leo/scratch/era5/odbs/3188/era5\\\n.3188.conv.C:5246']\n\n\"\"\"\n\n# variables ==> recordtimestamp, recordindex (to be calculated new)\n\n\n# groups ==> to be updated: observations_table, header_table, era5fb \n# ==> to be simply copied: 'station_configuration', 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type'\n\n\n\nimport os,sys\nimport netCDF4 as nc\nimport pandas as pd\nimport xarray as xr \nimport numpy as np\nimport argparse\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\npd.set_option('display.width', None)\n\n\n\n\ndef read_input_file(F , source_dir):\n \"\"\" Read the lists of stations to be pre-merged. 
\"\"\"\n df = pd.read_csv( F, delimiter = '\\t' ) \n dic = {}\n \n for i, row in df.iterrows():\n primary = row['primary_id']\n station = row['station_name']\n files = row['files'].replace('\"', '').replace(']', '').replace('[', '').replace(\"'\",'').replace(' ','')\n files = files.replace('/raid60/scratch/leo/scratch/era5/odbs/3188/' , source_dir ) # replacing the original file directory with the processed netCDF file (from the harvester tool) \n files = files.replace('/raid60/scratch/leo/scratch/era5/odbs/ai_bfr/', source_dir ) \n \n #files = files.replace\n files_split = files.split(',')\n #print (primary, ' ' , stations , ' ' , files )\n if '[' not in primary and station and ( len(files_split) > 1) :\n #print (primary, ' ' , station , ' ' , files )\n dic[primary] = files \n \n return dic\n\n\n\nclass CombineNetCDF():\n \"\"\" Functionalitites to read netCDF files in input \"\"\"\n \n def __init__(self, files ):\n self.files = files\n self.crs = ''\n self.observed_variables = ''\n self.units = ''\n self.z_coordinate_type = ''\n self.station_type = ''\n self.station_configuration = ''\n \n self.observations_table = ''\n self.era5fb = ''\n self.header_table = ''\n \n self.data = {} #containing all the df to be combined\n self.combined = {}\n \n def read_netCDF(self):\n \"\"\" Reading the groups from the netCDF file using xarray \"\"\"\n \n for f in self.files: # saved as pandas df, to be manipulated \n \n if '.nc' not in f :\n f_nc = f + '.nc'\n \n data = {} \n for group in ['observations_table' , 'era5fb' , 'header_table' ]: \n data[group] = xr.open_dataset(f_nc , engine = 'h5netcdf' , group = group).to_dataframe() \n\n if files.index(f) == 0: # no need to save as panda, they will be copied as they in xarray are to the output file \n for group in ['station_configuration', 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type' , 'id_scheme' , 'source_configuration' , 'station_configuration_codes' ]: \n self.combined[group] = xr.open_dataset(f_nc , engine = 'h5netcdf' , group = group) \n \n self.data[f] = data \n\n def combine_df(self):\n \"\"\" Analize the dataframes for the 'observations_table' , 'era5fb' , 'header_table' and combine \"\"\"\n \n observations_tables, header_tables, era5fb_tables = [], [], [] \n \n for k in self.data.keys():\n observations_tables.append(self.data[k]['observations_table'] )\n header_tables.append(self.data[k]['header_table'] )\n era5fb_tables.append (self.data[k]['era5fb']) \n \n # observations table\n observations_tables_combined = pd.concat(observations_tables)\n observations_tables_combined = observations_tables_combined.sort_values(by = ['date_time', 'z_coordinate' ] ) \n\n # header_table \n header_tables_combined = pd.concat(header_tables)\n header_tables_combined = header_tables_combined.sort_values(by = ['record_timestamp' ] ) \n \n # era5fb \n era5fb_tables_combined= pd.concat(era5fb_tables) \n \n try: # different sorting if the original source is in ODB vs all the rest of the formats \n era5fb_tables_combined = era5fb_tables_combined.sort_values(by = ['report_timestamp' , 'vertco_reference_1@body' ] ) \n except:\n era5fb_tables_combined = era5fb_tables_combined.sort_values(by = ['date@hdr', 'time@hdr' , 'vertco_reference_1@body' ] ) \n \n self.combined['era5fb'] = era5fb_tables_combined.to_xarray()\n self.combined['header_table'] = header_tables_combined.to_xarray()\n self.combined['observations_table'] = observations_tables_combined.to_xarray()\n \n \n print('*** Done combining dataframes')\n\n\n\n def 
find_date_indices(self):\n \"\"\" Extracts the list of observation dates, and store the indices of the first and last observations. Copy from the netCDF_CDM_converter script \"\"\" \n \n datetime = self.combined['observations_table']['date_time'].values \n \n date_times, indices, counts = np.unique(datetime, return_counts = True, return_index= True)\n #print('check')\n \n try: # convert to date_time object if needed \n date_times = [ datetime.strptime( str(int(i)) , '%Y%m%d%H%M') for i in date_times ]\n except:\n print('already date_time')\n pass\n \n return np.array(indices) , date_times, counts \n \n \n def write_netCDF(self, dataset = '', out_dir = '' ):\n \"\"\" Write the combined file \"\"\"\n \n station_id = self.combined['station_configuration']['primary_id'].values[0]\n \n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n \n out_file = out_dir + '/' + station_id + '_' + dataset + '_combined.nc'\n \n for k in self.combined.keys():\n self.combined[k].to_netcdf(out_file, format='netCDF4', engine='h5netcdf', mode='a' , group = k) # writing the merged observations_table\n\n \"\"\" Writing the new recordtimestamps and recordindex \"\"\"\n date_times, indices, counts = self.find_date_indices()\n di=xr.Dataset()\n\n di['recordtimestamps'] = ( {'recordtimestamps' : date_times.shape } , date_times )\n di['recordindex'] = ( {'recordindex' : indices.shape } , indices )\n di.to_netcdf(out_file, format='netCDF4', engine='h5netcdf', mode='a')\n \n \n print('*** Done writing ' , out_file )\n \n\n\ndef read_input_file(F , source_dir):\n \"\"\" Read the lists of stations to be pre-merged. \"\"\"\n df = pd.read_csv( F, delimiter = '\\t' ) \n dic = {}\n \n for i, row in df.iterrows():\n primary = row['primary_id']\n station = row['station_name']\n files = row['files'].replace('\"', '').replace(']', '').replace('[', '').replace(\"'\",'').replace(' ','')\n files = files.replace('/raid60/scratch/leo/scratch/era5/odbs/3188/' , source_dir ) # replacing the original file directory with the processed netCDF file (from the harvester tool) \n files = files.replace('/raid60/scratch/leo/scratch/era5/odbs/ai_bfr/', source_dir ) \n \n #files = files.replace\n files_split = files.split(',')\n #print (primary, ' ' , stations , ' ' , files )\n if '[' not in primary and station and ( len(files_split) > 1) :\n #print (primary, ' ' , station , ' ' , files )\n dic[primary] = files \n \n return dic\n \n \n\ndic_dataset_stationsfiles = { 'bufr' : {'stations_file': '/raid8/srvx1/federico/GitHub/DEVELOP_JANUARY2020/CEUAS/CEUAS/cdm/code/merging/make_stationsId_tables/stations_filenames_list/stations_filenames_list/bufr_summary_duplicated_stations.txt' , \n 'source_dir' : '/raid60/scratch/federico/netCDF_converted_Jan2020/bufr/' },\n \n 'era5_3188' : { 'stations_file': '/raid8/srvx1/federico/GitHub/DEVELOP_JANUARY2020/CEUAS/CEUAS/cdm/code/merging/make_stationsId_tables/stations_filenames_list/stations_filenames_list/era5_3188_summary_duplicated_stations.txt' ,\n 'source_dir' : '/raid60/scratch/federico/netCDF_converted_Jan2020/era5_3188/' },\n \n 'era5_1759' : {'stations_file': '/raid8/srvx1/federico/GitHub/DEVELOP_JANUARY2020/CEUAS/CEUAS/cdm/code/merging/make_stationsId_tables/stations_filenames_list/stations_filenames_list/era5_1759_summary_duplicated_stations.txt' ,\n 'source_dir' : '/raid60/scratch/federico/netCDF_converted_Jan2020/era5_1759/' }, \n \n 'igra2' : {'stations_file' : 
'/raid8/srvx1/federico/GitHub/DEVELOP_JANUARY2020/CEUAS/CEUAS/cdm/code/merging/make_stationsId_tables/stations_filenames_list/stations_filenames_list/igra2_summary_duplicated_stations.txt' ,\n 'source_dir' : '/raid60/scratch/federico/netCDF_converted_Jan2020/igra2/' },\n \n 'ncar' : {'stations_file' : '/raid8/srvx1/federico/GitHub/DEVELOP_JANUARY2020/CEUAS/CEUAS/cdm/code/merging/make_stationsId_tables/stations_filenames_list/stations_filenames_list/ncar_summary_duplicated_stations.txt' ,\n 'source_dir' : '/raid60/scratch/federico/netCDF_converted_Jan2020/ncar/' }\n \n }\n \n \n \n \nif __name__ == '__main__':\n \n \"\"\" Must provide a file list as a string with the files separated by a comma, an outpur directory, and the name of the dataset. \"\"\" \n parser = argparse.ArgumentParser(description=\"Combine files containing data for the same station\")\n\n parser.add_argument('--stations' , '-s',\n help = \"List of stations to be processed in the form of a string, with the a comma as separator for the files, e.g. 'station_1,station_2,station_3' \" ,\n default = '',\n type = str) \n \n parser.add_argument('--outdir' , '-o',\n help = \"Output directory\" ,\n default = '',\n type = str) \n \n parser.add_argument('--dataset' , '-d',\n help = \"Dataset , e.g era_1, ncar etc.\" ,\n default = '',\n type = str) \n \n args = parser.parse_args()\n stations = args.stations \n dataset = args.dataset \n \n outdir = args.outdir\n \n\n stations_file = dic_dataset_stationsfiles[dataset]['stations_file']\n source_dir = dic_dataset_stationsfiles[dataset]['source_dir']\n dic = read_input_file(stations_file , source_dir) \n \n \"\"\" Get list of stations out of the input string\"\"\"\n try:\n Stations = stations.split(',') # must be a string with files separated by a comma, e.g. file_1,file_2 \n Stations = [ f for f in Stations if f ]\n except:\n pass\n \n \n \n for stat in Stations:\n \n \"\"\" Analize, Combine, Write each station \"\"\"\n files = [ f for f in dic[stat].split(',') ] # list of absolute paths to the files to be combined \n files = [ f for f in files if f ]\n cb = CombineNetCDF(files = files)\n cb.read_netCDF ()\n cb.combine_df()\n cb.write_netCDF( dataset= dataset , out_dir= outdir )\n\n \n\n\n\n# to run -o /raid60/scratch/federico/Pre_Merged_File/era5_3188 -f 0-20000-0-01463,0-20000-0-02020, -d era5_3188 \n'''\not1 = xr.open_dataset(f1 , engine = 'h5netcdf')\n\not1 = xr.open_dataset(f1 , engine = 'h5netcdf' , group = 'observations_table').to_dataframe() #[['observed_variable', 'z_coordinate' , 'date_time' , 'observed_variable', 'observation_value'] ]\not2 = xr.open_dataset(f2 , engine = 'h5netcdf' , group = 'observations_table').to_dataframe() #[['observed_variable', 'z_coordinate' , 'date_time' , 'observed_variable', 'observation_value'] ]\not3 = xr.open_dataset(f3 , engine = 'h5netcdf' , group = 'observations_table').to_dataframe() #[['observed_variable', 'z_coordinate' , 'date_time' , 'observed_variable', 'observation_value'] ]\n\n\n#['observed_variables', 'z_coordinate' , 'date_time' , 'observed_variable', 'observatio\n#\n#n_value']\n\n\ns1, s2 , s3 = set(ot1['date_time']) , set(ot2['date_time']) , set(ot3['date_time'])\n\ntot = list(s1) + list(s2) + list(s3) \n\nprint ( len(set(tot)) ) \n\nconcatenated = pd.concat ( [ot1, ot2, ot3 ] )\nconcatenated = concatenated.drop_duplicates()\n'''\n\n\"\"\"\n1. concatenate obervation tables\n2. concatenate feedback\n3. sort both by date_time and pressure\n4. 
find new record_index \n\n\n\n \n \n #out_dir = os.getcwd() \n #out_dir = '/raid60/scratch/federico/MERGED_FILES/'\n \n #f1 = '/raid8/srvx1/federico/GitHub/DEVELOP_JANUARY2020/CEUAS/CEUAS/cdm/code/merging/MOSCOW/chera5.3188.conv.C:4567.nc'\n #f2 = '/raid8/srvx1/federico/GitHub/DEVELOP_JANUARY2020/CEUAS/CEUAS/cdm/code/merging/MOSCOW/MOSCOW/chera5.3188.conv.C:4629.nc'\n #f3 = '/raid8/srvx1/federico/GitHub/DEVELOP_JANUARY2020/CEUAS/CEUAS/cdm/code/merging/MOSCOW/MOSCOW/chera5.3188.conv.C:5246.nc'\n \n #F = /raid8/srvx1/federico/GitHub/DEVELOP_JANUARY2020/CEUAS/CEUAS/cdm/code/merging/MOSCOW/chera5.3188.conv.C:4567.nc,\n # test = f1 + ',' + f2 + ',' + f3\n\"\"\"\n","sub_path":"CEUAS/public/harvest/code/pre_merge_stations.py","file_name":"pre_merge_stations.py","file_ext":"py","file_size_in_byte":14338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"533034227","text":"import pmdarima as pm\n\n\ndef test_schema_endpoints(client):\n \"\"\"The jsonschema is made available in /schema/ endpoints.\n\n \"\"\"\n assert 200 <= client.get(\"/arima/auto/fit/schema/\").status_code < 300\n assert 200 <= client.get(\"/arima/auto/fit-predict/schema/\").status_code < 300\n assert 200 <= client.get(\"/arima/check-model/schema/\").status_code < 300\n assert 200 <= client.get(\"/arima/predict/schema/\").status_code < 300\n\n\ndef test_check_model_invalid(client):\n \"\"\"The model checker can identify schema problems.\n\n \"\"\"\n invalid_model_one = {\n \"order\": (1, 2, 3, 4),\n }\n response = client.post(\"/arima/check-model/\", json=invalid_model_one)\n assert response.json[\"code\"] == response.status_code == 400\n assert response.json[\"error\"] == \"Schema Validation Error\"\n assert response.json[\"extra\"].get(\"path\") == [\"order\"]\n\n invalid_model_two = {\n \"order\": (1, 2, 3),\n \"method\": \"non-existent\",\n }\n response = client.post(\"/arima/check-model/\", json=invalid_model_two)\n assert response.json[\"code\"] == response.status_code == 400\n assert response.json[\"error\"] == \"Schema Validation Error\"\n assert response.json[\"extra\"].get(\"path\") == [\"method\"]\n\n invalid_model_three = {\n \"maxiter\": 1,\n \"transparams\": True,\n }\n response = client.post(\"/arima/check-model/\", json=invalid_model_three)\n assert response.json[\"code\"] == response.status_code == 400\n assert response.json[\"error\"] == \"Schema Validation Error\"\n assert response.json[\"extra\"].get(\"path\") == []\n\n valid_model = {\n \"order\": [1, 2, 3]\n }\n response = client.post(\"/arima/check-model/\", json=valid_model)\n assert 200 <= response.status_code < 300\n assert response.json == valid_model\n\n\ndef test_autofit(client):\n \"\"\"The autofit endpoint can generate ARIMA models.\n\n \"\"\"\n data = pm.datasets.load_woolyrnq().tolist()\n response = client.post(\"/arima/auto/fit/\", json={\n \"params\": {\n \"m\": 4,\n \"maxiter\": 2, # set to prevent long-running tests\n },\n \"fit\": data,\n })\n assert 200 <= response.status_code < 300\n model = response.json[\"model\"]\n assert 200 <= client.post(\"/arima/check-model/\", json=model).status_code < 300\n\n\ndef test_autofit_predict(client):\n \"\"\"The fit-predict endpoint can generate ARIMA models, and make\n predictions. 
The predictions should be consistent with those\n obtained from the non-auto predict endpoint.\n\n \"\"\"\n data = pm.datasets.load_woolyrnq().tolist()\n predict = {\"n\": 4, \"alpha\": 0.05}\n response = client.post(\"/arima/auto/fit-predict/\", json={\n \"autofit\": {\n \"params\": {\n \"m\": 4,\n \"maxiter\": 2,\n },\n \"fit\": data,\n },\n \"predict\": predict,\n })\n assert 200 <= response.status_code < 300\n model = response.json[\"model\"]\n assert 200 <= client.post(\"/arima/check-model/\", json=model).status_code < 300\n predictions = response.json[\"predictions\"]\n assert len(predictions) == predict[\"n\"]\n assert all(p[\"lower\"] <= p[\"value\"] <= p[\"upper\"] for p in predictions)\n\n response = client.post(\"/arima/predict/\", json={\n \"arima\": model,\n \"fit\": data,\n \"predict\": predict,\n })\n assert 200 <= response.status_code < 300\n assert response.json[\"predictions\"] == predictions\n","sub_path":"tranque_v1.5.1_source/stats-v1.5.1/src/blueprints/arima/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"549589414","text":"#-------------------------------------------------------------------------------\n# Name: Config\n# Purpose: Extend python default ConfigParser\n#\n# Author: Michael Blackwood\n#\n# Created: 02/29/2012\n# Copyright: (c) Monxcleyr Productions 2012\n# License: See License.txt\n#-------------------------------------------------------------------------------\n#!/usr/bin/env python\n\nimport ConfigParser, os\nfrom ConfigParser import RawConfigParser\n\nclass Config(RawConfigParser):\n def __init__(self):\n RawConfigParser.__init__(self)\n\n def getint_tuple(self, section, option):\n\n \"\"\" Returns Returns a tuple of ints\"\"\"\n\n term = self.get(section, option)\n term = term.split(\",\")\n\n temp = []\n for item in term:\n temp.append(int(item.strip(\" \")))\n\n returntuple = tuple(temp)\n\n return returntuple\n\n def getfloat_tuple(self, section, option):\n\n \"\"\" Returns Returns a tuple of ints\"\"\"\n\n term = self.get(section, option)\n term = term.split(\",\")\n\n temp = []\n for item in term:\n temp.append(float(item.strip(\" \")))\n\n returntuple = tuple(temp)\n\n return returntuple\n\n\n def getstr_tuple(self, section, option):\n\n \"\"\" Returns a tuple of strings\"\"\"\n\n term = self.get(section, option)\n term = term.split(\",\")\n\n temp = []\n for item in term:\n temp.append(str(item.strip(\" \")))\n\n returntuple = tuple(temp)\n\n return returntuple\n\n\n def getint_list(self, section, option):\n\n \"\"\" Returns Returns a list of ints\"\"\"\n\n term = self.get(section, option)\n term = term.split(\",\")\n\n temp = []\n for item in term:\n temp.append(int(item.strip(\" \")))\n\n return temp\n\n\n def getstr_list(self, section, option):\n\n \"\"\" Returns a list of strings\"\"\"\n\n term = self.get(section, option)\n term = term.split(\",\")\n\n temp = []\n for item in term:\n temp.append(str(item.strip(\" \")))\n\n return temp\n\n def item_list(self, section):\n\n \"\"\" Returns a list of items\"\"\"\n\n term = self.items(section)\n\n temp = []\n for item in term:\n temp.append(item[1])\n\n return temp\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lib/game/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"511956772","text":"from 
rlkit.envs.gym_minigrid.gym_minigrid.minigrid_absolute import *\nfrom rlkit.envs.gym_minigrid.gym_minigrid.register import register\nfrom rlkit.envs.gym_minigrid.gym_minigrid.envs.getfood_base import FoodEnvBase\nfrom rlkit.envs.gym_minigrid.gym_minigrid.minigrid_absolute import CELL_PIXELS, Food\n\n\nclass FoodEnvEasy(FoodEnvBase):\n \"\"\"\n Pick up food to gain 1 health point,\n Lose 1 health point every `health_rate` timesteps,\n Get 1 reward per timestep\n \"\"\"\n\n def __init__(self,\n init_resources=None,\n food_rate_decay=0.0,\n lifespan=0,\n her=False,\n navigate=False,\n **kwargs):\n self.init_resources = init_resources or {}\n self.food_rate_decay = food_rate_decay\n self.lifespan = lifespan\n self.her = her\n self.navigate = navigate\n\n super().__init__(**kwargs)\n\n if self.navigate:\n self.goal = self.place_obj(None)\n print(self.goal)\n\n if self.obs_vision:\n shape = (12481,)\n else:\n if self.fully_observed:\n shape = (131,)\n else:\n shape = (227,)\n\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=shape,\n dtype='uint8'\n )\n\n if self.her or self.navigate:\n # position obs\n obs_space = spaces.Box(low=0, high=self.grid_size, shape=(2,), dtype='uint8')\n goal_space = spaces.Box(low=-self.grid_size, high=self.grid_size, shape=(2,), dtype='float16')\n if self.her:\n self.observation_space = spaces.Dict({\n 'observation': obs_space,\n 'achieved_goal': obs_space,\n 'desired_goal': obs_space\n })\n else:\n # navigate\n self.observation_space = obs_space\n self.goal_space = goal_space\n\n def extra_step(self, action, matched):\n self.food_rate += self.food_rate_decay\n\n if matched:\n return matched\n\n agent_cell = self.grid.get(*self.agent_pos)\n matched = True\n\n # Collect resources. In the case of this env, mining = instant health bonus.\n if action == self.actions.mine:\n if agent_cell and agent_cell.can_mine(self):\n self.grid.set(*self.agent_pos, None)\n self.add_health(agent_cell.food_value())\n else:\n matched = False\n\n return matched\n\n def extra_gen_grid(self):\n for type, count in self.init_resources.items():\n for _ in range(count):\n self.place_obj(TYPE_TO_CLASS_ABS[type]())\n\n def extra_reset(self):\n if self.her:\n self.goal_obs_her = np.random.randint(1, self.grid_size - 1, size=(2,))\n print(self.goal_obs_her)\n\n def place_items(self):\n if self.food_rate:\n self.place_prob(Food(lifespan=self.lifespan), 1 / self.food_rate)\n\n def step(self, action):\n obs, rwd, done, info = super().step(action)\n pos = np.array(self.agent_pos)\n if self.her:\n obs = {'observation': pos, 'desired_goal': self.goal_obs_her, 'achieved_goal': pos}\n rwd = self.compute_reward(self.agent_pos, self.goal_obs_her, info)\n elif self.navigate:\n obs = pos\n rwd = self.navigate_reward(obs)\n if rwd:\n self.goal = self.place_obj(None)\n print(self.goal)\n return obs, rwd, done, info\n\n def navigate_reward(self, obs):\n return int(np.array_equal(obs, self.goal))\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n assert self.her, \"`compute_reward` function should only be used for HER\"\n\n return int(np.array_equal(achieved_goal, desired_goal))\n\n def reset(self):\n # this is done first so that agent_pos is updated\n super_reset = super().reset()\n pos = np.array(self.agent_pos)\n if self.her:\n return {'observation': pos, 'desired_goal': self.goal_obs_her, 'achieved_goal': pos}\n if self.navigate:\n return pos\n else:\n return super_reset\n\n def decay_health(self):\n if self.navigate:\n return\n super().decay_health()\n\n\nclass 
FoodEnvEasyCap50(FoodEnvEasy):\n pass\n\n\nclass FoodEnvEmptyFullObsHER(FoodEnvEasy):\n def __init__(self):\n super().__init__(fully_observed=True, her=True, food_rate=0)\n\n\nclass FoodEnvEmptyFullObsNavigate(FoodEnvEasy):\n def __init__(self):\n super().__init__(fully_observed=True, navigate=True, food_rate=0)\n\n\nclass FoodEnvEasyCap50Vision(FoodEnvEasy):\n def __init__(self):\n super().__init__(obs_vision=True)\n\n\nclass FoodEnvEasyCap100(FoodEnvEasy):\n def __init__(self):\n super().__init__(health_cap=100)\n\n\nclass FoodEnvEasyCap100Vision(FoodEnvEasy):\n def __init__(self):\n super().__init__(health_cap=100, obs_vision=True)\n\n\nclass FoodEnvEasyCap50Decay(FoodEnvEasy):\n def __init__(self):\n super().__init__(health_cap=50, food_rate_decay=0.005)\n\n\nclass FoodEnvEasyCap100Init10Decay(FoodEnvEasy):\n def __init__(self):\n super().__init__(health_cap=100, init_resources={'food': 10},\n food_rate_decay=0.005)\n\n\nclass FoodEnvEasyCap100Init10DecayVision(FoodEnvEasy):\n def __init__(self):\n super().__init__(health_cap=100, init_resources={'food': 10},\n food_rate_decay=0.005, obs_vision=True)\n\n\nclass FoodEnvEasyFood6Cap100Decay(FoodEnvEasy):\n def __init__(self):\n super().__init__(health_cap=100, food_rate=6, food_rate_decay=0.005)\n\n\nclass FoodEnvEasyFood6Cap2000Lifespan50FullObs(FoodEnvEasy):\n def __init__(self):\n super().__init__(food_rate=6, health_cap=2000, lifespan=50, fully_observed=True)\n\n\nclass FoodEnvEasyFood6Cap50DecayLifespan30(FoodEnvEasy):\n def __init__(self):\n super().__init__(food_rate=6, health_cap=50, food_rate_decay=0.005,\n lifespan=30)\n\n\nclass FoodEnvEasyFood6Cap2000DecayLifespan30(FoodEnvEasy):\n def __init__(self):\n super().__init__(health_rate=10, food_rate=6, health_cap=2000, food_rate_decay=0.005,\n lifespan=30)\n\n\nclass FoodEnvEasyFood6Cap2000DecayLifespan30FullObs(FoodEnvEasy):\n def __init__(self):\n super().__init__(food_rate=6, health_cap=2000, food_rate_decay=0.005,\n lifespan=30, fully_observed=True)\n\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Cap50-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyCap50'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Empty-FullObs-HER-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEmptyFullObsHER'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Empty-FullObs-Navigate-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEmptyFullObsNavigate'\n)\n\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Cap50-Vision-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyCap50Vision'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Cap100-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyCap100'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Cap100-Vision-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyCap100Vision'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Cap50-Decay-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyCap50Decay'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Cap100-Init10-Decay-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyCap100Init10Decay'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Cap100-Init10-Decay-Vision-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyCap100Init10DecayVision'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Food6-Cap100-Decay-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyFood6Cap100Decay'\n)\n\nregister(\n 
id='MiniGrid-Food-8x8-Easy-Food6-Cap2000-Lifespan50-FullObs-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyFood6Cap2000Lifespan50FullObs'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Food6-Cap50-Decay-Lifespan30-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyFood6Cap50DecayLifespan30'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Food6-Cap2000-Decay-Lifespan30-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyFood6Cap2000DecayLifespan30'\n)\n\nregister(\n id='MiniGrid-Food-8x8-Easy-Food6-Cap2000-Decay-Lifespan30-FullObs-v1',\n entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEasyFood6Cap2000DecayLifespan30FullObs'\n)\n","sub_path":"rlkit/envs/gym_minigrid/gym_minigrid/envs/getfood_easy.py","file_name":"getfood_easy.py","file_ext":"py","file_size_in_byte":8783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"283675531","text":"import argparse\nimport re\n\nimport datetime\nfrom dateutil.parser import parse as parsetime\nfrom dateutil.relativedelta import relativedelta\n\n\ndef subcmds(subparsers):\n # modification related methods not supported yet\n\n parser = subparsers.add_parser('attachment',\n help='get an attachment')\n parser.set_defaults(fcn='attachment')\n # positional args\n parser.add_argument('ids',\n type=attachment_id,\n action='parse_stdin',\n nargs='+',\n help='the ID(s) of the attachment(s)')\n # optional args\n attachment = base_options(parser, 'attachment')\n\n parser = subparsers.add_parser('get',\n help='get an issue')\n parser.set_defaults(fcn='get')\n # positional args\n parser.add_argument('ids',\n type='ids',\n action='parse_stdin',\n nargs='+',\n metavar='ID',\n help='the ID(s) of the issues(s) to retrieve')\n # optional args\n get = base_options(parser, 'get')\n get.add_argument('--no-updates',\n action='store_false',\n default=True,\n help='do not show updates to fields like labels, status, owner, ...',\n dest='get_updates')\n\n parser = subparsers.add_parser('search',\n help='search for issues')\n parser.set_defaults(fcn='search')\n # positional args\n parser.add_argument('terms',\n action='parse_stdin',\n nargs='*',\n help='strings to search for in title and/or body')\n # optional args\n search = base_options(parser, 'search')\n search.add_argument('--has',\n choices=['attachment', 'no-attachment', 'cc', 'no-cc', 'owner', 'no-owner',\n 'comment', 'no-comment', 'label', 'no-label', 'status', 'no-status',\n 'type', 'no-type'],\n action='append',\n help=\"restrict by issues that have or don't have a specified field\")\n search.add_argument('--attachment',\n help='restrict by issues that have attachments matching a certain filename')\n search.add_argument('--blocked',\n action='store_true',\n help='restrict by issues that are blocked')\n search.add_argument('--blocked-on',\n action='append',\n type=int,\n help='restrict by blocked on issues (one or more)')\n search.add_argument('--blocking',\n action='append',\n type=int,\n help='restrict by blocking issues (one or more)')\n search.add_argument('-o', '--owner',\n help='owner of the issue (or none for no owner)')\n search.add_argument('-r', '--reporter',\n help='restrict by reporter')\n search.add_argument('--cc',\n action='append',\n help='restrict by CC email address (one or more)')\n search.add_argument('--commenter',\n action='append',\n help='restrict by commenter email address (one or more)')\n search.add_argument('-s', '--status',\n action='append',\n help='restrict by 
status (one or more, use all for all statuses)')\n search.add_argument('-l', '--label',\n action='append',\n help='restrict by label (one or more)')\n search.add_argument('--attr',\n type=attribute,\n action='append',\n help='restrict by attribute and value (one or more of type attr:value)')\n search.add_argument('-t', '--type',\n action='append',\n help='restrict by type (one or more)')\n search.add_argument('--milestone',\n action='append',\n help='restrict by milestone (one or more)')\n search.add_argument('--opened',\n type=parse_dates,\n help='restrict by opened date')\n search.add_argument('--modified',\n type=parse_dates,\n help='restrict by last modified date')\n search.add_argument('--closed',\n type=parse_dates,\n help='restrict by closed date')\n search.add_argument('--published',\n type=parse_dates2,\n help='restrict by published date')\n search.add_argument('--updated',\n type=parse_dates2,\n help='restrict by updated date')\n search.add_argument('--stars',\n type=parse_stars,\n help='restrict by number of stars')\n search.add_argument('--summary',\n action='store_true',\n help='search in the issue summary')\n search.add_argument('--description',\n action='store_true',\n help='search in the issue description')\n search.add_argument('--comment',\n action='store_true',\n help='search in the issue comments')\n search.add_argument('--query',\n help='manually specify an advanced query')\n search.add_argument('--sort',\n #choices=service.attributes.keys() + ['-' + key for key in service.attributes.keys()],\n help='sort by field type')\n search.add_argument('-u', '--url',\n action='store_true',\n help='show search url for the browser')\n search.add_argument('--output',\n type=str,\n help='custom format for search output')\n\n # # add generic options for subcommands\n # get_actions = [get, search]\n # send_actions = []\n #\n # for group in get_actions:\n # generic_receive(group)\n # for group in send_actions:\n # generic_send(group)\n\n\ndef modify(subparsers):\n parser = subparsers.add_parser('modify',\n help='modify an issue (eg. 
post a comment)')\n parser.add_argument('issue_id',\n type=int,\n help='the ID of the issue to modify')\n # optional args\n parser.add_argument('-c', '--comment',\n help='add comment from command line')\n parser.add_argument('-d', '--duplicate',\n type=int,\n help='this issue is a duplicate')\n parser.add_argument('-o', '--owner',\n help='change owner for this issue')\n parser.add_argument('-t', '--title',\n help='set title of issue')\n parser.add_argument('-u', '--url',\n help='set URL field of issue')\n parser.add_argument('--add-cc',\n action='append',\n help='add an email to the CC list')\n parser.add_argument('--remove-cc',\n action='append',\n help='remove an email from the CC list')\n parser.add_argument('--add-depends',\n action='append',\n help='add an issue to the depends list')\n parser.add_argument('--remove-depends',\n action='append',\n help='remove an issue from the depends list')\n parser.add_argument('--add-blocked',\n action='append',\n help='add an issue to the blocked list')\n parser.add_argument('--remove-blocked',\n action='append',\n help='remove an issue from the blocked list')\n parser.add_argument('--fixed',\n action='store_true',\n help='mark issue as fixed')\n parser.add_argument('--invalid',\n action='store_true',\n help='mark issue as invalid')\n parser.set_defaults(fcn='modify')\n\ndef create(subparsers):\n parser = subparsers.add_parser('create',\n help='create a new issue')\n # optional args\n parser.add_argument('-t', '--title',\n help='title of issue')\n parser.add_argument('-d', '--description',\n help='description of the issue')\n parser.add_argument('-o', '--owner',\n help='change owner for this issue')\n parser.add_argument('--cc',\n help='add a list of emails to CC list')\n parser.add_argument('-u', '--url',\n help='URL associated with the issue')\n parser.add_argument('--depends-on',\n help='add a list of issue dependencies',\n dest='dependson')\n parser.add_argument('--blocked',\n help='add a list of blocker issues')\n parser.set_defaults(fcn='create')\n\ndef parse_stars(s):\n rg = re.match(r'^(\\d+)-(\\d+)$', s)\n gt = re.match(r'^(\\d+)-$', s)\n lt = re.match(r'^-(\\d+)$', s)\n eq = re.match(r'^\\d+(,\\d+)*$', s)\n\n if rg:\n (lower, upper) = rg.groups()\n lower = int(lower)\n upper = int(upper)+1\n stars_query = 'stars:{} -stars:{}'.format(lower, upper)\n elif gt:\n bound = int(gt.group(1))\n stars_query = 'stars:{}'.format(bound)\n elif lt:\n bound = int(lt.group(1))+1\n stars_query = 'stars:0 -stars:{}'.format(bound)\n elif eq:\n stars_query = [x for x in s.split(',')]\n else:\n msg = '{} is not a valid stars argument'.format(s)\n raise argparse.ArgumentTypeError(msg)\n\n return (s, stars_query)\n\ndef isodate(date):\n return date.strftime('%Y-%m-%d')\n\ndef oneday(day):\n next_day = day + relativedelta(days=+1)\n return (isodate(day), isodate(next_day))\n\ndef parse_date(string):\n if re.match(r'^\\d\\d\\d\\d$', string):\n temp_date = dateutil.parser.parse(string)\n lower = temp_date + relativedelta(nlyearday=365, years=-1)\n upper = temp_date + relativedelta(nlyearday=1, years=+1)\n date_range = (isodate(lower), isodate(upper))\n elif re.match(r'^\\d\\d\\d\\d/\\d\\d$', string):\n temp_date = dateutil.parser.parse(string)\n lower = temp_date + relativedelta(day=31, months=-1)\n upper = temp_date + relativedelta(day=1, months=+1)\n date_range = (isodate(lower), isodate(upper))\n elif re.match(r'^\\d\\d\\d\\d/\\d\\d/\\d\\d$', string):\n temp_date = dateutil.parser.parse(string)\n lower = temp_date + relativedelta(days=-1)\n upper = temp_date + 
relativedelta(days=+1)\n date_range = (isodate(lower), isodate(upper))\n else:\n return None\n\n return date_range\n\ndef parse_dates(string):\n days = { 'mon': MO, 'tue': TU, 'wed': WE, 'thu': TH, 'fri': FR, 'sat': SA, 'sun': SU }\n today = datetime.datetime.utcnow()\n tomorrow = today + relativedelta(days=+1)\n\n upper_bound = re.match(r'^-(\\d\\d\\d\\d(/\\d\\d){0,2})$', string)\n lower_bound = re.match(r'^(\\d\\d\\d\\d(/\\d\\d){0,2})-$', string)\n range = re.match(r'^(\\d\\d\\d\\d(/\\d\\d){0,2})-(\\d\\d\\d\\d(/\\d\\d){0,2})$', string)\n\n offset = re.match(r'^([<=>])(\\d+)([ymwd])$', string)\n\n temp_range = parse_date(string)\n if temp_range is not None:\n date_range = temp_range\n elif upper_bound:\n (lower, upper) = parse_date(upper_bound.group(1))\n date_range = (None, upper)\n elif lower_bound:\n (lower, upper) = parse_date(lower_bound.group(1))\n date_range = (lower, None)\n elif range:\n (lower1, upper1) = parse_date(range.group(1))\n (lower2, upper2) = parse_date(range.group(3))\n date_range = (lower1, upper2)\n elif offset:\n units = {'y': 'years', 'm': 'months', 'w': 'weeks', 'd': 'days'}\n unit = units[offset.group(3)]\n value = -int(offset.group(2))\n kw = {unit: value}\n operator = offset.group(1)\n\n if operator == '<':\n date = today + relativedelta(**kw)\n lower_bound = isodate(date)\n date_range = (lower_bound, None)\n elif operator == '=':\n date = today + relativedelta(**kw)\n date_range = oneday(date)\n elif operator == '>':\n date = today + relativedelta(**kw)\n upper_bound = isodate(date)\n date_range = (None, upper_bound)\n\n elif string.lower() in days:\n day = today + relativedelta(weekday=days[string.lower()](-1))\n date_range = oneday(day)\n elif string == 'today':\n date_range = oneday(today)\n elif string == 'yesterday':\n yesterday = today + relativedelta(days=-1)\n date_range = oneday(yesterday)\n elif string == 'this-week':\n monday = today + relativedelta(weekday=MO(-1))\n sunday = today + relativedelta(weekday=SU)\n this_monday = isodate(monday)\n this_sunday = isodate(sunday)\n date_range = (this_monday, this_sunday)\n elif string == 'last-week':\n monday = today + relativedelta(days=-1, weekday=MO(-2))\n sunday = today + relativedelta(days=-1, weekday=SU(-1))\n last_monday = isodate(monday)\n last_sunday = isodate(sunday)\n date_range = (last_monday, last_sunday)\n elif string == 'this-month':\n this_month = today + relativedelta(day=1)\n date_range = (isodate(this_month), isodate(tomorrow))\n elif string == 'last-month':\n last_month = today + relativedelta(day=1, months=-1)\n this_month = today + relativedelta(day=1)\n date_range = (isodate(last_month), isodate(this_month))\n else:\n msg = '{} is not a valid date argument'.format(string)\n raise argparse.ArgumentTypeError(msg)\n\n return (string, date_range)\n\ndef parse_dates2(string):\n days = { 'mon': MO, 'tue': TU, 'wed': WE, 'thu': TH, 'fri': FR, 'sat': SA, 'sun': SU }\n today = datetime.datetime.utcnow()\n tomorrow = today + relativedelta(days=+1)\n\n upper_bound = re.match(r'^-(\\d\\d\\d\\d(/\\d\\d){0,2})$', string)\n lower_bound = re.match(r'^(\\d\\d\\d\\d(/\\d\\d){0,2})-$', string)\n range = re.match(r'^(.+)-(.+)$', string)\n\n offset = re.match(r'^([<=>])(\\d+)([ymwdhs]|min)$', string)\n\n if range:\n lower = dateutil.parser.parse(range.group(1))\n upper = dateutil.parser.parse(range.group(2))\n date_range = (datetimetostr(lower), datetimetostr(upper))\n elif offset:\n units = {'y': 'years', 'm': 'months', 'w': 'weeks', 'd': 'days',\n 'h': 'hours', 'min': 'minutes', 's': 'seconds'}\n unit 
= units[offset.group(3)]\n value = -int(offset.group(2))\n kwargs = {unit: value}\n operator = offset.group(1)\n\n if operator == '<':\n date = today + relativedelta(**kwargs)\n lower_bound = datetimetostr(date)\n date_range = (lower_bound, None)\n #elif operator == '=':\n #date = today + relativedelta(**kwargs)\n #date_range = oneday(date)\n elif operator == '>':\n date = today + relativedelta(**kwargs)\n upper_bound = datetimetostr(date)\n date_range = (None, upper_bound)\n\n elif string.lower() in days:\n day = today + relativedelta(weekday=days[string.lower()](-1))\n date_range = oneday(day)\n elif string == 'today':\n date_range = oneday(today)\n elif string == 'yesterday':\n yesterday = today + relativedelta(days=-1)\n date_range = oneday(yesterday)\n elif string == 'this-week':\n monday = today + relativedelta(weekday=MO(-1))\n sunday = today + relativedelta(weekday=SU)\n this_monday = isodate(monday)\n this_sunday = isodate(sunday)\n date_range = (this_monday, this_sunday)\n elif string == 'last-week':\n monday = today + relativedelta(days=-1, weekday=MO(-2))\n sunday = today + relativedelta(days=-1, weekday=SU(-1))\n last_monday = isodate(monday)\n last_sunday = isodate(sunday)\n date_range = (last_monday, last_sunday)\n elif string == 'this-month':\n this_month = today + relativedelta(day=1)\n date_range = (isodate(this_month), isodate(tomorrow))\n elif string == 'last-month':\n last_month = today + relativedelta(day=1, months=-1)\n this_month = today + relativedelta(day=1)\n date_range = (isodate(last_month), isodate(this_month))\n else:\n msg = '{} is not a valid date argument'.format(string)\n raise argparse.ArgumentTypeError(msg)\n\n return (string, date_range)\n\ndef attribute(string):\n m = re.match(r'^\\w+:\\w+$', string)\n if m:\n return string\n else:\n msg = '{} is not a valid attr argument'.format(string)\n raise argparse.ArgumentTypeError(msg)\n\ndef attachment_id(string):\n m = re.match(r'^\\d+-\\d+$', string)\n if m:\n return string\n else:\n msg = '{} is not a valid attachment ID'.format(string)\n raise argparse.ArgumentTypeError(msg)\n","sub_path":"src/bite/args/monorail.py","file_name":"monorail.py","file_ext":"py","file_size_in_byte":15480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"461045705","text":"from dolfin import *\nfrom mshr import *\n\nimport_matplotlib = False\n\nimport sa_utils\nimport sa_hdf5\n\nimport numpy as np\nimport sympy as smp\nimport gc\nimport datetime\ncomm = MPI.comm_world\n\neps = 1e-10\n\nnow = datetime.datetime.now()\nlogger = sa_utils.LogWrapper('fenics_pum_{:s}'.format(now.strftime(\"%Y-%m-%d_%H:%M\")), stdout=True)\n\ndef sympy_get_monomials(dimension, degree, vector = False):\n xs = smp.symbols(['x[{:d}]'.format(ii) for ii in range(dimension)], real=True)\n monos = smp.polys.monomials.itermonomials(xs, degree)\n for xx in xs:\n monos = [smp.horner(mm, wrt=xx) for mm in monos]\n monos = sorted(monos, key=smp.polys.orderings.monomial_key('grlex', xs[::-1]))[1:]\n if vector:\n expressions = [Constant(\n [0 for ll in range(kk)]+\n [1] +\n [0 for ll in range(kk+1, dimension)]\n ) for kk in range(dimension)]\n expressions += [Expression(\n ['0' for ll in range(kk)]+\n [smp.ccode(mm)]+\n ['0' for ll in range(kk+1, dimension)],\n degree=int(smp.degree(mm))) for mm in monos for kk in range(dimension)\n ]\n else:\n expressions = [Constant(1)]\n expressions += [\n Expression(smp.ccode(mm), degree=int(smp.degree(mm))) for mm in monos\n ]\n return expressions\n\ndef 
get_interpolants(spaces, expressions):\n return [[interpolate(exp, space) for exp in expressions] for space in spaces]\n\ndef distance_points_box(points, low, high):\n dd = np.max([low-points, np.zeros_like(points), points-high], 0)\n return np.sqrt(np.sum(dd*dd, 1))\n\ndef inside_points_box(points, low, high):\n return (low-eps < points).all(1)*(points < high+eps).all(1)\n \ndef construct_pu(\n mesh, pum_low, pum_high, *,\n pum_res = 2, pum_alpha = .25, oversampling = [2.0],\n cutoff = eps\n):\n basedim = mesh.geometric_dimension()\n num_oversampling = len(oversampling)\n \n logger.info(\"Constructing PU\")\n pum_diff = pum_high-pum_low\n pum_hh = np.max(pum_diff)*2.**(-pum_res)\n pum_max = np.array(np.ceil(pum_diff/pum_hh), dtype=int)\n\n logger.info(\"Constructing flat top regions\")\n patches_flattop = []\n patches_oversampling = [[] for ii in range(num_oversampling)]\n for kk in range(pum_max[2]):\n zz_low = pum_low[2]+(kk+0.5)*pum_hh\n for jj in range(pum_max[1]):\n yy_low = pum_low[1]+(jj+0.5)*pum_hh\n for ii in range(pum_max[0]):\n xx_low = pum_low[0]+(ii+0.5)*pum_hh\n pt = np.array([xx_low, yy_low, zz_low])\n patches_flattop.append([pt-pum_hh*(1.-pum_alpha)*0.5, pt+pum_hh*(1.-pum_alpha)*0.5])\n for ll, oo in enumerate(oversampling):\n patches_oversampling[ll].append([pt-pum_hh*(1.+pum_alpha)*oo*0.5, pt+pum_hh*(1.+pum_alpha)*oo*0.5])\n patches_flattop = np.array(patches_flattop)\n num_patches = len(patches_flattop)\n \n pum_alpha_hh = pum_hh*pum_alpha\n mesh_vertices = mesh.coordinates()\n num_vertices = mesh.num_vertices()\n vertices_done = np.zeros(num_vertices, dtype=int)\n patches_ok = []\n vertices_ok = []\n for ii, patch in enumerate(patches_flattop):\n res = inside_points_box(mesh_vertices, *patch)\n if res.any():\n patches_ok.append(ii)\n vertices_done += res\n vertices_ok.append(np.where(res > 0)[0])\n vertices_undone = np.where(vertices_done < 1)[0]\n patches_ok = np.array(patches_ok, dtype=int)\n patches_flattop = patches_flattop[patches_ok]\n for ll in range(num_oversampling):\n patches_oversampling[ll] = np.array(patches_oversampling[ll])[patches_ok]\n logger.info(\"[{:d}/{:d}] patches left\".format(len(patches_flattop), num_patches))\n num_patches = len(patches_flattop)\n \n coeffs = np.zeros((num_patches, num_vertices))\n for ii, patch in enumerate(patches_flattop):\n vs = vertices_ok[ii]\n ci = coeffs[ii]\n ci[vertices_undone] = distance_points_box(mesh_vertices[vertices_undone], *patch)\n ci[np.where(ci > pum_alpha_hh)] = 0\n ci[np.where(ci < cutoff)] = 0\n ww = np.where(ci > 0)\n ci[ww] = (pum_alpha_hh-ci[ww])/pum_alpha_hh\n coeffs[ii, vs] = 1.\n\n coeffs_sum = sum(coeffs, 0)\n coeffs /= coeffs_sum\n \n VV = FunctionSpace(mesh, 'CG', 1)\n pu = []\n mm = dof_to_vertex_map(VV)\n for ii, patch in enumerate(patches_flattop):\n pu.append(Function(VV, name='u'))\n pu[-1].vector().set_local(coeffs[ii][mm])\n\n mesh_cells = mesh.cells()\n markers = [[] for ii in range(num_oversampling)]\n for ll, pts in enumerate(patches_oversampling):\n for pt in pts:\n inside_vertices = inside_points_box(mesh_vertices, *pt)\n mf = MeshFunction('size_t', mesh, basedim, 0)\n mf.set_values(np.apply_along_axis(lambda cc: inside_vertices[cc].any(), 1, mesh_cells))\n markers[ll].append(mf)\n return pu, markers\n\ndef make_subdomains(\n mesh, pu, markers, *,\n cell_function = None, facet_function = None,\n vector = False, cutoff = eps, debug = False\n):\n logger.info('make_subdomains, [{:d}] patches, mesh [{:d}] dof, start'.format(len(pu), mesh.num_vertices()));\n assert(len(pu) == 
len(markers))\n basedim = mesh.geometric_dimension()\n num = len(pu)\n mesh_scalar_VV = pu[0].function_space()\n mesh_vertex_to_scalar_dof_map = vertex_to_dof_map(mesh_scalar_VV)\n if vector:\n mesh_VV = VectorFunctionSpace(mesh, 'CG', 1)\n mesh_vertex_to_dof_map = vertex_to_dof_map(mesh_VV)\n if debug:\n debug_uu = interpolate(Expression(['x[0]*x[1]']+['0' for ii in range(basedim-1)], degree=1), mesh_VV)\n elif debug:\n debug_uu = interpolate(Expression('x[0]*x[1]', degree=1), mesh_scalar_VV)\n\n oversampled_submeshes = []\n oversampled_submeshes_VV = []\n subsubmesh_markers = []\n local_to_global_data = []\n if cell_function is not None:\n submesh_cell_functions = []\n if facet_function is not None:\n submesh_facet_functions = []\n bmesh = BoundaryMesh(mesh, 'exterior')\n bmesh_map = bmesh.entity_map(basedim-1)\n exterior_subdomain = AutoSubDomain(lambda xx, on: on)\n count = 0\n for pp, mk in zip(pu, markers):\n logger.info(' patch [{:d}/{:d}] start'.format(count+1, num))\n logger.info(' creating submesh')\n coeff = pp.vector()\n \n submesh = SubMesh(mesh, mk, 1)\n oversampled_submeshes.append(submesh)\n logger.info(' created submesh')\n logger.info(' creating maps')\n vertex_parent_map = submesh.data().array(\"parent_vertex_indices\", 0)\n \n mf = MeshFunction('size_t', submesh, basedim, 0)\n mf.set_values(np.apply_along_axis(lambda cc: (coeff[mesh_vertex_to_scalar_dof_map[vertex_parent_map[cc]]] > 0).any(), 1, submesh.cells()))\n subsubmesh_markers.append(mf)\n \n submesh_scalar_VV = FunctionSpace(submesh, 'CG', 1)\n submesh_scalar_dof_to_vertex_map = dof_to_vertex_map(submesh_scalar_VV)\n submesh_scalar_dof_to_mesh_scalar_dof_map = mesh_vertex_to_scalar_dof_map[vertex_parent_map[submesh_scalar_dof_to_vertex_map]]\n submesh_scalar_nonzero = np.where(coeff[submesh_scalar_dof_to_mesh_scalar_dof_map] > cutoff)[0]\n \n if not vector:\n submesh_VV = submesh_scalar_VV\n submesh_dof_to_mesh_dof_map = submesh_scalar_dof_to_mesh_scalar_dof_map\n submesh_nonzero = submesh_scalar_nonzero\n submesh_nonzero_pu = coeff[submesh_dof_to_mesh_dof_map[submesh_nonzero]]\n else:\n submesh_VV = VectorFunctionSpace(submesh, 'CG', 1)\n submesh_dof_to_vertex_map = dof_to_vertex_map(submesh_VV)\n submesh_dof_to_vertex_map_index = submesh_dof_to_vertex_map//basedim\n submesh_dof_to_vertex_map_offset = submesh_dof_to_vertex_map%basedim\n submesh_dof_to_mesh_dof_map = mesh_vertex_to_dof_map[basedim*vertex_parent_map[submesh_dof_to_vertex_map_index]+submesh_dof_to_vertex_map_offset]\n submesh_nonzero = np.where(coeff[mesh_vertex_to_scalar_dof_map[vertex_parent_map[submesh_dof_to_vertex_map_index]]] > cutoff)[0]\n submesh_nonzero_pu = coeff[mesh_vertex_to_scalar_dof_map[vertex_parent_map[submesh_dof_to_vertex_map_index[submesh_nonzero]]]]\n oversampled_submeshes_VV.append(submesh_VV)\n local_to_global_data.append((submesh_dof_to_mesh_dof_map, submesh_nonzero, submesh_nonzero_pu))\n logger.info(' created maps')\n \n if cell_function is not None:\n logger.info(' copying cell function to submesh')\n cell_parent_map = submesh.data().array(\"parent_cell_indices\", basedim)\n sub_V0 = FunctionSpace(submesh, 'DG', 0)\n cf = Function(sub_V0)\n cf.vector().set_local(cell_function.vector()[cell_parent_map])\n submesh_cell_functions.append(cf)\n logger.info(' copyied cell function to submesh')\n else:\n cf = None\n \n if facet_function is not None:\n logger.info(' copying facet function to submesh')\n ff = MeshFunction('size_t', submesh, basedim-1, 0)\n bsubmesh = BoundaryMesh(submesh, 'exterior')\n bsubmesh_map = 
bsubmesh.entity_map(basedim-1)\n logger.info(' submesh exterior')\n exterior_subdomain.mark(ff, 100)\n \n logger.info(' boundary mesh submesh')\n submesh_min = submesh.coordinates().min(0)\n submesh_max = submesh.coordinates().max(0)\n submesh_bounding = AutoSubDomain(lambda xx, on: (submesh_min-eps <= xx).all() and (xx <= submesh_max+eps).all())\n subbmarker = MeshFunction('size_t', bmesh, basedim-1, 0)\n submesh_bounding.mark(subbmarker, 1)\n subbmesh = SubMesh(bmesh, subbmarker, 1)\n subbmesh_map = subbmesh.data().array(\"parent_cell_indices\", basedim-1)\n \n logger.info(' slow loop')\n for sub_cell in cells(bsubmesh):\n sub_facet = Facet(submesh, bsubmesh_map[sub_cell.index()])\n sub_facet_vertices_set = set(vertex_parent_map[sub_facet.entities(0)])\n for cell in cells(subbmesh):\n facet = Facet(mesh, bmesh_map[subbmesh_map[cell.index()]])\n if set(facet.entities(0)) == sub_facet_vertices_set:\n ff[sub_facet.index()] = facet_function[facet.index()]\n break\n submesh_facet_functions.append(ff)\n logger.info(' copyied facet function to submesh')\n else:\n ff = None\n \n if debug:\n logger.info(' writing debugging output')\n tmp_mf = MeshFunction('double', submesh, basedim, 0)\n tmp_mf.set_values(cf.vector())\n sa_hdf5.write_dolfin_mesh(submesh, 'submesh_{:d}_restrictions'.format(count), cell_function = tmp_mf, facet_function = ff)\n sa_hdf5.write_dolfin_mesh_functions(submesh, 'submesh_{:d}_markers'.format(count), cell_functions = [subsubmesh_markers[-1]])\n uu = Function(submesh_scalar_VV)\n uu.vector()[submesh_scalar_nonzero] = coeff[submesh_scalar_dof_to_mesh_scalar_dof_map[submesh_scalar_nonzero]]\n sa_hdf5.write_dolfin_scalar_cg1('submesh_{:d}_pu'.format(count), [uu])\n vv = Function(submesh_VV)\n vv.vector()[submesh_nonzero] = debug_uu.vector()[submesh_dof_to_mesh_dof_map[submesh_nonzero]]\n vv.vector()[submesh_nonzero] *= submesh_nonzero_pu\n if vector:\n sa_hdf5.write_dolfin_vector_cg1('submesh_{:d}_vector'.format(count), [vv])\n else:\n sa_hdf5.write_dolfin_scalar_cg1('submesh_{:d}_scalar'.format(count), [vv])\n logger.info(' written debugging output')\n logger.info(' patch [{:d}/{:d}] end'.format(count+1, num))\n count += 1\n ret = dict()\n ret['meshes'] = oversampled_submeshes\n ret['spaces'] = oversampled_submeshes_VV\n ret['markers'] = subsubmesh_markers\n ret['local_to_global'] = local_to_global_data\n if cell_function is not None:\n ret['cell_functions'] = submesh_cell_functions\n if facet_function is not None:\n ret['facet_functions'] = submesh_facet_functions\n logger.info('make_subdomains, [{:d}] patches, mesh [{:d}] dof, end'.format(len(pu), mesh.num_vertices()));\n return ret\n\ndef local_to_global(VV, functions, local_to_global_data):\n ret = []\n for fs, (idx_map, idx, pus) in zip(functions, local_to_global_data):\n for ff in fs:\n uu = Function(VV)\n uu.vector()[idx_map[idx]] = ff.vector()[idx]*pus\n ret.append(uu)\n return ret\n\ndef cell_function_to_dg(ff, V0 = None):\n if V0 is None:\n V0 = FunctionSpace(ff.mesh(), 'DG', 0)\n v0 = Function(V0)\n v0.vector().set_local(ff.array())\n return v0\n \ndef create_oht_example():\n res = 2**6\n layers = 8\n layer_zz = 0.0074\n dims = np.array([6., 3., layers*layer_zz])\n basename = 'oht'\n\n box = Box(Point(*(-dims/2)), Point(*(dims/2)))\n hole = Cylinder(Point(0,0,-1), Point(0,0,1), 0.125, 0.125, int(res))\n domain = box-hole\n\n mesh = generate_mesh(domain, res)\n\n cell_coeff = MeshFunction('double', mesh, 3, 1)\n for cell in cells(mesh):\n if cell.midpoint().z() < layer_zz:\n cell_coeff[cell] = 10\n\n facet_function 
= MeshFunction('size_t', mesh, 2, 0)\n bc_dict = dict()\n left = AutoSubDomain(lambda xx, on: on and near(xx[0], -0.5*dims[0], eps = eps))\n bc_dict[1] = left\n right = AutoSubDomain(lambda xx, on: on and near(xx[0], 0.5*dims[0], eps = eps))\n bc_dict[2] = right\n front = AutoSubDomain(lambda xx, on: on and near(xx[1], -0.5*dims[1], eps = eps))\n bc_dict[3] = front\n back = AutoSubDomain(lambda xx, on: on and near(xx[1], 0.5*dims[1], eps = eps))\n bc_dict[4] = back\n bottom = AutoSubDomain(lambda xx, on: on and near(xx[2], -0.5*dims[2], eps = eps))\n bc_dict[5] = bottom\n top = AutoSubDomain(lambda xx, on: on and near(xx[2], 0.5*dims[2], eps = eps))\n bc_dict[6] = top\n border = AutoSubDomain(lambda xx, on: on)\n border.mark(facet_function, 100)\n for key in bc_dict:\n bc_dict[key].mark(facet_function, key)\n\n sa_hdf5.write_dolfin_mesh(mesh, '{:s}'.format(basename), cell_function = cell_coeff, facet_function = facet_function)\n return mesh, cell_coeff, facet_function, basename\n\n\nif __name__ == '__main__':\n polydegree = 1\n create = True\n debug = True\n vector = True\n if vector:\n Space = VectorFunctionSpace\n write_functions = sa_hdf5.write_dolfin_vector_cg1\n else:\n Space = FunctionSpace\n write_functions = sa_hdf5.write_dolfin_scalar_cg1\n \n if create:\n mesh, cell_coeff, facets, basename = create_oht_example()\n \n pum_low = np.array([-4.5,-1.5,-1.5])\n pum_high = np.array([7.5,1.5,1.5])\n pum_res = 2\n pum_alpha = 0.25\n\n oversampling = [1.5] \n pu, markers = construct_pu(\n mesh, pum_low, pum_high,\n pum_res = pum_res, pum_alpha = pum_alpha, oversampling = oversampling\n )\n \n logger.info('writing')\n sa_hdf5.write_dolfin_scalar_cg1('{:s}_pu'.format(basename), pu)\n for ii, oo in enumerate(oversampling):\n sa_hdf5.write_dolfin_mesh_functions(mesh, '{:s}_{:.2e}'.format(basename, oo), cell_functions = markers[ii])\n\n del mesh, cell_coeff, facets, pu, markers\n\n logger.info('reloading')\n read1 = sa_hdf5.read_dolfin_mesh('{:s}'.format(basename))\n mesh = read1['mesh']\n basedim = mesh.geometric_dimension()\n cell_function = cell_function_to_dg(read1['cell_function'])\n facet_function = read1['facet_function']\n del read1\n read2 = sa_hdf5.read_dolfin_mesh_functions('{:s}_{:.2e}'.format(basename, oversampling[-1]))\n markers = read2['cell_functions']\n del read2\n pu = sa_hdf5.read_dolfin_scalar_cg1('{:s}_pu'.format(basename)) \n \n subdomain_data = make_subdomains(\n mesh, pu, markers,\n cell_function = cell_function, facet_function = facet_function,\n debug = debug, vector = vector\n )\n \n poly_exprs = sympy_get_monomials(basedim, polydegree, vector)\n sub_polys = get_interpolants(subdomain_data['spaces'], poly_exprs)\n if debug:\n for ii, ps in enumerate(sub_polys):\n write_functions('{:s}_patch_{:d}_polys'.format(basename, ii), ps)\n global_VV = Space(mesh, 'CG', 1)\n global_polys = local_to_global(global_VV, sub_polys, subdomain_data['local_to_global'])\n if debug:\n write_functions('{:s}_polys'.format(basename), global_polys)\n \n ","sub_path":"sa_thesis/computation/gfem.py","file_name":"gfem.py","file_ext":"py","file_size_in_byte":16811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"200844891","text":"import os.path\nimport logging\nimport pandas as pd\nfrom geopandas import GeoDataFrame\n#from shapely.geometry import Point\n#from geopandas import gpd\nfrom ScoreModel import ModelData\nimport matplotlib.pyplot as pltcat\nimport matplotlib.patches as mpatches\nfrom matplotlib import mlab\nimport matplotlib as 
mpl\nimport numpy as np\nfrom scipy import stats\nimport math\nimport time\nimport copy\nimport operator\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef linear_decay_function(time, upper):\n '''\n Linear decay function for distance\n '''\n\n if time > upper:\n return 0\n else:\n return (upper - time) / upper\n\ndef root_decay_function(time, upper):\n '''\n Square root decay function for distance.\n '''\n if time > upper:\n return 0\n else:\n return (1 / math.sqrt(upper) ) * (-time ** 0.5) + 1\n\n\ndef logit_decay_function(time, upper):\n '''\n Logit distance decay function.\n '''\n if time > upper:\n return 0\n else:\n return 1-(1/(math.exp((upper/450)-(.3/60)*(time))+1))\n #return 1-(1/(math.exp((upper/300)-(.3/60)*(time))+1))\n #return 1-(1/(math.exp((upper/300)-(.4/60)*(time))+1))\n #return (100-(100/(math.exp((upper/300)-(.0065)*(time))+1))/100)\n\n\nclass CoverageModel(ModelData):\n '''\n Build the Per capita spending model which captures\n the level of spending for low income residents in\n urban enviroments.\n '''\n\n def __init__(self, network_type='drive', source_filename=None, \n dest_filename=None, sp_matrix_filename=None, limit_categories=None,\n upper=30):\n\n super().__init__(network_type, upper)\n self.results = None\n self.dpc = None\n self.res_pop = None\n self.limit_categories = limit_categories\n self.results_initialized = False\n self.dest_percap_target=None\n self.dest_target=None\n self.serv_pop=None\n assert type(limit_categories) == type(set()) or limit_categories == None, 'limit_categories must be of type set or None'\n\n self.load_sources(source_filename)\n self.load_dests(dest_filename)\n self.load_sp_matrix(sp_matrix_filename)\n self.process()\n\n self.good_to_write = False\n\n def calculate(self):\n '''\n Calculate the per capita value of ALL categories\n in the limited categories set, or the composite value\n for all categories.\n '''\n start_time = time.time()\n first = True\n #sum each limited catagory separately\n if self.limit_categories:\n for category in self.limit_categories:\n self.calculate_single(category)\n #combine each category of spending to one total value\n if first:\n self.results['all_categories'] = 0\n first = False\n self.results['all_categories'] += self.results[category]\n else:\n #sum all categories together\n self.calculate_single(None)\n\n\n self.good_to_write = True\n self.logger.info(\"Finished calculating CoverageModel in {:,.2f} seconds\".format(time.time() - start_time))\n\n def calculate_single(self, subset):\n '''\n Calculate the per capita value of ONE category\n in the limited categories set, or the composite value\n for all categories.\n Inputs:\n subset: 'all_categories', or the name of a single\n category\n '''\n\n #find each dest's target value per capita\n dest_percap_target = {}\n dest_target={}\n serv_pop={}\n if subset:\n subset_name = subset\n else:\n subset_name = 'all_categories'\n\n for dest_id, source_list in self.dest2source.items():\n if subset:\n if self.get_category(dest_id) != subset:\n continue\n serv_pop2 = 0\n for item in source_list:\n source_id, time_val = item\n #Get population info\n source_pop = self.get_population(source_id)\n serv_pop2 += source_pop\n dest_target = self.get_target(dest_id)\n #Calculate Coverage score\n if serv_pop2 !=0:\n dest_percap_target[dest_id] = dest_target / serv_pop2\n else:\n dest_percap_target[dest_id]=np.NaN\n\n #Get population per destination\n serv_pop[dest_id] = serv_pop2\n\n\n #Convert to DataFrames\n #Get coverage score \n dpc 
= pd.DataFrame.from_dict(dest_percap_target, orient='index')\n dpc.rename(columns={ dpc.columns[0]: 'coverage' }, inplace=True)\n pd.to_numeric(dpc.coverage)\n\n\n #Get population\n res_pop = pd.DataFrame.from_dict(serv_pop, orient='index')\n res_pop.rename(columns={ res_pop.columns[0]: 'serv_pop' }, inplace=True)\n \n self.results = self.dests.join(res_pop)\n self.results = self.results.join(dpc)\n\n\n\n def _get_aggregate(self, aggregate_type):\n '''\n Build an data frame of the results aggregated\n by community.\n Inputs\n aggregate_type: can be either 'coverage' or\n 'population'. If the former, the aggregation\n performed is average. If the latter, the\n aggregation is summation.\n '''\n assert self.good_to_write, 'need to calculate first'\n\n if aggregate_type == 'category':\n #Fill in NaN only to aggregate the values\n res_0=self.results.drop(columns=['serv_pop'])\n res=res_0.fillna(0)\n res = res.groupby(['category']).sum()\n elif aggregate_type == 'coverage':\n res_0=self.results.drop(columns=['serv_pop'])\n res=res_0.fillna(0)\n res = res.groupby(['lower_areal_unit']).sum()\n else:\n self.logger.error('Unknown aggregate_type: ({})'.format(aggregate_type))\n return None\n low_area=pd.DataFrame(self.sources.lower_areal_unit.unique())\n low_area.rename(columns={ 0: \"lower_areal_unit\" }, inplace=True)\n #Making the lower areal unit the index to avoid confusions with embedded Python's index \n low_area=low_area.set_index('lower_areal_unit')\n return low_area.join(res)\n\n\n def agg_area_cat(self):\n res=self.results.drop(columns=['serv_pop'])\n res=res.fillna(0)\n res = res.groupby(['lower_areal_unit','category']).sum()\n res=res[['target','coverage']]\n res=res.unstack()\n\n low_area=pd.DataFrame(self.sources.lower_areal_unit.unique())\n low_area.rename(columns={ 0: \"lower_areal_unit\" }, inplace=True)\n #Making the lower areal unit the index to avoid confusions with embedded Python's index \n low_area=low_area.set_index('lower_areal_unit')\n return low_area.join(res)\n \n def write_aggregate(self, aggregate_type, filename=None):\n '''\n Write the aggregate to csv\n '''\n df = self._get_aggregate(aggregate_type)\n if not filename:\n filename = self.get_output_filename_cov('{}_aggregate'.format(aggregate_type))\n\n df.to_csv(filename)\n self.logger.info('Wrote aggregate to file: {}'.format(filename))\n\n def write_agg_area_cat(self, filename=None):\n '''\n Write the aggregate2 to csv\n '''\n df = self.agg_area_cat()\n if not filename:\n filename = self.get_output_filename_cov('{}agg_area_cat'.format(self.network_type))\n\n df.to_csv(filename)\n self.logger.info('Wrote aggregate to file: {}'.format(filename))\n \n\n def plot_cdf(self, title='Coverage Amount'):\n '''\n Generate a CDF. If limit_categories was specified,\n each category will be given individually. 
If not, the\n aggregate value will be plotted.\n Inputs\n title: the title of the figure\n '''\n\n assert self.good_to_write, \"must calculate first\"\n\n #blocks with population greater than zero\n cdf_eligible = self.results.loc[self.results['population'] > 0]\n \n #initialize block parameters\n mpl.pyplot.close()\n mpl.pyplot.rcParams['axes.facecolor'] = '#cfcfd1'\n fig, ax = mpl.pyplot.subplots(figsize=(8, 4))\n n_bins = 100\n\n available_colors = ['black','magenta','lime','red','black','orange','grey','yellow','brown','teal']\n color_keys = []\n if self.limit_categories:\n for category in self.limit_categories:\n x = cdf_eligible[category]\n color = available_colors.pop(0)\n patch = mpatches.Patch(color=color, label=category)\n color_keys.append(patch)\n n, bins, blah = ax.hist(x, n_bins, density=True, histtype='step',\n cumulative=True, label=category, color=color)\n else:\n x = cdf_eligible['all_categories']\n n, bins, patches = ax.hist(x, n_bins, density=True, histtype='step',\n cumulative=True, label='all_categories')\n\n if self.limit_categories:\n ax.legend(loc='right',handles=color_keys)\n else:\n ax.legend(loc='right')\n ax.grid(True)\n ax.set_title(title)\n ax.set_xlabel('Per Capita Spending ($)')\n ax.set_ylabel('Percent of Blocks by Value')\n fig_name = self.figure_name()\n mpl.pyplot.savefig(fig_name, dpi=400)\n mpl.pyplot.show()\n self.logger.info('Plot was saved to: {}'.format(fig_name))\n\n return\n\n def write_csv(self, filename=None):\n '''\n Write the model data to file.\n '''\n assert self.good_to_write, 'need to calculate first'\n if not filename:\n filename = self.get_output_filename_cov('Coverage_{}'.format(self.network_type))\n self.results.to_csv(filename)\n\n\nclass AccessModel(ModelData):\n '''\n Build the Access model which captures the accessibility of \n nonprofit services in urban environments.\n '''\n\n def __init__(self, network_type='drive', source_filename=None, \n dest_filename=None, sp_matrix_filename=None, decay_function='linear',\n limit_categories=None, upper=30):\n\n super().__init__(network_type, upper)\n self.results = None\n\n if decay_function == 'linear':\n self.decay_function = linear_decay_function\n elif decay_function == 'root':\n self.decay_function = root_decay_function\n elif decay_function == 'logit':\n self.decay_function = logit_decay_function\n else:\n self.logger.error('Unrecognized decay function. Must be one of: linear, root or logit')\n\n self.load_sources(source_filename)\n self.load_dests(dest_filename)\n self.load_sp_matrix(sp_matrix_filename)\n \n self.process()\n self.limit_categories = limit_categories\n assert type(limit_categories) == type(set()) or limit_categories == None, 'limit_categories must be of type set or None'\n\n self.good_to_write = False\n self.custom_threshold = None\n\n\n def calculate(self, custom_threshold=40, normalize=True, \n custom_weight_dict=None, largest_weights_first=True):\n '''\n Calculate the Access score for each block\n from the vendors within the specified range.\n Inputs:\n custom_threshold- integer or float, optional. Results will contain\n a column showing percent of population with score greater\n than or equal to this value\n normalize-Boolean, optional (defaults to true). If true,\n final scores will be normalized on a range from 0-100.\n custom_weight_dict-a dictionary mapping strings of category names\n to a list of integer or float weights\n largest_weights_first: boolean, if using custom_weight_dict. If True,\n sort the weight arrays such that largest will be used first. 
IF false,\n do the opposite.\n '''\n\n start_time = time.time()\n self.custom_threshold = custom_threshold\n \n #subset the destination data frames on limit_categories\n if self.limit_categories:\n subset_targets = self.dests[self.dests['category'].isin(self.limit_categories)].copy(deep=True)\n else:\n subset_targets = self.dests.copy(deep=True)\n \n DIMINISH_WEIGHTS = [1,1,1,1,1,1,1,1,1,1]\n results = {}\n results_cat = {}\n itemized_results = {}\n\n\n #sort the user's input arrays, such that the highest\n #weight will be used first and the lowest weight will be\n #used last\n if custom_weight_dict is not None:\n for key in custom_weight_dict.keys():\n custom_weight_dict[key].sort(reverse= not largest_weights_first)\n\n for source_id, dest_list in self.source2dest.items():\n if custom_weight_dict is not None:\n weight_dict = copy.deepcopy(custom_weight_dict)\n else:\n weight_dict = {}\n access = 0\n access_cat=0\n\n '''\n Sort the destination list so the weight_dict[cat].pop\n will take the nearest neighbor first.\n '''\n dest_list.sort(key=operator.itemgetter(1))\n\n for item in dest_list:\n dest_id, time_val = item\n cat = self.get_category(dest_id)\n\n #skip this dest if not in limit categories\n if self.limit_categories != None and cat not in self.limit_categories:\n continue\n\n distance_weight = self.decay_function(time_val, self.upper)\n #if we haven't encountered this category for this source,\n #create a new list of weights\n if cat not in weight_dict.keys():\n weight_dict[cat] = {}\n\n #if we have encountered this category for this source,\n #take the next highest weight (0 if all weights have)\n #already been use\n if len(weight_dict[cat]) > 0:\n diminish_cat_weight = weight_dict[cat].pop()\n dw=distance_weight*diminish_cat_weight\n else:\n diminish_cat_weight = 0\n dw=0\n #In order to check that the score is calculated correctly:\n #print(distance_weight,diminish_cat_weight,dw,cat)\n #Access score for weights and distance decay\n access+=dw\n #Count of weights by areal unit\n access_cat += diminish_cat_weight \n\n results[source_id] = access\n results_cat[source_id] = access_cat\n\n\n #convert to DataFrame\n res = pd.DataFrame.from_dict(results, orient='index')\n res.rename(columns={ res.columns[0]: \"access\" }, inplace=True)\n \n res_cat = pd.DataFrame.from_dict(results_cat, orient='index')\n res_cat.rename(columns={ res_cat.columns[0]: \"access_cat\" }, inplace=True)\n\n\n #join with source data\n #Joins the missing values created from the units exceeding the 'upper' threshold. 
Later converts them to 0.\n self.results = self.sources.join(res)\n self.results = self.results.join(res_cat)\n\n\n if normalize:\n C = self.results['access']- self.results.access.min()\n D=self.results.access.max()-self.results.access.min()\n self.results['access_sd'] = (C/D)*100\n\n\n\n #Replace the null values with zeros (values above upper)\n self.results.fillna(0, inplace=True)\n\n #Find list within matrix with negative values\n #When constructing the matrix with p2p, the negative values (-1) are the edges on the border of the bounding box.\n #So we make those values NA\n for keyy, negs in self.neg_val.items():\n for j in self.results.keys():\n self.results.at[keyy,j] = -9999\n\n\n\n self.results=self.results.replace(-9999, np.nan)\n \n self.good_to_write = True\n self.logger.info(\"Finished calculating hssa in {:,.2f} seconds\".format(time.time() - start_time))\n\n def write_csv(self, filename=None):\n '''\n Write the model data to file.\n '''\n assert self.good_to_write, 'need to calculate first'\n if not filename:\n filename = self.get_output_filename_access('Access_{}'.format(self.network_type))\n self.results.to_csv(filename)\n\n def _get_aggregate(self, aggregate_type):\n '''\n Build an data frame of the results aggregated\n by community.\n Inputs\n aggregate_type: can be either 'access' or\n 'population'. If the former, the aggregation\n performed is average. If the latter, the\n aggregation is summation.\n '''\n assert self.good_to_write, 'need to calculate first'\n \n if aggregate_type == 'access': \n res_0=self.results\n res=res_0.fillna(0)\n #The .mean() disregards the NaN values\n res = res.groupby(['lower_areal_unit']).mean()\n #Can add any fields from AccessModel\n res = res[['access']]\n else:\n self.logger.error('Unknown aggregate_type: ({})'.format(aggregate_type))\n return None\n \n low_area=pd.DataFrame(self.sources.lower_areal_unit.unique())\n low_area.rename(columns={ 0: \"lower_areal_unit\" }, inplace=True)\n #Making the lower areal unit the index to avoid confusions with embedded Python's index \n low_area=low_area.set_index('lower_areal_unit')\n return low_area.join(res)\n\n \n def write_aggregate(self, aggregate_type, filename=None):\n '''\n Write the aggregate to csv\n '''\n df = self._get_aggregate(aggregate_type)\n if not filename:\n filename = self.get_output_filename_access('{}_aggregate'.format(aggregate_type))\n\n df.to_csv(filename)\n self.logger.info('Wrote aggregate to file: {}'.format(filename))\n \n\n def plot_cdf(self, title='CDF Access Score'):\n '''\n Generate a CDF of the aggregate Access score.\n Inputs\n title: the title of the figure\n '''\n assert self.good_to_write, \"must calculate first\"\n mpl.pyplot.rcParams['axes.facecolor'] = '#cfcfd1'\n x = self.results.loc[self.results['population'] > 0]['access']\n \n mpl.pyplot.close()\n fig, ax = mpl.pyplot.subplots(figsize=(8, 4))\n n_bins = 100\n n, bins, patches = ax.hist(x, n_bins, density=True, histtype='step',\n cumulative=True, label='All Categories of Spending')\n\n ax.grid(True)\n ax.legend(loc='right')\n ax.set_title(title)\n ax.set_xlabel('Access Score')\n ax.set_ylabel('Percent of Areal Units by Value')\n fig_name = self.figure_name()\n mpl.pyplot.savefig(fig_name, dpi=400)\n mpl.pyplot.show()\n self.logger.info('Plot was saved to: {}'.format(fig_name))\n\n return\n\n\nclass TTMetrics(ModelData):\n '''\n Build the Access model which captures the accessability of \n nonprofit services in urban environments.\n '''\n\n def __init__(self, network_type='walk', source_filename=None, \n 
dest_filename=None, sp_matrix_filename=None, decay_function='linear',\n limit_categories=None, upper=30):\n\n super().__init__(network_type, upper)\n self.results = None\n\n self.load_sources(source_filename)\n self.load_dests(dest_filename)\n self.load_sp_matrix(sp_matrix_filename)\n self.process()\n\n\n self.limit_categories = limit_categories\n assert type(limit_categories) == type(set()) or limit_categories == None, 'limit_categories must be of type set or None'\n\n self.good_to_write = False\n self.custom_threshold = None\n\n def calculate(self):\n\n #Stores no of destination wthin upper for a given source and category\n self.n_dests_in_range ={}\n for s, val in self.source2dest.items():\n self.n_dests_in_range[s] = {}\n\n for catt in self.category_set:\n self.n_dests_in_range[s][catt] = 0\n for d in val:\n cat = self.get_category(d[0])\n if d[1] >= 0:\n self.n_dests_in_range[s][cat] +=1\n\n #Stores nearest neighbour to given source of a given categeory\n self.near_nbr = {}\n for s,val in self.dicto.items():\n self.near_nbr[s] = {}\n no_cat = 0\n for d in val:\n cat = self.get_category(d[0])\n if cat not in self.near_nbr[s] and d[1]>0:\n self.near_nbr[s][cat] = d[1]\n no_cat +=1\n if(no_cat == len(self.category_set)):\n break\n\n self.tes = pd.DataFrame.from_dict(self.n_dests_in_range, orient='index')\n self.near_nbr = pd.DataFrame.from_dict(self.near_nbr, orient='index')\n self.n_dests_in_range = self.tes\n self.n_dests_in_range.fillna(0,inplace=True)\n #self.n_dests_in_range=self.n_dests_in_range.replace(-9999, 0)\n for keyy, negs in self.neg_val.items():\n for j in self.category_set:\n self.near_nbr.at[keyy,j] = -9999\n self.n_dests_in_range.at[keyy,j] = -9999\n\n self.n_dests_in_range=self.n_dests_in_range.replace(-9999, np.nan)\n self.near_nbr=self.near_nbr.replace(-9999, np.nan)\n\n \n def plot_nearest_providers(self, limit_categories=None, \n title='Closest Point CDF', n_bins=500, resolution='block'):\n '''\n Plot a cdf of travel times to the closest provider\n for each category.\n '''\n\n assert resolution in ['block', 'population'], 'must use block or resolution'\n #assert resolution != 'population', 'this feature is a Work in Progress'\n assert type(limit_categories) in [type(set()), type([]), type(None)], 'limit_categories must be type list, set or None'\n\n figure_name = self.figure_name()\n\n \n #initialize block parameters\n mpl.pyplot.close()\n mpl.pyplot.rcParams['axes.facecolor'] = '#cfcfd1'\n fig, ax = mpl.pyplot.subplots(figsize=(8, 4))\n\n available_colors = ['black','magenta','lime','red','black','orange','grey','yellow','brown','teal']\n color_keys = []\n if self.limit_categories:\n for category in self.limit_categories:\n self.near_nbr[category][self.near_nbr[category] > self.upper] = self.upper\n x = self.near_nbr[category]\n #Drop any NaNs to avoid error in plotting \n x=x.dropna()\n color = available_colors.pop(0)\n patch = mpatches.Patch(color=color, label=category)\n color_keys.append(patch)\n if resolution == 'population':\n res = {}\n for block_id, time_val in x.iteritems():\n block_pop = self.get_population(block_id)\n if block_pop <= 0:\n continue\n for i in range(block_pop):\n temp_id = '{}_{}'.format(block_id, i)\n res[temp_id] = time_val\n res = pd.Series(data=res)\n n, bins, blah = ax.hist(res, n_bins, density=True, histtype='step',\n cumulative=True, label=category, color=color)\n else:\n n, bins, blah = ax.hist(x, n_bins, density=True, histtype='step',\n cumulative=True, label=category, color=color)\n\n else:\n for category in self.category_set:\n 
self.near_nbr[category][self.near_nbr[category] > self.upper] = self.upper\n x = self.near_nbr[category]\n #Drop any NaNs to avoid error in plotting \n x=x.dropna()\n color = available_colors.pop(0)\n patch = mpatches.Patch(color=color, label=category)\n color_keys.append(patch)\n if resolution == 'population':\n res = {}\n for block_id, time_val in x.iteritems():\n #block_pop = self.get_population(block_id)\n block_pop = self.sources.loc[int(float(block_id)), 'population']\n if block_pop <= 0:\n continue\n for i in range(block_pop):\n temp_id = '{}_{}'.format(block_id, i)\n res[temp_id] = time_val\n res = pd.Series(data=res)\n n, bins, blah = ax.hist(res, n_bins, density=True, histtype='step',\n cumulative=True, label=category, color=color)\n else:\n n, bins, blah = ax.hist(x, n_bins, density=True, histtype='step',\n cumulative=True, label=category, color=color)\n\n if self.limit_categories:\n ax.legend(loc='best',handles=color_keys)\n else:\n ax.legend(loc='best', handles=color_keys)\n ax.grid(True)\n ax.set_title(title)\n ax.set_xlabel('Time in seconds')\n ax.set_ylabel('Percent of {} Within Range'.format(resolution))\n fig_name = self.figure_name()\n mpl.pyplot.savefig(fig_name, dpi=400)\n mpl.pyplot.show()\n self.logger.info('Plot was saved to: {}'.format(fig_name))\n\n","sub_path":"travel_times/scripts/.ipynb_checkpoints/CommunityAnalytics-checkpoint.py","file_name":"CommunityAnalytics-checkpoint.py","file_ext":"py","file_size_in_byte":25726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"275348791","text":"import docker\r\nimport subprocess\r\nimport socket\r\nimport random\r\nimport os\r\n\r\nMINPORT = 30000\r\nMAXPORT = 65000\r\n\r\nclass SocketIsUsedException(Exception):\r\n pass\r\n\r\ndef checkPort(port):\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n result = sock.connect_ex(('', port))\r\n\r\n if result == 0:\r\n raise SocketIsUsedException('socket %s in use' % (port))\r\n sock.close()\r\n return True\r\n\r\ndef getUnusedPort():\r\n port = False\r\n while port is False:\r\n givePort = random.randrange(MINPORT, MAXPORT)\r\n try:\r\n port = checkPort(givePort)\r\n except SocketIsUsedException as sock:\r\n raise sock\r\n else:\r\n return givePort\r\n\r\ndef copyCodeToServer(usrcontainer, usrcode, codelang):\r\n if (codelang == 35 ): #python\r\n lang = '.py'\r\n\r\n usrcontainer.container.exec_run('echo \\'' + usrcode + ' \\' > usrcompile' + lang )\r\n print(\"debug message.\")\r\n \r\n\r\nclass terminal:\r\n\r\n def __init__(self, ssh, port):\r\n print(\"Port is: \" + str(port))\r\n self.port = port\r\n self.https = False\r\n self.ssh = ssh\r\n self.running = False\r\n self.docker = None\r\n self.container = None\r\n self.address = None\r\n\r\n def __enter__(self):\r\n return self\r\n\r\n def __exit__(self, exec_type, exc_value, traceback):\r\n self.pid.terminate()\r\n\r\n def makeServer(self):\r\n try:\r\n print(\"Creating server with port: \" + str(self.port))\r\n self.docker = docker.from_env()\r\n # CHANGE THIS TO YOUR DOCKER IMAGE NAME\r\n # |\r\n # |\r\n # v\r\n imagecmd = '-p ' + str(self.port) + ' bash -x'\r\n self.container = self.docker.containers.run('fugg:latest',imagecmd,ports={self.port:self.port},detach=True,remove=True,network='test')\r\n print(\"THE CONTAINER NAME IS: \" + self.container.name)\r\n self.running = True\r\n cmd = \" docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' \" + self.container.name\r\n self.address = os.popen(cmd).read()\r\n machete = 
self.address +':'+ str(self.port)\r\n\r\n print(\"the docker thing is: \" + machete)\r\n \r\n return machete.replace('\\n','')\r\n except Exception as e:\r\n raise e\r\n \r\n def terminate(self):\r\n if self.docker is not None:\r\n self.running = False\r\n self.container.kill()\r\n self.docker = None\r\n","sub_path":"serverapp/core/scripts/terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"616173942","text":"# команда для ссылки на стрим покса\nimport command_system\nfrom vkapi import send_message\nfrom settings.BD import get_info\n\n\ndef link(token, user_id, stroka='', peer_id=''):\n # рассылка если пишет покс\n if str(user_id) == '151635695':\n message = 'Пацаны залетаем на стрим к поксу: https://www.twitch.tv/poqx'\n list_of_subs = get_info('subs')\n for user in list_of_subs:\n send_message(user[0], user[0], token, message)\n # если команду пишет любой другой чел\n message = 'Ссылочка на стрим: https://www.twitch.tv/poqx'\n return message, ''\n\n\nlink_command = command_system.Command()\n\nlink_command.keys = ['!стрим']\nlink_command.description = 'Подскажу ссылку на стрим покса:)'\nlink_command.process = link","sub_path":"commands/link_to_stream.py","file_name":"link_to_stream.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"380927603","text":"from django.shortcuts import render,get_object_or_404\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom ..models.judge_model import judge\nfrom ..models.project_model import project\nfrom ..models.judgeassignment_model import judgeassignment\nfrom django.core.paginator import Paginator\nfrom nhsee import models\nimport uuid\nfrom uuid import UUID\nimport os\nimport xlrd\n\ndef judgeslisting(request):\n\n if 'createjudges' in request.POST:\n file_path = request.POST.get('filepath')\n #print(file_path,\"ggggggggggggggg\")\n judgesdata=xlrd.open_workbook(file_path)\n sheet = judgesdata.sheet_by_index(0)\n judgenames=sheet.cell_value(0,0)\n\n for judgenum in range(1,sheet.nrows):\n individualjudge=sheet.row_values(judgenum)\n projectinsert = judge(judge_id=individualjudge[0],fname=individualjudge[1],lname=individualjudge[2])\n projectinsert.save()\n\n if 'deletejudges' in request.GET:\n deleterequest=request.GET.get('delete')\n if deleterequest==\"deletejudges\":\n\n judge.objects.all().delete()\n\n\n if 'judgeprojects' in request.POST:\n judgeid=request.POST.get('judge_id')\n #print(judgeid,\"kkkkkkkkkk-----\")\n judgeprojects=judgeassignment.objects.filter(judge_id_id=judgeid)\n judge_list=[]\n\n for judges in judgeprojects:\n print(judges.judge_id_id)\n projectid=judges.project_id_id\n judgenames=project.objects.filter(project_id=projectid)\n\n for judgesinfo in judgenames:\n judge_list.append({\"project_name\":str(judgesinfo.project_title),\"project_description\":str(judgesinfo.description),\"judge_id\":judgeid,\"project_id\":projectid})\n return render(request,'judges_templates/assignjudgelist.html',{\"judgesjson\":judge_list})\n\n\n judgel = judge.objects.all()\n judge_list=[]\n\n for judges in judgel:\n judgestatus=judgeassignment.objects.filter(judge_id=judges.judge_id)\n assign_judge_list=[]\n\n for judge_ids in judgestatus:\n assign_judge_list.append(judge_ids.judge_id_id)\n\n judgestatus=len(assign_judge_list)\n if judgestatus > 5:\n judge_list.append({\"judge_name\":str(judges.fname)+\" 
\"+str(judges.lname),\"judge_id\":judges.judge_id,\"judgestatus\":True})\n else:\n judge_list.append({\"judge_name\":str(judges.fname)+\" \"+str(judges.lname),\"judge_id\":judges.judge_id})\n paginator_projects = Paginator(judge_list, 10)\n page = request.GET.get('page')\n contacts = paginator_projects.get_page(page)\n\n\n\n return render(request,'judges_templates/listjudges.html',{\"judgesjson\":contacts})\n","sub_path":"nhsee/views/judge_views.py","file_name":"judge_views.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"490439153","text":"from smtplib import SMTPException\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nfrom log.ExceptionLogger import ExceptionLogger\n\n\n@ExceptionLogger\ndef succeeds(a):\n return a\n\n\n@ExceptionLogger\ndef fails(a, b, c):\n a + b + c\n raise SMTPException('failed!')\n\n\nclass TestExceptionLogger(TestCase):\n @patch('log.ExceptionLogger.logging')\n def test___call__succeed(self, mocked_logging):\n succeeds(0)\n self.assertEqual(0, mocked_logging.error.call_count)\n\n @patch('log.ExceptionLogger.logging')\n def test___call__fail(self, mocked_logging):\n fails(1, 2, c=3)\n self.assertEqual(1, mocked_logging.error.call_count)\n","sub_path":"log/test_exceptionLogger.py","file_name":"test_exceptionLogger.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"205342510","text":"# Author: Anjum, Syed Rehan\n# Date: July 12, 2021\n# File Name: coin_toss_game.py\n# Description: This program will allow the user to toss a coin and use a counter\n\n# Import Statements Here\nimport random\n\n# Global Variables\ncoinTossTurns = random.randint(1000,1000000)\ncounter = 0\nhead_count = 0\ntail_count = 0\n\nprint(\"This program will flip a coin a random number of times.\")\nprint(\"If the coin flips heads, the counter will increase by 1.\")\nprint(\"If the coin flips tails, the counter will decrease by 1.\")\n\n# For Loop to Flip Coin \n# 1 Represents Heads\n# 0 Represents Tails\nfor flip in range(coinTossTurns):\n if random.choice([1, 0]) == 1:\n counter += 1\n head_count += 1\n else:\n counter -= 1\n tail_count += 1\n\n# Final Output, Displaying Head Count, Tail Count and Final Counter\nprint(\"After\", coinTossTurns, \"tosses, the counter displays\", counter)\nprint(\"Heads was flipped\", head_count, \"times.\")\nprint(\"Tails was flipped\", tail_count, \"times.\")","sub_path":"1.5/coin_toss_game.py","file_name":"coin_toss_game.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"357939551","text":"import heapq\n\n\"\"\"\n题目:如何得到一个数据流中的中位数?\n如果从数据流中读出奇数个数值,那么中位数就是所有数值排序之后位于中间的数值。\n如果从数据流中读出偶数个数值,那么中位数就是所有数值排序之后中间两个数的平均值。\n\"\"\"\n\n\"\"\"\n使用Python heapq模块。\nheapq模块实现了一个适用于Python列表的最小堆排序算法。\n通过插入数值的相反数实现最大堆排序算法。\n\"\"\"\n\n\nclass DynamicArray:\n \"\"\"\n 包含一个最大堆和一个最小堆。\n \"\"\"\n\n def __init__(self):\n # 最小堆\n self.min = []\n # 最大堆\n self.max = []\n\n def insert(self, value):\n \"\"\"\n 插入数据\n \"\"\"\n if (len(self.min) + len(self.max)) & 1 == 0:\n if (len(self.max) > 0) and (value < (- self.max[0])):\n heapq.heappush(self.max, - value)\n\n value = - self.max[0]\n\n heapq.heappop(self.max)\n\n heapq.heappush(self.min, value)\n else:\n if (len(self.min) > 0) and (value > self.min[0]):\n heapq.heappush(self.min, value)\n\n value = self.min[0]\n\n 
heapq.heappop(self.min)\n\n heapq.heappush(self.max, - value)\n\n def get_median(self):\n \"\"\"\n 获取已有所有数据的中位数\n \"\"\"\n size = len(self.min) + len(self.max)\n if size == 0:\n raise Exception(\"No numbers are available\")\n\n median = 0\n if size & 1 == 1:\n median = self.min[0]\n else:\n median = (self.min[0] + (- self.max[0])) / 2\n\n return median\n\n\ndef main():\n dynamic_array = DynamicArray()\n\n while True:\n value = input(\"Please insert a integer, input q for quit: \")\n if value == 'q':\n break\n\n num = int(value)\n\n print(\"Insert number: {}\".format(num))\n dynamic_array.insert(num)\n print(\"The median number is: {}\".format(dynamic_array.get_median()))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"41-数据流中的中位数/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"315192772","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Notifications(models.Model):\n \"\"\"\n Represents a single notification sent by specific events\n \"\"\"\n\n # the user that does a specific action\n notifying_user = models.ForeignKey(User, on_delete=models.CASCADE, default=0, related_name=\"notifying_users\")\n\n # the receiving user of that notification\n receiving_user = models.ForeignKey(User, on_delete=models.CASCADE, default=0, related_name=\"receiving_user\")\n\n # the date the notification was sent.\n date = models.DateTimeField()\n\n # whether or not the notification is read or not\n read = models.BooleanField(default=False)\n\n # the notification content\n content = models.CharField(max_length=1000)\n\n def __str__(self):\n return self.content\n","sub_path":"cenpilos/submodels/notificationsModel.py","file_name":"notificationsModel.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"466749168","text":"import math\n\nmaxcount = 0\nmaxnum = 0\n\nfor i in range(0,1000000):\n num = 1000000-i\n snum = num\n count = 0\n while(num!=1):\n if(num%2==0):\n num /= 2\n else:\n num = 3*num+1\n count+=1\n\n if(count>maxcount):\n maxcount=count\n maxnum=snum\n\nprint(maxnum)\n\n\n","sub_path":"p14.py","file_name":"p14.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"261154536","text":"import os\nimport numpy as np\nimport random\nimport unittest\nfrom timeit import default_timer as timer\nfrom datetime import timedelta\nfrom sklearn.metrics import roc_auc_score\n\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble._gb import GradientBoostingClassifier, GradientBoostingRegressor\nimport xgboost as xgb\n\nfrom settree.set_data import SetDataset, OPERATIONS, flatten_datasets\nfrom settree.gbest import GradientBoostedSetTreeClassifier, GradientBoostedSetTreeRegressor\nimport exps.eval_utils as eval\n\n\ndef get_first_quarter_data(num_samples, min_items_set=2, max_items_set=10, dim=2):\n\n def inject_samples_in_first_quarter(set_of_samples, min=1, max=1, dim=2):\n num = random.choice(range(min, max + 1))\n pos_points = np.random.uniform(low=0, high=1, size=(num, dim))\n set_of_samples[:num, :] = pos_points\n return set_of_samples\n\n def sample_point_not_from_first_quarter(dim=2):\n\n # sample a quarter (not the first)\n while True:\n r = np.random.normal(0, 1, dim) > 0\n 
if sum(r) < dim:\n break\n\n # sample a point from the quarter\n p = []\n for i in r:\n # pos\n if i:\n p.append(np.random.uniform(low=0, high=1))\n # neg\n else:\n p.append(np.random.uniform(low=-1, high=0))\n return tuple(p)\n\n def sample_set(num, dim):\n return np.stack([sample_point_not_from_first_quarter(dim) for _ in range(num)])\n\n s_1 = [sample_set(random.choice(range(min_items_set, max_items_set)), dim) for _ in range(num_samples // 2)]\n s_2 = [sample_set(random.choice(range(min_items_set, max_items_set)), dim) for _ in range(num_samples // 2)]\n s_2 = [inject_samples_in_first_quarter(i, min=1, max=1, dim=dim) for i in s_2]\n\n data = s_1 + s_2\n y = np.concatenate([np.zeros(len(s_1)), np.ones(len(s_2))]).astype(int)\n\n indx = np.arange(len(y))\n random.shuffle(indx)\n return [data[i] for i in indx], y[indx]\n\n\nclass TestGBTDProblems(unittest.TestCase):\n test_counter = 1\n\n def __init__(self, splitter='set', use_attention_set=True, use_attention_set_comp=True, attention_set_limit=1):\n self.tree_args = {'splitter': splitter,\n 'use_attention_set': use_attention_set,\n 'use_attention_set_comp': use_attention_set_comp,\n 'attention_set_limit': attention_set_limit}\n print('Test args: {}'.format(self.tree_args))\n\n def init(self, name):\n np.random.seed(42)\n random.seed(42)\n\n print('####################({})####################'.format(self.test_counter))\n print('Start test: {}'.format(name))\n self.test_counter += 1\n\n\n def start_timer(self):\n self.start = timer()\n\n def end_timer(self):\n end = timer()\n print('Time: {}'.format(timedelta(seconds=end - self.start)))\n\n def end(self):\n print('############################################\\n')\n\n def train_and_predict_xgboost(self, params,\n train_x, train_y, test_x, test_y, val_x=None, val_y=None,\n early_stopping_rounds=None, mode='bin_cls'):\n\n print('############ XGBoost ############ ')\n\n if mode == 'bin_cls':\n gbdt = xgb.XGBClassifier(**params)\n eval_met = eval.acc\n eval_met_name = 'acc'\n\n elif mode == 'multi_cls':\n gbdt = xgb.XGBClassifier(**params)\n eval_met = eval.acc\n eval_met_name = 'acc'\n else:\n gbdt = xgb.XGBRegressor(**params)\n eval_met = eval.mse\n eval_met_name = 'mse'\n\n timer = eval.Timer()\n if np.any(val_x):\n gbdt.fit(X=train_x,\n y=train_y,\n eval_set=[(val_x, val_y)],\n early_stopping_rounds=early_stopping_rounds)\n else:\n gbdt.fit(train_x, train_y)\n print('Train took: {}'.format(timer.end()))\n\n timer = eval.Timer()\n train_preds = gbdt.predict(train_x)\n print('Eval train took: {}'.format(timer.end()))\n test_preds = gbdt.predict(test_x)\n\n train_met = eval_met(train_y, train_preds)\n test_met = eval_met(test_y, test_preds)\n\n if mode == 'bin_cls':\n train_proba = gbdt.predict_proba(train_x)[:, 1]\n test_proba = gbdt.predict_proba(test_x)[:, 1]\n\n train_auc = roc_auc_score(train_y, train_proba)\n test_auc = roc_auc_score(test_y, test_proba)\n\n print(\n 'Results : train {} {:.4f} auc: {:.4f} | test {} : {:.4f} auc: {:.4f}'.format(eval_met_name, train_met,\n train_auc, eval_met_name,\n test_met, test_auc))\n else:\n print('Results : train {} {:.4f} | test {} : {:.4f}'.format(eval_met_name, train_met,\n eval_met_name, test_met))\n return train_met, test_met\n\n def train_and_predict_sklearn_gbtd(self, params,\n train_x, train_y, test_x, test_y,\n mode='bin_cls'):\n print('############ Sklearn ############ ')\n\n if mode == 'bin_cls':\n gbdt = GradientBoostingClassifier(**params)\n eval_met = eval.acc\n eval_met_name = 'acc'\n\n elif mode == 'multi_cls':\n gbdt = 
GradientBoostingClassifier(**params)\n eval_met = eval.acc\n eval_met_name = 'acc'\n else:\n gbdt = GradientBoostingRegressor(**params)\n eval_met = eval.mse\n eval_met_name = 'mse'\n\n timer = eval.Timer()\n gbdt.fit(train_x, train_y)\n print('Train took: {}'.format(timer.end()))\n\n if mode == 'bin_cls':\n timer = eval.Timer()\n train_raw_predictions = gbdt.decision_function(train_x)\n print('Eval train took: {}'.format(timer.end()))\n test_raw_predictions = gbdt.decision_function(test_x)\n\n train_encoded_labels = gbdt.loss_._raw_prediction_to_decision(train_raw_predictions)\n train_preds = gbdt.classes_.take(train_encoded_labels, axis=0)\n test_encoded_labels = gbdt.loss_._raw_prediction_to_decision(test_raw_predictions)\n test_preds = gbdt.classes_.take(test_encoded_labels, axis=0)\n\n train_met = eval_met(train_y, train_preds)\n test_met = eval_met(test_y, test_preds)\n\n train_probs = gbdt.loss_._raw_prediction_to_proba(train_raw_predictions)\n test_probs = gbdt.loss_._raw_prediction_to_proba(test_raw_predictions)\n\n train_auc = roc_auc_score(train_y, train_probs[:, 1])\n test_auc = roc_auc_score(test_y, test_probs[:, 1])\n\n print(\n 'Results : train {} {:.4f} auc: {:.4f} | test {} : {:.4f} auc: {:.4f}'.format(eval_met_name, train_met,\n train_auc, eval_met_name,\n test_met, test_auc))\n else:\n timer = eval.Timer()\n train_preds = gbdt.predict(train_x)\n print('Eval train took: {}'.format(timer.end()))\n test_preds = gbdt.predict(test_x)\n train_met = eval_met(train_y, train_preds)\n test_met = eval_met(test_y, test_preds)\n\n print('Results : train {} {:.4f} | test {} : {:.4f}'.format(eval_met_name, train_met,\n eval_met_name, test_met))\n return train_met, test_met\n\n def first_quarter_four_dim(self):\n self.init('first_quarter_four_dim')\n set_size = 10\n train_data, train_y = get_first_quarter_data(num_samples=2000,\n min_items_set=set_size,\n max_items_set=set_size + 1,\n dim=4)\n test_data, test_y = get_first_quarter_data(num_samples=1000,\n min_items_set=set_size,\n max_items_set=set_size + 1,\n dim=4)\n ds_train = SetDataset(records=train_data, is_init=True)\n ds_test = SetDataset(records=test_data, is_init=True)\n\n set_gbdt = GradientBoostedSetTreeClassifier(n_estimators=5,\n operations=OPERATIONS,\n splitter=self.tree_args['splitter'],\n use_attention_set=self.tree_args['use_attention_set'],\n use_attention_set_comp=self.tree_args['use_attention_set_comp'],\n attention_set_limit=self.tree_args['attention_set_limit'],\n max_depth=6,\n max_features=4,\n # n_iter_no_change=3,\n # tol=1e-4,\n subsample=0.5,\n random_state=0,\n verbose=3)\n\n self.start_timer()\n set_gbdt.fit(ds_train, train_y)\n self.end_timer()\n\n train_acc = (set_gbdt.predict(ds_train) == train_y).mean()\n set_test_acc = (set_gbdt.predict(ds_test) == test_y).mean()\n print('Results : set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size,train_acc, set_test_acc))\n\n self.end()\n self.assertGreaterEqual(set_test_acc, 0.05)\n\n def first_quarter_no_active_set_compare(self):\n self.init('first_quarter_no_active_set_compare')\n set_size = 10\n train_data, train_y = get_first_quarter_data(num_samples=2000,\n min_items_set=set_size,\n max_items_set=set_size + 1,\n dim=4)\n test_data, test_y = get_first_quarter_data(num_samples=1000,\n min_items_set=set_size,\n max_items_set=set_size + 1,\n dim=4)\n ds_train = SetDataset(records=train_data, is_init=True)\n ds_test = SetDataset(records=test_data, is_init=True)\n\n set_gbdt = GradientBoostedSetTreeClassifier(n_estimators=5,\n 
operations=OPERATIONS,\n splitter=self.tree_args['splitter'],\n use_attention_set=False,\n use_attention_set_comp=False,\n attention_set_limit=1,\n max_depth=6,\n max_features=None,\n subsample=1,\n random_state=0,\n verbose=3)\n\n self.start_timer()\n set_gbdt.fit(ds_train, train_y)\n self.end_timer()\n\n train_acc = (set_gbdt.predict(ds_train) == train_y).mean()\n set_test_acc = (set_gbdt.predict(ds_test) == test_y).mean()\n print('Results : set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size,train_acc, set_test_acc))\n\n train_x, test_x = flatten_datasets(ds_train, ds_test, operations_list=set_gbdt.operations)\n\n params = {'n_estimators': 5, 'learning_rate': 0.1, 'max_depth': 6, 'max_features': None,\n 'subsample': 1, 'criterion': 'mse', 'random_state': 42}\n sk_learn_train_acc, sk_learn_test_acc = self.train_and_predict_sklearn_gbtd(params, train_x, train_y,\n test_x, test_y, mode='bin_cls')\n\n params = {'objective': 'binary:logistic', 'max_depth': 6, 'n_jobs': 0, 'eval_metric': ['error'],\n 'learning_rate': 0.1, 'n_estimators': 5, 'colsample_bytree': None, 'subsample': None,\n 'reg_lambda': 1, 'verbosity': 0, 'random_state': 0, 'seed': 0}\n self.train_and_predict_xgboost(params, train_x, train_y, test_x, test_y, val_x=None, val_y=None,\n early_stopping_rounds=None, mode='bin_cls')\n\n self.end()\n self.assertGreaterEqual(set_test_acc, sk_learn_test_acc)\n\n def influence_of_trees_depth(self):\n self.init('influence_of_trees_depth')\n set_size = 10\n train_data, train_y = get_first_quarter_data(num_samples=1000,\n min_items_set=set_size,\n max_items_set=set_size + 1,\n dim=4)\n test_data, test_y = get_first_quarter_data(num_samples=200,\n min_items_set=set_size,\n max_items_set=set_size + 1,\n dim=4)\n ds_train = SetDataset(records=train_data, is_init=True)\n ds_test = SetDataset(records=test_data, is_init=True)\n\n for d in [2, 4, 6, 8, 10]:\n set_gbdt = GradientBoostedSetTreeClassifier(n_estimators=8,\n splitter=self.tree_args['splitter'],\n use_attention_set=self.tree_args['use_attention_set'],\n use_attention_set_comp=self.tree_args['use_attention_set_comp'],\n attention_set_limit=self.tree_args['attention_set_limit'],\n max_depth=d,\n max_features=4,\n n_iter_no_change=3,\n tol=1e-4,\n subsample=0.5,\n random_state=0,\n verbose=3)\n self.start_timer()\n set_gbdt.fit(ds_train, train_y)\n self.end_timer()\n\n train_acc = (set_gbdt.predict(ds_train) == train_y).mean()\n set_test_acc = (set_gbdt.predict(ds_test) == test_y).mean()\n print('Results depth:{}: set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(d,\n set_size, train_acc,\n set_test_acc))\n\n def multiclass_mnist(self):\n\n self.init('multiclass_mnist')\n X, y = fetch_openml('mnist_784', version=1, return_X_y=True,\n data_home=os.path.join(os.path.abspath('__file__' + '/../../'), 'data'))\n y = y.astype(int)\n X_0 = X[y == 0, :]\n X_1 = X[y == 9, :]\n X_2 = X[y == 8, :]\n X_3 = X[y == 6, :]\n\n X_0 = eval.split_to_random_sets(X_0, min_size=2, max_size=30)\n X_1 = eval.split_to_random_sets(X_1, min_size=2, max_size=30)\n X_2 = eval.split_to_random_sets(X_2, min_size=2, max_size=30)\n X_3 = eval.split_to_random_sets(X_3, min_size=2, max_size=30)\n\n y = [0] * len(X_0) + [1] * len(X_1) + [2] * len(X_2) + [3] * len(X_3)\n X = X_0 + X_1 + X_2 + X_3\n train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.2)\n ds_train = SetDataset(records=train_x, is_init=True)\n ds_test = SetDataset(records=test_x, is_init=True)\n\n set_gbdt = GradientBoostedSetTreeClassifier(n_estimators=5,\n 
splitter=self.tree_args['splitter'],\n use_attention_set=self.tree_args['use_attention_set'],\n use_attention_set_comp=self.tree_args['use_attention_set_comp'],\n attention_set_limit=self.tree_args['attention_set_limit'],\n max_depth=2,\n max_features=None,\n n_iter_no_change=3,\n tol=1e-4,\n subsample=0.5,\n random_state=0,\n verbose=3)\n\n self.start_timer()\n set_gbdt.fit(ds_train, train_y)\n self.end_timer()\n\n train_acc = (set_gbdt.predict(ds_train) == train_y).mean()\n set_test_acc = (set_gbdt.predict(ds_test) == test_y).mean()\n print('Results : train acc {:.4f} | test acc : {:.4f}'.format(train_acc, set_test_acc))\n\n self.end()\n self.assertGreaterEqual(set_test_acc, 0.94)\n\n\nif __name__ == '__main__':\n toy_tests = TestGBTDProblems(splitter='sklearn',\n use_attention_set=True,\n use_attention_set_comp=True,\n attention_set_limit=3)\n\n toy_tests.first_quarter_four_dim()\n toy_tests.first_quarter_no_active_set_compare()\n toy_tests.influence_of_trees_depth()\n toy_tests.multiclass_mnist()\n print('######## End tests ########')","sub_path":"tests/gbest_sanity.py","file_name":"gbest_sanity.py","file_ext":"py","file_size_in_byte":18498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"469628081","text":"# Dan Blankenberg\nfrom __future__ import print_function\n\nimport sys\n\nfrom galaxy_utils.sequence.fastq import (\n fastqAggregator,\n fastqReader,\n fastqVerboseErrorReader,\n fastqWriter,\n)\n\n\ndef main():\n input_filename = sys.argv[1]\n input_type = sys.argv[2]\n output_filename = sys.argv[3]\n output_type = sys.argv[4]\n force_quality_encoding = sys.argv[5]\n summarize_input = sys.argv[6] == 'summarize_input'\n if force_quality_encoding == 'None':\n force_quality_encoding = None\n\n fix_id = False # fix inconsistent identifiers (SRA data dumps)\n if len(sys.argv) > 7:\n fix_id = sys.argv[7] == 'fix_id'\n\n aggregator = fastqAggregator()\n out = fastqWriter(path=output_filename, format=output_type, force_quality_encoding=force_quality_encoding)\n read_count = None\n if summarize_input:\n reader_type = fastqVerboseErrorReader\n else:\n reader_type = fastqReader\n\n reader = reader_type(path=input_filename, format=input_type, apply_galaxy_conventions=True, fix_id=fix_id)\n for read_count, fastq_read in enumerate(reader):\n if summarize_input:\n aggregator.consume_read(fastq_read)\n out.write(fastq_read)\n out.close()\n\n _print_output(read_count, input_type, output_type, summarize_input, aggregator)\n\n \ndef _print_output(read_count, input_type, output_type, summarize_input, aggregator):\n if read_count is not None:\n print(\"Groomed %i %s reads into %s reads.\" % (read_count + 1, input_type, output_type))\n if input_type != output_type and 'solexa' in [input_type, output_type]:\n print(\"Converted between Solexa and PHRED scores.\")\n if summarize_input:\n print(\"Based upon quality and sequence, the input data is valid for: %s\" % (\", \".join(aggregator.get_valid_formats()) or \"None\"))\n ascii_range = aggregator.get_ascii_range()\n decimal_range = aggregator.get_decimal_range()\n print(\"Input ASCII range: %s(%i) - %s(%i)\" % (repr(ascii_range[0]), ord(ascii_range[0]), repr(ascii_range[1]), ord(ascii_range[1]))) # print using repr, since \\x00 (null) causes info truncation in galaxy when printed\n print(\"Input decimal range: %i - %i\" % (decimal_range[0], decimal_range[1]))\n else:\n print(\"No valid FASTQ reads were provided.\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"galaxy_utils/sequence/scripts/fastq_groomer.py","file_name":"fastq_groomer.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"95620573","text":"# coding=utf-8\nimport datetime\nimport json\nfrom nose.tools import set_trace\nfrom flask.ext.babel import lazy_gettext as _\n\nfrom circulation import (\n LoanInfo,\n HoldInfo,\n FulfillmentInfo,\n BaseCirculationAPI,\n)\nfrom core.odilo import (\n OdiloAPI as BaseOdiloAPI,\n OdiloRepresentationExtractor,\n OdiloBibliographicCoverageProvider,\n MockOdiloAPI as BaseMockOdiloAPI,\n)\n\nfrom core.model import (\n Credential,\n DataSource,\n ExternalIntegration,\n Identifier\n)\n\nfrom core.monitor import (\n CollectionMonitor,\n)\nfrom core.util.http import HTTP\n\nfrom circulation_exceptions import *\n\n\nclass OdiloAPI(BaseOdiloAPI, BaseCirculationAPI):\n NAME = ExternalIntegration.ODILO\n DESCRIPTION = _(\"Integrate an Odilo library collection.\")\n SETTINGS = [\n {\"key\": BaseOdiloAPI.LIBRARY_API_BASE_URL, \"label\": _(\"Library API base URL\")},\n {\"key\": ExternalIntegration.USERNAME, \"label\": _(\"Client Key\")},\n {\"key\": ExternalIntegration.PASSWORD, \"label\": _(\"Client Secret\")},\n ] + BaseCirculationAPI.SETTINGS\n\n SET_DELIVERY_MECHANISM_AT = BaseCirculationAPI.BORROW_STEP\n\n # maps a 2-tuple (media_type, drm_mechanism) to the internal string used in Odilo API to describe that setup.\n delivery_mechanism_to_internal_format = {\n v: k for k, v in OdiloRepresentationExtractor.format_data_for_odilo_format.iteritems()\n }\n\n error_to_exception = {\n \"TitleNotCheckedOut\": NoActiveLoan,\n \"patronNotFound\": PatronNotFoundOnRemote,\n \"ERROR_DATA_NOT_FOUND\": NotFoundOnRemote,\n \"LOAN_ALREADY_RESERVED\": AlreadyOnHold,\n \"CHECKOUT_NOT_FOUND\": NotCheckedOut,\n }\n\n def __init__(self, _db, collection):\n super(OdiloAPI, self).__init__(_db, collection)\n self.odilo_bibliographic_coverage_provider = (\n OdiloBibliographicCoverageProvider(\n collection, api_class=self\n )\n )\n\n def patron_request(self, patron, pin, url, extra_headers={}, data=None, exception_on_401=False, method=None):\n \"\"\"Make an HTTP request on behalf of a patron.\n\n The results are never cached.\n \"\"\"\n headers = dict(Authorization=\"Bearer %s\" % self.token)\n headers['Content-Type'] = 'application/json'\n headers.update(extra_headers)\n\n if method and method.lower() in ('get', 'post', 'put', 'delete'):\n method = method.lower()\n else:\n if data:\n method = 'post'\n else:\n method = 'get'\n\n url = self._make_absolute_url(url)\n response = HTTP.request_with_timeout(\n method, url, headers=headers, data=data,\n timeout=60\n )\n if response.status_code == 401:\n if exception_on_401:\n # This is our second try. 
Give up.\n raise Exception(\"Something's wrong with the patron OAuth Bearer Token!\")\n else:\n # Refresh the token and try again.\n self.check_creds(True)\n return self.patron_request(patron, pin, url, extra_headers, data, True)\n else:\n return response\n\n def _make_absolute_url(self, url):\n \"\"\"Prepend the API base URL onto `url` unless it is already\n an absolute HTTP URL.\n \"\"\"\n if not any(url.startswith(protocol)\n for protocol in ('http://', 'https://')):\n url = self.library_api_base_url + url\n return url\n\n def get_patron_credential(self, patron, pin):\n \"\"\"Create an OAuth token for the given patron.\"\"\"\n\n def refresh(credential):\n return self.get_patron_access_token(credential, patron, pin)\n\n return Credential.lookup(self._db, DataSource.ODILO, \"OAuth Token\", patron, refresh)\n\n def get_patron_access_token(self, credential, patron, pin):\n \"\"\"Request an OAuth bearer token that allows us to act on\n behalf of a specific patron.\n \"\"\"\n\n self.client_key = patron\n self.client_secret = pin\n self.refresh_creds(credential)\n\n return credential\n\n def checkout(self, patron, pin, licensepool, internal_format):\n \"\"\"Check out a book on behalf of a patron.\n\n :param patron: a Patron object for the patron who wants\n to check out the book.\n\n :param pin: The patron's alleged password.\n\n :param licensepool: Identifier of the book to be checked out is \n attached to this licensepool.\n\n :param internal_format: Represents the patron's desired book format.\n\n :return: a LoanInfo object.\n \"\"\"\n record_id = licensepool.identifier.identifier\n\n # Data just as 'x-www-form-urlencoded', no JSON\n\n payload = dict(\n patronId=patron.authorization_identifier,\n format=internal_format,\n )\n\n response = self.patron_request(\n patron, pin, self.CHECKOUT_ENDPOINT.format(recordId=record_id),\n extra_headers={'Content-Type': 'application/x-www-form-urlencoded'},\n data=payload)\n\n if response.content:\n response_json = response.json()\n if response.status_code == 404:\n self.raise_exception_on_error(response_json, default_exception_class=CannotLoan)\n else:\n return self.loan_info_from_odilo_checkout(licensepool.collection, response_json)\n\n # TODO: we need to improve this at the API and use an error code\n elif response.status_code == 400:\n raise NoAcceptableFormat('record_id: %s, format: %s' % (record_id, internal_format))\n\n raise CannotLoan('patron: %s, record_id: %s, format: %s' % (patron, record_id, internal_format))\n\n def loan_info_from_odilo_checkout(self, collection, checkout):\n start_date = self.extract_date(checkout, 'startTime')\n end_date = self.extract_date(checkout, 'endTime')\n\n return LoanInfo(\n collection,\n DataSource.ODILO,\n Identifier.ODILO_ID,\n checkout['id'],\n start_date,\n end_date,\n checkout['downloadUrl']\n )\n\n def checkin(self, patron, pin, licensepool):\n record_id = licensepool.identifier.identifier\n loan = self.get_checkout(patron, pin, record_id)\n url = self.CHECKIN_ENDPOINT.format(checkoutId=loan['id'], patronId=patron.authorization_identifier)\n\n response = self.patron_request(patron, pin, url, method='POST')\n if response.status_code == 200:\n return response\n\n self.raise_exception_on_error(response.json(), default_exception_class=CannotReturn)\n\n @classmethod\n def extract_date(cls, data, field_name):\n if field_name not in data or not data[field_name]:\n d = None\n else:\n # OdiloAPI dates are timestamps in milliseconds\n d = datetime.datetime.utcfromtimestamp(float(data[field_name]) / 1000.0)\n return 
d\n\n @classmethod\n def raise_exception_on_error(cls, data, default_exception_class=None, ignore_exception_codes=None):\n if not data or 'errors' not in data or len(data['errors']) <= 0:\n return '', ''\n\n error = data['errors'][0]\n error_code = error['id']\n message = ('description' in error and error['description']) or ''\n\n if not ignore_exception_codes or error_code not in ignore_exception_codes:\n if error_code in cls.error_to_exception:\n raise cls.error_to_exception[error_code](message)\n elif default_exception_class:\n raise default_exception_class(message)\n\n def get_checkout(self, patron, pin, record_id):\n patron_checkouts = self.get_patron_checkouts(patron, pin)\n for checkout in patron_checkouts:\n if checkout['recordId'] == record_id:\n return checkout\n\n raise NotFoundOnRemote(\"Could not find active loan for patron %s, record %s\" % (patron, record_id))\n\n def get_hold(self, patron, pin, record_id):\n patron_holds = self.get_patron_holds(patron, pin)\n for hold in patron_holds:\n if hold['recordId'] == record_id and hold['status'] in ('informed', 'waiting'):\n return hold\n\n raise NotFoundOnRemote(\"Could not find active hold for patron %s, record %s\" % (patron, record_id))\n\n def fulfill(self, patron, pin, licensepool, internal_format):\n record_id = licensepool.identifier.identifier\n content_link, content, content_type = self.get_fulfillment_link(patron, pin, record_id, internal_format)\n\n if not content_link and not content:\n self.log.info(\"Odilo record_id %s was not available as %s\" % (record_id, internal_format))\n else:\n return FulfillmentInfo(\n licensepool.collection,\n DataSource.ODILO,\n Identifier.ODILO_ID,\n record_id,\n content_link=content_link,\n content=content,\n content_type=content_type,\n content_expires=None\n )\n\n def get_fulfillment_link(self, patron, pin, record_id, format_type):\n \"\"\"Get the link corresponding to an existing checkout.\n \"\"\"\n # Retrieve checkout with its download_ulr. 
It is necessary to generate a download token in our API\n checkout = self.get_checkout(patron, pin, record_id)\n loan_format = checkout['format']\n if format_type and loan_format and (\n format_type == loan_format or\n (loan_format == self.ACSM and format_type in (self.ACSM_EPUB, self.ACSM_PDF))\n ):\n if 'downloadUrl' in checkout and checkout['downloadUrl']:\n content_link = checkout['downloadUrl']\n content = None\n content_type = OdiloRepresentationExtractor.format_data_for_odilo_format[format_type]\n\n # Get also .acsm file\n if format_type in (self.ACSM_EPUB, self.ACSM_PDF):\n response = self.patron_request(patron, pin, content_link)\n if response.status_code == 200:\n content = response.content\n elif response.status_code == 404 and response.content:\n self.raise_exception_on_error(response.json(), CannotFulfill)\n\n return content_link, content, content_type\n\n raise CannotFulfill(\"Cannot obtain a download link for patron[%r], record_id[%s], format_type[%s].\", patron,\n record_id, format_type)\n\n def get_patron_checkouts(self, patron, pin):\n data = self.patron_request(patron, pin, self.PATRON_CHECKOUTS_ENDPOINT.format(patronId=patron.authorization_identifier)).json()\n self.raise_exception_on_error(data)\n return data\n\n def get_patron_holds(self, patron, pin):\n data = self.patron_request(patron, pin, self.PATRON_HOLDS_ENDPOINT.format(patronId=patron.authorization_identifier)).json()\n self.raise_exception_on_error(data)\n return data\n\n def patron_activity(self, patron, pin):\n odilo_checkouts = self.get_patron_checkouts(patron, pin)\n odilo_holds = self.get_patron_holds(patron, pin)\n\n loans_info = []\n holds_info = []\n\n collection = self.collection\n\n for checkout in odilo_checkouts:\n loan_info = self.loan_info_from_odilo_checkout(collection, checkout)\n loans_info.append(loan_info)\n\n for hold in odilo_holds:\n hold_info = self.hold_from_odilo_hold(collection, hold)\n holds_info.append(hold_info)\n\n return loans_info + holds_info\n\n def hold_from_odilo_hold(self, collection, hold):\n start = self.extract_date(hold, 'startTime')\n # end_date: The estimated date the title will be available for the patron to borrow.\n end = self.extract_date(hold, 'notifiedTime')\n position = hold.get('holdQueuePosition')\n\n if position is not None:\n position = int(position)\n\n # Patron already notified to borrow the title\n if 'informed' == hold['status']:\n position = 0\n\n return HoldInfo(\n collection,\n DataSource.ODILO,\n Identifier.ODILO_ID,\n hold['id'],\n start_date=start,\n end_date=end,\n hold_position=position\n )\n\n def place_hold(self, patron, pin, licensepool, notification_email_address):\n \"\"\"Place a book on hold.\n\n :return: A HoldInfo object\n \"\"\"\n\n record_id = licensepool.identifier.identifier\n\n # Data just as 'x-www-form-urlencoded', no JSON\n payload = dict(patronId=patron.authorization_identifier)\n\n response = self.patron_request(\n patron, pin, self.PLACE_HOLD_ENDPOINT.format(recordId=record_id),\n extra_headers={'Content-Type': 'application/x-www-form-urlencoded'},\n data=payload)\n\n data = response.json()\n if response.status_code == 200:\n return self.hold_from_odilo_hold(licensepool.collection, data)\n\n self.raise_exception_on_error(data, CannotHold)\n\n def release_hold(self, patron, pin, licensepool):\n \"\"\"Release a patron's hold on a book.\n \"\"\"\n\n record_id = licensepool.identifier.identifier\n hold = self.get_hold(patron, pin, record_id)\n url = self.RELEASE_HOLD_ENDPOINT.format(holdId=hold['id'])\n payload = 
json.dumps(dict(patronId=patron.authorization_identifier))\n\n response = self.patron_request(patron, pin, url, extra_headers={}, data=payload, method='POST')\n if response.status_code == 200:\n return True\n\n self.raise_exception_on_error(response.json(), default_exception_class=CannotReleaseHold,\n ignore_exception_codes=['HOLD_NOT_FOUND'])\n return True\n\n\nclass OdiloCirculationMonitor(CollectionMonitor):\n \"\"\"Maintain LicensePools for recently changed Odilo titles\n \"\"\"\n SERVICE_NAME = \"Odilo Circulation Monitor\"\n INTERVAL_SECONDS = 500\n PROTOCOL = ExternalIntegration.ODILO\n\n def __init__(self, _db, collection, api_class=OdiloAPI):\n \"\"\"Constructor.\"\"\"\n super(OdiloCirculationMonitor, self).__init__(_db, collection)\n self.api = api_class(_db, collection)\n\n def run_once(self, start, cutoff):\n self.log.info(\"Starting recently_changed_ids, start: \" + str(start) + \", cutoff: \" + str(cutoff))\n\n start_time = datetime.datetime.now()\n self.all_ids(start)\n finish_time = datetime.datetime.now()\n\n time_elapsed = finish_time - start_time\n self.log.info(\"recently_changed_ids finished in: \" + str(time_elapsed))\n\n def all_ids(self, modification_date=None):\n \"\"\"Get IDs for every book in the system, from modification date if any\n \"\"\"\n\n retrieved = 0\n parsed = 0\n new = 0\n offset = 0\n limit = self.api.PAGE_SIZE_LIMIT\n\n if modification_date and isinstance(modification_date, datetime.date):\n modification_date = modification_date.strftime('%Y-%m-%d') # Format YYYY-MM-DD\n\n # Retrieve first group of records\n url = self.get_url(limit, modification_date, offset)\n status_code, headers, content = self.api.get(url)\n content = json.loads(content)\n\n # Retrieve Odilo record in groups\n while status_code == 200 and len(content) > 0:\n offset += limit\n retrieved += len(content)\n self.log.info('Retrieved %i records' % retrieved)\n\n # Process a bunch of records retrieved\n for record in content:\n record_id = record['id']\n self.log.info('Processing record %i/%i: %s' % (parsed, retrieved, record_id))\n identifier, is_new = self.api.odilo_bibliographic_coverage_provider.process_item(\n record_id, record\n )\n\n if is_new:\n new += 1\n\n parsed += 1\n\n # Persist each bunch of retrieved records\n self._db.commit()\n\n # Retrieve next group of records\n url = self.get_url(limit, modification_date, offset)\n status_code, headers, content = self.api.get(url)\n content = json.loads(content)\n\n if status_code >= 400:\n self.log.error('ERROR: Fail while retrieving data from remote source: HTTP ' + status_code)\n if content:\n self.log.error('ERROR response content: ' + str(content))\n else:\n self.log.info('Retrieving all ids finished ok. Retrieved %i records. New records: %i!!' 
% (retrieved, new))\n\n def get_url(self, limit, modification_date, offset):\n url = \"%s?limit=%i&offset=%i\" % (self.api.ALL_PRODUCTS_ENDPOINT, limit, offset)\n if modification_date:\n url = \"%s&modificationDate=%s\" % (url, modification_date)\n\n return url\n\n\nclass FullOdiloCollectionMonitor(OdiloCirculationMonitor):\n \"\"\"Monitor every single book in the Odilo collection.\n\n This tells us about books added to the Odilo collection that\n are not found in our collection.\n \"\"\"\n SERVICE_NAME = \"Odilo Full Collection Overview\"\n INTERVAL_SECONDS = 3600 * 4\n\n def run_once(self, start=None, cutoff=None):\n \"\"\"Ignore the dates and return all IDs.\"\"\"\n self.log.info(\"Starting recently_changed_ids, start: \" + str(start) + \", cutoff: \" + str(cutoff))\n\n start_time = datetime.datetime.now()\n self.all_ids(None)\n finish_time = datetime.datetime.now()\n\n time_elapsed = finish_time - start_time\n self.log.info(\"recently_changed_ids finished in: \" + str(time_elapsed))\n\n\nclass RecentOdiloCollectionMonitor(OdiloCirculationMonitor):\n \"\"\"Monitor recently changed books in the Odilo collection.\"\"\"\n\n SERVICE_NAME = \"Odilo Collection Recent Monitor\"\n INTERVAL_SECONDS = 60\n\n\nclass MockOdiloAPI(BaseMockOdiloAPI, OdiloAPI):\n def patron_request(self, patron, pin, *args, **kwargs):\n response = self._make_request(*args, **kwargs)\n\n # Modify the record of the request to include the patron information.\n original_data = self.requests[-1]\n\n # The last item in the record of the request is keyword arguments.\n # Stick this information in there to minimize confusion.\n original_data[-1]['_patron'] = patron\n original_data[-1]['_pin'] = patron\n return response\n","sub_path":"api/odilo.py","file_name":"odilo.py","file_ext":"py","file_size_in_byte":18575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"324959594","text":"import os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\nALLOWED_HOSTS = ['techpit-kanban-2020.herokuapp.com', '127.0.0.1' ]\n\nDEBUG = True\n","sub_path":"config/local_settings.py","file_name":"local_settings.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"342782744","text":"from flask import Blueprint, render_template, request, redirect, jsonify, url_for, flash, make_response\nfrom flask import session as Flask_Session\nfrom oauth2client.client import flow_from_clientsecrets, FlowExchangeError\nimport random, string, httplib2, json, requests\n\n\ndef init(app = None, DATA_SCOPE = \"openid email\", Client_Secret = 'client_secrets.json', data_Approvalprompt=\"force\"):\n DATA_CLIENT_ID = json.loads(open(Client_Secret, 'r').read())['web']['client_id']\n Oauth = Blueprint('Oauth', __name__, template_folder='templates')\n\n # Create anti-forgery state token\n @Oauth.route('/Login/')\n def Login():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))\n Flask_Session['state'] = state\n\n #return \"The current session state is %s\" % Flask_Session['state']\n return render_template('login.html', app=app, STATE=state, DATA_CLIENT_ID=DATA_CLIENT_ID, DATA_SCOPE=DATA_SCOPE, data_Approvalprompt=data_Approvalprompt)\n\n @Oauth.route('/gconnect', methods=['POST'])\n def gconnect():\n # Validate state token\n if request.args.get('state') 
!= Flask_Session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets(Client_Secret, scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != DATA_CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n print(\"Token's client ID does not match app's.\")\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = Flask_Session.get('access_token')\n stored_gplus_id = Flask_Session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(json.dumps('Current user is already connected.'),\n 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n Flask_Session['access_token'] = credentials.access_token\n Flask_Session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n\n Flask_Session['username'] = data['name']\n Flask_Session['picture'] = data['picture']\n Flask_Session['email'] = data['email']\n\n return \"Conected\"\n\n # DISCONNECT - Revoke a current user's token and reset their Flask_Session\n\n @Oauth.route('/gdisconnect')\n def gdisconnect():\n access_token = Flask_Session.get('access_token')\n if access_token is None:\n print\n 'Access Token is None'\n response = make_response(json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % Flask_Session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n del Flask_Session['access_token']\n del Flask_Session['gplus_id']\n del Flask_Session['username']\n del Flask_Session['email']\n del Flask_Session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response 
= make_response(json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response\n\n app.register_blueprint(Oauth)\n\n","sub_path":"Oauth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"358060407","text":"\"\"\"\n关于用户资料\n\"\"\"\nfrom LoveHouse.api_1_0 import api\nfrom flask import request,g,jsonify,session\nfrom LoveHouse.utils.common import login_required\nfrom LoveHouse.utils.image_storage import storage\nfrom LoveHouse.response_code import RET\nfrom LoveHouse import constant,db,models\n\n@api.route(rule=\"/users/avatar\",methods=[\"POST\"])\n@login_required\ndef set_user_avatar():\n \"\"\" 设置用户头像 \"\"\"\n # 获取参数\n user_id = g.user_id # 在login_required中保存了user_id\n image_file = request.files.get(\"avatar\") # 获取文件对象\n if image_file is None:\n return jsonify(error_code=RET.PARAMERR,error_message=\"未接收到头像\")\n file_data = image_file.read() # 读取文件二进制数据\n try:\n file_name = storage(file_data) # 上传\n except Exception as e:\n return jsonify(error_code=RET.THIRDERR,error_message=\"七牛保存文件失败\")\n # 保存到数据库中\n try:\n models.User.query.filter_by(id=user_id).update({\"avatar_url\":file_name})\n db.session.commit()\n except Exception as e:\n db.session.rollback() # 保存失败回滚\n return jsonify(error_code=RET.DBERR,error_message=\"数据库异常\")\n avatar_url = constant.QINIU_URL_DOMAIN + file_name\n return jsonify(error_code=RET.OK,error_message=\"保存头像成功\",data={\"avatar_url\":avatar_url})\n\n@api.route(rule=\"/users\",methods=[\"GET\"])\n@login_required\ndef get_user_profile():\n \"\"\" 获取个人信息 \"\"\"\n # 获取参数\n user_id = g.user_id\n # 获取用户对象\n try:\n user = models.User.query.filter_by(id=user_id).first()\n except Exception as e:\n return jsonify(error_code=RET.DBERR,error_message=\"数据库获取数据失败\")\n else:\n return jsonify(error_code=RET.OK,error_message=\"成功\",data=user.to_dict())\n\n@api.route(rule=\"/users/name\",methods=[\"PUT\"])\n@login_required\ndef change_user_name():\n \"\"\" 获取用户名 \"\"\"\n # 获取参数\n user_id = g.user_id\n name = request.data.get(\"name\")\n # 验证参数\n if not name:\n return jsonify(error_code=RET.PARAMERR,error_message=\"参数不完整\")\n # 获取用户对象\n try:\n models.User.query.get(user_id).query.update({\"name\":name})\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return jsonify(error_code=RET.DBERR,error_message=\"数据库异常\")\n session[\"name\"] = name\n return jsonify(error_code=RET.OK,error_message=\"用户名修改成功\",data={\"name\":name})\n\n@api.route(rule=\"/users/auth\",methods=[\"GET\"])\n@login_required\ndef get_user_auth():\n \"\"\" 获取用户实名认证信息 \"\"\"\n # 获取参数\n user_id = g.user_id\n # 获取用户对象\n try:\n user = models.User.query.get(user_id)\n except Exception as e:\n return jsonify(error_code=RET.DBERR, error_message=\"数据库异常\")\n real_name = user.real_name\n id_card = user.id_card\n if real_name and id_card:\n data = {\n \"real_name\":real_name,\"id_card\":id_card\n }\n return jsonify(error_code=RET.OK,error_message=\"获取身份信息成功\",data=data)\n return jsonify(error_code=RET.NODATA,error_message=\"未设置身份信息\")\n\n@api.route(rule=\"/users/auth\",methods=[\"POST\"])\n@login_required\ndef set_user_auth():\n \"\"\" 设置用户实名认证信息 \"\"\"\n # 获取参数\n request_dict = request.get_json()\n real_name = request_dict.get(\"real_name\")\n id_card = request_dict.get(\"id_card\")\n # 验证参数\n if not all([real_name,id_card]):\n return jsonify(error_code=RET.PARAMERR,error_message=\"参数不完整\")\n # 
一般来说这些数据是要验证真伪的,但是没有公安系统只能普通的保存了\n user_id = g.user_id\n # 获取用户\n try:\n models.User.query.filter_by(id=user_id).update({\"real_name\":real_name,\"id_card\":id_card})\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return jsonify(error_code=RET.DBERR,error_message=\"数据库异常\")\n return jsonify(error_code=RET.OK,error_message=\"保存成功\")","sub_path":"LoveHouse/api_1_0/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"73687691","text":"class Node:\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\n\nclass LinkedList:\n def __init__(self, node=None):\n self.head = node\n self.tail = node\n self.length = 1 if node is not None else 0\n\n def __len__(self):\n return self.length\n\n def add_to_head(self, value):\n self.length += 1\n new_node = Node(value)\n if not self.head and not self.tail:\n self.head = new_node\n self.tail = new_node\n else:\n new_node.next = self.head\n self.head = new_node\n\n def add_to_tail(self, value):\n self.length += 1\n new_node = Node(value)\n if not self.head and not self.tail:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n\n def remove_head(self):\n if not self.head and not self.tail:\n return None\n if self.head is self.tail:\n self.length -= 1\n value = self.head.value\n self.head = None\n self.tail = None\n return value\n else:\n self.length -= 1\n value = self.head.value\n self.head = self.head.next\n return value\n\n def remove_tail(self):\n if not self.head and not self.tail:\n return None\n if self.head is self.tail:\n self.length -= 1\n value = self.tail.value\n self.head = None\n self.tail = None\n return value\n else:\n self.length -= 1\n value = self.tail.value\n current = self.head\n while current:\n if current.next is self.tail:\n self.tail = current\n current = current.next\n return value\n","sub_path":"singly_linked_list/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"338993270","text":"import csv\nimport sys\nfrom utility import *\n\nsites = {}\ndef siteAggregateReducer(current, line):\n global sites\n if len(line) == 0 and len(current) > 0:\n data = current.split('\\n')\n sites[url2site(data[0].split('\\t')[1])] = (data[0].split('\\t')[1], data[1].split('\\t')[1])\n return \"\"\n current += line + \"\\n\"\n return current\n\ndef writeCSV(file, dict, f):\n writer = csv.writer(open(file, 'wb'), escapechar='\\\\', lineterminator='\\n', quoting=csv.QUOTE_ALL)\n for k,v in dict.iteritems():\n writer.writerow(f(k, v))\n\nreduce(siteAggregateReducer, open(sys.argv[1], 'r').read().split('\\n'))\n\nwriteCSV('urls.csv', sites, lambda k, v: [k, v[0], v[1]]) \nwriteCSV('tag_urls.csv', sites, lambda k, v: [sys.argv[2], k, v[0], v[1]]) \nwriteCSV('labels.csv', sites, lambda k, v: [k, sys.argv[2]]) \n","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"482288930","text":"import logging\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport hazelbean as hb\nfrom hazelbean.ui import model, inputs\nfrom hazelbean.ui import 
validation\n\nlogging.basicConfig(level=logging.WARNING)\nhb.ui.model.LOGGER.setLevel(logging.WARNING)\nhb.ui.inputs.LOGGER.setLevel(logging.WARNING)\n\nL = hb.get_logger('seals')\nL.setLevel(logging.INFO)\n\nlogging.getLogger('Fiona').setLevel(logging.WARNING)\nlogging.getLogger('fiona.collection').setLevel(logging.WARNING)\n\nnp.seterr(divide='ignore', invalid='ignore')\n\ndev_mode = True\n# TODOO NOTE This funtion must be here as it is automatically called by the invest model. Consider making this more flexible in the next release.\n@validation.invest_validator\ndef validate(args, limit_to=None):\n validation_error_list = []\n return validation_error_list\n\nclass SealsUI(model.HazelbeanModel):\n def __init__(self, p):\n self.p = p\n model.HazelbeanModel.__init__(self,\n # label=u'seals',\n label=u'SEALS: Spatial Economic Allocation Land-change Simulator',\n target=p.execute,\n validator=validate,\n localdoc='../documentation')\n\n self.area_of_interest_path = inputs.File(args_key='area_of_interest_path',\n # default_path=p.default_paths['area_of_interest_path'],\n helptext=\"A shapefile with a single polygon that will be used to clip any other datasets.\",\n label='Area of interest',\n validator=None)\n self.add_input(self.area_of_interest_path)\n\n self.base_year_lulc_path = inputs.File(args_key='base_year_lulc_path',\n helptext=(\"File path for the land-use, land-cover map onto which expansion will be added.\"),\n label='Land-use, land-cover (raster)',\n validator=None)\n self.add_input(self.base_year_lulc_path)\n\n # MODE SWITCH WOULD GO HERE\n\n self.coarse_change_maps_dir = inputs.Folder(args_key='coarse_change_maps_dir',\n helptext=(\"Dir to several rasters or a single raster that defines how many new hectares of each class will be in each grid-cell. By definition, this must be coarser resolution than the LULC map (otherwise you alread know all you need to know). Running the model will allocate the hectarage changes in this coarser map to the best locations on the higher-resolution LULC map.\"),\n label='Coarse change maps directory',\n validator=None)\n self.add_input(self.coarse_change_maps_dir)\n\n self.physical_suitability_path = inputs.File(args_key='physical_suitability_path',\n helptext=(\"File path to a raster that defines physical suitability (e.g. from soils or slope, NOT from adjacency) on a 0-1 scale (1 is most suitable). Where this raster is zero, no expansion can happen.\"),\n label='Physical suitability (raster)',\n validator=None)\n self.add_input(self.physical_suitability_path)\n\n self.physical_suitability_importance = inputs.Text(\n args_key='physical_suitability_importance',\n helptext=(\"File path to a raster that defines physical suitability (e.g. from soils or slope, NOT from adjacency) on a 0-1 scale (1 is most suitable). Where this raster is zero, no expansion can happen.\"),\n label='Physical suitability importance',\n validator=None)\n self.add_input(self.physical_suitability_importance)\n\n self.lulc_class_types_path = inputs.File(\n args_key='lulc_class_types_path',\n helptext=('CSV that defines which LULC classes below to which LULC class types. Has two columns, \"lulc_id\" for the original class IDs in the input LULC map, and \"lulc_class_type\", which is an index for each of the class types. This step typically simplifies an LULC map with many similar classes to one with fewer classes (that might allow for easier understanding of expansion relationships). 
For example forest and grass both might be simplified to \\\"natural\\\".'),\n label='LULC class types (CSV)',\n validator=None)\n self.add_input(self.lulc_class_types_path)\n\n self.class_proximity_parameters_path = inputs.File(\n args_key='class_proximity_parameters_path',\n helptext=(\"A csv with 3 columns, lulc_class_type, clustering, decay and 1 row for each class that has any effect on agriculture. Clustering defines how much fragmentation affects the definition of class proximity ( lower value, like 1, means that pixels must be quite close to be considered part of a cluster while higher values, like 8, mean that pixels count as a cluster even if spread out considerably.\"),\n label='Class proximity parameters (CSV)',\n validator=None)\n self.add_input(self.class_proximity_parameters_path)\n\n self.pairwise_class_relationships_path = inputs.File(\n args_key='pairwise_class_relationships_path',\n helptext=(\"A csv that defines the relationships between all N classes in the simplified LULC. The first row and first column contain the class-ids from the simplified LULC while the interior values define how the class in the row_id attracts or repulses (1, -1 respectively) the col_id class;\"),\n label='Pairwise class relationships (CSV)',\n validator=None)\n self.add_input(self.pairwise_class_relationships_path)\n\n self.conversion_eligibility_path = inputs.File(\n args_key='conversion_eligibility_path',\n helptext=(\"A csv that defines the relationships between all N classes in the simplified LULC. The first row and first column contain the class-ids from the simplified LULC while the interior values define how the class in the row_id attracts or repulses (1, -1 respectively) the col_id class;\"),\n label='Conversion eligibility (CSV)',\n validator=None)\n self.add_input(self.conversion_eligibility_path)\n\n self.output_base_map_path = inputs.File(\n args_key='output_base_map_path',\n helptext=(\"If given, results will be placed on top of this map, inheriting the base map's final size and LULC where no data.\"),\n label='Output base map (optional)',\n validator=None)\n self.add_input(self.output_base_map_path)\n\n self.intermediate_dir = inputs.Folder('intermediate_dir', helptext='help', args_key='intermediate_dir')\n self.add_input(self.intermediate_dir)\n\n # # NOTE, containers dont need a seperate interactivity slot. 
has it by default it seems\n # self.advanced_options_container = inputs.Container(\n # args_key='advanced_options_container',\n # expandable=True,\n # expanded=False,\n # interactive=True,\n # label='Show advanced options')\n # self.add_input(self.advanced_options_container)\n\n self.enable_batch_mode = inputs.Checkbox('Enable batch mode', helptext='help', args_key='enable_batch_mode')\n self.enable_batch_mode.checkbox.setChecked(False)\n\n self.use_existing_batch = inputs.Checkbox('Use existing batch directory', helptext='help', args_key='use_existing_batch')\n self.use_existing_batch.checkbox.setChecked(True)\n\n self.skip_existing_batch_components = inputs.Checkbox('Skip existing batch components', helptext='help', args_key='skip_existing_batch_components')\n self.skip_existing_batch_components.checkbox.setChecked(True)\n\n self.batch_id = inputs.Text(\n args_key='batch_id',\n helptext=(\"Name of the column within the batch shapefile that identifies the name of each batch region.\"),\n label='Batch ID',\n validator=None)\n\n if dev_mode:\n self.add_input(self.enable_batch_mode)\n self.add_input(self.use_existing_batch)\n self.add_input(self.skip_existing_batch_components)\n self.add_input(self.batch_id)\n\n def generate_args_from_inputs(self):\n \"\"\"Used to geenrate args automatically rather than manuually adding them.\n e.g., args[self.create_simplied_lulc.args_key] = self.create_simplied_lulc.value(),\n Note that this then means that the args_key must be exactly correct.\"\"\"\n args = OrderedDict()\n input_types_to_read = [\n inputs.Text,\n inputs.Checkbox,\n inputs.Container,\n inputs.Dropdown,\n inputs.File,\n inputs.FileButton,\n inputs.FolderButton,\n inputs.Folder,\n inputs.InVESTModelInput,\n inputs.Multi,\n # inputs.FileSystemRunDialog,\n # inputs.FileDialog,\n ]\n\n for k,v in self.__dict__.items():\n if type(v) in input_types_to_read:\n args[v.args_key] = v.value()\n return args\n\n def assemble_args(self):\n return self.generate_args_from_inputs()\n","sub_path":"seals_ui.py","file_name":"seals_ui.py","file_ext":"py","file_size_in_byte":9001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"88828134","text":"import data\n\nfrom cogs import update_cog\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom .dependencies import owner_or_admin\nfrom .models import BotActivityMapping\nfrom constants import *\n\nimport config\nconfig.parse_args()\n\n\nrouter = APIRouter(\n prefix=\"/mappings/bot_activity\",\n tags=[\"bot_activity\"],\n dependencies=[Depends(owner_or_admin)],\n responses={404: {\"description\": \"Not found\"}},\n)\n\n\n@router.get(\"/{guildId}\", response_model=BotActivityMapping)\nasync def read_mapping(guildId: str):\n bot_instance = data.get_bot_instance(guildId)\n if not bot_instance:\n return {}\n\n activity_text = bot_instance[BOT_ACTIVITY_TEXT_KEY]\n activity_type = bot_instance[BOT_ACTIVITY_TYPE_KEY]\n\n return {\"activity_text\": activity_text, \"activity_type\": activity_type}\n\n\n@router.post(\"\", response_model=BotActivityMapping)\nasync def add_mapping(mapping: BotActivityMapping, guildId: str):\n bot_instance = data.get_bot_instance(guildId)\n\n if not bot_instance:\n raise HTTPException(status_code=404, detail=\"Bot config not found\")\n\n task = {\n 'kwargs': {\n 'guild_id': int(guildId),\n 'bot_id': int(bot_instance[BOT_ID_KEY]),\n 'activity_type_str': mapping.activity_type,\n 'activity_text': mapping.activity_text\n },\n 'function': 'update_activity'\n }\n data.add_task(task)\n\n 
return {\"success\": 1}\n","sub_path":"rallyrolebot/api/bot_activity_mappings.py","file_name":"bot_activity_mappings.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"629395868","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 28 19:28:01 2020\r\n\r\n@author: Varsha\r\n\"\"\"\r\n\r\nn = int(input())\r\ns1 = set(input())\r\n\r\nfor i in range(n-1):\r\n s2 = set(input())\r\n s1 = s1 & s2\r\n\r\nprint(len(s1))\r\n","sub_path":"Code library/149.gemstone.py","file_name":"149.gemstone.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"631782531","text":"from flask import Flask\nfrom flask import request\nfrom service import ToDoService\n\napp = Flask(__name__)\n\n\n@app.route('/todo/', methods=[\"POST\"])\ndef create_todo():\n result = ToDoService().create(request.get_json)\n return result.to_json()\n\n\n@app.route('/todo/done/', methods=[\"PUT\"])\ndef done_to_do(to_do_id):\n result = ToDoService().done(to_do_id)\n return result.to_json()\n\n\n@app.route('todo/undone', methods=[\"GET\"])\ndef get_undone_to_dos():\n result = ToDoService().get_undone()\n return result\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"41790970","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\n \r\ndf = pd.DataFrame.from_csv('RexJggKTableP28K5_no2_28n.csv', header=1);\r\nprint(df)\r\ndf.plot(legend=False);\r\n \r\nplt.xlim(0, 20 * (10**4));\r\nplt.ylim(10**(-8), 10**4);\r\nplt.xlabel(\"Number of Evals\");\r\nplt.ylabel(\"Best Evaluation Value\");\r\nplt.yscale('log')\r\nplt.savefig('no2_28n.pdf');\r\nplt.show();\r\nplt.close();\r\n","sub_path":"RexJggData/no2/28n/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"56013049","text":"import string\r\n\r\n\r\nparties_names = string.ascii_uppercase\r\n\r\ntests = int(input())\r\n\r\n\r\n\r\ndef solve(d, maj):\r\n\tm = max(maj.values())\r\n\tkey = None\r\n\ttaken = []\r\n\tfor i in maj:\r\n\t\tif maj[i] == m:\r\n\t\t\tkey = i\r\n\t\t\tbreak\r\n\r\n\ttaken.append(key)\r\n\r\n\td[key] -= 1\r\n\tif d[key] == 0:\r\n\t\td.pop(key)\r\n\t\r\n\ttotal = sum(d.values())\r\n\t\r\n\tfor i in d:\r\n\t\tmaj[i] = d[i] / total\r\n\r\n\tm = max(maj.values())\r\n\tif m > 0.5:\r\n\t\tkey = None\r\n\t\tfor i in maj:\r\n\t\t\tif maj[i] == m:\r\n\t\t\t\tkey = i\r\n\t\t\t\tbreak\r\n\r\n\t\ttaken.append(key)\r\n\t\td[key] -= 1\r\n\t\tif d[key] == 0:\r\n\t\t\td.pop(key)\r\n\treturn taken\r\n\r\ntest = 1\r\n\r\nwhile test <= tests:\r\n\tnum = int(input())\r\n\tparties = list(map(int, input().split(' ')))\r\n\tmaj = dict()\r\n\td = dict()\r\n\ttotal = sum(parties)\r\n\tfor i in range(len(parties)):\r\n\t\td[parties_names[i]] = parties[i]\r\n\t\tmaj[parties_names[i]] = parties[i] / total\r\n\r\n\ttaken = []\r\n\twhile len(d) > 0:\r\n\t\ttaken.append(solve(d, maj))\r\n\r\n\tprint('Case #%d: ' % (test,), end='')\r\n\tfor i in taken:\r\n\t\tprint(''.join(i), end=' ')\r\n\r\n\tprint('')\r\n\t\r\n\ttest += 
1\r\n\r\n","sub_path":"solutions_5753053697277952_1/Python/qwikelx/prog1.py","file_name":"prog1.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"389777796","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nfrom datetime import datetime\nfrom argparse import ArgumentParser\n\nstart = datetime.now()\n\ndef getListOfFiles(opts):\n fileList = []\n if opts.subdirs:\n prefix = opts.input + \"/\"\n dirs = [ prefix + tmpDir for tmpDir in os.listdir(opts.input) if os.path.isdir(prefix + tmpDir) ]\n for tmpdir in dirs:\n if opts.verbose:\n print('Searching for files in {}'.format(tmpdir))\n prefix = tmpdir + \"/\"\n tmpfiles = [ prefix + file for file in os.listdir(tmpdir) if os.path.isfile(prefix + file) and file.endswith('.' + opts.task) ]\n if tmpfiles:\n for file in tmpfiles:\n fileList.append(file)\n else:\n prefix = opts.input + \"/\"\n tmpfiles = [ prefix + file for file in os.listdir(opts.input) if os.path.isfile(prefix + file) and file.endswith('.' + opts.task) ]\n if tmpfiles:\n for file in tmpfiles:\n fileList.append(file)\n\n fileList.sort()\n return fileList\n\ndef main(args=None):\n parser = ArgumentParser(\n usage=\"Usage: %(prog)s [options]\", description=\"Data list builder\")\n\n parser.add_argument(\"-i\", \"--input\", type=str,\n dest='input', help='Input DATA WD')\n parser.add_argument(\"-o\", \"--output\", type=str,\n dest='output', help='Output list (text file)')\n parser.add_argument(\"-v\", \"--verbose\", dest='verbose', default=False,\n action='store_true', help='run in high verbosity mode')\n parser.add_argument(\"-s\", \"--subdirs\", dest='subdirs', default=False,\n action='store_true', help='search in subdirs')\n parser.add_argument(\"-t\", \"--task\", type=str,\n dest='task', help='root/csv task')\n \n opts = parser.parse_args(args)\n\n # Get list of ROOT files\n dataList = getListOfFiles(opts)\n \n # Write files to list\n with open(opts.output, \"w\") as myList:\n for elm in dataList:\n myList.write(elm + \"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n print(datetime.now()-start)\n","sub_path":"Assets/listBuilder/listBuilder.py","file_name":"listBuilder.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"558583000","text":"import re\nimport csv\nimport sys\n\n# Avoid issue with fields larger than max size\ncsv.field_size_limit(sys.maxint)\n\nVERSION = '0.3.2'\n\n\nclass Processor(object):\n\n def __init__(self, fields=None, invert=False, delimiter=',',\n quotechar='\"', skip=0, grep_fields_map={}, substitutions_map={}):\n self.fields = fields\n self.invert = invert\n self.delimiter = delimiter\n self.quotechar = quotechar\n self.skip = skip\n self.validators = []\n self.grep_fields_map = grep_fields_map\n self.substitutions_map = substitutions_map\n\n def add_validator(self, f):\n self.validators.append(f)\n\n def skip_due_to_grep(self, row):\n for field_index, grep_expression in self.grep_fields_map.iteritems():\n if not re.match(grep_expression, row[field_index]):\n return True\n return False\n\n def substitute(self, row):\n for field_index, field_substitutions_map in self.substitutions_map.iteritems():\n source_string = row[field_index]\n dest_string = field_substitutions_map.get(source_string)\n if dest_string:\n row[field_index] = dest_string\n\n def process(self, file_handle):\n reader = csv.reader(file_handle, delimiter=self.delimiter,\n 
quotechar=self.quotechar)\n for row in reader:\n output = None\n if reader.line_num <= self.skip:\n continue\n if self.skip_due_to_grep(row):\n continue\n self.substitute(row)\n if self.fields:\n if not self.invert:\n output = [row[i] for i in self.fields if len(row) > i]\n else:\n output = [e for i,e in enumerate(row) if i not in self.fields]\n else:\n output = row\n if not self.is_valid(output):\n continue\n if output:\n yield output\n\n def is_valid(self, row):\n for validator in self.validators:\n if not validator(row):\n return False\n return True\n","sub_path":"csvhandler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"391238882","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 1 15:40:00 2019\r\n\r\n@author: anyan.sun\r\n\"\"\"\r\n\r\nimport logging\r\nimport datetime\r\n\r\n\r\nclass logging_func:\r\n def __init__(self, logger_name, filepath):\r\n self.logger_name = logger_name\r\n self.filepath = filepath\r\n\r\n def myLogger(self):\r\n\r\n logger = logging.getLogger(self.logger_name)\r\n logger.setLevel(logging.DEBUG)\r\n\r\n handler = logging.FileHandler(self.filepath + 'log/' + self.logger_name + format(datetime.datetime.now(), \"%m_%d_%Y\") + '.log')\r\n\r\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n return logger\r\n","sub_path":"codes/class_logging.py","file_name":"class_logging.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"601592712","text":"lista = list()\nwhile True:\n v = int(input('Digite um valor: '))\n lista.append(v)\n choice = str(input('Deseja Continuar? [S/N]')).lower()\n while choice not in 'sn':\n choice = str(input('Deseja Continuar? [S/N]')).lower()\n if choice in 'n':\n break\nlista.sort(reverse=True)\nprint('-=-'*15)\nprint(f'Você digitou {len(lista)} elementos.')\nprint(f'Os elementos na ordem decrescente são: {lista}')\nprint(f'O número 5 foi encontrado na lista.' 
if 5 in lista else 'O número 5 não foi encontrado na lista.')","sub_path":"Exercicios_1/ex081.py","file_name":"ex081.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"580689078","text":"# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#给文本加行号 \n\n\nimport project9_util as util\n\n\nencode_type = util.detect_encoding('fudan_history.txt')\n\ndef text_linenumber():\n with open('fudan_history.txt','r',encoding = encode_type) as file:\n line = file.readlines()\n i = 1\n z = []\n for a in range(1,len(line)):\n if line[a] == '\\n':\n s = '\\n\\n'\n a += 1\n else:\n s = ' %d %s\\n'%(i,line[a])\n i += 1\n a += 1\n b = z.append(s)\n \n new_file = util.nl_filename('fudan_history.txt')\n text = open(new_file,'x',encoding = encode_type)\n new_file= text.writelines(z)\n \n\nif __name__ == '__main__':\n text_linenumber()\n \n","sub_path":"project9/project/16307090235.py","file_name":"16307090235.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"184591902","text":"\nimport frida\nimport time\nimport sys\n\n\nRED = \"\\033[1;31m\" \nBLUE = \"\\033[1;34m\"\nCYAN = \"\\033[1;36m\"\nWHITE = \"\\033[1;37m\"\nYELLOW= \"\\033[1;33m\"\nGREEN = \"\\033[0;32m\"\nRESET = \"\\033[0;0m\"\nBOLD = \"\\033[;1m\"\nREVERSE = \"\\033[;7m\"\n\nclass nativeHandler():\n\n\n\n modules = []\n device = None\n script = None\n\n def __init__(self,device):\n super(nativeHandler,self).__init__()\n self.device = device\n \n \n\n def on_message(self,message, data):\n try:\n if message[\"type\"] == \"send\":\n payload = message[\"payload\"]\n self.modules.append(payload.split(\":\")[0].strip()) \n #self.script.post({'input':'null'}) \n \n except Exception as e:\n print(e) \n\n def getModules(self,package,force):\n\n print('[i] Using device with id {}'.format(self.device))\n\n self.modules = []\n try:\n if force:\n pid = self.device.spawn(package)\n print(\"[i] Starting process {} [pid:{}]\".format(package,pid))\n session = self.device.attach(pid)\n script = session.create_script(open(\"libraries/native.js\").read())\n script.on('message', self.on_message)\n script.load()\n self.device.resume(pid)\n time.sleep(5)\n script.unload()\n else:\n pid = self.device.get_process(package).pid\n print(\"[i] Attaching to process {} [pid:{}]\".format(package,pid))\n session = self.device.attach(pid)\n script = session.create_script(open(\"libraries/native.js\").read())\n script.on('message', self.on_message)\n script.load()\n time.sleep(5)\n script.unload()\n \n except Exception as e:\n print(e)\n \n return self.modules\n\n def __getitem__(self,key):\n return self.modules","sub_path":"libraries/natives.py","file_name":"natives.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"342787218","text":"import json\nimport time\nimport random\n\ndef lambda_handler(event, context):\n # chat\n text = event['text']\n \n greetings = ['Hello', 'hello', 'Hi', 'hi']\n hobbies = ['movies', 'music', 'fantasy', 'games', 'video']\n emotions = ['sad', 'happy', 'love']\n dislikes = ['exam', 'homework', 'midterm']\n \n message = [None] * 5\n message[0] = \"I'm a chat bot!\"\n message[1] = \"My NLP function is not ready yet.\"\n message[2] = \"Sorry, I cannot help you for now.\"\n message[3] = \"Are you a chat bot? 
Let's be friends!\"\n message[4] = \"Did you just say '\" + text + \"'?\"\n \n reply = None\n for word in text.split(' '):\n if word in greetings:\n reply = \"Hi, nice to meet you!\"\n elif word in hobbies:\n reply = \"Yeah, we need some fun in life.\"\n elif word in emotions:\n reply = \"I think I know how it feels...though you may not believe me:)\"\n elif word in dislikes:\n reply = \"That's awful!\"\n \n if not reply:\n reply = random.choice(message)\n \n \n current_time = time.localtime()\n now = time.strftime('%m-%d-%Y %H:%M:%S', current_time)\n \n response = {\n \"message\": reply,\n \"time\": now\n }\n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps(response)\n }\n","sub_path":"Chatbot/simple_chat_lambda_function.py","file_name":"simple_chat_lambda_function.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"365291803","text":"#!/usr/bin/python2.7\nimport httplib2\nfrom oauth2client.client import SignedJwtAssertionCredentials\nfrom apiclient.discovery import build\n\nBIGQUERY_SCOPE = 'https://www.googleapis.com/auth/bigquery'\n\n# Service account and keyfile only used for service account auth.\nSERVICE_ACCT = ('317752944021@developer.gserviceaccount.com')\n# Service account access will only be enabled if this file is present.\nKEY_FILE = 'dev_auth/key.pem'\ndef get_bigquery():\n with open (KEY_FILE, 'rb') as f:\n key = f.read();\n creds = SignedJwtAssertionCredentials(\n SERVICE_ACCT, \n key,\n BIGQUERY_SCOPE) \n return build('bigquery', 'v2', http=creds.authorize(httplib2.Http()))\n","sub_path":"samples/ch08/sensors/cloud/src/dev_auth/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"142936016","text":"from __future__ import print_function, absolute_import\nimport time\n\nfrom torch.autograd import Variable\nfrom utils.utils import AverageMeter\n\nclass BaseTrainer(object):\n def __init__(self, model, criterion):\n super(BaseTrainer, self).__init__()\n self.model = model\n self.criterion = criterion\n \n def train(self, parse, epoch, data_loader, criterion, optimizer, lr_scheduler, print_freq=1):\n \n \n if parse.fine('train') != -1:\n model.eval()\n else: model.train()\n \n \n lr_scheduler.step()\n end = time.time\n \n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n prtcisions = AverageMeter()\n \n for i, inputs in enumerate(data_loader):\n data_time.update(time.time - end)\n inputs, targets = self._parse_data(inputs)\n loss, precl = self._forward(inputs, targets)\n \n losses.update(torch.sum(loss.data), targets.size(0))\n precisions.update(precl)\n \n optimizer.zero_grad()\n loss.backward(0)\n optimizer.step()\n \n batch_time.update(time.time() - end)\n end = time.time()\n \n if (i + 1)%print_freq == 0:\n print (\"Mode:{}\\tEpoch{}:{}/{}:\\t\\\n BatchTime:{:.3f},{:.3f}\\t\\\n DataTime:{:.3f},{:.3f}\\t\\\n Loss:{:.3f},{:.3f}\\t\\\n prec:{:.2%},{:.2%}\".format(parse,epoch, i+1, len(data_loader),\n batch_time.val, batch_time.avg,\n data_time.val, data_time.avg,\n losses.val, losses.avg,\n precisions.val, precisions.avg))\n \n return losses.avg, precisons.avg\n \n \n def _parse_data(self, inputs):\n raise NotImplementedError\n def _forward(self, inputs, targets):\n raise NotImplementedError\n \nclass Trainer(BaseTrainer):\n \n def _parse_data(self, inputs):\n imgs, _ , ids, _ = inputs\n inputs = 
Variable(imgs.cuda())\n targets = Variable(ids.cuda())\n return inputs, targets\n \n def _forward(self, inputs, targets):\n \n _, outputs = self.model(*inputs)\n if isinstance(self.criterion, torch.nn.CrossEntropyLoss):\n loss = self.criterion(outputs, targets)\n _, preds = torch.max(outputs.data,1)\n prec = torch.sum(preds == targets.data)/targets.size(0)\n else:\n raise ValueError(\"Unsupported loss:{}\".format(self.criterion))\n return loss,prec\n ","sub_path":"script/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"641022805","text":"import torch\r\nimport numpy as np\r\nfrom torch.utils.data import TensorDataset, DataLoader\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\nclass data1():\r\n def __init__(self,opt,word_index_dict):\r\n self.opt=opt\r\n self.word_index_dict=word_index_dict\r\n\r\n # 返回该词的index,将每条记录转化成由index组成的list,判断其长度不足的补0\r\n def word2index(self,word):\r\n \"\"\"将一个word转换成index\"\"\"\r\n if word in self.word_index_dict:\r\n return self.word_index_dict[word]\r\n else:\r\n return 0\r\n\r\n\r\n def sentence2index(self,sentence):\r\n \"\"\"将一个句子转换成index的list,并截断或补零\"\"\"\r\n word_list = sentence.strip().split()\r\n index_list = list(map(self.word2index, word_list))\r\n len_sen = len(index_list)\r\n if len_sen < self.opt.fix_len:\r\n index_list = index_list + [0] * (self.opt.fix_len - len_sen)\r\n else:\r\n index_list = index_list[:self.opt.fix_len]\r\n return index_list\r\n\r\n # 划分数据集\r\n def get_splite_data(self):\r\n f = open(self.opt.train_data_path)\r\n documents = f.readlines()\r\n sentence = []\r\n for words in documents:\r\n s = self.sentence2index(words)\r\n sentence.append(s)\r\n\r\n x = np.array(sentence)\r\n\r\n \"\"\"取出标签\"\"\"\r\n y = [0] * self.opt.train_pos + [1] * self.opt.train_neg\r\n y = np.array(y)\r\n\r\n train_x, val_x, train_y, val_y = train_test_split(\r\n x, y, test_size=0.1, random_state=0)\r\n\r\n train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))\r\n valid_data = TensorDataset(torch.from_numpy(val_x), torch.from_numpy(val_y))\r\n\r\n train_loader = DataLoader(train_data, shuffle=False, batch_size=self.opt.batch_size)\r\n valid_loader = DataLoader(valid_data, shuffle=False, batch_size=self.opt.batch_size)\r\n\r\n return train_loader, valid_loader\r\n\r\n\r\n # 划分数据集2\r\n def get_splite_data2(self):\r\n f = open(self.opt.train_data_path)\r\n documents = f.readlines()\r\n sentence = []\r\n for words in documents:\r\n s = self.sentence2index(words)\r\n sentence.append(s)\r\n\r\n x = np.array(sentence)\r\n\r\n \"\"\"取出标签\"\"\"\r\n y = [0] * self.opt.train_pos + [1] * self.opt.train_neg\r\n y = np.array(y)\r\n\r\n l = []\r\n for i in range(len(y)):\r\n l.append((x[i], y[i]))\r\n\r\n total=self.opt.train_pos+self.opt.train_neg\r\n\r\n train_dataset, test_dataset = torch.utils.data.random_split(l, [int(total * 0.8), int(total * 0.2)])\r\n train_data = DataLoader(train_dataset, self.opt.batch_size, False)\r\n test_data = DataLoader(test_dataset, self.opt.batch_size, False)\r\n\r\n return train_data, test_data\r\n\r\n\r\n # ======================================================================================================================\r\n # 获得训练集\r\n # ======================================================================================================================\r\n def get_trainset(self):\r\n f = open(self.opt.train_data_path)\r\n documents = f.readlines()\r\n 
sentence = []\r\n for words in documents:\r\n s = self.sentence2index(words)\r\n sentence.append(s)\r\n\r\n x = np.array(sentence)\r\n\r\n \"\"\"取出标签\"\"\"\r\n y = [0] * self.opt.train_pos + [1] * self.opt.train_neg\r\n y = np.array(y)\r\n\r\n train_data = TensorDataset(torch.from_numpy(x), torch.from_numpy(y))\r\n train_loader = DataLoader(train_data, shuffle=False, batch_size=self.opt.batch_size)\r\n\r\n return train_loader\r\n\r\n\r\n # ======================================================================================================================\r\n # 获得测试集\r\n # ======================================================================================================================\r\n def get_testset(self):\r\n f = open(self.opt.test_data_path)\r\n documents = f.readlines()\r\n sentence = []\r\n for words in documents:\r\n s = self.sentence2index(words)\r\n sentence.append(s)\r\n\r\n x = np.array(sentence)\r\n\r\n \"\"\"取出标签\"\"\"\r\n y = [0] * self.opt.test_pos + [1] * self.opt.test_neg\r\n y = np.array(y)\r\n\r\n test_data = TensorDataset(torch.from_numpy(x), torch.from_numpy(y))\r\n test_loader = DataLoader(test_data, shuffle=False, batch_size=self.opt.batch_size)\r\n\r\n return test_loader\r\n","sub_path":"self_attention/data/dataset1.py","file_name":"dataset1.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"133683156","text":"# ---------------------------------------------------------------------------\n# Cosmica - All rights reserved by NeuroJump Trademark 2018\n# marketstat.py\n# Written by Chris Lewis\n# ---------------------------------------------------------------------------\n# This represents a Galactic Trade Market Statistic for a round of play\n# ---------------------------------------------------------------------------\nfrom anw.func import root, globals\n\nclass MarketStat(root.Root):\n \"\"\"A MarketStat represents the Market Statistics for a round of play, id=round\"\"\"\n def __init__(self, args):\n # Attributes\n self.id = str() # Unique Game Object ID = round\n self.avgSoldAL = float()\n self.avgSoldEC = float()\n self.avgSoldIA = float()\n self.sumSoldAL = float()\n self.sumSoldEC = float()\n self.sumSoldIA = float()\n self.volSoldAL = float()\n self.volSoldEC = float()\n self.volSoldIA = float()\n self.defaultAttributes = ('id','avgSoldAL','sumSoldAL','volSoldAL',\n 'avgSoldEC','sumSoldEC','volSoldEC',\n 'avgSoldIA','sumSoldIA','volSoldIA')\n self.setAttributes(args)\n # set the initial avg costs in case the market is stagnant.\n self.setInitialStat()\n \n def setInitialStat(self):\n \"\"\"Set the initial market stats based on the global difference in production of the basic resources\"\"\"\n self.avgSoldAL = globals.cityCRGen/globals.cityALGen\n self.avgSoldEC = globals.cityCRGen/globals.cityECGen\n self.avgSoldIA = globals.cityCRGen/globals.cityIAGen\n ","sub_path":"anw/Packages/anw/aw/marketstat.py","file_name":"marketstat.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"261764375","text":"# -*- coding:utf-8 -*-\n\"\"\"\n\n\"\"\"\nfrom sklearn.compose import ColumnTransformer, make_column_transformer\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import OneHotEncoder, PolynomialFeatures, FunctionTransformer\nfrom sklearn.impute import SimpleImputer\nfrom hypernets.frameworks.ml.sklearn_pandas import DataFrameMapper\nimport numpy as 
np\nimport pandas as pd\nfrom hypernets.frameworks.ml.column_selector import *\nfrom hypernets.frameworks.ml.sklearn_ex import MultiLabelEncoder\n\n\ndef get_df():\n X = pd.DataFrame(\n {\n \"a\": ['a', 'b', np.nan],\n \"b\": list(range(1, 4)),\n \"c\": np.arange(3, 6).astype(\"u1\"),\n \"d\": np.arange(4.0, 7.0, dtype=\"float64\"),\n \"e\": [True, False, True],\n \"f\": pd.Categorical(['c', 'd', np.nan]),\n \"g\": pd.date_range(\"20130101\", periods=3),\n \"h\": pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"i\": pd.date_range(\"20130101\", periods=3, tz=\"CET\"),\n \"j\": pd.period_range(\"2013-01\", periods=3, freq=\"M\"),\n \"k\": pd.timedelta_range(\"1 day\", periods=3),\n \"l\": [1, 10, 1000]\n }\n )\n y = [1, 1, 0]\n return X, y\n\n\nclass Test_Transformer():\n def test_func_transformer(self):\n dfm = DataFrameMapper(\n [(column_object_category_bool, [\n SimpleImputer(strategy='constant'),\n MultiLabelEncoder(),\n ]\n ),\n ],\n input_df=True,\n df_out=True,\n df_out_dtype_transforms=[\n (column_object, 'category')\n ]\n )\n X, y = get_df()\n x_new = dfm.fit_transform(X, y)\n assert x_new.dtypes.to_list() == [pd.CategoricalDtype(categories=[0, 1, 2], ordered=False),\n pd.CategoricalDtype(categories=[0, 1], ordered=False),\n pd.CategoricalDtype(categories=[0, 1, 2], ordered=False)]\n\n def test_pca(self):\n ct = make_column_transformer(\n (PCA(2), column_number_exclude_timedelta)\n )\n\n X, y = get_df()\n x_new = ct.fit_transform(X, y)\n assert x_new.shape == (3, 2)\n\n dfm = DataFrameMapper(\n [(column_number_exclude_timedelta, PCA(2)),\n (column_object_category_bool, [SimpleImputer(strategy='constant'), OneHotEncoder()]),\n (column_number_exclude_timedelta, PolynomialFeatures(2)),\n ], input_df=True, df_out=True\n )\n x_new = dfm.fit_transform(X, y)\n assert x_new.columns.to_list() == ['b_c_d_l_0', 'b_c_d_l_1', 'a_a', 'a_b', 'a_missing_value', 'e_False',\n 'e_True', 'f_c', 'f_d', 'f_missing_value', '1', 'b', 'c', 'd', 'l',\n 'b^2', 'b c', 'b d', 'b l', 'c^2', 'c d', 'c l', 'd^2', 'd l', 'l^2']\n","sub_path":"tests/hypergbm/sklearn_transformer_test.py","file_name":"sklearn_transformer_test.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"348015538","text":"\"\"\"empty message\n\nRevision ID: dfae13cbd321\nRevises: 5080958356d4\nCreate Date: 2021-05-12 15:53:03.748753\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'dfae13cbd321'\ndown_revision = '5080958356d4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('task', sa.Column('goal', sa.Integer(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('task', 'goal')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/dfae13cbd321_.py","file_name":"dfae13cbd321_.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"169499903","text":"import curses\nimport time\n\nstart_time = 0\nwait_space_start = 0\nwait_space = 2.5\n\nsolving = False\n\nsolves = []\n\ndef updateSolves(scr):\n scr.clear()\n lowest = float('inf')\n highest = 0\n\n lines = []\n solves_checked = []\n\n for index, solve in enumerate(solves):\n if index >= curses.LINES:\n break\n\n solve = round(solve, 4)\n\n if highest < solve:\n highest = solve\n\n if lowest > solve:\n lowest = solve\n \n solves_checked.append(solve)\n\n for index, solve in enumerate(solves_checked):\n line = f\"{solve}s\"\n\n try:\n if solves_checked[index + 1] > solve:\n line += f\" -{round(solves_checked[index + 1] - solve, 2)}\"\n\n elif solves_checked[index + 1] < solve:\n line += f\" +{round(solve - solves_checked[index + 1], 2)}\"\n\n except IndexError:\n pass\n\n if solve == highest:\n line += \" Worst\"\n\n if solve == lowest:\n line += \" Best\"\n\n lines.append(line)\n\n for index, line in enumerate(lines):\n scr.addstr(index, 0, line)\n\ndef main(scr):\n # Clear screen\n scr.clear()\n\n text = \"0s\"\n solving = False\n scr.nodelay(1)\n\n while True:\n scr.refresh()\n\n key = None\n\n try:\n key = scr.getkey()\n except Exception as e:\n time.sleep(1/60)\n\n if solving:\n text = f\"{round(time.time() - start_time, 2)}s\"\n\n scr.addstr(curses.LINES // 2, curses.COLS // 2 - len(text) // 2, text)\n guide = \"Press space to start the timer\"\n scr.addstr((curses.LINES // 2) + 5, curses.COLS // 2 - len(guide) // 2, guide)\n\n if key == ' ':\n if solving:\n # Rubik's cube finished!\n solving = False\n solve_time = time.time() - start_time\n solves.insert(0, solve_time)\n\n updateSolves(scr)\n\n else:\n # Start solving\n solving = True\n start_time = time.time()\n\ncurses.wrapper(main)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"36658091","text":"from django.urls import path, include\n\nfrom .views import *\n\nurlpatterns = [\n path('', HomeView.as_view(), name='home'),\n \n path('addInfo',addInfo,name='addInfo'),\n path('lists',lists,name='lists'),\n path('diss',dissatisfy, name='diss'),\n path('profile', profile, name = 'profile'),\n path('book', book, name= 'book'),\n path('address', address, name='address'),\n path('addview',addview, name='addview'),\n path('ins',ins, name='ins'),\n path('go',go, name='go'),\n path('schedule',schedule, name='schedule'),\n path('satisfaction_check', satisfaction_check, name = \"satisfaction_check\"),\n path('doctor_change', doctor_change, name=\"doctor_change\"),\n]","sub_path":"Cipher-one-Cipher/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"506105538","text":"##Please note: This file is not a part of any container as this mimics the data being streamed to our kafka from external\n## sources and as such, we assume this is NOT a part of our BDP and external data source\nfrom kafka import KafkaProducer\nimport csv\nimport time\n\n# The name of the server and topic on which we publish\nbootstrap_servers = ['localhost:9092']\ntopicName = 'mysimbdp'\n\nproducer 
= KafkaProducer(bootstrap_servers = bootstrap_servers)\nproducer = KafkaProducer()\n\n# Reads data from the sourse CSV and calls the API for pushin the data to kafka\n# by default this is pushed to a partition chosen by Kafka using round robin algo\nwith open(\"../../data/data.csv\") as file: \n data = file.read()\n dataRow = data.splitlines()\n for idx, i in enumerate(dataRow):\n time.sleep(5)\n ack = producer.send(topicName, str.encode(i))\n metadata = ack.get()\n print(\"Published row \" + str(idx) + \".\")\n\n","sub_path":"code/mysimbdp-broker/connect_to_kafka.py","file_name":"connect_to_kafka.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"82284459","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 23 22:34:37 2016\n\n@author: Erin\n\"\"\"\n\ndef F(n):\n if n < 2:\n print (n)\n return n \n else:\n print (F(n-2) + F(n-1))\n return F(n-2) + F(n-1)","sub_path":"FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"420850769","text":"import argparse\nimport os\nimport time\nimport timeit\nimport pdb\nimport shutil\nimport torch\nimport torchvision\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nfrom torch.nn.utils import clip_grad_norm_\n\nfrom dataset import TSNDataSet, TSNRGBFlowSet\nfrom models import TSN, AdversarialNetwork, Classifier, init_weights, TCNNet, Classifier1, RandomLayer\nfrom transforms import *\nfrom opts import parser\nfrom label_map import HMDB_UCF\nfrom torchvision import transforms\nimport torch.nn as nn\nfrom weight_init import weight_init\n\nimport os\n\nbest_prec1_rgb = 0\nbest_prec1_flow = 0\n\nglobal lmap\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch implementation of Temporal Segment Networks\")\n parser.add_argument('dataset', type=str, choices=['ucf101', 'hmdb51', 'kinetics', 'jester', 'something', 'phav', 'hmdb'])\n parser.add_argument('modality', type=str, choices=['RGB', 'Flow', 'RGBDiff'])\n parser.add_argument('test_rgb_list', type=str)\n parser.add_argument('test_flow_list', type=str)\n\n # ========================= Model Configs ==========================\n parser.add_argument('--arch', type=str, default=\"resnet101\")\n parser.add_argument('--test_segments', type=int, default=25)\n parser.add_argument('--consensus_type', type=str, default='avg',\n choices=['avg', 'max', 'topk', 'identity', 'rnn', 'cnn'])\n\n parser.add_argument('--dropout', '--do', default=0.5, type=float,\n metavar='DO', help='dropout ratio (default: 0.5)')\n parser.add_argument('--loss_type', type=str, default=\"nll\",\n choices=['nll'])\n\n # ========================= Runtime Configs ==========================\n parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument('--snapshot_pref', type=str, default=\"\")\n parser.add_argument('--gpus', nargs='+', type=int, default=None)\n parser.add_argument('--flow_prefix', default=\"\", type=str)\n parser.add_argument('--test_crops', type=int, default=1)\n\n global args, best_prec1_rgb, best_prec1_flow, gpu_id\n args = parser.parse_args()\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(val) for val in args.gpus])\n gpu_id = 
torch.cuda.current_device()\n\n global lmap\n\n add_idx = 0\n if args.dataset == 'ucf101':\n lmap = HMDB_UCF() \n num_class = 101\n elif args.dataset == 'hmdb51':\n lmap = HMDB_UCF()\n num_class = 51\n elif args.dataset == 'kinetics':\n num_class = 400\n elif args.dataset == 'jester':\n num_class = 7\n elif args.dataset == 'something':\n num_class = 25\n elif args.dataset == 'phav':\n num_class = 21\n add_idx = -1\n elif args.dataset == 'hmdb':\n num_class = 21\n else:\n raise ValueError('Unknown dataset '+args.dataset)\n\n model_rgb = TSN(num_class, 1, 'RGB', base_model=args.arch, dropout=args.dropout)\n model_flow = TSN(num_class, 1, 'Flow', base_model=args.arch, dropout=args.dropout)\n classifier = Classifier(num_class, args.test_segments, \\\n model_rgb.output_dim(), consensus_type=args.consensus_type)\n classifier.apply(weight_init)\n \n if args.resume:\n if os.path.isfile(args.resume):\n print((\"=> loading checkpoint '{}'\".format(args.resume)))\n checkpoint = torch.load(args.resume)\n model_rgb_state_dict = checkpoint['model_rgb_state_dict']\n model_flow_state_dict = checkpoint['model_flow_state_dict']\n classifier_state_dict = checkpoint['classifier_state_dict']\n random_layer = checkpoint['random_layer']\n model_rgb.load_state_dict(model_rgb_state_dict)\n model_flow.load_state_dict(model_flow_state_dict)\n classifier.load_state_dict(classifier_state_dict)\n else:\n print((\"=> no checkpoint found at '{}'\".format(args.resume)))\n \n crop_size = model_rgb.crop_size\n scale_size = model_rgb.scale_size\n input_mean = model_rgb.input_mean\n input_std = model_rgb.input_std\n train_augmentation = model_rgb.get_augmentation()\n\n if args.test_crops == 1:\n cropping = torchvision.transforms.Compose([\n GroupScale(model_rgb.scale_size),\n GroupCenterCrop(model_rgb.input_size),\n ])\n elif args.test_crops == 10:\n cropping = torchvision.transforms.Compose([\n GroupOverSample(model_rgb.input_size, model_rgb.scale_size)\n ])\n else:\n raise ValueError(\"Only 1 and 10 crops are supported while we got {}\".format(args.test_crops))\n\n normalize = GroupNormalize(input_mean, input_std)\n data_length = 1\n\n rgb_str = ''\n if args.dataset == 'jester':\n rgb_str = \"{:05d}.jpg\"\n flow_str = \"flow_{}_{:05d}.jpg\"\n elif args.dataset == 'something':\n rgb_str = \"frame{:04d}.jpg\"\n flow_str = \"flow_{}_{:05d}.jpg\"\n elif args.dataset == 'phav':\n rgb_str = 'img_{:05d}.jpg'\n flow_str = \"flow_{}_{:05d}.jpg\"\n else:\n rgb_str = \"rgb_x_{:03d}.jpg\"\n flow_str = \"flow_{}_{:03d}.jpg\"\n\n test_rgb_transform = torchvision.transforms.Compose([\n cropping,\n Stack(roll=args.arch == 'BNInception'),\n ToTorchFormatTensor(div=args.arch != 'BNInception'),\n GroupNormalize(model_rgb.input_mean, model_rgb.input_std),\n ])\n\n crop_size = model_flow.crop_size\n scale_size = model_flow.scale_size\n input_mean = model_flow.input_mean\n input_std = model_flow.input_std\n train_augmentation = model_flow.get_augmentation()\n\n if args.test_crops == 1:\n cropping = torchvision.transforms.Compose([\n GroupScale(model_flow.scale_size),\n GroupCenterCrop(model_flow.input_size),\n ])\n elif args.test_crops == 10:\n cropping = torchvision.transforms.Compose([\n GroupOverSample(model_flow.input_size, model_flow.scale_size)\n ])\n else:\n raise ValueError(\"Only 1 and 10 crops are supported while we got {}\".format(args.test_crops))\n\n normalize = GroupNormalize(input_mean, input_std)\n data_length = 5\n\n test_flow_transform = torchvision.transforms.Compose([\n cropping,\n Stack(roll=args.arch == 'BNInception'),\n 
ToTorchFormatTensor(div=args.arch != 'BNInception'),\n GroupNormalize(model_flow.input_mean, model_flow.input_std),\n ])\n\n test_loader = torch.utils.data.DataLoader(\n TSNRGBFlowSet([args.test_rgb_list, args.test_flow_list], num_segments=args.test_segments,\n new_length=[1,5],\n image_tmpl=[rgb_str, flow_str],\n rgb_transform=test_rgb_transform, \n flow_transform=test_flow_transform,\n test_mode=True, add_idx=add_idx),\n batch_size=1, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n \n model_rgb = torch.nn.DataParallel(model_rgb, device_ids=[i for i in range(len(args.gpus))]).cuda()\n model_flow = torch.nn.DataParallel(model_flow, device_ids=[i for i in range(len(args.gpus))]).cuda()\n classifier = torch.nn.DataParallel(classifier, device_ids=[i for i in range(len(args.gpus))]).cuda()\n random_layer.cuda()\n\n #cudnn.benchmark = True\n cudnn.enabled = False\n \n class_loss = torch.nn.CrossEntropyLoss()\n prec1 = validate(test_loader, model_rgb, model_flow, random_layer, classifier, class_loss)\n print(\"Test accuracy is {}%.\".format(prec1))\n\ndef validate(test_loader, model_rgb, model_flow, random_layer, classifier, criterion, logger=None):\n global lmap\n # switch to evaluate mode\n model_flow.eval()\n model_rgb.eval()\n classifier.eval()\n\n all_output = []\n all_target = []\n with torch.no_grad():\n for batch_id, (rgb_input, flow_input, target) in enumerate(test_loader):\n rgb_input, flow_input, target = rgb_input.cuda(), flow_input.cuda(), target.cuda()\n rgb_input = rgb_input.view((args.test_segments * args.test_crops, -1)+ rgb_input.size()[-2:])\n flow_input = flow_input.view((args.test_segments * args.test_crops, -1)+ flow_input.size()[-2:])\n rgb_features = model_rgb(rgb_input)\n flow_features = model_flow(flow_input) \n rgb_features = rgb_features.view(args.test_segments, args.test_crops, 1024).permute(1,0,2).contiguous()\n flow_features = flow_features.view(args.test_segments, args.test_crops, 1024).permute(1,0,2).contiguous()\n mix_feature = random_layer([rgb_features.view(-1, 1024), flow_features.view(-1, 1024)]).view(args.test_crops, args.test_segments, -1)\n output = classifier(mix_feature)\n output = output.view(-1, output.size(-1)).mean(0).cpu()\n _, pred = torch.max(output, dim=0, keepdim=True)\n all_output.append(pred.item())\n all_target.append(target.item())\n video_pred, video_labels = torch.Tensor(all_output).long(), torch.Tensor(all_target).long()\n prec1 = 100 * float(np.sum(np.asarray(video_pred)==np.asarray(video_labels))) / len(video_pred)\n print('Testing Results: Prec@1 {:.3f}'.format(prec1))\n\n return prec1\n\nif __name__ == '__main__':\n main()\n","sub_path":"test_twostream.py","file_name":"test_twostream.py","file_ext":"py","file_size_in_byte":9623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"630236337","text":"#!/usr/bin/python\n\"\"\"Instrument custodian relationship.\"\"\"\nfrom peewee import ForeignKeyField, Expression, OP, CompositeKey\nfrom metadata.orm.utils import index_hash\nfrom metadata.orm.users import Users\nfrom metadata.orm.instruments import Instruments\nfrom metadata.orm.base import DB\nfrom metadata.rest.orm import CherryPyAPI\n\n\nclass InstrumentCustodian(CherryPyAPI):\n \"\"\"\n Relates proposals and instrument objects.\n\n Attributes:\n +------------+--------------------------------------------+\n | Name | Description |\n +============+============================================+\n | instrument | Link to the Instrument model |\n 
+------------+--------------------------------------------+\n | custodian | User who is responsible for the instrument |\n +------------+--------------------------------------------+\n \"\"\"\n\n instrument = ForeignKeyField(Instruments, related_name='custodians')\n custodian = ForeignKeyField(Users, related_name='instruments')\n\n # pylint: disable=too-few-public-methods\n class Meta(object):\n \"\"\"PeeWee meta class contains the database and the primary key.\"\"\"\n\n database = DB\n primary_key = CompositeKey('instrument', 'custodian')\n # pylint: enable=too-few-public-methods\n\n @staticmethod\n def elastic_mapping_builder(obj):\n \"\"\"Build the elasticsearch mapping bits.\"\"\"\n super(InstrumentCustodian, InstrumentCustodian).elastic_mapping_builder(obj)\n obj['instrument_id'] = obj['custodian_id'] = {'type': 'integer'}\n\n def to_hash(self, recursion_depth=1):\n \"\"\"Convert the object to a hash.\"\"\"\n obj = super(InstrumentCustodian, self).to_hash(recursion_depth)\n obj['_id'] = index_hash(int(self.custodian.id), int(self.instrument.id))\n obj['instrument_id'] = int(self.instrument.id)\n obj['custodian_id'] = int(self.custodian.id)\n return obj\n\n def from_hash(self, obj):\n \"\"\"Convert the hash into the object.\"\"\"\n super(InstrumentCustodian, self).from_hash(obj)\n if 'instrument_id' in obj:\n self.instrument = Instruments.get(Instruments.id == obj['instrument_id'])\n if 'custodian_id' in obj:\n self.custodian = Users.get(Users.id == obj['custodian_id'])\n\n def where_clause(self, kwargs):\n \"\"\"Where clause for the various elements.\"\"\"\n where_clause = super(InstrumentCustodian, self).where_clause(kwargs)\n if 'instrument_id' in kwargs:\n instrument = Instruments.get(Instruments.id == kwargs['instrument_id'])\n where_clause &= Expression(InstrumentCustodian.instrument, OP.EQ, instrument)\n if 'custodian_id' in kwargs:\n user = Users.get(Users.id == kwargs['custodian_id'])\n where_clause &= Expression(InstrumentCustodian.custodian, OP.EQ, user)\n return where_clause\n","sub_path":"metadata/orm/instrument_custodian.py","file_name":"instrument_custodian.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"408110773","text":"from aiohttp.web import middleware\n\nfrom .config import Configuration, check_config\nfrom .token import decode_token\n\n\nclass AuthUser(object):\n def __init__(self, values):\n self.values = values\n\n def __getattribute__(self, key):\n if key in self.values:\n return self.values[key]\n super().__getattribute__(key)\n\n def __str__(self):\n return str(self.values)\n\n\n@middleware\nasync def auth_middleware(request, handler):\n check_config()\n\n if request.middleware_configs.get(\"login_required\"):\n auth_token = request.cookies.get(Configuration.cookie_name)\n\n if not auth_token:\n auth_header = request.headers.get(\"Authorization\", \"\")\n if auth_header.startswith(\"Bearer \"):\n auth_token = auth_header.split()[-1]\n\n if not auth_token:\n raise Configuration.exc_cls(\"Login Required\", code=401)\n\n request.current_user = AuthUser(decode_token(auth_token))\n\n Configuration.logger.debug(\n f\"Authenticated User {request.current_user}\"\n )\n\n response = await handler(request)\n return response\n","sub_path":"vortex/plugins/auth/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} 
+{"seq_id":"73843553","text":"#!/usr/bin/env python\n# coding=utf-8\nimport subprocess, common, parse_syntax, re\nfrom collections import defaultdict\nimport json, os\n\nNEW_LEXICON_EN = {\"what\": {\n \"Pro\": {\n \"tab\": [\"pn7\",\"pn6\"]\n }\n },\n \"which\": {\n \"Pro\": {\n \"tab\": [\"pn7\",\"pn6\"]\n }\n },\n \"that\": {\n \"Pro\": {\n \"tab\": [\"pn7\",\"pn6\"]\n }\n },\n \"although\": {\n \"Adv\": {\n \"tab\": [\"b1\"]}\n },\n \"as\" : {\n \"P\": {\n \"tab\": [\"pp\"]}\n },\n \"required\" : {\n \"A\": {\n \"tab\": [\"a1\"]}\n },\n \"infer\" : {\n \"V\": {\n \"tab\": [\"v13\"]}\n }\n}\n\nNEW_LEXICON_FR = {\n u\"déduire\" : {\"V\": {\n \"tab\": \"v113\",\n \"aux\": [\"av\"]\n }},\n u\"inférer\" : {\n \"V\": {\n \"tab\": \"v28\",\n \"aux\": [\"av\"]\n }},\n u\"suggérer\" : {\n \"V\": {\n \"tab\": \"v28\",\n \"aux\": [\"av\"]\n }}\n \n }\n\nNEW_LEXICON = {\n \"fr\" : NEW_LEXICON_FR,\n \"en\" : NEW_LEXICON_EN\n }\n\ndef as_paragraphs(ls):\n paras = []\n tmp = []\n for item in ls:\n if item == u'\"{0}\"'.format(common.NEWLINE_COMMAND):\n paras.append(tmp)\n tmp = []\n else :\n tmp.append(item)\n if tmp:\n paras.append(tmp)\n return paras\n\ndef extract_vocab(jsrealcode, d, lang):\n if lang == u'fr' :\n return extract_vocab_fr(jsrealcode, d)\n else :\n return extract_vocab_en(jsrealcode, d)\n\ndef extract_vocab_fr(jsrealcode, d):\n for l in parse_syntax.LEAF_NODES:\n for m in re.finditer(l + '\\(\"(.*?)\"\\)', jsrealcode):\n word = m.groups(1)[0]\n if l == u'N' :\n if word[0].isalpha():\n d[word][u'N'] = {\"g\": \"m\", \"tab\": [\"n3\"]}\n else:\n d[word][u'N'] = {\"g\": \"m\", \"tab\": [\"n3\"]}\n elif l == u'P' :\n d[word][u'P'] = {\"tab\" : [\"pp\"]}\n elif l == u'A' :\n d[word][u'A'] = {\"tab\" : [\"n28\"]}\n elif l == u'Adv' :\n d[word][u'Adv'] = {\"tab\" : [\"av\"]}\n elif l == u'V' :\n d[word][u'V'] = {\"tab\": \"v36\", \"aux\": [\"av\"]}\n elif l == u'Pro' :\n d[word][u'Pro'] = {\"tab\" : [\"pn5\"]}\n elif l == u'D' :\n d[word][u'D'] = {\"tab\" : [\"d3\"]}\n\ndef extract_vocab_en(jsrealcode, d):\n for l in parse_syntax.LEAF_NODES:\n for m in re.finditer(l + '\\(\"(.*?)\"\\)', jsrealcode):\n word = m.groups(1)[0]\n if l == u'N' :\n d[word][u'N'] = {\"tab\" : [\"n1\"]}\n elif l == u'P' :\n d[word][u'P'] = {\"tab\" : [\"pp\"]}\n elif l == u'A' :\n d[word][u'A'] = {\"tab\" : [\"a1\"]}\n elif l == u'Adv' :\n d[word][u'Adv'] = {\"tab\" : [\"b1\"]}\n elif l == u'V' :\n d[word][u'V'] = {\"tab\" : [\"v1\"]}\n elif l == u'Pro' :\n d[word][u'Pro'] = {\"tab\" : [\"pn5\"]}\n elif l == u'D' :\n d[word][u'D'] = {\"tab\" : [\"d3\"]}\n \ndef _extract_all_vocab(jsrealcodes, lang):\n d = defaultdict(dict)\n for jsrealcode in jsrealcodes:\n extract_vocab(jsrealcode, d, lang)\n return {k:v for k, v in d.items()}\n\n # The second vocabulary will override the first, in place.\ndef update_vocabs(v1, v2):\n for word, entry in v2.items():\n if word in v1:\n for syntagm_type, spec in entry.items():\n v1[word][syntagm_type] = spec\n else:\n v1[word] = entry\n\ndef new_vocab(jsrealcodes, lang):\n extracted = _extract_all_vocab(jsrealcodes, lang)\n update_vocabs(extracted, NEW_LEXICON[lang])\n return extracted\n\nclass Realizer():\n def realize(self, code):\n raise NotImplementedError()\n\nclass JSRealBRealizer(Realizer):\n \n def __init__(self, lang):\n self.lang = lang\n \n def realize(self, codes):\n basedir = os.path.dirname(__file__)\n jsrealcmd = os.path.join(basedir, \"jsrealcmd.js\")\n jslib = os.path.join(basedir, 'jsreal', 'JSrealB-EnFr.js')\n extracted_vocab = new_vocab(codes, self.lang)\n return 
subprocess.check_output([\"nodejs\",\n jsrealcmd,\n jslib,\n json.dumps(extracted_vocab),\n json.dumps(codes),\n self.lang])\n\n","sub_path":"gentext/gentext/realizer.py","file_name":"realizer.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"460637078","text":"from flask_assets import Bundle\nfrom .web_app import app, webassets\n\n# Javascript Libraries\njs_libs = Bundle(\"libs/bootstrap-notify/js/bootstrap-notify.js\",\n \"libs/sliptree-bootstrap-tokenfield/dist/bootstrap-tokenfield.min.js\",\n output=\"js/libs.js\")\n\n# CSS libraries\ncss_libs = Bundle(\"libs/bootstrap-notify/css/bootstrap-notify.css\",\n \"libs/bootstrap-notify/css/styles/alert-bangtidy.css\",\n \"libs/sliptree-bootstrap-tokenfield/dist/css/bootstrap-tokenfield.min.css\",\n output=\"css/libs.css\")\n\n# JS main script\njs_main = Bundle(\"js/src/main.js\",\n output=\"js/main.js\")\n\n# CSS main style\ncss_main = Bundle(\"css/src/main.css\",\n output=\"css/main.css\")\n\nwebassets.manifest = 'cache' if not app.debug else False\nwebassets.cache = not app.debug\nwebassets.debug = app.debug\n\nwebassets.register('js_libs', js_libs)\nwebassets.register('css_libs', css_libs)\nwebassets.register('js_main', js_main)\nwebassets.register('css_main', css_main)\n","sub_path":"piston/web_assets.py","file_name":"web_assets.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"314166298","text":"#!/usr/bin/python3\n\nimport roman\n\nsaved = 0\nwith open('p89.txt', 'r') as f:\n for line in f:\n line = line.rstrip()\n n = roman.parse(line)\n r = roman.minimal(n)\n saved += len(line) - len(r)\nprint(saved)\n","sub_path":"solved/p89.py","file_name":"p89.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"504140528","text":"\"\"\"numbers = list(input(\"Enter a sequence of comma separated values: \").split(\", \"))\r\nprint(numbers)\r\nsum = 0\r\nfor number in numbers:\r\n sum = sum+int(number)\r\nprint(sum)\"\"\"\r\n\r\ninNum = input(\"Enter the number: \")\r\nprint(\"The given number is: \",inNum)\r\nsum = 0\r\nfor num in inNum:\r\n sum = sum+int(num)\r\nprint(sum)\r\n\r\n","sub_path":"python/Activity7.py","file_name":"Activity7.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"343928597","text":"# google cloud function\n\nimport os\n# import datetime\nimport requests\nfrom flask import redirect\n# from google.cloud import datastore\n\n\"\"\" ENV VARS\n{\n 'MAILGUN_API_KEY': '',\n 'MAILGUN_DOMAIN_NAME': 'mg.mywellnessplatform',\n 'REDIRECT_SUCCESS_URL: 'https://mywellnessplatform.com',\n 'REDIRECT_FAILURE_URL: 'https://mywellnessplatform.com',\n 'TO_ADDRESS: '',\n 'PROJECT_ID: '',\n 'CONFIRMATION_EMAIL_BODY: 'Thanks for contacting us!',\n 'CONFIRMATION_EMAIL_FROM_NAME: 'myWellnessPlatform',\n}\n\n\"\"\"\n\n\ndef start(request):\n success_url = os.environ.get('REDIRECT_SUCCESS_URL', None)\n fail_url = os.environ.get('REDIRECT_FAILURE_URL', None)\n to_address = str(os.environ.get('TO_ADDRESS', None))\n from_email = request.form['email']\n from_name = request.form['from_name']\n body = request.form['body']\n # hidden spam field\n s_body = request.form['s_body']\n subject = request.form['subject']\n if s_body == '' or s_body is None:\n try:\n response = 
send_email(\n from_email,\n from_name,\n subject,\n body,\n to_address,\n )\n\n if response == 200:\n return redirect(success_url, code=302)\n except:\n pass\n return redirect(fail_url, code=302)\n\n\ndef send_email(from_email, from_name, subject, body, to_address):\n \"\"\"\n Send an email using MailGUN API Client\n \"\"\"\n\n # Initializing important data from environment\n mg_domain = os.environ.get('MAILGUN_DOMAIN_NAME', None)\n mg_key = os.environ.get('MAILGUN_API_KEY', None)\n\n # Preparing the data to be sent as email\n url = 'https://api.mailgun.net/v3/{}/messages'.format(mg_domain)\n auth = ('api', mg_key)\n data = {\n 'from': '{} <{}>'.format(from_name, from_email),\n 'to': to_address,\n 'subject': subject,\n 'text': body,\n }\n\n # Sending the email\n response = requests.post(url, auth=auth, data=data)\n return response.status_code\n\n\n\"\"\"\ndef save_user_data(FROM_NAME, FROM_EMAIL, body):\n\n # Save the data in our DB as user has not got the email\n\n # Initializing the data where PROJECT_ID = GCP Project ID\n PROJECT_ID = os.environ.get('PROJECT_ID', None)\n client = datastore.Client(PROJECT_ID)\n\n key = client.key('Task')\n\n # Create a new entity\n task = datastore.Entity(key, exclude_from_indexes=['message'])\n task.update({\n 'created': datetime.datetime.now(),\n 'name': FROM_NAME,\n 'email': FROM_EMAIL,\n 'message': body\n })\n\n # Upload the data\n client.put(task)\n\n return client.key\n\n\n\ndef send_confirmation_email(FROM_EMAIL, TO_ADDRESS):\n # Send a confirmation email to the user saying we're received their email.\n CONFIRMATION_EMAIL_TO_ADDRESS = FROM_EMAIL\n CONFIRMATION_EMAIL_FROM_ADDRESS = TO_ADDRESS\n CONFIRMATION_SUBJECT = 'Thank you for contacting us!'\n CONFIRMATION_EMAIL_BODY = os.environ.get('CONFIRMATION_EMAIL_BODY', None)\n CONFIRMATION_EMAIL_FROM_NAME = os.environ.get(\n 'CONFIRMATION_EMAIL_FROM_NAME', None)\n\n send_email(CONFIRMATION_EMAIL_FROM_ADDRESS, CONFIRMATION_EMAIL_FROM_NAME,\n CONFIRMATION_SUBJECT, CONFIRMATION_EMAIL_BODY, CONFIRMATION_EMAIL_TO_ADDRESS)\n\"\"\"","sub_path":"ops/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"438109432","text":"import numpy as np\nfrom pathlib import Path\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications.vgg19 import VGG19, preprocess_input\nfrom tqdm import tqdm\nfrom utils.fileutils import list_files_recursively, CacheStorage\n\n\nclass ImageNetPreprocessGenerator(ImageDataGenerator):\n def flow_from_directory(self,\n directory,\n target_size=(256, 256),\n batch_size=32,\n interpolation=\"nearest\",\n shuffle=True):\n self.batches = super(ImageNetPreprocessGenerator,\n self).flow_from_directory(\n directory=directory,\n target_size=target_size,\n batch_size=batch_size,\n interpolation=interpolation,\n shuffle=shuffle,\n )\n while True:\n batch_x, batch_y = next(self.batches)\n yield (preprocess_input(batch_x), batch_y)\n\n def reset(self):\n self.batches.reset()\n\n\ndef get_averaged_validation_image(directory):\n val_imgs, _ = get_validation_data(directory) # preprocess済\n val_img_average = val_imgs.mean(axis=0)\n return val_img_average\n\n\ndef get_validation_dataflow(directory, batch_size=50):\n \"\"\"validation_dataflow(preprocessを含むので後でもう一度使わないこと)\n Arguments:\n directory {[type]} -- [description]\n\n Keyword Arguments:\n batch_size {int} -- [description] (default: {50})\n\n Returns:\n [type] -- [description]\n \"\"\"\n\n 
val_datagen = ImageNetPreprocessGenerator()\n val_dataflow = val_datagen.flow_from_directory(\n directory=str(directory),\n target_size=(224, 224),\n batch_size=batch_size,\n interpolation=\"bicubic\",\n shuffle=False,\n )\n return val_dataflow\n\n\ndef load_imgs_through_dataflow(directory):\n val_dataflow = get_validation_dataflow(directory, batch_size=1)\n\n val_imgs = []\n val_labels = []\n\n num_imgs = 50000\n\n for i, (img, label) in enumerate(tqdm(val_dataflow, total=num_imgs)):\n val_imgs.append(img[0])\n val_labels.append(label[0])\n if i == (num_imgs - 1):\n break\n\n val_imgs = np.array(val_imgs)\n val_labels = np.array(val_labels)\n\n return val_imgs, val_labels\n\n\ndef get_saliency_maps(cache_directory):\n saliency_maps_dir = Path(cache_directory)\n\n cache = CacheStorage()\n print(\"Try to load cache file\")\n saliency_maps = cache.get_cache(saliency_maps_dir / \"saliency_maps\")\n if saliency_maps is None:\n print(\"Making cache file\")\n npz_path_list = list_files_recursively(saliency_maps_dir, [\"npz\"])\n saliency_maps = np.array(\n [np.load(npz_path)[\"x\"] for npz_path in tqdm(npz_path_list)])\n cache.set_cache(saliency_maps_dir / \"saliency_maps\", saliency_maps)\n return saliency_maps\n\n\ndef get_validation_data(cache_directory):\n valid_data_dir = Path(cache_directory)\n val_imgs = []\n val_labels = []\n\n cache = CacheStorage()\n print(\"Try to load cache file\")\n val_imgs = cache.get_cache(valid_data_dir / \"val_imgs\")\n val_labels = cache.get_cache(valid_data_dir / \"val_labels\")\n if val_imgs is None or val_labels is None:\n print(\"Making cache file\")\n val_imgs, val_labels = load_imgs_through_dataflow(valid_data_dir)\n cache.set_cache(valid_data_dir / \"val_imgs\", val_imgs)\n cache.set_cache(valid_data_dir / \"val_labels\", val_labels)\n return val_imgs, val_labels\n\n\nclass PredictionKeeper(object):\n def __init__(self, predicts):\n self.predicts = predicts\n\n # pred_labels[5,0] <- 5+1番目の入力に対して最も自信のあるクラス\n self.predicted_labels = predicts.argsort()[:, ::-1]\n\n def get_index_pred_n(self, n, target_id):\n \"\"\"尤度n番目の予測結果がtarget_idであるインデックスを取ってくる\n \"\"\"\n return np.where(self.predicted_labels[:, n] == target_id)\n\n def get_all_nth_confidence_index(self, n):\n return self.predicted_labels[:, n]\n\n\ndef get_predkeeper(img_dir_path, modelpath=None):\n img_dir_path = Path(img_dir_path)\n cache_path = str((img_dir_path / \"preds\"))\n preds = None\n\n cache = CacheStorage()\n\n preds = cache.get_cache(cache_path)\n if preds is None:\n val_imgs, _ = get_validation_data(img_dir_path)\n model = VGG16(weights=\"imagenet\") if modelpath is None else None\n preds = model.predict(val_imgs)\n cache.set_cache(cache_path, preds)\n predkeeper = PredictionKeeper(preds)\n return predkeeper\n\n\ndef evaluate_validation():\n valid_data_dir = Path(\"./data/ILSVRC2012_img_val_centercrop/\")\n val_imgs, val_labels = get_validation_data(valid_data_dir)\n\n model = VGG19(weights='imagenet', )\n model.compile(\n loss='categorical_crossentropy',\n optimizer='SGD',\n metrics=[\"accuracy\", \"top_k_categorical_accuracy\"],\n )\n print(model.metrics_names)\n print(model.evaluate(val_imgs, val_labels))\n\n\nclass AblateEvaluator(object):\n def __init__(self, model, averaged_X, patch_size=9):\n self.averaged_X = averaged_X # averaged image through all dataset\n self.model = model\n self.rows = averaged_X.shape[0]\n self.cols = averaged_X.shape[1]\n self.patch_size = patch_size\n\n def pre_evaluate(self, X, Y, saliency_maps):\n assert X.shape[0:3] == saliency_maps.shape\n 
predictions_before_ablation = self.get_predictions_for_own_class(\n self.model, X, Y)\n print(\"before:\", predictions_before_ablation.mean())\n \n return predictions_before_ablation\n \n def evaluate(self, X, Y, predictions_before_ablation, saliency_maps):\n assert X.shape[0:3] == saliency_maps.shape\n \n ablated_X = self.ablate(X, saliency_maps)\n\n predictions_after_ablation = self.get_predictions_for_own_class(\n self.model, ablated_X, Y)\n print(\"after:\", predictions_after_ablation.mean())\n print(\"diff:\", (predictions_before_ablation - predictions_after_ablation).mean())\n print(\"std:\", (predictions_before_ablation - predictions_after_ablation).std())\n return (predictions_before_ablation - predictions_after_ablation).mean()\n\n def ablate(self, X, saliency_maps):\n \"\"\"与えられた画像のうちsaliency mapの最高値を含むパッチについてデータセットの平均画像で置換\n\n Arguments:\n X {[type]} -- [description]\n saliency_maps {[type]} -- [description]\n\n Returns:\n [type] -- [description]\n \"\"\"\n assert len(X.shape) == 4\n\n ablated_X = np.copy(X)\n\n for i, saliency_map in enumerate(saliency_maps):\n row, col = np.unravel_index(saliency_map.argmax(),\n saliency_map.shape)\n\n top = row - self.patch_size // 2\n bottom = row + self.patch_size // 2 + 1\n left = col - self.patch_size // 2\n right = col + self.patch_size // 2 + 1\n\n if top < 0:\n top = 0\n if bottom > self.rows - 1:\n bottom = self.rows - 1\n if left < 0:\n left = 0\n if right > self.cols - 1:\n right = self.cols - 1\n\n ablated_X[i, top:bottom, left:right, :] = self.averaged_X[\n top:bottom, left:right]\n\n return ablated_X\n\n @classmethod\n def get_predictions_for_own_class(cls, model, X, Y):\n return model.predict(X, verbose=1)[Y.astype(bool)]\n\n\nclass PointingGame(object):\n def __init__(self, masks):\n self.masks = masks\n self.num_samples = masks.shape[0]\n\n def calc_hitratio(self, binary_saliency_maps):\n assert self.masks.shape == binary_saliency_maps.shape\n num_fp_each_map = np.logical_and(\n self.masks, binary_saliency_maps).sum(axis=(1, 2))\n num_miss = np.count_nonzero(\n np.logical_or(\n num_fp_each_map, binary_saliency_maps.sum(axis=(1, 2)) == 0))\n hitratio = 1 - num_miss / self.num_samples\n num_miss2 = np.count_nonzero(\n np.logical_or(\n num_fp_each_map, binary_saliency_maps.sum(axis=(1, 2)) == 0), axis=1)\n hitratio2 = np.average(1 - num_miss2)\n std = np.std(1 - num_miss2)\n print(hitratio)\n print(num_miss2)\n print(std)\n return hitratio\n\n\ndef safe_divide(x, y):\n if y == 0:\n return 0\n else:\n return x / y\n\n\nclass PRevaluator(object):\n def __init__(self, masks):\n self.masks = masks.astype(bool) # bboxがFalseのマスク\n self.num_real_positives = np.logical_not(self.masks).sum()\n self.num_real_negatives = self.masks.sum()\n self.tp = None\n\n def get_tp(self, binary_saliency_maps, masks=None):\n if masks is None:\n masks = self.masks\n return np.logical_and(np.logical_not(masks),\n binary_saliency_maps).sum()\n\n def get_fp(self, binary_saliency_maps, masks=None):\n if masks is None:\n masks = self.masks\n return np.logical_and(masks, binary_saliency_maps).sum()\n\n def get_tn(self, binary_saliency_maps, masks=None):\n if masks is None:\n masks = self.masks\n return np.logical_and(masks,\n np.logical_not(binary_saliency_maps)).sum()\n\n def get_fn(self, binary_saliency_maps, masks=None):\n if masks is None:\n masks = self.masks\n return np.logical_and(\n np.logical_not(masks), np.logical_not(binary_saliency_maps)).sum()\n\n def calc_precision(self, binary_saliency_maps):\n tp = self.get_tp(binary_saliency_maps)\n 
num_pred_positives = binary_saliency_maps.sum()\n precision = safe_divide(tp, num_pred_positives)\n return precision\n\n def calc_recall(self, binary_saliency_maps):\n tp = self.get_tp(binary_saliency_maps)\n recall = safe_divide(tp, self.num_real_positives)\n return recall\n\n def fmeasure(self, binary_saliency_maps):\n precision = self.calc_precision(binary_saliency_maps)\n recall = self.calc_recall(binary_saliency_maps)\n fvalue = 2 * precision * recall / (precision + recall)\n return fvalue\n\n def calc_fprate(self, binary_saliency_maps):\n fp = self.get_fp(binary_saliency_maps)\n fprate = fp / self.num_real_negatives\n return fprate\n\n def calc_tprate(self, binary_saliency_maps):\n tp = self.get_tp(binary_saliency_maps)\n tprate = tp / self.num_real_positives\n return tprate\n\n\nclass SaliencyBinarizerBase(object):\n def __init__(self, saliency_maps):\n self.norm_saliency_maps = saliency_maps\n\n def binarize(self, threshold):\n # threshold=1はすべてFalseになる\n binary_saliency_map = self.norm_saliency_maps >= threshold\n return binary_saliency_map\n\n def binarize_zero_or_not(self):\n binary_saliency_map = self.norm_saliency_maps > 0\n return binary_saliency_map\n\n def areadivide(self, remain_area_ratio):\n # 0より大きいピクセルの数を面積として保存\n # remain_area_ratioと面積をかけて残す面積の大きさを計算\n # np.zeros_like(saliency_map)\n # saliency_mapの値の大きい順に並べて残す面積分だけ1を代入\n pass\n\n\nclass MaxNormSaliencyBinarizer(SaliencyBinarizerBase):\n \"\"\"尤度マップごとに最大値が1になるように正規化して,thを1-0で動かす\n \"\"\"\n\n def __init__(self, saliency_maps):\n super(MaxNormSaliencyBinarizer, self).__init__(saliency_maps)\n max_each_map = saliency_maps.max(axis=(1, 2), keepdims=True)\n max_each_map[max_each_map == 0] = 1 # 1にしておくことで0除算を回避\n self.norm_saliency_maps /= max_each_map # 各マップの最大値が1になるようにスケーリング\n\n\nclass MaxSaliencyBinarizer(SaliencyBinarizerBase):\n \"\"\"尤度マップ全体で最大値が1になるように正規化して,thを1-0で動かす\n \"\"\"\n\n def __init__(self, saliency_maps):\n super(MaxSaliencyBinarizer, self).__init__(saliency_maps)\n max_saliency = saliency_maps.max()\n if max_saliency == 0:\n max_saliency = 1 # 1にしておくことで0除算を回避\n self.norm_saliency_maps /= max_saliency # 全マップの最大値が1になるようにスケーリング\n","sub_path":"utils/evaluations.py","file_name":"evaluations.py","file_ext":"py","file_size_in_byte":12682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"173457808","text":"from kazoo.client import KazooClient\nimport logging\nfrom config import client, apiClient\nimport time\nimport sys\nimport random\nimport string\nimport socket\n\n\ndef bring_up_new_worker_container(slave_name, db_name):\n print(\"[+] Starting(A) container: \" + db_name, file=sys.stdout)\n client.containers.run(\n image=\"mongo:3.6.3\",\n network=\"ubuntu_backend\",\n name=db_name,\n hostname=db_name,\n detach=True,\n remove=False\n )\n\n time.sleep(5)\n\n print(\"[+] Starting(A) container: \" + slave_name, file=sys.stdout)\n client.containers.run(\n image=\"slave:latest\",\n command=\"python3 -u worker.py\",\n environment={\"DB_HOSTNAME\": db_name, \"WORKER_TYPE\": \"slave\", \"NODE_NAME\": slave_name},\n entrypoint=[\"sh\", \"trap.sh\"],\n hostname=slave_name,\n name=slave_name,\n network=\"ubuntu_backend\",\n detach=True,\n remove=False\n )\n\n\ndef listdiff(l1, l2):\n if len(l1) > len(l2):\n for i in l1:\n if i not in l2:\n return i\n else:\n for i in l2:\n if i not in l1:\n return i\n\n\nclass ZooWatch:\n def __init__(self, zookeeper_hostname):\n logging.basicConfig()\n self.zk = KazooClient(hosts=zookeeper_hostname)\n self.zk.start()\n self.temp = 
[]\n self.master_db_name = \"mongomaster\"\n\n def start(self):\n print(\"[*] Starting zoo watch\", file=sys.stdout)\n self.zk.ensure_path(\"/worker\")\n\n @self.zk.ChildrenWatch(\"/worker\")\n def callback_worker(workers):\n print(\"[*] Changes detected\", file=sys.stdout)\n print(workers, self.temp)\n if len(workers) < len(self.temp):\n node = listdiff(self.temp, workers)\n print(\"[-] Node deleted: \" + node, file=sys.stdout)\n print(\"[*] Current workers: \" + str(workers), file=sys.stdout)\n if \"slave\" in node:\n killed_containers = client.containers.list(all=True, filters={\"exited\": \"137\"})\n slave_cnt = client.containers.get(node)\n slave_db_cnt = client.containers.get(\"mongo\" + node)\n if slave_cnt in killed_containers:\n slave_cnt.remove()\n slave_db_cnt.remove()\n random_name = \"\".join(random.choices(string.ascii_lowercase + string.digits, k=7))\n bring_up_new_worker_container(\n slave_name=\"slave\" + random_name,\n db_name=\"mongoslave\" + random_name\n )\n else:\n print(\"[*] Scaling down - removing \" + node)\n print(\"[*] Or newly elected master is deleting its old node\")\n else:\n print(\"[-] Master failed\", file=sys.stdout)\n master_cnt = client.containers.get(\"master\")\n master_db_cnt = client.containers.get(self.master_db_name)\n master_cnt.remove()\n master_db_cnt.remove()\n slave_pids = {}\n for i in client.containers.list():\n if \"slave\" in i.name and \"mongo\" not in i.name:\n slave_pids[apiClient.inspect_container(i.name)[\"State\"][\"Pid\"]] = i.name\n new_leader = slave_pids[min(slave_pids.keys())]\n s = socket.socket()\n s.connect((new_leader, 23456))\n s.send(\"You are now the master\".encode())\n s.close()\n self.master_db_name = \"mongo\" + new_leader\n time.sleep(5)\n random_name = \"\".join(random.choices(string.ascii_lowercase + string.digits, k=7))\n bring_up_new_worker_container(\n slave_name=\"slave\" + random_name,\n db_name=\"mongoslave\" + random_name\n )\n\n elif len(workers) > len(self.temp):\n print(\"[+] Node added: \" + listdiff(self.temp, workers), file=sys.stdout)\n print(\"[*] Current workers: \" + str(workers), file=sys.stdout)\n\n else:\n pass\n\n self.temp = workers\n\n while True:\n pass\n","sub_path":"final_project/dbaas/orchestrator/zoo_watch.py","file_name":"zoo_watch.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"55105148","text":"# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nimport time\n\nimport time\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom openerp import api, fields, models, _\nimport openerp.addons.decimal_precision as dp\nfrom openerp.exceptions import UserError\nfrom openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT\n\nclass PartnerBalanceReport(models.TransientModel):\n _name = \"partner.balance.report\"\n _description = \"Partner Balance Report\"\n \n date_from = fields.Date(string='Date From', default=lambda *a: time.strftime('%Y-%m-01'))\n date_to = fields.Date(string='Date to', default=lambda *a: time.strftime('%Y-%m-%d'))\n result_selection = fields.Selection([\n ('customer', 'Receivable'),\n ('supplier', 'Payable'),\n #('customer_supplier', 'Receivable & Payable'),\n ], 'Account Type', select=True, required=False, default='customer')\n display_type = fields.Selection([('summary','Summary by Partner'), ('detail','Detail by Invoice'), ('detail_product','Detail by Product')], \n string='Display Type', required=False, default='summary') \n with_zero_balance = fields.Boolean('With Zero Balance', default=True)\n \n# sale_order = fields.Boolean('Sales Orders', default=True)\n# customer_invoice = fields.Boolean('Customer Invoices', default=True)\n# receivable = fields.Boolean('Receivable', default=True)\n# payable = fields.Boolean('Payable', default=True)\n# account_other = fields.Boolean('Account Transit', default=True)\n# account_cash = fields.Boolean('Account Cash')\n# account_bank = fields.Boolean('Account Bank')\n# account_inventory = fields.Boolean('Account Inventory')\n# \n# period_length = fields.Integer(string='Period Length (days)', required=True, default=30)\n# display_type = fields.Selection([('summary','Summary'), ('detail','Detail'), ('salesperson','Salesperson'), ('partner','Customers/Suppliers')], \n# string='Display Type', required=False, default='summary') \n# daily_report_cash = fields.One2many('daily.report.cash', 'daily_id', string='Daily Cash Line')\n# daily_report_bank = fields.One2many('daily.report.bank', 'daily_id', string='Daily Bank Line')\n# account_inventory_line = fields.Many2many('account.account','account_inventory_line_rel','daily_id','account_id',string='Inventory', default=lambda self: self.env['account.account'].search([('daily_type', '=', 'inventory')]))\n# account_other_line = fields.Many2many('account.account','account_other_line_rel','daily_id','account_id',string='Other', default=lambda self: self.env['account.account'].search([('daily_type', '=', 'other')]))\n# account_transit_id = fields.Many2one('account.account', string='Transit Account', help=\"Transit Account\")\n# daily_type = fields.Selection([\n# ('cash', 'Cash'),\n# ('bank', 'Bank'),\n# ('other', 'Other'),\n# ('inventory', 'Inventory'),\n# ('none', 'None'),\n# ], 'Daily Type', select=True, required=False)\n \n# @api.model\n# def default_get(self, fields): \n# res = super(DailyReport, self).default_get(fields)\n# active_ids = self._context.get('active_ids')\n# account_obj = self.env['account.account']\n# acc_cash_lines = []\n# acc_bank_lines = []\n# acc_inventory_lines = []\n# for acc_cash in account_obj.sudo().search([('daily_type', '=', 'cash')]):\n# if acc_cash.daily_type == 'cash':\n# acc_cash_lines.append([0, 0, {\n# 'account_id': acc_cash.id,\n# }])\n# for acc_bank in account_obj.sudo().search([('daily_type', '=', 'bank')]):\n# if acc_bank.daily_type == 'bank':\n# acc_bank_lines.append([0, 0, {\n# 'account_id': 
acc_bank.id,\n# }])\n# \n# account_transit_id = account_obj.sudo().search([('daily_type', '=', 'other')])\n# res['daily_report_cash'] = acc_cash_lines\n# res['daily_report_bank'] = acc_bank_lines\n# res['account_transit_id'] = account_transit_id.id\n# print \"-----res----\",res\n# return res\n\n def _build_contexts(self, data):\n result = {}\n #result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False\n #result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''\n result['date_from'] = data['form']['date_from'] or False\n result['date_to'] = data['form']['date_to'] or False\n result['result_selection'] = data['form']['result_selection']\n result['display_type'] = data['form']['display_type']\n return result\n \n def xls_export(self, cr, uid, ids, context=None):\n #print \"------xls_export------\",context\n context = context or {}\n res = {}\n if context.get('xls_export'):\n # we update form with display account value\n datas = {'ids': context.get('active_ids', [])}\n datas['model'] = 'partner.balance.report'\n datas['form'] = self.read(cr, uid, ids, ['date_from', 'date_to', 'result_selection', 'display_type', 'with_zero_balance'])[0]\n used_context = self._build_contexts(datas)\n datas['form']['used_context'] = dict(used_context, lang=context.get('lang', 'en_US'))\n #for aged\n# period_length = datas['form']['period_length']\n# if period_length<=0:\n# raise UserError(_('You must set a period length greater than 0.'))\n# if not datas['form']['date_from']:\n# raise UserError(_('You must set a start date.')) \n# start = datetime.strptime(datas['form']['date'], \"%Y-%m-%d\")\n# for i in range(5)[::-1]:\n# stop = start - relativedelta(days=period_length)\n# res[str(i)] = {\n# 'name': (i!=0 and (str((5-(i+1)) * period_length) + '-' + str((5-i) * period_length)) or ('+'+str(4 * period_length))),\n# 'stop': start.strftime('%Y-%m-%d'),\n# 'start': (i!=0 and stop.strftime('%Y-%m-%d') or False),\n# }\n# start = stop - relativedelta(days=1)\n# datas['form'].update(res)\n\n return {'type': 'ir.actions.report.xml',\n 'report_name': 'partner_report_form',\n 'datas': datas\n }\n","sub_path":"aos_partner_subsidiary_ledger/wizard/partner_balance_report.py","file_name":"partner_balance_report.py","file_ext":"py","file_size_in_byte":6581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"426453746","text":"#! 
/usr/bin/python3\r\n\r\n#for basic os manipulation\r\nimport os\r\n\r\n#for use in processing\r\nfrom pickle import load as pickle_load\r\nfrom pickle import dump as pickle_dump\r\nfrom math import sin, pi\r\nimport numpy\r\nimport argparse\r\n\r\n\r\nclass ProcessingObj:\r\n \"\"\"\r\n Class used to reconstruct images from their variables for processing.\r\n \"\"\"\r\n \r\n def __init__(self):\r\n self.datalist = None\r\n self.numlist = None\r\n \r\n def read_trainingset(self, name):\r\n \"\"\"\r\n Simple method which reads a trainingset in for processing\r\n :param name: str\r\n :return str \"NO_FILE\", \"INVALID_FORMAT\"\r\n \"\"\"\r\n if not os.path.isfile(name):\r\n name = os.path.normpath(os.getcwd()+\"/\"+name)\r\n if not os.path.isfile(name):\r\n return \"NO_FILE\"\r\n\r\n try:\r\n with open(name,\"rb\") as file:\r\n datalist,numlist = pickle_load(file)\r\n except:\r\n return \"INVALID_FORMAT\"\r\n \r\n self.datalist = datalist\r\n self.numlist = numlist\r\n\r\n def get_coord(self, u, var1, var2, var3, var4):\r\n # Mainly used for readability\r\n return var1 * sin(var2 * u + var3) + var4\r\n\r\n def process_randvar_1(self, var):\r\n return var/9\r\n\r\n def process_randvar_2(self, var):\r\n return (var+10)/20\r\n \r\n def process_randvar_3(self, var):\r\n return var/(2*pi)\r\n \r\n def process_randvar_4(self, var):\r\n return (var+pi)/(2*pi)\r\n\r\n def reverse_process_randvar_1(self,var):\r\n return var*9\r\n \r\n def reverse_process_randvar_2(self,var):\r\n return (var*20)-10\r\n \r\n def reverse_process_randvar_3(self,var):\r\n return var*(2*pi)\r\n \r\n def reverse_process_randvar_4(self,var):\r\n return (var*(2*pi))-pi\r\n\r\n def process_dataset(self, dataset):\r\n \r\n if type(dataset) != list:\r\n print(\"WARN: Dataset is not a list, attempting to convert to list for processing\")\r\n try:\r\n dataset = list(dataset)\r\n except:\r\n print(\"ERROR: Converting to list failed, exiting\")\r\n return \"INVALID_FORMAT\"\r\n \r\n for idx,sublist in enumerate(dataset):\r\n if type(sublist) == numpy.array:\r\n print(\"WARN: Element of dataset is an array, attempting to convert to list for processing\")\r\n try:\r\n dataset[idx] = list(sublist)\r\n except:\r\n print(\"ERROR: Converting to list failed, exiting\")\r\n return \"INVALID_FORMAT\"\r\n\r\n for idx,var in enumerate(dataset[0]):\r\n dataset[0][idx] = self.process_randvar_1(var)\r\n \r\n for idx,var in enumerate(dataset[1]):\r\n dataset[1][idx] = self.process_randvar_1(var)\r\n \r\n for idx,var in enumerate(dataset[2]):\r\n dataset[2][idx] = self.process_randvar_2(var)\r\n \r\n for idx,var in enumerate(dataset[3]):\r\n dataset[3][idx] = self.process_randvar_2(var)\r\n \r\n for idx,var in enumerate(dataset[4]):\r\n dataset[4][idx] = self.process_randvar_3(var)\r\n \r\n for idx,var in enumerate(dataset[5]):\r\n dataset[5][idx] = self.process_randvar_3(var)\r\n \r\n for idx,var in enumerate(dataset[6]):\r\n dataset[6][idx] = self.process_randvar_4(var)\r\n\r\n for idx,var in enumerate(dataset[7]):\r\n dataset[7][idx] = self.process_randvar_4(var)\r\n \r\n for idx,sublist in enumerate(dataset):\r\n dataset[idx] = numpy.array(sublist, dtype =numpy.float64)\r\n \r\n return dataset\r\n\r\n def reverse_process_dataset(self, dataset):\r\n\r\n if type(dataset) != list:\r\n print(\"WARN: Dataset is not a list, attempting to convert to list for processing\")\r\n try:\r\n dataset = list(dataset)\r\n except:\r\n print(\"ERROR: Converting to list failed, exiting\")\r\n return \"INVALID_FORMAT\"\r\n\r\n\r\n for idx, sublist in 
enumerate(dataset):\r\n if type(sublist) != numpy.array:\r\n print(\"WARN: Element of dataset is not an array, is this a genuine dataset?\")\r\n try:\r\n sublist = list(sublist)\r\n except:\r\n print(\"ERROR: Converting to list failed, exiting\")\r\n return \"INVALID_FORMAT\"\r\n else:\r\n sublist = list(sublist)\r\n dataset[idx] = sublist\r\n\r\n for idx,var in enumerate(dataset[0]):\r\n dataset[0][idx] = self.reverse_process_randvar_1(var)\r\n \r\n for idx,var in enumerate(dataset[1]):\r\n dataset[1][idx] = self.reverse_process_randvar_1(var)\r\n \r\n for idx,var in enumerate(dataset[2]):\r\n dataset[2][idx] = self.reverse_process_randvar_2(var)\r\n \r\n for idx,var in enumerate(dataset[3]):\r\n dataset[3][idx] = self.reverse_process_randvar_2(var)\r\n \r\n for idx,var in enumerate(dataset[4]):\r\n dataset[4][idx] = self.reverse_process_randvar_3(var)\r\n \r\n for idx,var in enumerate(dataset[5]):\r\n dataset[5][idx] = self.reverse_process_randvar_3(var)\r\n \r\n for idx,var in enumerate(dataset[6]):\r\n dataset[6][idx] = self.reverse_process_randvar_4(var)\r\n \r\n for idx,var in enumerate(dataset[7]):\r\n dataset[7][idx] = self.reverse_process_randvar_4(var)\r\n \r\n return dataset\r\n\r\n def process_trainingset(self):\r\n\r\n if self.datalist == None or self.numlist == None:\r\n print(\"ERROR: No trainingset is loaded, exiting\")\r\n return \"NO_SET\"\r\n\r\n if type(self.datalist) == numpy.array:\r\n print(\"WARN: Trainingset is an array, attempting to convert to list for processing\")\r\n try:\r\n self.datalist = list(self.datalist)\r\n except:\r\n print(\"ERROR: Converting to list failed, exiting\")\r\n return \"INVALID_FORMAT\"\r\n \r\n for idx,dataset in enumerate(self.datalist):\r\n self.datalist[idx] = self.process_dataset(dataset)\r\n \r\n final_array = [numpy.array(self.datalist,dtype=numpy.float64),numpy.array(self.numlist,dtype =numpy.float64)]\r\n \r\n self.datalist = None\r\n self.numlist = None\r\n \r\n return final_array\r\n\r\n def reverse_process_trainingset(self):\r\n\r\n if type(self.datalist) == None or type(self.numlist) == None:\r\n print(\"ERROR: No trainingset is loaded, exiting\")\r\n return \"NO_SET\"\r\n\r\n if type(self.datalist) != numpy.array:\r\n print(\"WARN: Trainingset is not an array, is this a genuine trainingset?\")\r\n try:\r\n self.datalist = list(self.datalist)\r\n except:\r\n print(\"Converting to list failed, exiting\")\r\n return \"INVALID_FORMAT\"\r\n else:\r\n self.datatlist = list(self.datalist)\r\n\r\n for idx,dataset in enumerate(self.datalist):\r\n self.datalist[idx] = self.reverse_process_dataset(dataset)\r\n\r\n raw_set = [self.datalist,self.numlist]\r\n \r\n self.datalist = None\r\n self.numlist = None\r\n return raw_set\r\n\r\ndef main():\r\n\r\n # Setting argparser arguments\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-i\",\"--file_name\", type=str, help=\"The name of the file you want to process.\")\r\n\r\n parser.add_argument(\"-v\",\"--verbosity\", help=\"How much information the program will output about the process.\", action=\"store_true\")\r\n\r\n parser.add_argument(\"-o\",\"--output_name\", type=str, help=\"The name of the output file.\")\r\n\r\n parser.add_argument(\"-pt\",\"--process_trainingset\", help=\"Sets a flag to indicate one entire trainingset should be processed.\", action=\"store_true\")\r\n\r\n parser.add_argument(\"-po\",\"--process_one\",help=\"Sets a flag to indicate one set of variables should be processed.\", action = \"store_true\")\r\n\r\n 
parser.add_argument(\"-rp\",\"--reverse_process\",help=\"Sets a flag to indicate a trainigset should be processed to obtain the original variables.\", action = \"store_true\")\r\n\r\n # Getting the args\r\n args = parser.parse_args()\r\n\r\n if args.file_name == None:\r\n print(\"ERROR: No file name was given!\")\r\n return\r\n\r\n #Defaulting variables:\r\n if args.output_name == None:\r\n args.output_name = args.file_name\r\n\r\n if args.process_one and args.process_trainingset:\r\n print(\"WARN: -pt and -po cannot both be set, defaulting to -pt!\")\r\n args.process_one = False\r\n\r\n #Creating ProcessingObj instance\r\n variable_processor = ProcessingObj()\r\n\r\n error = variable_processor.read_trainingset(args.file_name)\r\n if type(error) == str:\r\n print(\"ERROR:\"+error)\r\n return\r\n\r\n if args.verbosity:\r\n print(\"Trainingset read succesfully\")\r\n\r\n \r\n if args.process_one:\r\n if args.verbosity:\r\n print(\"Processing dataset\")\r\n \r\n if args.reverse_process:\r\n processed_set = variable_processor.reverse_process_dataset()\r\n else:\r\n processed_set = variable_processor.process_dataset()\r\n\r\n elif args.process_trainingset:\r\n if args.verbosity:\r\n print(\"Processing trainingset\")\r\n \r\n if args.reverse_process:\r\n processed_set = variable_processor.reverse_process_trainingset()\r\n else:\r\n processed_set = variable_processor.process_trainingset()\r\n\r\n if args.verbosity:\r\n print(\"Saving processed data\")\r\n\r\n if args.reverse_process:\r\n suffix = \"\"\r\n if args.output_name[:-12] == \".trainingset\":\r\n args.ouput_name = args.output_name[::-12]\r\n else:\r\n suffix = \".trainingset\"\r\n\r\n filename = os.path.normpath(os.getcwd()+\"/\"+args.output_name+suffix)\r\n\r\n if os.path.isfile(filename):\r\n print(\"WARN: Output file name is already in use, file will be overwritten!\")\r\n\r\n with open(filename, \"wb\") as file:\r\n pickle_dump(processed_set,file)\r\n\r\n if args.verbosity:\r\n print(\"Processing finished\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"process_trainingset.py","file_name":"process_trainingset.py","file_ext":"py","file_size_in_byte":10327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"12848244","text":"import torch\nimport torch.nn as nn\nfrom mmcv.cnn import normal_init\n\nfrom mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32, AnchorGenerator\nfrom ..builder import build_loss\nfrom ..registry import HEADS\nfrom ..utils import bias_init_with_prob, Scale, ConvModule\nfrom mmdet.ops import DeformConv\nfrom mmdet.core import cdc_anchor_target\nimport torch.nn.functional as F\nimport numpy as np\n\nINF = 1e8\n\n\n@HEADS.register_module\nclass CDCHead(nn.Module):\n\n def __init__(self,\n num_classes,\n in_channels,\n feat_channels=256,\n strides=(8, 16, 32, 64, 128),\n kernel_size=5,\n cp_num=16,\n p_num=300,\n scope=1024,\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='PolyMatchingLoss', p_num=300),\n loss_offset=dict(type='PolyMatchingLoss', p_num=300)):\n super(CDCHead, self).__init__()\n\n self.num_classes = num_classes\n self.cls_out_channels = num_classes - 1\n self.in_channels = in_channels\n self.feat_channels = feat_channels\n self.strides = strides\n self.loss_cls = build_loss(loss_cls)\n self.loss_bbox = build_loss(loss_bbox)\n self.loss_offset = build_loss(loss_offset)\n self.fp16_enabled = False\n self.kernel_size = kernel_size\n 
self.cp_num = cp_num\n self.p_num = p_num\n self.scope = scope\n self.anchor_strides = self.strides\n self.anchor_generators = []\n for anchor_stride in self.anchor_strides:\n self.anchor_generators.append(\n AnchorGenerator(anchor_stride, [1], [1]))\n self.sampling = False\n self.ok, self.okm = self.offset_kernel()\n\n self._init_layers()\n\n def _init_layers(self):\n offset_channels = self.kernel_size * self.kernel_size * 2\n self.offset = nn.Conv2d(self.feat_channels, offset_channels, 3, padding=1)\n self.conv_adpaption = DeformConv(\n self.in_channels,\n self.feat_channels,\n kernel_size=self.kernel_size,\n padding=(self.kernel_size - 1) // 2)\n self.relu = nn.ReLU(inplace=True)\n self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)\n self.conv_reg = nn.Conv2d(self.feat_channels, self.cp_num * 2, 1)\n self.tanh = nn.Tanh()\n\n def init_weights(self):\n normal_init(self.offset, std=0.01)\n normal_init(self.conv_adpaption, std=0.01)\n normal_init(self.conv_cls, std=0.01)\n normal_init(self.conv_reg, std=0.01)\n\n def forward(self, feats):\n return multi_apply(self.forward_single, feats, self.strides)\n\n def forward_single(self, x, stride):\n offset = self.offset(x)\n adapted_feat = self.relu(self.conv_adpaption(x, offset))\n n, c, w, h = offset.shape\n offset = ((offset + self.ok) * stride).reshape(n, c//2, 2, w, h)[:, self.okm, :, :, :].\\\n reshape(n, -1, w, h)\n cls_score = self.conv_cls(adapted_feat)\n bbox_pred = self.conv_reg(adapted_feat)\n bbox_pred = self.scope * self.tanh(bbox_pred)\n\n return cls_score, bbox_pred, offset\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n def loss(self,\n cls_scores,\n bbox_preds,\n offsets,\n gt_bboxes,\n gt_labels,\n img_metas,\n cfg,\n shrinked_gt_masks, ignored_masks, dense_gt_polys, text_scales,\n gt_bboxes_ignore=None):\n assert len(cls_scores) == len(bbox_preds) == len(offsets)\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == len(self.anchor_generators)\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, img_metas)\n\n num_imgs = len(img_metas)\n num_levels = len(featmap_sizes)\n # TODO: direct interpolate may cause unaiignment on high level feature maps\n ml_mi_valid = [F.interpolate(shrinked_gt_masks.float(), size).byte() for size in featmap_sizes]\n ml_mi_ignore = [F.interpolate(ignored_masks.float(), size).byte() for size in featmap_sizes]\n text_list = []\n ignore_list = []\n for i in range(num_imgs):\n ml_valid = []\n ml_ignore = []\n for j in range(num_levels):\n ml_valid.append(ml_mi_valid[j][i].reshape(-1))\n ml_ignore.append(ml_mi_ignore[j][i].reshape(-1))\n text_list.append(ml_valid)\n ignore_list.append(ml_ignore)\n\n cls_reg_targets = cdc_anchor_target(\n anchor_list, valid_flag_list, text_list, ignore_list,\n gt_bboxes, gt_labels, dense_gt_polys, img_metas, cfg)\n if cls_reg_targets is None:\n return None\n (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n num_total_pos, num_total_neg) = cls_reg_targets\n num_total_samples = (\n num_total_pos + num_total_neg if self.sampling else num_total_pos)\n losses_cls, losses_bbox, losses_offset = multi_apply(\n self.loss_single,\n cls_scores,\n bbox_preds,\n offsets,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n bbox_weights_list,\n num_total_samples=num_total_samples,\n cfg=cfg)\n return dict(loss_cls=losses_cls, loss_bbox=losses_bbox, loss_offset=losses_offset)\n\n def loss_single(self, cls_score, bbox_pred, offsets, labels, 
label_weights,\n bbox_targets, bbox_weights, num_total_samples, cfg):\n # classification loss\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n cls_score = cls_score.permute(0, 2, 3,\n 1).reshape(-1, self.cls_out_channels)\n loss_cls = self.loss_cls(\n cls_score, labels, label_weights, avg_factor=num_total_samples)\n # regression loss\n bbox_targets = bbox_targets.reshape(-1, self.p_num, 2)\n bbox_weights = bbox_weights.reshape(-1, self.p_num, 2)\n\n bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, bbox_pred.shape[1]//2, 2)\n offsets = offsets.permute(0, 2, 3, 1).reshape(-1, offsets.shape[1]//2, 2)\n\n pos_bbox_weights = bbox_weights[:, 0, 0] > 0\n bbox_weights = bbox_weights[pos_bbox_weights]\n bbox_targets = bbox_targets[pos_bbox_weights]\n bbox_pred = bbox_pred[pos_bbox_weights]\n offsets = offsets[pos_bbox_weights]\n bbox_pred = self.uniformsample(bbox_pred, self.p_num)\n offsets = self.uniformsample(offsets, self.p_num)\n loss_bbox = self.loss_bbox(bbox_pred, bbox_targets)\n loss_offset = self.loss_offset(offsets, bbox_targets)\n return loss_cls, loss_bbox, loss_offset\n\n def get_anchors(self, featmap_sizes, img_metas):\n \"\"\"Get anchors according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n img_metas (list[dict]): Image meta info.\n\n Returns:\n tuple: anchors of each image, valid flags of each image\n \"\"\"\n num_imgs = len(img_metas)\n num_levels = len(featmap_sizes)\n\n # since feature map sizes of all images are the same, we only compute\n # anchors for one time\n multi_level_anchors = []\n for i in range(num_levels):\n anchors = self.anchor_generators[i].grid_anchors(\n featmap_sizes[i], self.anchor_strides[i])\n multi_level_anchors.append(anchors)\n anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n\n # for each image, we compute valid flags of multi level anchors\n valid_flag_list = []\n for img_id, img_meta in enumerate(img_metas):\n multi_level_flags = []\n for i in range(num_levels):\n anchor_stride = self.anchor_strides[i]\n feat_h, feat_w = featmap_sizes[i]\n h, w, _ = img_meta['pad_shape']\n valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)\n valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)\n flags = self.anchor_generators[i].valid_flags(\n (feat_h, feat_w), (valid_feat_h, valid_feat_w))\n multi_level_flags.append(flags)\n valid_flag_list.append(multi_level_flags)\n\n return anchor_list, valid_flag_list\n\n def offset_kernel(self, device='cuda'):\n grid = torch.arange(self.kernel_size, device=device)\n shift = torch.tensor([(self.kernel_size-1)//2], device=device)\n xx = grid.repeat(self.kernel_size) - shift\n yy = grid.reshape(-1, 1).repeat((1, self.kernel_size)).view(-1) - shift\n # mask = torch.zeros(self.kernel_size, self.kernel_size, dtype=torch.uint8, device=device)\n # mask_ones = torch.ones(self.kernel_size, dtype=torch.uint8, device=device)\n # mask[0], mask[-1], mask[:, 0], mask[:, -1] = [mask_ones] * 4\n # mask = torch.stack([mask.reshape(-1)]*2, -1).reshape(-1)\n # TODO:\n # easy setting for kenel size = 5\n mask_ind = torch.tensor([0, 1, 2, 3, 4, 9, 14, 19, 24, 23, 22, 21, 20, 15, 10, 5], device=device)\n return torch.stack([xx, yy], -1).reshape(1, -1, 1, 1).float(), mask_ind\n\n def uniformsample(self, pgtnp_px2, newpnum):\n # # pgtnp_px2 (N, cp_num, 2)\n if pgtnp_px2.shape[0] == 0:\n return pgtnp_px2.new_zeros(pgtnp_px2.shape[0], newpnum, 2)\n device = pgtnp_px2.device\n pnum, cnum = pgtnp_px2.shape[-2:]\n assert cnum == 2\n\n idxnext_p = 
(torch.arange(pnum, dtype=torch.long) + 1) % pnum\n pgtnext_px2 = pgtnp_px2[:, idxnext_p, :]\n edgelen_p = torch.sqrt(torch.sum((pgtnext_px2 - pgtnp_px2) ** 2, dim=-1))\n edgeidxsort_p = torch.argsort(edgelen_p, dim=-1)\n\n # two cases\n # we need to remove gt points\n # we simply remove shortest paths\n if pnum > newpnum:\n edgeidxkeep_k = edgeidxsort_p[:, pnum - newpnum:]\n edgeidxsort_k = torch.argsort(edgeidxkeep_k, dim=-1)\n pgtnp_kx2 = pgtnp_px2[edgeidxsort_k, :]\n assert pgtnp_kx2.shape[1] == newpnum\n return pgtnp_kx2\n # we need to add gt points\n # we simply add it uniformly\n else:\n # (N, cp_num)\n psamplenps = []\n edgenum = torch.round(edgelen_p * newpnum / torch.sum(edgelen_p, dim=-1)[:, None]).int()\n edgenum[edgenum == 0] = 1\n # after round, it may has 1 or 2 mismatch\n edgenumsum_ = torch.sum(edgenum, -1).int()\n assert len(edgenumsum_) == pgtnp_px2.shape[0]\n for i, edgenumsum in enumerate(edgenumsum_):\n if edgenumsum != newpnum:\n if edgenumsum > newpnum:\n id = -1\n passnum = edgenumsum - newpnum\n while passnum > 0:\n edgeid = edgeidxsort_p[i, id]\n if edgenum[i, edgeid] > passnum:\n edgenum[i, edgeid] -= passnum\n passnum -= passnum\n else:\n passnum -= edgenum[i, edgeid] - 1\n edgenum[i, edgeid] -= edgenum[i, edgeid] - 1\n id -= 1\n else:\n id = -1\n edgeid = edgeidxsort_p[i, id]\n edgenum[i, edgeid] += newpnum - edgenumsum\n\n assert torch.sum(edgenum[i]) == newpnum\n\n psample = []\n for j in range(pnum):\n pb_1x2 = pgtnp_px2[i, j:j + 1, :]\n pe_1x2 = pgtnext_px2[i, j:j + 1, :]\n\n pnewnum = edgenum[i, j]\n wnp_kx1 = torch.arange(pnewnum, dtype=torch.float32, device=device).reshape(-1, 1) / pnewnum\n\n pmids = pb_1x2 * (1 - wnp_kx1) + pe_1x2 * wnp_kx1\n psample.append(pmids)\n\n psamplenp = torch.cat(psample, dim=0)\n psamplenps.append(psamplenp)\n\n return torch.stack(psamplenps, dim=0)","sub_path":"mmdet/models/anchor_heads/cdc_head.py","file_name":"cdc_head.py","file_ext":"py","file_size_in_byte":12582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"578969875","text":"def sumB(x,y):\n\tz = []\n\tfor i in range(len(x)):\n\t\tif x[i] == y[i]:\n\t\t\tz.append(0)\n\t\telse:\n\t\t\tz.append(1)\n\treturn z\n\nN = input()\nM = input()\n\nR = sumB(N,M)\n\nR = list(map(str,R))\n\nR = ''.join(R)\n\nprint(R)\t\n","sub_path":"competicao_3/codeForces_61A.py","file_name":"codeForces_61A.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"275974144","text":"#!/usr/bin/python\n\nimport os\n\nflag = \"flag{s0meT1mEs_r5a_is_nO7_sAf3_eNou9H}\"\n\nfor i in range(0,len(flag),4):\n f = 'flag' + str(i/4) \n os.system('echo '+flag[i:i+4]+' > '+f+'.txt')\n os.system('openssl rsautl -encrypt -in '+f+'.txt -inkey rsakey.pub -pubin -out '+f+'.enc')\n","sub_path":"encrypt15/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"407085932","text":"#-*- coding:utf-8 -*-\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import JsonResponse\n\nfrom apps.display.models import Country, Region\n\n\ndef index_view(request):\n context = RequestContext(request)\n groups = Region.objects.all()\n\n if request.method == \"GET\":\n return render_to_response(\n 'index.html',\n {'groups': groups},\n context\n )\n else:\n if request.method == \"POST\":\n data = []\n\n if 
request.POST.get(u'group', None):\n group_id = Region.objects.get(name=request.POST[u'group'])\n else:\n group_id = Region.objects.get(name=u'ЕС-27')\n\n countries = Country.objects.filter(region_id=group_id)\n for country in countries:\n data += [{\"country\": country.name, \"value\": country.value}]\n\n if request.is_ajax():\n return JsonResponse({\"data\": data})\n else:\n return render_to_response(\n 'index.html',\n {'groups': groups, 'data': data},\n context\n )\n","sub_path":"apps/display/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"562514892","text":"from ClassLibrary.BaseClass.Object import *\nfrom ClassLibrary.CategoryClass.StoreCategory import StoreCategory\n\n\nclass StoreCategorySecond(StoreCategory):\n\n def __init__(self):\n super(StoreCategorySecond, self).__init__()\n self.className = self.__class__.__name__\n\n def output_StoreCategorySecond(self):\n if self.instance:\n data = self.output_StoreCategory()\n data.update({attribute_storeCategoryThird: self.get_attribute_storeCategoryThird()})\n return data\n return None\n\n def get_attribute_storeCategoryThird(self):\n if self.instance:\n store = Base.get_relation_data_and_attribute(self.instance.get(attribute_objectId), Class_Name_StoreCategorySecond, attribute_storeCategoryThird, attribute_state, STATE_OK, 1, limit=100)\n if store:\n returnList = []\n for foo in store:\n third = StoreCategory(Class_Name_StoreCategoryThird)\n third.set_instance(foo)\n returnList.append(third.output_StoreCategory())\n print(returnList)\n return returnList\n return None\n\n def delete_Category(self):\n self.set_attribute_state(STATE_DELETE)\n First = StoreCategory(Class_Name_StoreCategoryFirst)\n firstInstance = self.get_attribute_storeCategoryFirst()\n if firstInstance:\n First.get_Object(firstInstance.id)\n First.remove_attribute_relation(attribute_storeCategorySecond, self.instance)\n return True\n\n return None\n\n def create_StoreCategorySecond(self, data, storeCategoryFirst):\n if data and storeCategoryFirst:\n if self.get_Object_name(data[attribute_name]):\n if self.get_attribute_objectId() == storeCategoryFirst.get(attribute_objectId):\n return True\n self.create_StoreCategory(data)\n self.set_attribute_storeCategoryFirst(storeCategoryFirst)\n return True\n return None\n","sub_path":"ClassLibrary/CategoryClass/StoreCategorySecond.py","file_name":"StoreCategorySecond.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"319592165","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport random\nimport copy\nimport time\nfrom threading import Thread\n\nfrom tinkerforge.ip_connection import IPConnection\nfrom tinkerforge.bricklet_led_strip import LEDStrip\nfrom tinkerforge.bricklet_led_strip_v2 import LEDStripV2\nfrom tinkerforge.bricklet_piezo_speaker import PiezoSpeaker\n\nimport config\nfrom repeated_timer import RepeatedTimer\nfrom keypress import KeyPress\n\n\nclass PongSpeaker:\n def __init__(self, ipcon):\n self.okay = False\n self.ipcon = ipcon\n\n if not config.UID_PIEZO_SPEAKER_BRICKLET:\n print(\"Not Configured: Piezo Speaker\")\n return\n\n self.speaker = PiezoSpeaker(config.UID_PIEZO_SPEAKER_BRICKLET, self.ipcon)\n\n try:\n self.speaker.get_identity()\n print(\"Found: Piezo Speaker ({0})\".format(config.UID_PIEZO_SPEAKER_BRICKLET))\n except:\n print(\"Not Found: Piezo Speaker 
({0})\".format(config.UID_PIEZO_SPEAKER_BRICKLET))\n return\n\n self.okay = True\n\n def sirene(self, freq):\n if not self.okay:\n return\n\n for j in range(2):\n for i in range(25):\n self.speaker.beep(10, freq + i*20)\n time.sleep(0.007)\n for i in range(25):\n self.speaker.beep(10, freq + 24*20 - i*20)\n time.sleep(0.007)\n\n def beep_paddle_hit(self):\n if not self.okay:\n return\n\n self.speaker.beep(100, 500)\n\n def beep_sirene(self):\n if not self.okay:\n return\n\n Thread(target=self.sirene, args=(1000,)).start()\n\n\nclass Pong:\n PADDLE_SIZE = 3\n\n# Antialased ball?\n# PONG_COLOR_INDEX_BALL_TOP = 8\n# PONG_COLOR_INDEX_BALL_LEFT = 9\n# PONG_COLOR_INDEX_BALL_RIGHT = 10\n# PONG_COLOR_INDEX_BALL_BOTTOM = 11\n\n COLORS = [\n ( 0, 0, 0), # off\n# ( 10, 10, 10), # grey\n (255, 0, 0), # red\n (255, 80, 0), # orange\n (255, 255, 0), # yellow\n ( 0, 255, 0), # green\n ( 0, 0, 255), # blue\n (255, 0, 150), # violet\n (255, 0, 40), # purple\n ( 0, 0, 0), # ball top\n ( 0, 0, 0), # ball left\n ( 0, 0, 0), # ball right\n ( 0, 0, 0) # ball bottom\n ]\n\n SCORE_FONT = {\n 0: [\"222\",\n \"202\",\n \"202\",\n \"202\",\n \"222\"],\n 1: [\"020\",\n \"020\",\n \"020\",\n \"020\",\n \"020\"],\n 2: [\"222\",\n \"002\",\n \"222\",\n \"200\",\n \"222\"],\n 3: [\"222\",\n \"002\",\n \"222\",\n \"002\",\n \"222\"],\n 4: [\"202\",\n \"202\",\n \"222\",\n \"002\",\n \"002\"],\n 5: [\"222\",\n \"200\",\n \"222\",\n \"002\",\n \"222\"],\n 6: [\"222\",\n \"200\",\n \"222\",\n \"202\",\n \"222\"],\n 7: [\"222\",\n \"002\",\n \"002\",\n \"002\",\n \"002\"],\n 8: [\"222\",\n \"202\",\n \"222\",\n \"202\",\n \"222\"],\n 9: [\"222\",\n \"202\",\n \"222\",\n \"002\",\n \"002\"],\n }\n\n playfield = [x[:] for x in [[0]*config.LED_COLS]*config.LED_ROWS]\n score = [0, 0]\n paddle_position_x = [4, 15]\n paddle_position_y = [3, 3]\n ball_position = [10, 5]\n ball_direction = [0.1, 0.2]\n timer = None\n loop = True\n\n def __init__(self, ipcon):\n self.okay = False\n self.ipcon = ipcon\n\n if not config.UID_LED_STRIP_BRICKLET:\n print(\"Not Configured: LED Strip or LED Strip V2 (required)\")\n return\n\n if not config.IS_LED_STRIP_V2:\n self.led_strip = LEDStrip(config.UID_LED_STRIP_BRICKLET, self.ipcon)\n else:\n self.led_strip = LEDStripV2(config.UID_LED_STRIP_BRICKLET, self.ipcon)\n\n try:\n self.led_strip.get_frame_duration()\n if not config.IS_LED_STRIP_V2:\n print(\"Found: LED Strip ({0})\".format(config.UID_LED_STRIP_BRICKLET))\n else:\n print(\"Found: LED Strip V2 ({0})\".format(config.UID_LED_STRIP_BRICKLET))\n except:\n if not config.IS_LED_STRIP_V2:\n print(\"Not Found: LED Strip ({0})\".format(config.UID_LED_STRIP_BRICKLET))\n else:\n print(\"Not Found: LED Strip V2({0})\".format(config.UID_LED_STRIP_BRICKLET))\n return\n\n self.kp = KeyPress(self.ipcon)\n self.speaker = PongSpeaker(self.ipcon)\n\n self.okay = True\n\n self.led_strip.set_frame_duration(40)\n\n if not config.IS_LED_STRIP_V2:\n self.led_strip.register_callback(self.led_strip.CALLBACK_FRAME_RENDERED,\n self.frame_rendered)\n else:\n self.led_strip.register_callback(self.led_strip.CALLBACK_FRAME_STARTED,\n self.frame_rendered)\n\n self.led_strip.set_channel_mapping(config.CHANNEL_MAPPING)\n\n self.init_game()\n\n def init_game(self):\n self.new_ball()\n self.paddle_position_y = [3, 3]\n self.score = [0, 0]\n\n def frame_rendered(self, length):\n self.write_playfield()\n\n def write_playfield(self):\n if not self.okay:\n return\n\n field = copy.deepcopy(self.playfield)\n\n self.add_score_to_playfield(field)\n 
self.add_paddles_to_playfield(field)\n self.add_ball_to_playfield(field)\n\n # Reorder LED data into R, G and B channel\n r = []\n g = []\n b = []\n frame = []\n\n for row in range(config.LED_ROWS):\n col_range = range(config.LED_COLS)\n if row % 2 == 0:\n col_range = reversed(col_range)\n for col in col_range:\n r.append(self.COLORS[field[row][col]][0])\n g.append(self.COLORS[field[row][col]][1])\n b.append(self.COLORS[field[row][col]][2])\n frame.append(self.COLORS[field[row][col]][0])\n frame.append(self.COLORS[field[row][col]][1])\n frame.append(self.COLORS[field[row][col]][2])\n\n if not config.IS_LED_STRIP_V2:\n # Make chunks of size 16\n r_chunk = [r[i:i+16] for i in range(0, len(r), 16)]\n g_chunk = [g[i:i+16] for i in range(0, len(g), 16)]\n b_chunk = [b[i:i+16] for i in range(0, len(b), 16)]\n\n for i in range(len(r_chunk)):\n length = len(r_chunk[i])\n\n # Fill up chunks with zeros\n r_chunk[i].extend([0]*(16-len(r_chunk[i])))\n g_chunk[i].extend([0]*(16-len(g_chunk[i])))\n b_chunk[i].extend([0]*(16-len(b_chunk[i])))\n\n try:\n self.led_strip.set_rgb_values(i*16, length, r_chunk[i], g_chunk[i], b_chunk[i])\n except:\n break\n else:\n try:\n self.led_strip.set_led_values(0, frame)\n except:\n return\n\n def add_score_to_playfield(self, field):\n for row in range(3):\n for col in range(5):\n field[row][col+1] = int(self.SCORE_FONT[self.score[0]][col][row])\n field[row+17][col+1] = int(self.SCORE_FONT[self.score[1]][col][row])\n\n def add_ball_to_playfield(self, field):\n x = max(0, min(19, int(self.ball_position[0])))\n y = max(0, min(9, int(self.ball_position[1])))\n field[x][y] = config.PONG_COLOR_INDEX_BALL\n\n# Antialased ball?\n# x = max(0, min(19, self.ball_position[0]))\n# y = max(0, min(9, self.ball_position[1]))\n# ix = int(x)\n# iy = int(y)\n# field[ix][iy] = config.PONG_COLOR_INDEX_BALL\n# if ix + 1 < config.LED_ROWS:\n# field[ix+1][iy] = PONG_COLOR_INDEX_BALL_RIGHT\n# if ix - 1 > 0:\n# field[ix-1][iy] = PONG_COLOR_INDEX_BALL_LEFT\n# if iy + 1 < config.LED_COLS:\n# field[ix][iy+1] = PONG_COLOR_INDEX_BALL_TOP\n# if iy - 1 > 0:\n# field[ix][iy-1] = PONG_COLOR_INDEX_BALL_BOTTOM\n#\n# dx = x - int(x)\n# dy = x - int(x)\n# self.COLORS[PONG_COLOR_INDEX_BALL_RIGHT] = (0, 255*dx/64, 0)\n# self.COLORS[PONG_COLOR_INDEX_BALL_LEFT] = (0, 255*(1-dx)/64, 0)\n# self.COLORS[PONG_COLOR_INDEX_BALL_TOP] = (0, 255*dy/64, 0)\n# self.COLORS[PONG_COLOR_INDEX_BALL_BOTTOM] = (0, 255*(1-dy)/64, 0)\n\n def add_paddles_to_playfield(self, field):\n for player in range(2):\n for i in range(self.PADDLE_SIZE):\n field[self.paddle_position_x[player]][self.paddle_position_y[player]+i] = config.PONG_COLOR_INDEX_PLAYER[player]\n\n def move_paddle(self, player, change):\n new_pos = self.paddle_position_y[player] + change\n if new_pos >= 0 and new_pos <= config.LED_COLS - self.PADDLE_SIZE:\n self.paddle_position_y[player] = new_pos\n\n def new_ball(self):\n self.ball_position = [(config.LED_ROWS - 1.0) / 2.0, (config.LED_COLS - 1.0) / 2.0]\n self.ball_direction = [random.choice([-0.2, 0.2]), random.choice([random.randrange(1, 9)/10.0, random.randrange(-9, -1)/10.0])]\n\n def tick(self):\n # Move ball\n for i in range(2):\n self.ball_position[i] += self.ball_direction[i]\n\n # Wall collision top/bottom\n if self.ball_position[1] < 0 or self.ball_position[1] >= config.LED_COLS:\n self.ball_direction[1] = -self.ball_direction[1]\n\n # Wall collision left/right\n def hit_left_right(player):\n self.speaker.beep_sirene()\n self.new_ball()\n\n self.score[player] += 1\n if self.score[player] > 9:\n 
self.score[player] = 0\n\n if self.ball_position[0] < 0:\n hit_left_right(1)\n\n if self.ball_position[0] >= config.LED_ROWS:\n hit_left_right(0)\n\n # Paddle collision\n def hit_paddle(skew):\n self.speaker.beep_paddle_hit()\n self.ball_direction[0] = -self.ball_direction[0]\n self.ball_direction[1] -= skew\n for i in range(2):\n self.ball_direction[i] *= 1.1 # Increase speed\n\n if self.ball_direction[0] < 0:\n if self.paddle_position_x[0] + 0.5 <= self.ball_position[0] <= self.paddle_position_x[0] + 1.5:\n if self.paddle_position_y[0] - 0.5 <= self.ball_position[1] <= self.paddle_position_y[0] + self.PADDLE_SIZE + 0.5:\n paddle_skew = (self.paddle_position_y[0] + self.PADDLE_SIZE/2.0 - self.ball_position[1])/10.0\n hit_paddle(paddle_skew)\n\n if self.ball_direction[0] > 0:\n if self.paddle_position_x[1] - 0.5 <= self.ball_position[0] <= self.paddle_position_x[1] + 0.5:\n if self.paddle_position_y[1] - 0.5 <= self.ball_position[1] <= self.paddle_position_y[1] + self.PADDLE_SIZE + 0.5:\n paddle_skew = (self.paddle_position_y[1] + self.PADDLE_SIZE/2.0 - self.ball_position[1])/10.0\n hit_paddle(paddle_skew)\n\n def run_game_loop(self):\n self.frame_rendered(0)\n\n self.timer = RepeatedTimer(0.1, self.tick)\n\n while self.loop:\n key = self.kp.read_single_keypress().lower()\n\n if key == 'a':\n self.move_paddle(0, -1)\n elif key == 's':\n self.move_paddle(0, 1)\n elif key == 'k':\n self.move_paddle(1, -1)\n elif key == 'l':\n self.move_paddle(1, 1)\n elif key == 'r':\n self.init_game()\n elif not config.HAS_GUI and key == 'q':\n break\n\n if not config.IS_LED_STRIP_V2:\n self.led_strip.register_callback(self.led_strip.CALLBACK_FRAME_RENDERED, None)\n else:\n self.led_strip.register_callback(self.led_strip.CALLBACK_FRAME_STARTED, None)\n\n self.timer.stop()\n self.kp.stop()\n\n\nif __name__ == \"__main__\":\n # Create IP Connection and connect it\n ipcon = IPConnection()\n ipcon.connect(config.HOST, config.PORT)\n\n # Create Pong object and start game loop\n pong = Pong(ipcon)\n\n if pong.okay:\n print('Press q to exit')\n\n pong.run_game_loop()\n\n ipcon.disconnect()\n","sub_path":"games/python/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":12380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"449361351","text":"from nm import Nmap\r\nimport socket\r\nfrom termcolor import colored\r\n\r\nclass SmtpScanner():\r\n\tdef __init__(self, target, port=25):\r\n\t\tself.target = target\r\n\t\tself.port = port\r\n\t\tself.nmap = Nmap(target, str(self.port))\r\n\r\n\tdef nmapScripts(self, scriptList=['smtp-enum-users.nse', 'smtp-brute.nse']):\r\n\t\treturn self.nmap.scripts(self.port, scriptList)\r\n\r\n\tdef userVrfyBruteForce(self, wordlist='/usr/share/wordlists/metasploit/unix_users.txt'):\r\n\t\ttry:\r\n\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\t\ts.connect((self.target, self.port))\r\n\t\t\ts.recv(1024)\r\n\r\n\t\t\tvalidUsers = []\r\n\r\n\t\t\tuserWordlist = open(wordlist, 'r')\r\n\r\n\t\t\tcounter = 0\r\n\r\n\t\t\tfor user in userWordlist:\r\n\t\t\t\ts.send('VRFY ' + user.strip() + '\\n')\r\n\t\t\t\tresponse = s.recv(1024)\r\n\r\n\t\t\t\tif response.split(' ')[0] != '550':\r\n\t\t\t\t\tvalidUsers.append(user)\r\n\r\n\t\t\t\tcounter = counter + 1\r\n\t\t\t\tif counter%20 == 0:\r\n\t\t\t\t\ts.close()\r\n\t\t\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\t\t\t\ts.connect((self.target, self.port))\r\n\t\t\t\t\ts.recv(1024)\r\n\r\n\t\t\ts.close()\r\n\r\n\t\t\treturn 
validUsers\r\n\r\n\t\texcept:\r\n\t\t\tprint(colored('\\n[-] Network error connecting to SMTP server\\n', 'red'))\r\n\t\t\treturn None\r\n\r\n\tdef getBanner(self):\r\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\ts.connect((self.target, self.port))\r\n\t\tresponse = s.recv(1024)\r\n\t\ts.close()\r\n\t\treturn response","sub_path":"v1/smtpScanner.py","file_name":"smtpScanner.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"427745758","text":"import json\nimport webob.dec\nfrom webob import Response\nimport mysql.connector\n\n\nclass Controller(object):\n def __init__(self):\n self.db = mysql.connector.connect(user='root', password='password', database='snort')\n\n def getOffending(self, req):\n cur = self.db.cursor()\n # Define the query put it into tuple ()\n query = (\"select inet_ntoa(ip_src) as ip_src, count(*) as count from iphdr group by ip_src order by count(*) desc limit 5\")\n # Execute the query\n cur.execute(query)\n\n # Put the records from DB into the dictionary\n query_result = [dict(line) for line in [zip([column[0] for column in cur.description], row) for row in cur.fetchall()]]\n\n event_json = json.dumps(query_result, indent=4)\n\n response = Response(request=req,\n status=200,\n content_type='application/json')\n response.body = json.dumps(event_json)\n return response\n\n @webob.dec.wsgify\n def __call__(self, request):\n method_call = request.method\n if method_call == 'GET':\n return self.getOffending(request)\n\n\ndef create_resource():\n return Controller()\n","sub_path":"offending.py","file_name":"offending.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"308797641","text":"\"\"\"\nprotocolDefinitions.py\n\nThe following module consists of a list of commands or definitions to be used in the communication between devices and the control system\n\nMichael Xynidis\nFluvio L Lobo Fenoglietto\n09/26/2016\n\"\"\"\n# Definition Name Value Class\n# ---------- ---- ----- -----\nCHK = chr(0x01) # System Check 0x01 ORG \nENQ = chr(0x05) # Enquiry 0x05 STD\nEOT = chr(0x04) # End of Transmission 0x04 STD\nACK = chr(0x06) # Positive Acknowledgement 0x06 STD\nNAK = chr(0x15) # Negative Acknowledgement 0x15 STD\nCAN = chr(0x18) # Cancel Current Command 0x18 STD\nESC = chr(0x1B) # Escape 0x1B STD\nDC1_STRTREC = chr(0x11) # Start Recording 0x11 ORG\nDC2_STPREC = chr(0x12) # Stop Recording 0x12 ORG\nDC3_STRTREC = chr(0x13) # Start Recording 0x13 ORG\nDC4_STPREC = chr(0x14) # Stop Recording 0x14 ORG\nNRMOP = chr(0x20) # Normal Operation 0x20 ORG\nSIM = chr(0x21) # Simulation Mode 0x21 ORG\nSIM_000 = chr(0x30) # Simulate Scenario 0 0x30 ORG\nSIM_001 = chr(0x31) # Simulate Scenario 1 0x31 ORG\n\n# Legend\n# STD - Standard terminology / Standard reference for command\n# ORG - Original or custom-made command and reference\n","sub_path":"Software/Python/OLD/protocolDefinitions.py","file_name":"protocolDefinitions.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"323163238","text":"#coding=utf-8\r\n#这个文档写的是在太简略了吧,主要是看在这个库支持深度学习不然我根本没兴趣。\r\n#卧槽怪不得这个文档这么简略原来只有一个处理图片的class 因为在autokeras下面只有一行\r\n#from autokeras.classifier import ImageClassifier .classifier只有一个class 
ImageClassifier\r\n#总体感觉这个库确实是比较简单的吧,而且能够找到的例子就二个都是关于图像处理相关的\r\n#https://github.com/jhfjhfj1/autokeras/tree/master/tests 测试前面还有一些例子\r\n#可以试一下这个能否应用于Titanic的数据集上面,我在想github上这个东西很有可能被过誉了吧\r\n#但是有一点还是挺好的,这个东西是基于Pytorch的,总比基于tensorflow好多了吧,至少我还能试\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport autokeras as ak\r\nfrom sklearn import preprocessing\r\n\r\ndata_train = pd.read_csv(\"C:/Users/1/Desktop/train.csv\")\r\ndata_test = pd.read_csv(\"C:/Users/1/Desktop/test.csv\")\r\ncombine = [data_train, data_test]\r\n\r\nfor dataset in combine:\r\n dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\\.', expand=False)\r\n dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\r\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\r\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\r\n dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\r\n title_map = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rare': 5}\r\n dataset['Title'] = dataset['Title'].map(title_map)\r\n dataset['Title'] = dataset['Title'].fillna(0) \r\n\r\nfor dataset in combine:\r\n dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1\r\n dataset['FamilySizePlus'] = 0\r\n dataset.loc[dataset['FamilySize'] == 1, 'FamilySizePlus'] = 1\r\n dataset.loc[dataset['FamilySize'] == 2, 'FamilySizePlus'] = 2\r\n dataset.loc[dataset['FamilySize'] == 3, 'FamilySizePlus'] = 2\r\n dataset.loc[dataset['FamilySize'] == 4, 'FamilySizePlus'] = 2\r\n dataset.loc[dataset['FamilySize'] == 5, 'FamilySizePlus'] = 1\r\n dataset.loc[dataset['FamilySize'] == 6, 'FamilySizePlus'] = 1\r\n dataset.loc[dataset['FamilySize'] == 7, 'FamilySizePlus'] = 1\r\n\r\nfor dataset in combine:\r\n dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)\r\n\r\nguess_ages = np.zeros((2, 3))\r\nfor dataset in combine:\r\n for i in range(0, 2):\r\n for j in range(0, 3):\r\n guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)]['Age'].dropna()\r\n age_guess = guess_df.median()\r\n guess_ages[i,j] = int(age_guess / 0.5 + 0.5) * 0.5\r\n for i in range(0, 2):\r\n for j in range(0, 3):\r\n dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]\r\n dataset['Age'] = dataset['Age'].astype(int)\r\n \r\nfor dataset in combine: \r\n dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0 \r\n dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1 \r\n dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2 \r\n dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3 \r\n dataset.loc[ dataset['Age'] > 64, 'Age'] = 4\r\n \r\n#这里的mode是求解pandas.core.series.Series众数的第一个值(可能有多个众数)\r\nfreq_port = data_train.Embarked.dropna().mode()[0]\r\nfor dataset in combine:\r\n dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)\r\nfor dataset in combine:\r\n dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2})\r\n\r\n#将data_test中的fare元素所缺失的部分由已经包含的数据的中位数决定哈\r\ndata_test['Fare'].fillna(data_test['Fare'].dropna().median(), inplace=True)\r\n\r\nfor dataset in combine:\r\n dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0\r\n dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1\r\n dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2\r\n dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3\r\n dataset['Fare'] = dataset['Fare'].astype(int)\r\n\r\nfor dataset in combine:\r\n 
dataset.loc[(dataset.Cabin.isnull()), 'Cabin'] = 0\r\n dataset.loc[(dataset.Cabin.notnull()), 'Cabin'] = 1\r\n\r\n#尼玛给你说的这个是贡献船票,原来的英文里面根本就没有这种说法嘛\r\ndf = data_train['Ticket'].value_counts()\r\ndf = pd.DataFrame(df)\r\ndf = df[df['Ticket'] > 1]\r\n#print(df)\r\ndf_ticket = df.index.values #共享船票的票号\r\ntickets = data_train.Ticket.values #所有的船票\r\n#print(tickets)\r\nresult = []\r\nfor ticket in tickets:\r\n if ticket in df_ticket:\r\n ticket = 1\r\n else:\r\n ticket = 0 #遍历所有船票,在共享船票里面的为1,否则为0\r\n result.append(ticket)\r\n \r\ndf = data_train['Ticket'].value_counts()\r\ndf = pd.DataFrame(df)\r\ndf = df[df['Ticket'] > 1]\r\ndf_ticket = df.index.values #共享船票的票号\r\ntickets = data_train.Ticket.values #所有的船票\r\n\r\nresult = []\r\nfor ticket in tickets:\r\n if ticket in df_ticket:\r\n ticket = 1\r\n else:\r\n ticket = 0 #遍历所有船票,在共享船票里面的为1,否则为0\r\n result.append(ticket)\r\n\r\nresults = pd.DataFrame(result)\r\nresults.columns = ['Ticket_Count']\r\ndata_train = pd.concat([data_train, results], axis=1)\r\n\r\ndf = data_test['Ticket'].value_counts()\r\ndf = pd.DataFrame(df)\r\ndf = df[df['Ticket'] > 1]\r\ndf_ticket = df.index.values \r\ntickets = data_test.Ticket.values \r\nresult = []\r\nfor ticket in tickets:\r\n if ticket in df_ticket:\r\n ticket = 1\r\n else:\r\n ticket = 0 \r\n result.append(ticket)\r\nresults = pd.DataFrame(result)\r\nresults.columns = ['Ticket_Count']\r\ndata_test = pd.concat([data_test, results], axis=1) \r\n\r\ndata_train_1 = data_train.copy()\r\ndata_test_1 = data_test.copy()\r\ndata_test_1 = data_test_1.drop(['PassengerId', 'Name', 'SibSp', 'Parch', 'Ticket', 'FamilySize'], axis=1)\r\n\r\nX_train = data_train_1[['Pclass', 'Sex', 'Age', 'Fare', 'Embarked', 'Cabin', 'Title', 'FamilySizePlus', 'Ticket_Count']]\r\nY_train = data_train_1['Survived']\r\n\r\nX_test = data_test_1[['Pclass', 'Sex', 'Age', 'Fare', 'Embarked', 'Cabin', 'Title', 'FamilySizePlus', 'Ticket_Count']]\r\n\r\nX_all = pd.concat([X_train, X_test], axis=0)\r\n#我觉得训练集和测试集需要在一起进行特征缩放,所以注释掉了原来的X_train的特征缩放咯\r\nX_all_scaled = pd.DataFrame(preprocessing.scale(X_all), columns = X_train.columns)\r\n#X_train_scaled = pd.DataFrame(preprocessing.scale(X_train), columns = X_train.columns)\r\nX_train_scaled = X_all_scaled[:len(X_train)]\r\nX_test_scaled = X_all_scaled[len(X_train):]\r\n\r\n#其实真正的代码就在下面三行而已,前面的全部都是获取数据并预处理\r\n#我用这个模型接入了Titanic的数据然后单步调试发现了很多有意思的东西\r\n#至少这个贝叶斯优化器以及超参搜索的参数似乎是文档中鲜有提到的\r\nclf = ak.ImageClassifier()\r\n#我感觉好像单步调参挑不出问题在哪里呢,干脆还是\r\nclf.fit(X_train_scaled, Y_train)\r\n#results = clf.predict(x_test)\r\n\"\"\"\r\n\r\n#https://www.codetd.com/article/2940603\r\n#下面终于找到了一个autokeras的代码与数据\r\n#给的示例代码没有数据,这个做的真心垃圾吧\r\n#迄今为止吧,我觉得我的Titanic数据可以伪装成为图片进行学习的吧?\r\nimport os\r\nimport csv\r\nimport pickle\r\n\r\n#TRAIN_IMG_DIR = '/home/yourname/Documents/tensorflow/images/500pics2/train'\r\n#TRAIN_CSV_DIR = '/home/yourname/Documents/tensorflow/images/500pics2/train_labels.csv'\r\n#TEST_IMG_DIR = '/home/yourname/Documents/tensorflow/images/500pics2/test'\r\n#TEST_CSV_DIR = '/home/yourname/Documents/tensorflow/images/500pics2/test_labels.csv'\r\nTRAIN_IMG_DIR = \"C:/Users/1/Desktop/re/train\"\r\nTRAIN_CSV_DIR = \"C:/Users/1/Desktop/re/train_labels.csv\"\r\nTEST_IMG_DIR = \"C:/Users/1/Desktop/re/test\"\r\nTEST_CSV_DIR = \"C:/Users/1/Desktop/re/test_labels.csv\"\r\n\r\ndef mkcsv(img_dir, csv_dir):\r\n list = []\r\n list.append(['File Name','Label'])\r\n for file_name in os.listdir(img_dir):\r\n if file_name[0] == '3': #bus\r\n item = [file_name, 0]\r\n elif file_name[0] == '4': #dinosaur\r\n item = [file_name, 1]\r\n 
elif file_name[0] == '5': #elephant\r\n item = [file_name, 2]\r\n elif file_name[0] == '6': #flower\r\n item = [file_name, 3]\r\n else:\r\n item = [file_name, 4] #horse\r\n list.append(item)\r\n\r\n print(list)\r\n f = open(csv_dir, 'w', newline='')\r\n writer = csv.writer(f)\r\n writer.writerows(list)\r\n\r\nmkcsv(TRAIN_IMG_DIR, TRAIN_CSV_DIR)\r\nmkcsv(TEST_IMG_DIR, TEST_CSV_DIR)\r\n\r\n\r\nfrom tensorflow.keras.preprocessing import image\r\nimport os\r\n\r\n#TEST_IMG_DIR_INPUT = \"/home/yourname/Documents/tensorflow/images/500pics2/test_origin\"\r\n#TEST_IMG_DIR_OUTPUT = \"/home/yourname/Documents/tensorflow/images/500pics2/test\"\r\n#TRAIN_IMG_DIR_INPUT = \"/home/yourname/Documents/tensorflow/images/500pics2/train_origin\"\r\n#TRAIN_IMG_DIR_OUTPUT = \"/home/yourname/Documents/tensorflow/images/500pics2/train\"\r\nTEST_IMG_DIR_INPUT = \"C:/Users/1/Desktop/re/test\"\r\n#在win7下面这种写法是可以的,但是win10下面这种代码就无法运行\r\n#原来是可以下面这样的方式写的,原因是因为没有没有对应的文件夹\r\n#不过我觉得这个也太弱智了吧,居然不能够自己建立文件夹的么,居然给我报错\r\nTEST_IMG_DIR_OUTPUT = \"C:/Users/1/Desktop/re/test_scaled\"\r\n#TEST_IMG_DIR_OUTPUT = \"C:\\\\Users\\\\win7\\\\Desktop\\\\re\\\\test_scaled\"\r\nTRAIN_IMG_DIR_INPUT = \"C:/Users/1/Desktop/re/train\"\r\nTRAIN_IMG_DIR_OUTPUT = \"C:/Users/1/Desktop/re/train_scaled\"\r\n#TRAIN_IMG_DIR_OUTPUT = \"C:\\\\Users\\\\win7\\\\Desktop\\\\re\\\\train_scaled\"\r\nIMAGE_SIZE = 28\r\n\r\ndef format_img(input_dir, output_dir):\r\n for file_name in os.listdir(input_dir):\r\n path_name = os.path.join(input_dir, file_name)\r\n img = image.load_img(path_name, target_size=(IMAGE_SIZE, IMAGE_SIZE))\r\n path_name = os.path.join(output_dir, file_name)\r\n img.save(path_name)\r\n\r\nformat_img(TEST_IMG_DIR_INPUT, TEST_IMG_DIR_OUTPUT)\r\nformat_img(TRAIN_IMG_DIR_INPUT, TRAIN_IMG_DIR_OUTPUT)\r\n\r\n\r\n#这个可能是老版本的代码吧,居然出现了autokeras.image_supervised\r\n#妈卖批我居然Google了一下还没有查到代码还是我在Eclipse跳转查到的位置\r\n#from autokeras.image_supervised import load_image_dataset, ImageClassifier\r\nfrom autokeras.classifier import load_image_dataset, ImageClassifier\r\nfrom keras.models import load_model\r\nfrom keras.utils import plot_model\r\nfrom keras.preprocessing.image import load_img, img_to_array\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n#from pydev import pydevd这个写法是错误的,根本没有'pydev'这种东西呢。\r\n\r\n#TRAIN_CSV_DIR = '/home/yourname/Documents/tensorflow/images/500pics2/train_labels.csv'\r\n#TRAIN_IMG_DIR = '/home/yourname/Documents/tensorflow/images/500pics2/train'\r\n#TEST_CSV_DIR = '/home/yourname/Documents/tensorflow/images/500pics2/test_labels.csv'\r\n#TEST_IMG_DIR = '/home/yourname/Documents/tensorflow/images/500pics2/test'\r\n\r\nTRAIN_CSV_DIR = \"C:/Users/1/Desktop/re/train_labels.csv\"\r\nTRAIN_IMG_DIR = \"C:/Users/1/Desktop/re/train\"\r\nTRAIN_IMG_DIR_SCALED = \"C:/Users/1/Desktop/re/train_scaled\"\r\nTEST_CSV_DIR = \"C:/Users/1/Desktop/re/test_labels.csv\"\r\nTEST_IMG_DIR = \"C:/Users/1/Desktop/re/test\"\r\nTEST_IMG_DIR_SCALED = \"C:/Users/1/Desktop/re/test_scaled\"\r\n\r\n#PREDICT_IMG_PATH = '/home/yourname/Documents/tensorflow/images/500pics2/test/719.jpg'\r\nPREDICT_IMG_PATH = \"C:/Users/1/Desktop/re/test_scaled/719.jpg\"\r\n\r\n#MODEL_DIR = '/home/yourname/Documents/tensorflow/images/500pics2/model/my_model.h5'\r\n#MODEL_PNG = '/home/yourname/Documents/tensorflow/images/500pics2/model/model.png'\r\nMODEL_DIR = \"C:/Users/1/Desktop/re/model/my_model.h5\"\r\nMODEL_PNG = \"C:/Users/1/Desktop/re/model/model.png\"\r\nIMAGE_SIZE = 28\r\n\r\nif __name__ == '__main__':\r\n # 获取本地图片,转换成numpy格式\r\n #下面的两行代码读取的数据在clf.fit(train_data, train_labels, 
time_limit=1 * 60)的时候会出现一些错误\r\n #我觉得很费解的就是为什么下面的两行代码会造成clf.fit(train_data,报错说含有非数字的数据呢?\r\n #train_data, train_labels = load_image_dataset(csv_file_path=TRAIN_CSV_DIR, images_path=TRAIN_IMG_DIR)\r\n #test_data, test_labels = load_image_dataset(csv_file_path=TEST_CSV_DIR, images_path=TEST_IMG_DIR)\r\n #下面的这种写法似乎是可以执行到clf.fit(train_data, train_labels, time_limit=1 * 60)不报错,但是好像没执行出结果\r\n train_data, train_labels = load_image_dataset(csv_file_path=TRAIN_CSV_DIR, images_path=TRAIN_IMG_DIR_SCALED)\r\n test_data, test_labels = load_image_dataset(csv_file_path=TEST_CSV_DIR, images_path=TEST_IMG_DIR_SCALED)\r\n\r\n # 数据进行格式转换\r\n #to_csv写出来的数据非常的奇怪啊,我实在不知道怎么整的\r\n #直接执行下面的.astype('float32')居然直接报错了\r\n #pd.DataFrame(data=train_data).to_csv(\"C:/Users/1/Desktop/train_data.csv\")\r\n #pd.DataFrame(data=test_data).to_csv(\"C:/Users/1/Desktop/test_data.csv\")\r\n #难道是因为路径出问题了吗,划分出来的train_data怎么是这个尿性的呢\r\n #train_data应该是读取的像素点吧,怎么看都感觉没发现问题的呀?\r\n #如果只是除法的话,应该被注释掉也没有关系的吧???不能被注释否则type类型不对的吧没办法搜索\r\n #我仔细想了一下,大概是因为从TRAIN_IMG_DIR里面读取的数据是可能过多了吧\r\n #调试的时候以及写入文件的时候train_data出现了...,这大概就是非数字字符吧\r\n #如果是从TRAIN_IMG_DIR_SCALED中读取数据的时候,除了fit没结果其他都可以正常执行所以应该是上述的原因吧。\r\n #我了个飞天大草,使用TRAIN_IMG_DIR_SCALED路径并且使用下面的astype设置类型并做除法就可以了运行了。。。\r\n train_data = train_data.astype('float32') / 255\r\n test_data = test_data.astype('float32') / 255\r\n #那我再尝试一下是否可以不用做除法呢,我觉得说不定可以的吧\r\n #实验表明将会一直在控制台输出ModuleNotFoundError: No module named 'pydevd',这可真的是费解呢。\r\n #我好想要操了吧,似乎真的时不时的会出现,我看到一个解决方案居然是import pydevd,这尼玛有点秀吧。没有pydevd的吧。\r\n #train_data = train_data.astype('float32')\r\n #test_data = test_data.astype('float32')\r\n print(\"train data shape:\", train_data.shape)\r\n\r\n # 使用图片识别器\r\n clf = ImageClassifier(verbose=True)\r\n # 给其训练数据和标签,训练的最长时间可以设定,假设为1分钟,autokers会不断找寻最优的网络模型\r\n #下面这个写法_validate终于没有报之前说的必须是数字的错误了,但报错说x_train必须至少是二维的\r\n #clf.fit(train_labels, train_labels, time_limit=1 * 60)\r\n #那应该还是说明了一点,train_data中的非数字应该不是type=什么之类的,而可能是出现的...吧\r\n #这个fit、final_fit以及evaluate之间到底有什么区别哦\r\n clf.fit(train_data, train_labels, time_limit=1 * 60)\r\n \r\n \"\"\"\r\n # 找到最优模型后,再最后进行一次训练和验证\r\n clf.final_fit(train_data, train_labels, test_data, test_labels, retrain=True)\r\n # 给出评估结果\r\n #顺便看了一下classifier里面的evaluate、fit和final_fit函数\r\n #我个人感觉这些函数的接口设计的都是蛮奇怪的吧,也许可能是因为我不太懂深度学习??\r\n y = clf.evaluate(test_data, test_labels)\r\n print(\"evaluate:\", y)\r\n\r\n # 给一个图片试试预测是否准确\r\n #这里似乎读错了数据,只需要读入SCALED的数据即可\r\n #但是是否存在那种需要读入原数据的情况呢,因为不同\r\n #输入尺寸的神经网络才能够接受不一样的输入尺寸的数据吧\r\n img = load_img(PREDICT_IMG_PATH)\r\n x = img_to_array(img)\r\n x = x.astype('float32') / 255\r\n #因为训练数据似乎都是\r\n x = np.reshape(x, (1, IMAGE_SIZE, IMAGE_SIZE, 3))\r\n print(\"x shape:\", x.shape)\r\n\r\n # 最后的结果是一个numpy数组,里面是预测值4,意味着是马,说明预测准确\r\n #我刚才愣了一下为毛下面的不能够预测,突然想到是因为前面没有执行fit函数\r\n y = clf.predict(x)\r\n print(\"predict:\", y)\r\n \"\"\"\r\n\r\n \"\"\"\r\n # 导出我们生成的模型\r\n #clf.load_searcher().load_best_model().produce_keras_model().save(MODEL_DIR)\r\n #上面一行的代码爆出了下面的错误,错误的原因在于NetworkX 2的改变,这种问题在所难免吧\r\n #AttributeError: 'Graph' object has no attribute 'produce_keras_model'\r\n #可能是networkx版本的问题,但是pip install networkx==1.9.1或者2.0或者2.1都无法解决该问题\r\n #最后老子又将networkx改回1.11的版本了,免得我之前用的代码不能够运行或者出现异常\r\n #我觉得我开始失去耐心了还是采用pickle的办法写入到文件中进行保存吧,库中也是这么封装的\r\n #最关键的是我上午使用Google了那么多的东西都没有用,\r\n best_model = clf.load_searcher().load_best_model()\r\n #下面的两条语句已经输出了对象的类型了,接下来直接翻阅对应代码就可以了\r\n #print(type(clf.load_searcher())) #输出结果是\r\n #print(type(clf.load_searcher().load_best_model()))输出结果是\r\n #原来Anaconda安装以后的代码都存在于这个目录下D:\\Anaconda3\\Lib\\site-packages找到autokeras即可\r\n 
#这个bayesian里面的代码真的也是蛮奇怪的,我不太理解贝叶斯为啥需要计算layer的distance呢\r\n #文件保存的路径好像有点奇怪,可能是D:\\autokeras\\tmp吧,不然怎么会凭空多出这个文件夹呢。。\r\n #费了很大的劲还是没搞清楚这个到底是咋回事儿,可能是autokeras和tensorflow版本不匹配吧\r\n #而且文件似乎是乱存的,我觉得还是直接使用pickle存储吧,自己还能控制存储位置,他底层也是用pickle的\r\n best_model.produce_keras_model().save(MODEL_DIR)\r\n # 加载模型\r\n model = load_model(MODEL_DIR)\r\n # 将模型导出成可视化图片\r\n plot_model(model, to_file=MODEL_PNG)\r\n \"\"\"\r\n \r\n\r\n #算了还是直接使用pickle进行封装吧\r\n files = open(\"autokeras_best_model.pickle\", \"wb\")\r\n pickle.dump(clf, files)\r\n files.close()\r\n \r\n files = open(\"autokeras_best_model.pickle\", \"rb\")\r\n best_model = pickle.load(files)\r\n files.close()\r\n \r\n \r\n \"\"\"\r\n img = load_img(PREDICT_IMG_PATH)\r\n x = img_to_array(img)\r\n x = x.astype('float32') / 255\r\n \r\n #这一小串代码证明了,我的模型确实是被正确的存储了下来\r\n x = np.reshape(x, (1, IMAGE_SIZE, IMAGE_SIZE, 3))\r\n print(\"x shape:\", x.shape)\r\n\r\n y = best_model.predict(x)\r\n print(\"predict:\", y)\r\n \"\"\"\r\n \r\n #将模型进行可视化的显示咯,但是不论怎么设置环境好像都没用\r\n #终于找到解决方案了而且写在了 常见代码问题汇总.py中\r\n #但是现在出现了新的问题:'ImageClassifier' object has no attribute 'layers'\r\n #我总觉得这个问题和之前的模型无法存储有一定的关系,说不定是因为keras版本不够高\r\n #我找到了一条安装指定版本的命令 pip install keras==2.0.8\r\n #我个人觉得这个问题应该是keras和tensorflow的版本不匹配的问题吧\r\n #我现在的keras版本是2.2.2,刚才降到2.1.6了,然而还是这个问题咯\r\n #从报错而言绝壁是keras单方面的问题,肯定不是tensorflow方面的问题\r\n #乱输一个pip install keras==2.18会反馈所有的正确可以安装的版本哈\r\n #研究这个问题已经几个小时了,可能有4个小时了吧,\r\n #plot_model(best_model, to_file=MODEL_PNG)\r\n print(best_model)\r\n print(best_model.__doc__)\r\n print(best_model.__module__)\r\n #print(best_model.__name__)\r\n #print(best_model.__qualname__)\r\n #print(best_model.__self__)\r\n #print(best_model.__text_signature__)\r\n \r\n #AttributeError: 'ImageClassifier' object has no attribute 'layers'\r\n #其他的输出都一切正常,这样子模型根本没办法输出吧\r\n #但是在autokeras的官方示例代码里面根本没有存储这些功能吧\r\n #plot_model(best_model, to_file=MODEL_PNG)\r\n ","sub_path":"Example7.py","file_name":"Example7.py","file_ext":"py","file_size_in_byte":20763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"626061676","text":"import asyncio\nimport json\nimport os\nimport random\nimport sys\nfrom collections import defaultdict\nfrom typing import Dict\n\nimport discord\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\n\nfrom discord_classes.discord_guild import DiscordGuild\nfrom mysql_connector.basic_connector import BasicConnector\n\n# TODO Implement class style (entirely OO) version of bot\nclass DiscordBot:\n\n def __init__(self):\n self.guild_tables_to_build = None\n\n self.init_bot()\n\n def init_bot(self):\n pass\n\n def verify_env_existence(self):\n if not os.path.exists('./config/.env'):\n if not os.path.exists('./config'):\n os.mkdir('./config')\n with open('./config/.env', 'w') as dotenv_file:\n dotenv_file.write('DISCORD_TOKEN=\\nDISCORD_GUILD=')\n print('Enter .env variables in \"./config/.env\"', file=sys.stderr, flush=True)\n exit(1)\n\n def load_initial_env_variables(self):\n load_dotenv(dotenv_path='./config/.env')\n TOKEN = os.getenv('DISCORD_TOKEN')\n GUILD_NAMES = json.loads(os.getenv('DISCORD_GUILD'))\n\n async def build_tables(self, guild_id, tables=None):\n if not tables:\n return\n\n guild = discord.utils.get(bot.guilds, id=guild_id)\n\n for table_name in tables:\n table_build = implemented_table_builders[table_name]\n if not table_build:\n continue\n await table_build.builder_func(served_guilds[guild_id].mysql_conn,\n served_guilds[guild_id].build_custom_db_name(table_build.db_name),\n 
*table_build.get_static_build_params(),\n await table_build.get_channel_history(guild, limit=None))\n\n\nguild_tables_to_build = {\n 266837146717913088: ['kanan', 'quotes'], # 4dpolytopes\n 647244068706975763: [''], # vinnyputty\n}\n\n\nclass TableBuild:\n\n def __init__(self, builder_func, db_name, tb_name, tb_cols_init, tb_cols, channel_name):\n self.builder_func = builder_func\n self.db_name = db_name\n self.tb_name = tb_name\n self.tb_cols_init = tb_cols_init\n self.tb_cols = tb_cols\n self.channel_name = channel_name\n\n def get_static_build_params(self):\n return self.tb_name, self.tb_cols_init, self.tb_cols\n\n async def get_channel_history(self, guild, *, limit=200):\n channel = discord.utils.get(guild.text_channels, name=self.channel_name)\n channel_history = None\n if channel:\n channel = guild.get_channel(channel.id)\n channel_history = channel.history(limit=limit)\n return channel_history\n\n\nimplemented_table_builders: Dict[str, TableBuild] = defaultdict(lambda: None)\n\nserved_guilds: Dict[int, DiscordGuild] = {}\nserved_guilds_lock = asyncio.Lock()\n\n# client = discord.Client()\n\nbot = commands.Bot(command_prefix='^')\n\n\n# region Commands\n@bot.command(name='randomquote', aliases=['rq', 'randquote'], help='Responds with a random quote from corn')\nasync def random_quote(ctx, *args):\n # print(f'Message seen: \"{ctx.message.content}\" in channel: \"{ctx.message.channel}\"')\n # _, args = parse_command(ctx.message.content)\n if len(args) < 1:\n # return\n args = ['corn']\n if args[0] == '<@!189945609615048704>':\n args[0] = 'corn'\n await served_guilds_lock.acquire()\n discord_guild = served_guilds[ctx.guild.id]\n served_guilds_lock.release()\n response = discord_guild.mysql_conn.get_random_row('$'.join((os.getenv('QUOTES_DB_NAME'), str(ctx.guild.id))),\n args[0], os.getenv('QUOTES_TB_COLS'))[0]\n await ctx.send(response)\n\n\n@bot.command(name='kanan', aliases=['k', 'kana'], help='Responds with a random picture from kanan-channel')\nasync def random_kanan(ctx, *args):\n # @bot.group(pass_context=True)\n # async def john(ctx):\n # if ctx.invoked_subcommand is None:\n # await bot.say('https://i.imgur.com/rZWO3QB.png'.format(ctx))\n\n # print(f'Message seen: \"{ctx.message.content}\" in channel: \"{ctx.message.channel}\"')\n # _, args = parse_command(ctx.message.content)\n # if len(args) < 1:\n # # return\n # args = ['corn']\n # if args[0] == '<@!189945609615048704>':\n # args[0] = 'corn'\n args = ('kanan',)\n await served_guilds_lock.acquire()\n discord_guild = served_guilds[ctx.guild.id]\n served_guilds_lock.release()\n response = \\\n discord_guild.mysql_conn.get_random_row('$'.join((os.getenv('KANAN_DB_NAME'), str(ctx.guild.id))), args[0],\n os.getenv('KANAN_TB_COLS'))[0]\n if not response:\n await ctx.send(content='No kanan available. 
:(')\n else:\n # await ctx.send(response)\n await ctx.send(\n embed=discord.Embed(title='Here, have some Kanan <:kananayaya:696804621095796777>').set_image(url=response))\n\n\n# TODO split into scramble and unscramble and add option to scramble for specified amount of time\n@bot.command(name='togglescramble', aliases=['ts'], help='Toggles the message scrambler state for the selected member')\nasync def toggle_scramble(ctx, *args):\n if len(args) < 1:\n await ctx.send('You must specify a member to scramble/stop scrambling.')\n return\n await served_guilds_lock.acquire()\n discord_guild = served_guilds[ctx.guild.id]\n served_guilds_lock.release()\n member = ctx.guild.get_member(int(args[0][3:-1]))\n if member:\n member_row = discord_guild.mysql_conn.select_row(\n '$'.join((os.getenv('MESSAGE_SCRAMBLER_DB_NAME'), str(ctx.guild.id))),\n 'message_scrambler', os.getenv('MESSAGE_SCRAMBLER_TB_COLS'), select_clause=f'member={member.id}')\n if not member_row:\n member_row = discord_guild.mysql_conn.add_row(\n '$'.join((os.getenv('MESSAGE_SCRAMBLER_DB_NAME'), str(ctx.guild.id))),\n 'message_scrambler', os.getenv('MESSAGE_SCRAMBLER_TB_COLS'),\n row_values=(member.id, True), return_inserted_row=True)\n member_status = member_row[0]\n else:\n member_status = not member_row[1]\n discord_guild.mysql_conn.update_row(\n discord_guild.build_custom_db_name(os.getenv('MESSAGE_SCRAMBLER_DB_NAME')), 'message_scrambler',\n os.getenv('MESSAGE_SCRAMBLER_TB_COLS'), update_clause=f'status={member_status}',\n select_clause=f'member={member.id}')\n response = f'Message scrambling for {member.mention} turned **{\"ON\" if member_status else \"OFF\"}**'\n else:\n response = f'Member doesn\\'t exist in the server.'\n await ctx.send(response)\n\n\n# endregion\n\nasync def check_scramble_message(message, in_place=False):\n if message.content[0] == '^':\n return False\n await served_guilds_lock.acquire()\n discord_guild = served_guilds[message.guild.id]\n served_guilds_lock.release()\n member = message.author\n # member_row = discord_guild.mysql_conn.select_row(\n # '$'.join((os.getenv('MESSAGE_SCRAMBLER_DB_NAME'), str(message.guild.id))),\n # 'message_scrambler', os.getenv('MESSAGE_SCRAMBLER_TB_COLS'), select_clause=f'where member={member.id}')\n member_row = discord_guild.mysql_conn.select_row(\n '$'.join((os.getenv('MESSAGE_SCRAMBLER_DB_NAME'), str(message.guild.id))),\n 'message_scrambler', os.getenv('MESSAGE_SCRAMBLER_TB_COLS'), select_clause=f'member={member.id}')\n if not member_row:\n member_row = discord_guild.mysql_conn.add_row(\n '$'.join((os.getenv('MESSAGE_SCRAMBLER_DB_NAME'), str(message.guild.id))),\n 'message_scrambler', os.getenv('MESSAGE_SCRAMBLER_TB_COLS'),\n row_values=(member.id, True), return_inserted_row=True)\n member_status = member_row[1]\n if not member_status:\n return False\n if in_place:\n return await scramble_message_in_place(message)\n else:\n return await scramble_message_bot_overwrite(message)\n\n\nasync def scramble_message_bot_overwrite(message):\n message_content = message.content\n await message.delete()\n scrambled_message = await clean_scramble_string(message_content)\n response = f'{message.author.mention}: {scrambled_message}'\n await message.channel.send(response)\n return True\n\n\nasync def scramble_message_in_place(message):\n # Currently impossible because user is only allowed to edit messages that they have sent\n return False\n\n\nasync def clean_scramble_string(orig_string):\n orig_string = ''.join(filter(lambda s: s == ' ' or str.isalpha(s), orig_string))\n orig_string = 
list(orig_string.lower())\n random.shuffle(orig_string)\n scrambled_string = ''.join(orig_string)\n return scrambled_string\n\n\n# region Events\n@bot.event\nasync def on_ready():\n MEMBERS_TO_PRINT = os.getenv('MEMBERS_TO_PRINT')\n # FIXME maybe handle \"allowed\" servers differently (currently non-allowed servers are ignored with prejudice,\n # notification, or configurability\n for guild_name in GUILD_NAMES:\n guild = discord.utils.get(bot.guilds, name=guild_name)\n if guild:\n await served_guilds_lock.acquire()\n if guild.id in served_guilds:\n served_guilds_lock.release()\n continue\n served_guilds[guild.id] = DiscordGuild(guild=guild, bot=bot)\n # await served_guilds[guild.id].mysql_conn.build_kanan_table(\n # '$'.join((os.getenv('KANAN_DB_NAME'), str(guild.id))), 'kanan',\n # os.getenv('KANAN_TB_COLS_INIT'), '(link)', limit=None)\n await build_tables(guild.id, guild_tables_to_build[guild.id])\n served_guilds_lock.release()\n print(f'\\n------')\n print(f'{bot.user} is connected to the following guild: {guild.name} (id: {guild.id})')\n print(f'First {MEMBERS_TO_PRINT} members:')\n members = '\\n - '.join([member.name for member in guild.members[:20]])\n print(f' - {members}')\n print(f'------\\n')\n\n\n\n\n@bot.event\nasync def on_message(message):\n if message.author == bot.user:\n return\n print(f'Message seen: \"{message.content}\" in channel: \"{message.channel}\"')\n if str(message.channel) not in ['home', 'gaming', 'news']:\n if len(message.content):\n await bot.process_commands(message)\n await check_scramble_message(message)\n return\n\n if message.content == 'raise-exception':\n raise discord.DiscordException\n\n\n# endregion\n\n\ndef parse_command(command):\n command = command.split(' ')\n command, args = command[0], command[1:]\n return command, args\n\n\nasync def print_message_history(guild_id, channel_name, limit=-1):\n print(f'Printing message history of channel \"{channel_name}\":\\n')\n guild = discord.utils.get(bot.guilds, id=guild_id)\n channel = discord.utils.get(guild.text_channels, name=channel_name)\n channel_id = channel.id\n # print_message_history()\n if limit > 0:\n channel_history = bot.get_channel(channel_id).history(limit=limit)\n else:\n channel_history = bot.get_channel(channel_id).history()\n async for message in channel_history:\n print(message.content)\n\n\ndef main():\n # guild = discord.utils.get(bot.guilds, name=)\n # mysql_basic_connector = BasicConnecter()\n # mysql_basic_connector.connect_to_mysql()\n # mysql_client = connect_to_mysql()\n # mysql_cursor = mysql_client.cursor()\n # database_list = build_database_list(mysql_cursor)\n # print(f'Old database list: {database_list}')\n # verify_database_existence(mysql_cursor, db_name=os.getenv('QUOTES_DB_NAME'), database_list=database_list)\n # verify_database_existence(mysql_cursor, db_name=os.getenv('KANAN_DB_NAME'), database_list=database_list)\n # database_list = build_database_list(mysql_cursor)\n # print(f'Updated database list: {database_list}')\n #\n # mysql_dbclient = connect_to_mysql(database=os.getenv('QUOTES_DB_NAME'))\n # mysql_dbcursor = mysql_dbclient.cursor()\n # verify_table_existence(mysql_dbcursor, 'corn', os.getenv('QUOTES_TB_COLS_INIT'))\n\n implemented_table_builders['quotes'] = TableBuild(BasicConnector.build_table, os.getenv('QUOTES_DB_NAME'), 'corn',\n os.getenv('QUOTES_TB_COLS_INIT'),\n os.getenv('QUOTES_TB_COLS'),\n os.getenv('QUOTES_CHANNEL'))\n implemented_table_builders['kanan'] = TableBuild(BasicConnector.build_kanan_table, os.getenv('KANAN_DB_NAME'),\n 'kanan',\n 
os.getenv('KANAN_TB_COLS_INIT'),\n os.getenv('KANAN_TB_COLS'),\n os.getenv('KANAN_CHANNEL'))\n\n bot.run(TOKEN)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":13012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"363843355","text":"# This Python file uses the following encoding: utf-8\nfrom datetime import datetime\nimport jalali\n\ndef jalali_converter(gourg_date):\n str_gourg_date = \"%s-%s-%s\" % (gourg_date.year, gourg_date.month, gourg_date.day)\n jalai_date = jalali.Gregorian(str_gourg_date)\n persian_date = PersianDate(day=jalai_date.persian_day,\n month=jalai_date.persian_month,\n year=jalai_date.persian_year,\n time=gourg_date.time())\n return persian_date\n\n\nclass PersianDate:\n def __init__(self, day, month, year, time):\n self.day = day\n self.month = month\n self.year = year\n self.time = time\n\n\ndef now():\n gourg_date = datetime.now()\n return jalali_converter(gourg_date)\n\ndef get_month_name(number):\n if number>12 or number<1:\n raise \"not valid month!\"\n\n persian_months = ['فروردین', 'اردیبهشت', 'خرداد',\n 'تیر', 'مرداد', 'شهریور',\n 'مهر', 'آبان', 'آذر',\n 'دی', 'بهمن', 'اسفند']\n return persian_months[number-1]\n\n\ndef get_time(date):\n try:\n hour = date.time.hour\n minute = date.time.minute\n except:\n raise \"invalid date!\"\n\n return '%s:%s' % (hour, minute)\n\ndef is_valid_time(time):\n try:\n index = time.find(':')\n hour = int(time[0:index])\n minute = int(time[index+1:])\n if isinstance(hour, int) and isinstance(minute, int) and 0 <= hour < 24 and 0 <= minute < 60:\n return True\n return False\n except:\n return False\n\n\nprint(is_valid_time('21:00'))","sub_path":"PersianDate.py","file_name":"PersianDate.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"38642291","text":"from typing import Dict, Tuple, Union\nfrom collections import OrderedDict\nimport os\nfrom pathlib import Path\n\nfrom catalyst.core import utils\nfrom catalyst.core.callback import Callback, CallbackNode, CallbackOrder\nfrom catalyst.core.runner import IRunner\n\n\ndef _pack_runner(runner: IRunner):\n checkpoint = utils.pack_checkpoint(\n model=runner.model,\n criterion=runner.criterion,\n optimizer=runner.optimizer,\n scheduler=runner.scheduler,\n epoch_metrics=dict(runner.epoch_metrics),\n valid_metrics=dict(runner.valid_metrics),\n stage_name=runner.stage_name,\n epoch=runner.epoch,\n loader_name=runner.loader_name,\n loader_step=runner.loader_batch_step,\n global_epoch=runner.global_epoch,\n checkpoint_data=runner.checkpoint_data,\n main_metric=runner.main_metric,\n minimize_metric=runner.minimize_metric,\n valid_loader=runner.valid_loader,\n )\n return checkpoint\n\n\ndef _load_checkpoint(\n *, filename, runner: IRunner, load_full: bool = True\n) -> None:\n \"\"\"\n Load checkpoint from a file.\n\n Arguments:\n filename (str): path to checkpoint\n runner (IRunner): current runner\n load_full (bool): if true (default) then will be performed\n loading states for criterion, optimizer and scheduler.\n File should contain keys required for\n loading model (``'model_state_dict'``),\n criterion (``'criterion_state_dict'``) (only for full load),\n optimizer (``'optimizer_state_dict'``),\n scheduler (``'scheduler_state_dict'``).\n\n Raises:\n FileNotFoundError: when file specified in ``filename``\n is not exist.\n \"\"\"\n if not 
os.path.isfile(filename):\n raise FileNotFoundError(f\"No checkpoint found at {filename}!\")\n\n print(f\"=> Loading checkpoint {filename}\")\n checkpoint = utils.load_checkpoint(filename)\n\n if not runner.stage_name.startswith(\"infer\") and load_full:\n runner.stage_name = checkpoint[\"stage_name\"]\n runner.epoch = checkpoint[\"epoch\"]\n runner.global_epoch = checkpoint[\"global_epoch\"]\n # @TODO: should we also load,\n # checkpoint_data, main_metric, minimize_metric, valid_loader ?\n # epoch_metrics, valid_metrics ?\n\n if load_full:\n utils.unpack_checkpoint(\n checkpoint,\n model=runner.model,\n criterion=runner.criterion,\n optimizer=runner.optimizer,\n scheduler=runner.scheduler,\n )\n\n print(\n f\"loaded state checkpoint {filename} \"\n f\"(global epoch {checkpoint['global_epoch']}, \"\n f\"epoch {checkpoint['epoch']}, \"\n f\"stage {checkpoint['stage_name']})\"\n )\n else:\n utils.unpack_checkpoint(\n checkpoint, model=runner.model,\n )\n\n print(f\"loaded model checkpoint {filename}\")\n\n\ndef _required_files(logdir: str, load_map: Dict[str, str]) -> Dict[str, str]:\n \"\"\"\n Generate required files for load model, criterion,\n scheduler, optimizer specified in ``load_map``.\n\n Expected that ``load_map`` contains keys:\n ``\"model\"``, ``\"criterion\"``, ``\"optimizer\"``, ``\"scheduler\"``.\n Otherwise an empty dict will be generated.\n\n Arguments:\n logdir (str): directory with logs\n load_map (Dict[str, str]): dict with specification\n what should be loaded\n\n Returns:\n Mapping from file to parts required from this file.\n \"\"\"\n if load_map is None:\n return OrderedDict()\n\n default_states = {\"best\", \"best_full\", \"last\", \"last_full\"}\n required_full_checkpoint = [\"criterion\", \"optimizer\", \"scheduler\"]\n experiment_parts = [\"model\"] + required_full_checkpoint\n\n # keep required parts\n experiment_parts = list(\n filter(lambda part: part in load_map, experiment_parts)\n )\n\n # avoid unnecessary loading\n if \"model\" in experiment_parts and len(experiment_parts) > 1:\n required_full_checkpoint.append(\"model\")\n\n # mapping - : \n required_files = OrderedDict()\n for part in experiment_parts:\n fname = load_map[part]\n required_full = fname.endswith(\"_full\")\n # specified default state\n if fname in default_states:\n if part in required_full_checkpoint and not required_full:\n fname = fname + \"_full\"\n fname = f\"{logdir}/checkpoints/{fname}.pth\"\n # in other case specified path to checkpoint\n required_files[fname] = required_files.get(fname, []) + [part]\n return required_files\n\n\ndef _load_states_from_file_map(\n *, runner: IRunner, load_map: Dict[str, str]\n) -> None:\n \"\"\"\n Load state of a model, criterion, optimizer, scheduler\n from files specified in ``load_map``.\n\n Arguments:\n runner (IRunner): current runner\n load_map (Dict[str, str]): dict with mappings to load.\n Expected keys - ``'model'``, ``'criterion'``\n ``'optimizer'``, ``'scheduler'``, other keys will be\n ignored.\n Expected that values will be states (``'best'``,\n ``\"best_full\"``, ``\"last\"``, ``\"last_full\"``) or\n path to checkpoint.\n **NOTE:** for successful load criterion, optimizer,\n scheduler states required a full checkpoint.\n\n Raises:\n FileNotFoundError: when file/state specified in ``load_map``\n is not exist.\n \"\"\"\n required_files = _required_files(runner.logdir, load_map)\n\n for filename in required_files.keys():\n if not os.path.isfile(filename):\n raise FileNotFoundError(f\"No checkpoint found at {filename}!\")\n\n # extracting 
parts from files\n for filename, parts_to_load in required_files.items():\n print(f\"=> Loading {', '.join(parts_to_load)} from {filename}\")\n checkpoint = utils.load_checkpoint(filename)\n to_unpack = {part: getattr(runner, part) for part in parts_to_load}\n utils.unpack_checkpoint(checkpoint, **to_unpack)\n print(f\" loaded: {', '.join(parts_to_load)}\")\n\n\nclass BaseCheckpointCallback(Callback):\n \"\"\"Base class for all checkpoint callbacks.\"\"\"\n\n def __init__(self, metrics_filename: str = \"_metrics.json\"):\n \"\"\"\n Args:\n metrics_filename (str): filename to save metrics\n in checkpoint folder. Must ends on ``.json`` or ``.yml``\n \"\"\"\n super().__init__(order=CallbackOrder.External, node=CallbackNode.All)\n self.metrics_filename = metrics_filename\n self.metrics: dict = {}\n\n def get_checkpoint_suffix(self, checkpoint: dict) -> str:\n return \"checkpoint\"\n\n def save_metric(self, logdir: Union[str, Path], metrics: Dict) -> None:\n utils.save_config(\n metrics, f\"{logdir}/checkpoints/{self.metrics_filename}\"\n )\n\n def on_exception(self, runner: IRunner):\n exception = runner.exception\n if not utils.is_exception(exception):\n return\n\n try:\n checkpoint = _pack_runner(runner)\n suffix = self.get_checkpoint_suffix(checkpoint)\n suffix = f\"{suffix}.exception_{exception.__class__.__name__}\"\n utils.save_checkpoint(\n logdir=Path(f\"{runner.logdir}/checkpoints/\"),\n checkpoint=checkpoint,\n suffix=suffix,\n is_best=False,\n is_last=False,\n )\n metrics = self.metrics\n metrics[suffix] = runner.valid_metrics\n self.save_metric(runner.logdir, metrics)\n except Exception:\n pass\n\n\nclass CheckpointCallback(BaseCheckpointCallback):\n \"\"\"\n Checkpoint callback to save/restore your\n model/criterion/optimizer/scheduler.\n \"\"\"\n\n def __init__(\n self,\n save_n_best: int = 1,\n resume: str = None,\n resume_dir: str = None,\n metrics_filename: str = \"_metrics.json\",\n load_on_stage_start: Union[str, Dict[str, str]] = None,\n load_on_stage_end: Union[str, Dict[str, str]] = None,\n ):\n \"\"\"\n Args:\n save_n_best (int): number of best checkpoint to keep,\n if ``0`` then store only last state of model and\n ``load_on_stage_end`` should be one of\n ``last`` or ``last_full``.\n resume (str): path to checkpoint to load\n and initialize runner state\n resume_dir (str): directory with checkpoints,\n if specified in combination with ``resume``\n than resume checkpoint will be loaded from ``resume_dir``\n metrics_filename (str): filename to save metrics\n in checkpoint folder.\n Must ends on ``.json`` or ``.yml``\n load_on_stage_start (str or Dict[str, str]): load specified\n state/model at stage start.\n\n If passed **string** then will be performed initialization from\n specified state (``best``/``best_full``/``last``/``last_full``)\n or checkpoint file.\n\n If passed **dict** then will be performed initialization only\n for specified parts - model, criterion, optimizer, scheduler.\n\n Example:\n\n >>> # possible checkpoints to use:\n >>> # \"best\"/\"best_full\"/\"last\"/\"last_full\"\n >>> # or path to specific checkpoint\n >>> to_load = {\n >>> \"model\": \"path/to/checkpoint.pth\",\n >>> \"criterion\": \"best\",\n >>> \"optimizer\": \"last_full\",\n >>> \"scheduler\": \"best_full\",\n >>> }\n >>> CheckpointCallback(load_on_stage_start=to_load)\n\n All other keys instead of ``\"model\"``, ``\"criterion\"``,\n ``\"optimizer\"`` and ``\"scheduler\"`` will be ignored.\n\n If ``None`` or an empty dict (or dict without mentioned\n above keys) then no action is required 
at stage start and:\n\n - Config API - will be used best state of model\n - Notebook API - no action will be performed (will be\n used the last state)\n\n **NOTE:** Loading will be performed on all stages except first.\n\n **NOTE:** Criterion, optimizer and scheduler are optional keys\n and should be loaded from full checkpoint.\n\n Model state can be loaded from any checkpoint.\n\n When dict contains keys for model and some other part\n (for example ``{\"model\": \"last\", \"optimizer\": \"last\"}``)\n and they match in prefix (``\"best\"`` and\n ``\"best_full\"``) then will be loaded full checkpoint\n because it contains required states.\n load_on_stage_end (str or Dict[str, str]): load specified\n state/model at stage end.\n\n If passed **string** then will be performed initialization from\n specified state (``best``/``best_full``/``last``/``last_full``)\n or checkpoint file.\n\n If passed **dict** then will be performed initialization only\n for specified parts - model, criterion, optimizer, scheduler.\n Logic for dict is the same as for ``load_on_stage_start``.\n\n If ``None`` then no action is required at stage end\n and will be used the last runner.\n\n **NOTE:** Loading will be performed always at stage end.\n \"\"\"\n super().__init__(metrics_filename)\n _possible_states = {\n None,\n \"best\",\n \"last\",\n \"best_full\",\n \"last_full\",\n }\n assert save_n_best >= 0\n if save_n_best == 0:\n assert load_on_stage_end in (None, \"last\", \"last_full\")\n if isinstance(load_on_stage_start, str):\n assert load_on_stage_start in _possible_states\n if isinstance(load_on_stage_end, str):\n assert load_on_stage_end in _possible_states\n if resume_dir is not None:\n assert resume is not None\n\n self.save_n_best = save_n_best\n self.resume = resume\n self.resume_dir = resume_dir\n self.load_on_stage_start = load_on_stage_start\n self.load_on_stage_end = load_on_stage_end\n\n self.top_best_metrics = []\n self.metrics_history = []\n\n self._keys_from_state = [\"resume\", \"resume_dir\"]\n\n def get_checkpoint_suffix(self, checkpoint: dict) -> str:\n \"\"\"\n Create checkpoint filename suffix based on checkpoint data.\n\n Args:\n checkpoint (dict): checkpoint dict,\n should contain ``stage_name`` and ``epoch`` keys.\n \"\"\"\n result = f\"{checkpoint['stage_name']}.{checkpoint['epoch']}\"\n return result\n\n def process_metrics(self, last_valid_metrics: Dict[str, float]) -> Dict:\n \"\"\"\n Add last validation metrics to list of previous validation metrics\n and keep ``save_n_best`` metrics.\n\n Args:\n last_valid_metrics (dict): dict with metrics\n from last validation step.\n \"\"\"\n top_best_checkpoints = [\n (Path(filepath).stem, valid_metric)\n for (filepath, _, valid_metric) in self.top_best_metrics\n ]\n all_epochs_metrics = [\n (f\"epoch_{order_index}\", valid_metric)\n for (order_index, valid_metric) in enumerate(self.metrics_history)\n ]\n metrics = []\n if self.save_n_best > 0:\n best_valid_metrics = top_best_checkpoints[0][1]\n metrics = (\n [(\"best\", best_valid_metrics), (\"last\", last_valid_metrics)]\n + top_best_checkpoints\n + all_epochs_metrics\n )\n else:\n metrics = [(\"last\", last_valid_metrics)]\n self.metrics = OrderedDict(metrics)\n return self.metrics\n\n def truncate_checkpoints(self, minimize_metric: bool) -> None:\n \"\"\"\n Keep ``save_n_best`` checkpoints based on main metric.\n\n Args:\n minimize_metric (bool): if ``True`` then keep\n ``save_n_best`` checkpoints with the lowest/highest values\n of the main metric.\n \"\"\"\n self.top_best_metrics = 
sorted(\n self.top_best_metrics,\n key=lambda x: x[1],\n reverse=not minimize_metric,\n )\n if len(self.top_best_metrics) > self.save_n_best:\n last_item = self.top_best_metrics.pop(-1)\n last_filepath = Path(last_item[0])\n last_filepaths = last_filepath.parent.glob(\n last_filepath.name.replace(\".pth\", \"*\")\n )\n for filepath in last_filepaths:\n os.remove(filepath)\n\n def _save_checkpoint(\n self,\n logdir: Union[str, Path],\n suffix: str,\n checkpoint: Dict,\n is_best: bool,\n is_last: bool,\n ) -> Tuple[str, str]:\n \"\"\"\n Save checkpoint (simple and full).\n\n Args:\n logdir (str or Path object): directory for storing checkpoints\n suffix (str): checkpoint suffix\n checkpoint (dict): dict with checkpoint data\n is_best (bool): indicator to save best checkpoint,\n if true then will be saved two additional checkpoints -\n ``best`` and ``best_full``.\n is_last (bool): indicator to save the last checkpoint,\n if true then will be saved two additional checkpoints -\n ``last`` and ``last_full``.\n \"\"\"\n full_checkpoint_path = utils.save_checkpoint(\n logdir=Path(f\"{logdir}/checkpoints/\"),\n checkpoint=checkpoint,\n suffix=f\"{suffix}_full\",\n is_best=is_best,\n is_last=is_last,\n special_suffix=\"_full\",\n )\n exclude = [\"criterion\", \"optimizer\", \"scheduler\"]\n checkpoint_path = utils.save_checkpoint(\n checkpoint={\n key: value\n for key, value in checkpoint.items()\n if all(z not in key for z in exclude)\n },\n logdir=Path(f\"{logdir}/checkpoints/\"),\n suffix=suffix,\n is_best=is_best,\n is_last=is_last,\n )\n return (full_checkpoint_path, checkpoint_path)\n\n def process_checkpoint(\n self,\n logdir: Union[str, Path],\n checkpoint: Dict,\n is_best: bool,\n main_metric: str = \"loss\",\n minimize_metric: bool = True,\n ) -> None:\n \"\"\"\n Save checkpoint and metrics.\n\n Args:\n logdir (str or Path object): directory for storing checkpoints\n checkpoint (dict): dict with checkpoint data\n is_best (bool): indicator to save best checkpoint,\n if true then will be saved two additional checkpoints -\n ``best`` and ``best_full``.\n main_metric (str): metric to use for selecting the best model\n minimize_metric (bool): indicator for selecting best metric,\n if true then best metric will be the metric with\n the lowest value, otherwise with the greatest value.\n \"\"\"\n _, filepath = self._save_checkpoint(\n logdir=logdir,\n checkpoint=checkpoint,\n suffix=self.get_checkpoint_suffix(checkpoint),\n is_best=is_best,\n is_last=True,\n )\n valid_metrics = checkpoint[\"valid_metrics\"]\n checkpoint_metric = valid_metrics[main_metric]\n metrics_record = (filepath, checkpoint_metric, valid_metrics)\n self.top_best_metrics.append(metrics_record)\n self.metrics_history.append(metrics_record)\n self.truncate_checkpoints(minimize_metric=minimize_metric)\n metrics = self.process_metrics(valid_metrics)\n self.save_metric(logdir, metrics)\n\n @staticmethod\n def _load_runner(\n runner: IRunner,\n mapping: Union[str, Dict[str, str]],\n load_full: bool = False,\n ) -> None:\n \"\"\"\n Selects a loading method based on type of mapping.\n\n Args:\n runner (IRunner): current runner\n mapping (str or dict): mapping to use for loading\n load_full (bool): load a full model, used only\n when mapping type is string\n\n \"\"\"\n if isinstance(mapping, str):\n if mapping in {\"best\", \"best_full\", \"last\", \"last_full\"}:\n checkpoint = f\"{runner.logdir}/checkpoints/{mapping}.pth\"\n else:\n checkpoint = mapping\n _load_checkpoint(\n filename=checkpoint, runner=runner, load_full=load_full,\n 
)\n elif isinstance(mapping, dict):\n _load_states_from_file_map(\n runner=runner, load_map=mapping,\n )\n\n def on_stage_start(self, runner: IRunner) -> None:\n \"\"\"\n Setup model for stage.\n\n **NOTE:** If CheckpointCallback initialized with ``resume``\n (as path to checkpoint file) or ``resume`` (as filename)\n and ``resume_dir`` (as directory with file)\n then will be performed loading checkpoint.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n for key in self._keys_from_state:\n value = getattr(runner, key, None)\n if value is not None:\n setattr(self, key, value)\n\n if self.resume_dir is not None:\n self.resume = str(self.resume_dir) + \"/\" + str(self.resume)\n\n if self.resume is not None:\n self._load_runner(runner, mapping=self.resume, load_full=True)\n self.resume = None\n else:\n _exists_checkpoint = False\n _load_full = False\n if isinstance(self.load_on_stage_start, str):\n _exists_checkpoint = os.path.isfile(\n \"{}/checkpoints/{}.pth\".format(\n runner.logdir, self.load_on_stage_start\n )\n )\n _load_full = self.load_on_stage_start.endswith(\"full\")\n elif isinstance(self.load_on_stage_start, dict):\n required_files = _required_files(\n runner.logdir, self.load_on_stage_start\n ).keys()\n _exists_checkpoint = all(\n os.path.isfile(file) for file in required_files\n )\n\n if self.load_on_stage_start is not None and _exists_checkpoint:\n self._load_runner(\n runner,\n mapping=self.load_on_stage_start,\n load_full=_load_full,\n )\n\n def on_epoch_end(self, runner: IRunner) -> None:\n \"\"\"\n Collect and save checkpoint after epoch.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n if (\n runner.stage_name.startswith(\"infer\")\n or runner.is_distributed_worker\n ):\n return\n\n if self.save_n_best > 0:\n checkpoint = _pack_runner(runner)\n self.process_checkpoint(\n logdir=runner.logdir,\n checkpoint=checkpoint,\n is_best=runner.is_best_valid,\n main_metric=runner.main_metric,\n minimize_metric=runner.minimize_metric,\n )\n\n def on_stage_end(self, runner: IRunner) -> None:\n \"\"\"\n Show information about best checkpoints during the stage and\n load model specified in ``load_on_stage_end``.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n if (\n runner.stage_name.startswith(\"infer\")\n or runner.is_distributed_worker\n ):\n return\n log_message = \"Top best models:\\n\"\n # store latest state\n if self.save_n_best == 0:\n checkpoint = _pack_runner(runner)\n _, filepath = self._save_checkpoint(\n logdir=runner.logdir,\n checkpoint=checkpoint,\n suffix=\"last\",\n is_best=True, # will duplicate current (last) as best\n is_last=False, # don't need that because current state is last\n )\n metrics = self.process_metrics(checkpoint[\"valid_metrics\"])\n self.save_metric(runner.logdir, metrics)\n main_metric_value = metrics[\"last\"][runner.main_metric]\n log_message += \"{filepath}\\t{metric:3.4f}\".format(\n filepath=filepath, metric=main_metric_value\n )\n else:\n log_message += \"\\n\".join(\n [\n \"{filepath}\\t{metric:3.4f}\".format(\n filepath=filepath, metric=checkpoint_metric\n )\n for filepath, checkpoint_metric, _ in self.top_best_metrics\n ]\n )\n print(log_message)\n _not_required_load_states = {\"last\", \"last_full\"}\n if (\n isinstance(self.load_on_stage_end, str)\n and self.load_on_stage_end not in _not_required_load_states\n and self.save_n_best > 0\n ):\n _load_full = (\n self.load_on_stage_end.endswith(\"full\")\n if isinstance(self.load_on_stage_end, str)\n else False\n )\n self._load_runner(\n runner, 
mapping=self.load_on_stage_end, load_full=_load_full,\n )\n elif isinstance(self.load_on_stage_end, dict) and self.save_n_best > 0:\n to_load = {\n k: v\n for k, v in self.load_on_stage_end.items()\n if v not in _not_required_load_states\n }\n self._load_runner(runner, mapping=to_load)\n\n\nclass IterationCheckpointCallback(BaseCheckpointCallback):\n \"\"\"Iteration checkpoint callback to save your model/criterion/optimizer.\"\"\"\n\n def __init__(\n self,\n save_n_last: int = 1,\n period: int = 100,\n stage_restart: bool = True,\n metrics_filename: str = \"_metrics_iter.json\",\n load_on_stage_end: str = \"best_full\",\n ):\n \"\"\"\n Args:\n save_n_last (int): number of last checkpoint to keep\n period (int): save the checkpoint every `period`\n stage_restart (bool): restart counter every stage or not\n metrics_filename (str): filename to save metrics\n in checkpoint folder. Must ends on ``.json`` or ``.yml``\n load_on_stage_end (str): name of the model to load\n at the end of the stage.\n You can use ``best``, ``best_full`` (default)\n to load the best model according to validation metrics,\n or ``last`` ``last_full`` to use just the last one.\n \"\"\"\n super().__init__(metrics_filename)\n self.save_n_last = save_n_last\n self.period = period\n self.stage_restart = stage_restart\n self._iteration_counter = 0\n self.last_checkpoints = []\n self.metrics_history = []\n self.load_on_stage_end = load_on_stage_end\n\n def get_checkpoint_suffix(self, checkpoint: dict) -> str:\n \"\"\"\n Create checkpoint filename suffix based on checkpoint data.\n\n Args:\n checkpoint (dict): checkpoint dict,\n should contain ``stage_name`` and ``epoch`` keys.\n \"\"\"\n result = (\n f\"{checkpoint['stage_name']}.\"\n f\"epoch.{checkpoint['epoch']}.\"\n f\"iter.{self._iteration_counter}\"\n )\n\n return result\n\n def process_metrics(self) -> Dict:\n \"\"\"\n Update metrics with last ``save_n_last`` checkpoints.\n \"\"\"\n n_last_checkpoints = [\n (Path(filepath).stem, batch_values)\n for (filepath, batch_values) in self.last_checkpoints\n ]\n all_epochs_metrics = [\n (f\"epoch_{order_index}\", valid_metric)\n for (order_index, valid_metric) in enumerate(self.metrics_history)\n ]\n\n metrics = OrderedDict(n_last_checkpoints + all_epochs_metrics)\n self.metrics = metrics\n return self.metrics\n\n def truncate_checkpoints(self, **kwargs) -> None:\n \"\"\"\n Keep ``save_n_best`` checkpoints based on main metric.\n \"\"\"\n if len(self.last_checkpoints) > self.save_n_last:\n item = self.last_checkpoints.pop(0)\n top_filepath = item[0]\n os.remove(top_filepath)\n\n def process_checkpoint(\n self,\n logdir: Union[str, Path],\n checkpoint: Dict,\n batch_metrics: Dict[str, float],\n ):\n \"\"\"\n Save checkpoint and metrics.\n\n Args:\n logdir (str or Path object): directory for storing checkpoints\n checkpoint (dict): dict with checkpoint data\n batch_metrics (dict): dict with metrics based on a few batches\n \"\"\"\n filepath = utils.save_checkpoint(\n logdir=Path(f\"{logdir}/checkpoints/\"),\n checkpoint=checkpoint,\n suffix=self.get_checkpoint_suffix(checkpoint),\n is_best=False,\n is_last=False,\n )\n\n self.last_checkpoints.append((filepath, batch_metrics))\n self.truncate_checkpoints()\n\n self.metrics_history.append(batch_metrics)\n\n metrics = self.process_metrics()\n self.save_metric(logdir, metrics)\n print(f\"\\nSaved checkpoint at {filepath}\")\n\n def on_stage_start(self, runner: IRunner):\n \"\"\"\n Reset iterations counter.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n if 
self.stage_restart:\n self._iteration_counter = 0\n\n def on_batch_end(self, runner: IRunner):\n \"\"\"\n Save checkpoint based on batches count.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n self._iteration_counter += 1\n if self._iteration_counter % self.period == 0:\n checkpoint = _pack_runner(runner)\n self.process_checkpoint(\n logdir=runner.logdir,\n checkpoint=checkpoint,\n batch_metrics=runner.batch_metrics,\n )\n\n def on_stage_end(self, runner: IRunner):\n \"\"\"\n Load model specified in ``load_on_stage_end``.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n if self.load_on_stage_end in [\"best\", \"best_full\"]:\n resume = (\n f\"{runner.logdir}/checkpoints/{self.load_on_stage_end}.pth\"\n )\n print(f\"Loading {self.load_on_stage_end} model from {resume}\")\n _load_checkpoint(\n filename=resume,\n runner=runner,\n load_full=self.load_on_stage_end.endswith(\"full\"),\n )\n\n\n__all__ = [\"CheckpointCallback\", \"IterationCheckpointCallback\"]\n","sub_path":"catalyst/core/callbacks/checkpoint.py","file_name":"checkpoint.py","file_ext":"py","file_size_in_byte":29074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"451261408","text":"\"\"\"\nThe implementation of SAMME algorithm for multi-class Adaboost\nRevised with updating weight in one iteration\nBase classifier: Chou-liu tree\n\"\"\"\nimport csv\nimport time\nimport math\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\nimport ChowLiu as CL\n\n\"\"\"\nconstruct experimental data set, data should be in the form of list of list\nlabel is the position of class\n\"\"\"\nstart_time = time.time()\ndata = []\n\nwith open(\"data/car/car.data\") as file:\n content = csv.reader(file, delimiter=',')\n for row in content:\n data.append(row)\n\n# get the size of the data-set\nlabel = 6\nK = 4\nn = len(data)\nM = 300\nW = [1. 
/ n] * n\nC = []\nError = []\n\n\ndef benchmark(t_data, C):\n correct = 0.\n for d in t_data:\n scores = defaultdict(float)\n for model in C:\n scores[CL.predict_label(d, model[0])] += model[1]\n if d[label] == max(scores, key=scores.get):\n correct += 1\n correct = correct / len(data)\n Error.append(1 - correct)\n print(\"The accuracy for up to\", len(C), \"round is:\", correct)\n return correct\n\n\nfor m in range(M):\n CLT = CL.ChowLiuTree(data, label, W)\n e = CLT.error_rate()\n C.append([CLT, math.log((1 / e - 1) * (K - 1))])\n for i in range(n):\n W[i] = W[i] * (K - 1) / (K * e) if CLT.cache[i] == 0 else W[i] / (K * (1 - e))\n if benchmark(data, C) == 1:\n break\n\nprint(\"The running time is: \", time.time() - start_time)\nfig = plt.figure()\nplt.plot(Error)\nfig.suptitle(\"Car Evaluation\")\nplt.ylabel('Training Error')\nplt.xlabel(\"Boosting Round\")\nplt.show()\n","sub_path":"SAMME_car.py","file_name":"SAMME_car.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"56916864","text":"from aiogram.types import Message\n\nimport zmanim_bot.keyboards.inline\nfrom zmanim_bot import api\nfrom zmanim_bot.misc import dp\nfrom zmanim_bot.texts.single import buttons, messages\nfrom zmanim_bot.tracking import track\n\n\n@dp.message_handler(text=buttons.sm_candle)\n@track('Candle lighting selection')\nasync def settings_menu_cl(msg: Message):\n current_cl = await api.get_or_set_cl()\n kb = zmanim_bot.keyboards.inline.get_cl_settings_keyboard(current_cl)\n await msg.reply(messages.settings_cl, reply_markup=kb)\n\n\n@dp.message_handler(text=buttons.sm_havdala)\n@track('Havdala selection')\nasync def settings_menu_havdala(msg: Message):\n current_havdala = await api.get_or_set_havdala()\n kb = zmanim_bot.keyboards.inline.get_havdala_settings_keyboard(current_havdala)\n await msg.reply(messages.settings_havdala, reply_markup=kb)\n\n\n@dp.message_handler(text=buttons.sm_zmanim)\n@track('Zmanim selection')\nasync def settings_menu_zmanim(msg: Message):\n current_zmanim = await api.get_or_set_zmanim()\n kb = zmanim_bot.keyboards.inline.get_zmanim_settings_keyboard(current_zmanim)\n await msg.reply(messages.settings_zmanim, reply_markup=kb)\n\n","sub_path":"zmanim_bot/handlers/text/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"431805510","text":"from django.db import transaction\nfrom rest_framework import serializers\nfrom books.models import (Book, Genre, Author, BookAuthor)\n\n\n# class BookAuthorSerializer(serializers.HyperlinkedModelSerializer):\n# id = serializers.ReadOnlyField(source=\"author.id\")\n# name = serializers.ReadOnlyField(source=\"author.name\")\n\n# class Meta:\n# model = BookAuthor\n\n# fields = ('id', 'name', 'role')\n\n\nclass BookSerializer(serializers.ModelSerializer):\n # authors = BookAuthorSerializer(source='bookauthor_set', many=True)\n \n class Meta:\n model = Book\n fields = (\n 'id', 'name', 'authors', 'description',\n 'genre', 'genre_name', 'created'\n )\n \n @transaction.atomic\n def create(self, validated_data):\n book = Book.objects.create(**validated_data)\n if \"authors\" in self.initial_data:\n authors = self.initial_data.get(\"authors\")\n for author in authors:\n author_id = author.get(\"author\")\n role = author.get(\"role\")\n author_instance = Author.objects.get(pk=author_id)\n BookAuthor(book=book, author=author_instance, 
role=role).save()\n book.save()\n return book\n\nclass GenreSerializer(serializers.ModelSerializer):\n class Meta:\n model = Genre\n fields = ('id', 'name')\n\n\nclass AuthorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Author\n fields = ('id', 'first_name', 'middle_name', 'last_name')","sub_path":"django-vue/backend/backend/books/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"305550644","text":"import pyglet\nimport os\nimport sys\nimport zlib\nimport base64\n\nfrom urllib.parse import unquote\nimport xml.etree.ElementTree as ET\n\nworking_dir = os.path.dirname(os.path.realpath(__file__))\nimages_dir = os.path.join(working_dir, 'images')\n\npyglet.resource.path = [working_dir, images_dir]\npyglet.resource.reindex()\n\ndef inflate(b, b64=False):\n \"\"\"~2016 draw.io started compressing 'using standard deflate'\n https://about.draw.io/extracting-the-xml-from-mxfiles/\n experience has shown this is deflate WITH NO HEADER\n \"\"\"\n if b64: # optional, additionally base64 decode\n b = base64.b64decode(b)\n return unquote(zlib.decompress(b, -15).decode('utf8'))\n\ndef sprite(batch, image=\"redsquare.png\", x=0, y=0):\n image = pyglet.resource.image(image)\n return pyglet.sprite.Sprite(img=image,\n x=x,\n y=y,\n batch=batch)\n\ndef mapFromDrawio(drawioxml):\n tree = ET.parse(drawioxml)\n root = tree.getroot()\n content = ET.fromstring(inflate(root[0].text, True))\n return (root[0].attrib.get('name', \"Window 1\"), content)\n","sub_path":"simulator/resources/load_resources.py","file_name":"load_resources.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"185206096","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n\n\n'''\nCreated on Jan 10, 2017\n\n@author: hegxiten\n'''\nimport sys\nfrom imposm.parser import OSMParser\nimport geo.haversine as haversine\nimport numpy\nimport time\nfrom scipy import spatial\nimport csv\nimport codecs\nfrom sphinx.versioning import levenshtein_distance\n\ndefault_encoding='utf-8'\nif sys.getdefaultencoding()!=default_encoding:\n reload(sys)\n sys.setdefaultencoding(default_encoding)\n\nPI=3.14159265358\n\ndef extract_station_nodes(FOLDER,FILE,CONCURRENCYVAL,GLOBALROUNDINGDIGITS):\n \n station_dict={}\n station_dict_complete={}\n subway_nodes_complete=set()\n subway_ways_from_relations=set()\n\n def collect_subway_nodes():\n def imposm_extract_subway_nodes_from_relations(relations):\n target_relation_tags=[\"subway\",\"light_rail\",\"tram\"]\n for osmid, tags, members in relations:\n if tags.get(\"route\",\"\") in target_relation_tags or tags.get(\"railway\") in target_relation_tags:\n for m in members:\n if m[1]==\"node\":\n subway_nodes_complete.add(m[0])\n if m[1]==\"way\":\n subway_ways_from_relations.add(m[0])\n \n def imposm_extract_subway_nodes_from_ways(ways):\n target_way_tags=[\"subway\",\"light_rail\",\"tram\"]\n for osmid, tags, refs in ways:\n if tags.get(\"railway\",\"\") in target_way_tags or osmid in subway_ways_from_relations:\n for r in refs:\n subway_nodes_complete.add(r)\n \n \n OSMParser(concurrency=CONCURRENCYVAL, relations_callback=imposm_extract_subway_nodes_from_relations).parse(FOLDER+FILE)\n OSMParser(concurrency=CONCURRENCYVAL, ways_callback=imposm_extract_subway_nodes_from_ways).parse(FOLDER+FILE)\n \n def station_nodes_from_areas():\n def 
imposm_extract_railway_station_nodes_from_areas(areas):\n target_area_tags=[\"station\"]\n excluding_area_tags=[\"light_rail\",\"subway\"]\n for osmid, tags, refs in areas:\n '''areas are using the same key as ways in OpenStreetMap, while areas are \"closed ways\"'''\n if tags.get(\"railway\",\"\") in target_area_tags and tags.get(\"station\",\"\") not in excluding_area_tags:\n station_dict[refs[0]]=[tags.get(\"name\",\"N/A\")]\n station_dict_complete[refs[0]]=[tags]\n '''the substitution node for the area of the station is to be refined'''\n \n def imposm_nodes_substitute_for_area_stations(coords):\n '''must running after imposm_extract_station_from_areas for some induced station substitution nodes are not extracted by OSMParser callbacks'''\n for osmid, lon, lat in coords:\n if osmid in station_dict and len(station_dict[osmid])==1:\n station_dict[osmid].append(lat)\n station_dict[osmid].append(lon)\n station_dict[osmid].append(\"railwaysuffix\")\n station_dict_complete[osmid].append(lat)\n station_dict_complete[osmid].append(lon)\n station_dict_complete[osmid].append(\"railwaysuffix\")\n OSMParser(concurrency=CONCURRENCYVAL, ways_callback=imposm_extract_railway_station_nodes_from_areas).parse(FOLDER+FILE) \n OSMParser(concurrency=CONCURRENCYVAL, coords_callback=imposm_nodes_substitute_for_area_stations).parse(FOLDER+FILE)\n \n \n def station_nodes_from_nodes():\n def imposm_extract_railway_station_nodes_from_nodes(nodes):\n target_node_tags=[\"station\"]\n excluding_node_tags=[\"light_rail\",\"subway\"]\n for osmid, tags, (lon,lat) in nodes:\n if tags.get(\"railway\",\"\") in target_node_tags and tags.get(\"station\",\"\") not in excluding_node_tags: \n station_dict[osmid]=[tags.get(\"name\",\"N/A\"),lat,lon]\n station_dict_complete[osmid]=[tags, lat, lon]\n OSMParser(concurrency=CONCURRENCYVAL, nodes_callback=imposm_extract_railway_station_nodes_from_nodes).parse(FOLDER+FILE)\n\n \n def station_nodes_from_ways():\n def imposm_extract_railway_station_nodes_from_ways(ways):\n '''get accurate railway stations from exact railways ways instead of getting wrong subway stations'''\n target_way_tags=[\"rail\"]\n for osmid, tags, refs in ways:\n if tags.get(\"railway\",\"\") in target_way_tags:\n for i in refs:\n if i in station_dict:\n station_dict[i].append(\"railwaysuffix\")\n OSMParser(concurrency=CONCURRENCYVAL, ways_callback=imposm_extract_railway_station_nodes_from_ways).parse(FOLDER+FILE)\n \n def station_nodes_from_relations():\n def imposm_extract_railway_stations_from_relations(relations):\n '''get accurate railway stations from exact railways relations instead of getting wrong subway stations'''\n target_relation_tags=[\"train\",\"rail\"]\n for osmid, tags, members in relations:\n if tags.get(\"route\") in target_relation_tags or tags.get(\"railway\") in target_relation_tags:\n for i in members:\n if i[1]==\"node\" and (i[0] in station_dict):\n station_dict[i[0]].append(\"railwaysuffix\")\n OSMParser(concurrency=CONCURRENCYVAL, relations_callback=imposm_extract_railway_stations_from_relations).parse(FOLDER+FILE)\n \n def filterout():\n def filterout_subway_station_nodes_in_dic():\n for s in subway_nodes_complete:\n if s in station_dict and station_dict[s][-1]!=\"railwaysuffix\":\n del station_dict[s]\n \n def filterout_repeating_stations():\n '''repeating station string processing is to be refined'''\n repeating_station_set=set()\n station_nodes_coord=[]\n stationmap=[]\n for s in station_dict:\n station_nodes_coord.append((station_dict[s][1],station_dict[s][2]))\n 
stationmap.append(s)\n tree = spatial.KDTree(station_nodes_coord)\n for s in station_dict:\n similarstation_dict={}\n similarstation_dict[s]=[station_dict[s][0],station_dict[s][1],station_dict[s][2]]\n res=tree.query((station_dict[s][1],station_dict[s][2]), k=3)\n for i in res[1]:\n a=station_dict[s][0]\n b=station_dict[stationmap[i]][0]\n if a and b:\n if haversine.hav_distance(station_dict[s][1],station_dict[s][2],station_dict[stationmap[i]][1],station_dict[stationmap[i]][2])<0.05:\n if levenshtein_distance(station_dict[s][0], station_dict[stationmap[i]][0])<=1 or a.split(\" \")[:2]==b.split(\" \")[:2]:\n if len(a)<=len(b):\n station_dict[stationmap[i]][0]=station_dict[s][0]\n else:\n station_dict[s][0]=station_dict[stationmap[i]][0]\n iterated=set()\n for s in station_dict:\n if s in iterated:\n continue\n else:\n nearbystation_dict={}\n nearbystation_dict[s]=[station_dict[s][0],station_dict[s][1],station_dict[s][2]]\n res=tree.query((station_dict[s][1],station_dict[s][2]), k=5)\n for i in res[1]:\n nearbystation_dict[stationmap[i]]=[station_dict[stationmap[i]][0],station_dict[stationmap[i]][1],station_dict[stationmap[i]][2]]\n keys=nearbystation_dict.keys()\n values=nearbystation_dict.values()\n namelist=[]\n for i in range(len(values)):\n namelist.append(values[i][0])\n repeatlist=[]\n for i in range(len(values)):\n if namelist.count(namelist[i])>=2 and namelist[i]:\n repeatlist.append(keys[i])\n if len(repeatlist)==0:\n continue\n else:\n iterated.add(s)\n iterated=iterated|set(repeatlist)\n stationtokeep=None\n for i in range(len(repeatlist)):\n if station_dict[repeatlist[i]][-1]=='railway':\n stationtokeep=repeatlist.pop(i)\n repeating_station_set=repeating_station_set|set(repeatlist)\n break\n if stationtokeep:\n continue\n else:\n taglength=[]\n for r in repeatlist: \n taglength.append(len(station_dict_complete[r][0]))\n repeatlist.pop(taglength.index(max(taglength)))\n repeating_station_set=repeating_station_set|set(repeatlist)\n \n '''#First Step: remove absolute repeating stations\n for k,v in nearbystation_dict.iteritems():\n if v[0] not in namelist:\n stationidlist.append(k)\n namelist.append(v[0])\n else:\n IDindex=namelist.index(v[0])\n existcoord=(station_dict[stationidlist[IDindex]][1],station_dict[stationidlist[IDindex]][2])\n newcoord=(v[1],v[2])\n distance=haversine_distance(existcoord[1], existcoord[0], newcoord[1], newcoord[0])\n if distance>3:\n stationidlist.append(k)\n namelist.append(v[0])\n else:\n repeating_station_set.add(k)\n #Second Step: remove repeating stations whose names are in small edit distances\n '''\n for rs in repeating_station_set:\n if rs in station_dict:\n #print station_dict[rs][0]\n del station_dict[rs] \n filterout_subway_station_nodes_in_dic()\n filterout_repeating_stations()\n \n print(\"Begin: Parse 1---> Extract all stations and kicking out subway or repeating stations...\") \n startt=time.time() \n collect_subway_nodes()\n station_nodes_from_areas()\n station_nodes_from_nodes()\n station_nodes_from_relations()\n station_nodes_from_ways()\n filterout()\n stopt=time.time() \n print(\"Finish: Parse 1---> Extract all stations and kicking out subway or repeating stations. 
Time:(\"+str(stopt-startt)+\"s)\")\n return station_dict\n\ndef output_station_csv(FOLDER,FILE,station_dict):\n target = codecs.open(FOLDER+FILE+\"_stations.csv\", 'w',encoding='utf-8')\n for x in station_dict:\n '''Export csv file for all stations'''\n target.write(str(x)+\",$\"+station_dict[x][0]+\"$,\"+str(station_dict[x][1])+\",\"+str(station_dict[x][2])+\"\\n\")\n '''Example row: >>1234(osmid),$Illinois Terminal$($name$),40.11545(latitude),-88.24111(longitude)<<'''\n #print station_dict[x][0]\n target.close()\n\nif __name__ == '__main__':\n print (\"===you're in test mode of station_nodes.py===\")\n FILE='beijing_china_latest.osm.pbf'\n FOLDER='/home/hegxiten/workspace/data/'+FILE+'/'\n CONCURRENCYVAL=4\n GLOBALROUNDINGDIGITS=5\n \n stationdictionary=extract_station_nodes(FOLDER,FILE,CONCURRENCYVAL,GLOBALROUNDINGDIGITS)\n output_station_csv(FOLDER,FILE,stationdictionary)\n print (\"===test mode of station_nodes.py terminated===\")","sub_path":"WorldRailNetwork_WRN_workspace/src/station_nodes.py","file_name":"station_nodes.py","file_ext":"py","file_size_in_byte":11862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"137466048","text":"#!/usr/bin/python\n#Outputs pending task for rofi\nimport sys, os\n\nfilename=\"/home/light/.task/pending.data\"\nnum_lines = sum(1 for line in open(filename))\nwith open(filename) as f:\n\tcontent = f.readlines()\n# you may also want to remove whitespace characters like `\\n` at the end of each line\ncontent = [x.strip() for x in content] \n# print(content[2][14:-104])\nstring=\"\"\nfor i in range(num_lines):\n\tif content[i][-47] == 'd':\n\t\tcontinue\n\tstring += str(i+1) + \". \" +content[i][14:-104] \n\tif i != num_lines-1:\n\t\tstring += \" | \"\nprint(string)\n# [description:\"Fill the Hall vacation form on Pingala\" end:\"1594113644\" entry:\"1594113641\" modified:\"1594113644\" status:\"deleted\" uuid:\"e4262455-6c66-41fc-8aa6-edbd0386dc80\"]\n","sub_path":"polybar/scripts/todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"184730282","text":"# Copyright 2015 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\nimport datetime\nimport json\nimport logging\n\nfrom components import auth\nfrom components import config\nfrom google.appengine.api import namespace_manager\nfrom google.appengine.ext import ndb\nfrom testing_utils import testing\n\nimport main\nimport mock\n\nclass ConsoleAppApiTest(testing.EndpointsTestCase):\n\n api_service_cls = main.ConsoleAppApi\n\n def testTimeseriesUpdate(self):\n # TODO(norulez): Verify timeseries is correctly overwritten when UI \n # endpoints are implemented.\n points = [{'time': 1.0,\n 'value': 10.0}]\n fields = [{'key': 'project_id',\n 'value': 'chromium'}]\n request = {'timeseries': [\n {'points': points,\n 'fields': fields,\n 'metric': 'disk_used'}]}\n self.mock(auth, 'is_group_member', lambda _: True)\n response = self.call_api('timeseries_update', request).json_body\n self.assertEquals(response, {})\n # Calling the function a second time in order to test that the console\n # updates existing data in the datastore.\n self.call_api('timeseries_update', request)\n # Calling the function with an empty fields list tests for anonymous graphs.\n request = {'timeseries': [\n {'points': [],\n 'fields': [],\n 'metric': ''}]}\n self.call_api('timeseries_update', request)\n\n\nclass UIApiTest(testing.EndpointsTestCase):\n\n api_service_cls = main.UIApi\n\n def testGetProjects(self): \n self.mock(config, 'get_project_configs', mock.Mock())\n self.mock(auth, 'get_current_identity', mock.Mock())\n self.mock(auth, 'is_group_member', mock.Mock(return_value=False))\n auth.get_current_identity.return_value = auth.Identity('user', 'a@a.com')\n configs = {\n \"infra\": (\"888\", mock.Mock(\n access=['group:all','a@a.com','user:b@a.com'])), \n \"v8\": (\"888666\", mock.Mock(access=['group:all','a@a.com']))\n }\n config.get_project_configs.return_value = configs\n response = self.call_api('get_projects').json_body\n self.assertEquals(len(response['configs']), 2)\n\n auth.get_current_identity.return_value = auth.Identity('user', 'b@b.com')\n response = self.call_api('get_projects').json_body\n self.assertEquals(len(response.keys()), 0)\n\n\n auth.is_group_member.side_effect = lambda name: name == 'all'\n response = self.call_api('get_projects').json_body\n self.assertEquals(len(response['configs']), 2)\n\n def testGetGraphs(self):\n self.mock(config, 'get_project_config', mock.Mock())\n self.mock(auth, 'get_current_identity', mock.Mock())\n self.mock(auth, 'is_group_member', mock.Mock(return_value=False))\n mock_class1 = mock.Mock(access=['group:all','a@a.com','user:b@a.com'])\n mock_class1.configure_mock(name=\"infra.git\")\n cfg = (\"888\", mock_class1)\n config.get_project_config.return_value = cfg\n\n auth.get_current_identity.return_value = auth.Identity('user', 'a@a.com')\n \n namespace_manager.set_namespace('projects.infra')\n points = [main.PointModel(time = 1.0,\n value= 10.0)]\n fields = [main.FieldModel(field_key='project_id', value='infra')]\n ts = main.TimeSeriesModel(\n points=points, fields=fields, metric='disk_used')\n\n ts.put()\n \n response = self.call_api('get_graphs', {\"project_id\":'infra'}).json_body\n self.assertEquals(len(response['timeseries']), 1)\n\n # User doesn't have access to the project.\n auth.get_current_identity.return_value = auth.Identity('user', 'b@b.com')\n response = self.call_api('get_graphs', {\"project_id\":'infra'}).json_body\n self.assertEquals(len(response.keys()), 0)\n\n 
auth.is_group_member.return_value = True\n response = self.call_api('get_graphs', {\"project_id\":'infra'}).json_body\n self.assertEquals(len(response['timeseries']), 1)\n\n auth.is_group_member.return_value = True\n config.get_project_config.return_value = (None, None)\n response = self.call_api('get_graphs', {\"project_id\":'v8'}).json_body\n self.assertEquals(len(response.keys()), 0)\n \n # Project does not exist.\n auth.is_group_member.return_value = True\n config.get_project_config.return_value = (None, None)\n response = self.call_api('get_graphs', {\"project_id\":'123'}).json_body\n self.assertEquals(len(response.keys()), 0)\n\n\nclass CronTest(testing.AppengineTestCase):\n \n @property\n def app_module(self):\n return main.WEBAPP\n\n @mock.patch('main.time_now')\n def test_get(self, time_now_mock): \n start_time = datetime.datetime(2015, 8, 3, 8, 0, 43) \n points = [main.PointModel(time = 1.0, value= 10.0)]\n fields = [main.FieldModel(field_key='project_id', value='infra')]\n ts1 = main.TimeSeriesModel(\n points=points, fields=fields,metric='disk_used')\n time_now_mock.return_value = start_time\n ts1.update_timestamp()\n ts1.put()\n\n points = [main.PointModel(time = 3.0, value= 10.0)]\n fields = [main.FieldModel(field_key='project_id', value='v8')]\n ts2 = main.TimeSeriesModel(\n points=points, fields=fields,metric='disk_used')\n time_now_mock.return_value = start_time + datetime.timedelta(hours=25)\n ts2.update_timestamp()\n ts2.put() \n\n time_now_mock.return_value = start_time + datetime.timedelta(hours=27)\n self.test_app.get('/tasks/clean_outdated_graphs')\n self.assertEquals(main.TimeSeriesModel.query().count(), 1)\n\n\nclass TimeSeriesTest(testing.AppengineTestCase):\n\n @property\n def app_module(self):\n return main.WEBAPP\n\n def test_post(self):\n points = [{'time': 1.0,\n 'value': 10.0}]\n fields = [{'key': 'project_id',\n 'value': 'chromium'}]\n request = {'timeseries': [\n {'points': points,\n 'fields': fields,\n 'metric': 'disk_used'}]}\n self.mock(auth, 'is_group_member', lambda _: True)\n self.mock(main.TimeSeriesHandler, 'xsrf_token_enforce_on', [])\n headers = {'content-type': 'application/json'}\n self.test_app.post('/timeseries_update', json.dumps(request), headers)\n","sub_path":"appengine/chrome_infra_console/test/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":6073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"276329069","text":"\nimport pandas as pd\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import cohen_kappa_score\nfrom scipy.stats import mode\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.model_selection import train_test_split\n\n\nimport xgboost as xgb\nfrom xgboost import XGBClassifier\nfrom xgboost import plot_importance\nfrom matplotlib import pyplot\nimport shap\n\nos.listdir('../input/data-science-bowl-2019')\n\n### %time\nkeep_cols = ['event_id', 'game_session', 'installation_id', 'event_count',\n 'event_code','title' ,'game_time', 'type', 'world','timestamp']\ntrain=pd.read_csv('../input/data-science-bowl-2019/train.csv',usecols=keep_cols)\ntrain_labels=pd.read_csv('../input/data-science-bowl-2019/train_labels.csv',\n 
usecols=['installation_id','game_session','accuracy_group'])\ntest=pd.read_csv('../input/data-science-bowl-2019/test.csv',usecols=keep_cols)\nsubmission=pd.read_csv('../input/data-science-bowl-2019/sample_submission.csv')\ntrain.shape,train_labels.shape\nx=train_labels['accuracy_group'].value_counts()\nsns.barplot(x.index,x)\nnot_req=(set(train.installation_id.unique()) - set(train_labels.installation_id.unique()))\ntrain_new=~train['installation_id'].isin(not_req)\ntrain.where(train_new,inplace=True)\ntrain.dropna(inplace=True)\ntrain['event_code']=train.event_code.astype(int)\ndef extract_time_features(df):\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n df['month'] = df['timestamp'].dt.month\n df['hour'] = df['timestamp'].dt.hour\n df['year'] = df['timestamp'].dt.year\n df['dayofweek'] = df['timestamp'].dt.dayofweek\n df['weekofyear'] = df['timestamp'].dt.weekofyear\n return df\ntime_features=['month','hour','year','dayofweek','weekofyear']\ndef prepare_data(df):\n df=extract_time_features(df)\n \n df=df.drop('timestamp',axis=1)\n #df['timestamp']=pd.to_datetime(df['timestamp'])\n #df['hour_of_day']=df['timestamp'].map(lambda x : int(x.hour))\n \n\n join_one=pd.get_dummies(df[['event_code','installation_id','game_session']],\n columns=['event_code']).groupby(['installation_id','game_session'],\n as_index=False,sort=False).agg(sum)\n\n agg={'event_count':sum,'game_time':['sum','mean'],'event_id':'count'}\n\n join_two=df.drop(time_features,axis=1).groupby(['installation_id','game_session']\n ,as_index=False,sort=False).agg(agg)\n \n join_two.columns= [' '.join(col).strip() for col in join_two.columns.values]\n \n\n join_three=df[['installation_id','game_session','type','world','title']].groupby(\n ['installation_id','game_session'],as_index=False,sort=False).first()\n \n join_four=df[time_features+['installation_id','game_session']].groupby(['installation_id',\n 'game_session'],as_index=False,sort=False).agg(mode)[time_features].applymap(lambda x: x.mode[0])\n \n join_one=join_one.join(join_four)\n \n join_five=(join_one.join(join_two.drop(['installation_id','game_session'],axis=1))). \\\n join(join_three.drop(['installation_id','game_session'],axis=1))\n \n return join_five\n\n\n\njoin_train=prepare_data(train)\ncols=join_train.columns.to_list()[2:-3]\njoin_train[cols]=join_train[cols].astype('int16')\n\n\njoin_test=prepare_data(test)\ncols=join_test.columns.to_list()[2:-3]\njoin_test[cols]=join_test[cols].astype('int16')\ncols=join_test.columns[2:-12].to_list()\ncols.append('event_id count')\ncols.append('installation_id')\ndf=join_test[['event_count sum','game_time mean','game_time sum',\n 'installation_id']].groupby('installation_id',as_index=False,sort=False).agg('mean')\n\ndf_two=join_test[cols].groupby('installation_id',as_index=False,\n sort=False).agg('sum').drop('installation_id',axis=1)\n\ndf_three=join_test[['title','type','world','installation_id']].groupby('installation_id',\n as_index=False,sort=False).last().drop('installation_id',axis=1)\n\ndf_four=join_test[time_features+['installation_id']].groupby('installation_id',as_index=False,sort=False). 
\\\n agg(mode)[time_features].applymap(lambda x : x.mode[0])\n\nfinal_train=pd.merge(train_labels,join_train,on=['installation_id','game_session'],\n how='left').drop(['game_session'],axis=1)\n\n#final_test=join_test.groupby('installation_id',as_index=False,sort=False).last().drop(['game_session','installation_id'],axis=1)\nfinal_test=(df.join(df_two)).join(df_three.join(df_four)).drop('installation_id',axis=1)\ndf=final_train[['event_count sum','game_time mean','game_time sum','installation_id']]. \\\n groupby('installation_id',as_index=False,sort=False).agg('mean')\n\ndf_two=final_train[cols].groupby('installation_id',as_index=False,\n sort=False).agg('sum').drop('installation_id',axis=1)\n\ndf_three=final_train[['accuracy_group','title','type','world','installation_id']]. \\\n groupby('installation_id',as_index=False,sort=False). \\\n last().drop('installation_id',axis=1)\n\ndf_four=join_train[time_features+['installation_id']].groupby('installation_id',as_index=False,sort=False). \\\n agg(mode)[time_features].applymap(lambda x : x.mode[0])\n\n\n\nfinal_train=(df.join(df_two)).join(df_three.join(df_four)).drop('installation_id',axis=1)\nfinal_train.shape,final_test.shape\nlen(set(final_train.columns) & set(final_test.columns))\nfinal=pd.concat([final_train,final_test])\nencoding=['type','world','title']\nfor col in encoding:\n lb=LabelEncoder()\n lb.fit(final[col])\n final[col]=lb.transform(final[col])\n \nfinal_train=final[:len(final_train)]\nfinal_test=final[len(final_train):]\n\n\n \n\nX_train=final_train.drop('accuracy_group',axis=1)\ny_train=final_train['accuracy_group']\ndef model(X_train,y_train,final_test,n_splits=3):\n scores=[]\n pars = {\n 'colsample_bytree': 0.8, \n 'learning_rate': 0.08,\n 'max_depth': 10,\n 'subsample': 1,\n 'objective':'multi:softprob',\n 'num_class':4,\n 'eval_metric':'mlogloss',\n 'min_child_weight':3,\n 'gamma':0.25,\n 'n_estimators':500\n }\n\n kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)\n y_pre=np.zeros((len(final_test),4),dtype=float)\n final_test=xgb.DMatrix(final_test.drop('accuracy_group',axis=1))\n\n\n for train_index, val_index in kf.split(X_train, y_train):\n train_X = X_train.iloc[train_index]\n val_X = X_train.iloc[val_index]\n train_y = y_train[train_index]\n val_y = y_train[val_index]\n xgb_train = xgb.DMatrix(train_X, train_y)\n xgb_eval = xgb.DMatrix(val_X, val_y)\n\n xgb_model = xgb.train(pars,\n xgb_train,\n num_boost_round=1000,\n evals=[(xgb_train, 'train'), (xgb_eval, 'val')],\n verbose_eval=False,\n early_stopping_rounds=20\n )\n\n val_X=xgb.DMatrix(val_X)\n pred_val=[np.argmax(x) for x in xgb_model.predict(val_X)]\n score=cohen_kappa_score(pred_val,val_y,weights='quadratic')\n scores.append(score)\n print('choen_kappa_score :',score)\n\n pred=xgb_model.predict(final_test)\n y_pre+=pred\n\n pred = np.asarray([np.argmax(line) for line in y_pre])\n print('Mean score:',np.mean(scores))\n \n return xgb_model,pred\nxgb_model,pred=model(X_train,y_train,final_test,5)\nsub=pd.DataFrame({'installation_id':submission.installation_id,'accuracy_group':pred})\nsub.to_csv('submission.csv',index=False)\n\nfig, ax = plt.subplots(figsize=(10,10))\nxgb.plot_importance(xgb_model, max_num_features=50, height=0.5, ax=ax,importance_type='gain')\nplt.show()\nfig, ax = plt.subplots(figsize=(10,10))\nxgb.plot_importance(xgb_model, max_num_features=50, height=0.5, ax=ax,importance_type='weight')\nplt.show()\nshap_values = shap.TreeExplainer(xgb_model).shap_values(X_train)\nshap.summary_plot(shap_values, X_train, 
plot_type=\"bar\")\nshap.summary_plot(shap_values[3], X_train)\nshap.summary_plot(shap_values[0], X_train)\nX_train,X_test,y_train,y_test=train_test_split(X_train,y_train,test_size=.1)\nmodel = XGBClassifier()\nmodel.fit(X_train, y_train)\nthreshold = np.sort(model.feature_importances_)[40:]\nfor thresh in threshold:\n # select features using threshold\n selection = SelectFromModel(model, threshold=thresh, prefit=True)\n select_X_train = selection.transform(X_train)\n # train model\n selection_model = XGBClassifier()\n selection_model.fit(select_X_train, y_train)\n # eval model\n select_X_test = selection.transform(X_test)\n y_pred = selection_model.predict(select_X_test)\n predictions = [round(value) for value in y_pred]\n accuracy = cohen_kappa_score(y_test, predictions)\n print(\"Thresh=%.3f, n=%d, cohen kappa score: %.2f%%\" % (thresh, select_X_train.shape[1], accuracy*100.0))\n \n \n ","sub_path":"sources/xgboost-feature-selection-dsbowl.py","file_name":"xgboost-feature-selection-dsbowl.py","file_ext":"py","file_size_in_byte":9089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"57000059","text":"import image\nimport cv2\nimport preProcessing as pp\nimport featureExtraction as fe\nimport boto3\n\ndatasetBucket = 'signature-dataset'\nnameImage = '001_01'\nurlImage = 'training/sigcomp11-offlinetrainingset/genuine/'\n\ndef lambda_handler(event,context):\n s3client = boto3.client('s3')\n s3client.download_file(datasetBucket, urlImage+nameImage+'.PNG', '/tmp/'+nameImage+'.PNG')\n \n originalImage = image.loadImage('/tmp/', nameImage+'.PNG')\n\n resizedImage = pp.resizeImage(originalImage)\n image.saveImage(resizedImage, '/tmp/', 'resizedImage.jpg')\n\n treatHImage = pp.treatHImage(resizedImage)\n image.saveImage(treatHImage, '/tmp/', 'treatHImage.jpg')\n\n treatedLImage = pp.treatedLImage(resizedImage)\n image.saveImage(treatedLImage, '/tmp/', 'treatedLImage.jpg')\n\n grayImage = fe.convertGrayScale(resizedImage)\n image.saveImage(grayImage, '/tmp/', 'grayImage.jpg')\n\n edgeImage = fe.convertEdgeImage(resizedImage)\n image.saveImage(edgeImage, '/tmp/', 'edgeImage.jpg')\n\n binaryImage = fe.convertBinaryImage(resizedImage)\n image.saveImage(edgeImage, '/tmp/', 'edgeImage.jpg')\n\n segments = []\n segments = fe.segmentImage(grayImage, 4, 4)\n\n index = 0\n for piece in segments:\n index += 1\n image.saveImage(piece, '/tmp/', 'piece' + str(index) + '.jpg')\n\n hist = fe.getHistogram(piece)\n image.saveHistogramPlotAsImage(hist, '/tmp/', 'Histpiece' + str(index)+'.png')\n\n resizedImageHistogram = fe.getHistogram(resizedImage)\n image.saveHistogramPlotAsImage(resizedImageHistogram, '/tmp/', 'resizedImageHistogram.png')\n\n grayImageHistogram = fe.getHistogram(grayImage)\n image.saveHistogramPlotAsImage(grayImageHistogram, '/tmp/', 'grayImageHistogram.png')\n\n binaryImageHistogram = fe.getHistogram(binaryImage)\n image.saveHistogramPlotAsImage(binaryImageHistogram, '/tmp/', 'binaryImageHistogram.png')\n \n return \"200+\"\n","sub_path":"PythonOpenCv/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"204245605","text":"from .stratAvance import StratAvance\n\nclass StratAvanceplus(StratAvance):\n \"\"\"\n Cette Stratégie permet de ralentir progressivement le robot vers la distance self.dst\n Afin de s'approcher le plus précisement possible de la distance voulu sans la dépasser\n \"\"\"\n def __init__(self, robot, 
distance, vitesse):\n StratAvance.__init__(self,robot,distance,vitesse)\n self.ralenti = 0 # étape de la stratégie\n \n def step(self):\n angleg = (self.robot.get_motor_position()[0]*self.robot.WHEEL_CIRCUMFERENCE)/360\n angled = (self.robot.get_motor_position()[1]*self.robot.WHEEL_CIRCUMFERENCE)/360\n \n super().step()\n \n #Si on arrive au 4/5 de la distance à parcourir on divise la vitesse du robot par 2\n if(self.ralenti == 0 and (angleg >= self.dst*(4/5) or angled >= self.dst*(4/5))):\n self.robot.set_motor_dps(1, (self.vit/2))\n self.robot.set_motor_dps(2, (self.vit/2))\n self.vit = (self.vit/2)\n self.ralenti = 1 # changement d'étape\n \n #Si on arrive au 5/6 de la distance à parcourir on divise encore la vitesse par 2\n if(self.ralenti == 1 and (angleg >= self.dst*(5/6) or angled >= self.dst*(5/6))):\n self.robot.set_motor_dps(1, (self.vit/2))\n self.robot.set_motor_dps(2, (self.vit/2))\n self.vit = (self.vit/2)\n self.ralenti = 2 # Changement d'étape\n \n","sub_path":"strategie/stratAvanceplus.py","file_name":"stratAvanceplus.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"86638851","text":"#! usr/bin/python\n# coding=utf-8\nimport tornado.web\nimport pickle\nfrom read.pointreader import PointReader\nfrom write.pointwriter import PointWriter\nimport time\nimport json\nimport datetime\nimport math\nimport types\nimport numpy as np\nimport uuid\nfrom sklearn.decomposition import PCA\nfrom operator import itemgetter\n\ng_PointReader = PointReader();\ng_PointWriter = PointWriter();\n\nairlineMap = {'APG': 'Air People International', 'GUY': 'Air Guyane Express', 'KAL': 'Korean Air', 'KMF': 'Kam Air',\n 'MAS': 'Malaysia Airlines', 'LHN': 'Express One International', 'KMZ': 'Comores Aviation',\n 'SKP': 'Skytrans Airlines', 'VTA': 'Air Tahiti', 'ERH': 'Era Aviation', 'PNR': 'PAN Air',\n 'BAB': 'Bahrain Air', 'JOL': 'Atyrau Air Ways', 'OMA': 'Oman Air', 'WIG': 'Wiggins Airways',\n 'MYW': 'MyAir', 'NAO': 'North American Airlines', 'DKN': 'Kingfisher Red', 'UGX': 'East African',\n 'PAG': 'Perimeter Aviation (Perimeter Airlines)', 'GLO': 'Gol Transportes Aéreos',\n 'AAL': 'American Airlines', 'BEA': 'Best Air', 'HKA': 'Superior Aviation', 'NOS': 'Neos',\n 'VRD': 'Virgin America', 'NIG': 'Aero Contractors', 'IZM': 'Izair', 'LAA': 'Libyan Airlines',\n 'CES': 'China Eastern Airlines', 'UMK': 'Yuzmashavia', 'NOV': 'Nova Airline', 'HSV': 'Direktflyg',\n 'VBW': 'Air Burkina', 'AEB': 'Aero Benin', 'IMP': 'Hellenic Imperial Airways', 'TAR': 'Tunisair',\n 'JLJ': 'J-Air', 'ASQ': 'Atlantic Southeast Airlines', 'PVV': 'Continental Airways',\n 'ERT': 'Eritrean Airlines', 'APW': 'Arrow Air', 'MPE': 'Canadian North', 'LOT': 'LOT Polish Airlines',\n 'VLO': 'Varig Logistica (VarigLog)', 'PWF': 'Private Wings Flugcharter',\n 'ACI': 'Aircalin (Air Caledonie International)', 'SPM': 'Air Saint Pierre', 'SEQ': 'Sky Eyes',\n 'CRQ': 'Air Creebec', 'PAS': 'Pelita Air Service', 'IBB': 'Binter Canarias', 'QTR': 'Qatar Airways',\n 'VAL': 'Voyageur Airways', 'BEL': 'Brussels Airlines', 'ECA': 'Eurocypria Airlines', 'TZT': 'Air Zambezi',\n 'ATI': 'Aero-Tropics Air Services', 'ISW': 'Islas Airways', 'TGW': 'Tiger Airways',\n 'BBR': 'Santa Barbara Airlines', 'DAL': 'Delta Air Lines', 'CBB': 'Cargo B Airlines', 'ICD': 'Icaro Air',\n 'BMR': 'BMI Regional', 'STU': 'Servicios de Transportes Aereos Fueguinos',\n 'GLG': 'Aerolineas Galapagos (Aerogal)', 'SCX': 'Sun Country Airlines', 'NCH': 'Chanchangi Airlines',\n 'RPH': 
'Republic Express Airlines', 'AXM': 'AirAsia', 'DHL': 'Astar Air Cargo', 'JAZ': 'JALways',\n 'PCE': 'Pace Airlines', 'GOW': 'GoAir', 'ANS': 'Andes Líneas Aéreas', 'BTV': 'Batavia Air',\n 'BBD': 'Bluebird Cargo', 'ELY': 'El Al Israel Airlines', 'BCS': 'European Air Transport',\n 'WOA': 'World Airways', 'AZA': 'Alitalia', 'HKE': 'Hong Kong Express Airways', 'TUI': 'TUIfly',\n 'UCR': 'Aero-Charter Ukraine', 'GFT': 'Gulfstream International Airlines', 'NTN': 'National Airways',\n 'TWE': 'Transwede Airways', 'URJ': 'Star Air Aviation', 'BZH': 'Brit Air', 'MSI': 'Motor Sich',\n 'DER': 'Deer Jet', 'LCO': 'LAN Cargo', 'AKX': 'Air Nippon Network Co. Ltd.', 'HEJ': 'Hellas Jet',\n 'DNV': 'Aeroflot-Don', 'SCH': 'CHC Airways', 'VUR': 'VIP Ecuador', 'AVI': \"L'Avion\",\n 'AEE': 'Aegean Airlines', 'AIC': 'Air India', 'KLM': 'KLM Royal Dutch Airlines',\n 'EXL': 'Sunshine Express Airlines', 'NAY': 'Navegacion y Servicios Aéreos Canarios (NAYSA)',\n 'COA': 'Continental Airlines', 'JOS': 'DHL de Guatemala', 'BSK': 'Miami Air International',\n 'SDY': 'Island Express', 'SQC': 'Singapore Airlines Cargo', 'BLE': 'Blue Line',\n 'LTU': 'LTU International', 'ARE': 'Aires, Aerovias de Integracion Regional, S.A.',\n 'VLG': 'Vueling Airlines', 'FPO': 'Europe Airpost', 'SVK': 'Air Slovakia', 'HPA': 'Pearl Airways',\n 'SIA': 'Singapore Airlines', 'POE': 'Porter Airlines', 'CLX': 'Cargolux', 'CSA': 'Czech Airlines',\n 'AYT': 'Ayit Aviation and Tourism', 'VEC': 'Venezolana', 'FHY': 'Freebird Airlines',\n 'DIR': 'Dirgantara Air Service', 'AVN': 'Air Vanuatu', 'CAL': 'China Airlines', 'SKX': 'Skyways Express',\n 'AXB': 'Air India Express', 'ANA': 'All Nippon Airways', 'TMG': 'Tri-MG Intra Asia Airlines',\n 'CCI': 'Capital Cargo International Airlines', 'MYD': 'Maya Island Air', 'BDA': 'Blue Dart Aviation',\n 'BRI': 'Brindabella Airlines', 'JTA': 'Japan Transocean Air', 'ASE': 'Airstars',\n 'GWY': 'USA 3000 Airlines', 'NXA': 'Air Next', 'FAJ': 'Air Fiji', 'XAK': 'Airkenya Express',\n 'AAW': 'Afriqiyah Airways', 'NAC': 'Northern Air Cargo', 'ESY': 'EasyFly', 'GWI': 'Germanwings',\n 'UBD': 'United Airways (Bangladesh)', 'GOT': 'WaltAir', 'MLD': 'Air Moldova',\n 'AXK': 'African Express Airways', 'LMT': 'Almaty Aviation', 'IAW': 'Iraqi Airways', 'TSC': 'Air Transat',\n 'XAX': 'AirAsia X', 'SEY': 'Air Seychelles', 'FLI': 'Atlantic Airways', 'MNO': 'Mango',\n 'DNM': 'Denim Air', 'ABQ': 'airblue', 'UEA': 'United Eagle Airlines',\n 'EIA': 'Evergreen International Airlines', 'CUA': 'China United Airlines',\n 'VRN': 'VRG Linhas Aereas (VARIG)', 'TUP': 'Aviastar-TU', 'EJA': 'NetJets',\n 'ORB': 'Orenair (Orenburg Airlines)', 'IRK': 'Kish Air', 'EXV': 'Expo Aviation',\n 'ITK': 'Interlink Airlines', 'TAM': 'TAM Airlines (TAM Linhas Aereas)', 'CFE': 'BA CityFlyer',\n 'MUA': 'National Airlines', 'BLS': 'Bearskin Lake Air Service', 'TWN': 'Avialeasing Aviation Company',\n 'AIE': 'Air Inuit', 'TVR': 'Tavrey Airlines', 'CPA': 'Cathay Pacific', 'SSQ': 'Sunstate Airlines',\n 'LER': 'LASER Airlines', 'SNG': 'Air Senegal International', 'JLL': 'JetLite', 'BTI': 'Air Baltic',\n 'CHB': 'China West Air', 'BVT': 'Berjaya Air', 'NZM': 'Mount Cook Airlines',\n 'AJT': 'Amerijet International', 'AWQ': 'Indonesia AirAsia', 'TAK': 'Tatarstan Airlines',\n 'CGL': 'Seagle Air', 'ICL': 'CAL Cargo Air Lines', 'SPA': 'Sierra Pacific Airlines',\n 'GCR': 'Grand China Express Air', 'RFR': 'Royal Air Force', 'MTZ': 'Mali Airways', 'AGV': 'Air Glaciers',\n 'AWC': 'Titan Airways', 'LOF': 'Trans States Airlines', 'EXY': 'South African Express', 'ONE': 'Oceanair',\n 
'CRK': 'Hong Kong Airlines', 'JCS': 'Jetclub', 'JOR': 'Blue Air', 'VIK': 'Viking Airlines',\n 'DSM': 'LAN Argentina', 'ABE': 'Aban Air', 'HWY': 'Highland Airways', 'URN': 'Turan Air',\n 'TMA': 'Trans Mediterranean Airlines', 'SOZ': 'Sat Airlines', 'LNK': 'Airlink', 'ABR': 'Air Contractors',\n 'SYL': 'Yakutia Airlines', 'BBO': 'Flybaboo', 'LVG': 'Livingston Energy Flight',\n 'CDP': 'Aero Condor Peru', 'HMS': 'Hemus Air', 'MGL': 'MIAT Mongolian Airlines',\n 'FSC': 'Four Star Aviation / Four Star Cargo', 'OLA': 'Overland Airways', 'JEA': 'Jet Air',\n 'VNZ': 'Tbilaviamsheni', 'UKW': 'Lviv Airlines', 'BRG': 'Bering Air', 'IBZ': 'International Business Air',\n 'ABS': 'Transwest Air', 'KYV': 'Cyprus Turkish Airlines (KTHY Kibris Turk Hava Yollari)',\n 'GAP': 'Air Philippines', 'TXC': 'TransAVIAexport Airlines',\n 'RAE': 'Regional Compagnie Aerienne Europeenne', 'JAF': 'Jetairfly', 'AFG': 'Ariana Afghan Airlines',\n 'SMY': 'Sama Airlines', 'IDA': 'Indonesia Air Transport', 'LIA': 'Leeward Islands Air Transport',\n 'CAO': 'Air China Cargo', 'LKE': 'Lucky Air', 'EZS': 'easyJet Switzerland', 'ABB': 'Business Aviation',\n 'ANE': 'Air Nostrum', 'CAV': 'Calm Air', 'PNF': 'Panafrican Airways', 'RSU': 'Aerosur',\n 'RIT': 'Zest Airways', 'AAR': 'Asiana Airlines', 'TGZ': 'Georgian Airways', 'AAG': 'Air Atlantique',\n 'VIR': 'Virgin Atlantic Airways', 'THE': 'Toumaï Air Tchad', 'WJA': 'WestJet', 'ETD': 'Etihad Airways',\n 'AZU': 'Azul Brazilian Airlines', 'AVJ': 'Avia Traffic Company', 'AZQ': 'Silk Way Airlines',\n 'MNB': 'MNG Airlines', 'CGK': 'Click Airways', 'CSN': 'China Southern Airlines',\n 'BGH': 'BH Air (Balkan Holidays)', 'AIJ': 'Interjet', 'BMM': 'Atlas Blue',\n 'SBI': 'S7 Airlines (Siberia Airlines)', 'UPS': 'United Parcel Service', 'RWD': 'Rwandair Express',\n 'MNJ': 'Menajet', 'CBC': 'Caribair', 'ABV': 'Antrak Air', 'CKK': 'China Cargo Airlines',\n 'GHB': 'Ghana International Airlines', 'NAX': 'Norwegian Air Shuttle',\n 'DTA': 'TAAG Angola Airlines (Linhas Aéreas de Angola)', 'PRF': 'Precision Air', 'AJM': 'Air Jamaica',\n 'KNE': 'Nas Air', 'GGN': 'Air Georgian', 'UKS': 'Ukrainian Cargo Airways', 'FDX': 'FedEx Express',\n 'LTR': 'Lufttransport', 'MLR': 'Mihin Lanka', 'SKY': 'Skymark Airlines', 'LRC': 'LACSA',\n 'PIA': 'Pakistan International Airlines', 'FFV': 'Fly540', 'CHQ': 'Chautauqua Airlines',\n 'SNJ': 'Skynet Asia Airways', 'CFS': 'Empire Airlines', 'TMW': 'Trans Maldivian Airways',\n 'IOS': 'Isles of Scilly Skybus', 'EWG': 'Eurowings', 'DTR': 'DAT Danish Air Transport',\n 'RKH': 'Royal Khmer Airlines', 'VVM': 'Viva Macau', 'BNX': 'LAI - Linea Aerea IAACA', 'LBT': 'Nouvelair',\n 'SZL': 'Swaziland Airlink', 'BTA': 'ExpressJet Airlines', 'MDV': 'Moldavian Airlines', 'WZZ': 'Wizz Air',\n 'ETS': 'Avitrans', 'DLH': 'Lufthansa', 'BGL': 'Benin Golf Air', 'AHA': 'Air Alpha Greenland',\n 'VOZ': 'Virgin Blue', 'HER': \"Hex'Air\", 'CJC': 'Colgan Air', 'GBK': 'Gabon Airlines',\n 'HOA': 'Hola Airlines', 'NEA': 'New England Airlines', 'PJS': 'Jet Aviation',\n 'TUA': 'Turkmenistan Airlines', 'FAB': 'First Air', 'HSA': 'East African Safari Air Express',\n 'ROI': 'Avior Airlines', 'RPA': 'Republic Airlines', 'CMI': 'Continental Micronesia',\n 'TOM': 'Thomson Airways', 'PAO': 'Polynesian Airlines', 'LGW': 'Luftfahrtgesellschaft Walter',\n 'OTG': 'One-Two-GO Airlines', 'JUB': 'Jubba Airways', 'GBA': 'Gulf Air Bahrain',\n 'SWG': 'Sunwing Airlines', 'ALK': 'SriLankan Airlines', 'JET': 'Wind Jet', 'ESL': 'Russian Sky Airlines',\n 'XME': 'Australian air Express', 'DRU': 'Alrosa Mirny Air Enterprise', 'KRT': 'Air 
Kokshetau',\n 'SDM': 'Rossiya - Russian Airlines', 'CRL': 'Corsairfly', 'CVA': 'Air Chathams',\n 'SKZ': 'Skyway Enterprises', 'MDM': 'Medavia', 'AMF': 'Ameriflight', 'BFL': 'Buffalo Airways',\n 'DAG': 'Dagestan Airlines', 'OTL': 'South Airlines', 'UGA': 'Air Uganda', 'CDG': 'Shandong Airlines',\n 'MVD': 'Kavminvodyavia', 'NKF': 'Barents AirLink', 'ADE': 'Ada Air', 'MPX': 'Aeromexpress',\n 'SHY': 'Sky Airlines', 'BIE': 'Air Mediterranee', 'GES': 'Gestair', 'SRU': 'Star Perú',\n 'LNI': 'Lion Air (Lion Mentari Airlines )', 'UBA': 'Myanmar Airways International',\n 'ACK': 'Nantucket Airlines', 'PAL': 'Philippine Airlines', 'WEB': 'WebJet Linhas Aéreas',\n 'MAT': 'Maldivian Air Taxi', 'ACP': 'Astral Aviation', 'TAY': 'TNT Airways', 'KKK': 'Atlasjet',\n 'BLV': 'Bellview Airlines', 'NSO': 'Aerolineas Sosa', 'GMT': 'Magnicharters', 'FWA': 'Interstate Airline',\n 'PSV': 'Servicios Aereos Profesionales', 'HKS': 'CHC Helikopter Service', 'FDN': 'Dolphin Air',\n 'ERG': 'Aviaenergo', 'MAA': 'MasAir', 'MPD': 'Air Comet', 'TSO': 'Transaero Airlines', 'BMA': 'bmi',\n 'SAI': 'Shaheen Air International', 'VCV': 'Conviasa', 'PGT': 'Pegasus Airlines', 'VXG': 'Avirex',\n 'CXP': 'Xtra Airways', 'CSC': 'Sichuan Airlines', 'LLM': 'Yamal Airlines',\n 'BES': 'Aero Services Executive', 'TOS': 'Tropic Air', 'WIA': 'Windward Islands Airways',\n 'VLK': 'Vladivostok Air', 'KOR': 'Air Koryo', 'CPN': 'Caspian Airlines', 'KBA': 'Kenn Borek Air',\n 'SAT': 'SATA Air Acores', 'GSM': 'Flyglobespan', 'HAG': 'Hageland Aviation Services',\n 'CEB': 'Cebu Pacific', 'QCL': 'Air Class Lineas Aereas', 'KQA': 'Kenya Airways',\n 'THY': 'Turkish Airlines', 'VAP': 'Phuket Air', 'ANO': 'Airnorth', 'CJT': 'Cargojet Airways',\n 'KEN': 'Kenmore Air', 'TOB': 'Tobruk Air', 'KAJ': 'Karthago Airlines', 'AIQ': 'Thai AirAsia',\n 'SRR': 'Star Air', 'OST': 'Alania Airlines', 'BOS': 'OpenSkies', 'PDT': 'Piedmont Airlines',\n 'BLX': 'TUIfly Nordic', 'AUL': 'Aeroflot-Nord', 'ISV': 'Islena De Inversiones', 'DWT': 'Darwin Airline',\n 'FJI': 'Air Pacific', 'MCM': 'Heli Air Monaco', 'OAW': 'Helvetic Airways', 'RPK': 'Royal Airlines',\n 'BCI': 'Blue Islands', 'FRL': 'Freedom Airlines', 'VIV': 'VivaAerobus', 'NCA': 'Nippon Cargo Airlines',\n 'MCK': 'Macair Airlines', 'CBE': 'MexicanaClick', 'CYZ': 'China Postal Airlines',\n 'IKA': 'Gorkha Airlines', 'JIA': 'PSA Airlines', 'APP': 'Aeroperlas', 'KIL': 'Kuban Airlines',\n 'SAY': 'ScotAirways', 'CFG': 'Condor Flugdienst', 'FPY': \"Compagnie Africaine d'Aviation\",\n 'DAH': 'Air Algerie', 'MXL': 'Maxair', 'AYZ': 'Atlant-Soyuz Airlines', 'VES': 'Vieques Air Link',\n 'PGP': 'Perm Airlines', 'SYR': 'Syrian Arab Airlines (Syrianair)', 'AHK': 'Air Hong Kong',\n 'AZS': 'Aviacon Zitotrans', 'PIC': 'Jetstar Pacific', 'UDN': 'Dniproavia', 'DJB': 'Djibouti Airlines',\n 'SNC': 'Air Cargo Carriers', 'KNI': 'KD Avia', 'RVL': 'Air Vallée', 'SHU': 'SAT Airlines',\n 'CRG': 'Cargoitalia', 'NFA': 'Air Norway', 'SAX': 'Sabah Air', 'AMB': 'DRF Luftrettung',\n 'CIU': 'Cielos Airlines', 'AUH': 'Abu Dhabi Amiri Flight', 'AXF': 'Asian Express Airlines',\n 'RCF': 'Aeroflot-Cargo', 'MNG': 'Aero Mongolia', 'VGN': 'Virgin Nigeria Airways',\n 'TGN': 'Trigana Air Service', 'VLE': 'Volare Airlines', 'MKU': 'Island Air', 'GXL': 'XL Airways Germany',\n 'CLU': 'Triple Alpha Luftfahrtgesellschaft', 'CPZ': 'Compass Airlines', 'BOT': 'Air Botswana',\n 'NSE': 'SATENA', 'MDW': 'Midway Airlines', 'NWL': 'North-Wright Airways',\n 'SAS': 'Scandinavian Airlines System (SAS)', 'ZAK': 'Zambia Skyways', 'BOX': 'AeroLogic',\n 'SXS': 'SunExpress', 'VAZ': 'Red 
Wings Airlines', 'AJX': 'Air Japan', 'MAH': 'Malev Hungarian Airlines',\n 'ANG': 'Air Niugini', 'PCO': 'Pacific Coastal Airline', 'DHX': 'DHL International Aviation ME',\n 'TUD': 'Flight Alaska', 'FIF': 'Air Finland', 'SEH': 'Sky Express', 'CLI': 'Clickair',\n 'OKA': 'Okay Airways', 'GRL': 'Air Greenland', 'CXI': 'Shanxi Airlines', 'AFR': 'Air France',\n 'FTA': 'Frontier Flying Service', 'VOA': 'Viaggio Air', 'ABX': 'ABX Air', 'QNZ': 'JetConnect',\n 'KHO': 'Khors Aircompany', 'HHI': 'Hamburg International', 'AIZ': 'Arkia Israel Airlines',\n 'TIW': 'Transcarga', 'CQN': 'Chongqing Airlines', 'JNA': 'Jin Air', 'RJA': 'Royal Jordanian',\n 'GRO': 'Allegro', 'TUY': 'Linea Turistica Aerotuy', 'TLR': 'Air Libya', 'USH': 'US Helicopter',\n 'RYR': 'Ryanair', 'OCA': 'Aserca Airlines', 'AZW': 'Air Zimbabwe', 'GAO': 'Golden Air',\n 'SKW': 'SkyWest Airlines', 'IBE': 'Iberia Airlines', 'SLM': 'Surinam Airways', 'UAE': 'Emirates Airline',\n 'PMW': 'Paramount Airways', 'UTA': 'UTair Aviation', 'CTT': 'Custom Air Transport',\n 'SUS': 'Sun Air of Scandinavia', 'CSZ': 'Shenzhen Airlines', 'RSI': 'Air Sunshine',\n 'MDL': 'Mandala Airlines', 'AMU': 'Air Macao', 'SSV': 'Skyservice Airlines', 'COY': 'Coyne Aviation',\n 'ASH': 'go!', 'RLA': 'Airlinair', 'BIH': 'British International Helicopters', 'TZK': 'Tajik Air',\n 'EPA': 'Shenzhen Donghai Airlines', 'EAG': 'Eagle Airways', 'RYN': 'Ryan International Airlines',\n 'ACA': 'Air Canada', 'FFX': 'Flex Linhas Aéreas', 'MKA': 'MK Airlines', 'WAK': 'Wings of Alaska',\n 'JKK': 'Spanair', 'THT': 'Air Tahiti Nui', 'KRP': 'Carpatair', 'IYE': 'Yemenia - Yemen Airways',\n 'CIX': 'City Connexion Airlines', 'BSR': 'Guine Bissaur Airlines', 'HVY': 'Heavylift Cargo Airlines',\n 'SRQ': 'South East Asian Airlines', 'DLA': 'Air Dolomiti', 'ISS': 'Meridiana', 'AKK': 'Aklak Air',\n 'BKA': 'Bankair', 'TPA': 'Tampa Cargo', 'OAE': 'Omni Air International', 'CWM': 'Air Marshall Islands',\n 'SWT': 'Swiftair', 'PAC': 'Polar Air Cargo', 'AFL': 'Aeroflot Russian Airlines',\n 'PEN': 'PenAir (Peninsula Airways)', 'CPB': 'Corporate Express', 'GTI': 'Atlas Air', 'ELO': 'Eurolot',\n 'LOG': 'Loganair', 'VKG': 'Thomas Cook Airlines Scandinavia', 'CIM': 'Cimber Air',\n 'ATM': 'Airlines Of Tasmania', 'IRB': 'Iran Air Tours', 'FFM': 'Firefly',\n 'OLT': 'OLT - Ostfriesische Lufttransport', 'FLG': 'Pinnacle Airlines', 'EAK': 'Euro-Asia Air',\n 'EGJ': 'Scenic Airlines', 'KFR': 'Kingfisher Airlines', 'AWE': 'US Airways', 'GAI': 'Moskovia Airlines',\n 'TJT': 'Twin Jet', 'VLM': 'VLM Airlines', 'RKM': 'RAK Airways', 'IRM': 'Mahan Air', 'MXA': 'Mexicana',\n 'QNK': 'Kabo Air', 'RBA': 'Royal Brunei Airlines', 'LYC': 'Lynden Air Cargo', 'MWG': 'MASwings',\n 'PPW': 'Royal Phnom Penh Airways', 'JXX': 'Primera Air', 'SBU': 'Saint Barth Commuter',\n 'JSA': 'Jetstar Asia Airways', 'LAO': 'Lao Airlines', 'AAN': 'Amsterdam Airlines',\n 'JZA': 'Air Canada Jazz', 'SWA': 'Southwest Airlines', 'AUT': 'Austral Lineas Aereas',\n 'CTN': 'Croatia Airlines', 'LAB': 'L.A.B. 
Flying Service', 'TSG': 'Trans Air Congo', 'TVS': 'Smart Wings',\n 'MAU': 'Air Mauritius', 'DHK': 'DHL Air UK', 'TNA': 'TransAsia Airways', 'IAD': 'Fly Wex',\n 'SFJ': 'Star Flyer', 'AWU': 'Sylt Air', 'LUR': 'Atlantis European Airways', 'AEI': 'Air Italy Polska',\n 'NRR': 'Nature Air', 'GSS': 'Global Supply Systems', 'LMZ': 'Starline.kz',\n 'AFU': 'Afrinat International Airlines', 'EYE': 'FS Air Service', 'TRS': 'AirTran Airways',\n 'ONA': 'Yeongnam Air', 'AJV': 'ANA & JP Express', 'TCI': 'Air Turks and Caicos', 'SKU': 'Sky Airline',\n 'SWQ': 'Swift Air', 'BBC': 'Biman Bangladesh Airlines', 'AHW': 'Aeromist-Kharkiv', 'REU': 'Air Austral',\n 'RLK': 'Air Nelson', 'MAK': 'MAT Macedonian Airlines', 'SLI': 'Aeroméxico Connect',\n 'LCG': 'Lignes Aériennes Congolaises', 'TAE': 'TAME', 'LKN': 'Lankair', 'GZP': 'Gazpromavia',\n 'CLW': 'Centralwings', 'NYL': 'Mid Airlines', 'NVR': 'Novair', 'RUS': 'Cirrus Airlines',\n 'MES': 'Mesaba Airlines', 'CRD': 'Air Corridor', 'VSV': 'Scat Aircompany', 'LID': 'Alidaunia',\n 'HFY': 'Hi Fly', 'SWU': 'Swiss European Air Lines', 'PBN': 'Pacific Blue Airlines', 'AKL': 'Air Kiribati',\n 'GLR': 'Central Mountain Air', 'ACL': 'ItAli Airlines', 'BON': 'B&H Airlines', 'WSG': 'Wasaya Airways',\n 'JEC': 'Jett8 Airlines Cargo', 'LNE': 'LAN Ecuador', 'FWI': 'Air Caraibes', 'IRC': 'Iran Aseman Airlines',\n 'CVC': 'Centre-Avia', 'LAV': 'Aeropostal Alas de Venezuela', 'KPA': 'Kunpeng Airlines',\n 'AFN': 'Air Freight NZ', 'JZR': 'Jazeera Airways', 'KFS': 'Kalitta Charters', 'CAW': 'Comair',\n 'TUS': 'ABSA Cargo Airline', 'AXY': 'Axis Airways', 'AIP': 'Alpine Air Express',\n 'FXI': 'Air Iceland (Flugfélag Íslands)', 'TFL': 'Arkefly', 'COZ': 'Cosmic Air', 'MDG': 'Air Madagascar',\n 'IBX': 'Ibex Airlines', 'AUB': 'Augsburg Airways GmbH', 'PEC': 'Pacific East Asia Cargo Airlines',\n 'EDW': 'Edelweiss Air', 'SLA': 'Sierra National Airlines', 'WON': 'Wings Air',\n 'LTC': 'SmartLynx Airlines', 'SMX': 'Alitalia Express', 'BMJ': 'Bemidji Airlines',\n 'AAS': 'Askari Aviation', 'VNR': 'Wan Air', 'BHP': 'Belair Airlines', 'JUS': 'USA Jet Airlines',\n 'HVN': 'Vietnam Airlines', 'RZO': 'SATA International', 'ADH': 'Air One', 'EUP': 'EuroAir',\n 'BUC': 'Bulgarian Air Charter', 'CXH': 'China Xinhua Airlines', 'RSO': 'Aero Asia International',\n 'RNA': 'Nepal Airlines', 'AWI': 'Air Wisconsin', 'WOW': 'Air Southwest', 'TCW': 'Thomas Cook Airlines',\n 'PIR': 'Pamir Airways', 'HZT': 'Air Horizon', 'TCX': 'Thomas Cook Airlines', 'BTR': 'Botir-Avia',\n 'PTN': 'Pantanal Linhas Aereas Sul-Matogrossenses', 'SAA': 'South African Airways', 'EZY': 'easyJet',\n 'FRE': 'Freedom Air', 'TPU': 'TACA Peru', 'CRF': 'Air Central', 'SAM': 'SAM Colombia', 'WVL': 'Wizz Air',\n 'PHW': 'AVE.com', 'MGX': 'Montenegro Airlines', 'AGB': 'Air Service Gabon', 'OZW': 'Skywest Airlines',\n 'WIF': 'Wideroe', 'BPA': 'Blue Panorama Airlines', 'ANT': 'Air North', 'RPB': 'AeroRepublica',\n 'FSW': 'Faso Airways', 'AMX': 'AeroMéxico', 'AMC': 'Air Malta', 'KEE': 'Keystone Air Services',\n 'GIA': 'Garuda Indonesia', 'ENK': 'Enkor', 'SEJ': 'Spicejet', 'THA': 'Thai Airways International',\n 'REA': 'Aer Arann', 'BLF': 'Blue1', 'EGU': 'Eagle Air', 'SSX': 'Lynx Aviation',\n 'EXK': 'Executive Airlines', 'IWD': 'Iberworld', 'ELL': 'Estonian Air', 'NAS': 'Nasair',\n 'RGL': 'Regional Air Lines', 'HHA': 'Atlantic Airlines de Honduras', 'CJA': 'CanJet',\n 'JAI': 'Jet Airways', 'KZU': 'Kuzu Airlines Cargo', 'WTA': 'Africa West', 'ESK': 'SkyEurope',\n 'GFG': 'Georgian National Airlines', 'ISR': 'Israir Airlines', 'GDR': 'Gadair European 
Airlines',\n 'INC': 'Insel Air', 'KGA': 'Kyrgyzstan Airlines', 'NMI': 'Pacific Wings', 'COM': 'Comair',\n 'KDR': 'Royal Daisy Airlines', 'SGN': 'SGA Airlines', 'KZR': 'Air Astana', 'HAL': 'Hawaiian Airlines',\n 'KRE': 'AeroSucre', 'LAL': 'Air Labrador', 'PEL': 'Aeropelican Air Services', 'CSH': 'Shanghai Airlines',\n 'PLR': 'Northwestern Air', 'ABD': 'Air Atlanta Icelandic', 'CSQ': 'IBC Airways', 'SWN': 'West Air Sweden',\n 'BUG': 'Mokulele Airlines', 'BAW': 'British Airways', 'ERO': \"Sun d'Or International Airlines\",\n 'AEY': 'Air Italy', 'GAL': 'Galaxy Air', 'NTJ': 'Nextjet', 'DAE': 'DHL Aero Expreso',\n 'ASZ': 'Astrakhan Airlines', 'SAH': 'Sayakhat Airlines', 'MON': 'Monarch Airlines',\n 'AER': 'Alaska Central Express', 'ARG': 'Aerolineas Argentinas', 'SMJ': 'Avient Aviation',\n 'OZJ': 'Ozjet Airlines', 'OHY': 'Onur Air', 'LDE': 'LADE - Líneas Aéreas del Estado',\n 'ESF': 'Estafeta Carga Aerea', 'AAY': 'Allegiant Air', 'AUR': 'Aurigny Air Services', 'VIM': 'Air VIA',\n 'SOA': 'Southern Air Charter', 'SLK': 'SilkAir', 'AEA': 'Air Europa', 'EZE': 'Eastern Airways',\n 'BRU': 'Belavia Belarusian Airlines', 'TIB': 'TRIP Linhas Aereas',\n 'ATN': 'Air Transport International LLC', 'BWA': 'Caribbean Airlines', 'IMX': 'Zimex Aviation',\n 'TNM': 'Tiara Air', 'FWL': 'Florida West International Airways', 'CLH': 'Lufthansa CityLine',\n 'CAY': 'Cayman Airways', 'RUN': 'ACT Airlines', 'TPC': 'Air Caledonie', 'ANZ': 'Air New Zealand',\n 'KAC': 'Kuwait Airways', 'SEU': 'XL Airways France', 'JAB': 'Air Bagan', 'OAL': 'Olympic Airlines',\n 'ODS': 'Odessa Airlines', 'JBA': 'Helijet', 'NGE': 'Angel Airlines', 'SOL': 'Solomon Airlines',\n 'AZN': 'Amaszonas', 'KAE': 'Kartika Airlines', 'MMZ': 'euroAtlantic airways',\n 'MNA': 'Merpati Nusantara Airlines', 'BML': 'Bismillah Airlines', 'AEW': 'Aerosvit Airlines',\n 'SVR': 'Ural Airlines', 'AUI': 'Ukraine International Airlines', 'CCA': 'Air China', 'TAO': 'Aeromar',\n 'AHY': 'Azerbaijan Airlines', 'RNX': '1Time', 'MPH': 'Martinair', 'KMP': 'Kampuchea Airlines',\n 'BST': 'Bestair', 'MEA': 'Middle East Airlines', 'CHH': 'Hainan Airlines', 'BRV': 'Bravo Air Congo',\n 'ARR': 'Air Armenia', 'KGL': 'Kogalymavia', 'ICE': 'Icelandair', 'AIB': 'Airbus Industrie',\n 'FFT': 'Frontier Airlines', 'ADR': 'Adria Airways', 'DAO': 'Daallo Airlines', 'PUA': 'PLUNA',\n 'MJN': 'Royal Air Force of Oman', 'WDL': 'WDL Aviation', 'TRA': 'transavia.com', 'AEU': 'Astraeus',\n 'CGN': \"Chang'an Airlines\", 'KAP': 'Cape Air', 'MRW': 'Mars RK', 'GJS': 'GoJet Airlines',\n 'AZE': 'Arcus-Air Logistic', 'UCA': 'CommutAir', 'FRJ': 'Afrijet Airlines', 'RLE': 'Rico Linhas Aereas',\n 'MAI': 'Mauritania Airlines International', 'SOO': 'Southern Air', 'ABW': 'AirBridgeCargo Airlines',\n 'PGA': 'Portugalia', 'BKP': 'Bangkok Airways', 'EEU': 'Eurofly', 'WDA': 'Wimbi Dira Airways',\n 'YZR': 'Yangtze River Express', 'AHR': 'Air2there', 'RCH': 'Air Mobility Command', 'BER': 'Air Berlin',\n 'EIN': 'Aer Lingus', 'EAQ': 'Eastern Australia Airlines', 'HDA': 'Dragonair, Hong Kong Dragon Airlines',\n 'LAN': 'LAN Airlines', 'TDX': 'Tradewinds Airlines', 'GIF': 'Guinee Airlines', 'KIS': 'Contact Air',\n 'RON': 'Nauru Air Corporation', 'LBC': 'Albanian Airlines', 'TUX': 'TunisAir Express',\n 'GEC': 'Lufthansa Cargo', 'ITX': 'Imair Airlines', 'UIA': 'Uni Air', 'DNL': 'Dutch Antilles Express',\n 'TDM': 'Tandem Aero', 'NWR': 'Northwest Regional Airlines', 'NWA': 'Northwest Airlines',\n 'CYP': 'Cyprus Airways', 'CRN': 'Aero Caribbean', 'TNB': 'Trans Air Benin', 'AAF': 'Aigle Azur',\n 'FHE': 'Hello', 'FCR': 'Flying 
Carpet', 'UKM': 'UM Airlines', 'UAL': 'United Airlines',\n 'LIL': 'FlyLal (Lithuanian Airlines)', 'LAM': 'LAM Mozambique Airlines', 'LPE': 'LAN Peru',\n 'PBA': 'PB Air', 'ASJ': 'Air Satellite', 'VTS': 'Everts Air Cargo', 'BEE': 'Flybe',\n 'KLC': 'KLM Cityhopper', 'VDA': 'Volga-Dnepr Airlines', 'CUB': 'Cubana de Aviacion',\n 'KLB': 'Air Mali International', 'BCY': 'CityJet', 'BHS': 'Bahamasair', 'SHQ': 'Shanghai Airlines Cargo',\n 'LXP': 'LAN Express', 'ARD': 'Aerocondor', 'SXR': 'Sky Express', 'CGP': 'Cargo Plus Aviation',\n 'UTY': 'Alliance Airlines', 'GMI': 'Germania', 'KFA': 'Kelowna Flightcraft Air Charter',\n 'TOK': 'Airlines PNG', 'MDA': 'Mandarin Airlines', 'HHN': 'Hahn Air', 'IGO': 'IndiGo Airlines',\n 'SUD': 'Sudan Airways', 'ALX': 'Hewa Bora Airways', 'JEX': 'JAL Express', 'LGL': 'Luxair',\n 'GLA': 'Great Lakes Airlines', 'PSD': 'President Airlines', 'ADO': 'Hokkaido International Airlines',\n 'NMB': 'Air Namibia', 'RAM': 'Royal Air Maroc', 'SDA': 'Sol Dominicana Airlines',\n 'MBN': 'Zambian Airways', 'LZB': 'Bulgaria Air', 'BMI': 'bmibaby', 'SOV': 'Saravia', 'ARA': 'Arik Air',\n 'DQA': 'Maldivian', 'NKS': 'Spirit Airlines', 'AIA': 'Avies', 'CCM': 'CCM Airlines',\n 'TCF': 'Shuttle America', 'LTF': 'Lufttaxi Fluggesellschaft', 'CQH': 'Spring Airlines', 'EXS': 'Jet2.com',\n 'QXE': 'Horizon Air', 'TAP': 'TAP Portugal', 'LYN': 'Kyrgyzstan', 'AML': 'Air Malawi',\n 'VAS': 'ATRAN Cargo Airlines', 'WRF': 'Wright Air Service', 'LLR': 'Air India Regional',\n 'URG': 'Air Urga', 'VUN': 'Air Ivoire', 'QFA': 'Qantas', 'RXA': 'Regional Express Airlines',\n 'WBA': 'Finncomm Airlines', 'JAC': 'Japan Air Commuter', 'ASA': 'Alaska Airlines', 'GTV': 'Aerogaviota',\n 'JAT': 'Jat Airways', 'CMM': 'Compagnie Aérienne du Mali', 'SJY': 'Sriwijaya Air', 'LAP': 'TAM Airlines',\n 'MLA': '40-Mile Air', 'POT': 'Polet Airlines', 'DRK': 'Druk Air', 'EGF': 'American Eagle Airlines',\n 'CVU': 'Grand Canyon Airlines', 'GOM': 'Gomelavia', 'SPR': 'Provincial Airlines',\n 'PDF': 'Pelican Air Services', 'NOK': 'Nok Air', 'TSD': 'TAF Linhas Aéreas', 'JST': 'Jetstar Airways',\n 'PLM': 'Air Pullmantur', 'VPP': 'Vintage Props and Jets', 'MOV': 'VIM Airlines', 'ABY': 'Air Arabia',\n 'CIR': 'Arctic Circle Air Service', 'CXA': 'Xiamen Airlines', 'MSL': 'Marsland Aviation',\n 'WRC': 'Wind Rose Aviation', 'JJA': 'Jeju Air', 'GWL': 'Great Wall Airlines', 'CKS': 'Kalitta Air',\n 'JAV': 'Jordan Aviation', 'IRP': 'Payam Air', 'DNU': 'DOT LT', 'SWR': 'Swiss International Air Lines',\n 'AAH': 'Aloha Air Cargo', 'BWG': 'Blue Wings', 'SCW': 'Malmö Aviation', 'FIN': 'Finnair',\n 'ILN': 'Interair South Africa', 'AVA': 'Avianca - Aerovias Nacionales de Colombia, S.A.',\n 'DTH': 'Tassili Airlines', 'UZB': 'Uzbekistan Airways', 'RMV': 'Romavia', 'RIU': 'Riau Airlines',\n 'SBS': 'Seaborne Airlines', 'SFR': 'Safair', 'TSE': 'Transmile Air Services', 'LBY': 'Belle Air',\n 'OEA': 'Orient Thai Airlines', 'JAE': 'Jade Cargo International', 'AGX': 'Aviogenex',\n 'CWC': 'Centurion Air Cargo', 'UDC': 'DonbassAero', 'IRA': 'Iran Air', 'SDR': 'City Airline',\n 'PMT': 'PMTair', 'NJS': 'National Jet Systems', 'EZA': 'Eznis Airways', 'EVA': 'EVA Air',\n 'NRD': 'Nordic Regional', 'TCV': 'TACV Cabo Verde Airlines', 'GUN': 'Grant Aviation', 'BRQ': 'Buraq Air',\n 'EEA': 'Empresa Ecuatoriana De Aviacion', 'KDC': 'KD Air', 'AAB': 'Abelag Aviation',\n 'NJE': 'NetJets Europe', 'ANK': 'Air Nippon', 'DXH': 'East Star Air', 'DKH': 'Juneyao Airlines',\n 'JBU': 'JetBlue Airways', 'ROT': 'Tarom', 'RHC': 'Redhill Aviation', 'RSR': 'Aero-Service',\n 'SVA': 'Saudi 
Arabian Airlines', 'RNV': 'Armavia', 'CMP': 'Copa Airlines', 'RLN': 'Aero Lanka',\n 'JFU': 'Jet4you', 'JAL': 'Japan Airlines', 'CHP': 'Aviacsa'}\n\n\n# read the points by constraint,\n# if constraint = {}, return all the points\nclass PointReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n constraint = self.get_argument('constraint')\n points = g_PointReader.queryPoints(constraint)\n self.write(result)\n\n\ndef filterTrajData(points):\n # arrTrajs = []\n # depTrajs = []\n trajData = []\n trajKey = {}\n\n for index in points:\n if index['trajID'] not in trajKey:\n trajKey[index['trajID']] = 0\n\n temp = {}\n\n if 'arrdir' in index:\n temp['arr'] = index['arrdir']\n else:\n temp['arr'] = -1\n\n if type(index['callsign']) == type('aa'):\n temp['callsign'] = index['callsign']\n else:\n temp['callsign'] = ''\n\n temp['trajID'] = index['trajID']\n temp['data'] = []\n\n for i in range(len(index['timestamp'])):\n if math.isnan(index['timestamp'][i]) or math.isnan(index['loc']['coordinates'][i][2]) or math.isnan(\n index['speed'][i]) or math.isnan(index['loc']['coordinates'][i][1]) or math.isnan(\n index['loc']['coordinates'][i][0]):\n continue\n record = {}\n record['Timestamp'] = index['timestamp'][i]\n # record['Altitude'] = index['loc']['coordinates'][i][2]\n # record['Speed'] = index['speed'][i]\n record['latlon'] = [index['loc']['coordinates'][i][1], index['loc']['coordinates'][i][0]]\n temp['data'].append(record)\n\n temp['sTime'] = index['stTime']\n temp['eTime'] = index['enTime']\n\n # if index['trajInf']==1:\n # temp['origin'] = index['origin']\n # temp['destination'] = index['destination']\n # temp['time'] = index['time']\n\n try:\n temp['origin'] = index['origin']\n temp['destination'] = index['destination']\n temp['time'] = index['time']\n except:\n temp['origin'] = ''\n temp['destination'] = ''\n temp['time'] = ''\n\n trajData.append(temp)\n else:\n trajKey[index['trajID']] += 1\n\n return trajData\n\n # if index['arrdir'] == 1:\n # temp={}\n # temp['arr'] = 1;\n\n # if type(index['callsign'])==type('aa'):\n # temp['callsign'] = index['callsign']\n # else:\n # temp['callsign'] = ''\n\n\n # temp['trajID'] = index['trajID']\n # temp['data'] = []\n\n # for i in range(len(index['timestamp'])):\n # if math.isnan(index['timestamp'][i]) or math.isnan(index['loc']['coordinates'][i][2]) or math.isnan(index['speed'][i]) or math.isnan(index['loc']['coordinates'][i][1]) or math.isnan(index['loc']['coordinates'][i][0]):\n # continue\n # record={}\n # record['Timestamp'] = index['timestamp'][i]\n # #record['Altitude'] = index['loc']['coordinates'][i][2]\n # #record['Speed'] = index['speed'][i]\n # record['latlon'] = [index['loc']['coordinates'][i][1], index['loc']['coordinates'][i][0]]\n # temp['data'].append(record)\n\n\n # temp['sTime'] = index['stTime']\n # temp['eTime'] = index['enTime']\n # if index['trajInf']==1:\n # temp['origin'] = index['origin']\n # temp['destination'] = index['destination']\n # #temp['aircraft'] = index['aircraft']\n # temp['time'] = index['time']\n # arrTrajs.append(temp)\n\n # else:\n # #print(index['trajID'])\n\n # #print(index['trajID'])\n # try:\n # temp={}\n # temp['arr'] = 0;\n # # print('trajID')\n\n # if type(index['callsign'])==type('aa'):\n # temp['callsign'] = index['callsign']\n # else:\n # # print(index['callsign'])\n # # print(type(index['callsign']))\n # temp['callsign'] = ''\n\n # 
temp['trajID'] = index['trajID']\n # # print(temp['trajID'])\n # temp['data'] = []\n\n # for i in range(len(index['timestamp'])):\n # if math.isnan(index['timestamp'][i]) or math.isnan(index['loc']['coordinates'][i][2]) or math.isnan(index['speed'][i]) or math.isnan(index['loc']['coordinates'][i][1]) or math.isnan(index['loc']['coordinates'][i][0]):\n # continue\n # record={}\n # record['Timestamp'] = index['timestamp'][i]\n # #record['Altitude'] = index['loc']['coordinates'][i][2]\n # #record['Speed'] = index['speed'][i]\n # record['latlon'] = [index['loc']['coordinates'][i][1], index['loc']['coordinates'][i][0]]\n # temp['data'].append(record)\n\n # temp['sTime'] = index['stTime']\n # temp['eTime'] = index['enTime']\n # if index['trajInf']==1:\n # temp['origin'] = index['origin']\n # temp['destination'] = index['destination']\n # #temp['aircraft'] = index['aircraft']\n # temp['time'] = index['time']\n # depTrajs.append(temp)\n # except:\n # print('traj find except')\n\n\n # print(arrTrajs[0])\n\n\nclass CurtimeReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n start = time.clock()\n queryData = g_PointReader.queryCurtime(constraint)\n\n # print(queryData)\n\n points = queryData['trajData']\n\n CDMData = queryData['CDMData']\n\n x = float('nan')\n print('traNum:', len(points))\n # print('CDMNum:',len(CDMData))\n\n data = {}\n\n trajData = filterTrajData(points)\n # arrCDMs = []\n # depCDMs = []\n # CDMKey= []\n\n\n # for index in CDMData:\n # if index['trajID'] not in CDMKey:\n # del index['_id']\n # ICAO = getICAO(index['航班号'])\n # index['airlineCode'] = ICAO\n # try:\n # index['airline'] = airlineMap[getICAO(index['航班号'])]\n # except:\n # index['airline'] = ''\n\n # CDMKey.append(index['trajID'])\n # if index['arrdir'] ==1:\n # arrCDMs.append(index)\n # else:\n # depCDMs.append(index)\n\n # CDMData = {'depCDMs': depCDMs, 'arrCDMs': arrCDMs}\n\n # pcData = filterData(CDMData)\n\n\n data = {'trajData': trajData}\n data = json.dumps(data);\n\n print('buffer size', len(data) / (1024 * 1024), \"M\")\n\n end = time.clock()\n print('Filter curtime running time: %s Seconds' % (end - start))\n # start = time.clock()\n self.write({'data': data});\n # end = time.clock()\n # print('Running time: %s Seconds'%(end-start))\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(MyEncoder, self).default(obj)\n\n\ndef dealData(data, flag):\n temp = []\n for i in range(len(data)):\n if flag == 0:\n data[i]['计划降落'] = ''\n data[i]['实际降落'] = ''\n else:\n data[i]['开始登机'] = ''\n data[i]['关闭登机门'] = ''\n data[i]['P&S时间'] = ''\n data[i]['TAX时间'] = ''\n data[i]['DEP时间'] = ''\n\n record = {}\n record['callsign'] = data[i]['航班号']\n # record['trajID'] = data[i]['trajID']\n if 'trajID' in data[i].keys():\n record['trajID'] = data[i]['trajID']\n # else:\n # record['trajID'] = str(uuid.uuid1())\n record['arr'] = flag\n\n if flag == 1:\n # arrival\n record['Schedule Arrival'] = data[i]['计划降落'] if data[i]['计划降落'] else ''\n record['Actual Arrival'] = data[i]['实际降落'] if data[i]['实际降落'] else ''\n record['Arrival Delay Dur'] = (data[i][\"实际降落\"] - data[i][\"计划降落\"]) / (60 * 
1000) if data[i][\"实际降落\"] and \\\n data[i][\"计划降落\"] else ''\n record['Flight Delay Dur'] = (\n (data[i][\"实际降落\"] - data[i][\"实际起飞\"]) / (60 * 1000) - (data[i][\"计划降落\"] - data[i][\"计划起飞\"]) / (60 * 1000)) if \\\n data[i][\"实际降落\"] and data[i][\"实际起飞\"] and data[i][\"计划降落\"] and data[i][\"计划起飞\"] else ''\n # public\n # record['机型'] = data[i][\"机型\"] if data[i][\"机型\"] else ''\n record['Schedule Departure'] = data[i][\"计划起飞\"] if data[i][\"计划起飞\"] else ''\n record[\"Actual Departure\"] = data[i][\"实际起飞\"] if data[i][\"实际起飞\"] else ''\n record[\"Departure Delay Dur\"] = (data[i][\"实际起飞\"] - data[i][\"计划起飞\"]) / (60 * 1000) if data[i][\"实际起飞\"] and \\\n data[i][\"计划起飞\"] else ''\n # srecord['Runway'] = data[i][\"跑道\"] if data[i][\"跑道\"] else ''\n else:\n # departure\n record['Boarding Dur'] = (data[i][\"关闭登机门\"] - data[i][\"开始登机\"]) / (60 * 1000) if data[i][\"关闭登机门\"] and data[i][\n \"开始登机\"] else ''\n record['Preparing Dur'] = (data[i][\"P&S时间\"] - data[i][\"关闭登机门\"]) / (60 * 1000) if data[i][\"P&S时间\"] and \\\n data[i][\"关闭登机门\"] else ''\n record['Service Dur'] = (data[i][\"TAX时间\"] - data[i][\"P&S时间\"]) / (60 * 1000) if data[i][\"TAX时间\"] and data[i][\n \"P&S时间\"] else ''\n record['Taxing Dur'] = (data[i][\"DEP时间\"] - data[i][\"TAX时间\"]) / (60 * 1000) if data[i][\"DEP时间\"] and data[i][\n \"TAX时间\"] else ''\n # record['Runway Flow'] = data[i][\"跑道流量\"] if data[i][\"跑道流量\"] else ''\n # record['Reason'] = data[i][\"最终判断原因\"] if data[i][\"最终判断原因\"] else ''\n # public\n # record['机型'] = data[i][\"机型\"] if data[i][\"机型\"] else ''\n record['Schedule Departure'] = data[i][\"计划起飞\"] if data[i][\"计划起飞\"] else ''\n record[\"Actual Departure\"] = data[i][\"DEP时间\"] if data[i][\"DEP时间\"] else ''\n record[\"Departure Delay Dur\"] = (data[i][\"DEP时间\"] - data[i][\"计划起飞\"]) / (60 * 1000) if data[i][\"DEP时间\"] and \\\n data[i][\n \"计划起飞\"] else ''\n # record['Runway'] = data[i][\"跑道\"] if data[i][\"跑道\"] else ''\n\n # if flag==0:\n # record[\"实际起飞时间\"] = data[i][\"DEP时间\"] if data[i][\"DEP时间\"] else ''\n # record[\"起飞延误时长/min\"] = (data[i][\"DEP时间\"]-data[i][\"计划起飞\"])/(60*1000) if data[i][\"DEP时间\"] and data[i][\"计划起飞\"] else ''\n # else:\n # record[\"实际起飞时间\"] = data[i][\"实际起飞\"] if data[i][\"实际起飞\"] else ''\n # record[\"起飞延误时长/min\"] = (data[i][\"实际起飞\"]-data[i][\"计划起飞\"])/(60*1000) if data[i][\"实际起飞\"] and data[i][\"计划起飞\"] else ''\n\n\n temp.append(record)\n data = temp\n return data\n\n\ndef getICAO(callsign):\n callsign = str(callsign)\n for i in range(0, len(callsign)):\n if callsign[i] <= '9':\n break\n ICAO = callsign[:i]\n return ICAO\n\n\ndef filterData(data):\n features = [{'feaName': 'Schedule Arrival', 'arr': 1, 'type': 'time'},\n {'feaName': 'Actual Arrival', 'arr': 1, 'type': 'time'},\n {'feaName': 'Arrival Delay Dur', 'arr': 1, 'type': 'number'},\n {'feaName': 'Flight Delay Dur', 'arr': 1, 'type': 'number'},\n\n {'feaName': 'Schedule Departure', 'arr': 2, 'type': 'time'},\n {'feaName': 'Actual Departure', 'arr': 2, 'type': 'time'},\n {'feaName': 'Departure Delay Dur', 'arr': 2, 'type': 'number'},\n # {'feaName':'Runway','arr':2,'type':'time'},\n {'feaName': 'Boarding Dur', 'arr': 0, 'type': 'number'},\n {'feaName': 'Preparing Dur', 'arr': 0, 'type': 'number'},\n {'feaName': 'Service Dur', 'arr': 0, 'type': 'number'},\n {'feaName': 'Taxing Dur', 'arr': 0, 'type': 'number'},\n # {'feaName':'Runway Flow','arr':0,'type':'number'}\n # {'feaName':'Reason','arr':0,'type':'number'},\n ]\n\n # ordinal\n\n arrFeaList = []\n depFeaList = []\n pubFeaList = []\n feaList = []\n feaRange = {}\n 
feaDict = {}\n\n for index in features:\n if index['arr'] == 1:\n arrFeaList.append(index['feaName'])\n elif index['arr'] == 0:\n depFeaList.append(index['feaName'])\n else:\n pubFeaList.append(index['feaName'])\n feaList.append(index['feaName'])\n feaDict[index['feaName']] = index\n\n arrData = data['arrCDMs'];\n depData = data['depCDMs'];\n\n # print('arrData',arrData)\n depData = dealData(depData, 0)\n arrData = dealData(arrData, 1)\n\n data = arrData + depData\n\n for d in feaList:\n valueMin = 0\n valueMax = 1\n if feaDict[d]['arr'] == 1:\n if arrData:\n temp = []\n for index in arrData:\n if index[d]:\n temp.append(index[d])\n valueMax = max(temp)\n valueMin = min(temp)\n elif feaDict[d]['arr'] == 0:\n if depData:\n # valueMax = max(index[d] if index[d] for index in depData)\n # valueMin = min(index[d] if index[d] for index in depData)\n temp = []\n for index in depData:\n if index[d]:\n temp.append(index[d])\n valueMax = max(temp)\n valueMin = min(temp)\n else:\n if data:\n try:\n temp = []\n for index in data:\n if index[d]:\n temp.append(index[d])\n valueMax = max(temp)\n valueMin = min(temp)\n\n # valueMax = max(index[d] if index[d] for index in data)\n # valueMin = min(index[d] if index[d] for index in data)\n except:\n print(index)\n valueInterval = math.ceil((valueMax - valueMin) / 14) * 1.2\n # valueMin = valueMin - valueInterval\n feaRange[d] = [int(valueMin), int(math.ceil(valueMax)), valueInterval]\n\n for d in feaList:\n print(d)\n valueMin = 0\n valueMax = 1\n if feaDict[d]['arr'] == 1:\n if arrData:\n temp = []\n for index in arrData:\n if not index[d]:\n index[d] = feaRange[d][0]\n elif feaDict[d]['arr'] == 0:\n if depData:\n temp = []\n for index in depData:\n if not index[d]:\n index[d] = feaRange[d][0]\n else:\n if data:\n try:\n temp = []\n for index in data:\n if not index[d]:\n index[d] = feaRange[d][0]\n except:\n print(index)\n\n for index in arrFeaList:\n for i in range(len(arrData)):\n if not arrData[i][index]:\n arrData[i][index] = feaRange[index][0]\n\n for index in depFeaList:\n for i in range(len(depData)):\n if not depData[i][index]:\n depData[i][index] = feaRange[index][0]\n\n feaSta = {'Arr': {}, 'Dep': {}, 'Arr/Dep': {}}\n lineData = {'Arr': {}, 'Dep': {}, 'Arr/Dep': {}}\n\n # print(arrData)\n\n for d in feaList:\n # 统计的间隔为15份\n valueRange = feaRange[d]\n valueMin = valueRange[0]\n valueMax = valueRange[1]\n valueInterval = valueRange[2]\n # valueMin -= valueInterval\n # 第一个刻度存放值不存在的情况\n\n aaa = 0\n\n if feaDict[d]['arr'] == 1 and arrData:\n feaSta['Arr'][d] = []\n for i in range(15):\n value = valueMin + valueInterval * i\n num = 0\n try:\n for re in arrData:\n if re[d] >= value and re[d] < value + valueInterval:\n num += 1\n except:\n pass\n feaSta['Arr'][d].append(\n {'value': value, 'arrNum': num, 'depNum': 0, 'arrNumRemove': 0, 'depNumRemove': 0, 'num': num})\n\n\n # lineData['Arr'][d]=[]\n # for i in range(len(arrData)):\n # num=1\n # for j in range(i+1,len(arrData),1):\n # if arrData[i][d] == arrData[j][d]:\n # num+=1\n # lineData['Arr'][d].append({'value': arrData[i][d], 'num':num})\n\n # lineData['Arr'][d] = sorted(lineData['Arr'][d], key=itemgetter('value'))\n\n # print('aaaNum',len(arrData)+len(depData))\n # print('aaa',aaa)\n\n if feaDict[d]['arr'] == 0 and depData:\n feaSta['Dep'][d] = []\n for i in range(15):\n value = valueMin + valueInterval * i\n num = 0\n try:\n for re in depData:\n if re[d] >= value and re[d] < value + valueInterval:\n num += 1\n except:\n pass\n feaSta['Dep'][d].append(\n {'value': value, 'arrNum': 0, 'depNum': 
num, 'arrNumRemove': 0, 'depNumRemove': 0, 'num': num})\n\n # lineData['Dep'][d]=[]\n # for i in range(len(depData)):\n # num=1\n # for j in range(i+1,len(depData),1):\n # if depData[i][d] == depData[j][d]:\n # num+=1\n # lineData['Dep'][d].append({'value': depData[i][d], 'num':num})\n\n # lineData['Dep'][d] = sorted(lineData['Dep'][d], key=itemgetter('value'))\n\n if feaDict[d]['arr'] == 2 and data:\n feaSta['Arr/Dep'][d] = []\n for i in range(15):\n value = valueMin + valueInterval * i\n num = 0\n arrNum = 0\n depNum = 0\n try:\n for re in data:\n if re[d] >= value and re[d] < value + valueInterval:\n num += 1\n if re['arr'] == 1:\n arrNum += 1\n else:\n depNum += 1\n except:\n pass\n feaSta['Arr/Dep'][d].append(\n {'value': value, 'arrNum': arrNum, 'depNum': depNum, 'arrNumRemove': 0, 'depNumRemove': 0,\n 'num': num})\n\n\n\n # lineData['Arr/Dep'][d]=[]\n # for i in range(len(data)):\n # num=1\n # for j in range(i+1,len(data),1):\n # if data[i][d] == data[j][d]:\n # num+=1\n # lineData['Arr/Dep'][d].append({'value': data[i][d], 'num':num})\n\n # lineData['Arr/Dep'][d] = sorted(lineData['Arr/Dep'][d], key=itemgetter('value'))\n\n # for feaName in feaSta.keys():\n # for fea in feaSta[feaName].keys():\n # feaSta[feaName][fea]['num']=feaSta[feaName][fea]['arrNum']+feaSta[feaName][fea]['depNum']\n\n # print('data',data)\n # feaSta = {'Arr':{},'Dep':{},'Arr/Dep':{}}\n print('dataLen', len(data))\n return {\"data\": data, 'features': features, 'feaSta': feaSta, 'lineData': lineData, 'feaRange': feaRange}\n\n\nclass MonthStaReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n points = g_PointReader.queryMonthSta(constraint)\n\n data = {}\n\n # print(points[0])\n del points[0]['_id']\n\n print(points[0].keys())\n for index in points[0].keys():\n data[index] = points[0][index]\n\n data = json.dumps(data)\n\n self.write({\n 'data': data,\n })\n\n\nclass DayStaReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n start = time.clock()\n\n points = g_PointReader.queryDaySta(constraint)\n\n end = time.clock()\n print('Filter daysta running time: %s Seconds' % (end - start))\n\n data = {}\n del points[0]['_id']\n print(points[0].keys())\n for index in points[0].keys():\n temp = []\n for re in points[0][index]:\n temp = temp + re['data']\n data[index] = temp\n\n data = json.dumps(data)\n\n self.write({\n 'data': data,\n })\n\n\nclass CDMReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n CDMData = g_PointReader.queryCDM(constraint)\n\n print('CDMData', len(CDMData))\n\n CDMKey = []\n arrCDMs = []\n depCDMs = []\n for index in CDMData:\n if index['trajID'] not in CDMKey:\n del index['_id']\n ICAO = getICAO(index['航班号'])\n try:\n index['airline'] = airlineMap[getICAO(index['航班号'])]\n except:\n index['airlineCode'] = ''\n\n 
CDMKey.append(index['trajID'])\n if index['arrdir'] == 1:\n arrCDMs.append(index)\n else:\n depCDMs.append(index)\n\n CDMData = {'depCDMs': depCDMs, 'arrCDMs': arrCDMs}\n pcData = filterData(CDMData)\n # data = {'CDMData':CDMData}\n data = {'CDMData': CDMData, 'pcData': pcData}\n data = json.dumps(data)\n self.write({\n 'data': data,\n })\n\n\nclass FliterCircleReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n start = time.clock()\n allData = g_PointReader.queryFilterCircle(constraint)\n points = allData['trajList']\n data = []\n\n for index in points:\n del index['_id']\n data.append(index['trajID'])\n # data = filterTrajData(points)\n CDMData = allData['CDMData']\n arrCDMs = []\n depCDMs = []\n CDMKey = []\n\n for index in CDMData:\n if index['trajID'] not in CDMKey:\n del index['_id']\n ICAO = getICAO(index['航班号'])\n index['airlineCode'] = ICAO\n try:\n index['airline'] = airlineMap[getICAO(index['航班号'])]\n except:\n index['airline'] = ''\n\n CDMKey.append(index['trajID'])\n if index['arrdir'] == 1:\n arrCDMs.append(index)\n else:\n depCDMs.append(index)\n\n CDMData = {'depCDMs': depCDMs, 'arrCDMs': arrCDMs}\n\n pcData = filterData(CDMData)\n\n data = {'trajList': data, 'CDMData': CDMData, 'pcData': pcData}\n data = json.dumps(data)\n\n end = time.clock()\n print('Filter circle running time: %s Seconds' % (end - start))\n\n self.write({\n 'data': data,\n })\n\n\nclass ProjectionAlgorithmHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n # print(constraint)\n\n arr_X = np.array(constraint['arr_data'])\n dep_X = np.array(constraint['dep_data'])\n\n pca = PCA(n_components=2)\n\n arr_newData = []\n dep_newData = []\n\n if len(arr_X):\n arr_newData = pca.fit_transform(arr_X).tolist()\n if len(dep_X):\n dep_newData = pca.fit_transform(dep_X).tolist()\n # arr_newData = pca.fit_transform(arr_X).tolist()\n # dep_newData = pca.fit_transform(dep_X).tolist()\n\n # print(arr_newData)\n # print(dep_newData)\n\n # newData = []\n\n # if len(arr_X):\n # for row in arr_newData:\n # newData.append(row.tolist())\n\n # if len(dep_X):\n # for row in dep_newData:\n # newData.append(row.tolist())\n\n # print(newData)\n\n # newData = newData.tolist()\n\n data = {'arr_data': arr_newData, 'dep_data': dep_newData}\n data = json.dumps(data)\n # print(data)\n self.write({\n 'data': data,\n })\n\n\nclass FixpotPassingNumberHandler(tornado.web.RequestHandler):\n def post(self):\n self.set_header('Access-Control-Allow-Origin', \"*\")\n\n # 航路点到该条轨迹的最短距离\n def pass_this_fixpot(traj_select, FP_select):\n\n def distance_two_points(fp_x, fp_y, tr_x, tr_y):\n calX = fp_x - tr_x\n calY = fp_y - tr_y\n distance = pow((calX * calX + calY * calY), 0.5)\n return distance\n\n def distance_FP_traj(fp_x, fp_y, a_x, a_y, b_x, b_y):\n cross = {}\n result = {}\n # 求最近距离\n if a_x == b_x and a_y == b_y:\n dis = distance_two_points(fp_x, fp_y, a_x, a_y)\n cross['x'] = a_x\n cross['y'] = a_y\n else:\n if a_x == b_x:\n dis = abs(a_x - fp_x)\n cross['x'] = a_x\n cross['y'] = fp_y\n else:\n A = (a_y - b_y) / (a_x - b_x)\n B = a_y - A * 
a_x\n dis = abs(A * fp_x + B - fp_y) / math.sqrt(1 + A * A)\n # 求两直线交点坐标\n m = fp_x + A * fp_y\n cross['x'] = (m - A * B) / (A * A + 1)\n cross['y'] = A * cross['x'] + B\n # 时间插值\n timeRange = traj_select['data'][index2]['Timestamp'] - traj_select['data'][index1]['Timestamp']\n if a_x == b_x and a_y == b_y:\n time_stamp = traj_select['data'][index1]['Timestamp']\n else:\n if b_x != a_x:\n timeDomain = b_x - a_x\n time_stamp = traj_select['data'][index1]['Timestamp'] + timeRange * (\n b_x - cross['x']) / timeDomain\n else:\n timeDomain = b_y - a_y\n # print('timeDomain:', timeDomain, a_x, b_x, a_y, b_y)\n time_stamp = traj_select['data'][index1]['Timestamp'] + timeRange * (\n b_y - cross['y']) / timeDomain\n\n time_to_end = traj_select['eTime'] - time_stamp\n result = {\"select_traj\": traj_select, \"FP\": FP_select, \"distance\": dis, \"timestamp\": time_stamp,\n \"crosspoint\": cross, \"time_to_end\": time_to_end};\n return result\n\n # ***************************************************************************************************\n\n distance_array = []\n # åprint('$$$$$$$$$$$$$$$$$$$$$$$$', traj_select)\n for d in traj_select['data']:\n res = distance_two_points(FP_select['x'], FP_select['y'], d['x'], d['y'])\n distance_array.append(res)\n\n min = 10000000\n for i, d in enumerate(distance_array):\n if d < min:\n min = d\n index1 = i\n if index1 == 0:\n index2 = index1 + 1\n if index1 == len(distance_array) - 1:\n index2 = index1 - 1\n else:\n if distance_array[index1 - 1] < distance_array[index1 + 1]:\n index2 = index1 - 1\n else:\n index2 = index1 + 1\n\n distance_final = distance_FP_traj(FP_select['x'], FP_select['y'], traj_select['data'][index1]['x'],\n traj_select['data'][index1]['y'], traj_select['data'][index2]['x'],\n traj_select['data'][index2]['y'])\n\n return distance_final\n\n # ***************************************************************************************************\n\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n arr_trajDataHistogram = constraint['arr_trajDataHistogram']\n dep_trajDataHistogram = constraint['dep_trajDataHistogram']\n trajDataHistogram = {'arrTrajs': arr_trajDataHistogram, 'depTrajs': dep_trajDataHistogram}\n fixpot_array = constraint['fixpot_array']\n\n # 该函数的全局变量\n arr_dep = ['arrTrajs', 'depTrajs']\n fixpot_arrival = ['BOBAK', 'DOGAR', 'GITUM', 'KM', 'VYK']\n fixpot_departure = ['LADIX', 'RENOB', 'SOSDI', 'TONIL', 'YV', 'CDY']\n result_array = {\"arr\": [], \"dep\": []}\n fixpot_filter_arrival = {}\n fixpot_filter_departure = {}\n # 整理计算结果\n for n in arr_dep:\n for d in trajDataHistogram[n]:\n traj_select = d\n n2 = n[0:3]\n for k in fixpot_array[n2]:\n FP_select = k\n result = pass_this_fixpot(traj_select, FP_select)\n if result['distance'] < 20:\n result_array[n2].append(result)\n # 筛选出经过指定fixpot的航班\n for s in fixpot_arrival:\n fixpot_filter_arrival[s] = []\n for d in result_array['arr']:\n if d['FP']['name'] == s:\n fixpot_filter_arrival[s].append(d)\n\n for s in fixpot_departure:\n fixpot_filter_departure[s] = []\n for d in result_array['dep']:\n if d['FP']['name'] == s:\n fixpot_filter_departure[s].append(d)\n\n data = {\"arr\": fixpot_filter_arrival, \"dep\": fixpot_filter_departure}\n data = json.dumps(data)\n # print(data)\n\n self.write({\n 'data': data,\n })\n\n\nclass AirportReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', 
\"*\")\n\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n points = g_PointReader.queryAirport(constraint)\n\n # print(\"points:\", points)\n airport = []\n for index in points:\n del index['_id']\n temp = {}\n loc = index['loc']\n del index['loc']\n temp['country'] = loc['country']\n temp['latitude'] = loc['coordinates'][1]\n temp['longitude'] = loc['coordinates'][0]\n index['position'] = temp\n airport.append(index)\n # del points['_id']\n # data = points['data']\n airport = json.dumps(airport)\n\n self.write({\n 'data': airport,\n })\n\n\nclass LoginReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n username = constraint['username']\n password = constraint['password']\n\n # print(username)\n # print(password)\n inf = \"error\"\n if username == \"boeing\" and password == \"pkuvisboeing\":\n inf = 'success'\n\n inf = json.dumps(inf)\n self.write({\n 'data': inf,\n })\n\n\nclass FixpotReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n # print('$$$$$$$$$$$$$$$$$')\n # print(constraint)\n points = g_PointReader.queryFixpot(constraint)\n\n # print(\"points:\", points)\n # airport = []\n # for index in points:\n # del index['_id']\n # temp={}\n # loc = index['loc']\n # del index['loc']\n # temp['country'] = loc['country']\n # temp['latitude'] = loc['coordinates'][1]\n # temp['longitude'] = loc['coordinates'][0]\n\n # index['position'] = temp\n # airport.append(index) \n # del points['_id']\n # data = points['data']\n\n fixPot = []\n for i in range(len(points)):\n del points[i]['_id']\n fixPot.append(points[i])\n\n fixPot = json.dumps(fixPot)\n\n self.write({\n 'data': fixPot,\n })\n\n\nclass CallsignReadHandler(tornado.web.RequestHandler):\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n constraint = self.get_argument('constraint')\n constraint = json.loads(constraint)\n\n CDMData = g_PointReader.queryCallsign(constraint)\n\n CDMKey = []\n arrCDMs = []\n depCDMs = []\n for index in CDMData:\n if index['trajID'] not in CDMKey:\n del index['_id']\n ICAO = getICAO(index['航班号'])\n index['airlineCode'] = ICAO\n try:\n index['airline'] = airlineMap[getICAO(index['航班号'])]\n except:\n index['airline'] = ''\n\n CDMKey.append(index['trajID'])\n if index['arrdir'] == 1:\n arrCDMs.append(index)\n else:\n depCDMs.append(index)\n\n # for index in CDMData:\n # #if index['trajID'] not in CDMKey:\n # del index['_id']\n # #CDMKey.append(index['trajID'])\n # #index['trajID'] = str(uuid.uuid1())\n # if index['arrdir'] ==1:\n\n # arrCDMs.append(index)\n # else:\n # depCDMs.append(index)\n\n CDMData = {'depCDMs': depCDMs, 'arrCDMs': arrCDMs}\n\n pcData = filterData(CDMData)\n\n data = {'CDMData': CDMData, 'pcData': pcData}\n data = json.dumps(data)\n\n end = time.clock()\n # print('Filter circle running time: %s Seconds'%(end-start))\n\n self.write({\n 'data': data,\n })\n\n # data=[]\n # for index in points:\n # del index['_id']\n # 
data.append(index)\n\n # print(\"callsign num\", len(data))\n # data = json.dumps(data)\n\n\n\n # self.write({\n # 'data': data,\n # })\n\n\nclass TrajIDReadHandler(tornado.web.RequestHandler):\n # query trajID\n def post(self):\n ''''\n constraint: {\n 'databaseType': //database type\n 'dataSetName': //name of collection\n }\n '''\n self.set_header('Access-Control-Allow-Origin', \"*\")\n constraint = self.get_argument('constraint')\n # print('**************************')\n # print(constraint)\n constraint = json.loads(constraint)\n # print('**************************')\n points = g_PointReader.queryTrajID(constraint)\n # traj = {}\n # for i in range(len(points)):\n # del points[i]['_id']\n # traj[str(i)]=points[i]\n del points['_id']\n traj = json.dumps(points)\n\n print('traj')\n\n self.write({\n 'sus': \"yes\",\n 'data': traj,\n })\n\n\nclass PointMetaReadHandler(tornado.web.RequestHandler):\n def get(self):\n self.write('ok');\n\n\nclass PointWriteHandler(tornado.web.RequestHandler):\n def post(self):\n datasetname = self.get_argument('DataSetName');\n datasetname = datasetname.strip();\n\n lidir = self.get_argument('liDir').split(',')\n lidir_new = []\n for tempdir in lidir:\n lidir_new.append(tempdir.strip())\n lidir = lidir_new;\n\n datadescript = self.get_argument('DataDescript')\n\n posnamelist = self.get_argument('PosNameList').split(',')\n liposname_new = []\n for posname in posnamelist:\n liposname_new.append(posname.strip())\n posnamelist = liposname_new;\n\n timename = self.get_argument('TimeName').strip();\n\n liattr = self.get_argument('liAtrrName').split(',');\n liattr_new = []\n for att in liattr:\n liattr_new.append(att.strip())\n liattr = liattr_new;\n\n pointmeta = {}\n pointmeta['m_DataSetName'] = datasetname;\n pointmeta['m_DataDescript'] = datadescript;\n pointmeta['m_PosName'] = posnamelist;\n pointmeta['m_TimeName'] = timename;\n pointmeta['m_liAtrrName'] = liattr;\n print(' point meta ', pointmeta);\n\n # pointmeta = json.loads(pointmeta_str);\n # print('[PointWriteHandler] 2 ', pointmeta, type(pointmeta));\n # print(\"[PointWriteHandler] read point file\", pointmeta['m_DataSetName']);\n # print(\"[PointWriteHandler] read m_DataDescript\", pointmeta.m_DataDescript);\n # print(\"[PointWriteHandler] read m_PosName\", pointmeta.m_PosName);\n # print(\"[PointWriteHandler] read m_TimeName\", pointmeta.m_TimeName);\n # print(\"[PointWriteHandler] read m_liAtrrName\", pointmeta.m_liAtrrName);\n\n result = g_PointWriter.writePoints(pointmeta, lidir);\n # self.write(result);\n self.write('ok');\n\n # self.write('ok');\n","sub_path":"server/handler/data/pointhandler.py","file_name":"pointhandler.py","file_ext":"py","file_size_in_byte":70921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"140092448","text":"# script for generating shortest paths and path lengths\n\n# import\n\nimport osmnx as ox\nimport networkx\nimport pickle\nimport sqlalchemy\nfrom sqlalchemy import Column, Integer, String, Boolean, Float, Sequence, BigInteger\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom geoalchemy2 import Geometry\nimport pandas as pd\nfrom datetime import datetime \nimport csv\n\n\n\n########################################################################################################\n# connect to database\n\nengine = sqlalchemy.create_engine('postgresql://postgres:tuesday789@localhost:5432/Dissertation')\n\n# initialize ORM object/table relationship class\n\nBase = 
declarative_base(engine)\nmetadata = sqlalchemy.MetaData()\n\n# class travel_time(Base):\n\n# \t__tablename__ = 'travel_times'\n\n# \tid = Column(Integer, Sequence('id'), primary_key=True)\n# \tnet_filter = Column(Integer)\n# \torigin = Column(BigInteger)\n# \tdestination = Column(BigInteger)\n# \thas_path = Column(Boolean)\n# \tdistance = Column(Float)\n\nclass nearest_nodes(Base):\n\n\t__table__ = sqlalchemy.Table('nearest_node', metadata, autoload=True, autoload_with=engine)\n\n\nBase.metadata.create_all(engine)\n\n# test_travel_time = travel_time(net_filter = 'bike_1', origin = 652813798, destination = 1236955052, has_path = True, distance = 101.101)\n\nSession = sessionmaker(bind=engine)\n\nsession = Session()\n\n# after this line, the data in test_travel_time is \"pending\"\n# still separate from the DB\n# session.add(travel_time)\n\n# now commit the outstanding data/session status\n# session.commit()\n\n####################################################################################################\n\ndef net_dict():\n\tfile_dict = {}\n\tfor i in range(1,3):\n\t\t# print(i)\n\t\tfile_dict[i] = 'pickles/unconnected_london_bike_' + str(i) + '_projected_pickle.file'\n\t\t# print(file_dict[i])\n\treturn file_dict\n\n\n# def trip_calc(net, origin, dest):\n\n# \tcheck_path = networkx.has_path(net, origin, destination)\n\n# \tif check_path:\n\n# \t\t# path = networkx.shortest_path(net, origin, destination, weight='length')\n# \t\tpath_length = networkx.shortest_path_length(net, origin, destination, weight='length')\n# \t\tcalced_data = travel_time(origin = origin, destination = destination, has_path = check_path, distance = path_length)\n\n# \telse: \n\t\n# \t\tcalced_data = travel_time(origin = origin, destination = destination, has_path = check_path)\n\n\n# \treturn calced_data\n\n\n\n# net_dict = net_dict()\n# print(net_dict)\n\n\n# file_dict['1'] = 'pickles/london_bike_1_projected_pickle.file'\n# file_dict['2'] = 'pickles/london_bike_2_projected_pickle.file'\n# file_dict['3'] = 'pickles/london_bike_3_projected_pickle.file'\n# file_dict['4'] = 'pickles/london_bike_4_projected_pickle.file'\n# file_dict['5'] = 'pickles/london_bike_5_projected_pickle.file'\n\n####################################################################################################\n\n# get origin and destination lists\nsql = 'select id, lsoa11cd, nearest_common_node from nearest_node'\nnodes_df = pd.read_sql(sql, con = engine) \nprint(nodes_df.head(2))\n\n# load network from pickle\n\n# file = 'pickles/london_bike_1_projected_pickle.file'\n\n# net = pickle.load(open(file, \"rb\" ))\n\nnode_id_list = list(nodes_df['nearest_common_node'])\n# p_id = 0\n# debug_limit = 10\nnet_dict = net_dict()\n\nstart = datetime.now()\n\nfor key, value in net_dict.items():\n\n\tprint('key: ', key)\n\t# print('type of key: ', type(key))\n\tprint('value: ', value)\n\t# debug_limit = p_id + 10\n\n\tnet = pickle.load(open(value, \"rb\" ))\n\tprint(type(net))\n\n\tout_file = 'undirected_haspath' + str(key) + '.csv'\n\n\twith open(out_file, 'a') as csv_file:\n\n\t\twriter = csv.writer(csv_file)\n\n\t\t# for each item in origin list\n\n\t\tfor origin in node_id_list:\n\t\t\t\n\t\t\t# if p_id > debug_limit:\n\t\t\t\t# break\n\n\t\t\tfor destination in node_id_list:\n\n\t\t\t\tif origin == destination:\n\t\t\t\t\t# print('matchy matchy')\n\t\t\t\t\tcontinue\n\n\t\t\t\t# print('id: ', p_id)\n\t\t\t\t# print('origin: ', origin)\n\t\t\t\t# print('destination: ', destination)\n\n\t\t\t\t# if p_id > debug_limit:\n\t\t\t\t\t# 
break\n\n\t\t\t\t###############################################################\n\t\t\t\t# calc data for O/D pair\n\n\t\t\t\t# trip = trip_calc(net, origin, destination)\n\n\t\t\t\tcheck_path = networkx.has_path(net, origin, destination)\n\n\t\t\t\trow = [origin, destination, check_path]\n\n\t\t\t\twriter.writerow(row)\n\n\n\t\t\t\t# trip.net_filter = key\n\t\t\t\t# print(\"trip object filter value: \", trip.net_filter)\n\t\t\t\t# print('distance: ', trip.distance)\n\n\t\t\t\t# p_id = p_id + 1\n\n\t\t\t\t#####################\n\t\t\t\t# send to sql DB\n\n\t\t\t\t# session.add(trip)\n\t\t\t\t# session.commit()\n\tcsv_file.close()\n\nend = datetime.now()\n\nprint('start: ', start)\nprint('end: ', end)\n\nprint('elapse: ', (end - start))\n\n\t# for each item in destination list\n\n\n\t\t# try\n\n\t\t\t# shortest path\n\n\t\t\t# shortest path length\n\n\t\t\t# push object to database\n\n\t\t# except\n\n\t\t\t\t# O and D are not connected\n\n\t\t\t\t# push Null to DB \n\n\n\n\n\n# test\n\n######################################################################################################\n\n# file = 'pickles/london_bike_1_projected_pickle.file'\n\n# net = pickle.load(open(file, \"rb\" ))\n\n# origin = 652813798\n# destination = 1236955052\n\n\n# # could use dijkstra or Bellman-ford algo for finding path\n\n\n# check = networkx.has_path(net, origin, destination)\n\n# test_path = networkx.shortest_path(net, origin, destination, weight='length')\n\n# test_path_length = networkx.shortest_path_length(net, origin, destination, weight='length')\n\n# fig, ax = ox.plot_graph_route(net, test_path, node_size=0)\n\n######################################################################################################\n\n\n\n# route_map = ox.plot_route_folium(net, test_path)\n\n\n# https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.shortest_paths.generic.has_path.html#networkx.algorithms.shortest_paths.generic.has_path\n\n# https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.shortest_paths.weighted.dijkstra_path.html#networkx.algorithms.shortest_paths.weighted.dijkstra_path\n\n# shortest_path_length = networkx.dijkstra_path_length(net, origin, destination, weight = ??)\n\n# https://networkx.github.io/documentation/stable/reference/algorithms/shortest_paths.html\n\n\n# shortest_path_nodes = networkx.dijkstra_path(net, origin, destination, weight = ?????)\n\n\n# NetworkX algorithms designed for weighted graphs cannot use multigraphs directly because it is not \n# clear how to handle multiedge weights. Convert to Graph using edge attribute ‘weight’ to enable \n# weighted graph algorithms.\n\n# code for converting a multigraph into a graph with weights\n# https://stackoverflow.com/questions/15590812/networkx-convert-multigraph-into-simple-graph-with-weighted-edges#15598279\n\n\n\n# osmnx example seems to think nx.shortest_path can handle whatever OSMNX builds from OSM. 
\n# classes\n\n# multidigraph \n# \n","sub_path":"Analysis/has_paths.py","file_name":"has_paths.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"328213538","text":"# Course: Data Analysis Tools\n# Week2\n#Editor: Kuo-Lin Hsueh\nimport pandas as pd\nimport numpy as np\nimport seaborn\nimport scipy.stats\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv(\"gapminder.csv\", low_memory = False, index_col = 0)\nmaleemployrate = pd.read_csv(\"maleemployrate_sub.csv\", low_memory = False, index_col = 0)\n\ndf[\"incomeperperson\"] = df[\"incomeperperson\"].convert_objects(convert_numeric=True)\ndf[\"femaleemployrate\"] = df[\"femaleemployrate\"].convert_objects(convert_numeric=True)\ndf[\"polityscore\"] = df[\"polityscore\"].convert_objects(convert_numeric=True)\ndf['employrate'] = df['employrate'].convert_objects(convert_numeric=True)\n\nmaleemployrate['2007'] = maleemployrate['2007'].convert_objects(convert_numeric=True)\n\n\n#Concatenate df , df2\ndf3 = pd.concat([df, maleemployrate], axis=1, join_axes=[df.index])\ndf3.rename(columns= {'2007':'maleemployrate'}, inplace=True) #rename column\n\n## Group by mean\nipp_mean = df3['incomeperperson'].dropna().mean()\nfirst = df3['incomeperperson'].quantile(q=0.2)\nsecond = df3['incomeperperson'].quantile(q=0.4)\nthird = df3['incomeperperson'].quantile(q=0.6)\nfourth = df3['incomeperperson'].quantile(q=0.8)\nfifth = df3['incomeperperson'].quantile(q=1)\ndef meangroup (row):\n if row['incomeperperson'] >=ipp_mean:\n return 1\n elif row['incomeperperson'] < ipp_mean:\n return 0\n \ndf3['meangroup'] = df3.apply(lambda row : meangroup(row),axis=1)\nc3= df3.groupby('meangroup').size()\n\n#cut into 5 groups\ndef income5groups(row):\n if row['incomeperperson'] <= first:\n return \"1_Terrible\"\n elif row['incomeperperson'] <=second:\n return \"2_Bad\"\n elif row['incomeperperson'] <= third:\n return \"3_Average\"\n elif row['incomeperperson'] <= fourth:\n return \"4_Decent\"\n elif row['incomeperperson'] <= fifth:\n return \"5_Great\"\n \ndf3['income5groups'] = df3.apply(lambda row : income5groups(row),axis=1)\ndf3['income5groups'] = df3['income5groups'].astype('category')\n# contingency table of observed counts\nct1=pd.crosstab(df3['polityscore'] , df3['income5groups'])\nprint (ct1)\n\n# column percentages\ncolsum=ct1.sum(axis=0)\ncolpct=ct1/colsum\nprint (colsum)\nprint(colpct)\n\n# chi-square\nprint ('chi-square value, p value, expected counts')\ncs1= scipy.stats.chi2_contingency(ct1)\nprint (ct1)\n\n#code for setting variables to numeric:\ndf3['polityscore'] = df3['polityscore'].convert_objects(convert_numeric=True)\n\n# graph percent with nicotine dependence within each smoking frequency group \nseaborn.factorplot(x=\"income5groups\", y=\"polityscore\", data=df3, kind=\"bar\", ci=None)\nplt.xlabel('income5groups')\nplt.ylabel('polityscore')\nplt.show()\n\n\n\n# Terrible vs. Bad\nrecode ={'1_Terrible': 'Terrible', '2_Bad':'Bad'}\ndf3['terriblevsbad'] = df3['income5groups'].map(recode)\nct2 = pd.crosstab(df3['polityscore'], df3['terriblevsbad'])\nprint ('Terrible vs. 
Bad\\nchi-square value, p value, expected counts')\ncs2= scipy.stats.chi2_contingency(ct2)\nprint (cs2)\n\n# Terrible vs Average\nrecode1 ={'1_Terrible': 'Terrible', '3_Average':'Average'}\ndf3['terriblevsaverage'] = df3['income5groups'].map(recode1)\nct3 = pd.crosstab(df3['polityscore'], df3['terriblevsaverage'])\nprint ('Terrible vs Average\\nchi-square value, p value, expected counts')\ncs3= scipy.stats.chi2_contingency(ct3)\nprint (cs3)\n\n# Terrible vs Decent\nrecode2 ={'1_Terrible': 'Terrible', '4_Decent':'Decent'}\ndf3['terriblevsdecent'] = df3['income5groups'].map(recode2)\nct4 = pd.crosstab(df3['polityscore'], df3['terriblevsdecent'])\nprint ('Terrible vs Decent\\nchi-square value, p value, expected counts')\ncs4= scipy.stats.chi2_contingency(ct4)\nprint (cs4)\n\n#Terrible vs. Great\nrecode3 ={'1_Terrible': 'Terrible', '5_Great':'Great'}\ndf3['terriblevsgreat'] = df3['income5groups'].map(recode3)\nct5 = pd.crosstab(df3['polityscore'], df3['terriblevsgreat'])\nprint ('Terrible vs. Great\\nchi-square value, p value, expected counts')\ncs5= scipy.stats.chi2_contingency(ct5)\nprint (cs5)\n\n# Bad vs Average\nrecode3plus ={'2_Bad': 'Bad', '3_Average':'Average'}\ndf3['badvsaverage'] = df3['income5groups'].map(recode3plus)\nct5plus = pd.crosstab(df3['polityscore'], df3['badvsaverage'])\nprint ('Bad vs Average\\nchi-square value, p value, expected counts')\ncs5plus= scipy.stats.chi2_contingency(ct5plus)\nprint (cs5plus)\n\n\n#Bad vs. Decent\nrecode4 ={'2_Bad': 'Bad', '4_Decent':'Decent'}\ndf3['badvsdecent'] = df3['income5groups'].map(recode4)\nct6 = pd.crosstab(df3['polityscore'], df3['badvsdecent'])\nprint ('Bad vs. Decent\\nchi-square value, p value, expected counts')\ncs6= scipy.stats.chi2_contingency(ct6)\nprint (cs6)\n\n# Bad vs Great\nrecode5 ={'2_Bad': 'Bad', '5_Great':'Great'}\ndf3['badvsgreat'] = df3['income5groups'].map(recode5)\nct7 = pd.crosstab(df3['polityscore'], df3['badvsgreat'])\nprint ('Bad vs Great\\nchi-square value, p value, expected counts')\ncs7= scipy.stats.chi2_contingency(ct7)\nprint (cs7)\n\n#Average vs. Decent\nrecode5plus = {'3_Average':'Average', \"4_Decent\":\"Decent\"}\ndf3['averagevsdecent'] = df3['income5groups'].map(recode5plus)\nct7plus = pd.crosstab(df3['polityscore'], df3['averagevsdecent'])\nprint ('Average vs. 
Decent\\nchi-square value, p value, expected counts')\ncs7plus= scipy.stats.chi2_contingency(ct7plus)\nprint (cs7plus)\n\n\n# Average vs Great\nrecode5plus2 = {'3_Average':'Average', '5_Great':'Great'}\ndf3['averagevsgreat'] = df3['income5groups'].map(recode5plus2)\nct7plus2 = pd.crosstab(df3['polityscore'], df3['averagevsgreat'])\nprint ('Average vs Great\\nchi-square value, p value, expected counts')\ncs7plus2= scipy.stats.chi2_contingency(ct7plus2)\nprint (cs7plus2)\n\n\n\n# Decent vs Great\nrecode6 ={'4_Decent':'Decent', '5_Great':'Great'}\ndf3['decentvsgreat'] = df3['income5groups'].map(recode6)\nct8 = pd.crosstab(df3['polityscore'], df3['decentvsgreat'])\nprint ('Decent vs Great\\nchi-square value, p value, expected counts')\ncs8= scipy.stats.chi2_contingency(ct8)\nprint (cs8)\n","sub_path":"week2_Chi_square.py","file_name":"week2_Chi_square.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"316647120","text":"import afnumpy\nimport numpy\n\ndef copy(a, order='K'):\n return afnumpy.array(a, order=order, copy=True)\n\ndef meshgrid(*xi, **kwargs):\n ndim = len(xi)\n\n copy_ = kwargs.pop('copy', True)\n sparse = kwargs.pop('sparse', False)\n indexing = kwargs.pop('indexing', 'xy')\n\n if kwargs:\n raise TypeError(\"meshgrid() got an unexpected keyword argument '%s'\"\n % (list(kwargs)[0],))\n\n if indexing not in ['xy', 'ij']:\n raise ValueError(\n \"Valid values for `indexing` are 'xy' and 'ij'.\")\n\n s0 = (1,) * ndim\n\n output = [afnumpy.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])\n for i, x in enumerate(xi)]\n\n shape = [x.size for x in output]\n\n if indexing == 'xy' and ndim > 1:\n # switch first and second axis\n output[0].shape = (1, -1) + (1,)*(ndim - 2)\n output[1].shape = (-1, 1) + (1,)*(ndim - 2)\n shape[0], shape[1] = shape[1], shape[0]\n\n if sparse:\n if copy_:\n return [x.copy() for x in output]\n else:\n return output\n else:\n # Return the full N-D matrix (not only the 1-D vector)\n if copy_:\n # Numpy uses dtype=int but Arrayfire does not support int64 in all functions\n mult_fact = afnumpy.ones(shape, dtype=numpy.int32)\n return [x * mult_fact for x in output]\n else:\n return afnumpy.broadcast_arrays(*output)\n\ndef angle(z, deg=0):\n \"\"\"\n Return the angle of the complex argument.\n\n Parameters\n ----------\n z : array_like\n A complex number or sequence of complex numbers.\n deg : bool, optional\n Return angle in degrees if True, radians if False (default).\n\n Returns\n -------\n angle : {ndarray, scalar}\n The counterclockwise angle from the positive real axis on\n the complex plane, with dtype as numpy.float64.\n\n See Also\n --------\n arctan2\n absolute\n\n\n\n Examples\n --------\n >>> np.angle([1.0, 1.0j, 1+1j]) # in radians\n array([ 0. 
, 1.57079633, 0.78539816])\n >>> np.angle(1+1j, deg=True) # in degrees\n 45.0\n\n \"\"\"\n if deg:\n fact = 180/pi\n else:\n fact = 1.0\n z = afnumpy.asarray(z)\n if numpy.issubdtype(z.dtype, numpy.complexfloating):\n zimag = z.imag\n zreal = z.real\n else:\n zimag = 0\n zreal = z\n return afnumpy.arctan2(zimag, zreal) * fact\n","sub_path":"afnumpy/lib/function_base.py","file_name":"function_base.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"347790250","text":"import socketserver\nimport struct\nimport threading\n\n\n#主机地址\nHOST='127.0.0.1'\n#主机端口\nPORT=7654\n\n#连接池\ng_conn_pool = []\n\n\nclass ZxServer(socketserver.BaseRequestHandler):\n\n #缓冲区\n DataBuffer=bytes()\n #头部长度\n HeaderSize=12\n #数据包计数器\n sn=0\n\n\n def setup(self):\n self.request.sendall(\"连接服务器成功!\".encode(encoding='utf8'))\n # 加入连接池\n g_conn_pool.append(self.request)\n\n\n def finish(self):\n print(\"清除了这个客户端。\")\n \n def remove(self):\n print(\"有一个客户端掉线了。\")\n g_conn_pool.remove(self.request)\n\n\n def dataHandle(self,headPack, body):\n self.sn +=1\n print (\"LOG________第%s个数据包\" %self.sn)\n print(\"LOG________版本号:%s, 内容长度:%s, 命令:%s\" % headPack)\n print(body.decode())\n print(\"\")\n\n def handle(self):\n\n try:\n conn=self.request\n zState=True\n\n while zState:\n data = conn.recv(1024)\n if data:\n # 把数据存入缓冲区,类似于push数据\n self.DataBuffer += data\n while True:\n if len(self.DataBuffer) < self.HeaderSize:\n print(\"LOG________数据包(%s Byte)小于消息头部长度,跳出小循环\" % len(self.DataBuffer))\n break\n\n # 读取包头\n # struct中:!代表Network order,3I代表3个unsigned int数据\n headPack = struct.unpack('!3I', self.DataBuffer[:self.HeaderSize])\n bodySize = headPack[1]\n\n # 分包情况处理,跳出函数继续接收数据\n if len(self.DataBuffer) < self.HeaderSize+bodySize :\n print(\"LOG________数据包(%s Byte)不完整(总共%s Byte),跳出小循环\" % (len(self.DataBuffer), self.HeaderSize+bodySize))\n break\n # 读取消息正文的内容\n body = self.DataBuffer[self.HeaderSize:self.HeaderSize+bodySize]\n\n # 数据处理\n self.dataHandle(headPack, body)\n\n # 粘包情况的处理\n self.DataBuffer = self.DataBuffer[self.HeaderSize+bodySize:] # 获取下一个数据包,类似于把数据pop出\n\n except ConnectionResetError as x:\n print(x.strerror)\n g_conn_pool.remove(self.request)\n \n \n \nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n pass\n\n\nif __name__ == '__main__':\n print('服务程序开启')\n server=socketserver.ThreadingTCPServer(((HOST, PORT)),ZxServer)\n\n # 新开一个线程运行服务端\n server_thread = threading.Thread(target=server.serve_forever)\n server_thread.daemon = True\n server_thread.start()\n\n\n print('XX')\n \n\n # 主线程逻辑\n while True:\n cmd = input(\"\"\"--------------------------\n输入1:查看当前在线人数\n输入2:给指定客户端发送消息\n输入3:关闭服务端\n\"\"\")\n if cmd == '1':\n print(\"--------------------------\")\n print(\"当前在线人数:\", len(g_conn_pool))\n elif cmd == '2':\n print(\"--------------------------\")\n index, msg = input(\"请输入“索引,消息”的形式:\").split(\",\")\n g_conn_pool[int(index)].sendall(msg.encode(encoding='utf8'))\n elif cmd == '3':\n server.shutdown()\n server.server_close()\n exit()\n","sub_path":"SocketServer_X.py","file_name":"SocketServer_X.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"231869962","text":"import math\r\nfrom pathlib import Path\r\n\r\n'''Trie node which does not represent the end of a word'''\r\nclass Node():\r\n \r\n def __init__(self, name):\r\n self.__name__ = name\r\n self.__children__ = {}\r\n self.__word_end__ = 
False\r\n \r\n def __add_child__(self, child):\r\n self.__children__[child] = Node(child)\r\n \r\n def __add_word_child__(self, child):\r\n self.__children__[child] = WordNode(child)\r\n \r\n def __has_child__(self, child):\r\n return child in self.__children__\r\n\r\n'''Trie node which represents the end of a word''' \r\nclass WordNode():\r\n \r\n def __init__(self, name, node = None):\r\n if node:\r\n self.__name__ = node.__name__\r\n self.__children__ = node.__children__\r\n else:\r\n self.__name__ = name\r\n self.__children__ = {}\r\n self.__count__ = 0\r\n self.__beg__ = 0\r\n self.__end__ = 0\r\n self.__word_end__ = True\r\n self.__pre_words__ = {}\r\n self.__children__ = {}\r\n \r\n def __add_child__(self, child):\r\n self.__children__[child] = Node(child)\r\n \r\n def __add_word_child__(self, child):\r\n self.__children__[child] = WordNode(child)\r\n \r\n def __has_child__(self, child):\r\n return child in self.__children__\r\n \r\n def __increment__(self, pre, flag):\r\n self.__count__+=1\r\n if pre:\r\n pre = pre.lower()\r\n if pre in self.__pre_words__:\r\n self.__pre_words__[pre]+=1\r\n else:\r\n self.__pre_words__[pre] = 1\r\n if flag == \"beg\":\r\n self.__beg__+=1\r\n if flag == \"end\":\r\n self.__end__+=1\r\n \r\n def __get_counts__(self, pre, flag):\r\n first = None\r\n second = self.__count__\r\n third = None\r\n if flag is not \"beg\":\r\n if pre in self.__pre_words__:\r\n first = self.__pre_words__[pre]\r\n if flag == \"beg\" and self.__beg__ is not 0:\r\n first = self.__beg__\r\n if flag == \"end\" and self.__end__ is not 0:\r\n third = self.__end__ \r\n return first, second, third\r\n\r\n'''Stores instances of Node and WordNode. Methods used by the user are add_sentence__, read_file, and lookup_sentence''' \r\nclass Model():\r\n\r\n __punct__ = ([\".\", \"!\", \"?\", \"\\r\", \"\\n\", \",\", \";\", \":\", \")\", \"(\", \"[\", \"]\", \"{\", \"}\", \"*\", \"<\", \">\", \"\\\"\"])\r\n __punctDict__ = {x: 0 for x in __punct__}\r\n \r\n def __init__(self):\r\n self.__trie_dict__ = {}\r\n self.__tokens__ = 0\r\n self.__sentences__ = 0\r\n \r\n def __increment_model__(self, word, pre, flag):\r\n word_len = len(word)\r\n word = word.lower()\r\n if not word[0] in self.__trie_dict__:\r\n self.__trie_dict__[word[0]] = Node(word[0])\r\n current_node = self.__trie_dict__[word[0]]\r\n i = 1 if word_len > 1 else 0\r\n while i < word_len - 1: \r\n if not current_node.__has_child__(word[i]):\r\n current_node.__add_child__(word[i])\r\n current_node = current_node.__children__[word[i]]\r\n i+=1\r\n if not current_node.__has_child__(word[i]):\r\n current_node.__add_word_child__(word[i])\r\n current_node = current_node.__children__[word[i]]\r\n current_node = WordNode(None, current_node) if (isinstance(current_node, Node)) else current_node\r\n current_node.__increment__(pre, flag)\r\n self.__tokens__+=1 \r\n\r\n def __ispunct__(self, character):\r\n return True if self.__punctDict__.get(character) != None else False\r\n\r\n def __trim__(self, word):\r\n word = word.lower()\r\n if not self.__ispunct__(word[0]) and not self.__ispunct__(word[len(word) -1]):\r\n return word\r\n else: \r\n if self.__ispunct__(word[0]):\r\n word = self.__fronttrim__(word) \r\n if self.__ispunct__(word[len(word) - 1]):\r\n word = self.__endtrim__(word)\r\n return word\r\n \r\n def __fronttrim__(self, word):\r\n trimmed = False\r\n index = 0\r\n while not trimmed:\r\n if not self.__ispunct__(word[index]):\r\n word = word[index:]\r\n trimmed = True\r\n break\r\n index+=1\r\n return word\r\n\r\n def 
__endtrim__(self, word):\r\n trimmed = False\r\n index = len(word)\r\n while not trimmed:\r\n if not self.__ispunct__(word[index-1]):\r\n word = word[:index]\r\n trimmed = True\r\n break\r\n index-=1 \r\n return word\r\n\r\n def __find_word__(self, word, pre, flag):\r\n word_len = len(word)\r\n word = word.lower()\r\n if not word[0] in self.__trie_dict__:\r\n return None, None, None \r\n i = 1\r\n current_node = self.__trie_dict__[word[0]]\r\n while i < word_len -1:\r\n if not current_node.__has_child__(word[i]):\r\n return None, None, None\r\n current_node = current_node.__children__[word[i]]\r\n i+=1 \r\n try:\r\n current_node = current_node.__children__[word[i]]\r\n except:\r\n return None, None, None\r\n if current_node.__word_end__:\r\n return current_node.__get_counts__(pre, flag)\r\n return None, None, None\r\n \r\n '''Adds a sentence from a corpus to the model''' \r\n def add_sentence(self, sentence):\r\n pre = None\r\n flag = \"beg\"\r\n start = 0\r\n end = 0\r\n length = len(sentence)\r\n isRunning = True \r\n try:\r\n stop = False\r\n while isRunning: \r\n if not stop and (end < length):\r\n end+=1\r\n if end == length:\r\n stop = True\r\n else:\r\n if sentence[end] == \" \" or (sentence[end] == \"/\" or sentence[end] == \"-\"):\r\n stop = True\r\n if end == length: \r\n flag = \"end\"\r\n subString = self.__trim__(sentence[start:end])\r\n self.__increment_model__(subString, pre, flag)\r\n isRunning = False\r\n break\r\n if stop:\r\n subString = self.__trim__(sentence[start:end])\r\n self.__increment_model__(subString, pre, flag)\r\n start = end + 1\r\n end = start\r\n pre = subString\r\n if flag == \"beg\":\r\n flag = None\r\n stop = False\r\n except Exception as e:\r\n print(str(e) + \" Some of the text could not be added to the model.\")\r\n self.__sentences__+=1\r\n self.__tokens__+=2\r\n \r\n '''Reads sentences from a text file and calls add_sentence'''\r\n def read_file(self, fileName):\r\n path = Path(fileName)\r\n if not path.exists():\r\n print(\"Filename does not exist. Please check the filename and try again.\")\r\n pass\r\n if path.exists() and not fileName.endswith(\".txt\"):\r\n print(\"Invalid filename. 
Filename must end with the *.txt extension.\")\r\n pass\r\n if path.exists() and fileName.endswith(\".txt\"): \r\n with open(fileName) as file:\r\n line = file.readline().strip()\r\n while line:\r\n self.add_sentence(line)\r\n line = file.readline().strip()\r\n \r\n '''Looks up probability and likelihood of input sentence, and calls __find_word__'''\r\n def lookup_sentence(self, sentence):\r\n pre = None\r\n pre_count = None\r\n pre_display = \"\"\r\n flag = \"beg\"\r\n counts = ()\r\n start = 0\r\n end = 0\r\n total = 0\r\n result = 0\r\n length = len(sentence)\r\n substring = \"\"\r\n isRunning = True \r\n try:\r\n stop = False\r\n while isRunning: \r\n if not stop and (end < length):\r\n end+=1\r\n if end == length:\r\n stop = True\r\n else:\r\n if sentence[end] == \" \":\r\n stop = True\r\n if flag == \"end\":\r\n result = (counts[2]/counts[1]) if counts[1] and counts[2] else (1/self.__tokens__)\r\n result = (math.log(result))\r\n print(\" | \" + sentence[start:end] + \" : Likelihood = \" + str(result) + \" Probability = \" + str(math.exp(result) * 100) + \"%\")\r\n total+= result\r\n isRunning = False\r\n break\r\n if stop:\r\n if end == length:\r\n flag = \"end\"\r\n subString = self.__trim__(sentence[start:end]) \r\n counts = self.__find_word__(subString, pre, flag)\r\n if flag == \"beg\":\r\n result = (counts[0]/self.__sentences__) if counts[0] and counts[1] else (counts[1]/self.__tokens__) if not counts[0] and counts[1] else (1/self.__tokens__)\r\n else: \r\n result = (counts[0]/pre_count) if counts[0] and pre_count else (counts[1]/self.__tokens__) if (not pre_count and not counts[0]) and counts[1] else (1/self.__tokens__)\r\n result = (math.log(result)) \r\n print(sentence[start:end] + \" | \" + (\"\" if flag == \"beg\" else pre_display) + \" : Likelihood = \" + str(result) + \" Probability = \" + str(math.exp(result) * 100) + \"%\")\r\n total+= result \r\n start = end + 1 if end is not length else start\r\n end = start if end is not length else end\r\n pre_display = sentence[start: end]\r\n pre = subString\r\n pre_count = counts[1] \r\n if flag == \"beg\":\r\n flag = None\r\n stop = False\r\n except Exception as e:\r\n print(str(e) + \" Some of the text could not be looked up.\")\r\n print(\"\\n\\\"\" + sentence + \"\\\"\" + \" Likelihood = \" + str(total) + \" Probability = \" + str(math.exp(total) * 100) + \"%\\n\") \r\n \r\n\r\n \r\n\r\n\r\n","sub_path":"BiGramModel.py","file_name":"BiGramModel.py","file_ext":"py","file_size_in_byte":10463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"139019891","text":"from flask import Flask, request, jsonify,render_template, send_file\nfrom flask_ngrok import run_with_ngrok\nimport os\nfrom torch_utils import get_prediction\n\napp = Flask(__name__,static_folder='./static')\nrun_with_ngrok(app)\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg','jfif'}\ndef allowed_file(filename):\n # xxx.png\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET'])\ndef home():\n    return render_template('index.html',file_url='', flag = True)\n\n@app.route('/image', methods=['GET'])\ndef get_image():\n    filename = './testimages/1.jpg'\n    return send_file(filename, mimetype='image/gif')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n    if request.method == 'POST':\n        print(request.files)\n        file = request.files.get('leaf')\n        if file is None or file.filename == \"\":\n            return jsonify({'error': 'no file'})\n        if not allowed_file(file.filename):\n            return jsonify({'error': 'format not supported'})\n        try:\n            file.save(\"./testimages/1.jpg\")\n            prediction = get_prediction()\n            return render_template('index.html',predict=prediction, flag = False)\n        except:\n            return jsonify({'error': 'error during prediction'})","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"488901359","text":"# https://programmers.co.kr/learn/courses/30/lessons/12927\r\n\"\"\"\r\nSum up the whole array and redistribute the work evenly.\r\nEntries smaller than that share cannot be increased, so exclude them and redistribute the rest.\r\n-> Sort the list, keep track of the total work time, and drop entries one by one,\r\nbreaking once the current value is at least remaining // remaining-count.\r\nIf 5 items and 12 hours remain: 3 items of 2 hours and 2 items of 3 hours.\r\nremain // (len(works) - i) = 12 // 5 = 2\r\nremain % tmp = 12 % 5 = 2\r\n-> (remain // tmp) ** 2 * (tmp - remain % tmp) + (remain // tmp + 1) ** 2 * (remain % tmp)\r\n\"\"\"\r\ndef solution(n, works):\r\n    if sum(works) <= n: return 0\r\n    answer = 0\r\n    works.sort()\r\n    remain = sum(works) - n\r\n    tmp = len(works)\r\n    for i in range(len(works)):\r\n        if works[i] <= remain // tmp:\r\n            remain -= works[i]\r\n            tmp -= 1\r\n            continue\r\n        for j in works[:i]: answer += j**2\r\n        return answer + (remain // tmp) ** 2 * (tmp - remain % tmp) + (remain // tmp + 1) ** 2 * (remain % tmp)\r\n\r\nprint(solution(10,[7,8,9]))\r\n","sub_path":"Level 3/야근 지수.py","file_name":"야근 지수.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"90287491","text":"from django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.urls import path\n\nfrom . import views\n\napp_name = 'home'\nurlpatterns = [\n    url(r'^contact_chart/$', views.recent_contact_chart, name='contact_chart'),\n\n    path(\n        'sales_sheet/',\n        login_required(views.SalesSheetView.as_view()),\n        name='sales_sheet'\n    ),\n\n    url(r'^$', login_required(views.Index.as_view()), name='index'),\n]\n","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"1759144","text":"\n# coding: utf-8\n\n# # Data Reader\n# \n# ---\n\n# The following code reads the data from the original idx3-ubyte files (available at http://yann.lecun.com/exdb/mnist/) and puts it into an easily usable NumPy array. 
Additionally we very simply normalize the data by dividing each value by 255 and shuffle the samples.\n\n# In[3]:\n\n\nimport numpy as np\n\ndef convert(img_file,\n label_file,\n n_images, \n single_output,\n shuffle):\n \n print(\"Started loading data ...\")\n img_f = open(img_file, \"rb\") # Images\n lbl_f = open(label_file, \"rb\") # Corresponding labels\n\n ### no need for header info for both\n img_f.read(16)\n lbl_f.read(8)\n \n if single_output:\n Y = np.zeros(shape=(n_images,1))\n X = np.zeros(shape=(n_images,784))\n for i in range(n_images):\n Y[i,0] = ord(lbl_f.read(1))\n for j in range(784):\n X[i,j] = ord(img_f.read(1))\n else:\n Y = np.zeros(shape=(n_images,10))\n X = np.zeros(shape=(n_images,784))\n for i in range(n_images):\n Y[i,ord(lbl_f.read(1))] = 1\n for j in range(784):\n X[i,j] = ord(img_f.read(1))\n\n ### (simple) normalizing\n X = X/255\n \n ### For more sophisticated normalizing ###\n # from sklearn.preprocessing import normalize\n # X = normalize(X, axis=1)\n \n # shuffling the array\n if shuffle:\n Z = np.concatenate((X,Y), axis=1)\n np.random.shuffle(Z)\n Zbar = np.array_split(Z, [784], axis=1)\n X = Zbar[0]\n Y = Zbar[1] # again seperating them again to be able to return them seperately\n\n\n print(\"Finished loading data. Loaded \" + str(n_images) + \" images.\")\n return X, Y\n\n","sub_path":"data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"462461093","text":"#\n#\n\nfrom __future__ import print_function\nimport string\n\n\n#### Global Variables\nrtc_list={}\nconnections={}\nnshost=\"localhost\"\nNS = None\n\n#\n#\ndef get_handle_list(hostname=\"localhost\", ns=None):\n global NS\n if ns :\n NS=ns\n NS.list_obj()\n hdls_names = NS.rtc_handles.keys()\n n=1\n res=[]\n for name in hdls_names:\n print (n,\":\", name)\n res.append(name)\n n += 1\n return res\n\ndef get_handle(name, ns=None):\n global NS\n if not ns : ns=NS\n\n if name.count(\".rtc\") == 0 : name = name+\".rtc\"\n\n try:\n return ns.rtc_handles[name]\n except:\n #get_handle_list()\n return None\n\ndef get_port_info(name, ns=None):\n h = get_handle(name, ns)\n res=[]\n if h :\n for x in h.inports.keys():\n res.append( \"%s (in) [%s]\" % (x, h.inports[x].data_type))\n for x in h.outports.keys():\n res.append( \"%s (out) [%s]\" % (x, h.outports[x].data_type))\n for x in h.services.keys():\n res.append( \"%s (service)\" % (x,))\n return res\n\ndef exit_rtc(name, ns=None):\n h=get_handle(name, ns)\n if h : h.exit()\n\ndef get_named_dataport(name, port, ns=None):\n global NS\n if not ns : ns=NS\n\n if name.count(\".rtc\") == 0 : name = name+\".rtc\"\n hndl=get_handle(name, ns)\n\n if hndl and port:\n if port in hndl.inports.keys():\n return hndl.inports[port]\n if port in hndl.outports.keys():\n return hndl.outports[port]\n\n return None\n\ndef get_name_port(path1):\n val = path1.split(\":\")\n if len(val) == 1 : return [path1, None]\n return val\n\ndef create_connection_name(path1, path2):\n global NS\n name1, port1 = get_name_port(path1)\n name2, port2 = get_name_port(path2)\n\n try:\n pp1 = get_named_dataport(name1, port1, NS)\n pp2 = get_named_dataport(name2, port2, NS)\n pn1 = string.join([name1,pp1.name, name2, pp2.name], '_')\n pn2 = string.join([name2,pp2.name, name1, pp1.name], '_')\n return [pn1, pn2]\n\n except:\n print (\"ERROR in create_connection_name.\")\n pass\n\n return None \n\ndef check_connection_list(names, connections):\n for name in names:\n 
if name in connections.keys() : return True\n\n return False\n\ndef connect_ports(path1, path2):\n global NS, connections\n pp1 = None\n pp2 = None\n con = None\n path1=str(path1.strip())\n path2=str(path2.strip())\n\n if check_connection(path1, path2):\n print (\"Connection already exist.\")\n return None\n\n cnames = create_connection_name(path1, path2)\n if not cnames:\n print (\"Invalid Path: %s, %s\" % (path1, path2))\n return None\n\n \n name1, port1 = get_name_port(path1)\n name2, port2 = get_name_port(path2)\n\n try:\n pp1 = get_named_dataport(name1, port1, NS)\n pp2 = get_named_dataport(name2, port2, NS)\n\n if pp1 and pp2:\n con = IOConnector([pp1, pp2], cnames[0])\n con.connect()\n connections[con.name] = con\n\n except:\n print (\"Connection error\")\n pass\n\n return con\n\ndef find_connection(path1, path2):\n global NS, connections\n path1=str(path1.strip())\n path2=str(path2.strip())\n cnames = create_connection_name(path1, path2)\n\n if not cnames:\n print (\"Invalid Path: %s, %s\" % (path1, path2))\n\n if cnames[0] in connections:\n return [cnames[0], connections[cnames[0]] ]\n\n if cnames[1] in connections:\n return [cnames[1], connections[cnames[1]] ]\n\n print (\"No connection exists.\")\n return None\n\ndef disconnect_ports(path1, path2):\n global NS, connections\n path1=str(path1.strip())\n path2=str(path2.strip())\n if not check_connection(path1, path2):\n print (\"No connection exist.\")\n return None\n\n res=remove_connection(path1, path2)\n '''\n con = find_connection(path1, path2)\n if con:\n del(connections[con[0]])\n con[1].disconnect();\n return con[0]\n '''\n return res\n\ndef activate_rtc(path1):\n name, port = get_name_port(path1)\n\n h=get_handle(name.strip())\n if h :\n h.activate()\n return \"RTC_OK\"\n else:\n return \"No such a RTC:\"+name\n\ndef activate_rtcs(names):\n res=\"\"\n for n in names.split(\",\") :\n res = activate_rtc(n) + \"\\n\"\n return res\n\ndef deactivate_rtc(path1):\n name, port = get_name_port(path1)\n\n h=get_handle(name.strip())\n if h :\n h.deactivate()\n return \"RTC_OK\"\n else:\n return \"No such a RTC:\"+name\n\ndef deactivate_rtcs(names):\n res=\"\"\n for n in names.split(\",\") :\n res = deactivate_rtc(n) + \"\\n\"\n return res\n\ndef exit_rtc(path1):\n name, port = get_name_port(path1)\n\n h=get_handle(name.strip())\n if h :\n h.exit()\n return \"RTC_OK\"\n else:\n return \"No such a RTC:\"+name\n\ndef exit_rtcs(names):\n res=\"\"\n for n in names.split(\",\") :\n res = exit_rtc(n) + \"\\n\"\n return res\n\ndef retrieve_connection_profiles(path1):\n global NS\n name1, port1 = get_name_port(path1)\n\n try:\n pp1 = get_named_dataport(name1, port1, NS)\n return pp1.get_connections()\n except:\n return []\n\ndef check_connection(path1, path2):\n connectors1=retrieve_connection_profiles(path1)\n connectors2=retrieve_connection_profiles(path2)\n for con in connectors1:\n cid = con.connector_id\n for con2 in connectors2:\n if cid == con2.connector_id : return con\n return None\n \ndef remove_connection(path1, path2):\n con = check_connection(path1, path2)\n if con :\n con.ports[0].disconnect(con.connector_id)\n return con\n else:\n print (\"No Connections\")\n return False\n\ndef disconnect_all(path1):\n cprofs=retrieve_connection_profiles(path1)\n for x in cprofs:\n x.ports[0].disconnect(x.connector_id)\n\ndef make_connection(paths, dim=\",\"):\n ports=paths.split(dim) \n if len(ports) == 2:\n connect_ports(ports[0], ports[1])\n\ndef delete_connection(paths, dim=\",\"):\n ports=paths.split(dim) \n if len(ports) == 2:\n 
disconnect_ports(ports[0], ports[1])\n\ndef clear_connection_list():\n global connections\n connections = []\n\n","sub_path":"launch/rtc_handle_tool.py","file_name":"rtc_handle_tool.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"49333599","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.optimize import curve_fit\n\n\ndef get_data(file):\n Vacc = []\n time_ns = []\n particles = []\n with open(file, 'r') as f:\n lines = f.readlines()\n for i in range(len(lines)):\n if 'Time' in lines[i].split():\n Vacc.append(lines[i].split()[-1])\n\n if lines[i].split():\n try:\n float(lines[i].split()[0])\n ans = True\n except ValueError:\n ans = False\n if ans:\n time_ns.append(float(lines[i].split()[0]))\n particles.append(float(lines[i].split()[1]))\n time_ns_f = np.reshape(time_ns, (len(Vacc), int(len(time_ns)/len(Vacc))))\n particles_f = np.reshape(particles, (len(Vacc), int(len(time_ns)/len(Vacc))))\n return Vacc, time_ns_f, particles_f\n\n\ndef exponential(t, N0, alpha):\n return N0 * np.exp(alpha * t)\n\n\ndef plot_CST_data(Vacc, time, particles, title, exponential_fit_display=False, log_y_scale=False):\n alpha_ls = []\n vacc_ls = []\n sns.set_palette(sns.color_palette(\"inferno_r\", len(Vacc)))\n for i in range(1, len(time)):\n plt.plot(time[i], particles[i], label=Vacc[i].split('=')[1].split(')')[0] + ' kV')\n vacc_ls.append(Vacc[i].split('=')[1].split(')')[0])\n plt.legend(loc='best', fancybox=False, framealpha=0.7, edgecolor='k', ncol=3)\n pars, cov = curve_fit(f=exponential, xdata=time[i][630::], ydata=particles[i][630::], p0=[0, 0],\n bounds=(-np.inf, np.inf))\n if exponential_fit_display:\n plt.plot(time[i], exponential(time[i], *pars), linestyle='--', linewidth=2,\n label='Fit of ' + Vacc[i].split('=')[1].split(')')[0] + ' kV')\n alpha_ls.append(pars[1])\n plt.grid(alpha=0.5, ls=':')\n plt.xlabel(r'Time (ns)', fontsize=13)\n plt.ylabel('Number of particles', fontsize=13)\n plt.title(title)\n if log_y_scale:\n plt.yscale('log')\n return vacc_ls, alpha_ls\n\n# if __name__ == '__main__':\n# Vacc, time_ns_f, particles_f = get_data('/Users/chen/Desktop/ANL_work/WiFEL/CST/multipacting/mp_bottom_source_100ns.txt')\n# plt.plot(time_ns_f[0], particles_f[0])\n# plt.show()\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"480653836","text":"from get_data import get_data\nimport traceback\n\ndataset = \"genomes\"\ngoal = 0.7\nprecisions = []\ntopk = \"R1\"\nmax_time = 2\nfor method, data in get_data(dataset):\n\tmax_recall = 0\n\ttry:\n\t\tbest_precision = 0\t\t\n\t\tfor measure, time, recall, precision in data:\n\t\t\tif measure.split(\"@\")[0] == topk and recall > goal and time < max_time:\n\t\t\t\tmax_recall = max(recall, max_recall)\n\t\t\t\tbest_precision = max(best_precision, precision)\n\n\t\tif best_precision != 0:\n\t\t\tprecisions.append(best_precision)\n\t\t\tprint(method, best_precision)\t\n\t\t\tprint(max_recall)\n\texcept Exception as e:\n\t\ttraceback.print_exc() \n\t\tpass\n\nprecisions.sort()\nprint(f\"At {goal} recall with max time of {max_time}ms, the best method on {dataset} is {precisions[-1] / precisions[-2]} better on 
precision\")","sub_path":"analysis/compare-precision.py","file_name":"compare-precision.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"637343849","text":"class Solution:\r\n\tdef maxSubArray(self,nums):\r\n\t\tif not nums:\r\n\t\t\treturn 0\r\n\t\tn=len(nums)\r\n\t\tmaxnum=nums[0]\r\n\t\tcurrnum=nums[0]\r\n\t\tfor i in range(1,n):\r\n\t\t\tif currnum<0:\r\n\t\t\t\tcurrnum=nums[i]\r\n\t\t\telse:\r\n\t\t\t\tcurrnum+=nums[i]\r\n\t\t\tmaxnum=max(maxnum,currnum)\r\n\t\treturn maxnum\r\n","sub_path":"leetcode分类/动态规划/线性DP/53-最大子序和.py","file_name":"53-最大子序和.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"542852400","text":"from pylatex import Document, Package, UnsafeCommand, Section, Subsection\nfrom pylatex.base_classes import Environment, Options\n\n\nclass FormEnvironment(Environment):\n \"\"\"\\begin{Form} ... \\end{Form}\"\"\"\n _latex_name = 'Form'\n packages = [Package('hyperref')]\n escape = False\n content_separator = '\\n'\n\n\n\ndef format_appendix(doc):\n with doc.create(Section(\"Appendix\")):\n with doc.create(Subsection('Comments')):\n with doc.create(FormEnvironment()):\n # \\TextField[name=multilinetextbox,multiline=true,width=\\linewidth,height=1in]{}\n input_field = UnsafeCommand(\n 'TextField',\n arguments=[''],\n options=Options(\n name='multilinetextbox',\n multiline='true',\n width=r'\\linewidth',\n height='3.0in')\n )\n doc.append(input_field)\n# doc = Document()\n# format_appendix(doc)\n# doc.generate_pdf('textfield_test', compiler='pdflatex', clean_tex=False)","sub_path":"scripts/document_parts_package/appendix_generator.py","file_name":"appendix_generator.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"344493712","text":"import numpy\nimport pandas\nimport sys\nfrom sklearn import tree\nfrom sklearn import svm\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom scipy import stats\nimport bvd\n\ncolumns = [\n \"PregnancyCount\",\n \"PlasmaGlucose\",\n \"DiastolicBP\",\n \"TricepsSkinFold\",\n \"2HrSerumInsulin\",\n \"BMI\",\n \"DiabetesPedigreeFx\",\n \"Age\",\n \"Class\"]\n\ntrainD = pandas.read_csv(\n \"C:/Users/Nicholas/Documents/Repos/UWPMP-MachineLearning\"\n \"/Ensembles/Data/pima-indians-diabetes.train\",\n sep=\",\",\n header=None,\n names=columns)\nprint(\"Training data loaded!\")\n\ntestD = pandas.read_csv(\n \"C:/Users/Nicholas/Documents/Repos/UWPMP-MachineLearning\"\n \"/Ensembles/Data/pima-indians-diabetes.test\",\n sep=\",\",\n header=None,\n names=columns)\nprint(\"Test data loaded!\")\n\nnumOfEstimators = 30\n\n# QUESTION 1\n# Create an ensemble of decision trees\ndTree = tree.DecisionTreeClassifier(criterion='entropy')\nbagging = BaggingClassifier(\n dTree,\n n_estimators=numOfEstimators,\n max_samples=len(trainD),\n bootstrap=True)\n\nxDF = trainD.drop('Class', 1, inplace=False)\nyDF = trainD['Class']\n\n# # Train\nbagging.fit(xDF.values, yDF.values)\n\n# # Predict\ntx = testD.drop('Class', 1, inplace=False)\npreds = bagging.predict_proba(tx.values)\n\n# # Calculate bias-variance\nty = testD['Class']\nbvd.biasVarZeroOne(ty.values, preds, numOfEstimators)\n\n# QUESTION 4 PART 1\n# Create a few SVM classifiers\nrbfsvm = svm.SVC(kernel='rbf', probability=True)\nrbfsvm = rbfsvm.fit(xDF.values, 
yDF.values)\nresult = rbfsvm.score(tx.values, ty.values)\nprint(\"rbf kernel score:{0}\".format(result))\n\nlinsvm = svm.SVC(kernel='linear', probability=True)\nlinsvm = linsvm.fit(xDF.values, yDF.values)\nresult = linsvm.score(tx.values, ty.values)\nprint(\"linear kernel score:{0}\".format(result))\n\nsigmoidsvm = svm.SVC(kernel='sigmoid', probability=True)\nsigmoidsvm = sigmoidsvm.fit(xDF.values, yDF.values)\nresult = sigmoidsvm.score(tx.values, ty.values)\nprint(\"sigmoid kernel score:{0}\".format(result))\n\n# QUESTION 4 PART 2\n# Create a naive bayes classifier\nmnb = MultinomialNB()\ny_pred = mnb.fit(xDF.values, yDF.values).score(tx.values, ty.values)\nprint(\"Naive Bayes score:{0}\".format(y_pred))\n\n# QUESTION 4 PART 3\npreds = rbfsvm.predict_proba(tx.values)\nbvd.biasVarZeroOne(ty.values, preds, 1)\n\npreds = linsvm.predict_proba(tx.values)\nbvd.biasVarZeroOne(ty.values, preds, 1)\n\npreds = sigmoidsvm.predict_proba(tx.values)\nbvd.biasVarZeroOne(ty.values, preds, 1)\n\n","sub_path":"Ensembles/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"8440004","text":"##TiC TAC TOE BY TAREK EL-ETER \r\n\r\n#all the modules i needed to import\r\n'''modules i used to import'''\r\nimport os \r\nimport time\r\nimport random\r\nimport sys\r\nimport subprocess\r\nimport math\r\n\r\n\r\nnew = ['','','','','','','','','']\r\nman = ''\r\nplayer2 = ''\r\nmachine = ''\r\nnull = ''\r\n\r\nplayer2 = input(\"1 player or 2 player ???\")\r\nif player2 == 'player2' or player2 == '2':\r\n \r\n \r\n def sign(man, machine, player2):\r\n '''this function asks wither they want to be x or o'''\r\n man = input(\"What team do you want to be? X or O \")\r\n while man not in ('x','X','o','O'):\r\n print (\"Invalid Choice!\")\r\n man = input(\"What team do you want to be? X or O \")\r\n if man == 'x' or man == 'X':\r\n print (\"Ok, X is yours!\")\r\n machine = 'o'\r\n else:\r\n print (\"Ok, O is yours!\")\r\n machine = 'x'\r\n return man.upper(), machine.upper(), player2.upper()\r\n \r\n \r\n\r\ndef decide_turn():\r\n turn = None\r\n while turn not in ('y','Y','n','N'):\r\n turn = input(\"Do you want to play first? 
\")\r\n if turn == 'yes' or turn == 'Y':\r\n return 1\r\n elif turn == 'no' or turn == 'N':\r\n return 0\r\n else:\r\n print (\"thats an invalid choice.\")\r\n\r\ndef draw(a):\r\n '''This function draws the board a given as input'''\r\n print (\"\\t|\\t|\\t\")\r\n print (\" \"+a[0]+\" \\t|\"+a[1]+\" \\t|\"+a[2]+\"\")\r\n print (\"--------|-------|--------\")\r\n print (\" \"+a[3]+\" \\t|\"+a[4]+\" \\t|\"+a[5]+\"\")\r\n print (\"--------|-------|--------\")\r\n print (\" \"+a[6]+\" \\t|\"+a[7]+\" \\t|\"+a[8]+\"\")\r\n print (\"\\t|\\t|\\t\")\r\n \r\n\r\ndef congo_man():\r\n '''prints that the user has won'''\r\n print (\"You have won!!\")\r\n \r\n \r\ndef congo_player2():\r\n print (\"bro i win\")\r\n\r\ndef congo_machine():\r\n '''prints in a fun way that the ai has won'''\r\n print (\"Lol, you lost to a computer!!!\")\r\n\r\ndef man_first(man, machine, new):\r\n while winn(man, machine, new) is None:\r\n move = man_move(man, new)\r\n new[int(move)] = man\r\n draw(new)\r\n if winn(man, machine, new) != None:\r\n break\r\n else:\r\n pass\r\n print (\"Ok i'll take..\")\r\n p_move = machine_move(man, machine, new)\r\n new[int(p_move)] = machine\r\n draw(new)\r\n q = winn(man, machine, new)\r\n if q == 1:\r\n congo_man()\r\n elif q == 0:\r\n congo_machine()\r\n else:\r\n print (\"Its a Tie...\")\r\n\r\n\r\n\r\ndef player2_first(man, player2, new):\r\n while winn(man, player2, new) is None:\r\n move = player_move(man, player2, new)\r\n new[int(move)] = player2\r\n draw(new)\r\n if winn(man, player2, new) != None:\r\n break\r\n else:\r\n pass\r\n print (\"Ok i'll take..\")\r\n p_move = man_move(man, player2, new)\r\n new[int(p_move)] = player2\r\n draw(new)\r\n q = winn(man, player2, new)\r\n if q == 1:\r\n congo_player2()\r\n elif q == 0:\r\n congo_man()\r\n else:\r\n print (\"Its a Tie...\")\r\n\r\n\r\ndef machine_first(man, machine, new):\r\n while not winn(man, machine, new):\r\n print (\"i'll take...\")\r\n p_move = machine_move(man, machine, new)\r\n new[p_move] = machine\r\n draw(new)\r\n if winn(man, machine, new) != None:\r\n break\r\n else:\r\n pass\r\n move = man_move(man, new)\r\n new[int(move)] = man\r\n draw(new)\r\n q = winn(man, machine, new)\r\n if q == 1:\r\n congo_man()\r\n elif q == 0:\r\n congo_machine()\r\n else:\r\n print (\"Its a tie bro...\")\r\n\r\n\r\ndef winn(man, machine, player2, new):\r\n '''shows all possible ways for the user and the machine to win'''\r\n ways = ((0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6))\r\n for i in ways:\r\n if new[i[0]] == new[i[1]] == new[i[2]] != null:\r\n winner = new[i[0]]\r\n if winner == man:\r\n return 1\r\n elif winner == machine:\r\n return 0\r\n if null not in new: \r\n return 'TIE'\r\n if null not in new: \r\n return 'TIE' \r\n return None\r\n\r\n\r\ndef man_move(man, new):\r\n '''makes a prompt asking user where do they want to move'''\r\n a = input(\"where do you want to move? \")\r\n while True:\r\n if a not in ('0','1','2','3','4','5','6','7','8'):\r\n print (\"Sorry, that's a invalid move\")\r\n a = input(\"where do you want to move? \")\r\n elif new[int(a)] != null:\r\n print (\"Sorry, that place is already taken\")\r\n a = input(\"where do you want to move? \")\r\n else:\r\n return int(a)\r\n\r\ndef player2_move(player2, new):\r\n '''makes a prompt asking user where do they want to move'''\r\n a = input(\"where do you want to move? \")\r\n while True:\r\n if a not in ('0','1','2','3','4','5','6','7','8'):\r\n print (\"Sorry, that's a invalid move\")\r\n a = input(\"where do you want to move? 
\")\r\n elif new[int(a)] != null:\r\n print (\"Sorry, that place is already taken\")\r\n a = input(\"where do you want to move? \")\r\n else:\r\n return int(a)\r\n \r\ndef machine_move(man, machine, new):\r\n best = [4, 0, 2, 6, 8]\r\n blank = []\r\n for i in range(0,9):\r\n if new[i] == null:\r\n blank.append(i)\r\n \r\n for i in blank:\r\n new[i] = machine\r\n if winn(man, machine, new) is 0:\r\n\r\n return i\r\n new[i] = null\r\n\r\n for i in blank:\r\n new[i] = man\r\n if winn(man, machine, new) is 1:\r\n\r\n return i\r\n new[i] = null\r\n\r\n return int(blank[random.randrange(len(blank))])\r\n\r\n\r\ndef display_instruction():\r\n print(\" Displays Game Instuructions. \")\r\n print(\"\"\"\r\nWelcome to my game...\r\n\r\n████████╗██╗ ██████╗ ████████╗ █████╗ ██████╗ ████████╗ ██████╗ ███████╗\r\n╚══██╔══╝██║██╔════╝ ╚══██╔══╝██╔══██╗██╔════╝ ╚══██╔══╝██╔═══██╗██╔════╝\r\n ██║ ██║██║ ██║ ███████║██║ ██║ ██║ ██║█████╗ \r\n ██║ ██║██║ ██║ ██╔══██║██║ ██║ ██║ ██║██╔══╝ \r\n ██║ ██║╚██████╗ ██║ ██║ ██║╚██████╗ ██║ ╚██████╔╝███████╗\r\n ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═════╝ ╚══════╝\r\n \r\nYou will make your move by entering a number, 0 - 8.\r\nThese numbers are bound to the board position as illustrated:\r\n\r\n\r\n0 | 1 | 2\r\n----------\r\n3 | 4 | 5 \r\n----------\r\n6 | 7 | 8\r\n\r\n \r\nHave Fun\r\n \"\"\")\r\n\r\n \r\ndef main(man, machine, player2, new):\r\n display_instruction()\r\n print (\"so let the game begin...\")\r\n a = sign(man, machine, player2)\r\n man = a[0]\r\n machine = a[1]\r\n b = decide_turn()\r\n if b == 1:\r\n print (\"Ok, you are first!\")\r\n print (\"Lets get started, here's a new board!\")\r\n draw(new)\r\n man_first(man, machine, new)\r\n elif b == 0:\r\n print (\"Ok, I'll be the first to start!\")\r\n print (\"So, lets start the game\")\r\n draw(new)\r\n machine_first(man, machine, new)\r\n else:\r\n pass\r\n\r\nmain(man, machine, new)\r\ninput(\"Press enter to exit\")\r\n","sub_path":"testtac2.py","file_name":"testtac2.py","file_ext":"py","file_size_in_byte":7755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"459963403","text":"import gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nimport numpy.matlib\n\n\nclass TestConvergenceEnv(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 50\n }\n\n def __init__(self):\n self.goal_pos = np.array([0.0, 0.0])\n self.start_pos = np.array([-14.0, 0.0, 0.0])\n self.bound_box = np.array([[-15.0, 2.0], [-3.0, 3.0]])\n\n self.state = np.copy(self.start_pos)\n self.viewer = None\n\n self.num_samples_laser = 30\n # self.num_samples_laser = 160\n # self.max_measurement_laser = 10.0\n self.max_measurement_laser = 5.0\n self.laser_obs = 100.0 * np.ones(self.num_samples_laser)\n\n down_obs = np.zeros(self.num_samples_laser)\n up_obs = 100*np.ones(self.num_samples_laser)\n\n # self.action_space = spaces.Discrete(8*3)\n # self.action_space = spaces.Discrete(5 * 3)\n self.action_space = spaces.Discrete(8)\n # self.action_space = spaces.Discrete(7)\n self.observation_space = spaces.Box(down_obs, up_obs)\n self.manual_pose = False\n\n self.goal_radius = 0.75\n self.drone_radius = 0.4\n\n self.obstacle_num = 1\n self.obstacle_radius = 0.2\n self.obstacle_dimensions_user = 0.0\n self.obstacle_type = ['cylinder']\n self.obstacle_pos = np.zeros((self.obstacle_num, 3))\n self.obstacle_dim = []\n self.obstacle_dim.append(self.obstacle_radius)\n\n def set_obstacle_radius(self, radius):\n 
self.obstacle_radius = radius\n\n def _seed(self, seed=None): # this is a hack, but no easy way to set parameters\n # self.set_obstacle_radius(seed['obstacle_radius'])\n self.obstacle_dimensions_user = seed['obstacle_dimension']\n self.obstacle_num = seed['obstacle_num']\n self.obstacle_pos = np.zeros((self.obstacle_num, 3))\n self.obstacle_type = seed['obstacle_type']\n if seed['set_seed']:\n self.np_random, seed = seeding.np_random(seed['seed'])\n if seed['set_obst_pose']:\n self.manual_pose = True\n self.obstacle_pos[:, 0] = seed['obst_pose'][0]\n self.obstacle_pos[:, 1] = seed['obst_pose'][1]\n self.goal_pos[0] = seed['goal_pose'][0]\n self.goal_pos[1] = seed['goal_pose'][1]\n self.obstacle_dim = []\n self.obstacle_dim.append(self.obstacle_dimensions_user)\n\n\n return [True]\n\n def _step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\" % (action, type(action))\n\n old_state = np.copy(self.state)\n\n d_state = self.action_2_state_d(action)\n\n rot_mat = np.array([[np.cos(self.state[2]), -np.sin(self.state[2])],\n [np.sin(self.state[2]), np.cos(self.state[2])]])\n d_state_trans = np.matmul(rot_mat, d_state[0:2])\n\n self.state[0:2] = self.state[0:2] + d_state_trans # + 0.03 * np.random.randn(2)\n self.state[2] = self.state[2] + d_state[2]\n\n self.state[2] = (self.state[2] + np.pi) % (2 * np.pi) - np.pi\n\n done = self.state[0] < self.bound_box[0, 0] \\\n or self.state[0] > self.bound_box[0, 1] \\\n or self.state[1] < self.bound_box[1, 0] \\\n or self.state[1] > self.bound_box[1, 1]\n\n done = bool(done)\n\n reward = 0.0\n if done:\n # reward = -1.0 # TODO: maybe not to penalize for going out\n reward = 0.0\n else:\n\n collision = self.test_collision(self.state[:2], self.drone_radius)\n\n if collision:\n reward = -1.0\n done = True\n else:\n if self.state[0] > 0.0:\n reward = 1.0\n done = True\n else:\n if np.linalg.norm(self.state[0] - self.goal_pos[0]) < np.linalg.norm(old_state[0] - self.goal_pos[0]):\n #reward = 0.0005\n reward = 0.01\n #reward = 0.001\n\n reward += -0.005\n\n observation = self.laser_readings()\n self.laser_obs = np.copy(observation)\n distance_vec = self.goal_pos - self.state[0:2]\n distance = np.linalg.norm(distance_vec)\n angle = np.arctan2(distance_vec[1], distance_vec[0])\n angle = angle - self.state[2]\n angle = (angle + np.pi) % (2 * np.pi) - np.pi\n goal_measurements = np.array([distance, angle])\n\n measurement = np.concatenate((observation, goal_measurements))\n # measurement = goal_measurements\n\n info = self.obstacle_pos\n\n return measurement, reward, done, info\n\n # def action_2_state_d(self, action):\n # return {\n # 0: np.array([0.5, 0.0, 0.0]),\n # 1: np.array([0.353, 0.353, 0.0]),\n # 2: np.array([0.0, 0.5, 0.0]),\n # 3: np.array([0.0, -0.5, 0.0]),\n # 4: np.array([0.353, -0.353, 0.0]),\n # 5: np.array([0.0, 0.0, np.pi/16]),\n # 6: np.array([0.0, 0.0, -np.pi/16]),\n # }[action]\n\n\n # def action_2_state_d(self, action):\n # return {\n # 0: np.array([0.5, 0.0, 0.0]),\n # 1: np.array([0.353, 0.353, 0.0]),\n # 2: np.array([0.0, 0.5, 0.0]),\n # 3: np.array([0.0, -0.5, 0.0]),\n # 4: np.array([0.353, -0.353, 0.0]),\n # 5: np.array([0.5, 0.0, np.pi/8]),\n # 6: np.array([0.353, 0.353, np.pi/8]),\n # 7: np.array([0.0, 0.5, np.pi/8]),\n # 8: np.array([0.0, -0.5, np.pi/8]),\n # 9: np.array([0.353, -0.353, np.pi/8]),\n # 10: np.array([0.5, 0.0, -np.pi / 8]),\n # 11: np.array([0.353, 0.353, -np.pi / 8]),\n # 12: np.array([0.0, 0.5, -np.pi / 8]),\n # 13: np.array([0.0, -0.5, -np.pi / 8]),\n # 14: np.array([0.353, -0.353, 
-np.pi / 8]),\n # }[action]\n\n # def action_2_state_d(self, action):\n # return {\n # 0: np.array([0.5, 0.0, 0.0]),\n # 1: np.array([0.353, 0.353, 0.0]),\n # 2: np.array([0.0, 0.5, 0.0]),\n # 3: np.array([0.0, -0.5, 0.0]),\n # 4: np.array([0.353, -0.353, 0.0]),\n # 5: np.array([0.5, 0.0, np.pi/16]),\n # 6: np.array([0.353, 0.353, np.pi/16]),\n # 7: np.array([0.0, 0.5, np.pi/16]),\n # 8: np.array([0.0, -0.5, np.pi/16]),\n # 9: np.array([0.353, -0.353, np.pi/16]),\n # 10: np.array([0.5, 0.0, -np.pi / 16]),\n # 11: np.array([0.353, 0.353, -np.pi / 16]),\n # 12: np.array([0.0, 0.5, -np.pi / 16]),\n # 13: np.array([0.0, -0.5, -np.pi / 16]),\n # 14: np.array([0.353, -0.353, -np.pi / 16]),\n # }[action]\n\n def action_2_state_d(self, action):\n return {\n 0: np.array([0.5, 0.0, 0.0]),\n 1: np.array([0.353, 0.353, 0.0]),\n 2: np.array([0.0, 0.5, 0.0]),\n 3: np.array([-0.353, 0.353, 0.0]),\n 4: np.array([-0.5, 0.0, 0.0]),\n 5: np.array([-0.353, -0.353, 0.0]),\n 6: np.array([0.0, -0.5, 0.0]),\n 7: np.array([0.353, -0.353, 0.0]),\n }[action]\n\n # def action_2_state_d(self, action):\n # return {\n # 0: np.array([0.2, 0.0, 0.0]),\n # 1: np.array([0.1414, 0.1414, 0.0]),\n # 2: np.array([0.0, 0.2, 0.0]),\n # 3: np.array([0.0, -0.2, 0.0]),\n # 4: np.array([0.1414, -0.1414, 0.0]),\n # 5: np.array([0.2, 0.0, np.pi/8]),\n # 6: np.array([0.1414, 0.1414, np.pi/8]),\n # 7: np.array([0.0, 0.2, np.pi/8]),\n # 8: np.array([0.0, -0.2, np.pi/8]),\n # 9: np.array([0.1414, -0.1414, np.pi/8]),\n # 10: np.array([0.2, 0.0, -np.pi / 8]),\n # 11: np.array([0.1414, 0.1414, -np.pi / 8]),\n # 12: np.array([0.0, 0.2, -np.pi / 8]),\n # 13: np.array([0.0, -0.2, -np.pi / 8]),\n # 14: np.array([0.1414, -0.1414, -np.pi / 8]),\n # }[action]\n\n # def action_2_state_d(self, action):\n # return {\n # 0: np.array([0.2, 0.0, 0.0]),\n # 1: np.array([0.1414, 0.1414, 0.0]),\n # 2: np.array([0.0, 0.2, 0.0]),\n # 3: np.array([0.0, -0.2, 0.0]),\n # 4: np.array([0.1414, -0.1414, 0.0]),\n # 5: np.array([0.2, 0.0, np.pi/16]),\n # 6: np.array([0.1414, 0.1414, np.pi/16]),\n # 7: np.array([0.0, 0.2, np.pi/16]),\n # 8: np.array([0.0, -0.2, np.pi/16]),\n # 9: np.array([0.1414, -0.1414, np.pi/16]),\n # 10: np.array([0.2, 0.0, -np.pi / 16]),\n # 11: np.array([0.1414, 0.1414, -np.pi / 16]),\n # 12: np.array([0.0, 0.2, -np.pi / 16]),\n # 13: np.array([0.0, -0.2, -np.pi / 16]),\n # 14: np.array([0.1414, -0.1414, -np.pi / 16]),\n # }[action]\n\n # def action_2_state_d(self, action):\n # return {\n # 0: np.array([0.2, 0.0, 0.0]),\n # 1: np.array([0.1414, 0.1414, 0.0]),\n # 2: np.array([0.0, 0.2, 0.0]),\n # 3: np.array([-0.1414, 0.1414, 0.0]),\n # 4: np.array([-0.2, 0.0, 0.0]),\n # 5: np.array([-0.1414, -0.1414, 0.0]),\n # 6: np.array([0.0, -0.2, 0.0]),\n # 7: np.array([0.1414, -0.1414, 0.0]),\n # 8: np.array([0.2, 0.0, np.pi/8]),\n # 9: np.array([0.1414, 0.1414, np.pi/8]),\n # 10: np.array([0.0, 0.2, np.pi/8]),\n # 11: np.array([-0.1414, 0.1414, np.pi/8]),\n # 12: np.array([-0.2, 0.0, np.pi/8]),\n # 13: np.array([-0.1414, -0.1414, np.pi/8]),\n # 14: np.array([0.0, -0.2, np.pi/8]),\n # 15: np.array([0.1414, -0.1414, np.pi/8]),\n # 16: np.array([0.2, 0.0, -np.pi / 8]),\n # 17: np.array([0.1414, 0.1414, -np.pi / 8]),\n # 18: np.array([0.0, 0.2, -np.pi / 8]),\n # 19: np.array([-0.1414, 0.1414, -np.pi / 8]),\n # 20: np.array([-0.2, 0.0, -np.pi / 8]),\n # 21: np.array([-0.1414, -0.1414, -np.pi / 8]),\n # 22: np.array([0.0, -0.2, -np.pi / 8]),\n # 23: np.array([0.1414, -0.1414, -np.pi / 8]),\n # }[action]\n\n # def action_2_state_d(self, action):\n # return {\n 
# 0: np.array([0.5, 0.0]),\n # 1: np.array([0.353, 0.353]),\n # 2: np.array([0.0, 0.5]),\n # 3: np.array([-0.353, 0.353]),\n # 4: np.array([-0.5, 0.0]),\n # 5: np.array([-0.353, -0.353]),\n # 6: np.array([0.0, -0.5]),\n # 7: np.array([0.353, -0.353]),\n # }[action]\n\n def _reset(self):\n\n self.state = np.copy(self.start_pos)\n\n if not self.manual_pose:\n\n # goal pose\n\n angle_goal = np.random.uniform(-np.pi, np.pi, 1)\n r = 6.0\n self.goal_pos[0] = 0.0\n self.goal_pos[1] = 0.0\n\n # obstacle pose\n self.obstacle_dim = []\n\n for it_obstacle in range(self.obstacle_num):\n if self.obstacle_type[it_obstacle] == 'cylinder':\n obstacle_pose_radius = np.random.uniform(2.0, 5.0, 1)\n angle_goal_angle = np.random.uniform(-np.pi, np.pi, 1)\n\n obstacle_x = obstacle_pose_radius * np.cos(angle_goal_angle)\n obstacle_y = obstacle_pose_radius * np.sin(angle_goal_angle)\n\n obstacle_x = np.random.uniform(-8.0, -2.0, 1)\n obstacle_y = np.random.uniform(-3.0, 3.0, 1)\n\n # obstacle_pose_direction = np.random.uniform(1.5, 4.5, 1)\n # obstacle_pose_perpendicular = np.random.uniform(-3.0, 3.0, 1)\n #\n # obstacle_x = obstacle_pose_direction * np.cos(angle_goal) - obstacle_pose_perpendicular * np.sin(\n # angle_goal)\n # obstacle_y = obstacle_pose_direction * np.sin(angle_goal) + obstacle_pose_perpendicular * np.cos(\n # angle_goal)\n\n self.obstacle_pos[it_obstacle, 0] = obstacle_x\n self.obstacle_pos[it_obstacle, 1] = obstacle_y\n self.obstacle_pos[it_obstacle, 2] = 0.0\n\n # radius = 0.5\n # self.obstacle_dim.append(radius)\n\n self.obstacle_dim.append(self.obstacle_dimensions_user)\n\n elif self.obstacle_type[it_obstacle] == 'rectangle':\n obstacle_pose_direction = np.random.uniform(1.5, 4.5, 1)\n obstacle_pose_perpendicular = np.random.uniform(-3.0, 3.0, 1)\n\n obstacle_x = obstacle_pose_direction * np.cos(angle_goal) - obstacle_pose_perpendicular * np.sin(\n angle_goal)\n obstacle_y = obstacle_pose_direction * np.sin(angle_goal) + obstacle_pose_perpendicular * np.cos(\n angle_goal)\n\n self.obstacle_pos[it_obstacle, 0] = obstacle_x\n self.obstacle_pos[it_obstacle, 1] = obstacle_y\n # self.obstacle_pos[it_obstacle, 2] = np.random.uniform(-np.pi, np.pi, 1)\n self.obstacle_pos[it_obstacle, 2] = np.random.uniform(-np.pi/4, np.pi/4, 1)\n\n # dimensions = np.array([0.2, 3.5])\n # self.obstacle_dim.append(dimensions)\n\n self.obstacle_dim.append(self.obstacle_dimensions_user)\n\n elif self.obstacle_type[it_obstacle] == 'wall':\n\n # this is for perpendicular wall TODO: add horizontal wall\n obstacle_pose_direction = np.random.uniform(1.5, 4.5, 1)\n obstacle_pose_perpendicular = np.random.uniform(-3.0, -2.0, 1)\n\n obstacle_x = obstacle_pose_direction * np.cos(angle_goal) - obstacle_pose_perpendicular * np.sin(angle_goal)\n obstacle_y = obstacle_pose_direction * np.sin(angle_goal) + obstacle_pose_perpendicular * np.cos(angle_goal)\n\n self.obstacle_pos[it_obstacle, 0] = obstacle_x\n self.obstacle_pos[it_obstacle, 1] = obstacle_y\n self.obstacle_pos[it_obstacle, 2] = 0.0\n\n wall_second_point = np.array([0.0, 0.0])\n\n obstacle_pose_direction = np.random.uniform(1.5, 4.5, 1)\n obstacle_pose_perpendicular = np.random.uniform(2.0, 3.0, 1)\n\n obstacle_x = obstacle_pose_direction * np.cos(angle_goal) - obstacle_pose_perpendicular * np.sin(\n angle_goal)\n obstacle_y = obstacle_pose_direction * np.sin(angle_goal) + obstacle_pose_perpendicular * np.cos(\n angle_goal)\n\n wall_second_point[0] = obstacle_x\n wall_second_point[1] = obstacle_y\n\n self.obstacle_dim.append(wall_second_point)\n\n # TODO: can check 
collision at start of goal and obstacle\n\n observation = self.laser_readings()\n self.laser_obs = np.copy(observation)\n distance_vec = self.goal_pos - self.state[0:2]\n distance = np.linalg.norm(distance_vec)\n angle = np.arctan2(distance_vec[1], distance_vec[0])\n angle = angle - self.state[2]\n angle = (angle + np.pi) % (2 * np.pi) - np.pi\n goal_measurements = np.array([distance, angle])\n\n return np.concatenate((observation, goal_measurements))\n # return goal_measurements\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n screen_width = int(300*17.0/6.0)\n screen_height = 300\n\n scale_width = screen_width / (self.bound_box[0, 1] - self.bound_box[0, 0])\n scale_height = screen_height / (self.bound_box[1, 1] - self.bound_box[1, 0])\n\n zero_width = scale_width * (-self.bound_box[0, 0])\n zero_height = scale_height * (-self.bound_box[1, 0])\n\n drone_width = 20\n drone_height = 20\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n\n l, r, t, b = -drone_width / 2, drone_width / 2, drone_height / 2, -drone_height / 2\n drone = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n self.drone_trans = rendering.Transform()\n drone.add_attr(self.drone_trans)\n self.viewer.add_geom(drone)\n\n goal = rendering.make_circle(scale_height * 0.5)\n goal.set_color(.0, 1.0, .0)\n self.goal_trans = rendering.Transform()\n goal.add_attr(self.goal_trans)\n self.viewer.add_geom(goal)\n\n obstacles = []\n self.obstacle_trans = []\n for it_obstacle in range(self.obstacle_num):\n if self.obstacle_type[it_obstacle] == 'cylinder':\n radius = scale_height * self.obstacle_dim[it_obstacle]\n obstacles.append(rendering.make_circle(radius))\n obstacles[it_obstacle].set_color(1.0, .0, .0)\n self.obstacle_trans.append(rendering.Transform())\n obstacles[it_obstacle].add_attr(self.obstacle_trans[it_obstacle])\n self.viewer.add_geom(obstacles[it_obstacle])\n\n elif self.obstacle_type[it_obstacle] == 'rectangle':\n obst_dim = self.obstacle_dim[it_obstacle]\n rect_width = obst_dim[0] * scale_width\n rect_height = obst_dim[1] * scale_height\n l, r, t, b = -rect_width / 2, rect_width / 2, rect_height / 2, -rect_height / 2\n obstacles.append(rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)]))\n obstacles[it_obstacle].set_color(1.0, .0, .0)\n self.obstacle_trans.append(rendering.Transform())\n obstacles[it_obstacle].add_attr(self.obstacle_trans[it_obstacle])\n self.viewer.add_geom(obstacles[it_obstacle])\n\n elif self.obstacle_type[it_obstacle] == 'wall':\n wall_vector = self.obstacle_dim[it_obstacle] - self.obstacle_pos[it_obstacle, :2]\n wall_width = np.linalg.norm(wall_vector) * scale_width\n wall_height = 5\n l, r, t, b = 0, wall_width, 0, wall_height\n obstacles.append(rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)]))\n obstacles[it_obstacle].set_color(1.0, 0.0, 0.0)\n self.obstacle_trans.append(rendering.Transform())\n obstacles[it_obstacle].add_attr(self.obstacle_trans[it_obstacle])\n self.viewer.add_geom(obstacles[it_obstacle])\n\n laser_readings = []\n self.laser_readings_array = []\n for it_laser in range(self.num_samples_laser):\n laser_readings.append(rendering.make_circle(3))\n laser_readings[it_laser].set_color(0.0, 0.0, 1.0)\n self.laser_readings_array.append(rendering.Transform())\n laser_readings[it_laser].add_attr(self.laser_readings_array[it_laser])\n 
self.viewer.add_geom(laser_readings[it_laser])\n\n if self.state is None: return None\n\n drone_x = self.state[0] * scale_width + zero_width\n drone_y = self.state[1] * scale_height + zero_height\n self.drone_trans.set_translation(drone_x, drone_y)\n self.drone_trans.set_rotation(self.state[2])\n\n goal_x = self.goal_pos[0] * scale_width + zero_width\n goal_y = self.goal_pos[1] * scale_height + zero_height\n self.goal_trans.set_translation(goal_x, goal_y)\n\n for it_obstacles in range(self.obstacle_num):\n if self.obstacle_type[it_obstacles] == 'cylinder':\n object_it_pos = self.obstacle_pos[it_obstacles]\n obstacle_x = object_it_pos[0] * scale_width + zero_width\n obstacle_y = object_it_pos[1] * scale_height + zero_height\n self.obstacle_trans[it_obstacles].set_translation(obstacle_x, obstacle_y)\n\n elif self.obstacle_type[it_obstacles] == 'rectangle':\n object_it_pos = self.obstacle_pos[it_obstacles]\n obstacle_x = object_it_pos[0] * scale_width + zero_width\n obstacle_y = object_it_pos[1] * scale_height + zero_height\n self.obstacle_trans[it_obstacles].set_translation(obstacle_x, obstacle_y)\n self.obstacle_trans[it_obstacles].set_rotation(object_it_pos[2])\n\n elif self.obstacle_type[it_obstacles] == 'wall':\n wall_pose = self.obstacle_pos[it_obstacles]\n wall_x = wall_pose[0] * scale_width + zero_width\n wall_y = wall_pose[1] * scale_height + zero_height\n self.obstacle_trans[it_obstacles].set_translation(wall_x, wall_y)\n\n wall_vector = self.obstacle_dim[it_obstacles] - wall_pose[:2]\n rotation_angle = np.arctan2(wall_vector[1], wall_vector[0])\n self.obstacle_trans[it_obstacles].set_rotation(rotation_angle)\n\n if True:\n rays = np.linspace(-np.pi / 2, np.pi / 2, self.num_samples_laser)\n # rays = np.linspace(-np.pi, np.pi, self.num_samples_laser)\n for it_laser in range(self.num_samples_laser):\n laser_reading_it = self.laser_obs[it_laser]\n if laser_reading_it > self.max_measurement_laser:\n laser_reading_it = self.max_measurement_laser\n laser_intersect = self.state[:2] + laser_reading_it *\\\n np.array([np.cos(self.state[2] + rays[it_laser]),\n np.sin(self.state[2] + rays[it_laser])])\n laser_x = laser_intersect[0] * scale_width + zero_width\n laser_y = laser_intersect[1] * scale_height + zero_height\n self.laser_readings_array[it_laser].set_translation(laser_x, laser_y)\n\n return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\n def laser_readings(self):\n\n num_samples = self.num_samples_laser\n max_measurement = self.max_measurement_laser\n\n lidar_readings = 100 * np.ones(num_samples)\n\n for it_obstacle in range(self.obstacle_num):\n if self.obstacle_type[it_obstacle] == 'cylinder':\n single_obstacle_readings = \\\n self.laser_reading_single_cylinder(self.obstacle_pos[it_obstacle, :2],\n self.obstacle_dim[it_obstacle])\n elif self.obstacle_type[it_obstacle] == 'rectangle':\n single_obstacle_readings = self.laser_readings_rectangele(self.obstacle_pos[it_obstacle],\n self.obstacle_dim[it_obstacle])\n elif self.obstacle_type[it_obstacle] == 'wall':\n wall_points = np.zeros((2, 2))\n wall_points[0, :] = self.obstacle_pos[it_obstacle, :2]\n wall_points[1, :] = self.obstacle_dim[it_obstacle]\n single_obstacle_readings = self.laser_intersect_wall(wall_points)\n\n lidar_readings[single_obstacle_readings < lidar_readings] = single_obstacle_readings[\n single_obstacle_readings < lidar_readings]\n\n # for it in range(self.obstacle_num):\n # if np.linalg.norm(self.state[0:2] - self.obstacle_pos[it, :]) <= max_measurement:\n # single_obstacle_readings = 
self.laser_reading_single_cylinder(self.obstacle_pos[it, :])\n # lidar_readings[single_obstacle_readings < lidar_readings] = single_obstacle_readings[\n # single_obstacle_readings < lidar_readings]\n\n return lidar_readings\n\n def laser_reading_single_cylinder(self, obstacle, radius_obstacle):\n\n circle_center = obstacle\n\n num_samples = self.num_samples_laser\n max_measurement = self.max_measurement_laser\n\n rays = np.linspace(self.state[2] - np.pi / 2, self.state[2] + np.pi / 2, num_samples)\n # rays = np.linspace(self.state[2] - np.pi, self.state[2] + np.pi, num_samples)\n directions = np.array([np.cos(rays), np.sin(rays)])\n radius = radius_obstacle\n\n t0 = np.zeros(num_samples)\n\n quad_pose = self.state[0:2]\n\n A = np.sum(directions ** 2, axis=0)\n B = 2.0 * np.sum(np.multiply(np.transpose(directions), quad_pose - circle_center), axis=1)\n C = np.sum((quad_pose - circle_center) ** 2.0, axis=0) - radius ** 2.0\n\n mid_result = B ** 2.0 - 4.0 * C * A\n\n zero_array = np.zeros(np.shape(mid_result))\n less_zero = np.less(mid_result, zero_array)\n greater_zero = np.logical_not(less_zero)\n\n t0[less_zero] = np.inf\n\n mid_result_2 = mid_result[greater_zero] ** (0.5)\n t0[greater_zero] = (-B[greater_zero] - mid_result_2) / (2.0 * A[greater_zero])\n\n negative_t0 = t0 < 0\n t0[negative_t0] = np.inf\n\n intersection_distace = t0\n intersection_distace[t0 > max_measurement] = 100\n\n return intersection_distace\n\n def laser_intersect_wall(self, wall_points):\n\n num_samples = self.num_samples_laser\n max_measurement = self.max_measurement_laser\n\n laser_readings = np.inf*np.ones(num_samples)\n quad_pose = self.state[0:2]\n quad_angle = self.state[2]\n\n rays = np.linspace(quad_angle - np.pi / 2, quad_angle + np.pi / 2, num_samples)\n # rays = np.linspace(quad_angle - np.pi, quad_angle + np.pi, num_samples)\n directions = np.transpose(np.array([np.cos(rays), np.sin(rays)]))\n\n wall_vector = wall_points[1, :] - wall_points[0, :]\n wall_start = wall_points[0, :]\n\n denum = np.cross(directions, wall_vector)\n\n # determine if lines are parallel\n not_zero = np.logical_not(np.equal(denum, 0.0))\n\n wall_intersection = np.cross(wall_start - quad_pose, directions[not_zero, :])/denum[not_zero]\n\n # find intersections in range 0 to 1\n wall_intersection_less_zero = wall_intersection > 0.0\n wall_intersection_grater_zero = wall_intersection < 1.0\n intersect_ind = np.logical_and(wall_intersection_less_zero, wall_intersection_grater_zero)\n\n # rays that intersect\n rays_intersecting = False*np.ones(num_samples, dtype=bool)\n rays_intersecting[not_zero] = intersect_ind\n\n laser_readings[rays_intersecting] = np.cross(wall_start - quad_pose, wall_vector)/ \\\n ((denum[not_zero])[intersect_ind])\n\n # negative intersections\n negative_int = laser_readings < 0.0\n laser_readings[negative_int] = np.inf\n\n # put far readings to 100\n laser_readings[laser_readings > max_measurement] = 100\n\n return laser_readings\n\n def laser_readings_rectangele(self, rect_state, rect_dim):\n\n walls = np.zeros((4, 2, 2))\n rot_mat = np.array([[np.cos(rect_state[2]), -np.sin(rect_state[2])],\n [np.sin(rect_state[2]), np.cos(rect_state[2])]])\n\n walls[0, 0] = rect_state[:2] + np.matmul(rot_mat, np.array([-rect_dim[0]/2.0, rect_dim[1]/2.0]))\n walls[0, 1] = rect_state[:2] + np.matmul(rot_mat, np.array([-rect_dim[0]/2.0, -rect_dim[1]/2.0]))\n walls[1, 0] = rect_state[:2] + np.matmul(rot_mat, np.array([-rect_dim[0] / 2.0, -rect_dim[1] / 2.0]))\n walls[1, 1] = rect_state[:2] + np.matmul(rot_mat, 
np.array([rect_dim[0] / 2.0, -rect_dim[1] / 2.0]))\n walls[2, 0] = rect_state[:2] + np.matmul(rot_mat, np.array([rect_dim[0] / 2.0, -rect_dim[1] / 2.0]))\n walls[2, 1] = rect_state[:2] + np.matmul(rot_mat, np.array([rect_dim[0] / 2.0, rect_dim[1] / 2.0]))\n walls[3, 0] = rect_state[:2] + np.matmul(rot_mat, np.array([rect_dim[0] / 2.0, rect_dim[1] / 2.0]))\n walls[3, 1] = rect_state[:2] + np.matmul(rot_mat, np.array([-rect_dim[0] / 2.0, rect_dim[1] / 2.0]))\n\n num_samples = self.num_samples_laser\n max_measurement = self.max_measurement_laser\n\n lidar_readings = 100 * np.ones(num_samples)\n\n for it_walls in range(4):\n laser_readings_single_wall = self.laser_intersect_wall(walls[it_walls])\n lidar_readings[laser_readings_single_wall < lidar_readings] = laser_readings_single_wall[\n laser_readings_single_wall < lidar_readings]\n\n return lidar_readings\n\n def test_collision(self, state, radius):\n\n for it_obstacle in range(self.obstacle_num):\n if self.obstacle_type[it_obstacle] == 'cylinder':\n collision = self.cylinder_collision(state,\n radius,\n self.obstacle_pos[it_obstacle, :2],\n self.obstacle_dim[it_obstacle])\n elif self.obstacle_type[it_obstacle] == 'rectangle':\n collision = self.rectangle_collision(state,\n radius,\n self.obstacle_pos[it_obstacle],\n self.obstacle_dim[it_obstacle])\n elif self.obstacle_type[it_obstacle] == 'wall':\n collision = self.wall_collision(state,\n radius,\n self.obstacle_pos[it_obstacle, :2],\n self.obstacle_dim[it_obstacle])\n # elif self.obstacle_type[it_obstacle] == 'passage':\n # collision = self.passage_collision(state, radius, pasage_state, passage_dimension)\n else:\n print(\"UNKOWN OBSTACLE\" + self.obstacle_type[it_obstacle])\n return True\n\n if collision:\n return collision\n\n return False\n\n def cylinder_collision(self, state, radius, cyl_state, cul_radius):\n if np.linalg.norm(state - cyl_state) < (cul_radius + radius):\n return True\n else:\n return False\n\n def rectangle_collision(self, state, radius, rect_state, rect_dimensions):\n vec_to_drone = state - rect_state[:2]\n rot_mat = np.array([[np.cos(rect_state[2]), np.sin(rect_state[2])],\n [-np.sin(rect_state[2]), np.cos(rect_state[2])]])\n vec_to_drone = np.matmul(rot_mat, vec_to_drone)\n\n vec_to_drone_coll = np.zeros((5, 2))\n vec_to_drone_coll[0, :] = vec_to_drone + np.array([radius, 0.0])\n vec_to_drone_coll[1, :] = vec_to_drone + np.array([-radius, 0.0])\n vec_to_drone_coll[2, :] = vec_to_drone + np.array([0.0, radius])\n vec_to_drone_coll[3, :] = vec_to_drone + np.array([0.0, -radius])\n vec_to_drone_coll[4, :] = vec_to_drone - radius * vec_to_drone/np.linalg.norm(vec_to_drone)\n\n for it in range(5):\n point_checking = vec_to_drone_coll[it, :]\n check_1 = point_checking[0] >= -rect_dimensions[0] / 2.0\n check_2 = point_checking[0] <= rect_dimensions[0] / 2.0\n check_3 = point_checking[1] >= -rect_dimensions[1] / 2.0\n check_4 = point_checking[1] <= rect_dimensions[1] / 2.0\n if check_1 and check_2 and check_3 and check_4:\n return True\n\n return False\n\n def wall_collision(self, state, radius, wall_state, wall_end):\n\n vector_to_drone = state - wall_state\n vector_wall = wall_end - wall_state\n t_on_wall = np.dot(vector_to_drone, vector_wall) / np.dot(vector_wall, vector_wall)\n if t_on_wall > 1.0 or t_on_wall < 0.0:\n return False\n else:\n vector_ortogonal = state - (t_on_wall * vector_wall + wall_state)\n if np.linalg.norm(vector_ortogonal) < radius:\n return True\n else:\n return 
False\n","sub_path":"envs/curriculum_envs/test_convergence.py","file_name":"test_convergence.py","file_ext":"py","file_size_in_byte":32198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"628300787","text":"#!/usr/bin/env python\n\n'''\nMOUDLE : decode source data to cluster with excluding overlap siganl\n'''\n\n__author__ = \"YANG TAO \"\n__copyright__ = \"Copyright (c) yangtao\"\n__created__ = \"[2018-04-10 Apr 12:00]\"\n\nimport os\nimport sys\nimport re\nimport numpy as np\nimport time\nimport ROOT\nimport logging\nlogging.basicConfig(level=logging.DEBUG,format= ' %(asctime)s - %(levelname)s- %(message)s')\n\nclass Decode():\n\n def __init__(self,input,output):\n\n #define input and output\n self.input = input\n self.output = ROOT.TFile(output,'recreate')\n\n #creat clusters tree\n self.cluster_tree = ROOT.TTree('Cluster_Tree','Cluster')\n\n #init value\n\n self.seed_channel = np.zeros(1, dtype='int16')\n self.seed_row = np.zeros(1, dtype='int16')\n self.seed_adc = np.zeros(1, dtype='int16')\n\n self.total_cluster_adc = np.zeros(1, dtype='int16')\n\n self.single_cluster_adc = ROOT.std.vector(int)()\n\n self.size = np.zeros(1, dtype='int16')\n\n #creat branches\n self.cluster_tree.Branch('Seed_Channel',self.seed_channel,'Seed_Channel/S')\n self.cluster_tree.Branch('Seed_Row',self.seed_row,'Seed_Row/S')\n self.cluster_tree.Branch('SeedSignal',self.seed_adc,'SeedSignal/S')\n self.cluster_tree.Branch('TotalClusterSignal',self.total_cluster_adc,'TotalClusterSignal/S')\n self.cluster_tree.Branch('SingleClusterSignal',self.single_cluster_adc)\n self.cluster_tree.Branch('Size',self.size,'Size/S')\n\n #define thread\n self.seed_threshold = 500\n self.cluster_threshold = 500\n self.size_adc_threshold = 200 \n\n #configure cluster size\n self.cluster_size = 2\n\n def get_int16_abs(self,number):\n \n if (number & 0x8000) != 0: \n abs_number = ((number - 1) ^ 0xFFFF)\n else:\n abs_number = number\n\n return abs_number \n\n def bytes_to_int(self,thebytes):\n uint_data = np.frombuffer(thebytes, dtype=np.uint8)\n int_data = uint_data.astype('int16')\n return int_data\n\n def process_raw(self,framebytes):\n clear_frame_bytes = b''\n origin_frame_bytes = framebytes \n tmp = re.findall(b'....(.{32})....',origin_frame_bytes,re.DOTALL) \n clear_frame_bytes = b''.join(tmp)\n return clear_frame_bytes\n\n def fill_root(self,frame_adc):\n\n #init value\n row_seed = 0\n channel_seed = 0\n\n #init temp value\n tmp_adc = 0\n tmp_seed_adc = 0\n tmp_cluster_adc = 0\n tmp_size = 0\n\n tmp_frame_array=np.reshape(frame_adc, newshape=(48, 16, 2))\n tmp_frame=np.zeros((48,16), dtype='int16')\n\n for row in xrange(0,48):\n for channel in xrange(0,16):\n\n tmp_adc =tmp_frame_array[row, channel, 0]+tmp_frame_array[row, channel, 1]*256 \n tmp_adc = self.get_int16_abs(tmp_adc)\n tmp_frame[row,channel] = tmp_adc\n\n #find seed#\n tmp_seed_adc = np.max(tmp_frame)\n tmp_seed_position = np.where(tmp_frame == tmp_seed_adc)\n\n row_seed = tmp_seed_position[0][0]\n channel_seed = tmp_seed_position[1][0]\n #seed end#\n\n # print(tmp_frame)\n # print('SEED ADC : ',tmp_seed_adc)\n # print('SEED POS : ',tmp_seed_position)\n # print('SEED ROW : ',row_seed)\n # print('SEED CHA : ',channel_seed)\n\n #exclude overlap\n overlap = False\n for row_angle in [-1,0,1]:\n for channel_angle in [-1,0,1]:\n if (row_angle == 0) and (channel_angle == 0):\n continue\n tmp_row = row_seed + 2*row_angle\n tmp_channel = channel_seed + 2*channel_angle\n if( (tmp_row>=0) and (tmp_row<48) and 
(tmp_channel>=0) and (tmp_channel<16) ):\n if (tmp_frame[tmp_row,tmp_channel] - tmp_frame[row_seed+row_angle,channel_seed+channel_angle])>200:\n overlap = True\n break\n #exclude end#\n\n if not overlap:\n\n row_cluster_start = row_seed - self.cluster_size\n row_cluster_end = row_seed + self.cluster_size\n\n channel_cluster_start = channel_seed - self.cluster_size\n channel_cluster_end = channel_seed + self.cluster_size\n\n if tmp_seed_adc > self.seed_threshold:\n self.single_cluster_adc.clear()\n for row_pos in xrange(row_cluster_start,row_cluster_end+1):\n for channel_pos in xrange(channel_cluster_start,channel_cluster_end+1):\n\n \n if( (row_pos>=0) and (row_pos<48) and (channel_pos>=0) and (channel_pos<16)):\n \n tmp_cluster_adc += tmp_frame[row_pos,channel_pos]\n\n\n #print(int(tmp_frame[row_pos,channel_pos]))\n self.single_cluster_adc.push_back(int(tmp_frame[row_pos,channel_pos]))\n \n #count size#\n if tmp_frame[row_pos,channel_pos] > self.size_adc_threshold:\n tmp_size += 1\n #count end#\n if (channel_seed >= 2) and (channel_seed <= 13) and (row_seed >= 2) and (row_seed <= 45) :\n if tmp_cluster_adc < 6000 :\n self.seed_channel[0] = channel_seed\n self.seed_row[0] = row_seed\n self.seed_adc[0] = tmp_seed_adc\n self.total_cluster_adc[0] = tmp_cluster_adc\n self.size[0] = tmp_size\n self.cluster_tree.Fill()\n \n \n def process_frame(self):\n \n #configure\n data = open(self.input,'rb')\n print_number = 10\n try_process_number = 19280\n maxframenumber = 1000000000000\n\n #init value\n seek_position = 0\n frame_number = 0\n cds_frame_number = 0\n broken_frame_number = 0\n broken_bulk_number = 0\n broken_flag = False\n\n #start \n while frame_number < maxframenumber: \n\n data.seek(seek_position)\n try_process_data = data.read(try_process_number)\n\n if len(try_process_data) != try_process_number:\n logging.critical('\\033[33;1m find total %d frames!\\033[0m'%frame_number)\n logging.critical('\\033[32;1m find total %d cds frames!\\033[0m'%cds_frame_number)\n logging.critical('\\033[31;1m find total %d broken frames!\\033[0m'%broken_frame_number)\n logging.critical('\\033[35;1m find total %d broken bulk!\\033[0m'%broken_bulk_number)\n logging.critical(' END !')\n break\n\n m =re.search(b'(\\xaa\\xaa\\xaa\\xaa)(.*?)(\\xf0\\xf0\\xf0\\xf0)',try_process_data,re.DOTALL)\n\n if m:\n if len(m.group(2)) == 1920:\n frame_number += 1\n frame_bytes = m.group(2) \n clear_frame_bytes = self.process_raw(m.group(2))\n frame_adc = self.bytes_to_int(clear_frame_bytes)\n\n else:\n data.seek(seek_position+m.start())\n tmp_process_data = data.read(1928)\n tmp_m = re.search(b'(\\xaa\\xaa\\xaa\\xaa)(.{1920})(\\xf0\\xf0\\xf0\\xf0)',tmp_process_data,re.DOTALL)\n\n if tmp_m:\n frame_number += 1\n frame_bytes = tmp_m.group(2)\n clear_frame_bytes = self.process_raw(tmp_m.group(2))\n frame_adc = self.bytes_to_int(clear_frame_bytes)\n\n else:\n broken_frame_number += 1\n broken_flag = True\n logging.info('\\033[31;1m find %d broken frames!\\033[0m'%broken_frame_number)\n logging.info('\\033[31;1m position: (%d %d) \\033[0m'%(seek_position+m.start(),seek_position+m.end()))\n logging.info('\\033[31;1m broken length : %d\\033[0m'%len(m.group()))\n\n ### cds start ####\n if frame_number > 1 :\n\n cds_frame_adc = frame_adc-last_frame_adc\n self.fill_root(cds_frame_adc)\n cds_frame_number += 1 \n ### cds end ####\n\n if frame_number % print_number == 0:\n logging.info('Find %d frames !'%frame_number)\n logging.info('position: (%d %d)'%(seek_position+m.start(),seek_position+m.end()))\n logging.info('Get %d cds 
frames'%cds_frame_number)\n\n seek_position += (m.start()+(len(m.group())))\n\n last_frame_adc = frame_adc\n\n else:\n print('There is no frame in ( %d %d )'%(seek_position,seek_position+try_process_number))\n broken_flag = True\n broken_bulk_number += 1\n seek_position += try_process_number\n\n logging.critical('\\033[33;1m find total %d frames!\\033[0m'%frame_number)\n logging.critical('\\033[32;1m find total %d cds frames!\\033[0m'%cds_frame_number)\n logging.critical('\\033[31;1m find total %d broken frames!\\033[0m'%broken_frame_number)\n logging.critical('\\033[35;1m find total %d broken bulk!\\033[0m'%broken_bulk_number)\n\n self.cluster_tree.GetCurrentFile().Write()\n self.output.Close()\n data.close()\n\n def run(self):\n start_time = time.clock()\n self.process_frame()\n end_time = time.clock()\n print('Running time: %s Seconds'%(end_time-start_time))\n\n\n","sub_path":"jadepix1/python/lib/decode_iron55.py","file_name":"decode_iron55.py","file_ext":"py","file_size_in_byte":9837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"408323585","text":"import numpy as np\nimport cv2\n\nazarray = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',\n 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\nsubfolder = ['upper', 'lower']\n\nfor i in range(0, 1):\n for j in range(22, 23):\n for k in range(5, 6):\n filename = 'new/' + str(subfolder[i]) + '/' + str(azarray[j]) + '/' + str(k)\n # Create a black image\n img = np.zeros((100, 100, 3), np.uint8)\n img[:, :, :] = (255, 255, 255)\n\n # Write some Text\n font = cv2.FONT_HERSHEY_PLAIN | cv2.FONT_ITALIC\n cv2.putText(img,str(azarray[j]),(5,100), font, 5,(0,0,0),5)\n\n #Display the image\n # cv2.imshow(\"img\",img)\n # cv2.waitKey(0)\n\n #Save image\n # cv2.imwrite(filename + '.png', img)","sub_path":"letters.py","file_name":"letters.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"363942572","text":"# MIT License\n#\n# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport logging\nimport time\n\nfrom blockchainetl.jobs.exporters.databasse.mongo_db import Database\nfrom config.constant import BlockConstant, TransactionConstant, TokenConstant, TokenTypeConstant, WalletConstant, \\\n ExportItemConstant, ExportItemTypeConstant, LoggerConstant\n\nlogger = logging.getLogger(LoggerConstant.KnowledgeGraphExporter)\n\n\nclass KnowledgeGraphExporter:\n\n def __init__(self):\n self.mapping_handler = {\n ExportItemTypeConstant.transaction: self._transaction_handler,\n ExportItemTypeConstant.block: self._block_handler,\n ExportItemTypeConstant.token_transfer: self._token_transfer_handler,\n ExportItemTypeConstant.event: self._event_handler,\n ExportItemTypeConstant.token: self._token_handler\n }\n self.data_base = Database()\n\n def open(self):\n pass\n\n def export_items(self, items):\n for item in items:\n self.export_item(item)\n\n def export_item(self, item):\n handler = self.mapping_handler.get(item.get(ExportItemConstant.type))\n if handler:\n handler(item)\n\n def close(self):\n pass\n\n def _block_handler(self, item):\n item[BlockConstant.gas_limit] = str(item.get(BlockConstant.gas_limit))\n item[BlockConstant.gas_used] = str(item.get(BlockConstant.gas_used))\n self.data_base.update_block(item)\n\n def _transaction_handler(self, item):\n item[TransactionConstant.gas] = str(item.get(TransactionConstant.gas))\n item[TransactionConstant.gas_price] = str(item.get(TransactionConstant.gas_price))\n item[TransactionConstant.value] = str(item.get(TransactionConstant.value))\n if item.get(TransactionConstant.input) == TokenConstant.native_token:\n item[TransactionConstant.transaction_hash] = item.pop(TransactionConstant.hash)\n self._update_wallet_and_item(item, TokenConstant.native_token)\n self.data_base.update_transaction_transfer(item)\n self.data_base.update_transaction(item)\n\n def _token_transfer_handler(self, item):\n item[TokenConstant.value] = str(item.get(TokenConstant.value))\n token_address = item.get(TokenConstant.contract_address)\n item[TokenConstant.type] = TokenTypeConstant.Transfer\n # start_time = time.time()\n\n self._update_wallet_and_item(item, token_address)\n # logger.debug(f\"Time to update wallet item in event {time.time() - start_time}\")\n start_time = time.time()\n self.data_base.insert_to_token_collection(token_address, item)\n # logger.info(f\"Time to insert_to_token_collection item in event {time.time() - start_time}\")\n\n def _event_handler(self, item):\n item[TokenConstant.value] = str(item.get(TokenConstant.value))\n contract_address = item.get(TokenConstant.contract_address)\n item[TokenConstant.type] = item.pop(TokenConstant.event_type)\n start_time = time.time()\n self._update_wallet_and_item(item, contract_address)\n # logger.info(f\"Time to update wallet item in event {time.time() - start_time}\")\n self.data_base.insert_to_token_collection(contract_address, item)\n\n def _token_handler(self, item):\n item[TokenConstant.total_supply] = str(item.get(TokenConstant.total_supply))\n self.data_base.update_token(item)\n\n def _update_wallet_and_item(self, item, balance_address):\n # start_time_all = time.time()\n if not item.get(TransactionConstant.wallets):\n return\n for wallet in item.get(TransactionConstant.wallets):\n address = 
wallet.get(WalletConstant.address)\n start_time = time.time()\n wallet_in_db = self.data_base.get_wallet(address)\n # logger.info(f\"Time to get wallet in db{time.time() - start_time}\")\n balances = wallet_in_db.get(WalletConstant.balances)\n supply = wallet_in_db.get(WalletConstant.supply)\n borrow = wallet_in_db.get(WalletConstant.borrow)\n if not balances:\n balances = {}\n if not supply:\n supply = {}\n if not borrow:\n borrow = {}\n unit_token = wallet.get(WalletConstant.unit_token)\n if not unit_token:\n unit_token = balance_address\n\n wallet[WalletConstant.balance] = str(wallet.get(WalletConstant.balance))\n wallet[WalletConstant.pre_balance] = str(wallet.get(WalletConstant.pre_balance))\n\n balances[unit_token] = wallet.get(WalletConstant.balance)\n if wallet.get(WalletConstant.supply):\n supply[unit_token] = wallet.get(WalletConstant.supply)\n if wallet.get(WalletConstant.borrow):\n borrow[unit_token] = wallet.get(WalletConstant.borrow)\n\n wallet_in_db[WalletConstant.balances] = balances\n wallet_in_db[WalletConstant.supply] = supply\n wallet_in_db[WalletConstant.borrow] = borrow\n\n wallet[WalletConstant.balances] = balances\n wallet[WalletConstant.supply] = supply\n wallet[WalletConstant.borrow] = borrow\n\n wallet_in_db[WalletConstant.at_block_number] = item.get(TransactionConstant.block_number)\n start_time = time.time()\n self.data_base.replace_wallet(wallet_in_db)\n # logger.debug(f\"time to replace_wallet wallet in db{time.time() - start_time}\")\n\n # logger.debug(f\"Time to _update_wallet_and_item {time.time() - start_time_all}\")\n","sub_path":"blockchainetl/jobs/exporters/knowledge_graph_exporter.py","file_name":"knowledge_graph_exporter.py","file_ext":"py","file_size_in_byte":6643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"323744837","text":"import os\nfrom setuptools import setup\n\nCURRENT_DIR = os.path.abspath(os.path.dirname(__file__))\n\ndef get_reqs(*fns):\n lst = []\n for fn in fns:\n for package in open(os.path.join(CURRENT_DIR, fn)).readlines():\n package = package.strip()\n if not package or package.startswith('#'):\n continue\n lst.append(package.strip())\n return lst\n\nsetup(name='pyAudioAnalysis',\n version='0.2.5',\n description='Python Audio Analysis Library: Feature Extraction, Classification, Segmentation and Applications',\n url='https://github.com/tyiannak/pyAudioAnalysis',\n author='Theodoros Giannakopoulos',\n author_email='tyiannak@gmail.com',\n license='Apache License, Version 2.0',\n packages=['pyAudioAnalysis'],\n zip_safe=False,\n install_requires=get_reqs('requirements.txt'))\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"543323629","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .track import Track\n\n\nclass Station:\n \"\"\"\n Represents a streamable, unending station. 
This should be created with `AmazonMusic.createStation`.\n\n Key properties are:\n\n * `id` - ID of the station (Amazon ASIN)\n * `name` - Name of the station.\n * `coverUrl` - URL containing cover art for the station.\n * `tracks` - Iterable generator for the `Tracks` that make up this station.\n \"\"\"\n\n def __init__(self, amzn, asin, data):\n \"\"\"\n Internal use only.\n\n :param amzn: AmazonMusic object, used to make API calls.\n :param asin: Station ASIN.\n :param data: JSON data structure for the station, from Amazon Music.\n \"\"\"\n self._amzn = amzn\n self.id = asin\n self.json = data\n self.cover_url = data['queue']['queueMetadata']['imageUrlMap']['FULL']\n self.name = data['queue']['queueMetadata']['title']\n self._page_token = data['queue']['pageToken']\n\n def tracks(self):\n \"\"\"\n Provides an iterable generator for the `Tracks` that make up this station.\n \"\"\"\n tracks = []\n tracks.extend(self.json['trackMetadataList'])\n while tracks:\n yield Track(self._amzn, tracks.pop(0))\n\n if not tracks:\n data = self._amzn.call(\n 'mpqs/voiceenabled/getNextTracks',\n 'com.amazon.musicplayqueueservice.model.client.external.voiceenabled.MusicPlayQueueService'\n 'ExternalVoiceEnabledClient.getNextTracks', {\n 'pageToken': self._page_token,\n 'numberOfTracks': 10,\n 'customerInfo': {\n 'deviceId': self._amzn.device_id,\n 'deviceType': self._amzn.device_type,\n 'musicTerritory': self._amzn.territory,\n 'customerId': self._amzn.customer_id\n }\n })\n self._page_token = data['nextPageToken']\n tracks.extend(data['trackMetadataList'])\n","sub_path":"amazon_music/internal/station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"37935045","text":"#基本20持に実行(UTC 11:00)\n\nimport requests\nimport datetime\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\nimport random\nimport numpy as np\nimport pandas as pd\n\nfrom settings import *\nimport models\n\nline_notify_api = 'https://notify-api.line.me/api/notify'\nmaco_headers = {'Authorization': 'Bearer ' + maco_token}\ngroup_headers = {'Authorization': 'Bearer ' + group_token}\n\nmaco = create_engine(maco_db,pool_pre_ping=True)\nSession = sessionmaker(bind=maco)\ns = Session()\n\nnow=datetime.datetime.now()\nmonth = now.strftime('%Y%m')\nmessage_date = now.strftime('%Y/%m/%d %H:%M:%S')\ntomorrow=now+datetime.timedelta(days=1)\nmenu = s.query(models.Menu).filter_by(finish=0,date=int(tomorrow.strftime('%Y%m%d'))).first()\nif menu is not None:\n menulist = [menu.menu1, menu.menu2, menu.menu3]\n date = menu.date\n query = f'''select user_id,name,order_num,option from ( select * from \"order\" where date = {date} and \n order_num != 0 ) as a inner join \"user\" on a.user_id = \"user\".id;'''\n df = pd.read_sql(query, maco)\n if len(df)>0:\n ordersum = [sum(df['order_num']==1), sum(df['order_num']==2), sum(df['order_num']==3)]\n\n # send maco\n message = ''\n for i in range(len(menulist)):\n if ordersum[i] > 0:\n message = message + f'{menulist[i]}\\t{ordersum[i]}個\\n'\n payload = {'message': '\\n' + message + 'お願いします'}\n try:\n requests.post(line_notify_api, data=payload, headers=maco_headers)\n print(message_date + ' order was sended to maco\\n')\n except:\n print('Error\\n',message_date, message)\n\n #send group\n dlist=df[df['option'] == 0]\n if len(dlist)<1:\n dlist=df\n dlist=dlist.reset_index()\n\n query = f'''select o_date as date, \"order\", delivery from (select date as 
o_date,user_id as order from \n \"order\" where order_num != 0 and date between {month}00 and {month}99 order by o_date) as a left \n outer join (select date as d_date,user_id as delivery from \"delivery\" where date between \n {month}00 and {month}99) as b on a.o_date = b.d_date;'''\n df2 = pd.read_sql(query, maco)\n cost=[]\n for index,row in dlist.iterrows():\n onum=sum(df2['order'] == row['user_id'])\n dnum=sum(df2['delivery'] == row['user_id'])\n cost.append(onum-dnum)\n cost=np.array(cost) - min(cost)\n if sum(cost) < 1:\n cost = cost + 1\n randlist = [i for i in range(len(cost)) for j in range(cost[i])]\n random.seed(datetime.datetime.now().timestamp())\n print(randlist)\n deli = dlist.loc[random.choice(randlist)]\n\n s.add(models.Delivery(date, deli['user_id']))\n s.commit()\n message=''\n for i in range(len(menulist)):\n if ordersum[i] > 0:\n tmp=','.join(df[df['order_num'] == i+1]['name'])\n message = message + f'\\n{menulist[i]}\\t{ordersum[i]}個({tmp})'\n group_payload = {'message': f'\\n明日の配達は{deli[\"name\"]}です\\n{message}'}\n try:\n requests.post(line_notify_api, data=group_payload, headers=group_headers)\n print(message_date + ' delivery was sended to group\\n')\n except:\n print('Error\\n', message_date, message)\n else:\n print(message_date + ' order is none\\n')\n menu.finish=1\n s.commit()\nelse:\n print(message_date + ' menu is none\\n')\n\n\n\n # order = s.query(models.Order).filter_by(date=date).all()\n # if len(order) > 0:\n # #send maco\n # orderlist = []\n # ordersum = [0, 0, 0]\n # for row in order:\n # if row.order_num < 1:\n # continue\n # tmp = s.query(models.User).filter_by(id=row.user_id).first()\n # num = row.order_num - 1\n # orderlist.append([tmp.name, menulist[num], tmp.option, tmp.id])\n # ordersum[num] = ordersum[num] + 1\n # message = ''\n # for i in range(len(menulist)):\n # if ordersum[i] > 0:\n # message = message + f'{menulist[i]}\\t{ordersum[i]}つ\\n'\n # payload = {'message': '\\n' + message + 'お願いします'}\n # try:\n # requests.post(line_notify_api, data=payload, headers=maco_headers)\n # print(message_date + ' order is sended to maco\\n')\n # except:\n # print('Error\\n',message_date, message)\n\n # #send group\n # member = []\n # for row in orderlist:\n # if row[2] != 1:\n # member.append([row[0], row[3]])\n # if len(member) < 1:\n # for row in orderlist:\n # member.append([row[0], row[3]])\n # random.seed(datetime.datetime.now().timestamp())\n # deli = member[random.randrange(len(member))]\n # s.add(models.Delivery(date, deli[1]))\n # s.commit()\n # group_payload = {'message': '\\n' + message + f'\\n明日の配達は{deli[0]}です'}\n # try:\n # requests.post(line_notify_api, data=group_payload, headers=group_headers)\n # print(message_date + ' delivery is sended to group\\n')\n # except:\n # print('Error\\n', message_date, message)\n # else:\n # print(message_date + ' order is none\\n')\n # menu.finish=1\n # s.commit()\n\n#culculate point\nif tomorrow.day == 1:\n query = f'''select user_id,name,point from (select user_id, sum(mcount) as point from (select user_id, count(user_id)\n as mcount from menu where date between {month}00 and {month}99 group by user_id union select user_id,\n count(user_id)*2 as dcount from delivery where date between {month}00 and {month}99 group by user_id)\n as total group by user_id) as points inner join \"user\" on points.user_id = \"user\".id order by point desc;\n '''\n df = pd.read_sql(query, maco)\n query = f'''select count(*) from ( select user_id from \"order\" where date between {month}00 and {month}99 and \n order_num != 0 ) 
as a inner join ( select id from \"user\" where option = 1 ) as b on a.user_id = b.id;'''\n num = pd.read_sql(query, maco).values[0][0]\n\n bonus_list = np.zeros(len(df))\n rank = df[0:3]['point']\n if len(df) > 2:\n if rank[0] == rank[1] and rank[1] == rank[2]:\n tmp = int(np.floor(num / 3))\n bonus_list[0] = tmp\n bonus_list[1] = tmp\n bonus_list[2] = tmp\n elif rank[0] == rank[1]:\n tmp = int(np.floor(num / 5))\n bonus_list[2] = tmp\n tmp = int(np.floor((num - tmp) / 2))\n bonus_list[0] = tmp\n bonus_list[1] = tmp\n elif rank[1] == rank[2]:\n tmp = int(np.floor(num / 2))\n bonus_list[0] = tmp\n tmp = int(np.floor((num - tmp) / 2))\n bonus_list[1] = tmp\n bonus_list[2] = tmp\n else:\n bonus_list[0] = int(np.ceil(num / 2))\n bonus_list[1] = int(np.ceil((num - bonus_list[0]) * 3 / 5))\n bonus_list[2] = int(num - bonus_list[0] - bonus_list[1])\n elif len(df) == 2:\n if rank[0] == rank[1]:\n tmp = int(np.floor(num / 2))\n bonus_list[0] = tmp\n bonus_list[1] = tmp\n else:\n bonus_list[0] = int(np.ceil(num * 7 / 10))\n bonus_list[1] = num - bonus_list[0]\n elif len(df)==1:\n bonus_list[0] = num\n\n for index, row in df.iterrows():\n s.add(models.Points(int(month), row['user_id'], int(row['point']), int(bonus_list[index]*50)))\n print(int(month), row['user_id'], int(row['point']), int(bonus_list[index]*50))\n s.commit()","sub_path":"send_order.py","file_name":"send_order.py","file_ext":"py","file_size_in_byte":7714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"590824949","text":"import secrets\nfrom flask import (\n Flask,\n render_template,\n url_for,\n)\n\nimport skrun\n\nimport os\nimport shutil\nimport sys\n\napp = Flask(__name__)\n\nTHIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) + \"/..\"\nsys.path.insert(1, THIS_FOLDER)\n\nshutil.rmtree(\"sessions\", ignore_errors=True)\nos.system(\"mkdir sessions\")\n\nsessions = {}\nterminated = set()\n\n@app.route('/', methods=(\"POST\", \"GET\"))\ndef home():\n session = secrets.token_hex(64)\n sessions[session] = None\n return render_template(\"index.html\", session=session)\n\n","sub_path":"src/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"653007325","text":"import scrapy\nfrom scrapy.loader import ItemLoader\n# from scrapy.linkextractors import LinkExtractor\n# from scrapy.contrib.spiders import Rule\nfrom items import TestItem\n\n\nclass CrawlSpider(scrapy.Spider):\n\n name = \"Crawly\"\n\n allowed_urls = [\n 'https://www.reuters.com',\n ]\n start_urls = [\n 'https://www.reuters.com/finance',\n 'https://www.reuters.com/finance/markets/asia'\n ]\n\n def start_requests(self):\n yield scrapy.Request('https://www.reuters.com/finance', self.parse)\n\n def parse(self, response):\n # GOOD\n if response.url == 'https://www.reuters.com/finance':\n tests = response.css('div[class=\"story-content\"]')\n for test in tests:\n item = ItemLoader(item=TestItem)\n time = test.css('time.article-time').extract_first()\n title = test.xpath('//h3[@class=\"story-title\"]').extract_first()\n item.add_value('time', time.encode('ascii', 'ignore'))\n item.add_value('title', title.encode('ascii', 'ignore'))\n yield item.load_item()","sub_path":"Web-Scraper/CRAWLER/msqltester/msqltester/spiders/test_spider.py","file_name":"test_spider.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} 
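Each record above and below is a single JSON object on its own line, with fields such as seq_id, text, sub_path, file_name and program_lang; the Python source sits in the text field with escaped newlines. As a minimal sketch only — the corpus path "corpus.jsonl" and the fields printed at the end are illustrative assumptions, not part of the dataset — records in this layout can be read back roughly like this:

import json

def iter_corpus(path):
    """Yield one decoded record per non-empty line of a JSON-lines corpus file."""
    with open(path, "r", encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    # "corpus.jsonl" is a placeholder path, not the real file name.
    for record in iter_corpus("corpus.jsonl"):
        source = record["text"]  # embedded source code, newlines restored by json.loads
        print(record["seq_id"], record["file_name"], len(source.splitlines()))

json.loads undoes the escaping in text, so source.splitlines() recovers the original lines of each embedded file.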
+{"seq_id":"431268380","text":"#Faça um programa que peça 10 números inteiros, calcule e mostre a quantidade de números pares e a quantidade de números impares.\n\npares=[]\nimpares=[]\nqte_pares=0\nqte_impares=0\nfor c in range(1,11):\n num=int(input(f'digite o nº{c}'))\n if num%2==0:\n pares.append(num)\n qte_pares+=1\n else:\n impares.append(num)\n qte_impares+=1\nprint(f'dos 10 números digitados : {qte_impares} são ímpares e {qte_pares} são pares'\n f'\\n os números ímpares digitados anteriormente são:{impares}'\n f'\\n os números pares digitados anteriormente são:{pares} ')\n","sub_path":"Exercicio.14_listaPyBr.py","file_name":"Exercicio.14_listaPyBr.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"206784012","text":"\"\"\"*****************************************************************************\n* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.\n*\n* Subject to your compliance with these terms, you may use Microchip software\n* and any derivatives exclusively with Microchip products. It is your\n* responsibility to comply with third party license terms applicable to your\n* use of third party software (including open source software) that may\n* accompany Microchip software.\n*\n* THIS SOFTWARE IS SUPPLIED BY MICROCHIP \"AS IS\". NO WARRANTIES, WHETHER\n* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED\n* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A\n* PARTICULAR PURPOSE.\n*\n* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,\n* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND\n* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS\n* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. 
TO THE\n* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN\n* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,\n* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.\n*****************************************************************************\"\"\"\n################################################################################\n#### Business Logic ####\n################################################################################\ndef updateSDCDENCommentVisibility(symbol, event):\n symbol.setVisible(event[\"value\"])\n\ndef updateSDWPENCommentVisibility(symbol, event):\n symbol.setVisible(event[\"value\"])\n\ndef updateSDMMCClkFreq(symbol, event):\n # round the frequency value to the nearest KHz to avoid integer division errors arising out\n # configuring fractional PLL sources\n symbol.setValue(int(round((event[\"value\"]), -3)), 2)\n\n # Display warning if the required clock sources are not enabled\n sdmmcBaseClkSrcComment.setVisible(0 == Database.getSymbolValue(sdmmcInstanceName.getValue().lower(), \"SDMMC_BASECLK_FREQ\"))\n sdmmcMultClkSrcComment.setVisible(0 == Database.getSymbolValue(sdmmcInstanceName.getValue().lower(), \"SDMMC_MULTCLK_FREQ\"))\n\n\n# Dependency Function to show or hide the warning message depending on Interrupt enable/disable status\ndef interruptStatusWarning(symbol, event):\n if Database.getSymbolValue(sdmmcInstanceName.getValue().lower(), \"INTERRUPT_MODE\") == True:\n symbol.setVisible(event[\"value\"])\n\n# Dependency Function to show or hide the warning message depending on Clock enable/disable status\ndef clockStatusWarning(symbol, event):\n if event[\"value\"] == False:\n symbol.setVisible(True)\n else:\n symbol.setVisible(False)\n\n################################################################################\n#### Component ####\n################################################################################\ndef destroyComponent(sdmmcComponent):\n Database.clearSymbolValue(coreNamespace, interruptEnable)\n Database.clearSymbolValue(coreNamespace, interruptHandler)\n Database.clearSymbolValue(coreNamespace, interruptHandlerLock)\n\ndef instantiateComponent(sdmmcComponent):\n global coreNamespace\n global interruptEnable\n global interruptHandler\n global interruptHandlerLock\n global sdmmcInstanceName\n global sdmmcBaseClkSrcComment\n global sdmmcMultClkSrcComment\n\n sdmmcInstanceName = sdmmcComponent.createStringSymbol(\"SDMMC_INSTANCE_NAME\", None)\n sdmmcInstanceName.setVisible(False)\n sdmmcInstanceName.setDefaultValue(sdmmcComponent.getID().upper())\n Log.writeInfoMessage(\"Running \" + sdmmcInstanceName.getValue())\n\n coreNamespace = \"core\"\n interruptEnable = sdmmcInstanceName.getValue() + \"_INTERRUPT_ENABLE\"\n interruptHandler = sdmmcInstanceName.getValue() + \"_INTERRUPT_HANDLER\"\n interruptHandlerLock = sdmmcInstanceName.getValue() + \"_INTERRUPT_HANDLER_LOCK\"\n interruptEnableUpdate = sdmmcInstanceName.getValue() + \"_INTERRUPT_ENABLE_UPDATE\"\n\n # Initial settings for CLK\n Database.setSymbolValue(coreNamespace, sdmmcInstanceName.getValue() + \"_CLOCK_ENABLE\", True, 2)\n\n # Initial settings for Interrupt\n Database.setSymbolValue(coreNamespace, interruptEnable, True, 2)\n Database.setSymbolValue(coreNamespace, interruptHandler, sdmmcInstanceName.getValue() + \"_InterruptHandler\", 2)\n Database.setSymbolValue(coreNamespace, interruptHandlerLock, True, 2)\n\n sdmmcInterrupt = sdmmcComponent.createBooleanSymbol(\"INTERRUPT_MODE\", None)\n 
sdmmcInterrupt.setLabel(\"Interrupt Mode\")\n sdmmcInterrupt.setDefaultValue(True)\n sdmmcInterrupt.setReadOnly(True)\n\n sdmmcHClk = sdmmcComponent.createIntegerSymbol(\"SDMMC_HCLOCK_FREQ\", None)\n sdmmcHClk.setVisible(False)\n sdmmcHClk.setDefaultValue(int(round(Database.getSymbolValue(\"core\", sdmmcInstanceName.getValue() + \"_CLOCK_FREQUENCY\"), -3)))\n sdmmcHClk.setDependencies(updateSDMMCClkFreq, [\"core.\" + sdmmcInstanceName.getValue() + \"_CLOCK_FREQUENCY\"])\n\n sdmmcBaseClk = sdmmcComponent.createIntegerSymbol(\"SDMMC_BASECLK_FREQ\", None)\n sdmmcBaseClk.setLabel(\"Base Clock Frequency (Hz)\")\n sdmmcBaseClk.setDefaultValue(int(round(Database.getSymbolValue(\"core\", sdmmcInstanceName.getValue() + \"_BASECLK_FREQUENCY\"), 3)))\n sdmmcBaseClk.setDependencies(updateSDMMCClkFreq, [\"core.\" + sdmmcInstanceName.getValue() + \"_BASECLK_FREQUENCY\"])\n sdmmcBaseClk.setReadOnly(True)\n\n sdmmcBaseClkSrcComment = sdmmcComponent.createCommentSymbol(\"SDMMC_BASE_CLOCK_SOURCE_COMMENT\", None)\n sdmmcBaseClkSrcComment.setVisible(False)\n sdmmcBaseClkSrcComment.setLabel(\"Source clock for divided clock mode is not enabled !!!\")\n\n sdmmcMultClk = sdmmcComponent.createIntegerSymbol(\"SDMMC_MULTCLK_FREQ\", None)\n sdmmcMultClk.setLabel(\"Programmable Clock Frequency (Hz)\")\n sdmmcMultClk.setReadOnly(True)\n sdmmcMultClk.setDefaultValue(int(round(Database.getSymbolValue(\"core\", sdmmcInstanceName.getValue() + \"_MULTCLK_FREQUENCY\"), -3)))\n sdmmcMultClk.setDependencies(updateSDMMCClkFreq, [\"core.\" + sdmmcInstanceName.getValue() + \"_MULTCLK_FREQUENCY\"])\n\n sdmmcMultClkSrcComment = sdmmcComponent.createCommentSymbol(\"SDMMC_MULT_CLOCK_SOURCE_COMMENT\", None)\n sdmmcMultClkSrcComment.setVisible(False)\n sdmmcMultClkSrcComment.setLabel(\"Source clock for programmable clock mode is not enabled !!!\")\n\n #Parse the ATDF to find out whether the IP supports CD and WP pins on this mask\n CDPinAvailable = False\n WPPinAvailable = False\n signals = ATDF.getNode(\"/avr-tools-device-file/devices/device/peripherals/module@[name=\\\"SDMMC\\\"]/instance@[name=\\\"\"\n + sdmmcInstanceName.getValue() + \"\\\"]/signals\").getChildren()\n for index in range(0, len(signals)):\n if signals[index].getAttribute(\"group\").endswith(\"_CD\"):\n CDPinAvailable = True\n if signals[index].getAttribute(\"group\").endswith(\"_WP\"):\n WPPinAvailable = True\n\n sdmmcCDSupport = sdmmcComponent.createBooleanSymbol(\"SDCARD_SDCD_SUPPORT\", None)\n sdmmcCDSupport.setLabel(\"Card detect support available\")\n sdmmcCDSupport.setVisible(False)\n sdmmcCDSupport.setDefaultValue(CDPinAvailable)\n\n sdmmcWPSupport = sdmmcComponent.createBooleanSymbol(\"SDCARD_SDWP_SUPPORT\", None)\n sdmmcWPSupport.setLabel(\"Write protect support available\")\n sdmmcWPSupport.setVisible(False)\n sdmmcWPSupport.setDefaultValue(WPPinAvailable)\n\n sdmmcUseCD = sdmmcComponent.createBooleanSymbol(\"SDCARD_SDCDEN\", None)\n sdmmcUseCD.setLabel(\"Use SD Card Detect (SDCD#) Pin\")\n sdmmcUseCD.setVisible(False)\n sdmmcUseCD.setDefaultValue(sdmmcCDSupport.getValue())\n\n sdmmcUseWP = sdmmcComponent.createBooleanSymbol(\"SDCARD_SDWPEN\", None)\n sdmmcUseWP.setLabel(\"Use SD Write Protect (SDWP#) Pin\")\n sdmmcUseWP.setVisible(False)\n sdmmcUseWP.setDefaultValue(sdmmcWPSupport.getValue())\n\n sdmmcDescLines = sdmmcComponent.createIntegerSymbol(\"SDMMC_NUM_DESCRIPTOR_LINES\", None)\n sdmmcDescLines.setLabel(\"Number of ADMA2 Descriptor Lines\")\n sdmmcDescLines.setMin(1)\n sdmmcDescLines.setMax(10)\n sdmmcDescLines.setDefaultValue(1)\n\n # Dependency 
Status for interrupt\n sdmmcSymIntEnComment = sdmmcComponent.createCommentSymbol(sdmmcInstanceName.getValue() + \"_INTERRUPT_ENABLE_COMMENT\", None)\n sdmmcSymIntEnComment.setVisible(False)\n sdmmcSymIntEnComment.setLabel(\"Warning!!! \" + sdmmcInstanceName.getValue() + \" Interrupt is Disabled in Interrupt Manager\")\n sdmmcSymIntEnComment.setDependencies(interruptStatusWarning, [\"core.\" + interruptEnableUpdate])\n\n # Dependency Status for clock\n sdmmcSymClkEnComment = sdmmcComponent.createCommentSymbol(sdmmcInstanceName.getValue() + \"_CLK_ENABLE_COMMENT\", None)\n sdmmcSymClkEnComment.setVisible(False)\n sdmmcSymClkEnComment.setLabel(\"Warning!!! \" + sdmmcInstanceName.getValue() + \" Clock is Disabled in Clock Manager\")\n sdmmcSymClkEnComment.setDependencies(clockStatusWarning, [\"core.\"+ sdmmcInstanceName.getValue() + \"_CLOCK_ENABLE\"])\n\n ############################################################################\n #### Code Generation ####\n ############################################################################\n configName = Variables.get(\"__CONFIGURATION_NAME\")\n\n sdmmcHeaderFile = sdmmcComponent.createFileSymbol(\"SDMMC_HEADER\", None)\n sdmmcHeaderFile.setSourcePath(\"../peripheral/sdmmc_44002/templates/plib_sdmmc_common.h\")\n sdmmcHeaderFile.setOutputName(\"plib_sdmmc_common.h\")\n sdmmcHeaderFile.setDestPath(\"peripheral/sdmmc/\")\n sdmmcHeaderFile.setProjectPath(\"config/\" + configName + \"/peripheral/sdmmc/\")\n sdmmcHeaderFile.setType(\"HEADER\")\n sdmmcHeaderFile.setOverwrite(True)\n\n sdmmcHeader1File = sdmmcComponent.createFileSymbol(\"SDMMC_HEADER1\", None)\n sdmmcHeader1File.setSourcePath(\"../peripheral/sdmmc_44002/templates/plib_sdmmc.h.ftl\")\n sdmmcHeader1File.setOutputName(\"plib_\"+sdmmcInstanceName.getValue().lower()+\".h\")\n sdmmcHeader1File.setDestPath(\"peripheral/sdmmc/\")\n sdmmcHeader1File.setProjectPath(\"config/\" + configName + \"/peripheral/sdmmc/\")\n sdmmcHeader1File.setType(\"HEADER\")\n sdmmcHeader1File.setOverwrite(True)\n sdmmcHeader1File.setMarkup(True)\n\n sdmmcSource1File = sdmmcComponent.createFileSymbol(\"SDMMC_SOURCE1\", None)\n sdmmcSource1File.setSourcePath(\"../peripheral/sdmmc_44002/templates/plib_sdmmc.c.ftl\")\n sdmmcSource1File.setOutputName(\"plib_\"+sdmmcInstanceName.getValue().lower()+\".c\")\n sdmmcSource1File.setDestPath(\"peripheral/sdmmc/\")\n sdmmcSource1File.setProjectPath(\"config/\" + configName + \"/peripheral/sdmmc/\")\n sdmmcSource1File.setType(\"SOURCE\")\n sdmmcSource1File.setOverwrite(True)\n sdmmcSource1File.setMarkup(True)\n\n sdmmcSystemInitFile = sdmmcComponent.createFileSymbol(\"SDMMC_INIT\", None)\n sdmmcSystemInitFile.setType(\"STRING\")\n sdmmcSystemInitFile.setOutputName(\"core.LIST_SYSTEM_INIT_C_SYS_INITIALIZE_PERIPHERALS\")\n sdmmcSystemInitFile.setSourcePath(\"../peripheral/sdmmc_44002/templates/system/initialization.c.ftl\")\n sdmmcSystemInitFile.setMarkup(True)\n\n sdmmcSystemDefFile = sdmmcComponent.createFileSymbol(\"SDMMC_DEF\", None)\n sdmmcSystemDefFile.setType(\"STRING\")\n sdmmcSystemDefFile.setOutputName(\"core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES\")\n sdmmcSystemDefFile.setSourcePath(\"../peripheral/sdmmc_44002/templates/system/definitions.h.ftl\")\n sdmmcSystemDefFile.setMarkup(True)\n","sub_path":"peripheral/sdmmc_44002/config/sdmmc.py","file_name":"sdmmc.py","file_ext":"py","file_size_in_byte":11447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"596627585","text":"# coding:utf8\n\nimport json\nimport os\nimport 
pysftp\nimport sys\n\nprogressDict={}\nprogressEveryPercent=10\n\nfor i in range(0,101):\n if i%progressEveryPercent==0:\n progressDict[str(i)]=\"\"\n\ndef printProgressDecimal(x,y):\n if int(100*(int(x)/int(y))) % progressEveryPercent ==0 and progressDict[str(int(100*(int(x)/int(y))))]==\"\":\n print(\"{}% ({} Transfered(B)/ {} Total File Size(B))\".format(str(\"%.2f\" %(100*(int(x)/int(y)))),x,y))\n progressDict[str(int(100*(int(x)/int(y))))]=\"1\"\n\nwith open(os.path.join(sys.path[0], \"upload_sftp.json\"), 'r') as f:\n config = json.load(f)\n\nprint(config)\n\nlocalpath = sys.argv[1]\n\n\"\"\"remotepath: the destination path on the SFTP server. \nNote that the filename should be included.\nOnly specifying a directory may result in an error.\"\"\"\nremotepath = sys.argv[2]\n\nprint(localpath)\nprint(remotepath)\n\ncnopts = pysftp.CnOpts()\ncnopts.hostkeys = None\n\nsrv = pysftp.Connection(host=config[\"host\"], port=config[\"port\"], username=config[\"username\"],\n password=config[\"password\"], cnopts=cnopts)\nsrv.put(localpath, remotepath, callback=printProgressDecimal)\nsrv.close()\n","sub_path":"tools/upload_sftp.py","file_name":"upload_sftp.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"418514678","text":"\"\"\"\nevolving a neural network for source seeking\n\"\"\"\n\nimport sys, pickle, argparse, random\nsys.path.insert(1,'../flat_game')\nsys.path.insert(1,'../')\nsys.path.insert(1,'../settings')\nimport numpy as np\nfrom pygmo import *\nfrom tools import fileHandler as fh\nfrom classes import evolution, swarmulator\nfrom classes.randomize_environment import get_spawn_pos\n\n#####################\n# Argument parser #\n#####################\nparser = argparse.ArgumentParser(description='Evolve a controller using swarmulator')\nparser.add_argument('-controller', type=str, help=\"(str) Controller to use\", default=\"gas_seeking\")\nparser.add_argument('-agent', type=str, help=\"(str) Swramulator agent to use\", default=\"gas_agent\")\nparser.add_argument('-gen', type=int, help=\"(int) Max number generations, default = 100\", default=400)\nparser.add_argument('-batchsize', type=int, help=\"(int) Batch size. 
How many parallel tests to try, default = 5\", default=5)\nparser.add_argument('-resume', type=str, help=\"(str) Resume after quitting from the indicated saved file, default = None\", default=None)\nparser.add_argument('-plot', type=str, help=\"(str) If set, it will plot the evolution from a saved run, default = None\", default=None)\nparser.add_argument('-id', type=int, help=\"(int) Evolutionary run ID, default = 1\", default=1)\nargs = parser.parse_args()\n\nprint(\"Loading and building Swarmulator\")\nsim = swarmulator.swarmulator(verbose=False)\nsim.make(controller=args.controller, agent=args.agent, clean=True, logger=False, verbose=False)\n# Swarmulator settings\nsim.runtime_setting(\"time_limit\", str(\"50\")) # Time limit of each simulation \nsim.runtime_setting(\"simulation_realtimefactor\", str(\"300\")) # Real time factor\nsim.runtime_setting(\"environment\", \"image_testing\") # Environment, leave empty for boundless\nsim.runtime_setting(\"fitness\", \"source_distance\") # Fitness function to use (in sw/simulation/fitness_functions.h)\n\n\n# Specify network topology\nshape_file = \"../../conf/policies/gas_shape.txt\"\npolicy_file = \"conf/policies/gas_params.txt\"\nsim.runtime_setting(\"policy\", policy_file) \nenvironments = ['rand_env_1','rand_env_2','rand_env_3','rand_env_4','rand_env_5']\n\npolicy_shape = [6,20,20,3]\nnum_params = 0\nbias_add = True\nnum_agents = 10\nsim.set_n_agents(num_agents)\nnum_params+= np.sum([policy_shape[i]*policy_shape[i+1] for i in range(len(policy_shape)-1)])\nif(bias_add):\n num_params+= np.sum(policy_shape[1:])\nfh.save_to_txt(np.array(policy_shape),shape_file)\n\n\nclass prob_bart:\n \n def __init__(self):\n self.dim = num_params\n \n def fitness(self,x):\n fh.save_to_txt(x, sim.path+policy_file)\n f = sim.batch_run_envs(environments) # Run with 10-20 robots, 5 times (default args.batchsize=5)\n return [f.mean()]\n\n def get_bounds(self):\n return([-20]*num_params,[20]*num_params)\n\n\nif __name__ == \"__main__\":\n \n \n algo = algorithm(sga(gen=1))\n algo.set_verbosity(1)\n prob = problem(prob_bart())\n pop = population(prob,50)\n\n\n for i in range(400):\n get_spawn_pos(num_agents,'../../conf/environments/')\n print(\"Generation %i\"%i)\n pop = algo.evolve(pop)\n print(pop.champion_f)\n fh.save_to_txt(pop.champion_x,'best_individual.txt')\n\n print(pop)\n\n\n\n uda = algo.extract(sga)\n log = uda.get_log()\n #print(log)\n import matplotlib.pyplot as plt \n plt.plot([entry[0] for entry in log],[entry[2]for entry in log], 'k--') \n plt.show() \n #print(model.nn.get_weights())","sub_path":"scripts/python/final_model/pygmo_evo.py","file_name":"pygmo_evo.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"118198553","text":"#coding:utf-8\nimport os\nimport re\n\nfrom sys import stdin\nfrom os import walk\nfrom os.path import join\n\ntarget_dir = os.getcwd()\ntarget_name = \"定时播放\"\ndo_cmd_usr = \" pi \"\ncrontab_file = \"/etc/crontab\"\n\n#print target_dir\n\nadd_crontab_content = \"\"\nold_crontab_content = \"\"\n\n\nfor root, dirs, files in walk(target_dir):\n for dir_name in dirs:\n #print dir_name\n if target_name in dir_name:\n full_dir_name = (join(root, dir_name))\n #print dir_name\n #dir_name_split = dir_name.split('每周')\n result = re.search(\"(每周)_(.*)_(开始时间)_(.*)_(结束时间)_(.*)_(定时播放)\", dir_name)\n if result:\n #print result.group(0)\n #print result.group(1)\n #print result.group(2)\n #print result.group(3)\n #print result.group(4)\n #print 
result.group(5)\n #print result.group(6)\n\n week = result.group(2)\n start_time = result.group(4)\n end_time = result.group(6)\n\n #print week\n #print start_time\n #print end_time\n\n week = week.replace(\"_\", \",\")\n #print \"week = %s\" %(week)\n\n # 组织定时启动命令\n start_time_hour = start_time.split('_')[0]\n start_time_minute = start_time.split('_')[1]\n #print \"start_time_hour = %s, start_time_minute = %s\" %(start_time_hour, start_time_minute)\n\n start_cmd = \"cd %s && sudo ./play_mp3.sh\" %(full_dir_name)\n #print start_cmd\n start_crontab = \"%s %s * * %s %s %s\\n\" %(start_time_minute, start_time_hour, week, do_cmd_usr, start_cmd)\n #print start_crontab\n\n add_crontab_content = add_crontab_content + start_crontab\n\n # 组织定时关闭命令\n end_time_hour = end_time.split('_')[0]\n end_time_minute = end_time.split('_')[1]\n\n #print \"end_time_hour = %s, end_time_minute = %s\" %(end_time_hour, end_time_minute)\n\n stop_cmd = \"cd %s && sudo ./stop_mp3.sh\" %(target_dir)\n #print stop_cmd\n stop_crontab = \"%s %s * * %s %s %s\\n\" %(end_time_minute, end_time_hour, week, do_cmd_usr, stop_cmd)\n #print stop_crontab\n\n add_crontab_content = add_crontab_content + stop_crontab\n\n\n #print dir_name_split\n \n #print full_dir_name\n\n# 获取crontab中的原始内容\nfile_handle = open(crontab_file, 'rb')\nfor line in file_handle.readlines():\n line = line.strip()\n if not len(line) or line.startswith('#'):\n continue\n #print line\n result = re.search(do_cmd_usr, line)\n if not result:\n #print line.strip()\n old_crontab_content = old_crontab_content + line + \"\\n\"\n \nold_crontab_content = old_crontab_content + \"\\n\" \nfile_handle.close()\n \n\n#print old_crontab_content\n#print \"+++++++++++++++++++++++++\"\n#print add_crontab_content\n\n\nfile_handle = open(crontab_file,'w')\nfile_handle.write(old_crontab_content)\nfile_handle.write(add_crontab_content)\nfile_handle.close()\n\n\n#if __name__ == '__main__':\n\n\n#55 6 * * 0,2,4 root cd /home/pi/zengxiaolong/music/poetry && ./play_mp3.bash\n#40 7 * * 0,2,4 root cd /home/pi/zengxiaolong/music/poetry && ./stop_mp3.bash\n \n","sub_path":"每周_1-3_开始时间_7_00_结束时间_7_30_定时播放_古诗/modify_etc_crontab.py","file_name":"modify_etc_crontab.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"418433192","text":"n = int(input())\ns = input()\n\nif len(s) % 2 == 0:\n print(-1)\n exit()\n\nif s[len(s)//2] != \"b\":\n print(-1)\n exit()\n\nans = 0\nmid = len(s)//2\nfor i in range(1, mid+1):\n if i % 3 == 1:\n if s[mid-i] == \"a\" and s[mid+i] == \"c\":\n ans += 1\n else:\n ans = -1\n break\n\n elif i % 3 == 2:\n if s[mid-i] == \"c\" and s[mid+i] == \"a\":\n ans += 1\n else:\n ans = -1\n break\n\n elif i % 3 == 0:\n if s[mid-i] == \"b\" and s[mid+i] == \"b\":\n ans += 1\n else:\n ans = -1\n break\n\nprint(ans)\n","sub_path":"ABC_B/ABC023_B.py","file_name":"ABC023_B.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"271478162","text":"\r\nimport numpy as np\r\nimport math as math\r\n\r\n#np.seterr('raise')\r\n\r\n\r\ndef STREAMLINE_SPM(XP, YP, XB, YB, phi, S):\r\n # Number of panels\r\n numPan = len(XB) - 1 # Number of panels\r\n\r\n # Initialize arrays\r\n Mx = np.zeros(numPan) # Initialize Ix integral array\r\n My = np.zeros(numPan) # Initialize Iy integral array\r\n\r\n # Compute integral\r\n for j in range(numPan): # Loop over all panels\r\n # Compute intermediate values\r\n A = -(XP 
- XB[j]) * np.cos(phi[j]) - (YP - YB[j]) * np.sin(phi[j]) # A term\r\n B = (XP - XB[j]) ** 2 + (YP - YB[j]) ** 2; # B term\r\n Cx = -np.cos(phi[j]); # C term (X-direction)\r\n Dx = XP - XB[j]; # D term (X-direction)\r\n Cy = -np.sin(phi[j]); # C term (Y-direction)\r\n Dy = YP - YB[j]; # D term (Y-direction)\r\n E = math.sqrt(B - A ** 2); # E term\r\n if (E == 0 or np.iscomplex(E) or np.isnan(E) or np.isinf(E)): # If E term is 0 or complex or a NAN or an INF\r\n Mx[j] = 0 # Set Mx value equal to zero\r\n My[j] = 0 # Set My value equal to zero\r\n else:\r\n # Compute Mx, Ref [1]\r\n term1 = 0.5 * Cx * np.log((S[j] ** 2 + 2 * A * S[j] + B) / B); # First term in Mx equation\r\n term2 = ((Dx - A * Cx) / E) * (math.atan2((S[j] + A), E) - math.atan2(A, E)); # Second term in Mx equation\r\n Mx[j] = term1 + term2; # Compute Mx integral\r\n\r\n # Compute My, Ref [1]\r\n term1 = 0.5 * Cy * np.log((S[j] ** 2 + 2 * A * S[j] + B) / B); # First term in My equation\r\n term2 = ((Dy - A * Cy) / E) * (math.atan2((S[j] + A), E) - math.atan2(A, E)); # Second term in My equation\r\n My[j] = term1 + term2; # Compute My integral\r\n\r\n # Zero out any problem values\r\n if (np.iscomplex(Mx[j]) or np.isnan(Mx[j]) or np.isinf(Mx[j])): # If Mx term is complex or a NAN or an INF\r\n Mx[j] = 0 # Set Mx value equal to zero\r\n if (np.iscomplex(My[j]) or np.isnan(My[j]) or np.isinf(My[j])): # If My term is complex or a NAN or an INF\r\n My[j] = 0 # Set My value equal to zero\r\n\r\n return Mx, My # Return both Mx and My matrice\r\n","sub_path":"math/sehyeouk/STREAMLINE_SPM.py","file_name":"STREAMLINE_SPM.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"215732631","text":"# @Authors : Javad Amirian\n# @Email : javad.amirian@inria.fr\n\nimport numpy as np\nimport os\n\n\nclass ParserHermes:\n \"\"\"\n Parser class for Hermes experiments\n -------\n You can either use the class constructor or call 'load' method,\n by passing the annotation folder: e.g. 
\"OpenTraj/HERMES/Bottleneck_Data/uo-180-070.txt\"\n\n Attributes:\n id_p_dict: map from pedestrian id to her trajectory (positions)\n id_v_dict: map from pedestrian id to her velocity data\n id_t_dict: map from pedestrian id to timestamps she appears\n t_id_dict: map from dataset timestamps to pedestrian ids\n t_p_dict : map from dataset timestamps to location of all pedestrians\n min_t : first timestamp\n max_t : last timestamp\n interval : interval between timestamps\n [min_x, max_x], [min_y, max_y] : spacial extents of all the trajectories\n \"\"\"\n\n def __init__(self, filename=''):\n self.id_p_dict = dict()\n self.id_v_dict = dict()\n self.id_t_dict = dict()\n self.id_label_dict = dict() # FIXME\n self.t_id_dict = dict()\n self.t_p_dict = dict()\n self.t_v_dict = dict()\n self.groupmates = dict()\n self.dataset_name = ''\n self.max_t = 0\n self.min_t = 0\n self.interval = 1 # fixed\n self.fps = 16 # fixed\n self.min_x = +1000\n self.min_y = +1000\n self.max_x = -1000\n self.max_y = -1000\n if filename:\n self.load(filename)\n\n def load(self, filename, down_sample=1, delimit=' '):\n # check to search for many files?\n file_names = list()\n if '*' in filename:\n files_path = filename[:filename.index('*')]\n extension = filename[filename.index('*')+1:]\n for file in os.listdir(files_path):\n if file.endswith(extension):\n file_names.append(files_path+file)\n else:\n file_names.append(filename)\n\n for file in file_names:\n with open(file, 'r') as data_file:\n content = data_file.readlines()\n for i, row in enumerate(content):\n row = row.split(delimit)\n while '' in row: row.remove('')\n if len(row) < 5: continue\n\n id = int(row[0])\n ts = int(row[1])\n if ts % down_sample != 0:\n continue\n\n px = -float(row[3])/100.\n py = float(row[2])/100.\n\n if ts < self.min_t: self.min_t = ts\n if ts > self.max_t: self.max_t = ts\n if px < self.min_x: self.min_x = px\n if px > self.max_x: self.max_x = px\n if py < self.min_y: self.min_y = py\n if py > self.max_y: self.max_y = py\n\n if id not in self.id_p_dict:\n self.id_p_dict[id] = list()\n self.id_v_dict[id] = list()\n self.id_t_dict[id] = list()\n self.id_p_dict[id].append([px, py])\n self.id_t_dict[id].append(ts)\n\n if ts not in self.t_p_dict:\n self.t_p_dict[ts] = []\n self.t_id_dict[ts] = []\n self.t_p_dict[ts].append([px, py])\n self.t_id_dict[ts].append(id)\n\n # FIXME: this is already done in parser.__post_load__()\n for pid in self.id_p_dict:\n self.id_p_dict[pid] = np.array(self.id_p_dict[pid])\n self.id_t_dict[pid] = np.array(self.id_t_dict[pid])\n self.groupmates[pid] = []\n self.id_v_dict[pid] = (self.id_p_dict[pid][1:] - self.id_p_dict[pid][:-1]) * self.fps\n if len(self.id_p_dict[pid]) == 1:\n self.id_v_dict[pid] = np.zeros((1, 2), dtype=np.float64)\n else:\n self.id_v_dict[pid] = np.append(self.id_v_dict[pid], self.id_v_dict[pid][-1].reshape(1, 2), axis=0)\n\n def get_all_trajs(self):\n all_trajs = []\n for key, val in sorted(self.id_p_dict.items()):\n all_trajs.append(val)\n return all_trajs\n\n def get_all_points(self):\n all_points = []\n for key, val in sorted(self.id_p_dict.items()):\n all_points.extend(val)\n return all_points\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n # parser = ParserHermes(\"../../HERMES/Corridor-1D/uo-300-300-200.txt\")\n parser = ParserHermes(\"../../HERMES/Corridor-2D/boa-300-050-070.txt\")\n n_ped = len(parser.id_p_dict.items())\n if not n_ped:\n print(\"HermesParser failed loading file(s)\")\n exit(1)\n print(\"HermesParser successfully loaded file and found %d 
pedestrians\" % n_ped)\n\n for key, traj in parser.id_p_dict.items():\n plt.plot(traj[:, 0], traj[:, 1])\n plt.show()","sub_path":"toolkit/parser/parser_hermes.py","file_name":"parser_hermes.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"352821128","text":"from django import forms\nfrom .models import *\n\n\nclass CitaOtroForm(forms.ModelForm):\n class Meta:\n model = Cita\n fields = '__all__'\n exclude = ['estatus']\n\n\nclass CitaIncidenciaForm(forms.ModelForm):\n class Meta:\n model = Cita\n fields = '__all__'\n exclude = ['estatus']\n\n\nclass IncidenciaForm(forms.ModelForm):\n class Meta:\n model = CitaIncidencia\n fields = ['incidencia']\n\n def __init__(self, *args, **kwargs):\n super(IncidenciaForm, self).__init__(*args, **kwargs)\n self.fields['incidencia'].queryset = Incidencia.objects.filter(estatus__in='1')\n\n\nclass CitaIncidenciaAlForm(forms.ModelForm):\n class Meta:\n model = CitaIncidencia\n fields = '__all__'\n exclude = ['estatus']\n\n def __init__(self, *args, **kwargs):\n grado = kwargs.pop('grado')\n grupo = kwargs.pop('grupo')\n pk = kwargs.pop('pk')\n super(CitaIncidenciaAlForm, self).__init__(*args, **kwargs)\n print(grado)\n self.fields['incidencia'].queryset = Incidencia.objects.filter(incidenciaalumno__alumno__grado=grado,\n incidenciaalumno__alumno__grupo=grupo,\n estatus__in='1')\n self.fields['cita'].queryset = Cita.objects.filter(id_cita=pk)\n","sub_path":"ProyectSecusoft/cita/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"108511925","text":"\"\"\"\nThis experiment script uses a REINFORCE agent to control the battery environment.\n\nExperiment runs through the entire length of the state time series CSV.\n\"\"\"\n\nimport sys\n\nimport tensorflow as tf\n\nfrom energy_py.agents.policy_based.reinforce import REINFORCE_Agent\nfrom energy_py.envs.battery.battery_env import Battery_Env\nfrom energy_py.main.scripts.experiment_blocks import run_single_episode\nfrom energy_py.main.scripts.visualizers import Eternity_Visualizer\nargs = sys.argv\n\nEPISODES = int(args[1])\nEPISODE_LENGTH = int(args[2])\n\nprint('running {} episodes of length {}'.format(EPISODES, EPISODE_LENGTH))\n\nenv = Battery_Env(lag = 0,\n episode_length = EPISODE_LENGTH,\n episode_start = 'random',\n power_rating = 2, # in MW\n capacity = 4, # in MWh\n verbose = 0)\nprint('made env')\nagent = REINFORCE_Agent(env,\n epsilon_decay_steps = EPISODE_LENGTH * EPISODES / 2,\n learning_rate = 0.01,\n batch_size = 64 )\nprint('made agent')\n# creating the TensorFlow session for this experiment\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for episode in range(1, EPISODES):\n agent, env, sess = run_single_episode(episode,\n agent,\n env,\n sess)\n\n # get a batch to learn from\n observations, actions, returns = agent.memory.get_episode_batch(episode)\n # train the model\n loss = agent.learn(observations, actions, returns, sess)\n\n# finally collect data from the agent & environment\nglobal_history = Eternity_Visualizer(episode, agent, env)\noutputs = global_history.output_results()\n","sub_path":"energy_py/main/experiments/battery_reinforce/battery_reinforce.py","file_name":"battery_reinforce.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} 
+{"seq_id":"376601777","text":"# -*- coding: utf-8 -*-\r\n'''\r\n @File : creat_scene.py\r\n @Time : 2018/10/12 16:36\r\n @Author : Chenzd\r\n @Project : 创建32个场景\r\n @Software: PyCharm\r\n'''\r\nimport unittest\r\n\r\nfrom page.LeeBus.common import Common\r\nfrom page.LeeBus.scene.scene_editPage import scene_editPage\r\nfrom page.appiumDriver import MyDriver\r\nfrom page.common.homePage import HomePage\r\nfrom public.configLog import Logger\r\nlogger = Logger(logger='testCase.LeeBus.scene.scene_edit.creat_scene').getlog()\r\nclass creat_scene(unittest.TestCase):\r\n '''自定义场景--创建32个'''\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n print(\"----------自定义场景--创建冒烟开始---------\")\r\n cls.driver = MyDriver.cur_driver()\r\n cls.i = 0\r\n\r\n def test_creat_scene(self):\r\n '''自定义场景--创建32个'''\r\n HomePage(self.driver).scenes_click()\r\n common = Common(self.driver)\r\n scene_edit = scene_editPage(self.driver)\r\n while self.i < 32:\r\n scene_edit.add_scene()\r\n print('1、=========对场景图标进行随机选择=========')\r\n logger.info('1、=========对场景图标进行随机选择=========')\r\n scene_edit.chose_item_img()\r\n # 检验名称编辑功能\r\n print('2、=========对场景进行名称编辑=========')\r\n logger.info('2、=========对场景进行名称编辑=========')\r\n scene_edit.edit_name('', common.random_name(), HomePage.home_tab_home)\r\n print('3、=========判断是否创建成功=========')\r\n logger.info('3、=========判断是否创建成功=========')\r\n result = scene_edit.check_creat_custom_scene()\r\n self.assertEqual(0, result, '场景创建异常')\r\n self.i += 1\r\n print('已创建场景数:【' + str(self.i)+'】')\r\n common.back_top()\r\n HomePage(self.driver).home_click()\r\n @classmethod\r\n def tearDownClass(cls):\r\n Common(MyDriver.cur_driver()).back_home()\r\n print(\"----------自定义场景--创建冒烟结束---------\")","sub_path":"LuxdomoAuto/testCase/LeeBus/scene/scene_edit/creat_scene.py","file_name":"creat_scene.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"30374188","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 27 06:34:52 2022\n\n@author: botond\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport itertools\nimport functools\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mtc\nimport seaborn as sns\nimport pingouin as pg\nfrom scipy import stats\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\nfrom tqdm import tqdm\nfrom IPython import get_ipython\n\nget_ipython().run_line_magic('cd', '..')\nfrom helpers.regression_helpers import check_covariance, match, \\\ncheck_assumptions, detrend_diab_sex, detrend_diab_sex_info\nfrom helpers.data_loader import DataLoader\nfrom helpers.plotting_style import plot_pars, plot_funcs\nget_ipython().run_line_magic('cd', 'cognition')\n\n\n# =============================================================================\n# Setup\n# =============================================================================\n\n# Filepaths\nHOMEDIR = os.path.abspath(os.path.join(__file__, \"../../../\")) + \"/\"\nSRCDIR = HOMEDIR + \"data/\"\nOUTDIR = HOMEDIR + \"results/cognition/\"\n\n# Inputs\nCTRS = \"diab\" # Contrast: diab or age\nEXTRA = \"_sex\"\nT1DM_CO = 40 # Cutoff age value for age of diagnosis of diabetes to separate\n# T1DM from T2DM. 
Explained below in more details.\nAGE_CO = 50 # Age cutoff (related to T1DM_CO) to avoid T2DM low duration subjects\nRLD = 0\n\nprint(\"\\nRELOADING REGRESSORS!\\n\") if RLD else ...\n\n# <><><><><><><><>\n# raise\n# <><><><><><><><>\n\n# %%\n# =============================================================================\n# Load data\n# =============================================================================\n\n# Load cognitive data\n# -------\n# Labels\nlabels = {\n \"4282-2.0\": \"Numeric_Memory\",\n \"6350-2.0\": \"Executive_Function\",\n \"20016-2.0\": \"Abstract_Reasoning\",\n \"20023-2.0\": \"Reaction_Time\",\n \"23324-2.0\": \"Processing_Speed\",\n }\n\n# Load data\ndata = pd \\\n .read_csv(SRCDIR + \"cognition/cognition_data.csv\") \\\n [[\"eid\",\n *labels.keys()\n ]]\n\n# Rename columns\ndata = data.rename(labels, axis=1)\n\n# Load regressors\n# ------\n\n# Initiate loader object\ndl = DataLoader()\n\n# Load data\ndl.load_basic_vars(SRCDIR)\n\n# Extract relevant variables\nage, sex, diab, college, bmi, mp, hrt, age_onset, duration, htn = \\\n (dl.age, dl.sex, dl.diab, dl.college, dl.bmi, dl.mp, dl.hrt, dl.age_onset, \\\n dl.duration, dl.htn)\n\n\n# Restrictive variables\n# -----\n\n# Perform filtering\ndl.filter_vars(AGE_CO, T1DM_CO)\n\n# Extract filtered series\nage, mp, hrt, age_onset = dl.age, dl.mp, dl.hrt, dl.age_onset\n\n# %%\n# =============================================================================\n# Transform\n# =============================================================================\n\n# Status\nprint(\"Transforming.\")\n\n\n# Merge IVs and put previously defined exclusions into action (through inner merge)\nregressors = functools.reduce(\n lambda left, right: pd.merge(left, right, on=\"eid\", how=\"inner\"),\n [age, sex, college, diab, mp, hrt, htn, age_onset, duration]\n ) \\\n .drop([\"mp\", \"hrt\", \"age_onset\"], axis=1)\n\n# Get regressor columns for later\nreg_cols = regressors.columns\n\n# Merge domain data and clean out invalied entries\ndata_merged = data[data>0].dropna()\n\n# Get data columns for later\ndata_cols = data_merged.columns\n\n# Merge regressors with data\nregressors_y = regressors.merge(data_merged, on=\"eid\", how=\"inner\")\n\n# Drop data columns\nregressors_clean = regressors_y.drop(data_cols[1:], axis=1)\n\n# Match\nif RLD == False:\n\n # Balance out duration as well\n # ------\n regressors_detrended = detrend_diab_sex(regressors_clean, thr=.05)\n\n # Match\n regressors_matched = match(\n df=regressors_detrended,\n main_vars=[\"sex\", \"diab\"],\n vars_to_match=[\"age\", \"college\", \"htn\"],\n random_state=111\n )\n\n # Look at balance\n detrend_diab_sex_info(regressors_matched)\n\n# Save matched regressors matrix\nif RLD == False:\n regressors_matched \\\n .reset_index(drop=True) \\\n .to_csv(OUTDIR + \"regressors/pub_meta_cognition_lineplot_matched_\" \\\n f\"regressors_{CTRS}{EXTRA}.csv\")\n\n# Get regressors\nregressors_matched = pd.read_csv(\n OUTDIR + f\"regressors/pub_meta_cognition_lineplot_matched_regressors_\" \\\n f\"{CTRS}{EXTRA}.csv\",\n index_col=0)\n\n # %%\n# =============================================================================\n# Statistics\n# =============================================================================\n\n# Status\nprint(\"Statistics.\")\n\n# Descriptive stats\n# ---------\n\n# Make copy\ndf = regressors_matched.copy()\n\n# Duration\nprint(\"Median duration:\\n\", df.groupby([\"sex\", \"diab\"])[\"duration\"].median())\nprint(\"Mean duration:\\n\", df.groupby([\"sex\", 
\"diab\"])[\"duration\"].mean())\n\n# Sample sizes\nss = df \\\n .pipe(lambda df: df.assign(**{\n \"age_group\":pd.cut(df[\"age\"], bins=np.arange(0, 100, 5)).astype(str)\n })) \\\n .groupby([\"age_group\"])[\"eid\"].count() \\\n .divide(4)\n\nprint(\"Sample sizes from age groups:\\n\", ss)\n\n# Merge and standardize\ndf = regressors_matched \\\n .merge(data_merged, on=\"eid\") \\\n .set_index(list(reg_cols)) \\\n .pipe(lambda df: df.assign(**{\n \"Numeric_Memory\": df[\"Numeric_Memory\"],\n \"Executive_Function\": -1*df[\"Executive_Function\"],\n \"Abstract_Reasoning\": df[\"Abstract_Reasoning\"],\n \"Reaction_Time\": -1*df[\"Reaction_Time\"],\n \"Processing_Speed\": df[\"Processing_Speed\"]\n })) \\\n .pipe(lambda df:\n ((df - df.mean(axis=0))/df.std(axis=0)).mean(axis=1)) \\\n .rename(\"score\") \\\n .reset_index() \\\n\n# Separately for sexes\n# >>>>>>>>\n\n# Labels for printing\nlabels = [\"Female\", \"Male\"]\n\n# It\nfor sex_val in [0, 1]:\n\n # Take relevant subset (F or M only)\n sdf = df.query(f'sex=={sex_val}')\n\n # Linear regression\n model = smf.ols(\"score ~ diab + age + C(college) + htn\", data=sdf)\n results = model.fit()\n # print(results.summary())\n\n print(f'\\n>>>>>>\\nCohort: {labels[sex_val]}')\n print(\n f'Coeffs: T2DM={results.params[\"diab\"]:.0f}, ' \\\n f'Age={results.params[\"age\"]:.0f}'\n )\n print(\n f'Estimated age gap: {results.params[\"diab\"]/results.params[\"age\"]:.2f} years.'\n )\n\n # Covariance matrix of coefficients\n # print(results.cov_params())\n\n# Interaction\n# >>>>>>>>>\n\n# Make a copy\nsdf = df.copy()\n\n# Linear regression\nmodel = smf.ols(\"score ~ diab + sex + diab*sex + age + C(college) + htn\", data=sdf)\nresults = model.fit()\n# print(results.summary())\n\nprint(\n f'\\nInteraction term (separate analysis): coeff:' \\\n f'{results.params[\"diab:sex\"]:.0f}, p value: {results.pvalues[\"diab:sex\"]:.2g}'\n )\n\n# Covariance matrix of coefficients\n# print(results.cov_params())\n\n\n\n\"\"\"\n# CI for the ratio below is computed using an online tool (Fieller method):\nhttps://www.graphpad.com/quickcalcs/errorProp1/?Format=SEM\n\nAn alternative approach would be to bootstrap using sigmas and covariances.\n\"\"\"\n\n# Estimated age gap between the two cohorts\nprint(f'\\nEstimated age gap: {results.params[\"diab\"]/results.params[\"age\"]:.2f}, years.')\n\n#TODO: -v\n# %%\n# =============================================================================\n# Plot\n# =============================================================================\n\n# Status\nprint(\"Plotting.\")\n\n# Prep\n# -----\n\n# Unpack plotting utils\nfs, lw = plot_pars\np2star, colors_from_values, float_to_sig_digit_str, pformat = plot_funcs\nlw = lw*1\n\n# Graphing df:\n# add back in data\n# unify columns of contrast variables\n# make age groups\ngdf = df \\\n .pipe(lambda df: df.assign(**{\n \"group\": df[[\"sex\", \"diab\"]].astype(str).agg(\"_\".join, axis=1),\n \"age_group\":pd.cut(df[\"age\"], bins=np.arange(0, 100, 5)).astype(str)\n })) \\\n .sort_values(by=[\"age\", \"group\"]) \\\n .query('age_group not in [\"(40, 45]\"]') \\\n .query('age_group not in [\"(40, 45]\", \"(45, 50]\"]') \\\n # .query('age_group not in [\"(40, 45]\", \"(45, 50]\", \"(75, 80]\"]')\n\n\n# Sample sizes\nss = regressors_matched.groupby([\"sex\", \"diab\"])[\"eid\"].count().to_list()[0]\n\n# Colors\npalette = sns.color_palette([\"coral\", \"maroon\", \"dodgerblue\", \"navy\"])\n\n# Content\n# -----\n\n# Make figure\nplt.figure(figsize=(5, 4))\n\n# Create plot\nsns.lineplot(data=gdf, 
x=\"age_group\", y=\"score\",\n hue=\"group\", ci=68, err_style=\"bars\",\n marker=\"o\", linewidth=1*lw, markersize=3*lw, err_kws={\"capsize\": 2*lw,\n \"capthick\": 1*lw,\n \"elinewidth\": 1*lw},\n sort=False, palette=palette)\n\n# Format\n# ----\n\n# Title\nplt.title(\"Cognitive Performance vs Age, T2DM Status and Sex\\n\" \\\n f\"N={ss} (Per Group, Exact Matched)\", x=0.37, y=1.05)\n\nplt.xlabel(\"Age group (year)\")\n\nplt.ylabel(\"Cognitive performance\\n(combined score from five tasks)\")\nplt.gca().yaxis.set_major_formatter(mtc.FuncFormatter\n (lambda x, pos: f\"{x:.2f}\"))\n# plt.annotate(\"×10$^5$\", xy=[0, 1.03], xycoords=\"axes fraction\",\n# fontsize=8*fs, va=\"center\")\n\nlegend_handles, _ = plt.gca().get_legend_handles_labels()\n[ha.set_linewidth(5) for ha in legend_handles]\n\nplt.legend(handles=legend_handles,\n labels=[\"F, HC\", \"F, T2DM+\", \"M, HC\", \"M, T2DM+\"],\n loc=1)\n\nplt.gca().xaxis.tick_bottom()\nplt.gca().yaxis.tick_left()\n# plt.xticks(rotation=45)\n\nfor sp in ['bottom', 'top', 'left', 'right']:\n plt.gca().spines[sp].set_linewidth(0.75*lw)\n plt.gca().spines[sp].set_color(\"black\")\n\n# Annotate stats\n# tval, pval = results.tvalues[\"diab\"], results.pvalues[\"diab\"]\n# text = f\"T2DM+ vs HC:\\nT={tval:.1f}, {pformat(pval)}{p2star(pval)}\"\n# plt.annotate(text, xycoords=\"axes fraction\", xy=[0.40, 0.08],\n# fontsize=8*fs, fontweight=\"regular\", ha=\"center\")\n\nplt.gca().yaxis.grid(True)\nplt.tight_layout()\n\n# Save\n# ----\n\nplt.tight_layout(rect=[0, 0, 1, 0.98])\nplt.savefig(OUTDIR + f\"figures/JAMA_meta_figure_cognition_lineplot{EXTRA}.pdf\",\n transparent=True)\n# plt.close(\"all\")\n\n\n\n\n\n\n\n","sub_path":"cognition/cognition_lineplot_sex.py","file_name":"cognition_lineplot_sex.py","file_ext":"py","file_size_in_byte":10006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"413220137","text":"from extras.constants import CF_TYPE_TEXT, CF_TYPE_INTEGER, CF_TYPE_BOOLEAN, CF_TYPE_DATE, CF_TYPE_URL, CF_TYPE_SELECT\nfrom extras.models import CustomField, CustomFieldChoice\n\nfrom ruamel.yaml import YAML\n\ntext_to_fields = {\n 'boolean': CF_TYPE_BOOLEAN,\n 'date': CF_TYPE_DATE,\n 'integer': CF_TYPE_INTEGER,\n 'selection': CF_TYPE_SELECT,\n 'text': CF_TYPE_TEXT,\n 'url': CF_TYPE_URL,\n}\n\ndef get_class_for_class_path(class_path):\n import importlib\n from django.contrib.contenttypes.models import ContentType\n\n module_name, class_name = class_path.rsplit(\".\", 1)\n module = importlib.import_module(module_name)\n clazz = getattr(module, class_name)\n return ContentType.objects.get_for_model(clazz)\n\nwith open('/opt/netbox/initializers/custom_fields.yml', 'r') as stream:\n yaml = YAML(typ='safe')\n customfields = yaml.load(stream)\n\n if customfields is not None:\n for cf_name, cf_details in customfields.items():\n custom_field, created = CustomField.objects.get_or_create(name = cf_name)\n\n if created:\n if cf_details.get('default', 0):\n custom_field.default = cf_details['default']\n\n if cf_details.get('description', 0):\n custom_field.description = cf_details['description']\n\n if cf_details.get('filterable', 0):\n custom_field.is_filterables = cf_details['filterable']\n\n if cf_details.get('label', 0):\n custom_field.label = cf_details['label']\n\n for object_type in cf_details.get('on_objects', []):\n custom_field.obj_type.add(get_class_for_class_path(object_type))\n\n if cf_details.get('required', 0):\n custom_field.required = cf_details['required']\n\n if cf_details.get('type', 
0):\n custom_field.type = text_to_fields[cf_details['type']]\n\n if cf_details.get('weight', 0):\n custom_field.weight = cf_details['weight']\n\n custom_field.save()\n\n for choice_details in cf_details.get('choices', []):\n choice = CustomFieldChoice.objects.create(\n field=custom_field,\n value=choice_details['value'])\n\n if choice_details.get('weight', 0):\n choice.weight = choice_details['weight']\n choice.save()\n\n print(\"🔧 Created custom field\", cf_name)\n","sub_path":"kubernetes/contrib/components/netbox/charts/netbox-app/resources/config/startup_scripts/20_custom_fields.py","file_name":"20_custom_fields.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"68627156","text":"from parse_config import parse_config\nfrom colorama import Fore\n\ndef get_value_from_config(variable_name):\n try:\n config_dict = parse_config()\n\n except FileNotFoundError:\n print(Fore.CYAN+\"Config file could not be find, you can create it typing\")\n print(Fore.LIGHTRED_EX+\"\\tgetvsix create-config\")\n print(Fore.RESET)\n return None\n \n result = config_dict.get(variable_name)\n if result is None:\n print(Fore.CYAN + \"Value of variable '{}' could not be found\".format(variable_name))\n print(Fore.RESET)\n return None\n\n else:\n return result.strip()\n","sub_path":"get_value_from_config.py","file_name":"get_value_from_config.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"491813070","text":"import torch.nn as nn\nimport torch\nfrom Transformer.modules import MultihHeadAttention, PositionwiseFeedForward\n\n\nclass EncoderLayer(nn.Module):\n\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super().__init__()\n self.slf_attn = MultihHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffc = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, enc_input, slf_attn_mask=None):\n enc_output, enc_slf_attn = self.slf_attn(enc_input, enc_input, enc_input, mask=slf_attn_mask)\n enc_output = self.pos_ffc(enc_output)\n\n return enc_output, enc_slf_attn\n\n\nclass DecoderLayer(nn.Module):\n\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super().__init__()\n self.slf_attn = MultihHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.enc_attn = MultihHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffc = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, dec_input, enc_output, slf_attn_mask=None, dec_enc_attn_mask=None):\n dec_output, dec_slf_attn = self.slf_attn(dec_input, dec_input, dec_input, mask=slf_attn_mask)\n dec_output, dec_enc_attn = self.enc_attn(dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)\n dec_output = self.pos_ffc(dec_output)\n\n return dec_output, dec_slf_attn, dec_enc_attn","sub_path":"Transformer/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"537917077","text":"\n\n\n\nfrom threading import Thread,Lock\nimport os,my_module1\n\n\n\n\ng_num = 1000000\n\ndef work1():\n global g_num\n lock.acquire()\n # time.sleep(0.1)\n for i in range(500000):\n g_num -= 1\n print(\"in work1 g_num is : %d\" % g_num)\n lock.release()\n\ndef work2():\n global g_num\n lock.acquire()\n for i in range(500000):\n g_num -= 1\n 
print(\"in work2 g_num is : %d\" % g_num)\n lock.release()\n\nif __name__ == '__main__':\n lock = Lock()\n t1 = Thread(target=work1)\n t2 = Thread(target=work2)\n t1.start()\n # time.sleep(1)\n t2.start()\n","sub_path":"多线程/互斥锁.py","file_name":"互斥锁.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"356284835","text":"T = int(input())\n\nfor t in range(1, T+1):\n N = int(input())\n max_danjo = 0\n danjo_list = []\n n_list = list(map(int, input().split()))\n result = True\n\n if len(n_list) == 1:\n max_danjo = -1\n else:\n for i in range(len(n_list)-1):\n for j in range((i+1), len(n_list)):\n print(i, j)\n isdanjo = str(n_list[i] * n_list[j])\n for d in range(len(isdanjo) - 1):\n if int(isdanjo[d]) > int(isdanjo[d+1]):\n result = False\n break\n\n if result == True:\n print(isdanjo)\n if int(isdanjo) > max_danjo:\n max_danjo = int(isdanjo)\n print('#{} {}'.format(t, max_danjo))","sub_path":"intermediate/day_08/단조_solving.py","file_name":"단조_solving.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"573883707","text":"import sys\nimport glob\nimport serial\nimport errno\n\n\nclass ScanSerial:\n\n def __init__(self):\n \"\"\"\n\n \"\"\"\n self.available = []\n self.possible_ports = []\n self.serial_port = None\n self.gps_dataformats = [b'$GPRMC', b'$GPGGA', b'$GPGSA']\n\n if sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n self.possible_ports += glob.glob(\"/dev/rfcomm[0-9]*\")\n self.possible_ports += glob.glob(\"/dev/ttyUSB[0-9]*\")\n\n elif sys.platform.startswith('win'):\n self.possible_ports += [\"\\\\.\\COM%d\" % i for i in range(256)]\n\n elif sys.platform.startswith('darwin'):\n exclude = [\n '/dev/tty.Bluetooth-Incoming-Port',\n '/dev/tty.Bluetooth-Modem'\n ]\n self.possible_ports += [port for port in glob.glob('/dev/tty.*') if port not in exclude]\n\n def get_gps_port(self):\n \"\"\"\n returns the port where the gps is connected\n\n :return: gps port as string\n \"\"\"\n for port in self.available_ports():\n self.serial_port = serial.Serial(port=port)\n self.serial_port.timeout = 1\n line = self.serial_port.readline()\n for dataformat in self.gps_dataformats:\n if dataformat in line:\n self.serial_port.close()\n return port\n\n def try_port(self, port_str):\n \"\"\"\n returns boolean for port availability\n \"\"\"\n try:\n s = serial.Serial(port_str)\n s.close()\n return True\n\n except serial.SerialException:\n pass\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e\n\n return False\n\n def available_ports(self):\n \"\"\"\n creates a list with available ports\n\n :return: list with available ports\n \"\"\"\n for port in self.possible_ports:\n if self.try_port(port):\n self.available.append(port)\n\n return self.available\n\n","sub_path":"src/communication/scan_serial.py","file_name":"scan_serial.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"122431265","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndata = np.array([[6, 7], [8, 9], [10, 13], [14, 17.5], [18, 18]])\n\nX = data[:, :1]\ny = data[:, 1:]\n\n\nfrom sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression()\nmodel.fit(X, y)\n\nyp = model.predict(X)\n\n\nscore = model.score(X, y) # r-squared score\n\nprint(\"train score: %.6f\" % score)\n\nplt.plot(X, y, 
'k.')\nplt.plot(X, yp, 'r-')\nplt.xlabel('size')\nplt.ylabel('price')\nplt.grid(True)\nplt.show()","sub_path":"fujian_telecom/note/scripts/LinearRegression_step02.py","file_name":"LinearRegression_step02.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"248715831","text":"import tensorflow as tf\n\n# a = tf.constant([1.0,2.0],name = \"a\") #定义两个向量\n# b = tf.constant([2.0,3.0],name = \"b\")\n# result = a + b\n#\n# w1 = tf.Variable(tf.random_normal([2,3],stddev=1,seed=1)) #这里也有计算要执行\n# w2 = tf.Variable(tf.random_normal([3,1],stddev=1,seed=2))\n#\n# x = tf.constant([[0.7,0.9]])\n#\n# c = tf.matmul(x,w1)\n# y = tf.matmul(c,w2)\n#\n# with tf.Session() as sess:\n# sess.run(w1.initializer)\n# sess.run(w2.initializer)\n# print(sess.run(y))\n#\n#\n# print(tf.get_default_graph())\n\nw1 = tf.Variable(tf.random_normal([2, 3], stddev=1))\nw2 = tf.Variable(tf.random_normal([3, 1], stddev=1))\n\nx = tf.placeholder(tf.float32, shape=(1, 2), name=\"input\")\n\na = tf.matmul(x, w1)\ny = tf.matmul(a, w2)\n\nwith tf.Session() as sess:\n init_op = tf.initialize_all_variables()\n sess.run(init_op)\n print(sess.run(y, feed_dict={x: [[0.7, 0.9]]}))\n","sub_path":"learn_test3.py","file_name":"learn_test3.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"282196736","text":"import pandas as pd\nfrom pandas.io.json import json_normalize\nimport ast\nimport json\nimport time\nimport Helpers\n\n# pandas display config\npd.set_option('display.max_columns', 20)\npd.set_option('display.max_colwidth', 1000)\npd.set_option('display.width', 10000)\n\nstart_time = time.time()\ncsv_path = 'data/Datasets/merged_amazon_meta_reviews.csv'\n\nchunksize = 100000\nasin_title_dict = dict()\n\nstart = 0\nend = chunksize\ntotal_rows = sum(1 for line in open(csv_path))\n\n\n# remove keys which are not in dict and collections with length < 2. 
lists are mutable, pass by ref\ndef clean_flattened_collection(flattened_collection):\n i = 0\n flattened_items_len = len(flattened_collection)\n while i < flattened_items_len:\n items = flattened_collection[i]\n j = 0\n items_len = len(items)\n while j < items_len:\n if items[j] not in asin_title_dict:\n items.remove(items[j])\n j -= 1\n items_len -= 1\n j += 1\n\n if len(items) < 2:\n flattened_collection.remove(items)\n i -= 1\n flattened_items_len -= 1\n\n i += 1\n\n\ndef process(chunk, start, end):\n print('start processing batch from %d to %d - Remaining: %d rows' % (start, end, total_rows - end))\n size = len(chunk)\n\n meta_reviews = chunk.loc[:, ~chunk.columns.str.contains('^Unnamed')].dropna() # remove unnamed columns\n\n # Quick Dict for lookup\n asin_title = meta_reviews[['asin', 'title']]\n asin_title_dict.update(asin_title.set_index('asin')['title'].to_dict())\n\n items_by_reviewer_id = [items for items in meta_reviews.groupby('reviewerID')['asin'].apply(list).tolist()]\n\n clean_flattened_collection(items_by_reviewer_id)\n Helpers.save_list_to_file(items_by_reviewer_id,\n 'data/Preprocessed/by_reviewer_id/items_by_reviewer_id_%d_%d.txt' % (\n start, end))\n\n items_by_categories = [items for items in meta_reviews.groupby('categories')['asin'].apply(list).tolist()]\n\n clean_flattened_collection(items_by_categories)\n Helpers.save_list_to_file(items_by_categories,\n 'data/Preprocessed/by_categories/items_by_categories_%d_%d.txt' % (start, end))\n\n if len(meta_reviews.query('related.notnull()', engine='python')):\n\n related_cols = json_normalize(meta_reviews['related'].apply(ast.literal_eval))\n meta_reviews_related = meta_reviews.reset_index().join(related_cols)\n del related_cols\n\n if 'related' in meta_reviews_related:\n del meta_reviews_related['related'] # not interesting\n\n if 'related_x' in meta_reviews_related:\n del meta_reviews_related['related_x'] # not interesting\n\n if 'related_y' in meta_reviews_related:\n del meta_reviews_related['related_y'] # not interesting\n\n if 'also_viewed' in meta_reviews_related:\n del meta_reviews_related['also_viewed'] # not interesting\n\n meta_reviews_related = meta_reviews_related.dropna(axis=0, subset=['asin'])\n\n # add asin item to the related lists\n for index, row in meta_reviews_related.iterrows():\n if 'also_bought' in row and isinstance(row['also_bought'], (list,)):\n row['also_bought'].insert(0, str(row['asin']))\n if 'bought_together' in row and isinstance(row['bought_together'], (list,)):\n row['bought_together'].insert(0, str(row['asin']))\n if 'buy_after_viewing' in row and isinstance(row['buy_after_viewing'], (list,)):\n row['buy_after_viewing'].insert(0, str(row['asin']))\n\n if 'also_bought' in meta_reviews_related:\n items_by_bought_too = meta_reviews_related.groupby('asin')[\n 'also_bought'].apply(list).tolist()\n flattened_items_by_bought_too = Helpers.flatten_collection(items_by_bought_too)\n clean_flattened_collection(flattened_items_by_bought_too)\n Helpers.save_list_to_file(flattened_items_by_bought_too,\n 'data/Preprocessed/also_bought/flattened_items_by_bought_too_%d_%d.txt' % (\n start, end))\n del items_by_bought_too\n del flattened_items_by_bought_too\n\n if 'bought_together' in meta_reviews_related:\n items_by_bought_together = meta_reviews_related.groupby('asin')['bought_together'].apply(list).tolist()\n flattened_items_by_bought_together = Helpers.flatten_collection(items_by_bought_together)\n clean_flattened_collection(flattened_items_by_bought_together)\n 
Helpers.save_list_to_file(flattened_items_by_bought_together,\n 'data/Preprocessed/bought_together/flattened_items_by_bought_together_%d_%d.txt' % (\n start, end))\n del items_by_bought_together\n del flattened_items_by_bought_together\n\n if 'buy_after_viewing' in meta_reviews_related:\n items_by_bought_after_viewing = meta_reviews_related.groupby('asin')[\n 'buy_after_viewing'].apply(list).tolist()\n flattened_items_by_bought_after_viewing = Helpers.flatten_collection(items_by_bought_after_viewing)\n clean_flattened_collection(flattened_items_by_bought_after_viewing)\n Helpers.save_list_to_file(flattened_items_by_bought_after_viewing,\n 'data/Preprocessed/buy_after_viewing/flattened_items_by_bought_after_viewing_%d_%d.txt' % (\n start, end))\n del items_by_bought_after_viewing\n del flattened_items_by_bought_after_viewing\n\n del meta_reviews_related\n else:\n print('no related')\n\n start += size\n end += size\n\n del chunk\n del meta_reviews\n\n print('processing batch from %d to %d finished.' % (start, end))\n return start, end\n\n\nHelpers.remove_all_from_folder('data/Preprocessed/asin_title_dict/')\nHelpers.remove_all_from_folder('data/Preprocessed/also_bought/')\nHelpers.remove_all_from_folder('data/Preprocessed/bought_together/')\nHelpers.remove_all_from_folder('data/Preprocessed/buy_after_viewing/')\nHelpers.remove_all_from_folder('data/Preprocessed/by_categories/')\nHelpers.remove_all_from_folder('data/Preprocessed/by_reviewer_id/')\n\nprint('Total rows to process: %d' % total_rows)\nfor chunk in pd.read_csv(csv_path, chunksize=chunksize,\n dtype={\"reviewerID\": str, \"asin\": str, \"overall\": float, \"title\": str, \"categories\": str,\n \"related\": str}):\n start, end = process(chunk, start, end)\n\nwith open('data/Preprocessed/asin_title_dict/asin_title_dict.json', 'w') as fp:\n json.dump(asin_title_dict, fp)\n\nelapsed_time = time.time() - start_time\nprint('Building sentences finished. Elapsed time: %d s.' % elapsed_time)\n\nwith open('data/Preprocessed/elapsed_time_log', 'a') as f:\n f.write(\"Building sentences: %d s. (%d min)\\n\" % (elapsed_time, elapsed_time/60))\n\n","sub_path":"AmazonBuildSentences.py","file_name":"AmazonBuildSentences.py","file_ext":"py","file_size_in_byte":7078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"262088492","text":"from __future__ import absolute_import, print_function\n\nfrom django import forms\nfrom sentry.auth.view import AuthView, ConfigureView\nfrom sentry.models import AuthIdentity\n\nfrom .client import RagtagClient\n\n\ndef _get_name_from_email(email):\n \"\"\"\n Given an email return a capitalized name. Ex. 
john.smith@example.com would return John Smith.\n \"\"\"\n name = email.rsplit('@', 1)[0]\n name = ' '.join([n_part.capitalize() for n_part in name.split('.')])\n return name\n\n\nclass FetchUser(AuthView):\n def __init__(self, client_id, client_secret, *args, **kwargs):\n self.client = RagtagClient(client_id, client_secret)\n super(FetchUser, self).__init__(*args, **kwargs)\n\n def handle(self, request, helper):\n access_token = helper.fetch_state('data')['access_token']\n\n user = self.client.get_user(access_token)\n\n # A user hasn't set their name in their Ragtag profile so it isn't\n # populated in the response\n if not user.get('full_name'):\n user['name'] = _get_name_from_email(user['email'])\n\n helper.bind_state('user', user)\n\n return helper.next_step()\n\n\nclass RagtagConfigureView(ConfigureView):\n def dispatch(self, request, organization, auth_provider):\n return self.render('sentry_auth_ragtag/configure.html')\n","sub_path":"sentry_auth_ragtag/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"197935026","text":"\"\"\"How to use:\n\nrun\n\npython manage.py.shell\n\nthen in that shell import this file and run build_superuser()\n\n\"\"\"\n\nfrom django.contrib.auth.models import User\nfrom dashboard.views import Position, Brother\nfrom dashboard.models import Semester, query_positions_with_committee\nimport datetime\n\n\ndef create_django_superuser(username):\n user = User()\n user.username = username\n user.is_staff = True\n user.is_admin = True\n user.is_superuser = True\n user.save()\n\n return user\n\ndef create_superbrother(user):\n new_brother = Brother()\n new_brother.user = user\n new_brother.first_name = \"fillmein\"\n new_brother.last_name = \"fillmein\"\n new_brother.case_ID = user.username\n new_brother.birthday = datetime.date.today()\n new_brother.save()\n\ndef create_positions():\n for title, _ in Position.PositionChoices.choices:\n new_position = Position()\n new_position.title = title\n new_position.save()\n\ndef add_user_to_all_positions(user):\n for position in Position.objects.all():\n position.brothers.add(user.brother)\n position.save()\n\n\ndef build_superuser(username):\n user = create_django_superuser(username)\n\n create_superbrother(user)\n\n create_positions()\n\n add_user_to_all_positions(user)\n\ndef make_user_super(username):\n user = User.objects.get(username=username)\n user.is_staff = True\n user.is_admin = True\n user.is_superuser = True\n user.save()\n\ndef add_semesters():\n for year in Semester.YEAR_CHOICES:\n for season in Semester.SEASON_CHOICES:\n sem = Semester()\n sem.year = year[0]\n sem.season = season[0]\n sem.save()\n\n# add all the brothers in the given csv file path\n# the file should have each line as the following\n# firstname,lastname,caseid\n# and make sure there is a new line at the end\ndef add_all_brothers(csvpath, assume_users_exist=False):\n lines = []\n with open(csvpath, \"r\") as inp:\n lines = inp.readlines()\n\n for line in lines:\n first, last, caseid = line.split(\",\")\n # strip newline\n caseid = caseid[:-1]\n if assume_users_exist:\n user = User.objects.get(username=caseid)\n else:\n user = User()\n user.username = caseid\n user.save()\n\n add_brother(first, last, user)\n\ndef add_brother(first, last, user):\n new_brother = Brother()\n new_brother.user = user\n new_brother.first_name = first\n new_brother.last_name = last\n new_brother.case_ID = user.username\n new_brother.birthday = 
datetime.date.today()\n new_brother.save()\n\n","sub_path":"create_positions.py","file_name":"create_positions.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"298972840","text":"\"tools.Tool for bmtagger.sh.\"\n\nimport tools\nimport util.file\nimport os\nimport logging\nfrom tools import urlretrieve\nlog = logging.getLogger(__name__)\n\n\nclass BmtaggerTools(tools.Tool):\n '''\n \"Abstract\" base class for bmtagger.sh, bmfilter, extract_fullseq, srprism.\n Subclasses must define class member subtoolName.\n\n Note: bmtagger calls blastn so that must be installed somewhere in $PATH.\n\n WARNING: bmtagger.sh does not work with the version of getopt that ships\n with Mac OS X. This can be worked around by installing linux getopt\n using fink and assuring that /sw/bin comes before /usr/bin in $PATH.\n\n '''\n\n # subtoolName must be defined in subclass\n\n def __init__(self, install_methods=None):\n if install_methods is None:\n install_methods = []\n install_methods.append(DownloadBmtagger(self.subtoolName))\n tools.Tool.__init__(self, install_methods=install_methods)\n\n\nclass BmtaggerShTool(BmtaggerTools):\n subtoolName = 'bmtagger.sh'\n\n\nclass BmfilterTool(BmtaggerTools):\n subtoolName = 'bmfilter'\n\n\nclass Extract_fullseqTool(BmtaggerTools):\n subtoolName = 'extract_fullseq'\n\n\nclass SrprismTool(BmtaggerTools):\n subtoolName = 'srprism'\n\n\nclass DownloadBmtagger(tools.InstallMethod):\n executables = ['bmtagger.sh', 'bmfilter', 'extract_fullseq', 'srprism']\n\n def __init__(self, subtoolName):\n self.installed = False\n self.targetDir = os.path.join(util.file.get_build_path(), 'bmtagger')\n self.targetpath = os.path.join(self.targetDir, subtoolName)\n tools.InstallMethod.__init__(self)\n\n def is_installed(self):\n return self.installed\n\n def executable_path(self):\n return self.installed and self.targetpath or None\n\n def verify_install(self):\n self.installed = all(os.access(os.path.join(self.targetDir, executable), (os.X_OK | os.R_OK))\n for executable in self.executables)\n return self.installed\n\n def _attempt_install(self):\n if self.verify_install():\n return\n util.file.mkdir_p(self.targetDir)\n urlBase = 'ftp://ftp.ncbi.nlm.nih.gov/pub/agarwala/bmtagger/'\n uname = os.uname()\n if uname[0] == 'Darwin':\n urlBase += 'mac-os/'\n elif uname[0] != 'Linux' or not uname[4].endswith('64'):\n log.debug('OS %s not implemented', uname[0])\n return\n for executable in self.executables:\n path = os.path.join(self.targetDir, executable)\n url = urlBase + executable\n log.info('Downloading from %s ...', url)\n urlretrieve(url, path)\n os.system('chmod +x ' + path)\n self.verify_install()\n","sub_path":"tools/bmtagger.py","file_name":"bmtagger.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"135789709","text":"import random\r\nimport os\r\n\r\ndef create_splash():\r\n path = \"art\"\r\n\r\n with open(file, 'r') as f:\r\n lines = f.read().splitlines()\r\n\r\n print(lines)\r\n\r\n\r\n print(\"////////Don't Flip the Table!\\\\\\\\\\\\\\\\\")\r\n print(\"///////////(╯°□°)╯︵ ┻━┻\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\")\r\n print(\"\")\r\n\r\ndef get_puzzle():\r\n path = \"data\"\r\n\r\n file_names = os.listdir(path)\r\n\r\n for i, f in enumerate(file_names):\r\n print(str(i + 1) + \")\" + f)\r\n \r\n choice = input('pick one ')\r\n choice = int(choice)\r\n\r\n file = path + \"/\" + 
file_names[choice]\r\n\r\n with open(file, 'r') as f:\r\n lines = f.read().splitlines()\r\n\r\n category_name = lines[0]\r\n puzzle = random.choice( lines[1:] )\r\n\r\n return puzzle\r\n\r\ndef get_solved(puzzle, guesses):\r\n solved = \"\"\r\n\r\n for letter in puzzle:\r\n if letter in guesses:\r\n solved += letter\r\n else:\r\n solved += \"-\"\r\n\r\n print(\"\")\r\n return solved\r\n\r\ndef get_guess():\r\n while True:\r\n letter = input(\"Guess a letter: \")\r\n\r\n if len(letter) < 2 and letter.isalpha():\r\n return letter\r\n else:\r\n print(\"\")\r\n print(\"Enter one letter ya nub.\")\r\n \r\n\r\ndef display_board(solved, guesses, strikes):\r\n print(\"\")\r\n print(solved)\r\n print(\"(\" + guesses + \")\")\r\n\r\n if strikes == 1:\r\n print(\"(╯\")\r\n elif strikes == 2:\r\n print(\"(╯°□°\")\r\n elif strikes == 3:\r\n print(\"(╯°□°)╯\")\r\n elif strikes == 4:\r\n print(\"(╯°□°)╯︵ ┻\")\r\n elif strikes == 5:\r\n print(\"(╯°□°)╯︵ ┻━\")\r\n elif strikes == 6:\r\n print(\"(╯°□°)╯︵ ┻━┻\")\r\n else:\r\n print()\r\n\r\ndef show_result(strikes, limit):\r\n if strikes == limit:\r\n print(\"\")\r\n print(\"ur not even good\")\r\n else:\r\n print(\"\")\r\n print(\"Noice job m8\")\r\n\r\ndef play_again():\r\n while True:\r\n decision = input(\"Would you like to play again? (y/n) \")\r\n\r\n if decision == 'y' or decision == 'yes':\r\n return True\r\n elif decision == 'n' or decision == 'no':\r\n return False\r\n else:\r\n print(\"\")\r\n print(\"Please enter 'y' or 'n'.\")\r\n\r\ndef credits_screen():\r\n print(\"\")\r\n print(\"********Don't Flip the Table********\")\r\n print(\"**********(╯°□°)╯︵ ┻━┻************\")\r\n print(\"************************************\")\r\n print(\"************By Tristan**************\")\r\n\r\ndef play():\r\n puzzle = get_puzzle()\r\n guesses = \"\"\r\n solved = get_solved(puzzle, guesses)\r\n\r\n strikes = 0\r\n limit = 6\r\n\r\n print(\"\")\r\n print(solved)\r\n\r\n while solved != puzzle and strikes < limit:\r\n letter = get_guess()\r\n\r\n if letter not in puzzle:\r\n strikes += 1\r\n \r\n guesses += letter\r\n solved = get_solved(puzzle, guesses)\r\n display_board(solved, guesses, strikes)\r\n\r\n show_result(strikes, limit)\r\n \r\n\r\ncreate_splash()\r\n\r\nplaying = True\r\n\r\nwhile playing:\r\n play()\r\n playing = play_again()\r\n\r\ncredits_screen()\r\n\r\n#\r\n\r\n\r\n","sub_path":"hangman2.py","file_name":"hangman2.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"290033551","text":"#!/usr/bin/env python\n\nimport argparse\nimport json\nimport operator\nimport os\nimport subprocess\nimport sys\nfrom datetime import datetime\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--partial', dest='partial', action='store_true', help='Only download a small subset of data (for testing)')\n parser.add_argument(\"version_id\")\n parser.add_argument(\"datatable_name\")\n parser.add_argument(\"galaxy_datamanager_filename\")\n args = parser.parse_args()\n\n with open(args.galaxy_datamanager_filename) as fh:\n config = json.load(fh)\n\n output_directory = config.get(\"output_data\", [{}])[0].get(\"extra_files_path\", None)\n data_manager_dict = {}\n data_manager_dict[\"data_tables\"] = config.get(\"data_tables\", {})\n data_manager_dict[\"data_tables\"][args.datatable_name] = data_manager_dict[\n \"data_tables\"\n ].get(args.datatable_name, [])\n\n os.mkdir(output_directory)\n cmd_args = ['funannotate', 'setup', 
'-d', output_directory, '-b', 'all']\n if args.partial:\n cmd_args += ['-i', 'merops', '-b', 'eukaryota']\n proc = subprocess.Popen(args=cmd_args, shell=False, cwd=output_directory)\n return_code = proc.wait()\n if return_code:\n print(\"Error downloading Funannotate database.\", file=sys.stderr)\n sys.exit(return_code)\n\n version_id = datetime.today().strftime('%Y-%m-%d-%H%M%S')\n\n version = '1.0'\n\n data_manager_dict[\"data_tables\"][args.datatable_name].append(\n dict(\n value=version_id,\n description=\"Funannotate database %s\" % version_id,\n format_version=version,\n path=output_directory,\n )\n )\n\n data_manager_dict[\"data_tables\"][args.datatable_name].sort(\n key=operator.itemgetter(\"value\"), reverse=True\n )\n with open(args.galaxy_datamanager_filename, \"w\") as fh:\n json.dump(data_manager_dict, fh, indent=2, sort_keys=True)\n","sub_path":"data_managers/data_manager_funannotate/data_manager/funannotate.py","file_name":"funannotate.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"238287970","text":"# Урок 4. Парсинг HTML. XPath\r\n# Написать приложение, которое собирает основные новости с сайтов mail.ru, lenta.ru.\r\n# Для парсинга использовать xpath. Структура данных должна содержать:\r\n# * название источника,\r\n# * наименование новости,\r\n# * ссылку на новость,\r\n# * дата публикации\r\n\r\nfrom pprint import pprint\r\nfrom lxml import html\r\nimport requests\r\nimport pandas as pd\r\nimport time\r\nimport datetime\r\nimport numpy as np\r\nfrom datetime import datetime, date, time, timedelta\r\n\r\n\r\nheaders = {'accept':'*/*',\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'}\r\n\r\ndef mail_parce(headers):\r\n mail_link = ('https://www.mail.ru')\r\n\r\n req = requests.get(mail_link, headers=headers)\r\n root = html.fromstring(req.text)\r\n\r\n title = root.xpath('//div[@class=\"news-item__inner\"]/a[last()]/text() | //div[@class=\"news-item__content\"]/h3/text()')\r\n href = root.xpath('//div[@class=\"news-item__inner\"]/a[last()]/@href | //div[@class=\"news-item o-media news-item_media news-item_main\"]/a/@href')\r\n data_time = []\r\n source = 'mail.ru'\r\n\r\n # Переходим по ссылке каждой новости, чтобы вытянуть дату публикации\r\n for link in href:\r\n link_req = requests.get(link, headers=headers)\r\n root = html.fromstring(link_req.text)\r\n # Вторая часть выражения есть, потому что у кино.маил.ру верстка другая\r\n dt = root.xpath('//span[@class=\"note__text breadcrumbs__text js-ago\"]/@datetime | //span[@class=\"breadcrumbs__item js-ago\"]/@datetime')\r\n if not dt:\r\n data_time.append(np.nan)\r\n else:\r\n data_time.append(dt[0])\r\n #time.sleep(1)\r\n\r\n data_time_std = []\r\n\r\n # Приводим data_time к стандартному типу\r\n for dt in data_time:\r\n _date,_time = dt.split(sep='T')\r\n _date = _date.split(sep='-')\r\n _year = _date[0]\r\n _month =_date[1]\r\n _day = _date[2]\r\n _time, _time_inc = _time.split(sep='+')\r\n _time =_time.split(sep=':')\r\n _time_inc = _time_inc.split(sep=':')\r\n _hour = _time[0]\r\n _minute = _time[1]\r\n _sec = _time[2]\r\n _hour_inc = _time_inc[0]\r\n\r\n d = date(int(_year), int(_month), int(_day))\r\n t = time(int(_hour), int(_minute))\r\n\r\n data_time_std.append(datetime.combine(d, t) + timedelta(hours=int(_hour_inc)))\r\n\r\n data_time = data_time_std\r\n\r\n #Формируем результат\r\n result = pd.DataFrame({'title' : title,\r\n 
'href' : href,\r\n 'source': source,\r\n 'date_time': data_time},\r\n columns=['title','href', 'date_time', 'source'])\r\n\r\n return result\r\n\r\n\r\ndef lenta_parce(headers):\r\n lenta_link = ('https://lenta.ru')\r\n req = requests.get(lenta_link, headers=headers)\r\n root = html.fromstring(req.text)\r\n title = root.xpath(\"//div[contains(@class, 'first-item')]/h2/a/text() | //div[contains(@class, 'span4')]/div[contains(@class, 'item')]/a/text()\")\r\n href = root.xpath(\"//div[contains(@class, 'span4')]/div[contains(@class, 'item')]/a/@href\")\r\n data_time = root.xpath(\"//div[contains(@class, 'first-item')]/h2/a/time/@datetime | //div[contains(@class, 'span4')]/div[contains(@class, 'item')]/a/time/@datetime\")\r\n months = {'января':1, 'февраля':2, 'марта':3, 'апреля':4, 'мая':5, 'июня':6,\r\n 'июля':7, 'августа':8, 'сентября':9, 'октября':10, 'ноября':11, 'декабря':12}\r\n data_time_std = []\r\n\r\n # Приводим data_time к стандартному типу\r\n for dt in data_time:\r\n _time, _date = dt.split(sep=',')\r\n _hour, _minute = _time.split(sep=':')\r\n _date = _date.split(sep=' ')\r\n _day = _date[1]\r\n _month = months[_date[2]]\r\n _year = _date[3]\r\n d = date(int(_year), int(_month), int(_day))\r\n t = time(int(_hour), int(_minute))\r\n data_time_std.append(datetime.combine(d, t))\r\n\r\n data_time = data_time_std\r\n\r\n source = 'lenta.ru'\r\n for i in range(len(href)):\r\n href[i] = lenta_link + href[i]\r\n #print(data_time)\r\n #print(title)\r\n result = pd.DataFrame({'title': title,\r\n 'href': href,\r\n 'date_time': data_time,\r\n 'source': source},\r\n columns=['title', 'href', 'date_time', 'source'])\r\n\r\n return result\r\n\r\nresult = lenta_parce(headers)\r\nresult.to_csv('result_lenta.csv')\r\nresult_mail = mail_parce(headers)\r\nresult_mail.to_csv('result_mail.csv')\r\n#pprint(result)\r\nresult = result.append(result_mail, ignore_index=True)\r\nresult.to_csv('result_mail_lenta.csv')\r\n","sub_path":"SOD/SOD_homework_1/venv/sod_hw_4.py","file_name":"sod_hw_4.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"240597872","text":"from Inequality.logic.logicRL import *\nfrom Inequality.legacy.logic.utils import *\nfrom copy import deepcopy\nimport random\nimport itertools\nimport os\n\nimport numpy as np\nimport torch.utils.data as data_handler\nimport torch\n\nReal = LogicFunction(\"Real\", 1)\nNonNegative = LogicFunction(\"NonNegative\", 1)\nBiggerOrEqual = LogicFunction(\"BiggerOrEqual\", 2)\nSmallerOrEqual = LogicFunction(\"SmallerOrEqual\", 2)\nstandard_logic_functions = {\n \"Real\": Real,\n \"NonNegative\": NonNegative,\n \"BiggerOrEqual\": BiggerOrEqual,\n \"SmallerOrEqual\": SmallerOrEqual,\n}\n\nadd = NumericalFunction(\"add\", 2)\nsub = NumericalFunction(\"sub\", 2)\nmul = NumericalFunction(\"mul\", 2)\nsqr = NumericalFunction(\"sqr\", 1)\ninv = NumericalFunction(\"inv\", 1)\ngeometric_mean = NumericalFunction(\"geometric_mean\", 2)\nidentity = NumericalFunction(\"identity\", 1)\nstandard_numerical_functions = {\n \"add\": add,\n \"sub\": sub,\n \"mul\": mul,\n \"sqr\": sqr,\n \"inv\": inv,\n \"geometric_mean\": geometric_mean,\n \"identity\": identity,\n}\n\nreal_sqr_non_neg = Theorem(name=\"real_sqr_non_neg\", input_no=2, input_constraints=[(sqr, (0,), (1,))],\n assumptions=[(Real, (0,))], conclusions=[(NonNegative, (1,))])\namgm = Theorem(name=\"AMGM for 2 elements\", input_no=4,\n input_constraints=[(add, (0, 1), (2, )), (geometric_mean, (0, 1), (3, ))],\n 
assumptions=[(NonNegative, (0, )), (NonNegative, (1, ))],\n conclusions=[(BiggerOrEqual, (2, 3))])\n\nstandard_theorems = {\n \"real_sqr_non_neg\": real_sqr_non_neg,\n \"amgm\": amgm,\n}\n\n\nclass ReplayBuffer:\n def __init__(self, name, max_size):\n self.buffer = list()\n self.name = name\n self.max_size = max_size\n\n def cache(self, other):\n self.buffer.extend(other)\n if len(self.buffer) > self.max_size:\n self.buffer = self.buffer[-self.max_size:]\n random.shuffle(self.buffer)\n\n def sample(self, sample_size):\n return random.sample(self.buffer, sample_size)\n\n\nclass List2Dataset(data_handler.Dataset):\n def __init__(self, d_list):\n self.dataset = d_list\n\n def __getitem__(self, index):\n return self.dataset[index]\n\n def __len__(self):\n return len(self.dataset)\n\n\nclass NumericalEvaluator:\n def __init__(self, file_path= os.path.dirname(os.path.abspath(__file__)) + \"/../data/3inputs1000tuples.npy\"):\n self.inputs = np.load(file_path)\n\n def identity(x):\n return x\n def add(x, y):\n return x + y\n def mul(x, y):\n return x * y\n def sub(x, y):\n return x - y\n def geometric_mean(x, y):\n return 2 * np.sqrt(x * y)\n def sqr(x):\n return x**2\n def inv(x):\n return 1 / x\n\n self.numerical_function_dict = {\"add\": add, \"sub\": sub, \"mul\": mul, \"sqr\": sqr, \"inv\": inv,\n \"geometric_mean\": geometric_mean, \"identity\": identity}\n\n def evaluate(self, entity_string, input_no=2):\n return eval(entity_string, {\n **{\"input{}\".format(i): self.inputs[:, i] for i in range(1, 1+input_no)},\n **self.numerical_function_dict\n })\n\n def equal_pair(self, entity_pair, input_no=2):\n return np.allclose(self.evaluate(entity_pair[0].name, input_no=input_no),\n self.evaluate(entity_pair[1].name, input_no=input_no))\n\n def equal_string_pair(self, entity_string_pair, input_no=2):\n return np.allclose(self.evaluate(entity_string_pair[0], input_no=input_no),\n self.evaluate(entity_string_pair[1], input_no=input_no))\n\n def batch_evaluate_equal_pairs(self, entity_pairs, input_no=2):\n return [self.equal_pair(pair, input_no=input_no) for pair in entity_pairs]\n\n\ndef get_hard_prover():\n # Define all entities to be used\n x = Entity(\"input1\")\n y = Entity(\"input2\")\n z = Entity(\"input3\")\n x_inv = inv.execute([x])\n y_inv = inv.execute([y])\n z_inv = inv.execute([z])\n x_over_y = mul.execute([x, y_inv])\n x_over_z = mul.execute([x, z_inv])\n y_over_x = mul.execute([y, x_inv])\n y_over_z = mul.execute([y, z_inv])\n z_over_x = mul.execute([z, x_inv])\n z_over_y = mul.execute([z, y_inv])\n x_over_y_sqr = sqr.execute([x_over_y])\n y_over_z_sqr = sqr.execute([y_over_z])\n z_over_x_sqr = sqr.execute([z_over_x])\n lhs = add.execute([add.execute([x_over_y_sqr, y_over_z_sqr]), z_over_x_sqr])\n rhs = add.execute([add.execute([x_over_z, y_over_x]), z_over_y])\n all_entities = [x, y, z, x_inv, y_inv, z_inv, x_over_y, x_over_z, y_over_x, y_over_z, z_over_x, z_over_y,\n x_over_y_sqr, y_over_z_sqr, z_over_x_sqr, lhs, rhs]\n\n # Define all assumptions\n real_x = Real.to_string([x])\n real_y = Real.to_string([y])\n real_z = Real.to_string([z])\n assumptions = [real_x, real_y, real_z]\n\n # Define the objective\n lhs_geq_rhs = BiggerOrEqual.to_string([lhs, rhs])\n objectives = [lhs_geq_rhs]\n\n # Define the proof\n proof = Proof(entities=all_entities, axioms=standard_theorems, assumptions=assumptions, objectives=objectives)\n\n # Define entity maxsize\n ent_maxsize = 20\n\n # Define ground truth maxsize\n gt_maxsize = 20\n\n # Define lemma maxsize\n lemma_maxsize = 5\n\n # Define lemma 
input_entity_embedding capacity\n lemma_embedding_size = 128\n\n # Define lemma operand size\n lemma_operand_maxsize = 5\n\n # Define objective maxsize\n objective_maxsize = 1\n\n hard_prover = LogicBasedProver(proof=proof, ent_maxsize=ent_maxsize, gt_maxsize=gt_maxsize,\n lemma_maxsize=lemma_maxsize, lemma_embedding_size=lemma_embedding_size,\n lemma_operand_size=lemma_operand_maxsize, objective_maxsize=objective_maxsize)\n return hard_prover\n\n\ndef set_random_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n\n\ndef element_index_in_list_according_to_name(list_of_interest, element):\n for i, item in enumerate(list_of_interest):\n if item.name == element.name:\n return i\n\n\ndef smaller(a, b):\n if a <= b:\n return a\n return b\n\n\ndef exhaust_actions(prover):\n all_actions = list()\n theorems = prover.proof.lemmas\n entities = prover.proof.entities\n for theorem in theorems:\n entity_combinations = itertools.combinations(entities, r=theorem.input_no)\n for operands in entity_combinations:\n action = {\"action_type\": \"theorem\", \"action\": [theorem, operands]}\n all_actions.append(action)\n return all_actions\n\n\ndef end_to_end_max_q_and_action(prover, q_net):\n max_action, max_q = None, 0.\n for action in exhaust_actions(prover):\n predicted_q = q_net(obs=prover.raw_observe(), act=action)\n if predicted_q >= max_q:\n max_action = action\n max_q = predicted_q.item()\n return max_q, max_action\n\n\ndef entity_pair_evaluate_model(model, theorem, test_set):\n test_size = len(test_set)\n outputs = [(model(theorem=theorem, entities=[entity0, entity1]), target)\n for entity0, entity1, target in test_set]\n output_tensor = torch.cat([output[0] for output in outputs], dim=0)\n target_tensor = torch.FloatTensor([output[1] for output in outputs]).view(-1, 1)\n return float(torch.sum(torch.abs(output_tensor - target_tensor) < 0.5).float() / float(test_size))\n\n\ndef run_q_solver(prover, q_net, depth):\n for i in range(1, 1+depth):\n max_q, max_action = end_to_end_max_q_and_action(prover=prover, q_net=q_net)\n _, reward, done, _ = prover.step(max_action)\n print(max_action[\"action\"][0])\n print([ent.to_string() for ent in max_action[\"action\"][1]])\n if done:\n pprint(prover.proof.print_proof_status())\n\n\ndef what_is_proved(observation, obj_observation):\n to_be_proved = list()\n already_proved_string_list = [ls.name for ls in observation['ground_truth']]\n for logic_state in obj_observation['ground_truth']:\n if logic_state.name not in already_proved_string_list:\n to_be_proved.append(logic_state)\n print(to_be_proved)\n return to_be_proved\n\n\nif __name__ == \"__main__\":\n a = non_trivial_prover().proof\n ne = NumericalEvaluator()\n","sub_path":"Inequality/logic/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"497436018","text":"from flask import Blueprint, render_template, redirect, url_for\nfrom manage_me.users.forms import UserLoginForm, UserRegistrationForm\n\nusers = Blueprint('users', __name__)\n\n@users.route('/login')\n@users.route('/', methods=['GET', 'POST'])\ndef login():\n form = UserLoginForm()\n return render_template('login.html', form=form)\n\n@users.route('/register')\ndef register():\n form = UserRegistrationForm()\n\n return render_template('register.html', 
form=form)\n","sub_path":"manage_me/users/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"230416652","text":"import re\n\n\ndef load_data(filename: str) -> list:\n with open(filename) as f:\n X = f.read().split(\"\\n\")\n return X\n\n\ndef parse_input(X: list) -> dict:\n d: dict = {}\n for x in X:\n outer = x.split(\" contain \")[0].split(\" \")\n outer = outer[0] + \" \" + outer[1]\n inner = x.split(\" contain \")[1].split(\", \")\n d[outer] = {}\n for i in inner:\n j = i.split(\" \")\n if j != [\"no\", \"other\", \"bags.\"]:\n d[outer].update({j[1] + \" \" + j[2]: int(j[0])})\n return d\n\n\ndef check_gold_bag(bags: dict, d: dict) -> int:\n count = 0\n colors = list(bags.keys())\n for color in colors:\n if color == \"shiny gold\":\n count += 1\n elif check_gold_bag(d[color], d):\n count += 1\n return count\n\n\ndef count_gold_bags(d: dict) -> int:\n num_gold_bags = 0\n for i in d:\n if check_gold_bag(d[i], d) > 0:\n num_gold_bags += 1\n return num_gold_bags\n\n\ndef count_bags_inside(d: dict, bag) -> int:\n count = 0\n if d[bag]:\n for j in d[bag]:\n count += d[bag][j] + d[bag][j] * count_bags_inside(d, j)\n return count\n\n\nif __name__ == \"__main__\":\n\n filename = \"./data/data07.txt\"\n\n X = load_data(filename)\n d = parse_input(X)\n\n print(count_gold_bags(d))\n print(count_bags_inside(d, \"shiny gold\"))\n","sub_path":"puzzles/puzzle07.py","file_name":"puzzle07.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"199202163","text":"#!/usr/bin/python3\n####################################################################################################\n\nimport re\nimport time\nfrom lib.ui import UI\nfrom lib.script import Script\nfrom lib.cli.facebook.openbmc import OpenBMC\n\nclass OpenBMC_0430(Script):\n def __init__(self, dut, job_id=\"\", image_server = 'None'):\n headline = ['Power OFF/ON Microserver']\n purpose = ['To verify the microserver is working properly when after power off and power on the Microserver.',\n 'Run the \"wedge_power.sh off\" and \"wedge_power.sh on\" script to power off and power on the Microserver,',\n 'Run the above steps for 100 iterations and check if the microserver comes back up each time and OS(ONIE or CentOS) boots correctly to the login prompt.']\n\n self.__dut = dut[1]\n self.image_server = image_server\n super().__init__(headline, purpose, script_path=__file__, job_id=job_id)\n # Start logging the script.\n super().beginLog()\n\n def run(self):\n \"\"\"\n Function Name: run\n Purpose: Executes the steps defined by this test case.\n \"\"\"\n\n # initialize serial, Telnet and SSH UI with SystemMgmt APIs.\n self.__TELNET = super().initUI(self.__dut.telnet_credentials, self.__dut.platform, OpenBMC)\n self.__SSH = super().initUI(self.__dut.ssh_credentials, self.__dut.platform, OpenBMC)\n self.__cycle = 1\n self.__fail_count = 0\n self.__TELNET.send('sudo ifconfig eth0 ' + self.__dut.ssh_credentials[1] + ' netmask ' + self.__dut.ssh_netmask + '\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('sv stop mTerm\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n \n for i in range(1, self.__cycle + 1):\n # ==================================================================================================\n UI.log('STEP#01 - cycle#' + str(i) + '/' + str(self.__cycle), 'Repeat ' + 
str(self.__cycle) + ' times with the command \"wedge_power.sh off\" and \"wedge_power.sh on\" to power off and power on the Microserver.')\n self.__TELNET.send('sol.sh\\r')\n \n # SSH connection send and expect\n self.__SSH.send('wedge_power.sh off\\r')\n self.__SSH.expect(self.__dut.ssh_credentials[5])\n self.__SSH.send('wedge_power.sh on\\r')\n self.__SSH.expect(self.__dut.ssh_credentials[5])\n \n while True:\n try:\n x = self.__TELNET.expect(['~]# ', 'quit.', ']', 'IPv4.', 'IPv6.'], timeout=180)\n if x == 0:\n break\n except:\n self.__fail_count += 1\n break\n \n # ==================================================================================================\n UI.log('STEP#02 - cycle#' + str(i) + '/' + str(self.__cycle), 'To check if the microserver comes back up each time correctly.')\n self.__TELNET.send('\\x18')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n \n if self.__fail_count == 0:\n self.__TELNET.send('log-util all --clear\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n else:\n self.__TELNET.send('cat /mnt/data/logfile\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('hexdump /mnt/data/sel1.bin\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n self.__TELNET.send('bic-util scm --get_post_code\\r')\n self.__TELNET.expect(self.__dut.ssh_credentials[5])\n \n UI.log('FAIL', 'Cycle#' + str(i) + '/' + str(self.__cycle) + ': Hangs Error.')\n \n break\n \n if self.__fail_count == 0:\n UI.log('PASS', 'Cycle#' + str(i) + '/' + str(self.__cycle) + ': BMC_0430 Power_OFF_ON_Microserver is passed.')\n else:\n UI.log('FAIL', 'Cycle#' + str(i) + '/' + str(self.__cycle) + ': BMC_0430 Power_OFF_ON_Microserver is failed.')\n\n def stop(self):\n # Terminate interfaces and restore settings.\n self.__TELNET.close()\n # Stop logging the script.\n super().endLog()\n","sub_path":"Facebook/openbmc_0430.py","file_name":"openbmc_0430.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"122005064","text":"import os\nimport sys\nimport argparse\nimport random\nimport numpy as np\nfrom collections import OrderedDict\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torchvision.utils as vutils\n\nfrom model_32 import *\nfrom cifar_horse_car import *\nfrom arguments import *\nfrom utils import *\n\nopt = parser.parse_args()\n\n# set the random seed for everything\nmanual_seed = opt.random_seed\nrandom.seed(manual_seed)\ntorch.manual_seed(manual_seed)\ntorch.cuda.manual_seed_all(manual_seed)\n\n# make the checkpoint and results directories\nif not os.path.exists(opt.output_dir):\n os.makedirs(opt.output_dir)\n\ndset = CIFAR10_HORSE_CAR(root=\"./cifar_dataset\", train=True, \n transform=transforms.Compose([\n transforms.ToTensor()\n ]))\n\ntrain_loader = torch.utils.data.DataLoader(dset, batch_size=opt.batch_size, shuffle=True)\n\n# make the networks\nE = Encoder(opt.n_z, noise=True, add_batch_norm_main4=True,\n use_output_sigmoid=False).cuda().apply(weights_init)\nG = Generator(opt.n_z).cuda().apply(weights_init)\nD = Discriminator(opt.n_z, 0.2, output_size=1).cuda().apply(weights_init)\n\nif opt.guidance_type == \"excitation_mu3slim\":\n mu_classifier = nn.Sequential(\n nn.Linear(3, 1),\n nn.Sigmoid()\n ).cuda()\n mu_classifier.train()\nelse:\n raise ValueError(\"excitation should be mu3 
slim\")\n\nif opt.inhibition_type == \"3layer\":\n mu_rest_classifier = nn.Sequential(\n nn.Linear(opt.n_z-3, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Linear(32, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Linear(32, 1),\n nn.Sigmoid()\n ).cuda()\n mu_rest_classifier.train()\n optimizerC = optim.Adam(mu_rest_classifier.parameters(), \n lr=opt.lr_d, betas=(0.5, 0.999))\n\noptimizerG = optim.Adam([{'params' : E.parameters()},\n {'params' : G.parameters()},\n {'params' : mu_classifier.parameters()}], lr=opt.lr_g, betas=(0.5,0.999))\n\noptimizerD = optim.Adam(D.parameters(), lr=opt.lr_d, betas=(0.5, 0.999))\n\nbce = nn.BCELoss().cuda()\nmse = nn.MSELoss().cuda()\nce = nn.CrossEntropyLoss().cuda()\n\nE.train()\nG.train()\nD.train()\n\n# the main training loop\nfor epoch in range(opt.epochs+1):\n print(\"EPOCH \",epoch)\n batch_num = 0\n for (img_real, target) in train_loader:\n img_real = img_real.cuda()\n target = target.cuda()\n if epoch==0 and batch_num==0:\n G.output_bias.data = get_log_odds(img_real)\n \n batch_size = img_real.size(0)\n real_label = torch.ones(batch_size).cuda()\n fake_label = torch.zeros(batch_size).cuda()\n\n z, mu, logvar = E(img_real)\n ## excitation loss using the MU_TOP\n mu_top = mu[:,0:3,0,0]\n classification_mutop = mu_classifier(mu_top).view(-1,1)\n guidance_loss = F.binary_cross_entropy(classification_mutop, target, reduction=\"mean\")*opt.lambda_guidance\n\n z_resampled = E.reparametrize(mu, logvar)\n img_recon = G(z_resampled)\n if opt.recon_type == \"mse0.5\":\n loss_recon = 0.5*((img_recon - img_real).pow(2).sum())/batch_size\n elif opt.recon_type == \"mse0.05\":\n loss_recon = 0.05*((img_recon - img_real).pow(2).sum())/batch_size\n elif opt.recon_type == \"mse0\":\n loss_recon = torch.tensor(0.00)\n \n fake_mu = torch.zeros(mu.size()).cuda()\n fake_sigma = torch.ones(mu.size()).cuda()\n if not opt.use_pure_normal:\n # copy the top 3 mu\n fake_mu[:,0:3,:,:] = mu[:,0:3,:,:]\n fake_sigma = torch.ones(mu.size()).cuda()\n z_fake = fake_mu + torch.randn(mu.size()).cuda()*fake_sigma\n\n img_fake = G(z_fake)\n\n output_real, _ = D(img_real + make_new_noise(img_real.size(), opt.epochs, epoch), z)\n output_fake, _ = D(img_fake + make_new_noise(img_real.size(), opt.epochs, epoch), z_fake)\n loss_d = bce(output_real, real_label) + bce(output_fake, fake_label)\n loss_g = bce(output_fake, real_label) + bce(output_real, fake_label)\n\n if opt.use_recon_gan:\n output_recon, _ = D(img_recon+make_new_noise(img_real.size(), opt.epochs, epoch), z_resampled)\n loss_d += bce(output_recon, fake_label)*opt.lambda_recon_gan\n loss_g += bce(output_recon, real_label)*opt.lambda_recon_gan\n \n if loss_g.data < float(opt.loss_g_filter):\n optimizerD.zero_grad()\n loss_d.backward(retain_graph=True)\n optimizerD.step()\n\n optimizerG.zero_grad()\n (loss_g+loss_recon+guidance_loss).backward(retain_graph=True)\n optimizerG.step()\n\n\n ## inhibition guidance \n if opt.lambda_inhibition > 0:\n # train the mu rest classifier to be optimal\n optimizerC.zero_grad()\n mu_rest = mu[:,3:,0,0]\n class_out_murest = mu_rest_classifier(mu_rest)\n inh_loss_A = F.binary_cross_entropy(class_out_murest, target,\n reduction=\"mean\")*opt.lambda_inhibition\n inh_loss_A.backward(retain_graph=True)\n optimizerC.step()\n\n # train the encoder to fool the mu rest classifier\n optimizerG.zero_grad()\n mu_rest = mu[:,3:,0,0]\n class_out_murest = mu_rest_classifier(mu_rest)\n mid_label = torch.empty((batch_size, 1)).fill_(0.5).cuda()\n 
inh_loss_B = F.binary_cross_entropy(class_out_murest, mid_label,\n reduction=\"mean\")*opt.lambda_inhibition\n inh_loss_B.backward()\n optimizerG.step()\n\n\n \n\n # print progress statements\n if batch_num%100 == 0 and opt.lambda_inhibition > 0:\n print(f\"batch number {batch_num:4d} \\t D loss {loss_d.data.item():.3f} \\t\"+\n f\"G loss {loss_g.data.item():.3f} \\t D(x) {output_real.mean().data.item():.3f} \\t\"+\n f\"D(G(x)): {output_fake.mean().data.item():.3f}\\t\"+\n f\"recon loss: {loss_recon.mean().data.item():.3f}\\t\"+\n f\"guidance loss: {guidance_loss.data.item():.3f}\\t\" +\n f\"inhibition loss A: {inh_loss_A.data.item():.3f}\\t\"+\n f\"inhibition loss B: {inh_loss_B.data.item():.3f}\\t\")\n vutils.save_image(img_fake.cpu().data[:opt.n_samples, ], './%s/fake.png' % (opt.output_dir))\n vutils.save_image(img_real.cpu().data[:opt.n_samples, ], './%s/real.png'% (opt.output_dir))\n vutils.save_image(img_recon.cpu().data[:opt.n_samples, ], './%s/recon.png'% (opt.output_dir))\n elif batch_num%100 == 0 and opt.lambda_inhibition == 0:\n print(f\"batch number {batch_num:4d} \\t D loss {loss_d.data.item():.3f} \\t\"+\n f\"G loss {loss_g.data.item():.3f} \\t D(x) {output_real.mean().data.item():.3f} \\t\"+\n f\"D(G(x)): {output_fake.mean().data.item():.3f}\\t\"+\n f\"recon loss: {loss_recon.mean().data.item():.3f}\\t\"+\n f\"guidance loss: {guidance_loss.data.item():.3f}\\t\")\n vutils.save_image(img_fake.cpu().data[:opt.n_samples, ], './%s/fake.png' % (opt.output_dir))\n vutils.save_image(img_real.cpu().data[:opt.n_samples, ], './%s/real.png'% (opt.output_dir))\n vutils.save_image(img_recon.cpu().data[:opt.n_samples, ], './%s/recon.png'% (opt.output_dir))\n batch_num+=1\n \n # save images at the end of all epochs\n vutils.save_image(img_fake.cpu().data[:opt.n_samples, ], './%s/fake_%d.png' % (opt.output_dir, epoch))\n vutils.save_image(img_recon.cpu().data[:opt.n_samples, ], './%s/recon_%d.png' % (opt.output_dir, epoch))\n vutils.save_image(img_real.cpu().data[:opt.n_samples, ], './%s/real_%d.png' % (opt.output_dir, epoch))\n if epoch%5 == 0:\n # save model to file after each epoch\n torch.save(G.state_dict(), '%s/netG_epoch_%d.pth' % (opt.output_dir, epoch))\n torch.save(E.state_dict(), '%s/netE_epoch_%d.pth' % (opt.output_dir, epoch))\n torch.save(D.state_dict(), '%s/netD_epoch_%d.pth' % (opt.output_dir, epoch))\n torch.save(mu_classifier.state_dict(), '%s/netMU_epoch_%d.pth'%(opt.output_dir, epoch))\n\n \"\"\"\n print Z, Z_fake, Z_recon to file\n \"\"\"\n filename = f\"./{opt.output_dir}/log_z_values_{epoch}.txt\"\n with open(filename, 'w') as f:\n z_list = z[0].view(-1).detach().cpu().tolist()\n f.write(\"Encoder output Z:\\n\")\n for item in z_list:\n f.write(\" %f\"%item)\n f.write(\"\\nEncoder output MU:\\n\")\n mu_list = mu[0].view(-1).detach().cpu().tolist()\n for item in mu_list:\n f.write(\" %f\"%item)\n f.write(\"\\nEncoder output LOGVAR:\\n\")\n logvar_list = logvar[0].view(-1).detach().cpu().tolist()\n for item in logvar_list:\n f.write(\" %f\"%item)\n f.write(\"\\nEncoder output e^(0.5*LOGVAR):\\n\")\n var_list = torch.exp(0.5 * logvar[0]).view(-1).detach().cpu().tolist()\n for item in var_list:\n f.write(\" %f\"%item)\n f.write(\"\\nFake generated Z:\\n\")\n z_fake_list = z_fake[0].view(-1).detach().cpu().tolist()\n for item in z_fake_list:\n f.write(\" %f\"%item)\n f.write(\"\\nReconstruction samples Z:\\n\")\n z_recon_list = z_resampled[0].view(-1).detach().cpu().tolist()\n for item in z_recon_list:\n f.write(\" 
%f\"%item)\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"417923792","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nsalesDist = pd.read_csv('./data/stores-dist.csv')\n# Verify the imported data\nsalesDist.head()\n\n# The district column has no relevance at this time, so it can be dropped.\nsalesDist = salesDist.rename(columns={'annual net sales':'sales','number of stores in district':'stores'})\nsalesDist.head()\n# Check correlation of data prior to doing the analysis\nsalesDist.corr()\n\n#sales = salesDist.drop(...)\nsales = salesDist.drop([\"district\"], axis=1)\nsales.head()\n\n# dependent variable for y axis\ny = sales['sales']\n# independent variable for x axis\nx = sales.stores\n\n# Increase the size of the plot\nplt.figure(figsize=(20,10))\n# Create a scatter plot: Number of stores in the District vs. Annual Net Sales\nplt.plot(x,y, 'o', markersize = 15)\n# Add axis labels and increase the font size\nplt.ylabel('Annual Net Sales', fontsize = 30)\nplt.xlabel('Number of Stores in the District', fontsize = 30)\n# Increase the font size on the ticks on the x and y axis\nplt.xticks(fontsize = 20)\nplt.yticks(fontsize = 20)\n# Display the scatter plot\nplt.show()\n\nm, b = np.polyfit(x,y,1)\nprint ('The slope of line is {:.2f}.'.format(m))\nprint ('The y-intercept is {:.2f}.'.format(b))\nprint ('The best fit simple linear regression line is {:.2f}x + {:.2f}.'.format(m,\nb))\n\ny_mean = y.mean()\n# x coordinate for centroid\nx_mean = x.mean()\nprint ('The centroid for this dataset is x = {:.2f} and y = {:.2f}.'.format(x_mean\n, y_mean))\n\n# Enlarge the plot size\nplt.figure(figsize=(20,10))\n# Plot the scatter plot of the data set\nplt.plot(x,y, 'o', markersize = 14, label = \"Annual Net Sales\")\n# Plot the centroid point\nplt.plot(x_mean,y_mean, '*', markersize = 30, color = \"r\")\n# Plot the linear regression line\nplt.plot(x, m*x + b, '-', label = 'Simple Linear Regression Line', linewidth = 4)\n# Create the x and y axis labels\nplt.ylabel('Annual Net Sales', fontsize = 30)\nplt.xlabel('Number of Stores in District', fontsize = 30)\n# Enlarge x and y tick marks\nplt.xticks(fontsize = 20)\nplt.yticks(fontsize = 20)\n# Point out the centroid point in the plot\nplt.annotate('Centroid', xy=(x_mean-0.1, y_mean-5), xytext=(x_mean-3, y_mean-20),\narrowprops=dict(facecolor='black', shrink=0.05), fontsize = 30)\n# Create legend\nplt.legend(loc = 'upper right', fontsize = 20)\n\ndef predict(query):\n if query >= 1:\n predict = m * query + b\n return predict\n else:\n print (\"You must have at least 1 store in the district to predict the annual net sales.\")\n# Code Cell 12\n# Enter the number of stores in the function to generate the net sales prediction.\n\nprint(predict(2))","sub_path":"linear_regretion.py","file_name":"linear_regretion.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"468672006","text":"from __future__ import print_function\nimport plac\nimport dill as pickle\nfrom tqdm import tqdm\nfrom thinc.neural.vec2vec import Model, ReLu, Softmax\nfrom thinc.api import clone, chain\nfrom thinc.neural.util import to_categorical\n\nfrom thinc.extra import datasets\nfrom thinc.neural.ops import CupyOps\n\n\ndef main(depth=2, width=512, nb_epoch=20):\n if CupyOps.xp != None:\n Model.ops = CupyOps()\n # 
Configuration here isn't especially good. But, for demo..\n with Model.define_operators({'**': clone, '>>': chain}):\n model = ReLu(width) ** depth >> Softmax()\n \n train_data, dev_data, _ = datasets.mnist()\n train_X, train_y = model.ops.unzip(train_data)\n dev_X, dev_y = model.ops.unzip(dev_data)\n\n dev_y = to_categorical(dev_y)\n with model.begin_training(train_X, train_y) as (trainer, optimizer):\n epoch_loss = [0.]\n def report_progress():\n with model.use_params(optimizer.averages):\n print(epoch_loss[-1], model.evaluate(dev_X, dev_y), trainer.dropout)\n epoch_loss.append(0.)\n \n trainer.each_epoch.append(report_progress)\n trainer.nb_epoch = nb_epoch\n trainer.dropout = 0.75\n trainer.batch_size = 128\n trainer.dropout_decay = 1e-4\n train_X = model.ops.asarray(train_X, dtype='float32')\n y_onehot = to_categorical(train_y)\n for X, y in trainer.iterate(train_X, y_onehot):\n yh, backprop = model.begin_update(X, drop=trainer.dropout)\n loss = ((yh-y)**2.).sum() / y.shape[0]\n backprop(yh-y, optimizer)\n epoch_loss[-1] += loss\n with model.use_params(optimizer.averages):\n print('Avg dev.: %.3f' % model.evaluate(dev_X, dev_y))\n with open('out.pickle', 'wb') as file_:\n pickle.dump(model, file_, -1)\n\n\nif __name__ == '__main__':\n plac.call(main)\n","sub_path":"examples/mnist_mlp.py","file_name":"mnist_mlp.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"408453636","text":"# coding: utf-8\n\nimport hashlib\nimport json\nimport os\nimport subprocess\nimport urllib\n\nfrom django.conf import settings\nfrom django.core.cache import caches\nfrom django.core.context_processors import csrf\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils.html import strip_tags, escape\nfrom django.views.generic import TemplateView\nfrom django.views.generic.base import TemplateResponseMixin\nfrom rest_framework import status\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework_xml.renderers import XMLRenderer\n\nfrom auxiliares import funcoes_auxiliares as aux\nfrom recomendacao.const import APP_NAME, ENCODING, CSE_ID, MAX_SIZE_SOBEK_OUTPUT\nfrom recomendacao.forms import FormText\nfrom recomendacao.search import GoogleSearchCse, GoogleSearchCseMarkup, GoogleSearchCseSeleniumMarkupImg\nfrom recomendacao.serializers import SerializerText\n\n\ndef strip_escape(text):\n text = strip_tags(text)\n #text = escape(text)\n return text\n\n\ndef encode_string(string):\n return string.encode(ENCODING)\n\n\ndef decode_string(string):\n return string.decode(ENCODING)\n\n\ndef serialize_render(data, renderer_class):\n renderer = renderer_class()\n return renderer.render(data)\n\n\nclass TemplateViewContext(TemplateView):\n extra_context = {}\n\n def get_context_data(self, request, **kwargs):\n context = super(TemplateViewContext, self).get_context_data(**kwargs)\n context.update(self.extra_context)\n self.get_request_data(request, context)\n return context\n\n def get_request_data(self, request, context):\n request_data_json = request.GET.get('data')\n if request_data_json:\n request_data = json.loads(request_data_json)\n context.update(request_data)\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(request, **kwargs)\n return self.render_to_response(context)\n\n\nclass TemplateViewContextPost(TemplateViewContext):\n http_method_names = 
['post', 'put', 'patch', 'delete', 'head', 'options', 'trace']\n\n def get_request_data(self, request, context):\n request_data_json = request.body\n if request_data_json:\n request_data = json.loads(request_data_json)\n context.update(request_data)\n\n def post(self, request, *args, **kwargs):\n context = self.get_context_data(request, **kwargs)\n return self.render_to_response(context)\n\n\nclass TemplateViewBusca(TemplateViewContext):\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(request, **kwargs)\n form = FormText()\n\n context.update({\n 'form': form,\n })\n context.update(csrf(request))\n return render(request, self.template_name, context)\n\n\nclass EnviaTextoV1(TemplateResponseMixin, APIView):\n def post(self, request, format=None):\n serializer = SerializerText(data=request.data)\n if serializer.is_valid():\n self.request_data = serializer.data\n self.input_hash = hashlib.sha224(request.path_info + unicode(self.request_data)).hexdigest()\n response_data = self.get_response_data(request)\n return Response(response_data, status=status.HTTP_200_OK, template_name=self.template_name)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST, exception=True)\n\n def get_response_data(self, request):\n cache = caches['default']\n cache_reload = self.request_data.get('cache_reload')\n if cache.get(self.input_hash):\n json_response_data = self.read_response_data_file(self.input_hash, JSONRenderer.format)\n response_data = json.loads(json_response_data)\n else:\n response_data = self.process_text(request)\n cache.set(self.input_hash, True, cache_reload)\n return response_data\n\n def process_text(self, request):\n text = self.request_data['text']\n text = strip_escape(text)\n text = encode_string(text)\n response_data = {}\n\n sobek_output = self.run_sobek(text)\n while len(sobek_output.split()) > MAX_SIZE_SOBEK_OUTPUT:\n sobek_output = self.run_sobek(sobek_output)\n response_data['sobek_output'] = decode_string(sobek_output).split()\n\n response_data['text_hash'] = self.input_hash\n self.serialize_response_data(response_data)\n return response_data\n\n def run_sobek(self, text):\n sobek_path = os.path.join(settings.BASE_DIR, 'misc', 'webServiceSobek_Otavio.jar')\n\n try:\n quoted_text = urllib.quote(text)\n sobek_command = ['java', '-Dfile.encoding=' + ENCODING, '-jar', encode_string(sobek_path), '-b', '-t', '\"' + encode_string(quoted_text) + '\"']\n sobek_output = subprocess.check_output(sobek_command)\n except subprocess.CalledProcessError:\n text += ' ' + text\n\n quoted_text = urllib.quote(text)\n sobek_command = ['java', '-Dfile.encoding=' + ENCODING, '-jar', encode_string(sobek_path), '-b', '-t', '\"' + encode_string(quoted_text) + '\"']\n sobek_output = subprocess.check_output(sobek_command)\n\n sobek_output = sobek_output.replace('\\n', ' ')\n\n return sobek_output\n\n def serialize_response_data(self, response_data):\n xml_response_data = serialize_render(response_data, XMLRenderer)\n self.create_response_data_file(xml_response_data, self.input_hash, XMLRenderer.format)\n json_response_data = serialize_render(response_data, JSONRenderer)\n self.create_response_data_file(json_response_data, self.input_hash, JSONRenderer.format)\n\n def create_response_data_file(self, response_data, text_hash, file_format):\n filename = text_hash + '.' 
+ file_format\n aux.make_sure_path_exists(settings.FILES_ROOT)\n with open(os.path.join(settings.FILES_ROOT, filename), 'wb') as response_data_file:\n response_data_file.write(response_data)\n response_data_file.close()\n\n def read_response_data_file(self, text_hash, file_format):\n filename = text_hash + '.' + file_format\n aux.make_sure_path_exists(settings.FILES_ROOT)\n with open(os.path.join(settings.FILES_ROOT, filename), 'rb') as response_data_file:\n response_data = response_data_file.read()\n response_data_file.close()\n return response_data\n\n\nclass EnviaTextoV2(EnviaTextoV1):\n def process_text(self, request):\n text = self.request_data['text']\n text = strip_escape(text)\n text = encode_string(text)\n response_data = {}\n\n sobek_output = self.run_sobek(text)\n while len(sobek_output.split()) > MAX_SIZE_SOBEK_OUTPUT:\n sobek_output = self.run_sobek(sobek_output)\n search_input = sobek_output\n results_list = self.run_xgoogle(search_input, request)\n response_data['sobek_output'] = decode_string(sobek_output).split()\n response_data['results_list'] = results_list\n\n response_data['text_hash'] = self.input_hash\n self.serialize_response_data(response_data)\n return response_data\n\n def run_xgoogle(self, search_input, request):\n gs = GoogleSearchCse(search_input, user_agent=request.META['HTTP_USER_AGENT'], lang='pt-br', tld='com.br', cx=CSE_ID)\n results = gs.get_results()\n\n results_list = []\n for res in results:\n result_dict = {}\n\n result_dict['title'] = res.title\n result_dict['url'] = res.url\n result_dict['snippet'] = res.desc\n\n results_list.append(result_dict)\n\n return results_list\n\n\nclass EnviaTextoV3(EnviaTextoV2):\n def process_text(self, request):\n text = self.request_data['text']\n text = strip_escape(text)\n text = encode_string(text)\n mode = self.request_data.get('mode')\n images = self.request_data.get('images')\n response_data = {}\n\n if mode == 'sobek':\n sobek_output = self.run_sobek(text)\n while len(sobek_output.split()) > MAX_SIZE_SOBEK_OUTPUT:\n sobek_output = self.run_sobek(sobek_output)\n response_data['sobek_output'] = decode_string(sobek_output).split()\n elif mode == 'google':\n search_input = text\n results_list = self.run_xgoogle(search_input, request, images)\n response_data['results_list'] = results_list\n else:\n sobek_output = self.run_sobek(text)\n while len(sobek_output.split()) > MAX_SIZE_SOBEK_OUTPUT:\n sobek_output = self.run_sobek(sobek_output)\n search_input = sobek_output\n results_list = self.run_xgoogle(search_input, request, images)\n response_data['sobek_output'] = decode_string(sobek_output).split()\n response_data['results_list'] = results_list\n\n response_data['text_hash'] = self.input_hash\n self.serialize_response_data(response_data)\n return response_data\n\n def run_xgoogle(self, search_input, request, images):\n if not images:\n gs = GoogleSearchCseMarkup(search_input, user_agent=request.META['HTTP_USER_AGENT'], lang='pt-br', tld='com.br', cx=CSE_ID)\n else:\n gs = GoogleSearchCseSeleniumMarkupImg(search_input, user_agent=request.META['HTTP_USER_AGENT'], lang='pt-br', tld='com.br', cx=CSE_ID)\n results = gs.get_results()\n\n results_list = []\n for res in results:\n result_dict = {}\n\n result_dict['title'] = res.title\n result_dict['url'] = res.url\n result_dict['snippet'] = res.desc\n\n result_dict['title_markup'] = res.title_markup\n result_dict['url_markup'] = res.url_markup\n result_dict['snippet_markup'] = res.desc_markup\n\n if images:\n result_dict['img'] = getattr(res, 'img', None)\n\n 
results_list.append(result_dict)\n\n return results_list\n","sub_path":"recomendacao/recomendacao/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"641055742","text":"from pathlib import Path\nimport numpy as np\nimport pytesseract\nimport SimpleITK as sitk\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport re\n\n\ndef getTesseractTagLine(np_array, pattern = None, tag_list = None, single_line=False):\n \"\"\"\n Run tesseract on an input image.\n np_array: is supposed to be a numpy 2d array\n tag_list: is a list of strings acceptable as tags\n NOTE/TODO: spaces are ignored in the tag list right now. \n So, POST PLAC, ANT PLAC, etc are not recognized yet.\n pattern: is an re expression in UPPER CASE\n \"\"\"\n \n if single_line:\n config_file = '--oem 1 --psm 7'\n else:\n config_file = '--oem 1 --psm 12'\n data = pytesseract.image_to_data(np_array, output_type= pytesseract.Output.DICT, config=config_file)\n final_tag = ('Undecided', -1)\n if len(data['conf']) == 1 and data['conf'][0] == '-1':\n # print('No text found')\n return final_tag\n \n # print(data)\n conf = list(map(int, data['conf']))\n if max(conf) > 0:\n if tag_list is not None:\n max_conf_tag_ind = conf.index(max(conf))\n tag = (data['text'][max_conf_tag_ind]).upper()\n tag = ''.join(e for e in tag if e.isalnum())\n # does this tag live in the list?\n if tag in tag_list:\n final_tag = (tag, max_conf_tag_ind)\n \n if pattern is not None:\n found_tags = [ (tag, conf) for (tag, conf) in zip(data['text'], data['conf']) if re.match(pattern, tag.upper()) ]\n if len(found_tags) > 1:\n print('----- WARNING found more than one tags for pattern: {}'.format(pattern))\n if len(found_tags) > 0:\n final_tag = found_tags[0]\n \n if pattern is None and tag_list is None:\n max_conf_tag_ind = conf.index(max(conf))\n tag = (data['text'][max_conf_tag_ind])\n final_tag = (tag, max_conf_tag_ind)\n\n return final_tag\n\ndef iterateThroughConfigs(input_image, pattern=None, tag_list=None, single_line=True, debug=False):\n \"\"\"\n Preprocess the image and get tag extraction.\n pattern: is an RE pattern to match the text extracted. Use single_line=True with this\n tag_list: is list of tags to look for. Sometimes single line helps with this, \n but for most part single_line=False will work. \n debug: If debug is true then images will be displayed using matplotlib\n \"\"\"\n\n threshold_method = [0, 1] #0 for binary, 1 for otsu\n scale = [1, 2] #sometimes scaling it twice helps to get the tag\n ballsize = [0, 1, 3] # Ball size for binary dilation\n smoothing = [0, 1] # level of smoothing. 
1: pixel\n process_config = [ (t, s, b, sm) for t in threshold_method \n for s in scale \n for b in ballsize \n for sm in smoothing\n ]\n\n final_tag = 'Undecided'\n final_tag_list = []\n conf_list = []\n\n # go through each one\n for config in process_config:\n if debug:\n print(config)\n if config[3] > 0:\n cropped_image = sitk.DiscreteGaussian(input_image, float(config[3]))\n\n if config[0] == 0:\n thresholded_image = (input_image < 128)*255\n else:\n thresholded_image = sitk.OtsuThreshold(input_image)*255\n\n if config[1] == 1:\n expanded_image = thresholded_image\n else:\n expanded_image = sitk.Expand(thresholded_image, [config[1], config[1]], sitk.sitkLinear)\n \n if config[2] == 0:\n final_image = expanded_image\n else: \n final_image = sitk.BinaryDilate(expanded_image, config[2], sitk.sitkBall, 0., 255.)\n \n if debug:\n plt.imshow(sitk.GetArrayFromImage(final_image), cmap='gray')\n plt.pause(0.5)\n plt.show()\n\n tag_conf = getTesseractTagLine(sitk.GetArrayFromImage(final_image), pattern=pattern, tag_list=tag_list)\n \n if tag_list is not None and tag_conf[0] in tag_list:\n # When looking for a tag in tag list \n final_tag = tag_conf[0]\n break\n else:\n final_tag_list.append(tag_conf[0])\n conf_list.append(tag_conf[1])\n \n if tag_list is None:\n final_tag = final_tag_list [ conf_list.index( max(conf_list) ) ]\n \n del thresholded_image, expanded_image, final_image\n return final_tag\n\n\ndef processBoundingBox(sitk_image, pattern=None, tag_list=None, debug=False):\n \n cropped_image = sitk.RescaleIntensity(sitk_image)\n\n if debug:\n plt.imshow(sitk.GetArrayFromImage(cropped_image), cmap='gray')\n plt.pause(0.5)\n plt.show()\n\n use_single_line = pattern is not None and tag_list is None\n final_tag = iterateThroughConfigs(cropped_image, pattern=pattern, tag_list=tag_list, single_line=use_single_line, debug=debug)\n if final_tag == 'Undecided':\n # If the previous line method did not work, re do with switching the mode for single line.\n # The initial selection of using isngle lin ei based on how experimenst on whic method works best for which\n # rype of OCR task. (oattern extraction vs tag extraction)\n if debug:\n print('Trying the single line method: {}'.format(not use_single_line))\n final_tag = iterateThroughConfigs(cropped_image, pattern=pattern, tag_list=tag_list, single_line= (not use_single_line), debug=debug)\n return final_tag\n\ndef extractTagFromFrame(np_frame, bounding_box, tag_list):\n \"\"\"\n Do necessary preprocessing on the numpy array, and pass it to tesseract.\n np_fram: 2d grayscale numpy array\n bounding_box: a list of two lists defining the upper left and lower right corners of\n the bounding box where the tag is estimated to be. NOTE: this is very crucial here/\n tag_list: list of acceptable tags. 
See the note in getTesseractTag()\n \"\"\"\n #sub_image = np_frame[bounding_box[0][1] : bounding_box[1][1],\n # bounding_box[0][0] : bounding_box[1][0] ]\n Dimension = len(np_frame.shape)\n # check if the input dimension is 2\n if Dimension is not 2:\n print('Error: Expecting a 2d image as an input to extract the tag, got another dimension, returning')\n return None\n\n sitk_image = sitk.GetImageFromArray(np_frame)\n size = sitk_image.GetSize()\n tmp_image = sitk.Crop(sitk_image, bounding_box[0],\n [size[i] - bounding_box[1][i] for i in range(Dimension)])\n del sitk_image\n \n final_tag = processBoundingBox(tmp_image, tag_list=tag_list)\n return final_tag \n","sub_path":"src/py/FamliOCR/famlitesseract.py","file_name":"famlitesseract.py","file_ext":"py","file_size_in_byte":6670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"345904722","text":"# 기본적인 RNN 수행하기\nimport tensorflow as tf\nfrom tensorflow.models.rnn import rnn, rnn_cell\ntf.disable_v2_behavior()\nimport numpy as np\n\nchar_rdic = ['h', 'e', 'l', 'o']\nchar_dic = { w : i for i, w in enumerate(char_rdic)} # char -> id\nprint(char_dic)\n\nx_data = np.array([[1,0,0,0], # h\n [0,1,0,0], # e\n [0,0,1,0], # l\n [0,0,1,0]], # l\n dtype='f')\n\nsample = [char_dic[c] for c in 'hello'] # to index\nprint(sample)\n\n# 설정\nchar_vocab_size = len(char_dic)\nrnn_size = char_vocab_size # one hot coding(one of 4)\nstate_size = 0\ntime_step_size = 4 # hell -> predict ello\nbatch_size = 1 # one sample\n\n# # RNN model\nrnn_cell = rnn_cell.BasicRNNCell(rnn_size) # rnn_size는 output 사이즈와 관련\n# rnn_cell = rnn_cell.BasicLSTMCell(rnn_size) # rnn_size는 output 사이즈와 관련\n# rnn_cell = rnn_cell.BasicGRUCell(rnn_size) # rnn_size는 output 사이즈와 관련\n\nstate = tf.zeros([batch_size, rnn_cell, state_size]) # 초기엔 0 , state_size : rnn의 사이즈\nX_split = tf.split(0, time_step_size, x_data)\n\noutputs, state = rnn.rnn(rnn_cell, X_split, state) # X_split : 사용할 벡터들의 갯수\n\n# Cost\nlogits = tf.reshape(tf.concat(1, outputs), [-1, rnn_size]) # 예측값, -1은 n개라는 의미\ntargets = tf.reshape(sample[1:],[-1])\nweights = tf.ones([time_step_size * batch_size])\n\n# 학습\nloss = tf.nn.seq2seq.sequence_loss_by_example([logits], [targets], [weights])\ncost = tf.reduce_sum(loss)/batch_size\n\n# 최적화\ntrain_op = tf.train.RMSPropOptimizer(0.01, 0.9).minimize(cost)\n\nwith tf.Session() as sess:\n tf.initialize_all_variables().run()\n\n for i in range(100):\n sess.run(train_op)\n result = sess.run(tf.arg_max(logits, 1))\n print(result, [char_rdic[t] for t in result])\n","sub_path":"DeepLearningZeroToAll/ch12/basicRNNCell.py","file_name":"basicRNNCell.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"183325156","text":"import re\nimport http.cookiejar\nimport urllib.request\nimport urllib.parse\n \ndef getXSRF(data):\n cer = re.compile('name=\"_xsrf\" value=\"(.*)\"', flags = 0)\n strlist = cer.findall(data)\n return strlist[0]\n \ndef getOpener(head):\n # deal with the Cookies\n cj = http.cookiejar.CookieJar()\n pro = urllib.request.HTTPCookieProcessor(cj)\n opener = urllib.request.build_opener(pro)\n header = []\n for key, value in head.items():\n elem = (key, value)\n header.append(elem)\n opener.addheaders = header\n return opener\n \nheader = {\n 'Connection': 'Keep-Alive',\n 'Accept': 'text/html, application/xhtml+xml, */*',\n 'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; 
Trident/7.0; rv:11.0) like Gecko',\n 'Host': 'http://4m3.tongji.edu.cn/eams/login.action',\n 'DNT': '1'\n}\n \nurl = 'http://www.tongji-pe.tongji.edu.cn/webscore/search/websearch.aspx'\nopener = getOpener(header)\nop = opener.open(url)\ndata = op.read()\n\nurl='http://www.tongji-pe.tongji.edu.cn/webscore/search/websearch.aspx'\nid = '1450856'\npassword = '1450856'\npostDict = {\n 'txt_stid': id,\n 'txt_pwd': password,\n}\npostData = urllib.parse.urlencode(postDict).encode()\nop = opener.open(url, postData)\ndata = op.read()\n \nprint(data.decode())\n\n\n\n","sub_path":"zhihu.py","file_name":"zhihu.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"394268679","text":"from backbones import PolyaBackboneStrategy\nfrom data import USAirportDataProvider\nfrom plot import PlotBuilder, MapVisualisation\n\nfrom matplotlib import pyplot as plt\n\ndata_provider = USAirportDataProvider()\n\nbackbone_strategy = PolyaBackboneStrategy(a = 1, integer_weights = False)\nvisualisation = MapVisualisation(data_provider, backbone_strategy)\nplot_builder = PlotBuilder(visualisation)\n\nedges = [(v, u) for (v, u) in data_provider.graph.edges if data_provider.graph[v][u][\"p\"] < 0.003]\nvisualisation.set_edges_to_display(edges)\n\nplot_builder.fig.set_size_inches(12, 8, forward = True)\nfor side in [\"top\", \"right\", \"bottom\", \"left\"]:\n plot_builder.ax.spines[side].set_visible(False)\n\nplt.gca().set_axis_off()\nplt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)\nplt.margins(0,0)\nplot_builder.draw(\n xlim = (-180, -64),\n ylim = ( 17, 72),\n node_size = 1,\n)\n","sub_path":"src/us_airport_polya.py","file_name":"us_airport_polya.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"445733903","text":"# -*- coding: utf-8 -*-\n# Copyright © IBM Corporation 2010, 2020\n# (c) Copyright IBM Corp. 2010, 2020. 
All Rights Reserved.\n# pragma pylint: disable=unused-argument, no-self-use\n\n# Guardium Insights\nAPI_VERSION = \"v3\"\nANAMOLIES = \"https://{host}:{port}/api/\" + API_VERSION + \"/events\"\nANAMOLIES_DETAILS = \"https://{host}:{port}/api/\" + API_VERSION + \"/events/{event_id}/detail\"\nBLOCK_USER = \"https://{host}:{port}/api/\" + API_VERSION + \"/block_user\"\nLIST_REPORTS = \"https://{host}:{port}/api/\" + API_VERSION + \"/reports\"\nRUN_REPORT = \"https://{host}:{port}/api/\" + API_VERSION + \"/reports/run\"\n\n# Resilient Incident Restful Endpoints\nADD_INCIDENT = \"/incidents?want_full_data=false&want_tasks=false&handle_format=names\"\nINCIDENT_URL = \"/incidents/{}?handle_format=names\"\n# Resilient Table Restful Endpoints\nGET_TABLE_DATA = '/incidents/{inc_id}/table_data?handle_format=names'\nTABLE_ADD_ROW = \"/incidents/{inc_id}/table_data/{table_id}/row_data?handle_format=names\"\nDELETE_TABLE_ROW = \"/incidents/{inc_id}/table_data/{table_id}/row_data/{row_id}?handle_format=names\"\nGET_TABLE_DATA_BY_ID = \"/incidents/{inc_id}/table_data/{table_id}?handle_format=names\"\nASSETS = \"https://{host}:{port}/api/v3/assets?user_type=all_user_type\"\n\n# BSO firewall\nF_URL = \"https://{bso_ip}/netaccess/connstatus.html\"\nF_LOGIN_URL = \"https://{bso_ip}/netaccess/loginuser.html\"\n\nCLASSIFICATION_REPORT_BODY = {\n \"fetch_size\": 0,\n \"offset\": 0,\n \"report_definition\": {\n \"report_id\": \"000000000000000000001101\",\n \"category_id\": \"000000000000000000000006\",\n \"report_name\": \"Classification\",\n \"report_description\": \"This report details incoming classification data.\",\n \"report_tags\": [\n {\n \"nls_key\": \"REPORT_TYPE_CLASSIFICATION\",\n \"nls_value\": \"Classification\"\n }\n ],\n \"report_headers\": [\n {\n \"header_id\": \"911\",\n \"header_name\": \"StartDateTimeUTC\",\n \"header_description\": {\n \"nls_key\": \"STARTDATETIMEUTC_DESCRIPTION\",\n \"nls_value\": \"Date and time the classification process started running (local time).\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_STARTDATETIMEUTC\",\n \"nls_value\": \"Start date (local time)\"\n },\n \"header_type\": \"DATE_UTC\",\n \"header_type_length\": 10,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 1,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"916\",\n \"header_name\": \"DataSourceIP\",\n \"header_description\": {\n \"nls_key\": \"DATASOURCEIP_DESCRIPTION\",\n \"nls_value\": \"The datasource server IP\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_DATASOURCEIP\",\n \"nls_value\": \"Datasource IP\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 50,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 2,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"914\",\n \"header_name\": \"DataSourceName\",\n \"header_description\": {\n \"nls_key\": \"DATASOURCENAME_DESCRIPTION\",\n \"nls_value\": \"Full name of the data source.\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_DATASOURCENAME\",\n \"nls_value\": \"Datasource name\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 3,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"915\",\n \"header_name\": \"DataSourceType\",\n \"header_description\": {\n \"nls_key\": \"DATASOURCETYPE_DESCRIPTION\",\n \"nls_value\": \"Database type.\"\n },\n \"field_name\": {\n 
\"nls_key\": \"CLASSIFICATION_DATASOURCETYPE\",\n \"nls_value\": \"Datasource type\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 4,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"919\",\n \"header_name\": \"Port\",\n \"header_description\": {\n \"nls_key\": \"PORT_DESCRIPTION\",\n \"nls_value\": \"Port on the data source that the assessment runs on.\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_PORT\",\n \"nls_value\": \"Port\"\n },\n \"header_type\": \"INTEGER\",\n \"header_type_length\": 4,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 5,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"917\",\n \"header_name\": \"ServiceName\",\n \"header_description\": {\n \"nls_key\": \"SERVICENAME_DESCRIPTION\",\n \"nls_value\": \"Service name for the interaction (or alias that is used until the service is connected).\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_SERVICENAME\",\n \"nls_value\": \"Service name\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 6,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"904\",\n \"header_name\": \"Schema\",\n \"header_description\": {\n \"nls_key\": \"SCHEMA_DESCRIPTION\",\n \"nls_value\": \"Displays if the data source includes schema details.\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_SCHEMA\",\n \"nls_value\": \"Schema\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 7,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"903\",\n \"header_name\": \"Catalog\",\n \"header_description\": {\n \"nls_key\": \"CATALOG_DESCRIPTION\",\n \"nls_value\": \"Displays if the data source includes catalog details.\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_CATALOG\",\n \"nls_value\": \"Catalog\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 8,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"905\",\n \"header_name\": \"TableName\",\n \"header_description\": {\n \"nls_key\": \"TABLENAME_DESCRIPTION\",\n \"nls_value\": \"Table name in the data source.\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_TABLENAME\",\n \"nls_value\": \"Table\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 9,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"906\",\n \"header_name\": \"ColumnName\",\n \"header_description\": {\n \"nls_key\": \"COLUMNNAME_DESCRIPTION\",\n \"nls_value\": \"Column name in the data source.\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_COLUMNNAME\",\n \"nls_value\": \"Column\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 10,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"902\",\n \"header_name\": \"ProcessDescription\",\n \"header_description\": {\n \"nls_key\": \"PROCESSDESCRIPTION_DESCRIPTION\",\n \"nls_value\": \"Classification process description.\"\n },\n 
\"field_name\": {\n \"nls_key\": \"CLASSIFICATION_PROCESSDESCRIPTION\",\n \"nls_value\": \"Description\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 11,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"908\",\n \"header_name\": \"ClassificationName\",\n \"header_description\": {\n \"nls_key\": \"CLASSIFICATIONNAME_DESCRIPTION\",\n \"nls_value\": \"Classification of the policy rule as defined by the user.\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_CLASSIFICATIONNAME\",\n \"nls_value\": \"Classification name\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 12,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"907\",\n \"header_name\": \"RuleDescription\",\n \"header_description\": {\n \"nls_key\": \"RULEDESCRIPTION_DESCRIPTION\",\n \"nls_value\": \"Classification rules use regular expressions, Luhn algorithms, and other criteria to define rules for matching content when applying a classification policy.\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_RULEDESCRIPTION\",\n \"nls_value\": \"Classification rule\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 32000,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 13,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"909\",\n \"header_name\": \"Category\",\n \"header_description\": {\n \"nls_key\": \"CATEGORY_DESCRIPTION\",\n \"nls_value\": \"Categories are used to group policy violations for both reporting and incident management.\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_CATEGORY\",\n \"nls_value\": \"Category\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 255,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 14,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n },\n {\n \"header_id\": \"920\",\n \"header_name\": \"Comprehensive\",\n \"header_description\": {\n \"nls_key\": \"COMPREHENSIVE_DESCRIPTION\",\n \"nls_value\": \"Classification based on random sampling of data\"\n },\n \"field_name\": {\n \"nls_key\": \"CLASSIFICATION_COMPREHENSIVE\",\n \"nls_value\": \"Comprehensive\"\n },\n \"header_type\": \"STRING\",\n \"header_type_length\": 50,\n \"table_name\": \"CLASSIFICATION\",\n \"sequence\": 15,\n \"aggregation_type\": \"VALUE\",\n \"header_data_type\": \"TABLE_HEADER\"\n }\n ],\n \"runtime_parameters\": [\n {\n \"key\": \"QUERY_FROM_DATE\",\n \"label\": \"Time From\",\n \"runtime_parameter_type\": \"DATE_UTC\",\n \"runtime_parameter_type_length\": 10,\n \"operator_type\": \"GREATER_THAN_OR_EQUAL\"\n },\n {\n \"key\": \"QUERY_TO_DATE\",\n \"label\": \"Time To\",\n \"runtime_parameter_type\": \"DATE_UTC\",\n \"runtime_parameter_type_length\": 10,\n \"operator_type\": \"LESS_THAN_OR_EQUAL\"\n }\n ],\n \"report_filters\": {\n \"option_type\": \"AND\",\n \"filters_array\": [\n {\n \"condition\": {\n \"filter_id\": 1,\n \"sequence\": -1,\n \"header_id\": \"911\",\n \"header_name\": \"StartDateTimeUTC\",\n \"field_nls_translation_key\": \"CLASSIFICATION_STARTDATETIMEUTC\",\n \"table_name\": \"CLASSIFICATION\",\n \"parameter_type\": \"PARAMETER\",\n \"operator_type\": \"GREATER_THAN_OR_EQUAL\",\n \"values\": [\n \"QUERY_FROM_DATE\"\n ]\n }\n },\n {\n \"condition\": {\n \"filter_id\": 2,\n \"sequence\": -1,\n \"header_id\": 
\"911\",\n \"header_name\": \"StartDateTimeUTC\",\n \"field_nls_translation_key\": \"CLASSIFICATION_STARTDATETIMEUTC\",\n \"table_name\": \"CLASSIFICATION\",\n \"parameter_type\": \"PARAMETER\",\n \"operator_type\": \"LESS_THAN_OR_EQUAL\",\n \"values\": [\n \"QUERY_TO_DATE\"\n ]\n }\n }\n ]\n },\n \"default_timestamp_header_id\": \"911\",\n \"selected_timestamp_header_id\": \"911\"\n },\n \"runtime_parameter_list\": [\n {\n \"key\": \"QUERY_FROM_DATE\",\n \"label\": \"Enter Period From\",\n \"runtime_parameter_type\": \"DATE_UTC\",\n \"runtime_parameter_type_length\": 10,\n \"operator_type\": \"GREATER_THAN_OR_EQUAL\",\n \"value\": \"\"\n },\n {\n \"key\": \"QUERY_TO_DATE\",\n \"label\": \"Enter Period To\",\n \"runtime_parameter_type\": \"DATE_UTC\",\n \"runtime_parameter_type_length\": 10,\n \"operator_type\": \"LESS_THAN_OR_EQUAL\",\n \"value\": \"\"\n }\n ],\n \"without_limit\": False\n}\n","sub_path":"fn_guardium_insights_integration/fn_guardium_insights_integration/util/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":15693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"611519676","text":"from configparser import ConfigParser\nfrom ss_processor import SsProcessor\n\ns = SsProcessor()\n\n\ndef find_good_filter(self, target_image, target_item, known_value, try_max=100):\n #best values currently set for screenshot0005\n\n still_trying = True\n\n try_count = 0\n\n #value ranges\n # percent_range = iter(range(250,400))\n # zoom_range = iter(range(1,40))\n # radius_range = iter(range(1,20))\n # threshold_range = iter(range(1,20))\n\n while still_trying and try_count <= try_max:\n #unsharp radius\n # us_radius = random.randint(1, 10)\n us_radius = 2\n\n #unsharp percent\n us_percent = 250\n\n #unsharp threshold\n # us_threshold = random.randint(0, 5)\n # us_threshold = next(threshold_range)\n us_threshold = 2\n\n #zoom multiplier\n # zoom = random.randint(1, 30)\n zoom = 20\n\n img, val = self.scrape_data(target_image, target_item, us_radius=us_radius, us_percent=us_percent, us_threshold=us_threshold, zoom=zoom)\n if val == known_value:\n still_trying = False\n print(\"VALUE FOUND!\")\n\n print(try_count, \"TRYING >\", \"radius:\", us_radius, \"percent:\", us_percent, \"threshold:\", us_threshold, \"zoom:\", zoom, \" >> \", val)\n\n\n try_count += 1\n\n\ndef test_many_images(config_file, zoom, target_white):\n config = ConfigParser()\n config.read(config_file)\n\n for section_name in config.sections():\n ss_id = section_name.split(':')[0]\n item_number = int(section_name.split(':')[1])\n\n img = s.get_item_images(\"ScreenShots/ScreenShot\"+ss_id+\".jpg\", item_number, zoom)[0]\n img_bw = s.replace_whites(img, target_white)\n count = s.get_value(img_bw, digit=True)\n\n print(count, count == config[section_name]['count'], \"[\"+config[section_name]['count']+\"]\")\n\n\ndef count_colors(img):\n img_gray = img.convert(\"L\")\n pix = img_gray.load()\n count_dict = {}\n\n for i in range(img_gray.size[0]):\n for j in range(img_gray.size[1]):\n if pix[i, j] in count_dict.keys():\n count_dict[pix[i, j]] += 1\n else:\n count_dict[pix[i, j]] = 1\n\n return count_dict\n\n","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"235828915","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 23 17:35:29 2018\n\n@author: nicolamartin\n\"\"\"\n\nid_study_case=0\ndef main() :\n 
print (\"je suis appelé\")\n\n # Always import sys because it is needed everywhere\n import sys\n import copy \n import os\n import pandas as pd \n import numpy as np\n sys.path.append('./toolkit/')\n import class_ as cl\n import functions as fct\n import time\n import config\n from sqlalchemy import create_engine\n from config import engine\n import importlib\n import Ranking\n import Echangeurs\n\n # =============================================================================\n # Read data from database\n # =============================================================================\n \n \n # load param_sig\n (signaturestr, save_parameters)=config.get_params_sig()\n \n # loading parametrization parameters\n query=\"SELECT * FROM parametrization ORDER BY id\"\n result = engine.execute(query)\n parametrization={}\n for row in result:\n parametrization[row['name']]=row['value']\n \n # Load in variable\n Tpinch_param = int(parametrization['param_tpinch'])\n \n \n # count number of flux\n nbrFluxHot = engine.execute(\"SELECT count(*) FROM flux WHERE hotcold = 'hot' AND ID_Study_Case =\" +str(id_study_case))\n nbrFluxHot = nbrFluxHot.fetchone()[0]\n nbrFluxCold = engine.execute(\"SELECT count(*) FROM flux WHERE hotcold = 'cold' AND ID_Study_Case =\" +str(id_study_case))\n nbrFluxCold = nbrFluxCold.fetchone()[0]\n nflux = engine.execute(\"SELECT count(*) FROM flux WHERE ID_Study_Case =\" +str(id_study_case))\n nflux = nflux.fetchone()[0]\n \n print(\"Number of cold flux is : \"+str(nbrFluxCold))\n print(\"Number of hot flux is : \"+str(nbrFluxHot))\n print(\"Total number of flux is : \"+str(nflux))\n \n query=\"SELECT flux_id, tempIn, timestamp FROM raw_TS LEFT JOIN time ON (raw_TS.time_id=time.id)\"\n \n df_temp = pd.read_sql_query(query, engine)\n \n #%%\n # Create flux object from df\n \n fluxIndexList = pd.read_sql_query(\"SELECT id FROM flux WHERE ID_Study_Case =\" +str(id_study_case), engine).values.flatten().tolist()\n lflux = []\n for id_ in fluxIndexList :\n ff = cl.flux(\n id=id_, \n name=engine.execute('SELECT name FROM flux WHERE id={}'.format(id_)).fetchone()[0], \n exchanger=\"ech1\", \n type=engine.execute('SELECT media FROM flux WHERE id={}'.format(id_)).fetchone()[0], \n timeserieIn=pd.read_sql_query(\"SELECT tempIn FROM raw_TS LEFT JOIN time ON (raw_TS.time_id=time.id) WHERE flux_id={}\".format(id_), engine).values.flatten().tolist(), \n timeserieOut=pd.read_sql_query(\"SELECT tempOut FROM raw_TS LEFT JOIN time ON (raw_TS.time_id=time.id) WHERE flux_id={}\".format(id_), engine).values.flatten().tolist(),\n pressure=pd.read_sql_query(\"SELECT Pression FROM raw_TS LEFT JOIN time ON (raw_TS.time_id=time.id) WHERE flux_id={}\".format(id_), engine).values.flatten().tolist(),\n d=pd.read_sql_query(\"SELECT D FROM raw_TS LEFT JOIN time ON (raw_TS.time_id=time.id) WHERE flux_id={}\".format(id_), engine).values.flatten().tolist(),\n sensor=[\"capt1\",\"capt2\"],\n hotCold=engine.execute('SELECT hotcold FROM flux WHERE id={}'.format(id_)).fetchone()[0],\n flow=pd.read_sql_query(\"SELECT flow FROM raw_TS LEFT JOIN time ON (raw_TS.time_id=time.id) WHERE flux_id={}\".format(id_), engine).values.flatten().tolist(), \n Cp=pd.read_sql_query(\"SELECT Cp FROM raw_TS LEFT JOIN time ON (raw_TS.time_id=time.id) WHERE flux_id={}\".format(id_), engine).values.flatten().tolist()\n )\n lflux.append(ff)\n lfluxHot=[]\n lfluxCold=[]\n \n for flux in lflux :\n if flux.hotCold == 'hot' :\n lfluxHot.append(flux)\n elif flux.hotCold == 'cold' :\n lfluxCold.append(flux)\n else:\n print(\"Type of flux not 
understood : \"+str(flux.hotCold))\n sys.exit(1)\n if len(lfluxCold) +len(lfluxHot) != len(lflux):\n print(\"Flux must be either hot or cold\")\n sys.exit(1)\n \n #%%\n # =============================================================================\n # Set user Variables\n # =============================================================================\n \n pid=os.getpid()\n engine.execute(\"UPDATE configuration SET value = ? WHERE name = 'current_simulation_pid'\",(pid))\n engine.execute(\"UPDATE configuration SET value = 1 WHERE name = 'status'\")\n importlib.reload(config)\n print('SET SIMUL STATUS=1')\n print('SET SIMUL PID='+str(pid))\n \n verbose = True\n logfile = False\n logFileName = 'output_anagreen_main.txt'\n \n Tpinch = Tpinch_param\n \n if logfile == True :\n sys.stdout = open(logFileName, 'w', encoding='utf8')\n \n \n #%%\n # =============================================================================\n # create all system configurations\n # =============================================================================\n \n network_list = fct.generate_networks(lfluxHot, lfluxCold)\n ranking_list = []\n exchanger_computed_data = []\n # =============================================================================\n # 2 ANALYSES DES FLUX\n # =============================================================================\n \n # TODO : implement flexible numsteps \n \n minNumStep = 10000\n for flux in lflux : \n if (len(flux.timeserieIn) == 0) or (len(flux.timeserieOut) == 0) : \n print(\"Missing value in the temp TS of flux {}\".format(flux.name))\n sys.exit(0)\n if len(flux.timeserieIn) < minNumStep :\n minNumStep = len(flux.timeserieIn)\n elif len(flux.timeserieOut) < minNumStep :\n minNumStep = len(flux.timeserieOut)\n \n \n numSteps = minNumStep\n \n energydf = pd.DataFrame(columns=[])\n wrongWayExchange = list()\n QList = list()\n costList = list()\n \n for currStep in range(numSteps) :\n # Write progress into database so it is displayed onto the screen\n progress = (currStep+1) / numSteps *100\n # not secured way of doing it\n engine.execute(\"UPDATE configuration SET value = {} WHERE name = 'progress'\".format(progress))\n # time.sleep(3*3)\n print(progress)\n \n if verbose == True :\n print(\"####################\")\n print(\" STEP \"+str(currStep))\n print(\"####################\")\n step = currStep\n \n lflux_shifted = fct.shift_temp(lflux, Tpinch)\n nchaud, nfroid = fct.compute_nflux(lflux)\n temp_list = fct.make_temp_list(lflux_shifted, step)\n \n # compute cascade \n casc, MER = fct.compute_cascade(lflux_shifted, step=step, verbose=True)\n # plot cascade\n # fct.plot_cascade(casc, temp_list)\n \n cold_utility = casc[-1]\n hot_utility = casc[0]\n if verbose == True :\n print(\"L'utilite froide est de : {} KW\".format(cold_utility))\n print(\"L'utilite chaude est de : {} KW\".format(hot_utility))\n print(\"Le minimum d'energie requis (MER) est de : {} KW\".format(MER))\n # print(\"L'énergie totale récupérable ou exergie est : {} KW\".format(casc))\n \n # =============================================================================\n # Analyse the recycled or stored energy for all the created HEN\n # =============================================================================\n \n # Loop over all networks\n cpeau = 4180\n i= 0\n for hen_id, hen in enumerate(network_list) :\n\n totalRecycledEnergy = 0\n totalStoredEnergy = 0\n totRecycledEnergyFromStorage = 0\n storedEnergy = 0\n if verbose == True :\n print(\"======================= \\nProcessing Network #{}# 
\\n=======================\".format(hen_id))\n # loop over each couple of flux\n for couple in hen :\n hotFlux = next((i for i in couple if i.hotCold =='hot'), None)\n coldFlux = next((i for i in couple if i.hotCold =='cold'), None)\n if verbose == True :\n print(\"==> Assessing the exploitability of the couple : {} - {}\".format(couple[0].name,couple[1].name))\n # Computing Qf and QC\n Thi = hotFlux.timeserieIn[step]\n Tho = hotFlux.timeserieOut[step]\n deltaTCoupleHot = Thi - Tho\n Qh = deltaTCoupleHot * hotFlux.Cp[step] # TODO Cp = cp x flow NOT IMPLEMENT SO FAR\n Cp_f = coldFlux.Cp[step]\n Cp_c = hotFlux.Cp[step]\n Tci = coldFlux.timeserieIn[step]\n Tco = coldFlux.timeserieOut[step]\n deltaTCoupleCold = Tco - Tci\n Pc = hotFlux.pressure[step]\n Pf = coldFlux.pressure[step]\n Dc = hotFlux.d[step]\n Df = coldFlux.d[step]\n i = i + 1\n #### CODE ECHANGEUR ####\n\n \n #### PROVISOIR : on prend que la 5e température de sortie de chaque flux (état actuel : 5 lignes de température par flux dans raw_TS) ####\n #### Si on prend toutes les températures, la ranking list va faire 90 entrées pour un cas d'étude de 6 flux, aulieu de 18 (car 18*5=90) ####\n print(\"############ EXCHANGER FUNCTION #########################\")\n if step == (numSteps-1):\n print(\"############ RANKING###################################\")\n print((couple[0].id,couple[1].id))\n resultRanking = Ranking.Ranking.score(Tho,Tco,Pc,Pf,10000,Dc,Df,couple[0].id,couple[1].id)\n ranking_list.append(resultRanking)\n print(\"############ END RANKING###################################\")\n tab_data_exch=[]\n for id_exch in resultRanking[\"ID_Exchanger_Type\"]:\n if id_exch == 1: #coaxial\n data_Exch=Echangeurs.Echangeurs.Coaxial(Dc,Df,Cp_c,Cp_f,Thi,Tho,Tci,Tco)\n tab_data_exch.append((1,data_Exch))\n elif id_exch == 2: #tube\n data_Exch=Echangeurs.Echangeurs.Tubular(Dc,Df,Cp_c,Cp_f,Thi,Tho,Tci,Tco)\n tab_data_exch.append((2,data_Exch))\n #else: #plated\n \n #data_Exch=Echangeurs.Echangeurs.Plated()\n\n #left to implement\n \n\n \n exchanger_computed_data.append(tab_data_exch)\n \n \n print(\"############ END EXCHANGER FUNCTION #########################\")\n Qc = deltaTCoupleCold * coldFlux.Cp[step]\n QList.append([step, hen_id, str(couple[0].name+\"-\"+couple[1].name), Qc, Qh])\n if verbose == True :\n print (\"==++> Qh : {}\".format(Qh))\n print (\"==++> Qc : {}\".format(Qc))\n # studying different cases based on Qh and Qc diff\n if np.abs(Thi-Tci) < Tpinch : # test OK needs to take absolute value so we can proceed with other tests\n print(\"WARNING: difference of temperature ({}) between the two flux is bellow the pinch ({} deg)\".format(np.abs(Thi-Tci), Tpinch))\n print(\"WARNING: no energy will be recycled.\")\n recycledEnergy = 0\n # intervention d'une PAC possible ici \n else :\n #Test OK\n if Thi < Tci : # risk of exchanging in the wrong direction\n print (\"WARNING: heat exchange going is the wrong direction.\")\n wrongWayExchange.append([step, hen_id, 1])\n recycledEnergy = 0\n else :\n #Test OK\n if Qh < 0 :\n print(\"WARNING: Flux {} inexploitable.\".format(hotFlux.name))\n print(\"WARNING: Qh ({}) < 0\".format(Qh))\n recycledEnergy = 0\n #Test OK\n elif Qc < 0 :\n print(\"WARNING: Flux {} inexploitable.\".format(coldFlux.name))\n print(\"WARNING: Qc ({}) < 0\".format(Qc))\n # Consider possibility to improve. Change into hot flux and store or recycle energy. 
\n recycledEnergy = 0\n #TEST TODO\n if Qc >= Qh : # means that there is more need of cold than there is of heat offer \n print(\"Qc >= Qh\")\n # all the energy can be directly recycled without need for storage\n # BUT the energy left Qc - Qh is a need which is not satisfied\n # thereforer it should be takin into storage or added as a cost\n recycledEnergy = Qh\n # if storage is empty add as a cost\n # if storedEnergy >= Qc-Qh :\n # recycledEnergyFromStorage = Qc-Qh\n # if storage as enough energy take from storage and update storage\n # else :\n # costList.append = [step, hen_id, Qc-Qh]\n \n elif Qc < Qh : # means that there is more heat offer than there is cold need => potential storage\n print(\"Qc < Qh\")\n recycledEnergy = Qc\n storedEnergy = Qh - Qc # only the remaining energy is eligible for storage\n totalRecycledEnergy += recycledEnergy\n totalStoredEnergy += storedEnergy\n # totRecycledEnergyFromStorage += recycledEnergyFromStorage\n if verbose == True :\n print(\"The total amount of saved energy for this HEN of flux is : {}\".format(totalRecycledEnergy))\n print(\"The total amount of stored energy for this HEN of flux is : {}\".format(totalStoredEnergy))\n # fill the dataframe with freshly computed results\n temp = pd.DataFrame([{\"step\" : step, \"MER\" : MER , \"networkNumber\": hen_id, \n \"totalRecycledEnergy\": totalRecycledEnergy,\n \"totalStoredEnergy\": totalStoredEnergy,\n \"totRecycledEnergyFromStorage\" : totRecycledEnergyFromStorage,\n \"MER\" : MER}])\n energydf = energydf.append(temp, ignore_index=True)\n # savedEnergyList.append([hen_id, totalRecycledEnergy])\n \n # sort on the HEN number so it is easier to read\n energydf = energydf.sort_values('networkNumber')\n \n # Create a DF out of a list for easier visualisation\n Qdf = pd.DataFrame(columns=['step', 'hen_id', 'couple', 'Qc', 'Qh'], data=QList)\n wrongWayExchangedf = pd.DataFrame(columns=['step', 'HENID' ,'wrongWayExchangeNumber'], data=wrongWayExchange)\n \n \n # get the total saved energy per HEN and stores it into a dataframe. \n # this is only done for convenient printing.\n totSavedEnergyPerHendf = pd.DataFrame(columns=[\"HEN number\", \"TotalSavedEnergy\" ])\n \n # =============================================================================\n # WHAT TO DO WHEN ALGO DONE\n # =============================================================================\n \n \n # look in run.py for values of status\n engine.execute(\"UPDATE configuration SET value = 2 WHERE name = 'status'\")\n importlib.reload(config)\n \n \n\n# for hen in range(len(network_list)) :\n# totalSavedEnergy = savedEnergydf.savedEnergy[savedEnergydf.networkNumber==hen].sum()\n# temp = [{\"HEN number\" : hen, \"TotalSavedEnergy\" : totalSavedEnergy}]\n# totSavedEnergyPerHendf = totSavedEnergyPerHendf.append(temp)\n# \n #print(tabulate(totSavedEnergyPerHendf, headers='keys', tablefmt='psql'))\n ## =============================================================================\n ## Compute stored energie\n ## we consider that each couple of flux can store energy and that when storage\n ## is empty then it should be added as a cost. 
Nevertheless when the storage is\n ## used we can consider this energy as recycled energy, which should be added to\n ## the corresponding sum in the \n ## =============================================================================\n #hendf = pd.DataFrame(columns=[\"step\",\"networkNumber\", \"fluxCouple\", \"currStorage\", \"energyCost\", \"recycledEnergyFromStorage\"])\n #for idHen, hen in enumerate(network_list) :\n # print(\"# Studiyng network nbr #\"+str(idHen))\n # for idCouple, couple in enumerate(hen) :\n # print(\"# # Studiyng couple nbr #\"+str(idCouple))\n # prevStorage = 0\n # currStorage = 0\n # energyCost = 0\n # # find whose hot or cold in the couple studied\n # for currStep in range(numSteps) :\n # print(\"# # # Studiyng step nbr #\"+str(currStep))\n # print(\"temp chaude flux chaud \"+str(couple[0].timeserieIn[currStep]))\n # print(\"temp froide flux chaud \"+str(couple[0].timeserieOut[currStep]))\n # print(\"temp chaude flux froid\"+str(couple[1].timeserieIn[currStep]))\n # print(\"temp froide flux froid \"+str(couple[1].timeserieOut[currStep]))\n #\n # # probably need to be implemented the check that DT hot correspond to hot flux \n # deltaTHot = couple[0].timeserieIn[currStep] - couple[0].timeserieOut[currStep] \n # deltaTCold = couple[1].timeserieOut[currStep] - couple[1].timeserieIn[currStep]\n # print(\"Delta hot \"+str(deltaTHot))\n # print(\"Delta cold \"+str(deltaTCold))\n # # here is missing the possibility to have a varying Cp or flow\n # # one just needs to add [currStep] to take it into account\n # Qh = deltaTHot * couple[0].flow * couple[0].Cp\n # Qc = deltaTCold * couple[1].flow * couple[1].Cp\n # print(\"Qh \"+str(Qh))\n # print(\"Qc \"+str(Qc))\n # DQ = Qh - Qc\n # print(\"DQ \"+str(DQ))\n ## currStorage = DQ + prevStorage\n ## print(\"currStorage :\"+str(currStorage))\n ## prevStorage = currStorage\n # if prevStorage + DQ < 0 : # has to be a + cause DQ is neg when consuming energy\n # # There is not enough in the storage\n # # Take all the energy available and mark the rest as cost\n # print(\"info: not enough in storage\")\n # print(\"info: Current storage is {}\".format(currStorage))\n # print(\"info: Required energy is {}\".format(DQ))\n # energyCost = np.abs(DQ) - prevStorage\n # recycledEnergyFromStorage = prevStorage\n # # since we took all the energy back we set currStorage to 0\n # currStorage = 0\n # elif prevStorage + DQ >= 0 :\n # print(\"info: enough energy in storage\")\n # print(\"info: Current storage is {}\".format(currStorage))\n # print(\"info: Required energy is {}\".format(DQ))\n # print(\"info: recycled energy is {}\".format(DQ))\n # recycledEnergyFromStorage = np.abs(DQ)\n # currStorage = prevStorage + DQ\n # else :\n # print(\"ALERT: A problem occured while computing the storage state.\")\n # temp = pd.DataFrame([{\"step\" : currStep, \"networkNumber\" : idHen, \"fluxCouple\" : couple, \"currStorage\" : currStorage, \"energyCost\" : energyCost, \"recycledEnergyFromStorage\" : recycledEnergyFromStorage}])\n # hendf = hendf.append(temp)\n #\n #TotalStoragePerHen = []\n #for idHen in range(len(network_list)) : \n # TotalStoragePerHen.append([idHen,hendf[hendf.networkNumber==idHen].currStorage.tolist()[-1]]) \n # \n #def plot_storage_hen(hendf, nbrOfHen) :\n # f, ax = plt.subplots()\n # for henId in range(nbrOfHen) :\n # data = hendf[hendf.networkNumber==henId].currStorage.reset_index(drop=True)\n # ax.plot(data, label='HEN #{}'.format(henId))\n # plt.title(\"Current energy stored\")\n # plt.ylabel(\"Energy\")\n # 
plt.xlabel(\"Step\")\n # plt.legend()\n # f, ax = plt.subplots()\n # for henId in range(nbrOfHen) :\n # data = hendf[hendf.networkNumber==henId].recycledEnergyFromStorage.reset_index(drop=True)\n # ax.plot(data, label='HEN #{}'.format(henId))\n # plt.title(\"Energy recycled from storage\")\n # plt.ylabel(\"Energy\")\n # plt.xlabel(\"Step\")\n # plt.legend()\n # f, ax = plt.subplots()\n # for henId in range(nbrOfHen) :\n # data = hendf[hendf.networkNumber==henId].energyCost.reset_index(drop=True)\n # ax.plot(data, label='HEN #{}'.format(henId))\n # plt.title(\"Cost in energy\")\n # plt.ylabel(\"Energy\")\n # plt.xlabel(\"Step\")\n # plt.legend()\n # return\n \n # =============================================================================\n # LOGGING IN FILES SO WE CAN RE READ\n # =============================================================================\n \n # energydf.to_csv(\"energydf.csv\")\n # for objId, obj in enumerate(lflux) :\n # fct.save_obj(obj, \"flux{}.obj\".format(objId))\n #print(exchanger_computed_data)\n #print(len(exchanger_computed_data))\n #print(ranking_list)\n return energydf, network_list, signaturestr, ranking_list, exchanger_computed_data\n \n","sub_path":"anagreen_main.py","file_name":"anagreen_main.py","file_ext":"py","file_size_in_byte":22383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"171969158","text":"# Model Params\nT = 20\nN = 50\ninput_H = 224 \ninput_W = 224 \nfeatmap_H = (input_H // 32)\nfeatmap_W = (input_W // 32)\nvocab_size = 8803\nembed_dim = 1000\nlstm_dim = 1000\nmlp_hidden_dims = 500\n\n# Training Params\ngpu_id = 0\nmax_iter = 25000\n\nweights = '/x/dhpseth/VGG_ILSVRC_16_layers.caffemodel'\nfix_vgg = True # set False to finetune VGG net\nvgg_dropout = False\nmlp_dropout = False\n\n# Data Params\ndata_provider = 'referit_data_provider'\ndata_provider_layer = 'ReferitDataProviderLayer'\n\ndata_folder = './referit/data/train_batch_det/'\ndata_prefix = 'referit_train_det'\n\n\n","sub_path":"det_model/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"239465031","text":"\n\n\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nimport requests\n#url = 'http://www.covers.com/pageLoader/pageLoader.aspx?page=/data/mlb/teams/pastresults/2009/team2962.html'\n\n\ndef parsePage(url,teamName):\n\n r = requests.get(url)\n data = r.text\n soup = BeautifulSoup(data, \"lxml\")\n table = soup.findAll('table', class_='data')\n\n\n if len(table) > 1:\n rows = table[1].find_all('tr')\n else:\n rows = table[0].find_all('tr')\n data2 = []\n for row in rows:\n data = row.find_all(\"td\")\n for x in range(7):\n data[x] = data[x].get_text().replace('\\n', ' ').replace('\\r', '').replace('\\t', '').replace(' ',\n '').replace(\"'\",\n '')\n data.append(teamName)\n data2.append(list(data))\n df = pd.DataFrame(data2)\n df = df.rename(columns=df.iloc[0])\n df = df.reindex(df.index.drop(0))\n df = df.reset_index()\n df.drop('index', inplace=True, axis=1)\n return df\n\n\n","sub_path":"soup.py","file_name":"soup.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"339077440","text":"\n# 获取本机IP地址,只输出10.91开头的那个\nimport socket\ndef GetLocalIPByPrefix(prefix):\n \"\"\" 多网卡情况下,根据前缀获取IP(Windows 下适用) \"\"\"\n localIP = ''\n for ip in 
socket.gethostbyname_ex(socket.gethostname())[2]:\n if ip.startswith(prefix):\n localIP = ip\n\n return localIP\n\nprint(\" \")\nprint(GetLocalIPByPrefix('10.91'))\nprint(\" \")\nprint(\"按回车关闭\")\ninput()\n","sub_path":"getip.py","file_name":"getip.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"623644026","text":"import secrets\nimport os\nfrom cryptography.hazmat.primitives.kdf.hkdf import HKDF\nfrom cryptography.hazmat.primitives import hashes, hmac\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography import exceptions\n\n\n\"\"\"\n Generate nonce\n\n Returns\n 16-byte-long-random bytearray to be used as nonce\n\"\"\"\n\ndef generateNonce():\n return secrets.token_bytes(16)\n\n\n\"\"\"\n Encrypt message with the provided Public Key\n \n The padding part is extracted from an example in Cryptography's Docs\n OAEP padding is the recommended choice for new protocols/applications.\n \n Args:\n publicKey: Cryptography's Serialized Public Key object\n message: message to be encrypted \n \n Returns:\n Encrypted message\n\"\"\"\n\ndef encryptWithPublicKey(publicKey, message):\n\n return publicKey.encrypt(\n message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n\n\n\"\"\"\n Decrypt packets encrypted with the Public Key schemes\n\n The padding part is extracted from an example in Cryptography's Docs\n OAEP padding is the recommended choice for new protocols/applications.\n \n Args:\n packet: Encrypted packet to be decrypted \n \n Returns:\n Decrypted message\n\"\"\"\n\n\ndef decryptWithPrivateKey(privateKey, message):\n\n return privateKey.decrypt(\n message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n\n\n\"\"\"\n Verify if the packet was signed with the provided Public Key\n\n Args:\n publicKey: Bytearray of the Sender's Public Key\n message: Message signed with corresponding Sender's Private Key\n signature: Signature of the message\n \n Returns:\n Wheter it was signed by the corresponding Private Key or not.\n\"\"\"\n\n\ndef verifySignature(publicKey, message, signature):\n\n matchesSignature = True\n\n # Load Client's Public Key Object\n if isinstance(publicKey, bytes):\n publicKey = serialization.load_pem_public_key(publicKey)\n\n try:\n publicKey.verify(\n signature,\n message,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n except exceptions.InvalidSignature:\n matchesSignature = False\n\n return matchesSignature\n\n\n\"\"\"\n Apply a message authentication code to a message using a key. To create the tag\n we are using the SHA256 Hash function.\n\n Args:\n key: A 32 byte string\n message: A common string\n \n Returns:\n The tag, A.K.A. MAC (Message Authentication Code)\n\"\"\"\n\n\ndef createTag(key, message):\n h = hmac.HMAC(key, hashes.SHA256())\n\n if isinstance(message, str):\n message = message.encode()\n\n h.update(message)\n\n # return b\"\".join([message, h.finalize()]) # to get the message with the MAC appended\n return h.finalize() # to get only the MAC\n\n\n\"\"\"\n Verify if a message's MAC is valid, given a pre-shared key. 
To create the tag\n we are using the SHA256 Hash function.\n\n Args:\n key: A 32 byte string\n message: A byte string\n sentMAC: A byte string\n \n Returns:\n It returns True if the MAC is valid or False if it isn't\n\"\"\"\n\n\ndef verifyTag(key, sentMessage, sentTag):\n h = hmac.HMAC(key, hashes.SHA256())\n\n messageAsBytes = sentMessage\n h.update(messageAsBytes)\n\n try:\n h.verify(sentTag)\n return True\n except exceptions.InvalidSignature:\n return False\n\n\n\"\"\"\n Apply a SHA256 function to create a Digest of the message.\n \n Args: \n message: A bytearray to be digested\n\n Return:\n A message digest\n\"\"\"\ndef createDigest(message):\n\n digest = hashes.Hash(hashes.SHA256())\n \n if isinstance(message, str):\n message = message.encode()\n\n digest.update(message)\n return digest.finalize()\n\n\"\"\"\n Verify if the message's digest matches the given tag \n \n Args: \n message: A bytearray to be checked against the tag\n tag: A bytearray representing the tag to be verified against\n\n Return:\n Wheter the tag matches or not the digest.\n\"\"\"\ndef verifyDigest(message, tag):\n \n return tag == createDigest(message)\n\n\n\"\"\"\n Use the master key to create two others keys, that are going to be used to\n encryption and MAC\n\n Args:\n masterKey: A 32 byte key in byte format\n salt: A 16 byte salt in byte format\n Returns:\n Two 32 byte keys[Symmetric, HMAC]\n\"\"\"\n\n\ndef generateKeysWithMS(masterKey, salt):\n hkdf = HKDF(\n algorithm=hashes.SHA256(),\n length=64,\n salt=salt,\n info=b\"\",\n )\n bigKey = hkdf.derive(masterKey)\n return bigKey[:(len(bigKey)//2)], bigKey[(len(bigKey)//2):]\n\n\n\"\"\"\n Generate a random master key of 256 bits (32 Bytes)\n\n Returns:\n It returns a master key in byte format\n\"\"\"\n\n\ndef generateMasterKey():\n return os.urandom(32)\n\n\n\"\"\"\n Generate a 32 byte key to be used in MAC\n\n Returns:\n It returns a key in byte format\n\"\"\"\n\n\ndef generateMACKey():\n return os.urandom(32)\n\n\n\"\"\"\n Generate a 16 byte salt\n \n Returns:\n Salt\n\"\"\"\n\n\ndef generateSalt():\n return os.urandom(16)\n\n\n\"\"\"\n Sign message with Client's Private Key\n\n Args:\n message: message to be signed\n\n Returns:\n Message Signature\n\"\"\"\n\n\ndef signMessage(privateKey, message):\n\n return privateKey.sign(\n message,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n\n\"\"\"\n Generate a Symmetric Key.\n \n Returns:\n A Symetric Key \n\"\"\"\n\n\ndef generateSymmetricKey():\n\n return os.urandom(32)\n\n\n\"\"\"\n Encrypt a message with a Key.\n Args:\n key: A key that will be used for encryption\n nonce: Nonce used to encrypt the message in bytes\n message: Message to be encrypted in bytes\n \n Returns:\n Encrypted message\n\"\"\"\n\n\ndef encryptMessageWithKeyAES(key, nonce, message):\n\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce))\n encryptor = cipher.encryptor()\n\n return encryptor.update(message) + encryptor.finalize()\n\n\n\"\"\"\n Decrypt a message with Symmetric Key.\n Args:\n key: A key that will be used for decryption\n nonce: Nonce used to encrypt the message in bytes\n message: Encrypted message in bytes\n \n Returns:\n Message decrypted \n\"\"\"\ndef decryptMessageWithKeyAES(key, nonce, message):\n\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce))\n decryptor = cipher.decryptor()\n\n return decryptor.update(message) + 
decryptor.finalize()\n","sub_path":"app/client/biblioteca/cripto.py","file_name":"cripto.py","file_ext":"py","file_size_in_byte":7024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"172440606","text":"import inspect\n\nimport numpy as np\nimport pytest\n\nfrom tensorflow.keras import Model\n\nfrom scikeras.wrappers import BaseWrapper, KerasClassifier, KerasRegressor\n\nfrom .mlp_models import dynamic_classifier, dynamic_regressor\n\n\nkeras_classifier_base_meta_set = {\n \"X_dtype_\",\n \"y_dtype_\",\n \"classes_\",\n \"target_type_\",\n \"n_classes_\",\n \"n_features_in_\",\n \"X_shape_\",\n \"n_outputs_expected_\",\n \"y_ndim_\",\n \"n_outputs_\",\n \"feature_encoder_\",\n \"target_encoder_\",\n}\n\nkeras_regressor_base_meta_set = {\n \"X_shape_\",\n \"n_outputs_expected_\",\n \"X_dtype_\",\n \"n_outputs_\",\n \"y_dtype_\",\n \"y_ndim_\",\n \"n_features_in_\",\n \"target_type_\",\n \"feature_encoder_\",\n \"target_encoder_\",\n}\n\n\n@pytest.mark.parametrize(\n \"wrapper, builder, expected_meta\",\n [\n (KerasClassifier, dynamic_classifier, keras_classifier_base_meta_set,),\n (KerasRegressor, dynamic_regressor, keras_regressor_base_meta_set,),\n ],\n)\ndef test_routing_basic(wrapper, builder, expected_meta):\n n, d = 20, 3\n n_classes = 3\n X = np.random.uniform(size=(n, d)).astype(float)\n y = np.random.choice(n_classes, size=n).astype(int)\n\n foo_val = object()\n\n def build_fn(hidden_layer_sizes, foo, compile_kwargs, params, meta):\n assert set(params.keys()) == set(est.get_params().keys())\n assert set(meta.keys()) == expected_meta\n assert set(compile_kwargs.keys()).issubset(wrapper._compile_kwargs)\n assert foo is foo_val\n return builder(\n hidden_layer_sizes=hidden_layer_sizes,\n compile_kwargs=compile_kwargs,\n meta=meta,\n )\n\n est = wrapper(model=build_fn, model__hidden_layer_sizes=(100,), model__foo=foo_val)\n est.fit(X, y)\n\n est = wrapper(model=build_fn, model__hidden_layer_sizes=(100,), foo=foo_val)\n est.fit(X, y)\n\n\n@pytest.mark.parametrize(\n \"wrapper, builder, expected_meta\",\n [\n (KerasClassifier, dynamic_classifier, keras_classifier_base_meta_set,),\n (KerasRegressor, dynamic_regressor, keras_regressor_base_meta_set,),\n ],\n)\ndef test_routing_kwargs(wrapper, builder, expected_meta):\n \"\"\"Tests that special parameters are passed if\n build_fn accepts kwargs.\n \"\"\"\n n, d = 20, 3\n n_classes = 3\n X = np.random.uniform(size=(n, d)).astype(float)\n y = np.random.choice(n_classes, size=n).astype(int)\n\n def build_fn(*args, **kwargs):\n assert len(args) == 0, \"No *args should be passed to `build_fn`\"\n assert tuple(kwargs.keys()) == (\n \"hidden_layer_sizes\",\n \"meta\",\n \"compile_kwargs\",\n \"params\",\n ), \"The number and order of **kwargs passed to `build_fn` should be fixed\"\n assert set(kwargs[\"meta\"].keys()) == expected_meta\n assert set(kwargs[\"compile_kwargs\"].keys()).issubset(wrapper._compile_kwargs)\n kwargs.pop(\"params\") # dynamic_classifier/regressor don't accept it\n return builder(*args, **kwargs)\n\n est = wrapper(model=build_fn, model__hidden_layer_sizes=(100,))\n est.fit(X, y)\n\n\n@pytest.mark.parametrize(\"dest\", [\"fit\", \"compile\", \"predict\"])\ndef test_routing_sets(dest):\n accepted_params = set(inspect.signature(getattr(Model, dest)).parameters.keys()) - {\n \"self\",\n \"kwargs\",\n }\n known_params = getattr(BaseWrapper, f\"_{dest}_kwargs\")\n assert known_params.issubset(accepted_params)\n\n\ndef test_routed_unrouted_equivalence():\n \"\"\"Test that 
`hidden_layer_sizes` and `model__hidden_layer_sizes`\n both work.\n \"\"\"\n n, d = 20, 3\n n_classes = 3\n X = np.random.uniform(size=(n, d)).astype(float)\n y = np.random.choice(n_classes, size=n).astype(int)\n\n clf = KerasClassifier(model=dynamic_classifier, model__hidden_layer_sizes=(100,))\n clf.fit(X, y)\n\n clf = KerasClassifier(model=dynamic_classifier, hidden_layer_sizes=(100,))\n clf.fit(X, y)\n","sub_path":"tests/test_param_routing.py","file_name":"test_param_routing.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"13200998","text":"# import time\n# import serial\nimport numpy as np\nfrom pytweening import easeInOutQuint, easeOutSine\nfrom scipy.misc import derivative\nfrom scipy.interpolate import interp1d\nfrom raspberryturk.embedded.motion.arm_movement_engine import ArmMovementEngine\nfrom pypose.ax12 import *\nfrom pypose.driver import Driver\n\nSERVO_1 = 16\nSERVO_2 = 10\nSERVO_3 = 15 # combined with SERVO_2\nSERVO_4 = 2\nSERVO_5 = 17 # combined with SERVO_4\nSERVO_6 = 11\n# 这里可以打包两组电机\nSERVOS = [SERVO_6, SERVO_4, SERVO_2, SERVO_1]\nMIN_SPEED = 20\nMAX_SPEED = 80\nRESTING_POSITION = (512, 512)\n\n# 输入一个list,返回list的第一位,和第二位二进制向左移8个0后对应的10进制的数\ndef _register_bytes_to_value(register_bytes):\n return register_bytes[0] + (register_bytes[1] << 8)\n\n\ndef _easing_derivative(p):\n d = 0.0\n try:\n d = derivative(easeInOutQuint(p), p, dx=1e-6)\n except ValueError:\n pass\n return d\n\n\ndef _adjusted_speed(start_position, goal_position, position):\n r = np.array([start_position, goal_position])\n clipped_position = np.clip(position, r.min(), r.max())\n f = interp1d(r, [0, 1])\n adj = _easing_derivative(f(clipped_position)) / _easing_derivative(0.5)\n amp = easeOutSine(abs(goal_position - +start_position) / 1023.0)\n return np.int_(MIN_SPEED + (MAX_SPEED - MIN_SPEED) * adj * amp)\n\n\nclass Arm(object):\n def __init__(self, port=\"/dev/ttyUSB0\"):\n self.driver = Driver(port=port)\n self.movement_engine = ArmMovementEngine()\n\n def close(self):\n self.driver.close() # 也不知道该咋改\n\n def recenter(self):\n self.move((512, 512))\n\n def return_to_rest(self): # position where dead pieces rest ?\n self.move_to_point([20, 13.5])\n\n def return_to_rest_new(self): # position where dead pieces rest ?\n self.move_new((512, 512, 512, 512))\n\n def move_new(self, goal_position):\n # start_position = self.current_position()\n self.set_speed([MIN_SPEED, MIN_SPEED])\n for i in SERVOS: # 遍历电机运动,打包两对电机 QAQ注意舵盘反向问题,改[i % 2]这个\n if i == SERVO_2:\n self.driver.syncWrite(P_GOAL_POSITION_L, [[SERVO_2, goal_position[1] % 256, goal_position[1] >> 8],\n [SERVO_3, goal_position[1] % 256, goal_position[1] >> 8]])\n elif i == SERVO_4:\n self.driver.syncWrite(P_GOAL_POSITION_L, [[SERVO_4, goal_position[2] % 256, goal_position[2] >> 8],\n [SERVO_5, goal_position[2] % 256, goal_position[2] >> 8]])\n elif i == SERVO_1:\n self.driver.setReg(i, P_GOAL_POSITION_L, [goal_position[0] % 256, goal_position[0] >> 8])\n elif i == SERVO_6:\n self.driver.setReg(i, P_GOAL_POSITION_L, [goal_position[3] % 256, goal_position[3] >> 8])\n '''while self._is_moving(): # 控制运动速度变化\n position = self.current_position()\n speed = [_adjusted_speed(start_position[i % 2], goal_position[i % 2], position[i % 2]) for i in SERVOS]\n self.set_speed(speed)'''\n\n def move(self, goal_position):\n start_position = self.current_position()\n self.set_speed([MIN_SPEED, MIN_SPEED]) # input 2 MIN_SPEED here ?\n # 根据坐标旋转底部电机对准角度\n # 
self.driver.setReg(1,P)\n\n # self.driver.setReg(2,P)\n # self.driver.setReg(3,P)\n\n # self.driver.setReg(4,P)\n # self.driver.setReg(5,P)\n\n # self.driver.setReg(6,P)\n # 保持执行器末端z轴不变运动到棋子上方\n for i in SERVOS: # 遍历电机运动,这里需要打包两对电机? 目前只能动俩个(组)舵机\n if i == SERVO_2:\n self.driver.setReg(i, P_GOAL_POSITION_L, [goal_position[i % 2] % 256, goal_position[i % 2] >> 8])\n self.driver.setReg(SERVO_3, P_GOAL_POSITION_L, [goal_position[i % 2] % 256, goal_position[i % 2] >> 8])\n # 需要反向\n elif i == SERVO_4:\n self.driver.setReg(i, P_GOAL_POSITION_L, [goal_position[i % 2] % 256, goal_position[i % 2] >> 8])\n self.driver.setReg(SERVO_5, P_GOAL_POSITION_L, [goal_position[i % 2] % 256, goal_position[i % 2] >> 8])\n # 需要反向\n else:\n self.driver.setReg(i, P_GOAL_POSITION_L, [goal_position[i % 2] % 256, goal_position[i % 2] >> 8])\n while self._is_moving(): # 控制运动速度变化\n position = self.current_position()\n speed = [_adjusted_speed(start_position[i % 2], goal_position[i % 2], position[i % 2]) for i in SERVOS]\n self.set_speed(speed)\n\n def move_to_point_new(self, pt, piece_type): # 将(x,y)+ piece_type 转化为关节的角度\n goal_position = self.movement_engine.convert_point_new(pt, piece_type) # 在此改逆运动学\n self.move_new(goal_position)\n\n def move_to_point(self, pt): # 在二维坐标系中,将(x,y)转化为俩个关节的角度\n goal_position = self.movement_engine.convert_point(pt) # 在此改逆运动学\n self.move(goal_position)\n\n def set_speed(self, speed): # 目前只能动俩个(组)舵机,但不必用到\n for i in SERVOS:\n self.driver.setReg(i, P_GOAL_SPEED_L, [speed[i % 2] % 256, speed[i % 2] >> 8])\n\n def current_position(self):\n return self._values_for_register(P_PRESENT_POSITION_L)\n\n def _is_moving(self):\n return any([self.driver.getReg(index, P_MOVING, 1) == 1 for index in SERVOS])\n\n def _values_for_register(self, register):\n return [_register_bytes_to_value(self.driver.getReg(index, register, 2)) for index in SERVOS]\n\n\ndef main():\n arm = Arm(port='COM3')\n arm.move(512)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"raspberryturk/embedded/motion/arm.py","file_name":"arm.py","file_ext":"py","file_size_in_byte":5875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"333145021","text":"import logging\n\nfrom restless.dj import DjangoResource\nfrom restless.preparers import FieldsPreparer\nfrom restless.exceptions import BadRequest\n\nfrom django.db import IntegrityError\n\nfrom postmon.responser import PostmonResponse\n\nfrom zipcodes.models import ZipCode\n\n# Get an instance os a logger\nlogger = logging.getLogger(__name__)\n\n\nclass ZipCodeResource(DjangoResource):\n\n preparer = FieldsPreparer(fields={\n 'address': 'address',\n 'neighborhood': 'neighborhood',\n 'city': 'city',\n 'state': 'state',\n 'zip_code': 'zip_code',\n })\n\n def is_authenticated(self):\n return True\n\n def list(self):\n if not self.request.GET.get('limit'):\n logger.info('[GET] List zip codes')\n return ZipCode.objects.all()\n\n limit = int(self.request.GET.get('limit'))\n logger.info('[GET] List zip codes limited: %s' % limit)\n return ZipCode.objects.all()[:limit]\n\n def create(self):\n zip_code = self.data.get('zip_code')\n try:\n postmon = PostmonResponse(zip_code)\n except:\n logger.error(\n '[Error] Incorrect zip code format: %s' % zip_code)\n raise BadRequest('Incorrect zip code format')\n\n response = postmon.response()\n try:\n created = ZipCode.objects.create(\n address=response['logradouro'],\n neighborhood=response['bairro'],\n city=response['cidade'],\n state=response['estado'],\n zip_code=response['cep']\n )\n 
logger.info('[API] New zip code created: %s' % zip_code)\n return created\n except KeyError:\n created = ZipCode.objects.create(\n city=response['cidade'],\n state=response['estado'],\n zip_code=response['cep']\n )\n logger.info('[API] New zip code created: %s' % zip_code)\n return created\n except IntegrityError:\n logger.error('[Error] Zip code already created: %s' % zip_code)\n raise BadRequest('Zip code already created')\n\n def delete(self, pk):\n logger.info('[API] Delete zip code: %s' % pk)\n ZipCode.objects.get(zip_code=pk).delete()\n\n def detail(self, pk):\n logger.info('[API] Get zip code: %s' % pk)\n return ZipCode.objects.get(zip_code=pk)\n","sub_path":"zipcodes/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"228301998","text":"# im so hungry right now -maha 2015-10-02 14:44\nimport os.path\nimport json\nimport codecs\nimport re\nimport subprocess\ntry:\n from git import Repo\nexcept OSError:\n print(\"Git module for python not installed. Installing now...\")\n subprocess.check_output(\"pip install gitpython\")\n# from pprint import pprint\n\n\ndef getCustomers(systemsFileName):\n if os.path.isfile(systemsFileName) is False:\n print('Creating ', systemsFileName, ' since it didnt exist')\n\n emptyCustTemplate = {\"customers\": [], \"type\": \"global\"}\n with open(systemsFileName, \"w\") as outfile:\n json.dump(emptyCustTemplate, outfile, indent=4)\n newGlobalJson = json.load(codecs.open(systemsFileName, 'r', 'utf-8'))\n return newGlobalJson\n else:\n globalJsonData = json.load(codecs.open(systemsFileName, 'r', 'utf-8'))\n return globalJsonData\n\n\ndef getNewCustname():\n\n newCustname = input('Name: ')\n # cant have duplicate names\n for customer in globalCustomerData['customers']:\n if customer['info']['name'].lower() == newCustname.lower():\n print(newCustname, \"already exists... try again\")\n getNewCustname()\n return newCustname.title()\n\n\ndef getNewCustIP():\n newCustIP = input('IP: ')\n regexPatternIP = re.compile('^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$')\n validIP = regexPatternIP.match(newCustIP)\n # cant have duplicate IP - or can we???? Commented for now because\n # you probably want to create several customers with the same ip\n # for customer in globalCustomerData['customers']:\n # if customer['info']['ip'] == newCustIP:\n # print(newCustIP, \"already exists... try again\")\n # getNewCustIP()\n if validIP is None:\n print(\"Wrong format\")\n getNewCustIP()\n return newCustIP\n\n\ndef getNewCIVersion():\n newCIVersion = input(\"CI version: (Leave empty if you want it to prompt each time) ex. 3.2.1: \")\n if newCIVersion == '':\n newCIVersion = 'select'\n return newCIVersion\n\n\ndef getAgentVersion():\n newAgentVersion = input(\"Agent version: (Leave empty if you want it to prompt each time) ex. 33445: \")\n if newAgentVersion == '':\n newAgentVersion = 'select'\n return newAgentVersion\n\n\ndef getCoachVersion():\n newCoachVersion = input(\"Coach version: (Leave empty if you want it to prompt each time) ex. 
33445: \")\n if newCoachVersion == '':\n newCoachVersion = 'select'\n return newCoachVersion\n\n\ndef getPopRmi():\n newPopRMI = input(\"Do you want to pop rmi each time: y/n: \")\n if newPopRMI.lower() != \"y\":\n return False\n return True\n\n\ndef highestID():\n globalJsonData = json.load(codecs.open('globalSystems.json', 'r', 'utf-8'))\n ids = []\n for customer in globalJsonData['customers']:\n ids.append(int(customer['id']))\n if ids:\n return(max(ids))\n return 0\n\n\ndef addGlobalCustomerToJSon(name, ip, cIVersion, agentVersion, coachVersion, poprmi, currentID):\n newValues = {\"id\": currentID,\n \"info\":\n {\"name\": name,\n \"ip\": ip,\n \"pop-rmi\": poprmi,\n \"clearinteract-version\": cIVersion,\n \"agent-version\": agentVersion,\n \"coach-version\": coachVersion\n }\n }\n with open('globalSystems.json') as outfile:\n data = json.load(outfile)\n data['customers'].append(newValues)\n with open('globalSystems.json', 'w') as outfile:\n json.dump(data, outfile, indent=4)\n\n#\n# Actual program\n#\n\n# TODO: create method for checking for a settings.json -- also needed when creating local customers\n# something like this \"if os.path.isfile(settingsFileName) is False:\"\n# TODO: we should do git pull before this -- if the user doesnt have git it should say so in settings.json\nglobalCustomerData = getCustomers('globalSystems.json')\nname = getNewCustname()\nip = getNewCustIP()\ncIVersion = getNewCIVersion()\nagentVersion = getAgentVersion()\ncoachVersion = getCoachVersion()\npoprmi = getPopRmi()\ncurrentID = highestID() + 1\n# TODO: the user might want to submit more parameters, we should input these in csv/array to addGlobalCustomerToJSon.\naddGlobalCustomerToJSon(name, ip, cIVersion, agentVersion, coachVersion, poprmi, currentID)\n# TODO: Commit globalSystems.json here\n\n# TODO: call the updateAndCreateGlobal.py here\n","sub_path":"Config/createGlobalCustomer.py","file_name":"createGlobalCustomer.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"616158846","text":"import os\nimport copy\nimport time\nimport pickle\nimport inspect\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom scipy import stats, interpolate, signal, fftpack\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom .tools import *\n\nclass Vm:\n def __init__(self, main):\n '''Analyze Vm results.'''\n self.main = main\n self.vmcorr = None\n\n def plotvm(self, df, pops=[0, 1, 2, 3], ylim=(-80., -40.),\n plotn=4, figsize=(10, 10), t_len=200.):\n t0 = time.time()\n # t_min = df.time.min()\n fig, axs = plt.subplots(plotn, 1, figsize=figsize,\n sharex=True, sharey=True)\n ts = None\n for pop in pops:\n if pop >= plotn: continue\n df_pop = df[df.population == pop]\n if len(df_pop) == 0:\n continue\n arr = []\n for id in sorted(list(set(df_pop.id))):\n arr.append(\n df_pop[(df_pop.id==id)&(df_pop.time<=2*t_len)]['Vm'].values)\n if ts is None:\n ts = df_pop[(df_pop.id==id)&\n (df_pop.time<=2*t_len)]['time'].values\n axs[pop].plot(\n ts, np.array(arr).T,\n color=self.main.pdict['colors']['default'][pop])\n axs[pop].set_ylim(ylim)\n axs[pop].set_xlim((t_len, 2*t_len))\n\n plt.tight_layout()\n plt.savefig(os.path.join(self.main.path,\n '{}{}.png'.format(inspect.stack()[0][3], self.main.pdict['suffix'])))\n plt.close()\n\n print('plotvm() running time = {:.3f} s'.format(time.time()-t0))\n\n def plotvmfft(self, df, ai, pops=[0, 1, 2, 3], figsize=(8, 6),\n xlim=(-1., 100.), ylim=(-100., 50000.)):\n t0 = 
time.time()\n fig, axs = plt.subplots(1, 1, figsize=figsize)\n starts = np.arange(ai['start'], ai['end'], ai['len_seg'])\n ends = starts + ai['len_seg']\n spectrum_all = []\n for pop in pops:\n spectrums = []\n df_pop = df[df.population == pop]\n if len(df_pop) == 0:\n continue\n for id in sorted(list(set(df_pop.id))):\n arr_tV = df_pop[df_pop.id == id][['time', 'Vm']].values\n for l, (start, end) in enumerate(zip(starts, ends)):\n Vs_seg = arr_tV[(arr_tV[:, 0]>=start)&\n (arr_tV[:, 0]=start)&(vs_sample=-range_dt)&(dts<=range_dt)]})\n df_conn['source'] = pre_key\n df_conn['target'] = post_key\n # print('pre_key, post_key:')\n # print(pre_key, post_key)\n for k in range(n_pair):\n id1, id2 = nid_lists[i][k], nid_lists[j][k+n_pair]\n df1, df2 = df[df['id']==id1], df[df['id']==id2]\n Vm1 = df1[(df1['time']>=start)&(df1['time']=start)&(df2['time']=-range_dt)&(dts<=range_dt)]\n # self.vmcorr[pre_key][post_key][str(k)] = correls\n\n if isinstance(self.vmcorr, pd.DataFrame):\n self.vmcorr = pd.concat(\n [self.vmcorr, df_conn], ignore_index=True)\n else:\n self.vmcorr = copy.deepcopy(df_conn)\n self.main.rdict['results_vmc'] = copy.deepcopy(self.vmcorr)\n\n def do_Vm_(self, ai, plots=['corr']):\n df = self.main.df['voltmeter']\n if 'corr' in plots:\n self.calc_vmcorr_(df, ai['start'], ai['end'])\n # self.plotvmcorr_(self.vmcorr)\n\n def do_Vm(self, ai, plots=['corr']):\n df = self.main.df['voltmeter']\n if 'vm' in plots:\n self.plotvm(df)\n if 'corr' in plots:\n self.calc_vmcorr(df, ai)\n self.plotvmcorr(self.vmcorr['dts'], self.vmcorr['correls'])\n if 'fft' in plots:\n self.plotvmfft(df, ai)\n","sub_path":"analysis/vm.py","file_name":"vm.py","file_ext":"py","file_size_in_byte":7630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"464064812","text":"# Copyright 2020-present NAVER Corp. 
Under BSD 3-clause license\n\n\"\"\"\nKapture to openmvg export functions.\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport os.path as path\nfrom typing import Dict, List, Optional, Union\nimport quaternion\nimport numpy as np\nfrom tqdm import tqdm\n# kapture\nimport kapture\nimport kapture.io.csv\nfrom kapture.io.binary import TransferAction, transfer_files_from_dir, array_to_file\nfrom kapture.io.records import get_image_fullpath\nfrom kapture.io.features import keypoints_to_filepaths, image_keypoints_from_file, get_keypoints_fullpath\nfrom kapture.io.features import descriptors_to_filepaths, image_descriptors_from_file\nfrom kapture.io.features import matches_to_filepaths, image_matches_from_file\nimport kapture.io.structure\nfrom kapture.core.Trajectories import rigs_remove_inplace\nfrom kapture.utils.paths import safe_remove_file, safe_remove_any_path\n# local\nfrom .openmvg_commons import JSON_KEY, OPENMVG_SFM_DATA_VERSION_NUMBER, OPENMVG_DEFAULT_JSON_FILE_NAME\nfrom .openmvg_commons import CameraModel\n\nlogger = logging.getLogger('openmvg') # Using global openmvg logger\n\nNEW_ID_MASK = 1 << 31 # 10000000 00000000 00000000 00000000\nDEFAULT_FOCAL_LENGTH_FACTOR = 1.2\n\n\nclass CerealPointerRegistry:\n NEW_ID_MASK = 1 << 31 # 10000000 00000000 00000000 00000000\n NULL_ID = 1 << 30 # 01000000 00000000 00000000 00000000\n\n def __init__(self, value_key, id_key):\n self._value_key = value_key\n self._id_key = id_key\n self._ids = {}\n self._id_current = 1\n\n def get_ids_dict(self, value):\n \"\"\" :return either id if known or id + name if new \"\"\"\n if isinstance(value, dict) or value not in self._ids:\n # if this is the first time or a dict\n new_id = self._id_current\n if not isinstance(value, dict):\n self._ids[value] = new_id\n self._id_current += 1\n return {\n self._id_key: new_id | NEW_ID_MASK,\n self._value_key: value,\n }\n else:\n return {\n self._id_key: self._ids[value]\n }\n\n\ndef _get_data(camera_params: list) -> Dict:\n # w, h, f, cx, cy\n data: Dict[str, Union[int, float, List[float]]] = {JSON_KEY.WIDTH: int(camera_params[0]),\n JSON_KEY.HEIGHT: int(camera_params[1]),\n JSON_KEY.FOCAL_LENGTH: float(camera_params[2]),\n JSON_KEY.PRINCIPAL_POINT: [float(camera_params[3]),\n float(camera_params[4])]}\n return data\n\n\ndef _get_intrinsic_pinhole(camera_params: list) -> Dict:\n # w, h, f, cx, cy\n return _get_data(camera_params)\n\n\ndef _get_intrinsic_pinhole_radial_k1(camera_params: list) -> Dict:\n data = _get_data(camera_params)\n # w, h, f, cx, cy, k\n data[JSON_KEY.DISTO_K1] = [float(camera_params[5])]\n return data\n\n\ndef _get_intrinsic_pinhole_radial_k3(camera_params: list) -> Dict:\n data = _get_data(camera_params)\n # w, h, f, cx, cy, k1, k2, k3\n data[JSON_KEY.DISTO_K3] = [float(camera_params[5]), float(camera_params[6]), float(camera_params[7])]\n return data\n\n\ndef _get_intrinsic_pinhole_brown_t2(camera_params: list) -> Dict:\n # w, h, f, cx, cy, k1, k2, k3, t1, t2\n disto_t2 = [float(camera_params[5]), float(camera_params[6]), float(camera_params[7]),\n float(camera_params[8]), float(camera_params[9])]\n return {JSON_KEY.VALUE0: _get_data(camera_params), JSON_KEY.DISTO_T2: disto_t2}\n\n\ndef _get_intrinsic_fisheye(camera_params: list) -> Dict:\n # w, h, f, cx, cy, k1, k2, k3, k4\n fisheye = [float(camera_params[5]), float(camera_params[6]), float(camera_params[7]), float(camera_params[8])]\n return {JSON_KEY.VALUE0: _get_data(camera_params),\n JSON_KEY.FISHEYE: fisheye}\n\n\ndef get_openmvg_camera_id(kapture_camera_id, 
kapture_to_openmvg_cam_ids):\n \"\"\" return a valid openmvg camera id for the given kapture on.\n It keeps kapture_to_openmvg_cam_ids uptodate, and ensure there is no collision.\n \"\"\"\n\n if kapture_camera_id in kapture_to_openmvg_cam_ids:\n # already defined\n return kapture_to_openmvg_cam_ids[kapture_camera_id]\n\n # its not defined, then, make up one\n last_known_openmvg_camera_id = max(list(kapture_to_openmvg_cam_ids.values()) + [-1])\n assert type(last_known_openmvg_camera_id) == int\n openmvg_camera_id = last_known_openmvg_camera_id + 1\n assert openmvg_camera_id not in kapture_to_openmvg_cam_ids\n kapture_to_openmvg_cam_ids[kapture_camera_id] = openmvg_camera_id\n return openmvg_camera_id\n\n\ndef get_openmvg_image_path(kapture_image_name: str, flatten_path: bool = False):\n \"\"\" the openmvg image sub path corresponding to the given kapture one. \"\"\"\n return kapture_image_name if not flatten_path else kapture_image_name.replace('/', '_')\n\n\ndef export_openmvg_intrinsics(\n kapture_cameras,\n kapture_to_openmvg_cam_ids: Dict[str, int],\n polymorphic_registry: CerealPointerRegistry,\n ptr_wrapper_registry: CerealPointerRegistry,\n):\n \"\"\"\n Exports the given kapture cameras to the openMVG sfm_data structure.\n In openMVGm, cameras are referred as Intrinsics camera internal parameters.\n\n :param kapture_cameras: input kapture cameras to be exported (even if not used).\n :param kapture_to_openmvg_cam_ids: input/output dict that maps kapture camera ids to openMVG camera ids.\n :param polymorphic_registry: input/output polymorphic IDs status\n :param ptr_wrapper_registry: input/output polymorphic IDs status\n :return:\n \"\"\"\n openmvg_intrinsics = []\n # kapture_to_openmvg_cam_ids = {}\n # process all cameras\n for kapture_cam_id, kapture_camera in kapture_cameras.items():\n openmvg_camera_id = get_openmvg_camera_id(kapture_cam_id, kapture_to_openmvg_cam_ids)\n kapture_cam_type = kapture_camera.camera_type\n kapture_camera_params = kapture_camera.camera_params\n if kapture_cam_type == kapture.CameraType.SIMPLE_PINHOLE:\n # w, h, f, cx, cy\n opnmvg_cam_type = CameraModel.pinhole\n data = _get_intrinsic_pinhole(kapture_camera_params)\n elif kapture_cam_type == kapture.CameraType.PINHOLE:\n # w, h, f, cx, cy\n opnmvg_cam_type = CameraModel.pinhole\n faked_params = [kapture_camera_params[0], kapture_camera_params[1], # width height\n (kapture_camera_params[2] + kapture_camera_params[3]) / 2, # fx+fy/2 as f\n kapture_camera_params[4], kapture_camera_params[5]] # cx cy\n data = _get_intrinsic_pinhole(faked_params)\n elif kapture_cam_type == kapture.CameraType.SIMPLE_RADIAL:\n # w, h, f, cx, cy, k\n opnmvg_cam_type = CameraModel.pinhole_radial_k1\n data = _get_intrinsic_pinhole_radial_k1(kapture_camera_params)\n elif kapture_cam_type == kapture.CameraType.RADIAL:\n # w, h, f, cx, cy, k1, k2, k3\n opnmvg_cam_type = CameraModel.pinhole_radial_k3\n faked_params = [kapture_camera_params[0], kapture_camera_params[1], # width height\n kapture_camera_params[2], # f\n kapture_camera_params[3], kapture_camera_params[4], # cx cy\n kapture_camera_params[5], kapture_camera_params[6], 0 # k1, k2, k3\n ]\n data = _get_intrinsic_pinhole_radial_k3(faked_params)\n elif kapture_cam_type == kapture.CameraType.FULL_OPENCV or kapture_cam_type == kapture.CameraType.OPENCV:\n # w, h, f, cx, cy, k1, k2, k3, t1, t2\n opnmvg_cam_type = CameraModel.pinhole_brown_t2\n k3 = kapture_camera_params[10] if len(kapture_camera_params) > 10 else 0\n faked_params = [kapture_camera_params[0], 
kapture_camera_params[1], # width height\n (kapture_camera_params[2] + kapture_camera_params[3]) / 2, # fx+fy/2 as f\n kapture_camera_params[4], kapture_camera_params[5], # cx cy\n kapture_camera_params[6], kapture_camera_params[7], k3, # k1, k2, k3\n kapture_camera_params[8], kapture_camera_params[9] # p1, p2 (=t1, t2)\n ]\n data = _get_intrinsic_pinhole_brown_t2(faked_params)\n elif kapture_cam_type == kapture.CameraType.OPENCV_FISHEYE:\n logger.warning('OpenCV fisheye model is not compatible with OpenMVG. Forcing distortion to 0')\n # w, h, f, cx, cy, k1, k2, k3, k4\n opnmvg_cam_type = CameraModel.fisheye\n faked_params = [kapture_camera_params[0], kapture_camera_params[1], # width height\n (kapture_camera_params[2] + kapture_camera_params[3]) / 2, # fx+fy/2 as f\n kapture_camera_params[4], kapture_camera_params[5], # cx cy\n 0, 0, # k1, k2\n 0, 0 # k3, k4\n ]\n data = _get_intrinsic_fisheye(faked_params)\n elif kapture_cam_type == kapture.CameraType.RADIAL_FISHEYE or \\\n kapture_cam_type == kapture.CameraType.SIMPLE_RADIAL_FISHEYE:\n logger.warning('OpenCV fisheye model is not compatible with OpenMVG. Forcing distortion to 0')\n # w, h, f, cx, cy, k1, k2, k3, k4\n opnmvg_cam_type = CameraModel.fisheye\n faked_params = [kapture_camera_params[0], kapture_camera_params[1], # width height\n kapture_camera_params[2], # f\n kapture_camera_params[3], kapture_camera_params[4], # cx cy\n 0, 0, # k1, k2\n 0, 0 # k3, k4\n ]\n data = _get_intrinsic_fisheye(faked_params)\n elif kapture_cam_type == kapture.CameraType.UNKNOWN_CAMERA:\n logger.info(f'Camera {kapture_cam_id}: Unknown camera model, using simple radial')\n # Choose simple radial model, to allow openMVG to determine distortion param\n # w, h, f, cx, cy, k\n opnmvg_cam_type = CameraModel.pinhole_radial_k1\n faked_params = [kapture_camera_params[0], kapture_camera_params[1], # width height\n max(kapture_camera_params[0], kapture_camera_params[1]) * DEFAULT_FOCAL_LENGTH_FACTOR,\n # max(w,h)*1.2 as f\n int(kapture_camera_params[0] / 2), int(kapture_camera_params[1] / 2), # cx cy\n 0.0] # k1\n data = _get_intrinsic_pinhole_radial_k1(faked_params)\n else:\n raise ValueError(f'Camera model {kapture_cam_type.value} not supported')\n\n intrinsic = {}\n cam_type_poly_id = polymorphic_registry.get_ids_dict(opnmvg_cam_type.name)\n intrinsic.update(cam_type_poly_id)\n\n # it is assumed that this camera is only encountered once\n # set the first bit of ptr_wrapper_id_current to 1\n data_wrapper = ptr_wrapper_registry.get_ids_dict(data)\n\n intrinsic[JSON_KEY.PTR_WRAPPER] = data_wrapper\n openmvg_intrinsics.append({JSON_KEY.KEY: openmvg_camera_id, JSON_KEY.VALUE: intrinsic})\n\n return openmvg_intrinsics\n\n\ndef export_openmvg_views(\n kapture_cameras: kapture.Sensors,\n kapture_images: kapture.RecordsCamera,\n kapture_trajectories: kapture.Trajectories,\n kapture_to_openmvg_cam_ids: Dict[str, int],\n kapture_to_openmvg_view_ids: Dict[str, int],\n polymorphic_registry: CerealPointerRegistry,\n ptr_wrapper_registry: CerealPointerRegistry,\n image_path_flatten: bool,\n) -> List:\n \"\"\"\n\n :param kapture_cameras:\n :param kapture_images:\n :param kapture_trajectories:\n :param kapture_to_openmvg_cam_ids: input dict that maps kapture camera ids to openMVG camera ids.\n :param kapture_to_openmvg_view_ids: input dict that maps kapture image names to openMVG view ids.\n :param polymorphic_registry: input/output polymorphic IDs status\n :param ptr_wrapper_registry: input/output polymorphic IDs status\n :param image_path_flatten: flatten image path (eg. 
to avoid image name collision in openMVG regions).\n :return: views to be serialized\n \"\"\"\n views = []\n # process all images\n for timestamp, kapture_cam_id, kapture_image_name in kapture.flatten(kapture_images):\n assert kapture_cam_id in kapture_to_openmvg_cam_ids\n assert kapture_image_name in kapture_to_openmvg_view_ids\n openmvg_cam_id = get_openmvg_camera_id(kapture_cam_id, kapture_to_openmvg_cam_ids)\n openmvg_view_id = kapture_to_openmvg_view_ids[kapture_image_name]\n openmvg_image_filepath = get_openmvg_image_path(kapture_image_name, image_path_flatten)\n openmvg_image_filename = path.basename(openmvg_image_filepath)\n openmvg_image_local_path = path.dirname(openmvg_image_filepath)\n kapture_camera_params = kapture_cameras[kapture_cam_id].camera_params\n view_data = {JSON_KEY.LOCAL_PATH: openmvg_image_local_path,\n JSON_KEY.FILENAME: openmvg_image_filename,\n JSON_KEY.WIDTH: int(kapture_camera_params[0]),\n JSON_KEY.HEIGHT: int(kapture_camera_params[1]),\n JSON_KEY.ID_VIEW: openmvg_view_id,\n JSON_KEY.ID_INTRINSIC: openmvg_cam_id,\n JSON_KEY.ID_POSE: openmvg_view_id}\n\n view = {}\n # retrieve image pose from trajectories\n if timestamp not in kapture_trajectories:\n view[JSON_KEY.POLYMORPHIC_ID] = CerealPointerRegistry.NULL_ID\n else:\n # there is a pose for that timestamp\n # The poses are stored both as priors (in the 'views' table) and as known poses (in the 'extrinsics' table)\n assert kapture_cam_id in kapture_trajectories[timestamp]\n view_priors_id = polymorphic_registry.get_ids_dict(JSON_KEY.VIEW_PRIORS)\n view.update(view_priors_id)\n\n pose_tr = kapture_trajectories[timestamp].get(kapture_cam_id)\n prior_q = pose_tr.r\n prior_t = pose_tr.inverse().t_raw\n pose_data = {JSON_KEY.CENTER: prior_t,\n JSON_KEY.ROTATION: quaternion.as_rotation_matrix(prior_q).tolist()}\n\n view_data[JSON_KEY.USE_POSE_CENTER_PRIOR] = True\n view_data[JSON_KEY.CENTER_WEIGHT] = [1.0, 1.0, 1.0]\n view_data[JSON_KEY.CENTER] = prior_t\n view_data[JSON_KEY.USE_POSE_ROTATION_PRIOR] = True\n view_data[JSON_KEY.ROTATION_WEIGHT] = 1.0\n view_data[JSON_KEY.ROTATION] = pose_data[JSON_KEY.ROTATION]\n\n # it is assumed that this view is only encountered once\n view_wrapper = ptr_wrapper_registry.get_ids_dict(view_data)\n view[JSON_KEY.PTR_WRAPPER] = view_wrapper\n views.append({JSON_KEY.KEY: openmvg_view_id, JSON_KEY.VALUE: view})\n\n return views\n\n\ndef export_openmvg_poses(\n kapture_images: kapture.RecordsCamera,\n kapture_trajectories: kapture.Trajectories,\n kapture_to_openmvg_view_ids: Dict[str, int],\n) -> List:\n \"\"\"\n\n :param kapture_images:\n :param kapture_trajectories:\n :param kapture_to_openmvg_view_ids: input dict that maps kapture image ids to openMVG view ids.\n :return: extrinsics to be serialized\n \"\"\"\n extrinsics = []\n # process all images\n for timestamp, kapture_cam_id, kapture_image_name in kapture.flatten(kapture_images):\n assert kapture_image_name in kapture_to_openmvg_view_ids\n openmvg_view_id = kapture_to_openmvg_view_ids[kapture_image_name]\n # retrieve image pose from trajectories\n if timestamp in kapture_trajectories:\n # there is a pose for that timestamp\n # The poses are stored both as priors (in the 'views' table) and as known poses (in the 'extrinsics' table)\n assert kapture_cam_id in kapture_trajectories[timestamp]\n pose_tr = kapture_trajectories[timestamp].get(kapture_cam_id)\n prior_q = pose_tr.r\n prior_t = pose_tr.inverse().t_raw\n pose_data = {JSON_KEY.CENTER: prior_t,\n JSON_KEY.ROTATION: quaternion.as_rotation_matrix(prior_q).tolist()}\n 
extrinsics.append({JSON_KEY.KEY: openmvg_view_id, JSON_KEY.VALUE: pose_data})\n\n return extrinsics\n\n\ndef export_openmvg_structure(\n kapture_points_3d: kapture.Points3d,\n kapture_to_openmvg_view_ids: Dict[str, int],\n kapture_observations: Optional[kapture.Observations] = None,\n kapture_keypoints: Optional[kapture.Keypoints] = None,\n kapture_path: Optional[str] = None,\n):\n # early check\n if kapture_points_3d is None:\n logger.warning('no 3D points to export.')\n return\n\n xyz_coordinates = kapture_points_3d[:, 0:3]\n include_2d_observations = kapture_observations is not None\n openmvg_structure = []\n # this loop can be very long, lets show some progress\n hide_progress_bars = logger.getEffectiveLevel() > logging.INFO\n\n for point_idx, coords in enumerate(tqdm(xyz_coordinates, disable=hide_progress_bars)):\n point_3d_structure = {\n 'key': point_idx,\n 'value': {\n 'X': coords.tolist(),\n 'observations': []\n }\n }\n if include_2d_observations and point_idx in kapture_observations:\n for kapture_image_name, feature_point_id in kapture_observations[point_idx]:\n openmvg_view_id = kapture_to_openmvg_view_ids[kapture_image_name]\n point_2d_observation = {'key': openmvg_view_id,\n 'value': {'id_feat': feature_point_id, }}\n\n if kapture_path and kapture_keypoints is not None:\n # if given, load keypoints to populate 2D coordinates of the feature.\n keypoints_file_path = get_keypoints_fullpath(kapture_path, kapture_image_name)\n try:\n keypoints_data = image_keypoints_from_file(keypoints_file_path,\n dtype=kapture_keypoints.dtype,\n dsize=kapture_keypoints.dsize)\n point_2d_observation['value']['x'] = keypoints_data[feature_point_id, 0:2].tolist()\n except FileNotFoundError:\n logger.warning(f'unable to load keypoints file {keypoints_file_path}')\n\n point_3d_structure['value']['observations'].append(point_2d_observation)\n\n openmvg_structure.append(point_3d_structure)\n\n return openmvg_structure\n\n\ndef export_openmvg_sfm_data(\n kapture_path: str,\n kapture_data: kapture.Kapture,\n openmvg_sfm_data_file_path: str,\n openmvg_image_root_path: str,\n image_action: TransferAction,\n image_path_flatten: bool,\n force: bool,\n kapture_to_openmvg_view_ids: dict = {}\n) -> Dict:\n \"\"\"\n Convert the kapture data into an openMVG dataset stored as a dictionary.\n The format is defined here:\n https://openmvg.readthedocs.io/en/latest/software/SfM/SfM_OutputFormat/\n\n :param kapture_data: the kapture data\n :param kapture_path: top directory of the kapture data and the images\n :param openmvg_sfm_data_file_path: input path to the SfM data file to be written.\n :param openmvg_image_root_path: input path to openMVG image directory to be created.\n :param image_action: action to apply on images: link, copy, move or do nothing.\n :param image_path_flatten: flatten image path (eg. 
to avoid image name collision in openMVG regions).\n :param force: if true, will remove existing openMVG data without prompting the user.\n :param kapture_to_openmvg_view_ids: input/output mapping of kapture image name to corresponding openmvg view id.\n :return: an SfM_data, the openmvg structure, stored as a dictionary ready to be serialized\n \"\"\"\n\n if kapture_data.cameras is None or kapture_data.records_camera is None:\n raise ValueError('export_openmvg_sfm_data needs kapture camera and records_camera.')\n\n if image_action == TransferAction.root_link:\n raise NotImplementedError('root link is not implemented, use skip instead.')\n\n # refer to the original image dir when skipping image transfer.\n if image_action == TransferAction.skip:\n openmvg_image_root_path = get_image_fullpath(kapture_path)\n\n if openmvg_image_root_path is None:\n raise ValueError(f'openmvg_image_root_path must be defined to be able to perform {image_action}.')\n\n # make sure directory is ready to contain openmvg_sfm_data_file_path\n os.makedirs(path.dirname(openmvg_sfm_data_file_path), exist_ok=True)\n\n # Check we don't have other sensors defined\n if len(kapture_data.sensors) != len(kapture_data.cameras):\n extra_sensor_number = len(kapture_data.sensors) - len(kapture_data.cameras)\n logger.warning(f'We will ignore {extra_sensor_number} sensors that are not camera')\n\n # openmvg does not support rigs\n if kapture_data.rigs:\n logger.info('remove rigs notation.')\n rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)\n kapture_data.rigs.clear()\n\n # Compute root path and camera used in records\n kapture_to_openmvg_cam_ids = {} # kapture_cam_id -> openmvg_cam_id\n for i, (_, _, kapture_image_name) in enumerate(kapture.flatten(kapture_data.records_camera)):\n if kapture_image_name not in kapture_to_openmvg_view_ids:\n kapture_to_openmvg_view_ids[kapture_image_name] = i\n\n # polymorphic_status = PolymorphicStatus({}, 1, 1)\n polymorphic_registry = CerealPointerRegistry(id_key=JSON_KEY.POLYMORPHIC_ID, value_key=JSON_KEY.POLYMORPHIC_NAME)\n ptr_wrapper_registry = CerealPointerRegistry(id_key=JSON_KEY.ID, value_key=JSON_KEY.DATA)\n\n logger.debug('exporting intrinsics ...')\n openmvg_sfm_data_intrinsics = export_openmvg_intrinsics(\n kapture_cameras=kapture_data.cameras,\n kapture_to_openmvg_cam_ids=kapture_to_openmvg_cam_ids,\n polymorphic_registry=polymorphic_registry,\n ptr_wrapper_registry=ptr_wrapper_registry,\n )\n\n logger.debug('exporting views ...')\n openmvg_sfm_data_views = export_openmvg_views(\n kapture_cameras=kapture_data.cameras,\n kapture_images=kapture_data.records_camera,\n kapture_trajectories=kapture_data.trajectories,\n kapture_to_openmvg_cam_ids=kapture_to_openmvg_cam_ids,\n kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids,\n polymorphic_registry=polymorphic_registry,\n ptr_wrapper_registry=ptr_wrapper_registry,\n image_path_flatten=image_path_flatten,\n )\n\n logger.debug('exporting poses ...')\n openmvg_sfm_data_poses = export_openmvg_poses(\n kapture_images=kapture_data.records_camera,\n kapture_trajectories=kapture_data.trajectories,\n kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids)\n\n # structure : correspond to kapture observations + 3D points\n logger.debug('exporting structure ...')\n openmvg_sfm_data_structure = export_openmvg_structure(\n kapture_points_3d=kapture_data.points3d,\n kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids,\n kapture_observations=kapture_data.observations,\n kapture_keypoints=kapture_data.keypoints,\n 
kapture_path=kapture_path\n )\n\n openmvg_sfm_data = {\n JSON_KEY.SFM_DATA_VERSION: OPENMVG_SFM_DATA_VERSION_NUMBER,\n JSON_KEY.ROOT_PATH: path.abspath(openmvg_image_root_path),\n JSON_KEY.INTRINSICS: openmvg_sfm_data_intrinsics,\n JSON_KEY.VIEWS: openmvg_sfm_data_views,\n JSON_KEY.EXTRINSICS: openmvg_sfm_data_poses,\n JSON_KEY.STRUCTURE: openmvg_sfm_data_structure,\n JSON_KEY.CONTROL_POINTS: [],\n }\n\n logger.debug(f'Saving to openmvg {openmvg_sfm_data_file_path}...')\n with open(openmvg_sfm_data_file_path, \"w\") as fid:\n json.dump(openmvg_sfm_data, fid, indent=4)\n\n # do the actual image transfer\n if not image_action == TransferAction.skip:\n job_copy = (\n ( # source path -> dest path\n get_image_fullpath(kapture_path, kapture_image_name),\n path.join(openmvg_image_root_path, get_openmvg_image_path(kapture_image_name, image_path_flatten))\n )\n for _, _, kapture_image_name in kapture.flatten(kapture_data.records_camera)\n )\n source_filepath_list, destination_filepath_list = zip(*job_copy)\n transfer_files_from_dir(\n source_filepath_list=source_filepath_list,\n destination_filepath_list=destination_filepath_list,\n copy_strategy=image_action,\n force_overwrite=force\n )\n\n\ndef export_openmvg_regions(\n kapture_path: str,\n kapture_keypoints: Optional[kapture.Keypoints],\n kapture_descriptors: kapture.Descriptors,\n openmvg_regions_dir_path: str,\n image_path_flatten: bool\n):\n \"\"\"\n exports openMVG regions ie keypoints and descriptors.\n\n :param kapture_path: input path to root kapture directory.\n :param kapture_keypoints: input kapture keypoints. Could be None if no keypoints.\n :param kapture_descriptors: input kapture descriptors. Could be None if no descriptors.\n :param openmvg_regions_dir_path: input path to output openMVG regions directory.\n :param image_path_flatten: if true, it means that image path are to be flatten.\n :return:\n \"\"\"\n # early check we should do\n if kapture_keypoints is None or kapture_descriptors is None:\n logger.warning('no keypoints or descriptors to export.')\n return\n\n # make sure output directory is ready\n os.makedirs(openmvg_regions_dir_path, exist_ok=True)\n\n # only able to export SIFT\n if any([f.type_name.upper() != 'SIFT' for f in [kapture_keypoints, kapture_descriptors]]):\n raise ValueError(f'unable to export other regions than sift '\n f'(got {kapture_keypoints.type_name}/{kapture_descriptors.type_name})')\n\n os.makedirs(openmvg_regions_dir_path, exist_ok=True)\n polymorphic_registry = CerealPointerRegistry(id_key=JSON_KEY.POLYMORPHIC_ID, value_key=JSON_KEY.POLYMORPHIC_NAME)\n # create image_describer.json\n fake_regions_type = {\"ptr_wrapper\": {\"valid\": 1, \"data\": {\"value0\": [], \"value1\": []}}}\n fake_regions_type.update(polymorphic_registry.get_ids_dict('SIFT_Regions'))\n image_describer = {\n 'regions_type': fake_regions_type\n }\n image_describer_file_path = path.join(openmvg_regions_dir_path, 'image_describer.json')\n with open(image_describer_file_path, 'w') as fid:\n json.dump(image_describer, fid, indent=4)\n\n # this loop can be very long, lets show some progress\n hide_progress_bars = logger.getEffectiveLevel() > logging.INFO\n\n # copy keypoints files\n keypoints = keypoints_to_filepaths(kapture_keypoints, kapture_path)\n for kapture_image_name, kapture_keypoint_file_path in tqdm(keypoints.items(), disable=hide_progress_bars):\n openmvg_keypoint_file_name = get_openmvg_image_path(kapture_image_name, image_path_flatten)\n openmvg_keypoint_file_name = 
path.splitext(path.basename(openmvg_keypoint_file_name))[0] + '.feat'\n openmvg_keypoint_file_path = path.join(openmvg_regions_dir_path, openmvg_keypoint_file_name)\n keypoints_data = image_keypoints_from_file(kapture_keypoint_file_path,\n kapture_keypoints.dtype,\n kapture_keypoints.dsize)\n keypoints_data = keypoints_data[:, 0:4]\n np.savetxt(openmvg_keypoint_file_path, keypoints_data, fmt='%10.5f')\n\n # copy descriptors files\n \"\"\"\n from openMVG regions_factory.hpp\n using SIFT_Regions = Scalar_Regions;\n using AKAZE_Float_Regions = Scalar_Regions;\n using AKAZE_Liop_Regions = Scalar_Regions;\n using AKAZE_Binary_Regions = Binary_Regions;\n \"\"\"\n descriptors = descriptors_to_filepaths(kapture_descriptors, kapture_path)\n for kapture_image_name, kapture_descriptors_file_path in tqdm(descriptors.items(), disable=hide_progress_bars):\n openmvg_descriptors_file_name = get_openmvg_image_path(kapture_image_name, image_path_flatten)\n openmvg_descriptors_file_name = path.splitext(path.basename(openmvg_descriptors_file_name))[0] + '.desc'\n openmvg_descriptors_file_path = path.join(openmvg_regions_dir_path, openmvg_descriptors_file_name)\n kapture_descriptors_data = image_descriptors_from_file(kapture_descriptors_file_path,\n kapture_descriptors.dtype,\n kapture_descriptors.dsize)\n # assign a byte array of [size_t[1] + uint8[nb features x 128]\n size_t_len = 64 // 8\n openmvg_descriptors_data = np.empty(dtype=np.uint8, shape=(kapture_descriptors_data.size + size_t_len,))\n openmvg_descriptors_data[0:size_t_len].view(dtype=np.uint64)[0] = kapture_descriptors_data.shape[0]\n openmvg_descriptors_data[size_t_len:] = kapture_descriptors_data.flatten()\n array_to_file(openmvg_descriptors_file_path, openmvg_descriptors_data)\n\n\ndef export_openmvg_matches(\n kapture_path: str,\n kapture_data: kapture.Kapture,\n openmvg_matches_file_path: str,\n kapture_to_openmvg_view_ids: Dict[str, int]\n):\n if kapture_data.matches is None:\n logger.warning('No matches to be exported.')\n return\n\n if path.splitext(openmvg_matches_file_path)[1] != '.txt':\n logger.warning('Matches are exported as text format, even if file does not ends with .txt.')\n\n # make sure output directory is ready\n os.makedirs(path.dirname(openmvg_matches_file_path), exist_ok=True)\n\n hide_progress_bars = logger.getEffectiveLevel() > logging.INFO\n matches = matches_to_filepaths(kapture_data.matches, kapture_path)\n with open(openmvg_matches_file_path, 'w') as fid:\n for image_pair, kapture_matches_filepath in tqdm(matches.items(), disable=hide_progress_bars):\n # idx image1 idx image 2\n # nb pairs\n # pl1 pr1 pl2 pr2 ...\n i, j = [kapture_to_openmvg_view_ids[image_name] for image_name in image_pair]\n fid.write(f'{i} {j}\\n')\n matches_indices = image_matches_from_file(kapture_matches_filepath)[:, 0:2].astype(int)\n fid.write(f'{matches_indices.shape[0]}\\n')\n for indices_pair in matches_indices:\n fid.write(f'{indices_pair[0]} {indices_pair[1]}\\n')\n\n\ndef export_openmvg(\n kapture_path: str,\n openmvg_sfm_data_file_path: str,\n openmvg_image_root_path: str = None,\n openmvg_regions_dir_path: str = None,\n openmvg_matches_file_path: str = None,\n image_action: TransferAction = TransferAction.skip,\n image_path_flatten: bool = False,\n force: bool = False\n) -> None:\n \"\"\"\n Export the kapture data to an openMVG files.\n If the openmvg_path is a directory, it will create a JSON file (using the default name sfm_data.json)\n in that directory.\n\n :param kapture_path: full path to input kapture directory\n :param 
openmvg_sfm_data_file_path: input path to the SfM data file to be written.\n :param openmvg_image_root_path: optional input path to openMVG image directory to be created.\n :param openmvg_regions_dir_path: optional input path to openMVG regions (feat, desc) directory to be created.\n :param openmvg_matches_file_path: optional input path to openMVG matches file to be created.\n :param image_action: an action to apply on the images: relative linking, absolute linking, copy or move. Or top\n directory linking or skip to do nothing. If not \"skip\" equires openmvg_image_root_path to be defined.\n :param image_path_flatten: flatten image path (eg. to avoid image name collision in openMVG regions).\n :param force: if true, will remove existing openMVG data without prompting the user.\n \"\"\"\n\n if any(arg is not None and not isinstance(arg, str)\n for arg in [kapture_path, openmvg_image_root_path, openmvg_regions_dir_path, openmvg_matches_file_path]\n ):\n raise ValueError('expect str (or None) as path argument.')\n\n # clean before export\n safe_remove_file(openmvg_sfm_data_file_path, force)\n if path.exists(openmvg_sfm_data_file_path):\n raise ValueError(f'{openmvg_sfm_data_file_path} file already exist')\n\n # load kapture\n logger.info(f'loading kapture {kapture_path}...')\n kapture_data = kapture.io.csv.kapture_from_dir(kapture_path)\n if kapture_data is None or not isinstance(kapture_data, kapture.Kapture):\n raise ValueError(f'unable to load kapture from {kapture_path}')\n kapture_to_openmvg_view_ids = {}\n\n logger.info(f'exporting sfm data to {openmvg_sfm_data_file_path} ...')\n export_openmvg_sfm_data(\n kapture_data=kapture_data,\n kapture_path=kapture_path,\n openmvg_sfm_data_file_path=openmvg_sfm_data_file_path,\n openmvg_image_root_path=openmvg_image_root_path,\n image_action=image_action,\n image_path_flatten=image_path_flatten,\n force=force,\n kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids)\n\n if openmvg_regions_dir_path is not None:\n try:\n logger.info(f'exporting regions to {openmvg_regions_dir_path} ...')\n export_openmvg_regions(\n kapture_path=kapture_path,\n kapture_keypoints=kapture_data.keypoints,\n kapture_descriptors=kapture_data.descriptors,\n openmvg_regions_dir_path=openmvg_regions_dir_path,\n image_path_flatten=image_path_flatten\n )\n except ValueError as e:\n logger.error(e)\n\n if openmvg_matches_file_path is not None:\n try:\n logger.info(f'exporting matches to {openmvg_matches_file_path} ...')\n export_openmvg_matches(\n kapture_path=kapture_path,\n kapture_data=kapture_data,\n openmvg_matches_file_path=openmvg_matches_file_path,\n kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids\n )\n except ValueError as e:\n logger.error(e)\n","sub_path":"kapture/converter/openmvg/export_openmvg.py","file_name":"export_openmvg.py","file_ext":"py","file_size_in_byte":34702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"40446848","text":"#!/usr/bin/env python\n\nimport os\nimport setuptools\nimport versioneer\n\ninstall_requires = [\n line.rstrip() for line in open(os.path.join(os.path.dirname(__file__), \"REQUIREMENTS.txt\"))\n]\n\nsetuptools.setup(\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n extras_require={\n 'napari': ['napari==0.0.6'],\n },\n entry_points={\n 'console_scripts': [\n \"starfish=starfish.starfish:starfish\",\n ]\n },\n include_package_data=True,\n version=versioneer.get_version(),\n 
cmdclass=versioneer.get_cmdclass(),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"26200634","text":"import logging\nimport urllib.request\nfrom urllib.error import URLError\nimport json\nfrom pydblite.pydblite import Base\n\n\nlogger = logging.getLogger()\n\n\nclass AmazonProfile(object):\n def __init__(self, access_token=None):\n if access_token is not None:\n self._profile = self.get_profile(access_token)\n else:\n self._profile = None\n\n def get_profile(self, access_token):\n url = \"https://api.amazon.com/user/profile?access_token={}\".format(access_token)\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n try:\n req = urllib.request.Request(url,headers=headers)\n with urllib.request.urlopen(req) as response:\n data = response.read()\n encoding = response.info().get_content_charset('utf-8')\n self._profile = json.loads(data.decode(encoding))\n logger.critical('amazon_profile found {}'.format(self._profile))\n except URLError as e:\n self._profile = None\n if hasattr(e, 'reason'):\n logger.critical('We failed to reach a server.')\n logger.critical('Reason: ', e.reason)\n elif hasattr(e, 'code'):\n logger.critical('The server couldn\\'t fulfill the request.')\n logger.critical('Error code: ', e.code)\n return self._profile\n\n def get_zip_code(self):\n zip_code = 'not known'\n if self._profile is not None:\n if 'postal_code' in self._profile.keys():\n zip_code = self._profile['postal_code']\n if len(zip_code) > 5:\n zip_code = zip_code[:5]\n return zip_code\n\n\n\n\n\n\n","sub_path":"ask_amy/utilities/account_link.py","file_name":"account_link.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"160099077","text":"#!/usr/bin/python3\n#-*- coding: utf-8 -*-\n\nclass Person(object):\n \n my_class_var = 'sklee02'\n \n def __init__(self, year, month, day, sex):\n self.year = year\n self.month = month\n self.day = day\n self.sex = sex\n \n def __str__(self):\n return '{}년 {}월 {}일생 {}입니다.'.format(self.year, self.month, self.day, self.sex)\n \n @classmethod\n def ssn_constructor(cls, ssn):\n front, back = ssn.split('-')\n sex = back[0]\n \n if sex == '1' or sex == '2':\n year = '19' + front[:2]\n else:\n year = '20' + front[:2]\n \n if (int(sex) % 2) == 0:\n sex = '여성'\n else:\n sex = '남성'\n \n month = front[2:4]\n day = front[4:6]\n \n return cls(year, month, day, sex)\n \n @staticmethod\n def is_work_day(day):\n # weekday() 함수의 리턴값은\n # 월: 0, 화: 1, 수: 2, 목: 3, 금: 4, 토: 5, 일: 6\n if day.weekday() == 5 or day.weekday() == 6:\n return False\n return True\n \nssn_1 = '900829-1034356'\nssn_2 = '051224-4061569'\n\nperson_1 = Person.ssn_constructor(ssn_1)\nprint(person_1)\n\nperson_2 = Person.ssn_constructor(ssn_2)\nprint(person_2)\n\nimport datetime\n# 일요일 날짜 오브젝트 생성\nmy_date = datetime.date(2016, 10, 9)\n\n# 클래스를 통하여 스태틱 메소드 호출\nprint(Person.is_work_day(my_date))\n# 인스턴스를 통하여 스태틱 메소드 호출\nprint(person_1.is_work_day(my_date))\t\n","sub_path":"date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"322041870","text":"import multiprocessing\n\nimport os\nimport threading\nimport signal\nimport logging\nfrom datetime import datetime, timedelta\nfrom time import sleep\n\nimport click\n\n\nlog = 
logging.getLogger('pyFx')\n\n\nclass IntervalClock(object):\n def __init__(self, interval):\n self.interval = interval\n\n def __iter__(self):\n while True:\n yield datetime.utcnow()\n sleep(self.interval)\n\n\nclass SimulatedClock(object):\n def __init__(self, start, stop, interval):\n from app_conf import settings\n interval = settings.CLOCK_INTERVAL\n self.start = start\n self.stop = stop\n self.interval = timedelta(seconds=interval)\n\n def __iter__(self):\n current = self.start\n while current < self.stop:\n yield current\n current += self.interval\n\n\nclass ControllerBase(object):\n \"\"\"\n A controller class takes care to run the actions returned by the strategies\n for each clock tick. How exactly this is implemented is deferred to the\n concrete subclass.\n \"\"\"\n\n def __init__(self, clock, broker, portfolio, strategies):\n self._clock = clock\n self._broker = broker\n self._strategies = strategies\n self._portfolio = portfolio\n\n def initialize(self, tick):\n for strategy in self._strategies:\n strategy.start(self._broker, tick)\n\n def run(self):\n raise NotImplementedError()\n\n def run_until_stopped(self):\n raise NotImplementedError()\n\n def is_running(self):\n raise NotImplementedError()\n\n def stop(self):\n raise NotImplementedError()\n\n def execute_tick(self, tick):\n raise NotImplementedError()\n\n\nclass ThreadedControllerMixin(object):\n def __init__(self, *args, **kwargs):\n super(ThreadedControllerMixin, self).__init__(*args, **kwargs)\n self._stop_requested = False\n self._main_loop = None\n self._is_running = False\n self._thread_lock = threading.Condition()\n\n def run(self):\n assert self._main_loop is None\n self._main_loop = threading.Thread(target=self._run)\n self._main_loop.start()\n\n def run_until_stopped(self):\n self.run()\n while self.is_running():\n try:\n sleep(1024)\n except KeyboardInterrupt:\n if self.is_running():\n self.stop()\n break\n else:\n click.secho('The clock stopped ticking', fg='yellow')\n\n def is_running(self):\n return self._is_running\n\n def _run(self):\n self._is_running = True\n try:\n self._thread_lock.acquire()\n clock = iter(self._clock)\n self.initialize(next(clock))\n for tick in clock:\n if self._stop_requested:\n break\n self.execute_tick(tick)\n if self._stop_requested:\n break\n else:\n self._is_running = False\n os.kill(os.getpid(), signal.SIGINT)\n finally:\n self._is_running = False\n self._thread_lock.notify()\n\n def stop(self):\n click.secho('\\nSIGINT received, shutting down cleanly...', fg='yellow')\n self._stop_requested = True\n self._main_loop.join()\n\n\nclass SingleThreadedControllerMixin(object):\n def __init__(self, *args, **kwargs):\n super(SingleThreadedControllerMixin, self).__init__(*args, **kwargs)\n self._stop_requested = False\n self._is_running = False\n\n def run(self):\n raise NotImplementedError()\n\n def run_until_stopped(self):\n def stop(signal, frame):\n self.stop()\n\n signal.signal(signal.SIGINT, lambda signal, frame: self.stop())\n self._is_running = True\n try:\n clock = iter(self._clock)\n self.initialize(next(clock))\n for tick in clock:\n if self._stop_requested:\n break\n self.execute_tick(tick)\n if self._stop_requested:\n break\n else:\n click.secho('The clock stopped ticking', fg='yellow')\n finally:\n self._is_running = False\n\n def is_running(self):\n return self._is_running\n\n def stop(self):\n click.secho('\\nSIGINT received, shutting down cleanly...', fg='yellow')\n self._stop_requested = True\n\n\nclass Controller(SingleThreadedControllerMixin, ControllerBase):\n 
def execute_tick(self, tick):\n # Broker needs to know the current tick for backtesting & logging\n # TODO Solve in a more elegant way\n self._broker.set_current_tick(tick)\n\n operations = [strategy.tick(tick) for strategy in self._strategies]\n operations = [op for op in operations if op]\n\n # This will execute the new operations (and further required tasks)\n self._portfolio.run_operations(operations, self._strategies)\n\n","sub_path":"Python/Research/Demo_BackTester/trader/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"138425707","text":"#-------------------------------------------------------------------------------\n# Name: module2\n# Purpose:\n#\n# Author: mathew.jacob\n#\n# Created: 25/08/2016\n# Copyright: (c) mathew.jacob 2016\n# Licence: \n#-------------------------------------------------------------------------------\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport unittest\nimport sys\nimport os\nimport time\nimport traceback\ndir_path = os.path.dirname(os.path.realpath(__file__))\nfolder_path=os.path.abspath(os.path.join(dir_path, os.pardir))\nsys.path.insert(0,folder_path+\"\\Library\")\nsys.path.insert(0,folder_path+\"\\Syslibrary\")\nsys.path.insert(0,folder_path+\"\\Data\")\nsys.path.insert(0,folder_path+\"\\Object\")\nfrom launcheTender import LauncheTenderclass\nfrom logdriver import logvalue\nlogs=logvalue.logger\nlogclose=logvalue()\nftime = time.mktime(time.localtime())\nptime=time.strftime(\"%d-%m-%Y_%H%M%S\", time.localtime(ftime))\n#filename = 'TestCase-100002-{0}.png'.format(ptime)\ntf = 'test_openbrowser'\nfilename = 'Testcase-%s.png' %(tf)\npath= setupValue().screenpath\nfullpath = os.path.join(path,filename)\n\n#Test case no:100002\nclass LoginAction(unittest.TestCase):\n def test_openbrowser(self):\n try:\n launcheTender1 = LauncheTenderclass()\n browser = launcheTender1.launchetender()\n Login = browser.find_element_by_xpath(\"//button[@id='submitButton']\") #Login button\n Login1 = Login.text\n self.assertEqual(Login1, 'Log In')\n #browser.close()\n logs.info(\"Test Case No : 100002 Passed Successfully\")\n except Exception:\n logs.error(\"Validation with Test Case No: 100002 failed\")\n browser.save_screenshot(fullpath)\n traceback.print_exc(file=sys.stdout)\n self.fail(\"Test Case No: 100002 failed\")\n browser.implicitly_wait(5)\n","sub_path":"TestScript/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"254208051","text":"\r\n\r\nimport requests\r\nfrom json_with_dates import loads\r\n\r\nPORT = 54321\r\n\r\nr = requests.get(\"http://localhost:{}/chinook/invoice-item/invoice/customer\".format(PORT))\r\nrows = loads(r.text)\r\n\r\nlast_cust_id = -1\r\nlast_invoice_id = -1\r\nlast_fname = None\r\nlast_lname = None\r\nrunning_tot = None\r\nrunning_count = None\r\n\r\nfor (cust_id, fname, lname, inv_id, inv_tot) in rows:\r\n if cust_id != last_cust_id:\r\n if last_cust_id >= 0:\r\n template = \"{:3} {:15} {:15} {:3} {:8.2f}\"\r\n line = template.format(last_cust_id, last_fname,\r\n last_lname, running_count, running_tot)\r\n print(line)\r\n running_tot = 0\r\n running_count = 0\r\n last_cust_id = cust_id\r\n last_fname = fname\r\n last_lname = lname\r\n elif inv_id != last_invoice_id:\r\n running_tot += inv_tot\r\n last_invoice_id = inv_id\r\n running_count += 
1\r\n\r\ntemplate = \"{:3} {:15} {:15} {:3} {:8.2f}\"\r\nline = template.format(last_cust_id, last_fname,\r\n last_lname, running_count, running_tot)\r\nprint(line)\r\n","sub_path":"In Class Code/in_class_190307/prep/rest_server/report_using_rest3.py","file_name":"report_using_rest3.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"316933205","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.contrib.auth.signals import user_logged_out\nfrom django.contrib.auth.signals import user_login_failed\n\n\nLOGIN_ACTION = 'I'\nLOGOUT_ACTION = 'O'\nFAIL_ACTION = 'F'\n\nACTION_TYPES = (\n (LOGIN_ACTION, ugettext_lazy('Login')),\n (LOGOUT_ACTION, ugettext_lazy('Logout')),\n (FAIL_ACTION, ugettext_lazy('Fail')),\n)\n\n\nclass UserAuthAction(models.Model):\n\n action_type = models.CharField(\n ugettext_lazy(\"Action Type\"),\n max_length=1,\n choices=ACTION_TYPES\n )\n\n user = models.ForeignKey(\n User,\n null=True,\n blank=True\n )\n\n performed = models.DateTimeField(\n ugettext_lazy(\"Performed\"),\n auto_now_add=True\n )\n\n notes = models.TextField(\n ugettext_lazy(\"Notes\"),\n null=True,\n )\n\n class Meta:\n verbose_name = ugettext_lazy(\"Action\")\n verbose_name_plural = ugettext_lazy(\"Actions\")\n\n\ndef user_in(sender, user, request, **kwargs):\n UserAuthAction.objects.create(\n user=user,\n action_type=LOGIN_ACTION\n )\nuser_logged_in.connect(user_in)\n\n\ndef user_out(sender, user, request, **kwargs):\n UserAuthAction.objects.create(\n user=user,\n action_type=LOGOUT_ACTION\n )\nuser_logged_out.connect(user_out)\n\n\ndef user_fail(sender, credentials, **kwargs):\n UserAuthAction.objects.create(\n action_type=FAIL_ACTION\n )\nuser_login_failed.connect(user_fail)\n","sub_path":"loginaudit/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"490047224","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\nimport fileUtils\n\n\napp_list = [\"AXA BANK\", \"ARGENTA\", \"BELFIUS\", \"CBC\", \"BNPPF\", \"BPOST\", \"CRELAN\", \"EUROPA BANK\", \"HELLO BANK\",\n \"ING\", \"KBC\", \"KEY TRADE\", \"BDK\", \"DEUTSCHE BANK\", \"FINTRO\", \"NAGELMACKERS\", \"VDK\", \"CPH\", \"BEOBANK\"]\n\n# urls and details for all androïd apps\nandroid_bank_apps = {\n \"AXA BANK\": {\"id\": \"be.axa.mobilebanking\"},\n \"ARGENTA\": {\"id\": \"be.argenta.bankieren\"},\n \"BELFIUS\": {\"id\": \"be.belfius.directmobile.android\"},\n \"CBC\": {\"id\": \"com.kbc.mobile.android.phone.cbc\"},\n \"BNPPF\": {\"id\": \"com.bnpp.easybanking\"},\n \"BPOST\": {\"id\": \"com.bpb.mobilebanking.smartphone.prd\"},\n \"CRELAN\": {\"id\": \"be.crelan.channels.mobile.android.store\"},\n \"EUROPA BANK\": {\"id\": \"com.mobile.europabank\"},\n \"HELLO BANK\": {\"id\": \"com.bnpp.hellobank\"},\n \"ING\": {\"id\": \"MyING.be\"},\n \"KBC\": {\"id\": \"com.kbc.mobile.android.phone.kbc\"},\n \"KEY TRADE\": {\"id\": \"be.keytradebank.phone\"},\n \"BDK\": {\"id\": \"be.bankdekremer.mobile\"},\n \"DEUTSCHE BANK\": {\"id\": \"com.db.pbc.mybankbelgium\"},\n \"FINTRO\": {\"id\": \"com.bnpp.easybanking.fintro\"},\n \"NAGELMACKERS\": {\"id\": \"be.dlbank.mobilebankingapp\"},\n \"VDK\": {\"id\": \"com.vdk.prod\"},\n \"CPH\": {\"id\": \"be.cph.cphmobile\", \"hl\": 
\"en\"},\n \"BEOBANK\": {\"id\": \"com.beobank_prod.bad\", \"hl\": \"en\"}\n\n}\n\n\ndef truncate(n, decimals=1):\n multiplier = 10 ** decimals\n return int(n * multiplier) / multiplier\n\n\n# extracts the rates and the reviews for a given bank in the android_bank_apps\ndef get_android_data(bank):\n url = \"https://play.google.com/store/apps/details\"\n headers = {\n 'User-Agent': \"PostmanRuntime/7.17.1\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"4f005cb9-3a23-4061-b606-c745ebc28d14,00d8d48b-ae93-44d4-b579-a9c8315840d0\",\n 'Host': \"play.google.com\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Cookie': \"NID=193=BEOlb1_8iAn6lY1sHiOvduyvQdIQq8kFttG-CaBsKdF8ZWU_gZL3SAp2qNGp4MKR_Rlm5NUaAPh_1gIyJWZOf3pVRgPVq_IAssIcA9sdGrnNNEHLpnVXB2_bU-EcTQdJPdheC3bm0G0lb2QjgkMecdLKcfroZKnaVAbzJ2L0dks\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=android_bank_apps[bank])\n\n # store the DOM element in a soup\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # get the script of type application/ld+json containing the data\n data = json.loads(soup.find('script', type='application/ld+json').text)\n\n # return the android data dict for a bank\n return {\"app\": \"{} ANDROID\".format(bank),\n \"rate\": truncate(float(data[\"aggregateRating\"][\"ratingValue\"])),\n \"reviews\": int(data[\"aggregateRating\"][\"ratingCount\"])\n }\n\n\n# urls and details for all IOS apps\nios_bank_apps = {\n \"CBC\": {\"id\": \"458081756\"},\n \"BELFIUS\": {\"id\": \"572835707\"},\n \"ARGENTA\": {\"id\": \"893585833\"},\n \"AXA BANK\": {\"id\": \"602565257\"},\n \"BNPPF\": {\"id\": \"516502006\"},\n \"BPOST\": {\"id\": \"1278930217\"},\n \"CRELAN\": {\"id\": \"893189359\"},\n \"HELLO BANK\": {\"id\": \"642897716\"},\n \"ING\": {\"id\": \"437203741\"},\n \"KBC\": {\"id\": \"458066754\"},\n \"KEY TRADE\": {\"id\": \"640974593\"},\n \"BDK\": {\"id\": \"1382705162\"},\n \"DEUTSCHE BANK\": {\"id\": \"1082668633\"},\n \"FINTRO\": {\"id\": \"544288649\"},\n \"NAGELMACKERS\": {\"id\": \"885804394\"},\n \"VDK\": {\"id\": \"895434057\"},\n \"CPH\": {\"id\": \"935210539\"},\n \"BEOBANK\": {\"id\": \"1008666594\"}\n}\n\n\n# extracts the rates and the reviews for a given bank in the IOS_bank_apps\ndef get_ios_data(bank):\n # builds the appropriate URL using bank id from apps dict\n url = \"https://amp-api.apps.apple.com/v1/catalog/BE/apps/{}\".format(ios_bank_apps[bank][\"id\"])\n querystring = {\"platform\": \"web\",\n \"extend\": \"description%2CdeveloperInfo%2CeditorialVideo%2Ceula%2CfileSizeByDevice%2Cmessages\"\n \"Screenshots%2CprivacyPolicyUrl%2CprivacyPolicyText%2CpromotionalText%2Cscreenshots\"\n \"ByType%2CsupportURLForLanguage%2CversionHistory%2CvideoPreviewsByType%2CwebsiteUrl\",\n \"include\": \"genres%2Cdeveloper%2Creviews%2Cmerchandised-in-apps%2Ccustomers-also-bought-apps%2\"\n \"Cdeveloper-other-apps%2Capp-bundles%2Ctop-in-apps%2Ceula\",\n \"l\": \"en-gb\"}\n\n headers = {\n 'Accept': \"application/json\",\n 'Authorization': \"Bearer eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IldlYlBsYXlLaWQifQ.eyJpc3MiOiJBTVBXZWJQb\"\n \"GF5IiwiaWF0IjoxNTc0MTk3NDA3LCJleHAiOjE1ODk3NDk0MDd9.ael_GP97O4fyXJuQAQlmC7ieY-t-OOGFwtXShhVA\"\n \"6m_p9Sq03D-_FiUKSfZ2iXGob3vPFnDe0s_OKI3Tg7KVaA\",\n 'Content-Type': \"application/x-www-form-urlencoded; charset=UTF-8\",\n 'Origin': \"https://apps.apple.com\",\n 'Referer': \"https://apps.apple.com/be/app/cbc-mobile/id458081756\",\n 'User-Agent': \"Mozilla/5.0 (Linux; Android 
6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/78.0.3904.108 Mobile Safari/537.36\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"ccc7059d-2c3f-4091-928e-cdd99976cf64,41a5415b-0e99-49bd-a486-66c28b11dc79\",\n 'Host': \"amp-api.apps.apple.com\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Cookie': \"geo=BE\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n data = json.loads(response.text)\n\n # return the IOS data dict for a bank\n return {\n \"app\": \"{} IOS\".format(bank),\n \"rate\": data[\"data\"][0][\"attributes\"][\"userRating\"][\"value\"],\n \"reviews\": data[\"data\"][0][\"attributes\"][\"userRating\"][\"ratingCount\"]\n }\n\n\n# the rate_computer computes a weighted rate for apps\n# parameters are data dict with rate and reviews keys\ndef rate_computer(android_data, ios_data):\n total_reviews = android_data[\"reviews\"] + ios_data[\"reviews\"]\n return truncate((android_data[\"rate\"] * android_data[\"reviews\"] + ios_data[\"rate\"] * ios_data[\"reviews\"])\n / total_reviews)\n\n\ndef get_tc_rate():\n url = \"https://www.topcompare.be/fr/compte-courant/api/products\"\n\n headers = {\n 'User-Agent': \"PostmanRuntime/7.20.1\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"1fb23ed3-5d76-46f7-9f19-728d1a53fa78,7b298df6-081a-4a99-8209-4135e39954ee\",\n 'Host': \"www.topcompare.be\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Cookie': \"analytics_id=8945f211-a382-4c70-90c9-7c674c4963bb; JSESSIONID=33F5410109808647BB90BD0AB821BF0E\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n app_rates = {}\n response = requests.request(\"GET\", url, headers=headers)\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # get all the divs describing the products\n rate_tags = soup.find_all(\"div\", {\"class\": \"card-container row normal\"})\n\n # extract the score tag and the score value\n for tag in rate_tags:\n score_tag = tag.find(\"div\", {\"data-col-title\": \"ScoreMobileApp\"})\n app_rates[tag.attrs['data-cgg-id']] = float(\n score_tag.find(\"div\", {\"class\": \"column-primary card-column__value\"}) \\\n .text.strip().replace(\",\", \".\"))\n\n # return the rate of the apps in a dictionnary\n return app_rates\n\n\n# return the bank for a product type\ndef product_to_bank(product):\n for key in app_list:\n if product[:3] in key:\n return key\n\n\ndef topCompare_product_rates(tc_rate):\n prod_list = []\n p_hash ={}\n for value in list(tc_rate):\n prod_list.append({product_to_bank(value): tc_rate[value]})\n for elmt in fileUtils.no_double(prod_list):\n for key in elmt:\n p_hash[key] = elmt[key]\n return p_hash\n\n\ndef app_rate_frame():\n frame = []\n tc_website_rate = topCompare_product_rates(get_tc_rate())\n for bank_app in app_list:\n try:\n tc_rate = tc_website_rate[bank_app]\n except:\n tc_rate = \"-\"\n if bank_app in ios_bank_apps:\n if bank_app in android_bank_apps:\n and_data = get_android_data(bank_app)\n ios_data = get_ios_data(bank_app)\n frame.append([bank_app, and_data[\"rate\"], and_data[\"reviews\"], ios_data[\"rate\"], ios_data[\"reviews\"],\n rate_computer(and_data, ios_data), tc_rate])\n else:\n frame.append([bank_app, \"-\", \"-\", get_ios_data(bank_app)[\"reviews\"], get_ios_data(bank_app)[\"rate\"],\n get_ios_data(bank_app)[\"rate\"]])\n else:\n frame.append([bank_app, get_android_data(bank_app)[\"rate\"], get_android_data(bank_app)[\"reviews\"],\n \"-\", \"-\", 
get_android_data(bank_app)[\"rate\"], tc_rate])\n # fileUtils.displayRates(header, frame)\n return frame\n\n\n# compare the ratings and build a message\ndef compare_rate_and_notify(delta):\n message = []\n\n # TC website rates and products\n tc_product_and_rates = get_tc_rate()\n for pdt in tc_product_and_rates:\n # correspond a product to the appropriate bank\n bank = product_to_bank(pdt)\n\n # compute the weighted rate if the bank has both apps\n if bank in android_bank_apps:\n if bank in ios_bank_apps:\n actual_rate = rate_computer(get_android_data(bank), get_ios_data(bank))\n else:\n # else consider the unique rate\n actual_rate = get_android_data(bank)[\"rate\"]\n\n elif bank in ios_bank_apps:\n actual_rate = get_ios_data(bank)[\"rate\"]\n else:\n # if the bank is not present in any of the app repository\n actual_rate = None\n\n # if the difference btn the actual rate and the tc_web site rate gt delta\n if actual_rate and abs(actual_rate - tc_product_and_rates[pdt]) > delta:\n if not \"{} .... NOT OK!\".format(bank.lower()) in message:\n message += [\"{} .... NOT OK!\".format(bank.lower())]\n if not message:\n message += [\"\", \"APP RATINGS STATUS\", \"all app ratings ...................................OK!\"]\n else:\n message = [\"\", \"\", \"APP RATINGS STATUS WITH A DIFFERENCE OF MORE THAN: {}\".format(delta), \"\"] + message\n fileUtils.displayRates([\"APP\", \"ANDROID_RATINGS\", \"ANDROID_REVIEWS\", \"IOS_RATINGS\", \"IOS_REVIEWS\",\n \"WEIGHTED_RATINGS\", \"TOP COMPARE RATINGS\"], app_rate_frame())\n return message\n\n\n# print(compare_rate_and_notify(0.25))\n\n\n\n\n\n\n\n","sub_path":"app_rating.py","file_name":"app_rating.py","file_ext":"py","file_size_in_byte":10634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"392496195","text":"from core.config import Config\nimport json\n\n\nclass DatabaseConnector:\n def __init__(self, config: Config) -> None:\n super().__init__()\n self.__url = config.DATABASE_REST_URL\n self.__http_client_session = config.HTTP_CLIENT_SESSION\n self.__debug_logging = config.IS_DEBUG_LOGGING\n\n async def create_new_edge_id(self, plant: str) -> str:\n data = json.dumps({'plant': plant})\n url = self.__url + 'edge_id'\n headers = {'Accept': 'application/json', 'Content-type': 'application/json', 'Prefer': 'return=representation'}\n async with self.__http_client_session.post(url=url, data=data, headers=headers) as resp:\n if resp.status != 201:\n return \"Could not create edge_id\"\n resp_content = await resp.json()\n return str(resp_content[0]['id'])\n\n async def create_new_sensor_entry(self, data: str) -> str:\n url = self.__url + 'sensor'\n headers = {'Accept': 'application/json', 'Content-type': 'application/json'}\n async with self.__http_client_session.post(url=url, data=data, headers=headers) as resp:\n if resp.status != 201:\n if self.__debug_logging:\n print('Database insert error. Non 201 Code. Ignoring message')\n return '-1'\n if self.__debug_logging:\n print('Database insert successful')\n return '0'\n","sub_path":"cloud/src/core/databaseConnector.py","file_name":"databaseConnector.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"46087222","text":"\"\"\"\n氧气模型\n\n规则:\n1. 镜像边界\n2. 
气体扩散公式\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom util import mkdirp\n\n\nOUTPUT_DIR = \"../out/2.oxygen/\"\n\nSIZE = 8\nCELLS = [(i, j) for i in range(0, SIZE) for j in range(0, SIZE)]\n\nMAX_ITERATION = 32\n\ninitial_oxygen_distribution: pd.DataFrame\ndistributions: [pd.DataFrame]\n\n\ndef init():\n global initial_oxygen_distribution, distributions\n\n initial_oxygen_distribution = pd.DataFrame(data=np.zeros([SIZE, SIZE]), dtype=float)\n for i in range(0, SIZE):\n for j in range(0, SIZE):\n initial_oxygen_distribution.iat[i, j] = np.random.rand()\n distributions = [initial_oxygen_distribution.copy(), initial_oxygen_distribution.copy()]\n\n\ndef show_oxygen_distribution(ax, board, title=None):\n ax.imshow(board, cmap=\"Blues\", vmin=0, vmax=1)\n ax.set_xticks(np.arange(0, SIZE, 1))\n ax.set_yticks(np.arange(0, SIZE, 1))\n ax.set_xticklabels(np.arange(1, SIZE + 1, 1))\n ax.set_yticklabels(np.arange(1, SIZE + 1, 1))\n ax.set_xticks(np.arange(-.5, SIZE, 1), minor=True)\n ax.set_yticks(np.arange(-.5, SIZE, 1), minor=True)\n ax.grid(which=\"minor\", color='w', linestyle='-', linewidth=2)\n if title:\n ax.title.set_text(title)\n\n\ndef iterate():\n global distributions\n\n for generation in range(1, MAX_ITERATION + 1):\n current_distribution = distributions[generation % 2]\n last_distribution = distributions[(generation + 1) % 2]\n\n for cols in current_distribution:\n current_distribution[cols].values[:] = 0\n\n for i, j in CELLS:\n total = 0\n for dx in range(-1, 2):\n for dy in range(-1, 2):\n ti, tj = (i + dx + SIZE) % SIZE, (j + dy + SIZE) % SIZE\n total = total + (1 - min(1, last_distribution.iat[ti, tj]))\n total = total - (1 - min(1, last_distribution.iat[i, j]))\n if total > 0: # 当周边存在格子氧气密度小于1时进行扩散\n diffusion_oxygen = last_distribution.iat[i, j] / 2 # 散发当前格子氧气浓度的一半\\\n total_shared = 0\n for dx in range(-1, 2):\n for dy in range(-1, 2):\n ti, tj = (i + dx + SIZE) % SIZE, (j + dy + SIZE) % SIZE\n target_cell = last_distribution.iat[ti, tj]\n shared_oxygen = diffusion_oxygen * ((1 - min(1, target_cell)) / total)\n total_shared = total_shared + shared_oxygen\n current_distribution.iat[ti, tj] = current_distribution.iat[ti, tj] + shared_oxygen\n # 为避免浮点误差,不应该简单地取当前格子氧气浓度的一半,而是上次的氧气浓度减去当前散发的氧气浓度。\n current_distribution.iat[i, j] = \\\n current_distribution.iat[i, j] + last_distribution.iat[i, j] - total_shared\n else:\n current_distribution.iat[i, j] = current_distribution.iat[i, j] + last_distribution.iat[i, j]\n\n print(\"process: \" + f\"{generation}/{MAX_ITERATION}\")\n if generation % 1 == 0:\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)\n show_oxygen_distribution(ax1, last_distribution, f\"Generation: {generation - 1}\")\n show_oxygen_distribution(ax2, current_distribution, f\"Generation: {generation}\")\n fig.tight_layout()\n fig.savefig(OUTPUT_DIR + str(generation))\n plt.close(fig)\n\n\nif __name__ == \"__main__\":\n mkdirp(OUTPUT_DIR)\n init()\n iterate()\n","sub_path":"src/2.oxygen.py","file_name":"2.oxygen.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"476137385","text":"from django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\nfrom .models import (Article,\n Image,\n Category,\n Tag,\n Comment)\n\n\n@admin.register(Category)\nclass CategoryAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {'fields': ('author', 'name',)}),\n )\n list_display = ('name', 'created')\n list_filter = 
('author', 'name', 'created')\n search_fields = ('author__username', 'author__first_name',\n 'author__last_name', 'name__icontains')\n\n\n@admin.register(Tag)\nclass TagAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {'fields': ('author', 'name')}),\n )\n list_display = ('name', 'created')\n list_filter = ('author', 'name', 'created')\n search_fields = ('author__username', 'author__first_name',\n 'author__last_name', 'name__icontains')\n\n\n@admin.register(Article)\nclass ArticleAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {'fields': ('author',)}),\n (_('Article'), {'fields': ('title',\n 'body',\n 'abstract',\n 'category',\n 'tags')}),\n (_('Publish'), {'fields': ('draft', 'published')})\n )\n\n list_display = ('title', 'author', 'category',\n 'draft', 'published')\n list_filter = ('author', 'category', 'tags', 'draft', 'published')\n search_fields = ('author__username', 'author__first_name',\n 'author__last_name', 'title__icontains',\n 'body__icontains', 'abstract__icontains',\n 'category__icontains', 'tags__icontains')\n filter_horizontal = ('tags',)\n\n\n@admin.register(Image)\nclass ImageAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {'fields': ('name', 'image', 'article')}),\n )\n\n list_display = ('name', 'article', 'created')\n list_filter = ('name', 'article', 'created')\n search_fields = ('name__icontains', 'article__title')\n\n\n@admin.register(Comment)\nclass CommentAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {'fields': ('name', 'body')}),\n (_('Article'), {'fields': ('article',)})\n )\n list_display = ('name', 'article', 'created')\n list_filter = ('name', 'article', 'created')\n search_fields = ('name__icontains', 'body__icontains', 'article__title')\n","sub_path":"posts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"335456564","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\nclass MpSession():\n \"\"\" Represents the state of the current macro_pack run \"\"\"\n def __init__(self, workingPath, version, mpType):\n self.workingPath = workingPath\n self.version = version\n self.mpType = mpType\n \n self.vbomEncode = False\n self.avBypass = False\n self.obfuscateForm = False \n self.obfuscateNames = False \n self.obfuscateStrings = False \n self.persist = False\n self.keepAlive = False\n self.trojan = False\n self.stealth = False\n self.vbaInput = None\n self.startFunction = None\n self.fileOutput = False\n self.excelFilePath = None \n self.excel97FilePath = None \n self.wordFilePath = None \n self.word97FilePath = None\n self.pptFilePath = None\n self.vbaFilePath = None\n self.stdinContent = None\n self.template = None\n self.ddeMode = False # attack using Dynamic Data Exchange (DDE) protocol (see https://sensepost.com/blog/2017/macro-less-code-exec-in-msword/)","sub_path":"src/common/mp_session.py","file_name":"mp_session.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"429416676","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport random\nimport copy\nfrom . 
import fasta\n\n\nclass DataSet(object):\n \"\"\"DataSet タンパク質/DNAの配列のデータセットを表すクラス。\n\n このクラスはabstractなクラス。実装はサブクラスに。\n ProteinDataSet、DNADataSet、RNADataSetなどなど。。。\n また、インスタンスの生成に関してはDataSetMakerに丸投げする。\n Factory Methodパターンを採用。もっといいのがあれば変えるかも。\n\n やっぱりただのTemplate Methodパターンにする。\n 「どこからデータが来たのか」をこのクラスで管理するのは、\n お門違いな感じがするため。そういうのはdataset_makerに任せてしまう。\"\"\"\n\n def __init__(self):\n \"\"\"コンストラクタ。データのリストが来たら、それをデータセット\n クラスのインスタンスによしなにしてやる。\n 将来的には、ストリームデータとかにも対応できるとよい。\n \"\"\"\n self.current_index = 0\n self.seqnum = 0\n #self.container = {} # 実際にデータを格納する場所.一応辞書を想定。\n #self.seqnum = 0# 格納されているデータの数\n #self.data_type = SomeType # どんなデータを格納しているか\n #self.identifiers = [] # 識別用。\n #self.name = \"\" # 名前。\n #self.labels = {} # ラベル\n #self.attributes = [self.seqnum, self.data_type, self.identifiers,\n # self.name, self.labels]\n # リストを想定している(removeメソッドが使える必要あり)\n\n def __len__(self):\n \"\"\"データの数を返す。\"\"\"\n return self.seqnum\n\n def __contains__(self, key):\n \"\"\"keyが存在しているかを判定する。具体的にはidentifierで\n 判断する。\"\"\"\n return key in self.identifiers\n\n def __getitem__(self, key):\n \"\"\"keyに対応するデータを取り出す。keyとして文字列(ID)または\n 整数値(インデクス)を受け付ける。両方あり得る場合、数字の方が\n 優先される。\"\"\"\n if isinstance(key, list) or isinstance(key, tuple):\n return [self[k] for k in key]\n if isinstance(key, int):\n return self.container[self.identifiers[key]]\n elif key in self.identifiers:\n return self.container[key]\n else:\n raise IndexError(key + \"に対応する値は存在しません。\")\n\n def __setitem__(self, key, val):\n \"\"\"keyに対応するデータを登録する。\n 既に登録済みの場合はエラーを出力し、keyはIDとしてあつかう。\"\"\"\n if isinstance(key, list) and isinstance(val, list):\n for i in range(len(key)):\n self[key[i]] = val[i]\n if key in self.identifiers:\n raise IndexError(key + \"は既に存在しています。\")\n elif not isinstance(val, self.data_type):\n print(type(val))\n raise TypeError(str(val) + \" の型が不正です。\")\n else:\n self.container[key] = val\n self.identifiers.append(key)\n self.seqnum += 1\n\n def __delitem__(self, key):\n \"\"\"keyに対応するアイテムを削除する。\"\"\"\n if isinstance(key, list) or isinstance(key, tuple):\n for k in key:\n del(self[k])\n if key in self.identifiers:\n self.identifiers.remove(key)\n del(self.container[key])\n self.seqnum -= 1\n elif key < self.seqnum:\n del(self.container[self.identifiers[key]])\n del(self.identifiers[key])\n self.seqnum -= 1\n else:\n raise IndexError(key + \"がありません。\")\n\n def __iter__(self):\n \"\"\"Iterationを作成する。個々のデータを取り出す。\n \"\"\"\n self.current_index = 0\n return self\n\n def __next__(self):\n \"\"\"イテレータの一部。次の要素を返す。\n \"\"\"\n if self.current_index < self.seqnum:\n self.current_index += 1\n return self.get_by_index(self.current_index - 1)\n else:\n raise StopIteration\n\n def __str__(self):\n '''REturn string'''\n txt = 'Name: ' + self.name + \"\\n\"\n txt += '\\tDataType: ' + str(self.data_type) + \"\\n\"\n txt += '\\tSeqNum : ' + str(self.seqnum) + \"\\n\"\n txt += '\\tID 00000: ' + self.identifiers[0] + \"\\n\"\n txt += '\\t...\\n'\n txt += \"\\t...{0} sequence(s)\".format(len(self))\n return txt\n\n def __repr__(self):\n txt = '\\n' + self.__str__()\n return txt\n\n def _calc_stat(self):\n \"\"\"コピーなどでseqnumなどの内部情報と実際のデータの数との\n 間に齟齬が生じた場合に、適切な値を計算し直す。\"\"\"\n self.seqnum = len(self.identifiers)\n\n def get_by_index(self, i):\n \"\"\"内部的な配列内の順番で取り出す。\n \"\"\"\n return self.container[self.identifiers[i]]\n\n def copy(self, idlist, do_deepcopy=False):\n \"\"\"idlist内にあるデータをコピーして、新たなデータセットを\n 返す。新たなリストを作って、そこからインスタンスを生成\n している。\"\"\"\n seq_list = self.copy_container(idlist, do_deepcopy)\n labels = self.copy_labels(idlist)\n return 
self.__class__(seq_list, name=self.name, labels=labels)\n #for identifier in idlist:\n # if do_deepcopy:\n # seq_list.append(copy.deepcopy(self[identifier]))\n # else:\n # seq_list.append(self[identifier])\n #return self.__class__(seq_list, name=self.name)\n\n def copy_container(self, idlist, do_deepcopy=False):\n '''Return the copy of self.container'''\n if do_deepcopy:\n return [copy.deepcopy(self.container[id]) for id in idlist]\n else:\n return [self.container[id] for id in idlist]\n\n def copy_labels(self, idlist):\n '''Return copy version of self.labels'''\n #return {id: self.get_label(id) for id in idlist}\n label = {}\n for key in idlist:\n label[key] = self.get_label(key)\n return label\n\n def sample(self, num):\n \"\"\"データセットからnum個のデータをサンプリングする。\"\"\"\n l = random.shuffle(list(range(len(self))))\n for i in range(num):\n print(l)\n pass\n\n def split_to(self, num, is_random=True, do_copy=False):\n \"\"\"データセットをnum個に分けて、同じくDatasetクラスの\n インスタンスのリスト(あるいはタプル)として返す。\n randomがTrueの時は、一回順番をシャッフルしてから\n splitする。\"\"\"\n if is_random:\n idlist = self.identifiers[:]\n random.shuffle(idlist)\n else:\n idlist = self.identifiers[:]\n generated_datasets = []\n size_per_subset = self.seqnum / num\n fraction = self.seqnum % num\n splitted = 0\n for i in range(num):\n start = splitted\n end = splitted + size_per_subset\n if fraction > 0:\n end += 1\n fraction -= 1\n splitted = end\n #generated_datasets.append(self.copy(range(start,end), do_copy))\n generated_datasets.append(self.copy(idlist[start:end], do_copy))\n return generated_datasets\n\n def split_by(self, num, is_random=True, do_copy=False):\n \"\"\"データセットをnum個ずつのサブ・データセットに\n 分けて、そのリストあるいはタプルを返す。\n split_toと同じように、random=Trueならシャッフルしてから\n 分割を行う。\"\"\"\n if is_random:\n idlist = self.identifiers[:]\n random.shuffle(idlist)\n else:\n idlist = self.identifiers[:]\n generated_datasets = []\n splitted = 0\n while splitted < self.seqnum:\n start = splitted\n end = splitted + num\n splitted = end\n generated_datasets.append(self.copy(idlist[start:end], do_copy))\n return generated_datasets\n\n def prepare_cross_validation(self, fold=5, is_random=True, do_deepcopy=False):\n \"\"\"上のsplit_*系の応用となるメソッドで、\n 何フォールドかを指定することで、それに適したデータセットの\n サブセットを返してくれる。辞書のリストを返す。\n [{'test':dataset_1, 'train':dataset_2}, {'test':dataset_3, ..\n のような感じ。\"\"\"\n splitted = self.split_to(fold, is_random, do_deepcopy)\n cv_data = []\n for i in range(fold):\n train = None\n for j in range(fold):\n if i == j:\n next\n elif train == None:\n # ここでcopyを使わないと、splitted[j]のサイズが\n # どんどん増えてしまう。\n train = copy.deepcopy(splitted[j])\n else:\n train.merge(splitted[j], do_deepcopy)\n cv_data.append({'test': splitted[i], 'train': train})\n return cv_data\n\n def cv(self, fold=5, is_random=True):\n \"\"\"prepare_cross_validationへのエイリアス。名前長い(;´Д`)\"\"\"\n return self.prepare_cross_validation(fold, is_random)\n\n def merge(self, other, do_copy=False, verbose=False):\n \"\"\"他のデータセット(一つ)とマージする。\n データの種類が違うとき(e.g.アミノ酸配列と塩基配列)は\n エラーを出力する。\n 複数のデータセットをマージしたい時はmerge_allを参照\n IDは、self[key] = ...のところで自動的に追加されている。\n たぶん配列の数も同じだと思う。。。\"\"\"\n if self.data_type != other.data_type:\n raise TypeError(other + \"のデータの種類が違います。\")\n if verbose:\n print(self, other)\n for key in other.identifiers:\n if key in self.identifiers:\n continue\n else:\n if do_copy:\n self[key] = copy.copy(other[key])\n else:\n self[key] = other[key]\n if key in other.labels:\n self.labels[key] = other.get_label(key)\n #self.identifiers.append(key)\n self._calc_stat()\n if verbose: print(self)\n\n def merge_all(self, others, 
return_new=False):\n \"\"\"複数のデータセットを一つにまとめる。\n\n return_new Trueの場合、新たなインスタンスとして返す。\n \"\"\"\n if not isinstance(others, list) or not isinstance(others, tuple):\n raise TypeError(others + \"はリストである必要があります。\")\n if return_new:\n original = copy.deepcopy(self)\n else:\n original = self\n for dataset in others:\n original.merge(dataset)\n if return_new:\n original\n\n def set_name(self, name):\n \"\"\"データセットに名前をつける。\"\"\"\n self.name = name\n\n def set_label(self, id, label):\n \"\"\"データセット中に、ラベルを設定する。\"\"\"\n if not self.labels:\n self.labels = {}\n if id in self.labels:\n self.labels[id] = label\n elif isinstance(id, int) and id < self.seqnum:\n self.labels[self.identifiers[id]] = label\n else:\n self.labels[id] = label\n\n def set_labels(self, label):\n \"\"\"データセット中のデータすべてに、labelを設定する。\"\"\"\n for id in self.identifiers:\n self.labels[id] = label\n\n def get_label(self, index):\n \"\"\"index番目のデータのラベルを取得する。\"\"\"\n if index==None:\n return self.labels\n elif index in self.labels:\n return self.labels[index]\n elif isinstance(index, int):\n return self.labels[self.identifiers[index]]\n\n def get_labels(self, type='list'):\n \"\"\"すべてのラベルをリストとして返す。\n 順番は、self.identifiersに入っているものと同様。\"\"\"\n return [self.get_label(id) for id in self.identifiers]\n\n def convert2num(self, charlist):\n \"\"\"文字列を数字にして返す。\"\"\"\n char_dic = {charlist[i]:i for i in range(len(charlist))}\n return [[char_dic[c] for c in seq] for seq in self]\n\n\nclass FastaDataSet(DataSet):\n \"\"\"FastaDataSet Fasta配列のデータセット。\n\n データセットを二つに分けたり、\n クロスバリデーション用にデータを分割して提供したり\n とかそんな感じ。\"\"\"\n\n def __init__(self, seq_list, name='', origin='', labels=None):\n \"\"\"seq_listから、新しいデータセットを作る。\n seq_listはリスト(あるいはタプルでもおk?)で、各要素は\n self.data_typeと一致していなければならない。\"\"\"\n self.identifiers = [seq.identifier for seq in seq_list]\n self.container = {}\n self.data_type = fasta.Fasta\n self.seqnum = 0\n for seq in seq_list:\n self.container[seq.identifier] = seq\n self.seqnum += 1\n if name:\n self.name = name\n if origin:\n self.origin = origin\n elif name:\n self.origin = name\n if labels != None and not type(labels) in (list, tuple, dict):\n self.labels = {}\n for key in self.identifiers:\n self.labels[key] = labels\n elif isinstance(labels, dict):\n self.labels = labels\n else:\n self.labels = {}\n self.set_labels(0)\n\n\nclass DNADataSet(DataSet):\n \"\"\"DNADataSet DNA配列を扱うデータセット。\n\n 出来���ことはタンパク質版のと大体一緒。\n 翻訳してProteinDataSetのインスタンスを作れるようにしても\n おもしろいかも(たぶんそこまで出来ないけど)\"\"\"\n pass\n\n\nclass RNADataSet(DataSet):\n \"\"\"RNADataSet RNA配列を扱うデータセット。\n\n ほぼDNADataSetと同じ。たぶん使わないし作らない。\"\"\"\n pass\n\n\n","sub_path":"predictor/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":14631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"473652806","text":"\nclass human:\n bornNum = 900\n def __new__(cls):\n print('new')\n return super().__new__(cls)\n def bornPlus(self):\n human.bornNum+=1\n def printBorn(self):\n print(self.bornNum)\ndef main():\n humanInstence = human()\n InsTest=human()\n #print(humanInstence.bornNum)\n humanInstence.printBorn()\n\n humanInstence.bornPlus()\n humanInstence.printBorn()\n InsTest.printBorn()\n InsTest.bornPlus()\n\n print(human.bornNum)\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"Assets/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"275519664","text":"'''\nImplementation of LCS algorithm\n'''\ndef 
lcs(str1, str2):\n m, n = len(str1), len(str2)\n dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n\n if str1[i - 1] == str2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(\n dp[i-1][j], \n dp[i][j-1] \n )\n\n print(dp)\n return dp[m][n]\n\n\nif __name__ == \"__main__\":\n str1 = \"petra\"\n str2 = \"peter\"\n ans = lcs(str1, str2)\n print(\"Length of LCS is \", ans)","sub_path":"longest_common_subsequence.py","file_name":"longest_common_subsequence.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"416238939","text":"import time\n## ________ Provided Solution __________\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\n# Provided Solution - Runtime O(n^n)\nduplicates = []\nfor name_1 in names_1: # O(n)\n for name_2 in names_2: # O(n)\n if name_1 == name_2:\n duplicates.append(name_1)\n\nend_time = time.time()\nprint (\"\\n\\nProvided Solution\")\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n## ________ My solution section ____________\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\n# My Solution - Runtime O(5n) \nnames1_set = set(names_1) # O(n) Use set() to remove duplicates within my lists. \nnames2_set = set(names_2) # O(n)\ncount_table = {}\nfor name in names1_set: # O(n)\n count_table[name] = 1\nfor name in names2_set: # O(n)\n if name in count_table:\n count_table[name] += 1\n elif name not in count_table:\n count_table[name] = 1\nduplicates = [k for (k,v) in count_table.items() if v > 1] # O(n)\n\n\nend_time = time.time()\nprint (\"\\n\\nMy Solution\")\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"540019745","text":"import numpy as np\nimport os\nimport json\nimport re\nimport urllib.request\nimport pycqed as pq\nimport qcodes as qc\n\nfrom pycqed.measurement import measurement_control\nfrom qcodes.instrument.base import Instrument\nfrom pycqed.measurement.demonstrator_helper.detectors import \\\n Quantumsim_Two_QB_Hard_Detector\nfrom pycqed.measurement import sweep_functions as swf\n\n\"\"\"\nMeasurementControl and other legacy imports\n\"\"\"\n\nfrom tess.TessConnect import TessConnection\nimport logging\n\"\"\"\nTessConnection is used to tell Tess that we are a kernel open for use\n\"\"\"\n\ndefault_execute_options = {}\n\ntc = TessConnection()\ntc.connect(\"execute_CCL\")\ndefault_simulate_options = {\n \"num_avg\": 10000,\n \"iterations\": 1\n}\n\"\"\"\nWe connect to tess by giving the kernel type as \"execute_CCL\"\n\"\"\"\n\ntry:\n MC = Instrument.find_instrument('MC')\n st = MC.station\n new_station = False\nexcept KeyError:\n st = qc.station.Station()\n new_station = True\n\n\"\"\"\nCreate the station for which the Instruments can connect 
to.\nA virtual representation of the physical setup. In our case, the CCL.\nSince we're calling the station,\n\"\"\"\ntry:\n MC_demo = measurement_control.MeasurementControl(\n 'Demonstrator_MC', live_plot_enabled=True, verbose=True)\n\n datadir = os.path.abspath(os.path.join(\n os.path.dirname(pq.__file__), os.pardir, 'demonstrator_execute_data'))\n MC_demo.datadir(datadir)\n MC_demo.station = st\n\n st.add_component(MC_demo)\nexcept KeyError:\n MC_demo = Instrument.find_instrument('Demonstrator_MC')\n\n\ndef execute(qisa_file_url: str, tqisa_file_url:str, qasm_file_url:str, config_json: str,\n verbosity_level: int=0):\n options = json.loads(config_json)\n\n if (not new_station):\n write_to_log('options:')\n write_to_log(options)\n write_to_log(qisa_file_url)\n write_to_log(tqisa_file_url)\n write_to_log(qasm_file_url)\n\n CCL = Instrument.find_instrument('CCL')\n device = Instrument.find_instrument('device')\n\n num_avg = int(options.get('num_avg', 512))\n\n nr_soft_averages = int(np.round(num_avg/512))\n MC_demo.soft_avg(nr_soft_averages)\n\n device.ro_acq_averages(512)\n\n # Get the qisa file\n qisa_fp = _retrieve_file_from_url(qisa_file_url)\n\n # Two ways to generate the sweep_points. Either I get from the file_url\n # or I get the appended options file which has the kw \"measurement_points\"\n # sweep_points_fp = _retrieve_file_from_url(sweep_points_file_url)\n # sweep_points = json.loads(sweep_points_fp)\n # sweep_points = sweep_points[\"measurement_points\"]\n\n # Ok, I am assured by stanvn that he will provide me a options with kw\n sweep_points = options[\"measurement_points\"]\n\n s = swf.OpenQL_File_Sweep(filename=qisa_fp, CCL=CCL,\n parameter_name='Points', unit='a.u.',\n upload=True)\n\n d = device.get_correlation_detector()\n\n MC_demo.set_sweep_function(s)\n MC_demo.set_sweep_points(sweep_points)\n MC_demo.set_detector_function(d)\n data = MC_demo.run('CCL_execute') # FIXME <- add the proper name\n\n else:\n qisa_fp = _retrieve_file_from_url(qisa_file_url)\n data = _simulate_quantumsim(qisa_fp, options)\n\n return _MC_result_to_chart_dict(data)\n\n\ndef calibrate(config_json: str):\n \"\"\"\n Perform calibrations based on the options specified in the config_json.\n Calibrations are performed using the dependency graph.\n\n N.B. on this helper no calibration protocol is defined so it only\n updates the calibration data in the overview.\n \"\"\"\n options = json.loads(config_json)\n\n # Get the kernel_type\n try:\n kernel_type = options['kernel_type']\n except:\n print('Could not find kernel_type in the json options file')\n kernel_type = 'execute_CCL'\n\n # Send over the results of the calibrations\n send_calibration_data(kernel_type)\n\n\ndef _retrieve_file_from_url(file_url: str):\n \"\"\"\n Self explanatory: we retrieve the file from the url given\n \"\"\"\n\n file_name = file_url.split(\"/\")[-1]\n base_path = os.path.join(\n pq.__path__[0], 'measurement', 'demonstrator_helper',\n 'qasm_files', file_name)\n file_path = base_path\n # download file from server\n urllib.request.urlretrieve(file_url, file_path)\n return file_path\n\n\ndef _get_qasm_sweep_points(file_path):\n \"\"\"\n I am unsure what this does. 
Will need to grep RO_acq_averages.\n Am guessing this is related to qubit_object, CCL_transmon.py.\n \"\"\"\n counter = 0\n with open(file_path) as f:\n line = f.readline()\n while(line):\n if re.match(r'(^|\\s+)(measure|RO)(\\s+|$)', line):\n counter += 1\n line = f.readline()\n\n return range(counter)\n\n\ndef _MC_result_to_chart_dict(result):\n for i in result:\n if(isinstance(result[i], np.ndarray)):\n result[i] = result[i].tolist()\n return [{\n \"data-type\": \"chart\",\n \"data\": result\n }]\n\n\ndef _simulate_quantumsim(file_path, options):\n \"\"\"\n We can remove this function as I am using this to dummy execute\n \"\"\"\n quantumsim_sweep = swf.None_Sweep()\n quantumsim_sweep.parameter_name = 'CCL number '\n quantumsim_sweep.unit = '#'\n\n qubit_parameters = {\n 'Q0': {'T1': 30e3, 'T2': 17e3, 'frac1_0': 0.0189, 'frac1_1': 0.918},\n 'Q1': {'T1': 30e3, 'T2': 17e3, 'frac1_0': 0.068, 'frac1_1': 0.949},\n 'q0': {'T1': 30e3, 'T2': 17e3, 'frac1_0': 0.0189, 'frac1_1': 0.918},\n 'q1': {'T1': 30e3, 'T2': 17e3, 'frac1_0': 0.068, 'frac1_1': 0.949}}\n\n quantumsim_det = Quantumsim_Two_QB_Hard_Detector(\n file_path, dt=(40, 280), qubit_parameters=qubit_parameters)\n sweep_points = range(len(quantumsim_det.parser.circuits))\n\n MC_demo.set_detector_function(quantumsim_det)\n MC_demo.set_sweep_function(quantumsim_sweep)\n MC_demo.set_sweep_points(sweep_points)\n dat = MC_demo.run(\"run QASM\")\n print('simulation execute_CCL finished')\n return dat\n\n\ndef send_calibration_data(kernel_type: str):\n \"\"\"\n Sends a snapshot containing the latest calibration data\n \"\"\"\n\n banned_pars = ['IDN', 'ro_acq_weight_func_I', 'ro_acq_weight_func_Q',\n 'qasm_config']\n snapshot = st.snapshot()\n calibration = {\n \"q0\": snapshot[\"instruments\"][\"QL\"],\n \"q1\": snapshot[\"instruments\"][\"QR\"]\n # 'fridge': snapshot[\"instruments\"][\"Maserati_fridge_mon\"]\n }\n for par in banned_pars:\n try:\n del calibration['q0']['parameters'][par]\n del calibration['q1']['parameters'][par]\n except KeyError as e:\n logging.warning(e)\n tc.client.publish_custom_msg({\n \"calibration\": calibration,\n \"kernel_type\": kernel_type\n })\n\n # print({\n # \"calibration\": calibration,\n # \"kernel_type\": kernel_type})\n print('Calibration data send')\n\n\ndef write_to_log(string):\n with open(r'D:\\Experiments\\1709_M18\\demo_log.txt', 'a+') as f:\n f.write(str(string) + '\\n')\n","sub_path":"pycqed/measurement/demonstrator_helper/execute_helpers_CCL.py","file_name":"execute_helpers_CCL.py","file_ext":"py","file_size_in_byte":7088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"632273107","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nx = np.random.random(50000)\ny = np.random.random(50000)\nboolean = np.ones(50000)\n\nplot_data = np.array([x, y, boolean]).T\n\n'''for i in plot_data:\n if i[0]**2 + i[1]**2 > 1:\n i[2] = 0'''\n# ==向量化:\nidx = plot_data[:,0]**2+plot_data[:,1]**2<1\nplot_data[idx,2] = 0\n\n\nplot_data = pd.DataFrame(plot_data, columns=['x', 'y', 'boolean'])\n\n# 区分内外点\nin_df = plot_data[plot_data['boolean'] == 1]\nout_df = plot_data[plot_data['boolean'] == 0]\n\na = plt.plot(in_df['x'], in_df['y'], '.')[0]\nplt.setp(a, markersize=0.3)\nb = plt.plot(out_df['x'], out_df['y'], '.')[0]\nplt.setp(b, markersize=0.3)\nprint('pi值模拟结果为' + str(len(in_df) / len(x) * 4))\n# 
更多参数见http://matplotlib.org/users/pyplot_tutorial.html\n","sub_path":"estimate_pi.py","file_name":"estimate_pi.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"48931158","text":"#Check if Binary representation is Palindrome in Python *\n\n\n \ndef binaryPallindrome(num): \n \n # convert number into binary \n binary = bin(num) \n \n # skip first two characters of string \n # because bin function appends '0b' as \n # prefix in binary representation of \n # a number \n binary = binary[2:] \n \n # now reverse binary string and compare \n # it with original \n return binary == binary[-1::-1] \n \n# Driver program \nif __name__ == \"__main__\": \n num = int(input(\"Enter the number: \"))\n print (binaryPallindrome(num))","sub_path":"Personel/Siddhesh/Practice/Mar8/Binary.py","file_name":"Binary.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"231517304","text":"\"\"\" Config management handler\"\"\"\nimport host_service\nimport subprocess\nimport os\n\nMOD_NAME= 'cfg_mgmt'\n\nclass CFG_MGMT(host_service.HostModule):\n \"\"\"DBus endpoint that executes CFG_MGMT related commands \"\"\"\n\n @staticmethod\n def _run_command(commands, options):\n \"\"\" Run config mgmt command \"\"\"\n cmd = ['/usr/bin/config']\n if isinstance(commands, list):\n cmd.extend(commands)\n else:\n cmd.append(commands)\n \n for x in options: \n cmd.append(str(x)) \n output =\"\"\n try:\n print(\"cmd\", cmd)\n rc = 0\n output= subprocess.check_output(cmd)\n print('Output -> ',output)\n\n except subprocess.CalledProcessError as err:\n print(\"Exception when calling get_sonic_error -> %s\\n\" %(err))\n rc = err.returncode\n output = err.output\n \n return rc,output\n\n @host_service.method(host_service.bus_name(MOD_NAME), in_signature='as', out_signature='is')\n def save(self, options):\n return CFG_MGMT._run_command([\"save\",\"-y\"], options)\n\n @host_service.method(host_service.bus_name(MOD_NAME), in_signature='as', out_signature='is')\n def reload(self, options):\n return CFG_MGMT._run_command([\"reload\", \"-y\"], options)\n\n @host_service.method(host_service.bus_name(MOD_NAME), in_signature='as', out_signature='is')\n def load(self, options):\n return CFG_MGMT._run_command([\"load\", \"-y\"], options)\n \n @staticmethod\n def _get_version():\n '''Return the SONiC version string, or NONE if command to retrieve it fails'''\n try:\n proc = subprocess.Popen(\"sonic-cfggen -y /etc/sonic/sonic_version.yml -v build_version\", shell=True, stdout=subprocess.PIPE)\n out,err = proc.communicate()\n build_version_info = out.strip()\n return build_version_info.decode(\"utf-8\")\n except:\n return None\n\n @staticmethod\n def _create_host_file(fname, content=None):\n '''Create a file under /host'''\n version = CFG_MGMT._get_version()\n if version:\n filename = '/host/image-' + version + \"/\" + fname\n try:\n f = open(filename, \"w+\")\n if content:\n f.write(content)\n f.close()\n return 0, \"\"\n except IOError as e:\n return 1, (\"Unable to create file [%s] - %s\" % (filename, e))\n else:\n return 1, \"Unable to get SONiC version: operation not performed\"\n\n @staticmethod\n def _delete_host_file(fname):\n '''Delete a file under /host'''\n version = CFG_MGMT._get_version()\n if version:\n filename = '/host/image-' + version + \"/\" + fname\n if not os.path.exists(filename):\n return 1, \"No configuration erase operation to 
cancel.\"\n try:\n os.remove(filename)\n return 0, \"\"\n except IOError as e:\n return 1, (\"Unable to delete file [%s] - %s\" % (filename, e))\n else:\n return 1, \"Unable to get SONiC version: operation not performed\"\n\n\n @staticmethod\n def _run_command_erase(option):\n \"\"\" Run config mgmt command \"\"\"\n rc = 1\n if option == \"\":\n rc,err = CFG_MGMT._create_host_file(\"/pending_erase\")\n elif option == \"boot\":\n rc,err = CFG_MGMT._create_host_file(\"/pending_erase\", \"boot\")\n elif option == \"install\":\n rc,err = CFG_MGMT._create_host_file(\"/pending_erase\", \"install\")\n elif option == \"no\":\n rc,err = CFG_MGMT._delete_host_file(\"/pending_erase\")\n return rc, err\n\n @host_service.method(host_service.bus_name(MOD_NAME), in_signature='s', out_signature='is')\n def write_erase(self, option):\n return CFG_MGMT._run_command_erase(option)\n\ndef register():\n \"\"\"Return class name\"\"\"\n return CFG_MGMT, MOD_NAME\n","sub_path":"scripts/host_modules/cfg_mgmt.py","file_name":"cfg_mgmt.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"107291531","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch.nn.modules.module import Module\nfrom torch.nn.parameter import Parameter\nimport functools\n \nfrom util import *\n \nclass _CBINorm(nn.modules.batchnorm._BatchNorm):\n def __init__(self, num_features, num_con=8, eps=1e-5, momentum=0.1, affine=False, track_running_stats=False):\n super(_CBINorm, self).__init__(\n num_features, eps, momentum, affine, track_running_stats)\n self.ConBias = nn.Sequential(\n nn.Linear(num_con, num_features),\n nn.Tanh()\n )\n \n def _check_input_dim(self, input):\n raise NotImplementedError\n \n def _load_from_state_dict(self, state_dict, prefix, metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n version = metadata.get('version', None)\n # at version 1: removed running_mean and running_var when\n # track_running_stats=False (default)\n if version is None and not self.track_running_stats:\n running_stats_keys = []\n for name in ('running_mean', 'running_var'):\n key = prefix + name\n if key in state_dict:\n running_stats_keys.append(key)\n if len(running_stats_keys) > 0:\n error_msgs.append(\n 'Unexpected running stats buffer(s) {names} for {klass} '\n 'with track_running_stats=False. If state_dict is a '\n 'checkpoint saved before 0.4.0, this may be expected '\n 'because {klass} does not track running stats by default '\n 'since 0.4.0. Please remove these keys from state_dict. If '\n 'the running stats are actually needed, instead set '\n 'track_running_stats=True in {klass} to enable them. 
See '\n 'the documentation of {klass} for details.'\n .format(names=\" and \".join('\"{}\"'.format(k) for k in running_stats_keys),\n klass=self.__class__.__name__))\n for key in running_stats_keys:\n state_dict.pop(key)\n\n super(_CBINorm, self)._load_from_state_dict(\n state_dict, prefix, metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n \n def forward(self, input, ConInfor):\n self._check_input_dim(input)\n b, c = input.size(0), input.size(1)\n tarBias = self.ConBias(ConInfor).view(b,c,1,1)\n out = F.instance_norm(\n input, self.running_mean, self.running_var, None, None,\n self.training or not self.track_running_stats, self.momentum, self.eps)\n \n if self.affine:\n bias = self.bias.repeat(b).view(b,c,1,1)\n weight = self.weight.repeat(b).view(b,c,1,1)\n return (out.view(b, c, *input.size()[2:])+tarBias)*weight + bias\n else:\n return out.view(b, c, *input.size()[2:])+tarBias\n\nclass CBINorm2d(_CBINorm):\n def _check_input_dim(self, input):\n if input.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'\n .format(input.dim()))\n \nclass _CBBNorm(Module):\n def __init__(self, num_features, num_con, eps=1e-5, momentum=0.1, affine=True,\n track_running_stats=True):\n super(_CBBNorm, self).__init__()\n self.num_features = num_features\n self.eps = eps\n self.momentum = momentum\n self.affine = affine\n self.track_running_stats = track_running_stats\n if self.affine:\n self.weight = Parameter(torch.Tensor(num_features))\n self.bias = Parameter(torch.Tensor(num_features))\n else:\n self.register_parameter('weight', None)\n self.register_parameter('bias', None)\n if self.track_running_stats:\n self.register_buffer('running_mean', torch.zeros(num_features))\n self.register_buffer('running_var', torch.ones(num_features))\n self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))\n else:\n self.register_parameter('running_mean', None)\n self.register_parameter('running_var', None)\n self.register_parameter('num_batches_tracked', None)\n self.reset_parameters()\n \n self.ConBias = nn.Sequential(\n nn.Linear(num_con, num_features),\n nn.Tanh()\n )\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n\n def reset_running_stats(self):\n if self.track_running_stats:\n self.running_mean.zero_()\n self.running_var.fill_(1)\n self.num_batches_tracked.zero_()\n\n def reset_parameters(self):\n self.reset_running_stats()\n if self.affine:\n self.weight.data.uniform_()\n self.bias.data.zero_()\n\n def _check_input_dim(self, input):\n raise NotImplementedError\n\n def forward(self, input, ConInfor):\n self._check_input_dim(input)\n b, c = input.size(0), input.size(1)\n exponential_average_factor = 0.0\n\n if self.training and self.track_running_stats:\n self.num_batches_tracked += 1\n if self.momentum is None: # use cumulative moving average\n exponential_average_factor = 1.0 / self.num_batches_tracked.item()\n else: # use exponential moving average\n exponential_average_factor = self.momentum\n \n out = F.batch_norm(\n input, self.running_mean, self.running_var, None, None,\n self.training or not self.track_running_stats,\n exponential_average_factor, self.eps)\n \n biasSor = self.avgpool(out)\n biasTar = self.ConBias(ConInfor).view(b,c,1,1)\n \n if self.affine:\n weight = self.weight.repeat(b).view(b,c,1,1)\n bias = self.bias.repeat(b).view(b,c,1,1)\n return (out - biasSor + biasTar)*weight + bias\n else:\n return out - biasSor + biasTar\n\n def extra_repr(self):\n return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \\\n 
'track_running_stats={track_running_stats}'.format(**self.__dict__)\n\n def _load_from_state_dict(self, state_dict, prefix, metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n version = metadata.get('version', None)\n\n if (version is None or version < 2) and self.track_running_stats:\n # at version 2: added num_batches_tracked buffer\n # this should have a default value of 0\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key not in state_dict:\n state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)\n\n super(_BatchNorm, self)._load_from_state_dict(\n state_dict, prefix, metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\nclass CBBNorm2d(_CBBNorm):\n def _check_input_dim(self, input):\n if input.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'\n .format(input.dim()))\n \ndef get_norm_layer(layer_type='instance', num_con=2):\n if layer_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n c_norm_layer = functools.partial(CBBNorm2d, affine=True, num_con=num_con)\n elif layer_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)\n c_norm_layer = functools.partial(CBINorm2d, affine=True, num_con=num_con)\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % layer_type)\n return norm_layer, c_norm_layer\n \n#############################################################################################################\n############################################# Generator #####################################################\n#############################################################################################################\n\nclass SingleResidualBlock(nn.Module):\n def __init__(self, nch, c_norm_layer):\n super(SingleResidualBlock, self).__init__()\n self.c1 = nn.Conv2d(nch, nch, kernel_size=3, stride=1, padding=1, bias=False)\n self.cn1 = c_norm_layer(nch)\n self.c2 = nn.Conv2d(nch, nch, kernel_size=3, stride=1, padding=1, bias=False)\n self.cn2 = c_norm_layer(nch)\n \n def forward(self, x):\n data, con = x[0], x[1]\n res = data\n res_out = nn.ReLU()(self.cn1(self.c1(data), con))\n res_out = self.cn2(self.c2(res_out), con)\n return torch.add(res_out, res), con\n\nclass SingleGenerator(nn.Module):\n def __init__(self, nch_in, nch, reduce=2, num_cls=3, res_num=6, norm_type=\"instance\", num_con=2, nch_out=None):\n super(SingleGenerator, self).__init__()\n if nch_out==None:\n nch_out = nch_in\n norm_layer, c_norm_layer = get_norm_layer(layer_type=norm_type, num_con=num_con)\n self.num_cls = num_cls\n\n # down sampling\n cnn_layers = [nn.Conv2d(nch_in*1, nch, kernel_size=7, stride=1, padding=3, bias=False)]\n cnorms = [c_norm_layer(nch)]\n for i in range(num_cls):\n cnn_layers.append(nn.Conv2d(nch*(2**i), nch*2**(i+1), kernel_size=2*reduce, stride=reduce, padding=int(reduce/2), bias=False))\n cnorms.append(c_norm_layer(nch*2**(i+1)))\n self.down_convs = nn.ModuleList(cnn_layers)\n self.down_cnorms = nn.ModuleList(cnorms)\n \n # residual block\n res_block = []\n for _ in range(res_num):\n res_block.append(SingleResidualBlock(nch*2**(num_cls), c_norm_layer))\n self.resBlocks = nn.Sequential(*res_block)\n \n # up sampling\n cnn_layers = [nn.ConvTranspose2d(nch*(2**(num_cls)), nch*2**(num_cls-1), kernel_size=2*reduce, stride=reduce, padding=int(reduce/2), bias=False)]\n norms = [norm_layer(nch*2**(num_cls-1))]\n for i in range(1, num_cls)[::-1]:\n cnn_layers.append(nn.ConvTranspose2d(nch*(2**(i)), nch*2**(i-1), 
kernel_size=2*reduce, stride=reduce, padding=int(reduce/2), bias=False))\n norms.append(norm_layer(nch*2**(i-1)))\n cnn_layers.append(nn.Conv2d(nch, nch_out, kernel_size=7, stride=1, padding=3, bias=False))\n self.up_convs = nn.ModuleList(cnn_layers)\n self.up_norms = nn.ModuleList(norms)\n \n def forward(self, x, c):\n for i in range(self.num_cls+1):\n x = self.down_convs[i](x)\n x = self.down_cnorms[i](x, c)\n x = nn.ReLU()(x)\n \n x = self.resBlocks([x, c])[0]\n for i in range(self.num_cls):\n x = self.up_convs[i](x)\n x = self.up_norms[i](x)\n x = nn.ReLU()(x)\n x = self.up_convs[-1](x)\n x = nn.Tanh()(x)\n return x\n \n#############################################################################################################\n########################################## Discriminator ####################################################\n#############################################################################################################\n\nclass SingleDiscriminator_original(nn.Module):\n def __init__(self, nch_in, nch, reduce=2, num_cls=3, norm_type=\"instance\", num_con=2):\n super(SingleDiscriminator_original, self).__init__()\n self.num_cls = num_cls\n\n # down sampling\n \n cnn_layers = [nn.Conv2d(nch_in*1, nch, kernel_size=4, stride=2, padding=1, bias=False),\n nn.LeakyReLU()]\n \n dim_in = nch\n for i in range(1, num_cls):\n \n dim_out = min(dim_in*2, nch*8)\n cnn_layers.append(nn.Conv2d(dim_in, dim_out, kernel_size=2*reduce, stride=reduce, padding=int(reduce/2), bias=False))\n cnn_layers.append(nn.LeakyReLU())\n dim_in = dim_out\n \n dim_out = min(dim_in*2, nch*8)\n cnn_layers.append(nn.Conv2d(dim_in, 1, kernel_size=4, stride=1, padding=1, bias=True))\n \n self.down_convs = nn.Sequential(*cnn_layers)\n\n def forward(self, x):\n return self.down_convs(x)\n \nclass SingleDiscriminator_original_multi(nn.Module):\n \n def __init__(self, nch_in, nch, reduce=2, num_cls=3, norm_type=\"instance\", num_con=2):\n super(SingleDiscriminator_original_multi, self).__init__()\n self.discriminator1 = SingleDiscriminator_original(nch_in, nch, reduce, num_cls, norm_type, num_con)\n self.down = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)\n self.discriminator2 = SingleDiscriminator_original(nch_in, nch//2, reduce, num_cls, norm_type, num_con)\n \n def forward(self, x):\n output1 = self.discriminator1(x)\n output2 = self.discriminator2(self.down(x))\n return [output1, output2]\n \nclass SingleDiscriminator_solo(nn.Module):\n def __init__(self, nch_in, nch, reduce=2, num_cls=3, norm_type=\"instance\", num_con=2):\n super(SingleDiscriminator_solo, self).__init__()\n \n self.num_cls = num_cls\n\n # down sampling\n \n cnn_layers = [nn.Conv2d(nch_in*1, nch, kernel_size=4, stride=2, padding=1, bias=False),\n nn.LeakyReLU()]\n \n dim_in = nch\n for i in range(1, num_cls):\n \n dim_out = min(dim_in*2, nch*8)\n cnn_layers.append(nn.Conv2d(dim_in, dim_out, kernel_size=2*reduce, stride=reduce, padding=int(reduce/2), bias=False))\n cnn_layers.append(nn.LeakyReLU())\n dim_in = dim_out\n \n self.down_convs = nn.Sequential(*cnn_layers)\n\n def forward(self, x):\n return self.down_convs(x)\n\nclass SingleDiscriminator_solo_multi(nn.Module):\n \n def __init__(self, nch_in, nch, reduce=2, num_cls=3, norm_type=\"instance\", n_class=4):\n super(SingleDiscriminator_solo_multi, self).__init__()\n self.n_class = n_class\n self.discriminator1 = SingleDiscriminator_solo(nch_in, nch, reduce, num_cls, norm_type, None)\n self.down = nn.AvgPool2d(3, stride=2, padding=[1, 1], 
count_include_pad=False)\n self.discriminator2 = SingleDiscriminator_solo(nch_in, nch//2, reduce, num_cls, norm_type, None)\n \n dim_in = min(nch*2**num_cls, nch*8)\n self.last_layer1 = nn.Conv2d(dim_in, 1, kernel_size=4, stride=1, padding=1, bias=True)\n self.last_layer2 = nn.Conv2d(dim_in//2, 1, kernel_size=4, stride=1, padding=1, bias=True)\n self.classification_layer1 = [nn.Conv2d(dim_in, n_class, kernel_size=8, stride=1, padding=0, bias=True)]\n self.classification_layer2 = [nn.Conv2d(dim_in//2, n_class, kernel_size=4, stride=1, padding=0, bias=True)]\n \n self.classification_layer1.append(nn.Softmax())\n self.classification_layer2.append(nn.Softmax())\n \n self.classification_layer1 = nn.Sequential(*self.classification_layer1)\n self.classification_layer2 = nn.Sequential(*self.classification_layer2)\n \n def forward(self, x):\n disout1 = self.discriminator1(x)\n disout2 = self.discriminator2(self.down(x))\n output1 = self.last_layer1(disout1)\n output2 = self.last_layer2(disout2)\n out_class1 = self.classification_layer1(disout1)\n out_class2 = self.classification_layer2(disout2)\n return [output1, output2], [out_class1.view(-1, self.n_class), out_class2.view(-1, self.n_class)]\n\n#############################################################################################################\n############################################# Encoder #######################################################\n#############################################################################################################\n\nclass BasicBlock(nn.Module):\n def __init__(self, nch_in, nch_out, c_norm_layer=None):\n super(BasicBlock, self).__init__()\n \n self.cnorm1 = c_norm_layer(nch_in)\n self.nl1 = nn.LeakyReLU(0.2)\n self.conv1 = nn.Conv2d(nch_in, nch_in, kernel_size=3, stride=1, padding=1, bias=False, padding_mode=\"reflect\")\n \n self.cnorm2 = c_norm_layer(nch_in)\n self.nl2 = nn.LeakyReLU(0.2)\n \n self.cmp = nn.Sequential(\n nn.Conv2d(nch_in, nch_out, kernel_size=3, stride=1, padding=1, bias=False, padding_mode=\"reflect\"),\n nn.AvgPool2d(2, 2)\n )\n self.shortcut = nn.Sequential(\n nn.AvgPool2d(2, 2),\n nn.Conv2d(nch_in, nch_out, kernel_size=1, stride=1, padding=0, bias=True),\n )\n\n def forward(self, input):\n x, d = input\n out = self.cmp(self.nl2(self.cnorm2(self.conv1(self.nl1(self.cnorm1(x,d))),d)))\n out = out + self.shortcut(x)\n return [out,d]\n\nclass Encoder_original(nn.Module):\n def __init__(self, nch_in, nch_out, nch=64, num_cls=3, norm_type=\"instance\", num_con=2, device=\"cpu\"):\n super(Encoder_original, self).__init__()\n _, c_norm_layer = get_norm_layer(layer_type=norm_type, num_con=num_con)\n self.num_cls = num_cls\n self.device = device\n\n self.first_layer = nn.Conv2d(nch_in*1, nch, kernel_size=7, stride=2, padding=1, bias=True)\n \n cnn_layers = []\n in_nch = nch\n for i in range(num_cls):\n out_nch = in_nch * 2\n cnn_layers.append(BasicBlock(in_nch, out_nch, c_norm_layer))\n in_nch = out_nch\n self.layers = nn.Sequential(*cnn_layers)\n self.last_layer = nn.Sequential(nn.LeakyReLU(0.2), nn.AdaptiveAvgPool2d(1))\n self.fcmean = nn.Linear(out_nch, nch_out)\n self.fcvar = nn.Linear(out_nch, nch_out)\n \n def reparametrize(self, mu, logvar):\n std = logvar.mul(0.5).exp_()\n eps = torch.FloatTensor(std.size()).normal_().to(self.device)\n eps = Variable(eps)\n return eps.mul(std).add_(mu)\n \n def forward(self, x, c):\n x_conv = self.last_layer(self.layers([self.first_layer(x),c])[0])\n b = x_conv.size(0)\n x_conv = x_conv.view(b, -1)\n mu = self.fcmean(x_conv)\n logvar = 
self.fcvar(x_conv)\n c_code = self.reparametrize(mu, logvar)\n return c_code, mu, logvar\n \nclass BasicBlock_classification(nn.Module):\n def __init__(self, nch_in, nch_out, norm_layer):\n super(BasicBlock_classification, self).__init__()\n \n self.norm1 = norm_layer(nch_in)\n self.nl1 = nn.LeakyReLU(0.2)\n self.conv1 = nn.Conv2d(nch_in, nch_in, kernel_size=3, stride=1, padding=1, bias=False, padding_mode=\"reflect\")\n \n self.norm2 = norm_layer(nch_in)\n self.nl2 = nn.LeakyReLU(0.2)\n \n self.cmp = nn.Sequential(\n nn.Conv2d(nch_in, nch_out, kernel_size=3, stride=1, padding=1, bias=False, padding_mode=\"reflect\"),\n nn.AvgPool2d(2, 2)\n )\n self.shortcut = nn.Sequential(\n nn.AvgPool2d(2, 2),\n nn.Conv2d(nch_in, nch_out, kernel_size=1, stride=1, padding=0, bias=True),\n )\n\n def forward(self, input):\n x = input\n out = self.cmp(self.nl2(self.norm2(self.conv1(self.nl1(self.norm1(x))))))\n out = out + self.shortcut(x)\n return out\n \nclass Encoder(nn.Module):\n def __init__(self, nch_in, nch_out, nch=64, num_cls=3, norm_type=\"instance\", num_con=2, device=\"cpu\"):\n super(Encoder, self).__init__()\n norm_layer, c_norm_layer = get_norm_layer(layer_type=norm_type, num_con=num_con)\n self.num_cls = num_cls\n self.device = device\n self.first_layer = nn.Conv2d(nch_in*1, nch, kernel_size=7, stride=2, padding=1, bias=True)\n \n cnn_layers = []\n in_nch = nch\n for i in range(num_cls):\n out_nch = in_nch * 2\n cnn_layers.append(BasicBlock_classification(in_nch, out_nch, norm_layer))\n in_nch = out_nch\n self.layers = nn.Sequential(*cnn_layers)\n self.last_layer = nn.Sequential(nn.LeakyReLU(0.2), nn.AdaptiveAvgPool2d(1))\n self.fcmean = nn.Linear(out_nch, nch_out)\n self.fcvar = nn.Linear(out_nch, nch_out)\n self.fcclass = nn.Linear(out_nch, num_con)\n \n def reparametrize(self, mu, logvar):\n std = logvar.mul(0.5).exp_()\n eps = torch.FloatTensor(std.size()).normal_().to(self.device)\n eps = Variable(eps)\n return eps.mul(std).add_(mu)\n \n def freeze_melt(self, classifier_layers, mode=\"freeze\"):\n netE_layers = list(self.state_dict().keys())\n for i, param in enumerate(self.parameters()):\n if netE_layers[i] in classifier_layers:\n if mode==\"freeze\":\n param.requires_grad = False\n elif mode==\"melt\":\n param.requires_grad = True\n \n def forward(self, x):\n feature = self.last_layer[0](self.layers(self.first_layer(x)))\n x_conv = feature\n mu = self.fcmean(self.last_layer[1](x_conv).view(feature.size(0),-1))\n logvar = self.fcvar(self.last_layer[1](x_conv).view(feature.size(0),-1))\n c_code = self.reparametrize(mu, logvar)\n class_output = self.fcclass(self.last_layer[1](x_conv).view(feature.size(0),-1))\n \n return c_code, mu, logvar, class_output, None\n \nclass Encoder_classifier(nn.Module):\n def __init__(self, nch_in, nch_out, nch=64, num_cls=3, norm_type=\"instance\", num_con=2):\n super(Encoder_classifier, self).__init__()\n norm_layer, c_norm_layer = get_norm_layer(layer_type=norm_type, num_con=num_con)\n self.num_cls = num_cls\n\n self.first_layer = nn.Conv2d(nch_in*1, nch, kernel_size=7, stride=2, padding=1, bias=True)\n \n cnn_layers = []\n in_nch = nch\n for i in range(num_cls):\n out_nch = in_nch * 2\n cnn_layers.append(BasicBlock_classification(in_nch, out_nch, norm_layer))\n in_nch = out_nch\n self.layers = nn.Sequential(*cnn_layers)\n self.last_layer = nn.Sequential(nn.LeakyReLU(0.2), nn.AdaptiveAvgPool2d(1))\n \n self.fcclass = nn.Linear(out_nch, num_con)\n \n def forward(self, x):\n x_conv = self.last_layer(self.layers(self.first_layer(x)))\n x_conv = 
x_conv.view(x_conv.size(0), -1)\n output_class = self.fcclass(x_conv)\n output_class = F.softmax(output_class)\n return output_class","sub_path":"pyfiles/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":22569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"103145517","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport glob\nimport pickle\n\nclass Camera_Calibration:\n def __init__(self):\n self.calib_images_loaded = True\n # Arrays to store object points and image points from all the images.\n self.obj_points = [] # 3d points in real world space\n self.img_points = [] # 2d points in image plane.\n self.ret = 0 # RMS re projection error. Should be between 0.1 and 1 pixel. An RMS error of 1.0 means that, on average, each of these projected points is 1.0 px away from its actual position. The error is not bounded in [0, 1], it can be considered as a distance.\n self.mtx = 0 # Camera calibration matrix\n self.dist = 0 # Distortion coefficient\n self.rvecs = 0 # Rotation Vector\n self.tvecs = 0 # Translation Vector\n self.nx = 9\n self.ny = 6\n\n def __find_points(self):\n \"\"\"\n Function to get object points and image points using the chessboard images\n :return:\n \"\"\"\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((self.ny * self.nx, 3), np.float32)\n objp[:, :2] = np.mgrid[0:self.nx, 0:self.ny].T.reshape(-1, 2) # X, Y coordinates\n\n # Make a list of calibration images\n images = glob.glob('camera_cal/calibration*.jpg')\n fig, axs = plt.subplots(5,4, figsize=(16, 11))\n fig.subplots_adjust(hspace = .2, wspace=.001)\n axs = axs.ravel()\n\n # Step through the list and search for chessboard corners\n for i, fname in enumerate(images):\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (self.nx, self.ny), None)\n\n # If found, add object points, image points\n if ret == True:\n self.obj_points .append(objp)\n self.img_points .append(corners)\n\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, (self.nx,self.ny), corners, ret)\n axs[i].axis('off')\n axs[i].imshow(img)\n plt.show()\n self.__save_points()\n\n def __find_point(self, img,nx, ny):\n \"\"\"\n Finds image and object points in single image\n :param img: input image\n :param nx: number of inner corners in each row\n :param ny: number of inner corners in each column\n :return:\n \"\"\"\n self.nx = nx\n self.ny = ny\n self.obj_points.clear()\n self.img_points.clear()\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((self.ny * self.nx, 3), np.float32)\n objp[:, :2] = np.mgrid[0:self.nx, 0:self.ny].T.reshape(-1, 2) # X, Y coordinates\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (self.nx, self.ny), None)\n # If found, add object points, image points\n if ret == True:\n self.obj_points .append(objp)\n self.img_points .append(corners)\n # Draw and display the corners\n #img = cv2.drawChessboardCorners(img, (self.nx,self.ny), corners, ret)\n\n def __save_points(self):\n \"\"\"\n This function saves the image points and object points to use them later\n instead of computing every time\n :return:\n \"\"\"\n file_Name = \"imgpoints\"\n # open the file for writing\n fileObject = open(file_Name, 'wb')\n pickle.dump(self.img_points, fileObject)\n # close the 
fileObject\n fileObject.close()\n file_Name = \"objpoints\"\n # open the file for writing\n fileObject = open(file_Name, 'wb')\n # this writes the object a to the\n pickle.dump(self.obj_points, fileObject)\n # close the fileObject\n fileObject.close()\n\n def calibrate_camera(self,img, nx, ny, load_saved_points = True):\n \"\"\"\n Performs the camera calibration\n :param img: input image\n :return:\n \"\"\"\n img_size = (img.shape[1], img.shape[0])\n if load_saved_points:\n if self.calib_images_loaded:\n # load already saved object points and image points\n self.obj_points = pickle.load( open( \"src/objpoints\", \"rb\" ) )\n self.img_points = pickle.load( open( \"src/imgpoints\", \"rb\" ) )\n # Do camera calibration given object points and image points\n self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(self.obj_points,\n self.img_points, img_size,None,None)\n else:\n self.__find_points()\n # Do camera calibration given object points and image points\n self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(self.obj_points,\n self.img_points, img_size,None,None)\n\n else:\n self.__find_point(img,nx,ny)\n # Do camera calibration given object points and image points\n self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(self.obj_points,\n self.img_points, img_size,None,None)\n","sub_path":"src/Camera_Calibration.py","file_name":"Camera_Calibration.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"125705707","text":"# -*- coding=UTF-8 -*-\n# pyright: strict\n\nimport json\nimport logging\nimport os\nfrom typing import Dict, Text\n\nimport cv2\nimport numpy as np\nfrom PIL.Image import Image\n\nfrom .. 
import imagetools, mathtools, terminal\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass g:\n event_image_path: str = \"\"\n data_path: str = \"\"\n choices: Dict[Text, int] = {}\n\n\ndef reload() -> None:\n try:\n with open(g.data_path, \"r\", encoding=\"utf-8\") as f:\n g.choices = json.load(f)\n except OSError:\n pass\n\n\ndef _save() -> None:\n with open(g.data_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(g.choices, f, indent=2)\n\n\ndef get(event_screen: Image) -> int:\n rp = mathtools.ResizeProxy(event_screen.width)\n b_img = np.zeros((event_screen.height, event_screen.width))\n event_name_bbox = rp.vector4((75, 155, 305, 180), 466)\n options_bbox = rp.vector4((50, 200, 400, 570), 466)\n cv_event_name_img = np.asarray(event_screen.crop(event_name_bbox).convert(\"L\"))\n _, cv_event_name_img = cv2.threshold(cv_event_name_img, 220, 255, cv2.THRESH_TOZERO)\n\n l, t, r, b = event_name_bbox\n b_img[t:b, l:r] = cv_event_name_img\n\n cv_options_img = np.asarray(event_screen.crop(options_bbox).convert(\"L\"))\n\n option_rows = (cv2.reduce(cv_options_img, 1, cv2.REDUCE_MAX) == 255).astype(\n np.uint8\n )\n\n option_mask = np.repeat(option_rows, cv_options_img.shape[1], axis=1)\n\n cv_options_img = 255 - cv_options_img\n cv_options_img *= option_mask\n\n _, cv_options_img = cv2.threshold(cv_options_img, 128, 255, cv2.THRESH_BINARY)\n\n l, t, r, b = options_bbox\n b_img[t:b, l:r] = cv_options_img\n\n event_id = imagetools.md5(b_img, save_path=g.event_image_path)\n\n if os.getenv(\"DEBUG\") == __name__:\n cv2.imshow(\"option_mask\", option_mask)\n cv2.imshow(\"cv_event_name_img\", cv_event_name_img)\n cv2.imshow(\"cv_options_img\", cv_options_img)\n cv2.imshow(\"b_img\", b_img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n if event_id not in g.choices:\n while True:\n ans = terminal.prompt(\"Choose event option(1/2/3/4/5):\")\n if ans in [\"1\", \"2\", \"3\", \"4\", \"5\"]:\n g.choices[event_id] = int(ans)\n _save()\n break\n ret = g.choices[event_id]\n LOGGER.info(\"event: id=%s choice=%d\", event_id, ret)\n return ret\n","sub_path":"auto_derby/single_mode/choice.py","file_name":"choice.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"138313983","text":"from urllib.parse import urlencode, urlparse\nimport collections\n\nfrom mitmproxy import http\nfrom mitmproxy import ctx\n\n\n\nXML = b\"\"\"\n\n\n\n\n \n\n\n]>\n\n&root;test\n\"\"\".replace(b'\\n', b'')\n\ndef request(flow: http.HTTPFlow) -> None:\n if 'simple' in flow.request.url:\n print(flow.request.content)\n flow.request.content = XML\n print(flow.request.content)\n\ndef response(flow: http.HTTPFlow) -> None:\n return\n","sub_path":"mitmproxy/02_injection_flaws/xxe_root_file.py","file_name":"xxe_root_file.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"449359091","text":"from tank_env import TankEnv\nimport numpy as np\nfrom numpy import savez_compressed\nimport argparse\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"num_obs\", type=int, help=\"Number of observations to generate\")\nparser.add_argument(\"game_path\", type=str, help=\"File path of game executable\")\nparser.add_argument(\"level_path\", type=str, help=\"File path of game level\")\nparser.add_argument(\"--canvas_game_path\", type=str, help=\"File path of seperate game executable to be used in canvas 
env\")\nparser.add_argument(\"--base_port\", type=int, default=50000, help=\"Base port to be used for game environment\")\nparser.add_argument(\"--my_port\", type=int, default=50500, help=\"Port to be used on Python side of network socket connection\")\nparser.add_argument(\"--save_loc\", type=str, default=\"dataset.npz\", help=\"File path to save data to\")\nparser.add_argument(\"--env_p\", type=int, default=3, help=\"Image-based environment will draw one in-game grid square as p^2 pixels\")\nargs = parser.parse_args()\nprint(args)\n\nobs_set = np.zeros((args.num_obs, 52), dtype=np.float32)\nimg_set = np.zeros((args.num_obs, 12*args.env_p, 20*args.env_p, 3), dtype=np.uint8)\ntry:\n env = TankEnv(args.game_path, \n opp_fp_and_elo=[], \n game_port=args.base_port, \n my_port=args.my_port,\n rand_opp=True\n )\n \n if not args.canvas_game_path:\n args.canvas_game_path = args.game_path\n canvas = TankEnv(args.canvas_game_path,\n opp_fp_and_elo=[], \n game_port=args.base_port+1, \n my_port=args.my_port+1, \n image_based=True,\n level_path=args.level_path,\n rand_opp=True,\n p=args.env_p\n )\n \n obs = env.reset()\n for i in tqdm(range(args.num_obs)):\n if i % (args.num_obs // 10) == 0:\n print(i/(args.num_obs//10), \"% complete\", sep=\"\", flush=True)\n # Save states\n obs_set[i] = obs.copy()\n canvas.draw_state(obs)\n img_set[i] = canvas.state.copy()\n # Generate next observation\n action = np.random.rand(5) * 2 - 1\n obs, _, done, _ = env.step(action)\n if done:\n obs = env.reset()\n \n savez_compressed(args.save_loc, obs=obs_set, img=img_set)\nfinally:\n env.close()\n canvas.close()","sub_path":"PythonScripts/gen_state_dataset.py","file_name":"gen_state_dataset.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"18720646","text":"# set base_year as year of 1st year students\nfrom srblib import SrbJson\nfrom srblib import debug, on_travis\n\n\ncache_path = '~/.config/nith_results/cache.json'\n\n_limits_template = \\\n{\n 'base_year' : None,\n 'default_no_of_std' : 99,\n 'iiitu_no_of_std' : 70,\n 'dual_no_of_std' : 70,\n 'max_seats' : 120,\n}\n\n_limits = SrbJson('~/.config/nith_results/limits.json',_limits_template)\n\nbase_year = _limits['base_year']\nif(not base_year):\n if on_travis: base_year = 18\n else: base_year = input('Please enter base-year(year of 1st year student right now) ex:18 ')\n _limits['base_year'] = base_year\n\ndefault_no_of_std = _limits['default_no_of_std']\niiitu_no_of_std = _limits['iiitu_no_of_std']\ndual_no_of_std = _limits['dual_no_of_std']\nmax_seats = _limits['max_seats']\n\nif (debug):\n default_no_of_std = 4\n iiitu_no_of_std = 4\n dual_no_of_std = 6\n max_seats = 4\n\ndef get_branch_set(roll):\n def get_batch(roll): # working with new one\n roll = str(roll)\n if(roll[0]=='i'):#iitu\n year = roll[5:7]\n else:\n year = roll[0:2]\n return year\n y = get_batch(roll)\n if(int(y) >= 18): # new style\n return [\n y+'1001',y+'2001',y+'3001',y+'4001',y+'5001',y+'6001',y+'7001',y+'8001',\n y+'4501',y+'5501',\n 'iiitu'+y+'101','iiitu'+y+'201','iiitu'+y+'301'\n ]\n\n classes_set = {\n \"14\":[\n y+'101',y+'201',y+'301',y+'401',y+'501',y+'601',y+'701',\n y+'mi501',y+'mi401',\n 'iiitu'+y+'101','iiitu'+y+'201'\n ],\n \"15\":[\n y+'101',y+'201',y+'301',y+'401',y+'501',y+'601',y+'701',\n y+'mi501',y+'mi401',\n 'iiitu'+y+'101','iiitu'+y+'201'\n ],\n \"16\":[\n y+'101',y+'201',y+'301',y+'401',y+'501',y+'601',y+'701',\n y+'mi501',y+'mi401',\n 'iiitu'+y+'101','iiitu'+y+'201'\n ],\n 
\"17\":[\n y+'101',y+'201',y+'301',y+'401',y+'501',y+'601',y+'701',y+'801',\n y+'mi501',y+'mi401',\n 'iiitu'+y+'101','iiitu'+y+'201','iiitu'+y+'301'\n ],\n }\n return classes_set[y]\n","sub_path":"nith_results/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"73653602","text":"import time\nimport asyncio\nfrom telethon import TelegramClient\nfrom telethon.tl.functions.channels import GetParticipantsRequest\nfrom telethon.tl.types import InputPeerUser\n\n# Project common modules\nimport sys, os\nsys.path.append(os.path.join(sys.path[0], '..', '..', 'common'))\nfrom auth import AuthTelethon_2\nfrom messagebox import MessageBox\nfrom logger import Logger\n\n# TelegramSpammer's modules\n# from conf import Conf\nfrom user_profiles import UserProfile, UserProfiles\n\n# KTC trade coin\nUSER_PROFILE_CSV_PATH = 'db/ktc_member_profiles.csv'\n# USER_PROFILE_CSV_PATH = 'db/demo_member_profiles.csv'\n\nasync def main():\n # config = Conf()\n logger = Logger.get_instance()\n\n profiles = get_user_profiles()\n\n auth_telethon = AuthTelethon_2()\n telethon_client = auth_telethon.get_client()\n await telethon_client.start()\n\n for profile in profiles:\n await send_message(telethon_client, profile)\n print('user: ', profile.user_id, profile.username, profile.user_hash)\n time.sleep(10)\n\n\n\ndef get_user_profiles() -> list:\n ''' Get all members' ID, username and access hash from CSV file\n '''\n user_profiles = UserProfiles(USER_PROFILE_CSV_PATH)\n user_profiles.read_csv()\n return user_profiles.profiles\n\n\nasync def send_message(client: TelegramClient, profile: UserProfile):\n if profile.username == 'None':\n receiver = InputPeerUser(profile.user_id, profile.user_hash)\n else:\n receiver = await client.get_input_entity(profile.username)\n\n msg = \"\"\"Hi bạn\n\nMình có khóa học Trade của Nukida\n(anh này có kênh YouTube gần 90 ngàn người đăng kí, nhiều video free về Trading Forex và Crypto rất hay)\n\nNukida cũng theo trường phái Price Action giống admin Kiên (KTC trade coin).\n\nĐợt rồi, mình mua khóa này 10 triệu. Giờ muốn share lại bạn với giá $10\nBao gồm khóa học A (cơ bản) và B (nâng cao) của Nukida. Có Video, tài liệu, kinh nghiệm thực chiến, Livestream, hỏi đáp, công cụ/phần mềm hỗ trợ. Video trên Youtube chỉ cơ bản lướt qua, còn khóa học thì giải thích chi tiết và thực chiến hơn nhiều.\nCả 2 chỉ $10\n\nLưu ý đây không phải là khóa học trực tuyến 1:1. 
Chỉ là video và file bài giảng thôi.\nBạn cần thêm thông tin gì thì nhắn tin mình.\"\"\"\n\n await client.send_message(entity=receiver, message=msg, parse_mode='html')\n\n\nasyncio.run(main())","sub_path":"CryptoUtilities/CryptoUtilities/TelegramSpammer/src/send_msg.py","file_name":"send_msg.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"380231860","text":"import numpy as np\r\nfrom skimage.color import rgb2gray\r\nfrom skimage.transform import resize\r\nimport tensorflow as tf\r\nfrom obstacle_tower_env import ObstacleTowerEnv\r\nfrom matplotlib import pyplot as plt\r\nfrom PIL import Image\r\n\r\n\r\nOUTPUT_GRAPH = True\r\nMAX_EPISODE = 1000\r\nDISPLAY_REWARD_THRESHOLD = 200 # renders environment if total episode reward is greater then this threshold\r\nMAX_EP_STEPS = 500 # maximum time step in one episode\r\nRENDER = False # rendering wastes time\r\nGAMMA = 0.9 # reward discount in TD error\r\nLR_A = 0.001 # actor learning rate\r\nLR_C = 0.01 # critic learning rate\r\n\r\n\r\nclass Actor(object):\r\n def __init__(self, sess, n_features, n_actions, lr=0.001):\r\n self.sess = sess\r\n\r\n self.s = tf.placeholder(tf.float32, [1, n_features, n_features], \"state\")\r\n self.a = tf.placeholder(tf.int32, None, \"act\")\r\n self.td_error = tf.placeholder(tf.float32, None, \"td_error\") # TD_error\r\n\r\n self.s = self.s[:, :, :, np.newaxis] # artifact to create 4dim space (1st dim = batchsize)\r\n # print(\"SELF.S SHAPE: \" + str(self.s.shape))\r\n with tf.variable_scope('Actor'):\r\n self.input_layer = tf.layers.conv2d(\r\n inputs=self.s,\r\n filters=512, # 2 * 2,\r\n kernel_size=[64, 64],\r\n activation=tf.nn.leaky_relu, #relu Todo: Try Leaky Relu\r\n kernel_initializer=tf.random_normal_initializer(0., .1), # weights\r\n bias_initializer=tf.constant_initializer(0.1), # biases\r\n name='input_layer'\r\n )\r\n\r\n self.first_conv = tf.layers.conv2d(\r\n inputs=self.input_layer,\r\n filters=256, # 2 * 2, # todo: increase to e.g. 
256\r\n kernel_size=[16, 16],\r\n activation=tf.nn.leaky_relu, # tf.nn.relu,\r\n kernel_initializer=tf.random_normal_initializer(0., .1), # weights\r\n bias_initializer=tf.constant_initializer(0.1), # biases\r\n name='first_conv'\r\n )\r\n\r\n self.scnd_conv = tf.layers.conv2d(\r\n inputs=self.first_conv,\r\n filters=128, # 2 * 2,\r\n kernel_size=[4, 4],\r\n activation=tf.nn.leaky_relu, # relu, # get action probabilities\r\n kernel_initializer=tf.random_normal_initializer(0., .1), # weights\r\n bias_initializer=tf.constant_initializer(0.1), # biases\r\n name='scnd_conv'\r\n )\r\n\r\n self.batch_norm = tf.layers.batch_normalization(\r\n inputs=self.scnd_conv,\r\n name='batch_norm'\r\n )\r\n\r\n self.flatten = tf.layers.flatten(\r\n inputs=self.batch_norm,\r\n name='flatten'\r\n )\r\n\r\n self.dense = tf.layers.dense(\r\n inputs=self.flatten,\r\n units=64,\r\n activation=tf.nn.relu,\r\n name='1st_dense'\r\n )\r\n\r\n self.acts_prob = tf.layers.dense(\r\n inputs=self.dense,\r\n units=54,\r\n activation=tf.nn.softmax,\r\n name='acts_prob'\r\n )\r\n\r\n with tf.variable_scope('exp_v'):\r\n log_prob = tf.log(self.acts_prob[0, self.a])\r\n self.exp_v = tf.reduce_mean(log_prob * self.td_error) # advantage (TD_error) guided loss\r\n\r\n with tf.variable_scope('train'):\r\n self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v) # minimize(-exp_v) = maximize(exp_v)\r\n\r\n def learn(self, s, a, td):\r\n s = s[np.newaxis, :]\r\n feed_dict = {self.s: s, self.a: a, self.td_error: td}\r\n _, exp_v = self.sess.run([self.train_op, self.exp_v], feed_dict)\r\n return exp_v\r\n\r\n # choose action based on previous state s\r\n def choose_action(self, s):\r\n s = s[np.newaxis, :]\r\n # wants s to be of dimsize self.s (1, 84, 84, 1)\r\n probs = self.sess.run(self.acts_prob, {self.s: s}) # get probabilities for all actions\r\n \"\"\"\r\n allowed = False\r\n while not allowed:\r\n action = np.random.choice(np.arange(probs.shape[1]), p=probs.ravel()) # return a int\r\n if action in [0,1,2,4,6,18]:\r\n allowed = True\r\n else:\r\n allowed = False\r\n \"\"\"\r\n return np.random.choice(np.arange(probs.shape[1]), p=probs.ravel()) # return a int\r\n\r\n\r\nclass Critic(object):\r\n def __init__(self, sess, n_features, lr=0.01):\r\n self.sess = sess\r\n\r\n self.s = tf.placeholder(tf.float32, [1, n_features, n_features], \"state\")\r\n self.v_ = tf.placeholder(tf.float32, [1, 1], \"v_next\")\r\n self.r = tf.placeholder(tf.float32, None, 'r')\r\n\r\n # self.s = np.expand_dims(self.s, axis=3)\r\n self.s = self.s[:, :, :, np.newaxis] # artifact to create 4dim space (1st dim = batchsize)\r\n with tf.variable_scope('Critic'):\r\n self.input_layer = tf.layers.conv2d(\r\n inputs=self.s,\r\n filters=512, # 2 * 2, # todo: increase to e.g. 256\r\n kernel_size=[64, 64],\r\n activation=tf.nn.leaky_relu, # tf.nn.relu,\r\n kernel_initializer=tf.random_normal_initializer(0., .1), # weights\r\n bias_initializer=tf.constant_initializer(0.1), # biases\r\n name='input_layer'\r\n )\r\n\r\n self.first_conv = tf.layers.conv2d(\r\n inputs=self.input_layer,\r\n filters=256, # 2 * 2, # todo: increase to e.g. 
256\r\n kernel_size=[16, 16],\r\n activation=tf.nn.leaky_relu, # tf.nn.relu,\r\n kernel_initializer=tf.random_normal_initializer(0., .1), # weights\r\n bias_initializer=tf.constant_initializer(0.1), # biases\r\n name='first_conv'\r\n )\r\n\r\n self.scnd_conv = tf.layers.conv2d(\r\n inputs=self.first_conv,\r\n filters=128, #2 * 2,\r\n kernel_size=[4, 4],\r\n activation=tf.nn.leaky_relu, # relu, # get action probabilities\r\n kernel_initializer=tf.random_normal_initializer(0., .1), # weights\r\n bias_initializer=tf.constant_initializer(0.1), # biases\r\n name='scnd_conv'\r\n )\r\n\r\n self.batch_norm = tf.layers.batch_normalization(\r\n inputs=self.scnd_conv,\r\n name='batch_norm'\r\n )\r\n\r\n self.flatten = tf.layers.flatten(\r\n inputs=self.batch_norm,\r\n name='flatten'\r\n )\r\n print(self.flatten)\r\n\r\n # todo: evtl noch weiter runterscalen, try out diff. kernel sizes etc\r\n\r\n self.dense = tf.layers.dense(\r\n inputs=self.flatten,\r\n units=64,\r\n activation=tf.nn.relu,\r\n name='1st_dense'\r\n )\r\n\r\n self.v = tf.layers.dense(\r\n inputs=self.dense,\r\n units=1,\r\n activation=tf.nn.softmax,\r\n name='V'\r\n )\r\n\r\n with tf.variable_scope('squared_TD_error'):\r\n # Advantage Function td_error\r\n self.td_error = self.r + GAMMA * self.v_ - self.v\r\n with tf.variable_scope('Loss'):\r\n self.loss = tf.square(self.td_error) # TD_error = (r+gamma*V_next) - V_eval\r\n with tf.variable_scope('train'):\r\n self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)\r\n\r\n def learn(self, s, r, s_):\r\n s, s_ = s[np.newaxis, :], s_[np.newaxis, :]\r\n\r\n v_ = self.sess.run(self.v, {self.s: s_})\r\n td_error, _ = self.sess.run([self.td_error, self.train_op],\r\n {self.s: s, self.v_: v_, self.r: r})\r\n return td_error\r\n\r\n\r\ndef rgb2gray(rgb):\r\n return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Set retro=True to get integers for every action instead of MultiDiscrete\r\n env = ObstacleTowerEnv('./ObstacleTower/obstacletower', retro=True, realtime_mode=True)\r\n N_F = env.observation_space.shape[0] # number features 84\r\n N_A = env.action_space.n # number actions 54\r\n\r\n tf.reset_default_graph()\r\n #sess = tf.Session()\r\n config = tf.ConfigProto()\r\n config.gpu_options.allocator_type = 'BFC'\r\n sess = tf.Session(config=config)\r\n writer = tf.summary.FileWriter('./graphs', sess.graph)\r\n\r\n actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A)\r\n critic = Critic(sess, n_features=N_F, lr=LR_C) # we need a good teacher, so the teacher should learn faster than the actor\r\n\r\n sess.run(tf.global_variables_initializer())\r\n # saves a model every 2 hours and maximum 4 latest models are saved.\r\n saver = tf.train.Saver(max_to_keep=4, keep_checkpoint_every_n_hours=2)\r\n saver.save(sess, 'actor_critic_model')\r\n\r\n if OUTPUT_GRAPH:\r\n tf.summary.FileWriter(\"logs/\", sess.graph)\r\n\r\n td_errors = []\r\n test_phase = False\r\n test_counter = 0\r\n keys_in_test = 0\r\n performance = []\r\n steps = [26, 14, 3]\r\n for i_episode in range(MAX_EPISODE):\r\n s = env.reset()\r\n config = {'tower-seed': 0, 'starting-floor': 10, 'dense-reward': 1, 'agent-perspective': 1, 'allowed-rooms': 1,\r\n 'allowed-modules': 0,\r\n 'allowed-floors': 0}\r\n obs = env.reset(config=config)\r\n np.trim_zeros(steps)\r\n\r\n def handcrafted_step(act, go):\r\n try:\r\n for i in range(go):\r\n env.step(act)\r\n except IndexError:\r\n pass\r\n\r\n handcrafted_step(18, steps[0])\r\n handcrafted_step(6, steps[1])\r\n handcrafted_step(18, 
steps[2])\r\n\r\n s, reward, done, info = env.step(18)\r\n s = rgb2gray(s)\r\n s = np.expand_dims(s, axis=2)\r\n\r\n t = 0\r\n track_score = []\r\n track_r = []\r\n track_a = []\r\n track_s = []\r\n bad_seq = 0\r\n score = 0\r\n steps_after_key = 0\r\n while True:\r\n if RENDER: env.render()\r\n\r\n a = actor.choose_action(s) # state of dim (1, 84, 84) fed to actor to choose the next action based on the current state\r\n\r\n s_, r, done, info = env.step(a)\r\n t += 1\r\n if i_episode % 100 == 0 and i_episode != 0:\r\n test_phase = True\r\n test_counter += 1\r\n if test_phase:\r\n if test_counter > 5:\r\n performance.append(keys_in_test / test_counter)\r\n if keys_in_test > 2:\r\n steps[-1] -= 1\r\n test_counter = 0\r\n keys_in_test = 0\r\n test_phase = False\r\n if info['total_keys'] > 0:\r\n print(info)\r\n keys_in_test += 1\r\n bad_seq = 0\r\n r = 1\r\n score += 1\r\n steps_after_key += 1\r\n if steps_after_key > 5:\r\n done = True\r\n # if one sequence is repeated often without getting rewards - punish it\r\n if bad_seq > 10:\r\n r = - 0.1\r\n score -= 0.1\r\n s_ = rgb2gray(s_)\r\n s_ = np.expand_dims(s_, axis=2)\r\n\r\n if done:\r\n r = -20\r\n\r\n track_score.append(score)\r\n track_r.append(r)\r\n track_a.append(a)\r\n track_s.append(s)\r\n\r\n # check if action gets repeated\r\n try:\r\n if a == track_a[-2]:\r\n bad_seq += 1\r\n else:\r\n if r == - 0.1:\r\n r += 0.1\r\n bad_seq = 0\r\n except IndexError:\r\n pass\r\n\r\n #td_error = critic.learn(s, score, s_) # gradient = grad[r + gamma * V(s_) - V(s)]\r\n #actor.learn(s, a, td_error) # true_gradient = grad[logPi(s,a) * td_error]\r\n #print(\"Td_Error: \" + str(td_error) + \" Score: \" + str(score) + \" Action: \" + str(a))\r\n td_error = critic.learn(s, r, s_) # gradient = grad[r + gamma * V(s_) - V(s)]\r\n probabilities = actor.learn(s, a, td_error) # true_gradient = grad[logPi(s,a) * td_error]\r\n print(\"Probs: \" + str(probabilities))\r\n print(\"Td_Error: \" + str(td_error) + \" Reward: \" + str(r) + \" Action: \" + str(a))\r\n td_errors.append(td_error)\r\n s = s_\r\n\r\n if done or t >= MAX_EP_STEPS:\r\n print(performance)\r\n ep_rs_sum = sum(track_r)\r\n\r\n if 'running_reward' not in globals():\r\n running_reward = ep_rs_sum\r\n else:\r\n running_reward = running_reward * 0.95 + ep_rs_sum * 0.05\r\n if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True # rendering\r\n print(\"episode:\", i_episode, \" reward:\", int(running_reward))\r\n break\r\n else:\r\n test_phase = False\r\n if info['total_keys'] > 0:\r\n print(info)\r\n bad_seq = 0\r\n r = 1\r\n score += 1\r\n steps_after_key += 1\r\n if steps_after_key > 5:\r\n done = True\r\n # if one sequence is repeated often without getting rewards - punish it\r\n if bad_seq > 10:\r\n r = - 0.1\r\n score -= 0.1\r\n s_ = rgb2gray(s_)\r\n s_ = np.expand_dims(s_, axis=2)\r\n\r\n if done:\r\n r = -20\r\n\r\n track_score.append(score)\r\n track_r.append(r)\r\n track_a.append(a)\r\n track_s.append(s)\r\n\r\n # check if action gets repeated\r\n try:\r\n if a == track_a[-2]:\r\n bad_seq += 1\r\n else:\r\n if r == - 0.1:\r\n r += 0.1\r\n bad_seq = 0\r\n except IndexError:\r\n pass\r\n\r\n # td_error = critic.learn(s, score, s_) # gradient = grad[r + gamma * V(s_) - V(s)]\r\n # actor.learn(s, a, td_error) # true_gradient = grad[logPi(s,a) * td_error]\r\n # print(\"Td_Error: \" + str(td_error) + \" Score: \" + str(score) + \" Action: \" + str(a))\r\n td_error = critic.learn(s, r, s_) # gradient = grad[r + gamma * V(s_) - V(s)]\r\n # actor.learn(s, a, td_error) # true_gradient = 
grad[logPi(s,a) * td_error]\r\n probabilities = actor.learn(s, a, td_error) # true_gradient = grad[logPi(s,a) * td_error]\r\n print(\"Probs: \" + str(probabilities))\r\n print(\"Td_Error: \" + str(td_error) + \" Reward: \" + str(r) + \" Action: \" + str(a))\r\n td_errors.append(td_error)\r\n s = s_\r\n\r\n if done or t >= MAX_EP_STEPS:\r\n print(performance)\r\n ep_rs_sum = sum(track_r)\r\n\r\n if 'running_reward' not in globals():\r\n running_reward = ep_rs_sum\r\n else:\r\n running_reward = running_reward * 0.95 + ep_rs_sum * 0.05\r\n if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True # rendering\r\n print(\"episode:\", i_episode, \" reward:\", int(running_reward))\r\n break\r\n # actor.test(env, nb_episodes=5, visualize=True)\r\n plt.plot(td_errors)\r\n\r\n# TODO: Batch_NormaLIZATION","sub_path":"a2c.py","file_name":"a2c.py","file_ext":"py","file_size_in_byte":16185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"374452014","text":"\"\"\"\"Appium framework for Android and IOS\"\"\"\r\n\r\nimport capablities\r\nfrom appium import webdriver\r\nDesiredcap = {}\r\nDesiredcap['platformName'] = capablities.platform\r\nDesiredcap['platformVersion'] = capablities.platfromversion\r\nDesiredcap['deviceName'] = capablities.devicename\r\nDesiredcap['appPackage'] = capablities.apppackage\r\nDesiredcap['appActivity'] = capablities.appactivity\r\ndriver = webdriver.Remote('http://localhost:4723/wd/hub', Desiredcap)\r\n\r\n\r\ntfile = open(\"testing.txt\",'r')\r\nline = tfile.readlines()\r\n\r\nfor val in line:\r\n sample = val.split('\\t')\r\n Action = sample[0]\r\n Locatortype = sample[1]\r\n Locator = sample[2]\r\n Userinput = sample[3]\r\n #print(Action)\r\n #print(Locatortype)\r\n #print(Locator)\r\n try:\r\n if Action == \"Tap\" and Locatortype == \"ID\":\r\n driver.wait_activity(Locator, 120)\r\n driver.find_element_by_id(Locator).click()\r\n elif Action == \"Tap\" and Locatortype == \"Xpath\":\r\n driver.wait_activity(Locator, 10)\r\n driver.find_element_by_xpath(Locator).click()\r\n elif Action == \"Tap\" and Locatortype == \"Class\":\r\n driver.implicitly_wait(30)\r\n driver.find_element_by_class_name(Locator).click()\r\n elif Action == \"Sendkeys\" and Locatortype == \"Xpath\":\r\n driver.wait_activity(Locator, 10)\r\n driver.find_element_by_xpath(Locator).send_keys(Userinput)\r\n driver.hide_keyboard()\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n","sub_path":"AppiumCaps.py","file_name":"AppiumCaps.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"528247363","text":"\"\"\"\nGiven a list of non negative integers, arrange them such that they form the largest number.\n\nFor example:\n\nGiven [3, 30, 34, 5, 9], the largest formed number is 9534330.\n\nNote: The result may be very large, so you need to return a string instead of an integer.\n\nSource: https://www.interviewbit.com/courses/programming/topics/arrays/problems/largestnum/\n\"\"\"\n\n# @param A : list of integers\n# @return a strings\ndef largestNumber(A):\n A.sort(cmp=compareNumbers, reverse=True)\n if A[0] == 0:\n return '0'\n return ''.join(str(x) for x in A)\n\ndef compareNumbers(a, b):\n a_str = str(a)\n b_str = str(b)\n first = a_str + b_str\n second = b_str + a_str\n if first < second:\n return -1\n elif first == second:\n return 0\n else:\n return 
1","sub_path":"179.py","file_name":"179.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"231352795","text":"import json\nimport uuid\nimport time\nimport hashlib\nimport requests\nimport mdpopups\n\nimport sublime\nimport sublime_plugin\n\nfrom .menus_creator import MenusCreator\nfrom ..log import Loger\n\n\nclass ReplaceWithTranslationCommand(sublime_plugin.TextCommand):\n def run(self, edit, begin, end, text):\n self.view.replace(edit, sublime.Region(begin, end), text)\n\n\nclass TranslatorBase(sublime_plugin.TextCommand):\n words = \"\"\n style = \"popup\"\n region = sublime.Region(0, 0)\n mdpopups_css = \"Packages/SuperMenus/mdpopups.css\"\n\n def run(self, edit):\n on_select = lambda: self.translate(self.words)\n Loger.threading(on_select, \"Translating...\", \"Succeed.\")\n\n def show_popup(self):\n mdpopups.show_popup(\n view=self.view,\n css=sublime.load_resource(self.mdpopups_css),\n max_width=480,\n max_height=320,\n location=(self.region.a + self.region.b)//2,\n content=self.formated(),\n on_navigate=self.on_navigate,\n md=True)\n\n def show_phantom(self):\n mdpopups.add_phantom(\n view=self.view,\n css=sublime.load_resource(self.mdpopups_css),\n key=\"Translator\",\n region=self.region,\n content=self.formated(),\n layout=sublime.LAYOUT_BELOW,\n on_navigate=self.on_navigate,\n md=True)\n\n def show_view(self):\n view = self.view.window().new_file(\n flags=sublime.TRANSIENT,\n syntax=\"Packages/JavaScript/JSON.sublime-syntax\")\n view.set_scratch(True)\n view.set_name(\"Translation\")\n view.run_command(\"append\", {\"characters\": str(self.received)})\n view.run_command(\"pretty_json\")\n\n def display(self):\n if self.style == \"popup\":\n self.show_popup()\n elif self.style == \"phantom\":\n self.show_phantom()\n elif self.style == \"view\":\n self.show_view()\n\n def translate(self, q):\n pass\n\n def formated(self, received):\n return \"\"\n\n def on_navigate(self, href):\n pass\n\n\nTRANSLATOR_TEMPLATE = \"\"\"\n---\nallow_code_wrap: true\n---\n!!! 
{}\n\"\"\"\n\nCOPY_AND_REPLACE = \"\"\"\nCopy\nReplace\n\"\"\"\n\nclass YoudaoTranslator(TranslatorBase):\n def translate(self, q):\n def truncate(q):\n size = len(q)\n return q if size <= 20 else q[0:10] + str(size) + q[size - 10:size]\n\n def encrypt(signStr):\n hash_algorithm = hashlib.sha256()\n hash_algorithm.update(signStr.encode('utf-8'))\n return hash_algorithm.hexdigest()\n\n platform = MenusTranslator.platforms[\"youdao\"]\n if (\"app_id\" in platform and \"app_key\" in platform and\n \"api_url\" in platform):\n apiurl = platform[\"api_url\"]\n appKey = platform[\"app_id\"]\n secret = platform[\"app_key\"]\n\n curtime = str(int(time.time()))\n salt = str(uuid.uuid1())\n sign = encrypt(appKey + truncate(q) + salt + curtime + secret)\n data = {\n \"q\": q,\n \"from\": platform[\"from\"],\n \"to\": platform[\"to\"],\n \"appKey\": appKey,\n \"salt\": salt,\n \"sign\": sign,\n \"signType\": \"v3\",\n \"curtime\": curtime\n }\n\n else:\n data = {}\n apiurl = \"https://fanyi.youdao.com/openapi.do?keyfrom=divinites&key=1583185521&type=data&doctype=json&version=1.1&q=%s\" % q\n\n try:\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n response = requests.post(apiurl, data=data, headers=headers)\n self.received = json.loads(response.content.decode('utf-8'))\n self.display()\n except Exception as e:\n sublime.error_message(u\"数据请求失败!\")\n\n def formated(self):\n received = self.received\n thread = \"\\n------------------------\\n{}\"\n body = TRANSLATOR_TEMPLATE.format(\"Youdao\")\n footer = \"\"\"
\"\"\"\n body += \"# {}\\n\".format(self.words)\n if \"basic\" in received:\n body += \"## 解释:\\n\"\n for explain in received[\"basic\"][\"explains\"]:\n body += \"- {}\\n\".format(explain)\n if \"translation\" in received:\n body += \"## 翻译:\\n\"\n for explain in received[\"translation\"]:\n body += \"- {}\\n\".format(explain)\n footer += COPY_AND_REPLACE\n if \"web\" in received:\n body += thread.format(\"## 网络释义:\\n\")\n for explain in received[\"web\"]:\n explains = \",\".join(explain[\"value\"])\n body += \"`{}`: {}\\n\".format(explain[\"key\"], explains)\n footer += \"\"\"×
\"\"\"\n return body + thread.format(footer)\n\n def on_navigate(self, href):\n if \"translation\" in self.received:\n text = self.received[\"translation\"][0]\n if href == \"replace\":\n begin, end = self.region.begin(), self.region.end()\n self.view.run_command(\"replace_with_translation\",\n { \"begin\": begin, \"end\": end, \"text\": text })\n if href == \"copy\":\n sublime.set_clipboard(text)\n sublime.status_message(\"Translation copied to clipboard\")\n self.view.hide_popup()\n self.view.erase_phantoms(\"Translator\")\n\n\nclass GoogleTranslator(TranslatorBase):\n def translate(self, q):\n pass\n\n def formated(self, words, received):\n pass\n\n\nclass MenusTranslator(MenusCreator):\n def __init__(self, caption=\"Translator\", auto_select=True, platforms={},\n separator=( \"|\\\\\\n\\f/:,;<>.+=-_~`'\\\"!@#$%^&*\"\n \"({[(《:;·,。—¥?!……‘’“”、》)]})\")):\n MenusTranslator.caption = caption\n MenusTranslator.platforms = platforms\n MenusTranslator.separator = separator\n MenusTranslator.auto_select = auto_select\n\n def item(self, caption, command):\n return { \"caption\": caption, \"command\": command }\n\n def get_words(self, view, event):\n if view.has_non_empty_selection_region():\n selected = view.sel()[0]\n words = view.substr(selected).strip(self.separator)\n if len(words) > 0:\n TranslatorBase.region = selected\n return words\n if self.auto_select is True:\n pt = view.window_to_text((event[\"x\"], event[\"y\"]))\n region = view.word(pt)\n word = view.substr(region).strip(self.separator)\n if len(word) > 0:\n TranslatorBase.region = region\n return word\n return None\n\n def create(self, view, event):\n if len(self.platforms) > 0:\n words = self.get_words(view, event)\n if words is not None:\n items = []\n TranslatorBase.words = words\n for p in sorted(self.platforms):\n platform = self.platforms[p]\n if platform.get(\"enabled\", True):\n caption = platform.get(\"caption\", p.title())\n command = \"{}_translator\".format(p.lower())\n items.append(self.item(caption, command))\n if len(items) > 1:\n return self.fold_items(items)\n if len(items) == 1:\n return items[0]\n return None\n","sub_path":"plugins/dymenus/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":7620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"43677052","text":"\"\"\"\nGiven a string, write a function to check if it is a permutation of\na palindrome. A palindrome is a word or phrase that is the same forwards and backwards. A\npermutation is a rearrangement of letters. 
The palindrome does not need to be limited to just\ndictionary words.\n\"\"\"\nstr = \"tact coa\"\ndef check(str):\n count = {}\n for i in str:\n if i == ' ':\n continue\n else:\n if i not in count.keys():\n count[i] = 1\n else:\n count[i] += 1\n count_odd = 0\n for i in count.values():\n if i%2 != 0:\n count_odd += 1\n if count_odd == 1:\n return True\n else:\n return False\nprint(check(str))","sub_path":"venv/Codes/Questions/is_permutation_of_palindrome.py","file_name":"is_permutation_of_palindrome.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"420246077","text":"from django import forms\n\nfrom haystack.forms import SearchForm\n\n\nclass DateRangeSearchForm(SearchForm):\n '''Search :class:`Post ` by published_on\n date.\n\n :param start_date: Optional.\n :param end_date: Optional.\n '''\n start_date = forms.DateField(\n help_text='YYYY-MM-DD',\n required=False,\n )\n end_date = forms.DateField(\n help_text='YYYY-MM-DD',\n required=False,\n )\n\n def search(self):\n sqs = super(DateRangeSearchForm, self).search()\n\n if not self.is_valid():\n return self.no_query_found()\n\n if self.cleaned_data['start_date']:\n sqs = sqs.filter(published_on__gte=self.cleaned_data['start_date'])\n\n if self.cleaned_data['end_date']:\n sqs = sqs.filter(published_on__lte=self.cleaned_data['end_date'])\n\n return sqs\n","sub_path":"utils/search_forms.py","file_name":"search_forms.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"593292360","text":"import tensorflow as tf\n\nfrom tensorflow.keras import layers, Model\nfrom tensorflow.keras.losses import SparseCategoricalCrossentropy\nfrom tensorflow.keras.metrics import SparseCategoricalAccuracy\nfrom tensorflow.keras.optimizers import Adam\n\nfrom dgl.nn.tensorflow import GraphConv\nfrom tensorflow.keras.activations import get\n\n\nclass GCN(Model):\n def __init__(self, in_channels, out_channels,\n hiddens=[16],\n activations=['relu'],\n dropout=0.5,\n weight_decay=5e-4,\n lr=0.01, use_bias=True):\n\n super().__init__()\n self.convs = []\n inc = in_channels\n for hidden, activation in zip(hiddens, activations):\n layer = GraphConv(inc, hidden, bias=use_bias,\n activation=get(activation))\n self.convs.append(layer)\n inc = hidden\n\n layer = GraphConv(inc, out_channels, bias=use_bias)\n self.convs.append(layer)\n self.dropout = layers.Dropout(dropout)\n self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),\n optimizer=Adam(lr=lr), metrics=['accuracy'])\n self.weight_decay = weight_decay\n self.metric = SparseCategoricalAccuracy()\n\n def call(self, inputs):\n h, g, idx = inputs\n for layer in self.convs[:-1]:\n h = layer(g, h)\n h = self.dropout(h)\n h = self.convs[-1](g, h)\n\n return tf.gather(h, idx)\n","sub_path":"graphgallery/nn/models/dgl_tf/gcn.py","file_name":"gcn.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"479646380","text":"import torch\nimport cv2\nimport urllib\nfrom PIL import Image\nimport numpy as np\nimport torchvision\nimport json\n\nfrom utils.visdom_feeder import VisdomFeeder\nimport coco_transform as T\n\n\ndef main():\n\n # cuda?\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\n # normalization based on https://pytorch.org/docs/stable/torchvision/models.html\n Normalize 
= T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n IMG_SIZE = 224\n\n # visdom interface\n viz = VisdomFeeder(Normalize)\n\n # create generators\n training_set = torchvision.datasets.CocoDetection(root='/home/AD.ADASWORKS.COM/agoston.huszka/_Ago/projects/yolo3/coco/val2017',\n annFile='/home/AD.ADASWORKS.COM/agoston.huszka/_Ago/projects/yolo3/coco/annotations/instances_val2017.json',\n transforms=T.Compose([\n T.Resize(IMG_SIZE),\n T.ToTensor(),\n Normalize\n ]))\n training_gen = torch.utils.data.DataLoader(training_set,\n batch_size=1,\n shuffle=True,\n num_workers=1)\n\n # get a batch of training data\n local_batch, local_labels = next(iter(training_gen))\n viz.showCocoImg(local_batch, local_labels)\n\n return\n\n\n print(\"test backbone network\")\n print(torch.__version__)\n model = torch.hub.load('pytorch/vision:v0.5.0', 'mobilenet_v2', pretrained=True)\n model.eval()\n\n url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n try:\n urllib.URLopener().retrieve(url, filename)\n except:\n urllib.request.urlretrieve(url, filename)\n\n #img = cv2.imread(filename, 0)\n #cv2.imshow('image', img)\n #cv2.waitKey(0)\n\n input_image = Image.open(filename)\n preprocess = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n input_tensor = preprocess(input_image)\n input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n\n # move the input and model to GPU for speed if available\n if torch.cuda.is_available():\n input_batch = input_batch.to('cuda')\n model.to('cuda')\n\n with torch.no_grad():\n output = model(input_batch)\n # Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n # print(output[0])\n # The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n # print(torch.nn.functional.softmax(output[0], dim=0))\n\n percentage = torch.nn.functional.softmax(output[0], dim=0) * 100\n _, y_pred = torch.max(output, 1)\n print(y_pred)\n\n classes = []\n with open('imagenet_classes.txt') as f:\n classes = [line.strip() for line in f.readlines()]\n print(classes[y_pred], percentage[y_pred].item())\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/sandbox/backbone_test.py","file_name":"backbone_test.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"486627281","text":"#!/usr/bin/python3\n\n\nimport functools\nimport os\nimport sys\nfrom captools.argsparser import parse_args\nfrom captools.capturer import Capturer, run_download\n\n\nlink1 = \"http://releases.ubuntu.com/14.04/ubuntu-14.04.4-desktop-amd64.iso\"\ninfo1 = \"\"\"\\\nCapturing consists of downloading Ubuntu 14.04 distributive\nfrom releases.ubuntu.com\n\"\"\"\n# flag1 = \"\"\ntag1 = \"ubuntu_distributive\"\n\n\nlink2 = \"\"\"\\\nhttp://us.download.nvidia.com/Windows/359.06/\\\n359.06-desktop-win10-64bit-international-whql.exe\\\n\"\"\"\ninfo2 = \"\"\"\\\nCapturing consists of downloading driver for NVIDIA GeForce GTX TITAN X\nvideo adapter for Windows 10 64-bit OS, version 359.06\n\"\"\"\n# flag2 = \"\"\ntag2 = \"nvidia_driver\"\n\n\nlink3 = \"\"\"\nhttp://www2.ati.com/drivers/beta/\\\nRadeon-Software-Crimson-Edition-15.11.1-Beta-64Bit-Win10-Win8.1-Win7-Nov30.exe\\\n\"\"\"\ninfo3 = \"\"\"\\\nCapturing consists of downloading Crimson edition driver for ATI Radeon\nadapters, version 15.11.1\n\"\"\"\n# flag3 = \"--referer=http://support.amd.com \"\ntag3 = \"radeon_driver\"\n\nlink4 = \"\"\"\\\nftp://download.iwlab.foi.se/dataset/smia2011/Network_traffic/\\\nSMIA_2011-10-14_07%253A21%253A32_CEST_106467000_file4.pcap\\\n\"\"\"\ninfo4 = \"\"\"\\\nCapturing consists of downloading one of the pcap files from\nFOI Warfare Lab dataset\n\"\"\"\n# flag4 = \"\"\ntag4 = \"pcap_foi\"\n\n\nlink5 = \"http://download.gimp.org/mirror/pub/gimp/v2.8/osx/gimp-2.8.14.dmg\"\ninfo5 = \"\"\"\\\nCapturing consists of downloading GIMP 2.8.14 installing file\n\"\"\"\n# flag5 = \"\"\ntag5 = \"gimp\"\n\n\n\ndata = [\n (link1, info1, tag1),\n (link2, info2, tag2),\n # (link3, info3, tag3), - too slow\n (link4, info4, tag4),\n (link5, info5, tag5)\n]\n\n\ndata_combined = [\n (link1, info1, tag1),\n (link2, info2, tag2),\n (link1, info1, tag1)\n]\n\n\ndef make_data(data_list):\n capt_data = []\n for link, info, tag in data_list:\n capt_action = functools.partial(run_download, link)\n full_info = info + \"Initial link: \" + link + \"\\n\"\n capt_data.append((capt_action, full_info, tag))\n return capt_data\n\n\nif __name__ == \"__main__\":\n args = parse_args(sys.argv[1:])\n capt_data = make_data(data if args.sep else data_combined)\n Capturer(args.out_dir, capt_data, sep=args.sep).capture()\n\n\n# if __name__ == \"__main__\":\n# args = parse_args(sys.argv[1:])\n# capt_data = []\n\n# if args.sep:\n# for link, info, tag in data:\n# capt_action = functools.partial(run_download, link)\n# full_info = info + \"Initial link: \" + link + \"\\n\"\n# capt_data.append((capt_action, full_info, tag))\n# Capturer(args.out_dir, capt_data, sep=True).capture()\n# else:\n# for link, info, tag in data_combined:\n# capt_action = functools.partial(run_download, link)\n# full_info = info + \"Initial link: \" + link + \"\\n\"\n# capt_data.append((capt_action, full_info, tag))\n# 
Capturer(args.out_dir, capt_data, sep=False).capture()\n\n# args = parse_args(sys.argv[1:])\n# for url, info, flags, tag in data:\n# curr_out_dir = args.out_dir + \"/\" + tag\n# if not os.path.exists(curr_out_dir):\n# os.makedirs(curr_out_dir)\n# download = functools.partial(run_download, flags, url)\n# Capturer(curr_out_dir, download, tag, info).capture()","sub_path":"scripts/capturing_scripts/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"603429397","text":"arr=raw_input()\nnewarr=\"\"\nfor i in range(0,len(arr)-1):\n if i!=0:\n newarr=newarr+arr[i]\nlist=[int(n) for n in newarr.split(',')]\nk=int(input())\nx=int(input())\nnum=[]\nmark,left,right,min=-1,1,1,0\nfor i in range(0,len(list)):\n if list[i]==x:\n mark=i\n num.append(x)\n k=k-1\n break\nif mark==-1:\n min=x-list[0]\n if min<0:\n min=0-min\n mark=0\n for i in range(0,len(list)):\n a=x-list[i]\n if a<0:\n a=0-a\n if a0:\n le=ri=-1\n if mark-left>=0:\n le=x-list[mark-left]\n if (mark+right)=0 and ri>=0:\n if le<=ri:\n num.insert(0,list[mark-left])\n left=left+1\n else:\n num.append(list[mark+right])\n right=right+1\n elif le==-1 and ri>=0:\n num.append(list[mark + right])\n right = right + 1\n elif le>=0 and ri==-1:\n num.insert(0,list[mark - left])\n left = left + 1\n k=k-1\nfor i in range(0, k):\n num[i] = int(num[i])\nfor m in range(0,k):\n for n in range(i+1,k):\n if num[m]>num[n]:\n num[m],num[n]=num[n],num[m]\nprint(num)","sub_path":"Code/CodeRecords/2564/60795/246903.py","file_name":"246903.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"217824219","text":"class Solution(object):\n def numSquares(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n \"TLE: do not know why\"\n # Use BFS\n if n <= 0 : return 0\n queue = collections.deque([n])\n nextQ = collections.deque([])\n level = 0\n visited = [False for x in xrange(n+1)]\n # Think the BFS process as drawing a graph by BFS\n while queue:\n node = queue.popleft()\n if node == 0: \n return level\n i = 1\n while i**2 <= node:\n if not visited[node-i**2]: # avoid drawing repeated nodes at same level\n nextQ.append(node-i**2) # draw the node\n visited[node-i**2] = True # mark as the node has been drawn\n i+=1 \n if not queue:\n nextQ.reverse() # reverse to promote speed\n queue, nextQ = nextQ, queue\n level += 1","sub_path":"perfect_squares/BFS_2.py","file_name":"BFS_2.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"185023475","text":"import re\nfrom argparse import ArgumentDefaultsHelpFormatter\nfrom glob import glob\n\nfrom django.core.management.base import BaseCommand\n\nimport os\n\nimport file_access_utils\nfrom librarian.models import ExternalFileDirectory, Dataset\n\n\nclass Command(BaseCommand):\n help = 'Converts uploaded datasets to external datasets.'\n\n def add_arguments(self, parser):\n parser.formatter_class = ArgumentDefaultsHelpFormatter\n parser.add_argument(\"-x\",\n \"--extension\",\n help=\"file name extension\",\n default=\".fastq.gz\")\n parser.add_argument(\"-p\",\n \"--pattern\",\n help=\"description pattern\",\n default=r'.* from MiSeq run (.*)$')\n parser.add_argument(\"-d\",\n \"--dry_run\",\n action='store_true',\n help=\"don't make any changes\")\n parser.add_argument(\n \"-t\",\n \"--template\",\n 
help=\"folder template\",\n default=r'~/data/RAW_DATA/MiSeq/runs/\\1*/Data/Intensities/BaseCalls')\n\n def handle(self, *args, **options):\n description_pattern = options['pattern']\n folder_template = options['template']\n external_directories = ExternalFileDirectory.objects.order_by('-path')\n missing_folders = set()\n changed_files = set()\n\n datasets = Dataset.objects.filter(\n externalfiledirectory__isnull=True, # not already external\n file_source__isnull=True, # not an output\n usurps__isnull=True, # not generated by MD5 conflict\n name__endswith=options['extension'])\n print('Uploaded datasets:', datasets.count())\n for dataset in datasets:\n match = re.match(description_pattern, dataset.description)\n if not match:\n print('No pattern match:', dataset.description)\n continue\n folder_path = os.path.expanduser(match.expand(folder_template))\n expected_path = os.path.join(folder_path, dataset.name)\n files = glob(expected_path)\n if len(files) == 0:\n if len(glob(folder_path)) == 0:\n missing_folders.add(folder_path)\n else:\n print('Missing file:', expected_path)\n continue\n if len(files) > 1:\n print('Too many matches:', expected_path)\n continue\n found_file, = files\n for external_directory in external_directories:\n if found_file.startswith(external_directory.path):\n file_path = os.path.relpath(found_file,\n external_directory.path)\n if not self.is_md5_changed(dataset,\n found_file,\n changed_files):\n if not options['dry_run']:\n dataset.externalfiledirectory = external_directory\n dataset.external_path = file_path\n dataset.dataset_file.delete(save=True)\n print('.', end='')\n break\n else:\n print('Not under any external directory:', expected_path)\n for folder in sorted(missing_folders):\n print('Missing folder:', folder)\n\n def is_md5_changed(self, dataset, found_file, changed_files):\n old_md5 = dataset.MD5_checksum\n with open(found_file, \"rb\") as f:\n new_md5 = file_access_utils.compute_md5(f)\n is_changed = new_md5 != old_md5\n if is_changed:\n if found_file not in changed_files:\n print('MD5 changed:',\n old_md5,\n 'to',\n new_md5,\n found_file)\n changed_files.add(found_file)\n return is_changed\n","sub_path":"kive/librarian/management/commands/convert_external_datasets.py","file_name":"convert_external_datasets.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"536663503","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# Copyright 2021 A10 Networks\n# GNU General Public License v3.0+\n# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nREQUIRED_NOT_SET = (False, \"One of ({}) must be set.\")\nREQUIRED_MUTEX = (False, \"Only one of ({}) can be set.\")\nREQUIRED_VALID = (True, \"\")\n\nDOCUMENTATION = r'''\nmodule: a10_ddos_zone_template_http\ndescription:\n - HTTP template Configuration\nauthor: A10 Networks\noptions:\n state:\n description:\n - State of the object to be created.\n choices:\n - noop\n - present\n - absent\n type: str\n required: True\n ansible_host:\n description:\n - Host for AXAPI authentication\n type: str\n required: True\n ansible_username:\n description:\n - Username for AXAPI authentication\n type: str\n required: True\n ansible_password:\n description:\n - Password for AXAPI authentication\n type: str\n required: True\n ansible_port:\n description:\n - Port for AXAPI authentication\n type: int\n required: True\n a10_device_context_id:\n description:\n - Device ID for aVCS configuration\n choices: [1-8]\n type: int\n 
required: False\n a10_partition:\n description:\n - Destination/target partition for object/command\n type: str\n required: False\n http_tmpl_name:\n description:\n - \"DDOS HTTP Template Name\"\n type: str\n required: True\n disable:\n description:\n - \"Disable this template\"\n type: bool\n required: False\n multi_pu_threshold_distribution:\n description:\n - \"Field multi_pu_threshold_distribution\"\n type: dict\n required: False\n suboptions:\n multi_pu_threshold_distribution_value:\n description:\n - \"Destination side rate limit only. Default= 0\"\n type: int\n multi_pu_threshold_distribution_disable:\n description:\n - \"'disable'= Destination side rate limit only. Default= Enable;\"\n type: str\n mss_timeout:\n description:\n - \"Field mss_timeout\"\n type: dict\n required: False\n suboptions:\n mss_percent:\n description:\n - \"Configure percentage of mss such that if a packet size is below the mss times\n mss-percent, packet is considered bad.\"\n type: int\n number_packets:\n description:\n - \"Specify percentage of mss. Default is 0, mss-timeout is not enabled.\"\n type: int\n mss_timeout_action_list_name:\n description:\n - \"Configure action-list to take\"\n type: str\n mss_timeout_action:\n description:\n - \"'drop'= Drop packets (Default); 'ignore'= Take no action; 'blacklist-src'=\n Blacklist-src; 'reset'= Reset client connection;\"\n type: str\n disallow_connect_method:\n description:\n - \"Do not allow HTTP Connect method (asymmetric mode only)\"\n type: bool\n required: False\n challenge:\n description:\n - \"Field challenge\"\n type: dict\n required: False\n suboptions:\n challenge_method:\n description:\n - \"'http-redirect'= http-redirect; 'javascript'= javascript;\"\n type: str\n challenge_redirect_code:\n description:\n - \"'302'= 302 Found; '307'= 307 Temporary Redirect;\"\n type: str\n challenge_uri_encode:\n description:\n - \"Encode the challenge phrase in uri instead of in http cookie. Default encoded\n in http cookie\"\n type: bool\n challenge_cookie_name:\n description:\n - \"Set the cookie name used to send back to client. Default is sto-idd\"\n type: str\n challenge_keep_cookie:\n description:\n - \"Keep the challenge cookie from client and forward to backend. Default is do not\n keep\"\n type: bool\n challenge_interval:\n description:\n - \"Specify the challenge interval. Default is 8 seconds\"\n type: int\n challenge_pass_action_list_name:\n description:\n - \"Configure action-list to take for passing the authentication\"\n type: str\n challenge_pass_action:\n description:\n - \"'authenticate-src'= Authenticate-src (Default);\"\n type: str\n challenge_fail_action_list_name:\n description:\n - \"Configure action-list to take for failing the authentication\"\n type: str\n challenge_fail_action:\n description:\n - \"'blacklist-src'= Blacklist-src; 'reset'= Reset client connection(Default);\"\n type: str\n non_http_bypass:\n description:\n - \"Bypass non-http traffic instead of dropping\"\n type: bool\n required: False\n client_source_ip:\n description:\n - \"Field client_source_ip\"\n type: dict\n required: False\n suboptions:\n client_source_ip:\n description:\n - \"Mitigate on src ip specified by http header for example X-Forwarded-For header.\n Default is disabled\"\n type: bool\n http_header_name:\n description:\n - \"Set the http header name to parse for client ip. 
Default is X-Forwarded-For\"\n type: str\n request_header:\n description:\n - \"Field request_header\"\n type: dict\n required: False\n suboptions:\n timeout:\n description:\n - \"Field timeout\"\n type: int\n header_timeout_action_list_name:\n description:\n - \"Configure action-list to take\"\n type: str\n header_timeout_action:\n description:\n - \"'drop'= Drop packets (Default); 'blacklist-src'= Blacklist-src; 'reset'= Reset\n client connection;\"\n type: str\n src:\n description:\n - \"Field src\"\n type: dict\n required: False\n suboptions:\n rate_limit:\n description:\n - \"Field rate_limit\"\n type: dict\n dst:\n description:\n - \"Field dst\"\n type: dict\n required: False\n suboptions:\n rate_limit:\n description:\n - \"Field rate_limit\"\n type: dict\n slow_read:\n description:\n - \"Field slow_read\"\n type: dict\n required: False\n suboptions:\n min_window_size:\n description:\n - \"minimum window size\"\n type: int\n min_window_count:\n description:\n - \"Number of packets\"\n type: int\n slow_read_action_list_name:\n description:\n - \"Configure action-list to take\"\n type: str\n slow_read_action:\n description:\n - \"'drop'= Drop packets (Default); 'blacklist-src'= Blacklist-src; 'ignore'= Take\n no action; 'reset'= Reset client connection;\"\n type: str\n out_of_order_queue_size:\n description:\n - \"Set the number of packets for the out-of-order HTTP queue (asym mode only)\"\n type: int\n required: False\n out_of_order_queue_timeout:\n description:\n - \"Set the timeout value in seconds for out-of-order queue in HTTP (asym mode\n only)\"\n type: int\n required: False\n idle_timeout:\n description:\n - \"Field idle_timeout\"\n type: dict\n required: False\n suboptions:\n idle_timeout_value:\n description:\n - \"Set the the idle timeout value in seconds for HTTP connections\"\n type: int\n ignore_zero_payload:\n description:\n - \"Don't reset idle timer on packets with zero payload length from clients\"\n type: bool\n idle_timeout_action_list_name:\n description:\n - \"Configure action-list to take\"\n type: str\n idle_timeout_action:\n description:\n - \"'drop'= Drop packets (Default); 'blacklist-src'= Blacklist-src; 'reset'= Reset\n client connection;\"\n type: str\n uuid:\n description:\n - \"uuid of the object\"\n type: str\n required: False\n user_tag:\n description:\n - \"Customized tag\"\n type: str\n required: False\n filter_list:\n description:\n - \"Field filter_list\"\n type: list\n required: False\n suboptions:\n http_filter_name:\n description:\n - \"Field http_filter_name\"\n type: str\n http_filter_seq:\n description:\n - \"Sequence number\"\n type: int\n http_header_cfg:\n description:\n - \"Field http_header_cfg\"\n type: dict\n http_referer_cfg:\n description:\n - \"Field http_referer_cfg\"\n type: dict\n http_agent_cfg:\n description:\n - \"Field http_agent_cfg\"\n type: dict\n http_uri_cfg:\n description:\n - \"Field http_uri_cfg\"\n type: dict\n dst:\n description:\n - \"Field dst\"\n type: dict\n http_filter_action_list_name:\n description:\n - \"Configure action-list to take\"\n type: str\n http_filter_action:\n description:\n - \"'drop'= Drop packets (Default); 'ignore'= Take no action; 'blacklist-src'=\n Blacklist-src; 'authenticate-src'= Authenticate-src; 'reset'= Reset client\n connection;\"\n type: str\n uuid:\n description:\n - \"uuid of the object\"\n type: str\n user_tag:\n description:\n - \"Customized tag\"\n type: str\n malformed_http:\n description:\n - \"Field malformed_http\"\n type: dict\n required: False\n suboptions:\n 
malformed_http:\n description:\n - \"'check'= Configure malformed HTTP parameters;\"\n type: str\n malformed_http_max_line_size:\n description:\n - \"Set the maximum line size. Default value is 32512\"\n type: int\n malformed_http_max_num_headers:\n description:\n - \"Set the maximum number of headers. Default value is 90\"\n type: int\n malformed_http_max_req_line_size:\n description:\n - \"Set the maximum request line size. Default value is 32512\"\n type: int\n malformed_http_max_header_name_size:\n description:\n - \"Set the maxinum header name length. Default value is 64.\"\n type: int\n malformed_http_max_content_length:\n description:\n - \"Set the maxinum content-length header. Default value is 4294967295 bytes\"\n type: int\n malformed_http_bad_chunk_mon_enabled:\n description:\n - \"Enabling bad chunk monitoring. Default is disabled\"\n type: bool\n malformed_http_action_list_name:\n description:\n - \"Configure action-list to take\"\n type: str\n malformed_http_action:\n description:\n - \"'drop'= Drop packets (Default); 'reset'= Reset client connection; 'blacklist-\n src'= Blacklist-src;\"\n type: str\n uuid:\n description:\n - \"uuid of the object\"\n type: str\n\n'''\n\nRETURN = r'''\nmodified_values:\n description:\n - Values modified (or potential changes if using check_mode) as a result of task operation\n returned: changed\n type: dict\naxapi_calls:\n description: Sequential list of AXAPI calls made by the task\n returned: always\n type: list\n elements: dict\n contains:\n endpoint:\n description: The AXAPI endpoint being accessed.\n type: str\n sample:\n - /axapi/v3/slb/virtual_server\n - /axapi/v3/file/ssl-cert\n http_method:\n description:\n - HTTP method being used by the primary task to interact with the AXAPI endpoint.\n type: str\n sample:\n - POST\n - GET\n request_body:\n description: Params used to query the AXAPI\n type: complex\n response_body:\n description: Response from the AXAPI\n type: complex\n'''\n\nEXAMPLES = \"\"\"\n\"\"\"\n\nimport copy\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils import \\\n errors as a10_ex\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils import \\\n wrapper as api_client\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils import \\\n utils\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils.client import \\\n client_factory\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils.kwbl import \\\n KW_OUT, translate_blacklist as translateBlacklist\n\n# Hacky way of having access to object properties for evaluation\nAVAILABLE_PROPERTIES = [\n \"challenge\", \"client_source_ip\", \"disable\", \"disallow_connect_method\", \"dst\", \"filter_list\", \"http_tmpl_name\", \"idle_timeout\", \"malformed_http\", \"mss_timeout\", \"multi_pu_threshold_distribution\", \"non_http_bypass\", \"out_of_order_queue_size\", \"out_of_order_queue_timeout\", \"request_header\", \"slow_read\", \"src\", \"user_tag\", \"uuid\",\n ]\n\n\ndef get_default_argspec():\n return dict(\n ansible_host=dict(type='str', required=True),\n ansible_username=dict(type='str', required=True),\n ansible_password=dict(type='str', required=True, no_log=True),\n state=dict(type='str', default=\"present\", choices=['noop', 'present', 'absent']),\n ansible_port=dict(type='int', choices=[80, 443], required=True),\n a10_partition=dict(type='str', required=False,\n ),\n a10_device_context_id=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8], required=False,\n ),\n 
get_type=dict(type='str', choices=[\"single\", \"list\", \"oper\", \"stats\"]),\n )\n\n\ndef get_argspec():\n rv = get_default_argspec()\n rv.update({\n 'http_tmpl_name': {\n 'type': 'str',\n 'required': True,\n },\n 'disable': {\n 'type': 'bool',\n },\n 'multi_pu_threshold_distribution': {\n 'type': 'dict',\n 'multi_pu_threshold_distribution_value': {\n 'type': 'int',\n },\n 'multi_pu_threshold_distribution_disable': {\n 'type': 'str',\n 'choices': ['disable']\n }\n },\n 'mss_timeout': {\n 'type': 'dict',\n 'mss_percent': {\n 'type': 'int',\n },\n 'number_packets': {\n 'type': 'int',\n },\n 'mss_timeout_action_list_name': {\n 'type': 'str',\n },\n 'mss_timeout_action': {\n 'type': 'str',\n 'choices': ['drop', 'ignore', 'blacklist-src', 'reset']\n }\n },\n 'disallow_connect_method': {\n 'type': 'bool',\n },\n 'challenge': {\n 'type': 'dict',\n 'challenge_method': {\n 'type': 'str',\n 'choices': ['http-redirect', 'javascript']\n },\n 'challenge_redirect_code': {\n 'type': 'str',\n 'choices': ['302', '307']\n },\n 'challenge_uri_encode': {\n 'type': 'bool',\n },\n 'challenge_cookie_name': {\n 'type': 'str',\n },\n 'challenge_keep_cookie': {\n 'type': 'bool',\n },\n 'challenge_interval': {\n 'type': 'int',\n },\n 'challenge_pass_action_list_name': {\n 'type': 'str',\n },\n 'challenge_pass_action': {\n 'type': 'str',\n 'choices': ['authenticate-src']\n },\n 'challenge_fail_action_list_name': {\n 'type': 'str',\n },\n 'challenge_fail_action': {\n 'type': 'str',\n 'choices': ['blacklist-src', 'reset']\n }\n },\n 'non_http_bypass': {\n 'type': 'bool',\n },\n 'client_source_ip': {\n 'type': 'dict',\n 'client_source_ip': {\n 'type': 'bool',\n },\n 'http_header_name': {\n 'type': 'str',\n }\n },\n 'request_header': {\n 'type': 'dict',\n 'timeout': {\n 'type': 'int',\n },\n 'header_timeout_action_list_name': {\n 'type': 'str',\n },\n 'header_timeout_action': {\n 'type': 'str',\n 'choices': ['drop', 'blacklist-src', 'reset']\n }\n },\n 'src': {\n 'type': 'dict',\n 'rate_limit': {\n 'type': 'dict',\n 'http_post': {\n 'type': 'dict',\n 'src_post_rate_limit': {\n 'type': 'int',\n },\n 'src_post_rate_limit_action_list_name': {\n 'type': 'str',\n },\n 'src_post_rate_limit_action': {\n 'type': 'str',\n 'choices': ['drop', 'ignore', 'reset', 'blacklist-src']\n }\n },\n 'http_request': {\n 'type': 'dict',\n 'src_request_rate': {\n 'type': 'int',\n },\n 'src_request_rate_limit_action_list_name': {\n 'type': 'str',\n },\n 'src_request_rate_limit_action': {\n 'type': 'str',\n 'choices': ['drop', 'ignore', 'reset', 'blacklist-src']\n }\n }\n }\n },\n 'dst': {\n 'type': 'dict',\n 'rate_limit': {\n 'type': 'dict',\n 'http_post': {\n 'type': 'dict',\n 'dst_post_rate_limit': {\n 'type': 'int',\n },\n 'dst_post_rate_limit_action_list_name': {\n 'type': 'str',\n },\n 'dst_post_rate_limit_action': {\n 'type': 'str',\n 'choices': ['drop', 'ignore', 'reset', 'blacklist-src']\n }\n },\n 'http_request': {\n 'type': 'dict',\n 'dst_request_rate': {\n 'type': 'int',\n },\n 'dst_request_rate_limit_action_list_name': {\n 'type': 'str',\n },\n 'dst_request_rate_limit_action': {\n 'type': 'str',\n 'choices': ['drop', 'ignore', 'reset', 'blacklist-src']\n }\n },\n 'response_size': {\n 'type': 'dict',\n 'less_cfg': {\n 'type': 'list',\n 'obj_less': {\n 'type': 'int',\n },\n 'obj_less_rate': {\n 'type': 'int',\n }\n },\n 'greater_cfg': {\n 'type': 'list',\n 'obj_greater': {\n 'type': 'int',\n },\n 'obj_greater_rate': {\n 'type': 'int',\n }\n },\n 'between_cfg': {\n 'type': 'list',\n 'obj_between1': {\n 'type': 'int',\n },\n 
'obj_between2': {\n 'type': 'int',\n },\n 'obj_between_rate': {\n 'type': 'int',\n }\n },\n 'response_size_action_list_name': {\n 'type': 'str',\n },\n 'response_size_action': {\n 'type': 'str',\n 'choices': ['drop', 'ignore', 'blacklist-src', 'reset']\n }\n }\n }\n },\n 'slow_read': {\n 'type': 'dict',\n 'min_window_size': {\n 'type': 'int',\n },\n 'min_window_count': {\n 'type': 'int',\n },\n 'slow_read_action_list_name': {\n 'type': 'str',\n },\n 'slow_read_action': {\n 'type': 'str',\n 'choices': ['drop', 'blacklist-src', 'ignore', 'reset']\n }\n },\n 'out_of_order_queue_size': {\n 'type': 'int',\n },\n 'out_of_order_queue_timeout': {\n 'type': 'int',\n },\n 'idle_timeout': {\n 'type': 'dict',\n 'idle_timeout_value': {\n 'type': 'int',\n },\n 'ignore_zero_payload': {\n 'type': 'bool',\n },\n 'idle_timeout_action_list_name': {\n 'type': 'str',\n },\n 'idle_timeout_action': {\n 'type': 'str',\n 'choices': ['drop', 'blacklist-src', 'reset']\n }\n },\n 'uuid': {\n 'type': 'str',\n },\n 'user_tag': {\n 'type': 'str',\n },\n 'filter_list': {\n 'type': 'list',\n 'http_filter_name': {\n 'type': 'str',\n 'required': True,\n },\n 'http_filter_seq': {\n 'type': 'int',\n },\n 'http_header_cfg': {\n 'type': 'dict',\n 'http_filter_header_regex': {\n 'type': 'str',\n },\n 'http_filter_header_inverse_match': {\n 'type': 'bool',\n }\n },\n 'http_referer_cfg': {\n 'type': 'dict',\n 'referer_equals_cfg': {\n 'type': 'list',\n 'http_filter_referer_equals': {\n 'type': 'str',\n }\n },\n 'referer_contains_cfg': {\n 'type': 'list',\n 'http_filter_referer_contains': {\n 'type': 'str',\n }\n },\n 'referer_starts_cfg': {\n 'type': 'list',\n 'http_filter_referer_starts_with': {\n 'type': 'str',\n }\n },\n 'referer_ends_cfg': {\n 'type': 'list',\n 'http_filter_referer_ends_with': {\n 'type': 'str',\n }\n }\n },\n 'http_agent_cfg': {\n 'type': 'dict',\n 'agent_equals_cfg': {\n 'type': 'list',\n 'http_filter_agent_equals': {\n 'type': 'str',\n }\n },\n 'agent_contains_cfg': {\n 'type': 'list',\n 'http_filter_agent_contains': {\n 'type': 'str',\n }\n },\n 'agent_starts_cfg': {\n 'type': 'list',\n 'http_filter_agent_starts_with': {\n 'type': 'str',\n }\n },\n 'agent_ends_cfg': {\n 'type': 'list',\n 'http_filter_agent_ends_with': {\n 'type': 'str',\n }\n }\n },\n 'http_uri_cfg': {\n 'type': 'dict',\n 'uri_equal_cfg': {\n 'type': 'list',\n 'http_filter_uri_equals': {\n 'type': 'str',\n }\n },\n 'uri_contains_cfg': {\n 'type': 'list',\n 'http_filter_uri_contains': {\n 'type': 'str',\n }\n },\n 'uri_starts_cfg': {\n 'type': 'list',\n 'http_filter_uri_starts_with': {\n 'type': 'str',\n }\n },\n 'uri_ends_cfg': {\n 'type': 'list',\n 'http_filter_uri_ends_with': {\n 'type': 'str',\n }\n }\n },\n 'dst': {\n 'type': 'dict',\n 'http_filter_rate_limit': {\n 'type': 'int',\n }\n },\n 'http_filter_action_list_name': {\n 'type': 'str',\n },\n 'http_filter_action': {\n 'type': 'str',\n 'choices': ['drop', 'ignore', 'blacklist-src', 'authenticate-src', 'reset']\n },\n 'uuid': {\n 'type': 'str',\n },\n 'user_tag': {\n 'type': 'str',\n }\n },\n 'malformed_http': {\n 'type': 'dict',\n 'malformed_http': {\n 'type': 'str',\n 'choices': ['check']\n },\n 'malformed_http_max_line_size': {\n 'type': 'int',\n },\n 'malformed_http_max_num_headers': {\n 'type': 'int',\n },\n 'malformed_http_max_req_line_size': {\n 'type': 'int',\n },\n 'malformed_http_max_header_name_size': {\n 'type': 'int',\n },\n 'malformed_http_max_content_length': {\n 'type': 'int',\n },\n 'malformed_http_bad_chunk_mon_enabled': {\n 'type': 'bool',\n },\n 
'malformed_http_action_list_name': {\n 'type': 'str',\n },\n 'malformed_http_action': {\n 'type': 'str',\n 'choices': ['drop', 'reset', 'blacklist-src']\n },\n 'uuid': {\n 'type': 'str',\n }\n }\n })\n return rv\n\n\ndef existing_url(module):\n \"\"\"Return the URL for an existing resource\"\"\"\n # Build the format dictionary\n url_base = \"/axapi/v3/ddos/zone-template/http/{http_tmpl_name}\"\n\n f_dict = {}\n if '/' in str(module.params[\"http_tmpl_name\"]):\n f_dict[\"http_tmpl_name\"] = module.params[\"http_tmpl_name\"].replace(\"/\", \"%2F\")\n else:\n f_dict[\"http_tmpl_name\"] = module.params[\"http_tmpl_name\"]\n\n return url_base.format(**f_dict)\n\n\ndef new_url(module):\n \"\"\"Return the URL for creating a resource\"\"\"\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/ddos/zone-template/http\"\n\n f_dict = {}\n f_dict[\"http_tmpl_name\"] = \"\"\n\n return url_base.format(**f_dict)\n\n\ndef report_changes(module, result, existing_config, payload):\n change_results = copy.deepcopy(result)\n if not existing_config:\n change_results[\"modified_values\"].update(**payload)\n return change_results\n\n config_changes = copy.deepcopy(existing_config)\n for k, v in payload[\"http\"].items():\n v = 1 if str(v).lower() == \"true\" else v\n v = 0 if str(v).lower() == \"false\" else v\n\n if config_changes[\"http\"].get(k) != v:\n change_results[\"changed\"] = True\n config_changes[\"http\"][k] = v\n\n change_results[\"modified_values\"].update(**config_changes)\n return change_results\n\n\ndef create(module, result, payload={}):\n call_result = api_client.post(module.client, new_url(module), payload)\n result[\"axapi_calls\"].append(call_result)\n result[\"modified_values\"].update(**call_result[\"response_body\"])\n result[\"changed\"] = True\n return result\n\n\ndef update(module, result, existing_config, payload={}):\n call_result = api_client.post(module.client, existing_url(module), payload)\n result[\"axapi_calls\"].append(call_result)\n if call_result[\"response_body\"] == existing_config:\n result[\"changed\"] = False\n else:\n result[\"modified_values\"].update(**call_result[\"response_body\"])\n result[\"changed\"] = True\n return result\n\n\ndef present(module, result, existing_config):\n payload = utils.build_json(\"http\", module.params, AVAILABLE_PROPERTIES)\n change_results = report_changes(module, result, existing_config, payload)\n if module.check_mode:\n return change_results\n elif not existing_config:\n return create(module, result, payload)\n elif existing_config and change_results.get('changed'):\n return update(module, result, existing_config, payload)\n return result\n\n\ndef delete(module, result):\n try:\n call_result = api_client.delete(module.client, existing_url(module))\n result[\"axapi_calls\"].append(call_result)\n result[\"changed\"] = True\n except a10_ex.NotFound:\n result[\"changed\"] = False\n return result\n\n\ndef absent(module, result, existing_config):\n if not existing_config:\n result[\"changed\"] = False\n return result\n\n if module.check_mode:\n result[\"changed\"] = True\n return result\n\n return delete(module, result)\n\n\ndef run_command(module):\n result = dict(changed=False, messages=\"\", modified_values={}, axapi_calls=[], ansible_facts={}, acos_info={})\n\n state = module.params[\"state\"]\n ansible_host = module.params[\"ansible_host\"]\n ansible_username = module.params[\"ansible_username\"]\n ansible_password = module.params[\"ansible_password\"]\n ansible_port = 
module.params[\"ansible_port\"]\n a10_partition = module.params[\"a10_partition\"]\n a10_device_context_id = module.params[\"a10_device_context_id\"]\n\n if ansible_port == 80:\n protocol = \"http\"\n elif ansible_port == 443:\n protocol = \"https\"\n\n module.client = client_factory(ansible_host, ansible_port, protocol, ansible_username, ansible_password)\n\n valid = True\n\n run_errors = []\n if state == 'present':\n requires_one_of = sorted([])\n valid, validation_errors = utils.validate(module.params, requires_one_of)\n for ve in validation_errors:\n run_errors.append(ve)\n\n if not valid:\n err_msg = \"\\n\".join(run_errors)\n result[\"messages\"] = \"Validation failure: \" + str(run_errors)\n module.fail_json(msg=err_msg, **result)\n\n try:\n if a10_partition:\n result[\"axapi_calls\"].append(api_client.active_partition(module.client, a10_partition))\n\n if a10_device_context_id:\n result[\"axapi_calls\"].append(api_client.switch_device_context(module.client, a10_device_context_id))\n\n existing_config = api_client.get(module.client, existing_url(module))\n result[\"axapi_calls\"].append(existing_config)\n if existing_config['response_body'] != 'NotFound':\n existing_config = existing_config[\"response_body\"]\n else:\n existing_config = None\n\n if state == 'present':\n result = present(module, result, existing_config)\n\n if state == 'absent':\n result = absent(module, result, existing_config)\n\n if state == 'noop':\n if module.params.get(\"get_type\") == \"single\":\n get_result = api_client.get(module.client, existing_url(module))\n result[\"axapi_calls\"].append(get_result)\n info = get_result[\"response_body\"]\n result[\"acos_info\"] = info[\"http\"] if info != \"NotFound\" else info\n elif module.params.get(\"get_type\") == \"list\":\n get_list_result = api_client.get_list(module.client, existing_url(module))\n result[\"axapi_calls\"].append(get_list_result)\n\n info = get_list_result[\"response_body\"]\n result[\"acos_info\"] = info[\"http-list\"] if info != \"NotFound\" else info\n except a10_ex.ACOSException as ex:\n module.fail_json(msg=ex.msg, **result)\n except Exception as gex:\n raise gex\n finally:\n if module.client.auth_session.session_id:\n module.client.auth_session.close()\n\n return result\n\n\ndef main():\n module = AnsibleModule(argument_spec=get_argspec(), supports_check_mode=True)\n result = run_command(module)\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"plugins/modules/a10_ddos_zone_template_http.py","file_name":"a10_ddos_zone_template_http.py","file_ext":"py","file_size_in_byte":34804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"503088311","text":"import logging\n\nimport numpy as np\n\nfrom .base import Attack\nfrom .base import generator_decorator\nfrom .gradient import GradientAttack\nfrom .. import rng\n\n\nclass SaliencyMapAttack(Attack):\n \"\"\"Implements the Saliency Map Attack.\n\n The attack was introduced in [1]_.\n\n References\n ----------\n .. [1] Nicolas Papernot, Patrick McDaniel, Somesh Jha, Matt Fredrikson,\n Z. 
Berkay Celik, Ananthram Swami, \"The Limitations of Deep Learning\n in Adversarial Settings\", https://arxiv.org/abs/1511.07528\n\n \"\"\"\n\n @generator_decorator\n def as_generator(\n self,\n a,\n max_iter=2000,\n num_random_targets=0,\n fast=True,\n theta=0.1,\n max_perturbations_per_pixel=7,\n ):\n\n \"\"\"Implements the Saliency Map Attack.\n\n Parameters\n ----------\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n The original, unperturbed input as a `numpy.ndarray` or\n an :class:`Adversarial` instance.\n label : int\n The reference label of the original input. Must be passed\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\n an :class:`Adversarial` instance.\n max_iter : int\n The maximum number of iterations to run.\n num_random_targets : int\n Number of random target classes if no target class is given\n by the criterion.\n fast : bool\n Whether to use the fast saliency map calculation.\n theta : float\n perturbation per pixel relative to [min, max] range.\n max_perturbations_per_pixel : int\n Maximum number of times a pixel can be modified.\n\n \"\"\"\n\n # TODO: the original algorithm works on pixels across channels!\n\n original_class = a.original_class\n\n target_class = a.target_class\n if target_class is None:\n if num_random_targets == 0:\n gradient_attack = GradientAttack()\n adv_grad = yield from gradient_attack.as_generator(a)\n adv_img = adv_grad.perturbed\n if adv_img is None: # pragma: no coverage\n # using GradientAttack did not work,\n # falling back to random target\n num_random_targets = 1\n logging.info(\n \"Using GradientAttack to determine a target \"\n \"class failed, falling back to a random target \"\n \"class\"\n )\n else:\n logits, _ = yield from a.forward_one(adv_img)\n target_class = np.argmax(logits)\n target_classes = [target_class]\n logging.info(\n \"Determined a target class using the \"\n \"GradientAttack: {}\".format(target_class)\n )\n else: # pragma: no coverage\n num_random_targets = 1\n\n if num_random_targets > 0:\n\n # draw num_random_targets random classes all of which are\n # different and not the original class\n\n num_classes = a.num_classes()\n assert num_random_targets <= num_classes - 1\n\n # sample one more than necessary\n # remove original class from samples\n # should be more efficient than other approaches, see\n # https://github.com/numpy/numpy/issues/2764\n target_classes = rng.sample(range(num_classes), num_random_targets + 1)\n target_classes = [t for t in target_classes if t != original_class]\n target_classes = target_classes[:num_random_targets]\n\n str_target_classes = [str(t) for t in target_classes]\n logging.info(\n \"Random target classes: {}\".format(\", \".join(str_target_classes))\n )\n else:\n target_classes = [target_class]\n\n # avoid mixing GradientAttack and SaliencyMapAttack\n a._reset()\n\n for target in target_classes:\n\n x = a.unperturbed\n\n # the mask defines the search domain\n # each modified pixel with border value is set to zero in mask\n mask = np.ones_like(x)\n\n # count tracks how often each pixel was changed\n counts = np.zeros_like(x)\n\n # TODO: shouldn't this be without target\n labels = range(a.num_classes())\n\n perturbed = x.copy()\n\n min_, max_ = a.bounds()\n\n # TODO: stop if mask is all zero\n for step in range(max_iter):\n _, is_adversarial = yield from a.forward_one(perturbed)\n if is_adversarial:\n return\n\n # get pixel location with highest influence on class\n idx, p_sign = yield from self._saliency_map(\n a, perturbed, target, labels, mask, fast=fast\n 
)\n\n # apply perturbation\n perturbed[idx] += -p_sign * theta * (max_ - min_)\n\n # tracks number of updates for each pixel\n counts[idx] += 1\n\n # remove pixel from search domain if it hits the bound\n if perturbed[idx] <= min_ or perturbed[idx] >= max_:\n mask[idx] = 0\n\n # remove pixel if it was changed too often\n if counts[idx] >= max_perturbations_per_pixel:\n mask[idx] = 0\n\n perturbed = np.clip(perturbed, min_, max_)\n\n def _saliency_map(self, a, x, target, labels, mask, fast=False):\n \"\"\"Implements Algorithm 3 in manuscript\n\n \"\"\"\n\n # pixel influence on target class\n alphas = yield from a.gradient_one(x, target)\n alphas *= mask\n\n # pixel influence on sum of residual classes\n # (don't evaluate if fast == True)\n if fast:\n betas = -np.ones_like(alphas)\n else:\n betas = []\n for label in labels:\n beta = yield from a.gradient_one(x, label)\n beta *= mask - alphas\n betas.append(beta)\n betas = np.sum(betas)\n\n # compute saliency map\n # (take into account both pos. & neg. perturbations)\n salmap = np.abs(alphas) * np.abs(betas) * np.sign(alphas * betas)\n\n # find optimal pixel & direction of perturbation\n idx = np.argmin(salmap)\n idx = np.unravel_index(idx, mask.shape)\n pix_sign = np.sign(alphas)[idx]\n\n return idx, pix_sign\n","sub_path":"cnns/foolbox/foolbox_2_3_0/attacks/saliency.py","file_name":"saliency.py","file_ext":"py","file_size_in_byte":6644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"206422489","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/2/13 22:06\n# @Author : Linder\n# @Email : lmj2018666@gmail.com\n# @Software: PyCharm\nimport heapq\n\nclass KthLargest:\n\tdef __init__(self, k, nums):\n\t\t\"\"\"\n\t\t:type k: int\n\t\t:type nums: List[int]\n\t\t\"\"\"\n\t\tself.k = k\n\t\tself.heap = []\n\t\tfor n in nums:\n\t\t\tself.add(n)\n\n\tdef add(self, val):\n\t\t\"\"\"\n\t\t:type val: int\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tif len(self.heap) < self.k:\n\t\t\theapq.heappush(self.heap, val)\n\t\telif self.heap[0] < val:\n\t\t\theapq.heappop(self.heap)\n\t\t\theapq.heappush(self.heap, val)\n\t\treturn self.heap[0]\n\n\n\n\t\t# Your KthLargest object will be instantiated and called as such:\nobj = KthLargest(k=2, nums=[3,2,5])\nprint(obj.add(4))\n\n# 堆是一种特殊的数据结构,它的通常的表示是它的根结点的值最大或者是最小。\n# python中heapq的使用\n# 列出一些常见的用法:\n# heap = []#建立一个常见的堆\n# heappush(heap,item)#往堆中插入一条新的值\n# item = heappop(heap)#弹出最小的值","sub_path":"leetcode/0212数据流中的第K大元素.py","file_name":"0212数据流中的第K大元素.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"404806470","text":"import pathlib\n\nfrom typing import Dict, List, Optional, Tuple, TypedDict, Union\n\n\nclass KeyInfo(TypedDict):\n start: Tuple[int, int]\n direction: str\n\n\nPuzzle = List[List[str]]\nKey = Dict[str, KeyInfo]\nFit = Optional[Tuple[str, List[Tuple[int, int]]]]\nFits = Dict[str, List[Tuple[int, int]]]\nSavePath = Union[str, pathlib.Path, None]\n","sub_path":"src/word_search_generator/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"276442368","text":"#!/usr/bin/env python\nfrom __future__ import print_function, division\n\ndef some_func(m_chi):\n\tvmin = []\n\tvmin0 = []\n\tintegral = []\n\tintegral0 = []\n\tE = []\n\tEkeV = []\n\n\t# f = open(\"integral-afterv4.txt\",\"r\")\n\tf = open(\"fort98.txt\",\"r\")\n\tfor line in f:\n\t 
vmin0.append(line.split()[0])\n\t integral0.append(line.split()[1])\n\tf.close()\n\n\tm_A = 2.18017e-25 #xenon mass in kg (131.293u)\n\t#m_chi = 1.782662e-25 #kg (100GeV)\n\tmu_Achi = (m_A * m_chi)/(m_A + m_chi) #kg\n\n\tfor i in range(0,len(vmin0)):\n\t vmin.append(float(vmin0[i]) * 1e3)\n\t integral.append(float(integral0[i]) * 1e-3)\n\t # vmin.append(float(vmin0[i]))\n\t # integral.append(float(integral0[i]))\n\t E.append(((float(vmin[i])**2) * (2*mu_Achi**2)) / m_A)\n\t EkeV.append(E[i]/1.6e-16)\n\n\n\tfor i in range(0,len(EkeV)):\n\t\tprint(i, vmin[i], integral[i], E[i], EkeV[i])\n\n\tlowerlim = 3#integrate R from (keV)\n\tupperlim = 30#to (keV)\n\n\tfor i in range(0,len(E)):\n\t if E[i]/1.6e-16 <= lowerlim:\n\t continue\n\t else:\n\t indexlow = i\n\t break\n\n\tfor i in range(0,len(E)):\n\t if E[i]/1.6e-16 <= upperlim:\n\t continue\n\t else:\n\t indexhigh = i\n\t break\n\n\tprint(indexlow,indexhigh)\n\nsome_func(0.15*1.782662e-25)","sub_path":"check-index.py","file_name":"check-index.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"215337484","text":"\"\"\"\nGIVEN: a list and the invariant\nEFFECT: prints a item contents if its not a list, else calls itself\nINVARIANT: The invariant is inv, which decides how many tab \"\\t\" to be\nput in front of each list variable when printing\nthe default value of inv is 0\nSTRATEGY: General recursion\nEXAMPLES: fun1(items,0) -> prints the items in the list called items\nwhere items -> [\"member1\",\"member2\",[\"member1.1\",\"member1.2\"]]\n\"\"\"\ndef fun1(item,indent=False,inv=0):\n #inv = 0\n for items in item:\n if isinstance(items,list):\n #inv+=1\n fun1(items,indent,inv+1)\n else:\n if indent:\n for tab in range(inv):\n print(\"\\t\",end='')\n print(items)\n\n","sub_path":"build/lib/nester.py","file_name":"nester.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"121671714","text":"#-*-coding=utf-8-*-\n\nimport re\nfrom xapian_case.utils import load_scws, cut\n\ns = load_scws()\n\nclass CorpusParser:\n\n def __init__(self, filename):\n self.filename = filename\n self.regex = re.compile('^#\\s*\\d+')\n self.corpus = dict()\n\n def parse(self):\n docid = 0\n with open(self.filename) as f:\n for line in f:\n if docid == 0:\n docid += 1\n continue\n text = line.strip()\n terms = cut(s, text)\n self.corpus[str(docid)] = terms\n docid += 1\n\n def get_corpus(self):\n return self.corpus\n\n\nclass QueryParser:\n\n def __init__(self, filename):\n self.filename = filename\n self.queries = []\n\n def parse(self):\n keywords = []\n f = open(self.filename)\n for line in f:\n if '!' 
in line:\n strip_no_querys = []\n querys = line.strip().lstrip('(').rstrip(')').split(' | ')\n for q in querys:\n strip_no_querys.append(q.split(' !')[0])\n strip_no_querys = '+'.join(strip_no_querys)\n line = strip_no_querys\n keywords_para = line.strip().lstrip('(').rstrip(')').split(' | ')\n keywords.extend(keywords_para)\n f.close()\n self.queries = keywords\n\n def get_queries(self):\n return self.queries\n\n","sub_path":"tools_weekly/BM25/src/parse_boat.py","file_name":"parse_boat.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"336940589","text":"import base64\nimport collections\nimport glob\nimport json\nimport os\nimport queue\nimport sublime\nimport uuid\n\nfrom sublime_plugin import (\n EventListener,\n ListInputHandler,\n TextCommand,\n TextInputHandler,\n ViewEventListener,\n WindowCommand,\n)\n\nfrom threading import Thread\n\nfrom .src import selectors\nfrom .src import sexp\nfrom .src import forms\nfrom .src import formatter\nfrom .src import indent\nfrom .src import inline\nfrom .src import paredit\nfrom .src import namespace\nfrom .src import test\nfrom .src.repl import info\nfrom .src.repl import history\nfrom .src.repl import tap\nfrom .src.repl.client import Client\nfrom .src.repl.session import Session\n\nfrom .src.log import log, start_logging, stop_logging\n\n\nstate = {\n \"active_repl_view\": collections.defaultdict(dict),\n \"client_by_view\": collections.defaultdict(dict),\n}\n\n\ndef make_color_scheme(cache_dir):\n \"\"\"\n Add the tutkain.repl.standard-streams scope into the current color scheme.\n\n We want stdout/stderr messages in the same REPL output view as evaluation results, but we don't\n want them to be use syntax highlighting. We can use view.add_regions() to add a scope to such\n messages such that they are not highlighted. Unfortunately, it is not possible to use\n view.add_regions() to only set the foreground color of a region. Furthermore, if we set the\n background color of the scope to use exactly the same color as the global background color of\n the color scheme, Sublime Text refuses to apply the scope.\n\n We therefore have to resort to this awful hack where every time the plugin is loaded or the\n color scheme changes, we generate a new color scheme in the Sublime Text cache directory. That\n color scheme defines the tutkain.repl.stdout scope which has an almost-transparent background\n color, creating the illusion that we're only setting the foreground color of the text.\n\n Yeah. 
So, please go give this issue a thumbs-up:\n\n https://github.com/sublimehq/sublime_text/issues/817\n \"\"\"\n view = sublime.active_window().active_view()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n if view:\n color_scheme = view.settings().get(\"color_scheme\")\n\n if color_scheme:\n (scheme_name, _) = os.path.splitext(os.path.basename(color_scheme))\n\n scheme_path = os.path.join(cache_dir, f\"{scheme_name}.sublime-color-scheme\")\n\n if not os.path.isfile(scheme_path):\n with open(scheme_path, \"w\") as scheme_file:\n scheme_file.write(\n json.dumps(\n {\n \"rules\": [\n {\n \"name\": \"Tutkain REPL Standard Output\",\n \"scope\": \"tutkain.repl.stdout\",\n \"background\": \"rgba(0, 0, 0, 0.01)\",\n },\n {\n \"name\": \"Tutkain REPL Standard Error\",\n \"scope\": \"tutkain.repl.stderr\",\n \"background\": \"rgba(0, 0, 0, 0.01)\",\n \"foreground\": view.style().get(\n \"redish\", \"crimson\"\n ),\n },\n ]\n }\n )\n )\n\n\ndef settings():\n return sublime.load_settings(\"Tutkain.sublime-settings\")\n\n\ndef plugin_loaded():\n start_logging(settings().get(\"debug\", False))\n\n preferences = sublime.load_settings(\"Preferences.sublime-settings\")\n\n cache_dir = os.path.join(sublime.cache_path(), \"Tutkain\")\n\n make_color_scheme(cache_dir)\n preferences.add_on_change(\"Tutkain\", lambda: make_color_scheme(cache_dir))\n\n\ndef plugin_unloaded():\n stop_logging()\n\n for window in sublime.windows():\n window.run_command(\"tutkain_disconnect\")\n\n view = sublime.active_window().active_view()\n view and inline.clear(view)\n\n preferences = sublime.load_settings(\"Preferences.sublime-settings\")\n preferences.clear_on_change(\"Tutkain\")\n\n\ndef print_characters(view, characters):\n if characters is not None:\n view.run_command(\"append\", {\"characters\": characters, \"scroll_to_end\": True})\n\n\ndef append_to_view(view, characters):\n if view and characters:\n view.set_read_only(False)\n print_characters(view, characters)\n view.set_read_only(True)\n view.run_command(\"move_to\", {\"to\": \"eof\"})\n\n\ndef get_active_repl_view(window):\n return state.get(\"active_repl_view\").get(window.id())\n\n\ndef set_active_repl_view(view):\n state[\"active_repl_view\"][view.window().id()] = view\n\n\ndef get_view_client(view):\n return view and state[\"client_by_view\"].get(view.id())\n\n\ndef get_active_view_client(window):\n return get_view_client(get_active_repl_view(window))\n\n\ndef get_active_view_sessions(window):\n client = get_active_view_client(window)\n return client and client.sessions_by_owner\n\n\ndef get_session_by_owner(window, owner):\n sessions = get_active_view_sessions(window)\n return sessions and sessions.get(owner)\n\n\ndef forget_repl_view(view):\n if view and view.id() in state[\"client_by_view\"]:\n del state[\"client_by_view\"][view.id()]\n\n\nclass TutkainClearOutputViewCommand(WindowCommand):\n def clear_view(self, view):\n if view:\n view.set_read_only(False)\n view.run_command(\"select_all\")\n view.run_command(\"right_delete\")\n view.set_read_only(True)\n inline.clear(self.window.active_view())\n\n def run(self):\n session = get_session_by_owner(self.window, \"user\")\n\n if session:\n self.clear_view(session.view)\n\n client = get_active_view_client(self.window)\n panel = tap.find_panel(self.window, client)\n panel and self.clear_view(panel)\n\n\nclass TutkainEvaluateFormCommand(TextCommand):\n def handler(self, region, session, file, ns, code, response, inline_result):\n def retry(ns, response):\n if response.get(\"status\") == 
[\"done\"]:\n session.send({\"op\": \"eval\", \"code\": code, \"file\": file, \"ns\": ns})\n\n if \"status\" in response and \"namespace-not-found\" in response[\"status\"]:\n ns_region = namespace.find_last(self.view)\n ns_form = sexp.outermost(self.view, ns_region.begin())\n\n if ns_form:\n session.send(\n {\n \"op\": \"eval\",\n \"code\": self.view.substr(ns_form.extent()),\n \"file\": file,\n },\n handler=lambda response: retry(ns, response),\n )\n elif inline_result and \"value\" in response:\n inline.clear(self.view)\n inline.show(self.view, region.end(), response[\"value\"])\n\n session.output(response)\n\n def get_eval_region(self, region, scope=\"outermost\", ignore={}):\n assert scope in {\"innermost\", \"outermost\"}\n\n if not region.empty():\n return region\n else:\n if scope == \"outermost\":\n outermost = sexp.outermost(self.view, region.begin(), ignore=ignore)\n return outermost and outermost.extent()\n elif scope == \"innermost\":\n innermost = sexp.innermost(self.view, region.begin(), edge=True)\n return innermost and innermost.extent()\n\n def run(self, edit, scope=\"outermost\", ignore={\"comment\"}, inline_result=False):\n session = get_session_by_owner(self.view.window(), \"user\")\n\n if session is None:\n self.view.window().status_message(\"ERR: Not connected to a REPL.\")\n else:\n for region in self.view.sel():\n eval_region = self.get_eval_region(\n region, scope=scope, ignore=set(ignore)\n )\n\n if eval_region:\n code = self.view.substr(eval_region)\n ns = namespace.find_declaration(self.view) or \"user\"\n file = self.view.file_name()\n\n session.output({\"in\": code, \"ns\": ns})\n\n log.debug(\n {\n \"event\": \"send\",\n \"scope\": \"form\",\n \"code\": code,\n \"file\": file,\n \"ns\": ns,\n }\n )\n\n def handler(response):\n self.handler(\n eval_region,\n session,\n file,\n ns,\n code,\n response,\n inline_result,\n )\n\n session.send(\n {\"op\": \"eval\", \"code\": code, \"file\": file, \"ns\": ns},\n handler=handler,\n )\n\n\nclass TutkainEvaluateViewCommand(TextCommand):\n def handler(self, session, response):\n if \"err\" in response:\n session.output({\"value\": \":tutkain/failed\\n\"})\n session.output(response)\n session.denounce(response)\n elif \"nrepl.middleware.caught/throwable\" in response:\n session.output(response)\n elif response.get(\"status\") == [\"done\"]:\n if not session.is_denounced(response):\n session.output({\"value\": \":tutkain/loaded\\n\"})\n\n def run(self, edit):\n window = self.view.window()\n session = get_session_by_owner(window, \"user\")\n\n if session is None:\n window.status_message(\"ERR: Not connected to a REPL.\")\n else:\n op = {\n \"op\": \"load-file\",\n \"file\": self.view.substr(sublime.Region(0, self.view.size())),\n }\n\n file_path = self.view.file_name()\n\n if file_path:\n op[\"file-name\"] = os.path.basename(file_path)\n op[\"file-path\"] = file_path\n\n session.send(op, handler=lambda response: self.handler(session, response))\n\n\nclass TutkainRunTestsInCurrentNamespaceCommand(TextCommand):\n def run(self, edit):\n session = get_session_by_owner(self.view.window(), \"plugin\")\n test.run(self.view, session)\n\n\nclass TutkainRunTestUnderCursorCommand(TextCommand):\n def run(self, edit):\n region = self.view.sel()[0]\n point = region.begin()\n test_var = test.current(self.view, point)\n\n if test_var:\n session = get_session_by_owner(self.view.window(), \"plugin\")\n test.run(self.view, session, test_vars=[test_var])\n\n\nclass HostInputHandler(TextInputHandler):\n def __init__(self, window):\n 
self.window = window\n\n def placeholder(self):\n return \"Host\"\n\n def validate(self, text):\n return len(text) > 0\n\n def initial_text(self):\n return \"localhost\"\n\n def read_port(self, path):\n with open(path, \"r\") as file:\n return (path, file.read())\n\n def possibilities(self, folder):\n yield os.path.join(folder, \".nrepl-port\")\n yield os.path.join(folder, \".shadow-cljs\", \"nrepl.port\")\n\n project_port_file = (\n self.window.project_data().get(\"tutkain\", {}).get(\"nrepl_port_file\")\n )\n\n if project_port_file:\n yield os.path.join(folder, project_port_file)\n\n def discover_ports(self):\n return [\n self.read_port(port_file)\n for folder in self.window.folders()\n for port_file in self.possibilities(folder)\n if os.path.isfile(port_file)\n ]\n\n def next_input(self, host):\n ports = self.discover_ports()\n\n if len(ports) > 1:\n return PortsInputHandler(ports)\n elif len(ports) == 1:\n return PortInputHandler(ports[0][1])\n else:\n return PortInputHandler(\"\")\n\n\nclass PortInputHandler(TextInputHandler):\n def __init__(self, default_value):\n self.default_value = default_value\n\n def name(self):\n return \"port\"\n\n def placeholder(self):\n return \"Port\"\n\n def validate(self, text):\n return text.isdigit()\n\n def initial_text(self):\n return self.default_value\n\n\nclass PortsInputHandler(ListInputHandler):\n def __init__(self, ports):\n self.ports = ports\n\n def name(self):\n return \"port\"\n\n def validate(self, text):\n return text.isdigit()\n\n def contract_path(self, path):\n return path.replace(os.path.expanduser(\"~\"), \"~\")\n\n def list_items(self):\n return list(\n map(lambda x: (f\"{x[1]} ({self.contract_path(x[0])})\", x[1]), self.ports)\n )\n\n\nclass TutkainEvaluateInputCommand(WindowCommand):\n def eval(self, session, code):\n session.output({\"in\": code, \"ns\": session.namespace})\n session.send({\"op\": \"eval\", \"code\": code, \"ns\": session.namespace})\n history.update(self.window, code)\n\n def noop(*args):\n pass\n\n def run(self):\n session = get_session_by_owner(self.window, \"user\")\n\n if session is None:\n self.window.status_message(\"ERR: Not connected to a REPL.\")\n else:\n view = self.window.show_input_panel(\n \"Input: \",\n history.get(self.window),\n lambda code: self.eval(session, code),\n self.noop,\n self.noop,\n )\n\n view.settings().set(\"tutkain_repl_input_panel\", True)\n view.assign_syntax(\"Clojure (Tutkain).sublime-syntax\")\n\n\nclass TutkainConnectCommand(WindowCommand):\n def handle_sideloader_provide_response(self, session, response):\n if \"status\" in response and \"unexpected-provide\" in response[\"status\"]:\n name = response[\"name\"]\n session.output({\"err\": f\"unexpected provide: {name}\"})\n\n def sideloader_provide(self, session, response):\n if \"name\" in response:\n name = response[\"name\"]\n\n op = {\n \"id\": response[\"id\"],\n \"op\": \"sideloader-provide\",\n \"type\": response[\"type\"],\n \"name\": name,\n }\n\n path = os.path.join(sublime.packages_path(), \"tutkain/clojure/src\", name)\n\n if os.path.isfile(path):\n log.debug({\"event\": \"sideloader/provide\", \"path\": path})\n\n with open(path, \"rb\") as file:\n op[\"content\"] = base64.b64encode(file.read()).decode(\"utf-8\")\n else:\n op[\"content\"] = \"\"\n\n session.send(\n op,\n handler=lambda response: self.handle_sideloader_provide_response(\n session, response\n ),\n )\n\n def create_sessions(self, client, sideloader, view, response):\n if response.get(\"status\") == [\"done\"]:\n info = response\n sideloader.info = 
info\n sideloader.output(response)\n\n def create_session(owner, response):\n if response.get(\"status\") == [\"done\"]:\n new_session_id = response[\"new-session\"]\n new_session = Session(new_session_id, client, view)\n new_session.info = info\n client.register_session(owner, new_session)\n\n sideloader.send(\n {\"op\": \"clone\", \"session\": sideloader.id},\n handler=lambda response: create_session(\"plugin\", response),\n )\n\n sideloader.send(\n {\"op\": \"clone\", \"session\": sideloader.id},\n handler=lambda response: create_session(\"user\", response),\n )\n\n def initialize(self, client, sideloader, view):\n def add_tap(response):\n if response.get(\"status\") == [\"done\"]:\n\n def handler(response):\n if response.get(\"status\") == [\"done\"]:\n sideloader.send(\n {\"op\": \"describe\"},\n handler=lambda response: self.create_sessions(\n client, sideloader, view, response\n ),\n )\n\n sideloader.send({\"op\": \"tutkain/add-tap\"}, handler=handler)\n\n def add_middleware(response):\n if response.get(\"status\") == [\"done\"]:\n sideloader.send(\n {\n \"op\": \"add-middleware\",\n \"middleware\": [\n \"tutkain.nrepl.middleware.test/wrap-test\",\n \"tutkain.nrepl.middleware.tap/wrap-tap\",\n ],\n },\n handler=add_tap,\n )\n\n sideloader.send(\n {\"op\": \"sideloader-start\"},\n handler=lambda response: self.sideloader_provide(sideloader, response),\n )\n\n sideloader.send(\n {\"op\": \"eval\", \"code\": \"\"\"(require 'tutkain.nrepl.util.pprint)\"\"\"},\n pprint=False,\n handler=add_middleware,\n )\n\n def print(self, view, item):\n if view:\n if {\n \"value\",\n \"nrepl.middleware.caught/throwable\",\n \"in\",\n \"versions\",\n \"summary\",\n } & item.keys():\n append_to_view(view, formatter.format(item))\n elif \"status\" in item and \"interrupted\" in item[\"status\"]:\n append_to_view(view, \":tutkain/interrupted\\n\")\n elif \"status\" in item and \"session-idle\" in item[\"status\"]:\n append_to_view(view, \":tutkain/nothing-to-interrupt\\n\")\n else:\n characters = formatter.format(item)\n\n if characters:\n append_to_view(view, characters)\n\n size = view.size()\n key = str(uuid.uuid4())\n regions = [sublime.Region(size - len(characters), size)]\n scope = (\n \"tutkain.repl.stderr\"\n if \"err\" in item\n else \"tutkain.repl.stdout\"\n )\n\n view.add_regions(\n key, regions, scope=scope, flags=sublime.DRAW_NO_OUTLINE\n )\n\n def print_loop(self, client):\n try:\n while True:\n item = client.recvq.get()\n\n if item is None:\n break\n\n log.debug({\"event\": \"printer/recv\", \"data\": item})\n\n session = client.sessions.get(item.get(\"session\"))\n\n if \"tap\" in item and settings().get(\"tap_panel\"):\n tap.show_panel(self.window, client)\n append_to_view(tap.find_panel(self.window, client), item[\"tap\"])\n elif session:\n self.print(session.view, item)\n\n view_size = session.view.size()\n last_char = session.view.substr(\n sublime.Region(view_size - 1, view_size)\n )\n\n if (\n \"status\" in item\n and \"done\" in item[\"status\"]\n and not (last_char == \"\\n\")\n ):\n append_to_view(session.view, \"\\n\")\n else:\n view = get_active_repl_view(self.window)\n self.print(view, item)\n finally:\n log.debug({\"event\": \"thread/exit\"})\n\n def set_layout(self):\n # Set up a two-row layout.\n #\n # TODO: Make configurable? 
This will clobber pre-existing layouts —\n # maybe add a setting for toggling this bit?\n\n if settings().get(\"layout\") == \"vertical\":\n layout = {\n \"cells\": [[0, 0, 1, 1], [1, 0, 2, 1]],\n \"cols\": [0.0, 0.5, 1.0],\n \"rows\": [0.0, 1.0],\n }\n else:\n layout = {\n \"cells\": [[0, 0, 1, 1], [0, 1, 1, 2]],\n \"cols\": [0.0, 1.0],\n \"rows\": [0.0, 0.75, 1.0],\n }\n\n self.window.set_layout(layout)\n\n def create_output_view(self, host, port):\n self.set_layout()\n active_view = self.window.active_view()\n\n view_count = len(self.window.views_in_group(1))\n suffix = \"\" if view_count == 0 else f\" ({view_count})\"\n\n view = self.window.new_file()\n view.set_name(f\"REPL | {host}:{port}{suffix}\")\n view.settings().set(\"line_numbers\", False)\n view.settings().set(\"gutter\", False)\n view.settings().set(\"is_widget\", True)\n view.settings().set(\"scroll_past_end\", False)\n view.settings().set(\"tutkain_repl_output_view\", True)\n view.set_read_only(True)\n view.set_scratch(True)\n\n view.assign_syntax(\"Clojure (Tutkain).sublime-syntax\")\n\n # Move the output view into the second row.\n self.window.set_view_index(view, 1, view_count)\n\n # Activate the output view and the view that was active prior to\n # creating the output view.\n self.window.focus_view(view)\n self.window.focus_view(active_view)\n\n return view\n\n def create_tap_panel(self, client):\n if not tap.find_panel(self.window, client):\n panel_name = tap.panel_name(self.window, client)\n panel = self.window.create_output_panel(panel_name)\n panel.settings().set(\"line_numbers\", False)\n panel.settings().set(\"gutter\", False)\n panel.settings().set(\"is_widget\", True)\n panel.settings().set(\"scroll_past_end\", False)\n panel.assign_syntax(\"Clojure (Tutkain).sublime-syntax\")\n\n def clone_handler(self, client, view, capabilities, response):\n if \"done\" in response.get(\"status\", []):\n session_id = response.get(\"new-session\")\n session = Session(session_id, client, view)\n\n # Start a worker thread that reads items from a queue and prints\n # them into an output panel.\n print_loop = Thread(daemon=True, target=self.print_loop, args=(client,))\n\n print_loop.name = \"tutkain.print_loop\"\n print_loop.start()\n\n if \"sideloader-start\" in capabilities[\"ops\"]:\n client.register_session(\"sideloader\", session)\n self.initialize(client, session, view)\n else:\n # Babashka\n client.register_session(\"plugin\", session)\n session.info = capabilities\n session.output(capabilities)\n\n def handler(response):\n if response.get(\"status\") == [\"done\"]:\n session = Session(response[\"new-session\"], client, view)\n session.info = capabilities\n client.register_session(\"user\", session)\n\n client.send({\"op\": \"clone\"}, handler=handler)\n\n def clone(self, client, view, response):\n if response.get(\"status\") == [\"done\"]:\n capabilities = response\n\n client.send(\n {\"op\": \"clone\"},\n handler=lambda response: self.clone_handler(\n client, view, capabilities, response\n ),\n )\n\n def describe(self, client, view):\n client.send(\n {\"op\": \"describe\"},\n handler=lambda response: self.clone(client, view, response),\n )\n\n def run(self, host, port):\n try:\n client = Client(host, int(port), queue.Queue(), queue.Queue()).go()\n self.create_tap_panel(client)\n view = self.create_output_view(host, port)\n state[\"client_by_view\"][view.id()] = client\n self.describe(client, view)\n except ConnectionRefusedError:\n self.window.status_message(f\"ERR: connection to {host}:{port} refused.\")\n\n def 
input(self, args):\n return HostInputHandler(self.window)\n\n\nclass TutkainDisconnectCommand(WindowCommand):\n def run(self):\n inline.clear(self.window.active_view())\n view = get_active_repl_view(self.window)\n view and view.close()\n\n\nclass TutkainNewScratchViewCommand(WindowCommand):\n def run(self):\n view = self.window.new_file()\n view.set_name(\"*scratch*\")\n view.set_scratch(True)\n view.assign_syntax(\"Clojure (Tutkain).sublime-syntax\")\n self.window.focus_view(view)\n\n\ndef completion_kinds():\n if int(sublime.version()) >= 4050:\n return {\n \"function\": sublime.KIND_FUNCTION,\n \"var\": sublime.KIND_VARIABLE,\n \"macro\": (sublime.KIND_ID_FUNCTION, \"m\", \"macro\"),\n \"namespace\": sublime.KIND_NAMESPACE,\n \"class\": sublime.KIND_TYPE,\n \"special-form\": (sublime.KIND_ID_FUNCTION, \"s\", \"special form\"),\n \"method\": sublime.KIND_FUNCTION,\n \"static-method\": sublime.KIND_FUNCTION,\n }\n else:\n return {}\n\n\nclass TutkainViewEventListener(ViewEventListener):\n def completion_item(self, item):\n return sublime.CompletionItem(\n item.get(\"candidate\"),\n kind=completion_kinds().get(item.get(\"type\"), sublime.KIND_AMBIGUOUS),\n )\n\n def handle_completions(self, completion_list, response):\n completions = map(self.completion_item, response.get(\"completions\", []))\n completion_list.set_completions(completions)\n\n def on_query_completions(self, prefix, locations):\n if int(sublime.version()) >= 4050:\n point = locations[0] - 1\n\n if self.view.match_selector(\n point, \"meta.symbol - meta.function.parameters\"\n ):\n session = get_session_by_owner(self.view.window(), \"plugin\")\n\n if session and session.supports(\"completions\"):\n scope = selectors.expand_by_selector(\n self.view, point, \"meta.symbol\"\n )\n\n if scope:\n prefix = self.view.substr(scope)\n\n completion_list = sublime.CompletionList()\n\n ns = namespace.find_declaration(self.view)\n\n op = {\"op\": \"completions\", \"prefix\": prefix}\n\n if ns:\n op[\"ns\"] = ns\n\n session.send(\n op,\n handler=lambda response: self.handle_completions(\n completion_list, response\n ),\n )\n\n return completion_list\n\n\ndef lookup(view, point, handler):\n is_repl_output_view = view.settings().get(\"tutkain_repl_output_view\")\n\n if (\n view.match_selector(point, \"source.clojure & meta.symbol\")\n and not is_repl_output_view\n ):\n symbol = selectors.expand_by_selector(view, point, \"meta.symbol\")\n\n if symbol:\n session = get_session_by_owner(view.window(), \"plugin\")\n\n # TODO: Cache lookup results?\n if session and session.supports(\"lookup\"):\n op = {\"op\": \"lookup\", \"sym\": view.substr(symbol)}\n ns = namespace.find_declaration(view)\n\n if ns:\n op[\"ns\"] = ns\n\n session.send(op, handler=handler)\n\n\nclass TutkainShowSymbolInformationCommand(TextCommand):\n def run(self, edit):\n lookup(\n self.view,\n self.view.sel()[0].begin(),\n lambda response: info.show_popup(\n self.view, self.view.sel()[0].begin(), response\n ),\n )\n\n\nclass TutkainGotoSymbolDefinitionCommand(TextCommand):\n def run(self, edit):\n lookup(\n self.view,\n self.view.sel()[0].begin(),\n lambda response: info.goto(\n self.view.window(), info.parse_location(response.get(\"info\"))\n ),\n )\n\n\nclass TutkainEventListener(EventListener):\n def on_modified_async(self, view):\n inline.clear(view)\n\n def on_deactivated_async(self, view):\n inline.clear(view)\n\n def on_activated(self, view):\n if view.settings().get(\"tutkain_repl_output_view\"):\n set_active_repl_view(view)\n\n def on_hover(self, view, point, 
hover_zone):\n lookup(view, point, lambda response: info.show_popup(view, point, response))\n\n def on_pre_close(self, view):\n if view and view.settings().get(\"tutkain_repl_output_view\"):\n window = view.window()\n client = get_view_client(view)\n\n if client:\n client.halt()\n forget_repl_view(view)\n\n # TODO: This sometimes crashes ST.\n #\n # window.set_layout({\n # 'cells': [[0, 0, 1, 1]],\n # 'cols': [0.0, 1.0],\n # 'rows': [0.0, 1.0]\n # })\n\n window.destroy_output_panel(tap.panel_name(window, client))\n\n if window:\n active_view = window.active_view()\n\n if active_view:\n active_view.run_command(\"tutkain_clear_test_markers\")\n window.focus_view(active_view)\n\n\nclass TutkainExpandSelectionCommand(TextCommand):\n def run(self, edit):\n view = self.view\n selections = view.sel()\n\n for region in selections:\n if not region.empty() or selectors.ignore(view, region.begin()):\n view.run_command(\"expand_selection\", {\"to\": \"scope\"})\n else:\n form = forms.find_adjacent(view, region.begin())\n form and selections.add(form)\n\n\nclass TutkainInterruptEvaluationCommand(WindowCommand):\n def run(self):\n session = get_session_by_owner(self.window, \"user\")\n\n if session is None:\n self.window.status_message(\"ERR: Not connected to a REPL.\")\n else:\n log.debug({\"event\": \"eval/interrupt\", \"id\": session.id})\n session.send({\"op\": \"interrupt\"})\n\n\nclass TutkainInsertNewlineCommand(TextCommand):\n def run(self, edit):\n indent.insert_newline_and_indent(self.view, edit)\n\n\nclass TutkainIndentSexpCommand(TextCommand):\n def run(self, edit, scope=\"outermost\", prune=False):\n for region in self.view.sel():\n if region.empty():\n if scope == \"outermost\":\n s = sexp.outermost(self.view, region.begin())\n elif scope == \"innermost\":\n s = sexp.innermost(self.view, region.begin())\n\n if s:\n indent.indent_region(self.view, edit, s.extent(), prune=prune)\n else:\n indent.indent_region(self.view, edit, region, prune=prune)\n\n\nclass TutkainPareditForwardCommand(TextCommand):\n def run(self, edit):\n paredit.move(self.view, True)\n\n\nclass TutkainPareditBackwardCommand(TextCommand):\n def run(self, edit):\n paredit.move(self.view, False)\n\n\nclass TutkainPareditOpenRoundCommand(TextCommand):\n def run(self, edit):\n paredit.open_bracket(self.view, edit, \"(\")\n\n\nclass TutkainPareditCloseRoundCommand(TextCommand):\n def run(self, edit):\n paredit.close_bracket(self.view, edit, \")\")\n\n\nclass TutkainPareditOpenSquareCommand(TextCommand):\n def run(self, edit):\n paredit.open_bracket(self.view, edit, \"[\")\n\n\nclass TutkainPareditCloseSquareCommand(TextCommand):\n def run(self, edit):\n paredit.close_bracket(self.view, edit, \"]\")\n\n\nclass TutkainPareditOpenCurlyCommand(TextCommand):\n def run(self, edit):\n paredit.open_bracket(self.view, edit, \"{\")\n\n\nclass TutkainPareditCloseCurlyCommand(TextCommand):\n def run(self, edit):\n paredit.close_bracket(self.view, edit, \"}\")\n\n\nclass TutkainPareditDoubleQuoteCommand(TextCommand):\n def run(self, edit):\n paredit.double_quote(self.view, edit)\n\n\nclass TutkainPareditForwardSlurpCommand(TextCommand):\n def run(self, edit):\n paredit.forward_slurp(self.view, edit)\n\n\nclass TutkainPareditBackwardSlurpCommand(TextCommand):\n def run(self, edit):\n paredit.backward_slurp(self.view, edit)\n\n\nclass TutkainPareditForwardBarfCommand(TextCommand):\n def run(self, edit):\n paredit.forward_barf(self.view, edit)\n\n\nclass TutkainPareditBackwardBarfCommand(TextCommand):\n def run(self, edit):\n 
paredit.backward_barf(self.view, edit)\n\n\nclass TutkainPareditWrapRoundCommand(TextCommand):\n def run(self, edit):\n paredit.wrap_bracket(self.view, edit, \"(\")\n\n\nclass TutkainPareditWrapSquareCommand(TextCommand):\n def run(self, edit):\n paredit.wrap_bracket(self.view, edit, \"[\")\n\n\nclass TutkainPareditWrapCurlyCommand(TextCommand):\n def run(self, edit):\n paredit.wrap_bracket(self.view, edit, \"{\")\n\n\nclass TutkainPareditForwardDeleteCommand(TextCommand):\n def run(self, edit):\n paredit.forward_delete(self.view, edit)\n\n\nclass TutkainPareditBackwardDeleteCommand(TextCommand):\n def run(self, edit):\n paredit.backward_delete(self.view, edit)\n\n\nclass TutkainPareditRaiseSexpCommand(TextCommand):\n def run(self, edit):\n paredit.raise_sexp(self.view, edit)\n\n\nclass TutkainPareditSpliceSexpCommand(TextCommand):\n def run(self, edit):\n paredit.splice_sexp(self.view, edit)\n\n\nclass TutkainPareditCommentDwimCommand(TextCommand):\n def run(self, edit):\n paredit.comment_dwim(self.view, edit)\n\n\nclass TutkainPareditSemicolonCommand(TextCommand):\n def run(self, edit):\n paredit.semicolon(self.view, edit)\n\n\nclass TutkainPareditSpliceSexpKillingForwardCommand(TextCommand):\n def run(self, edit):\n paredit.splice_sexp_killing_forward(self.view, edit)\n\n\nclass TutkainPareditSpliceSexpKillingBackwardCommand(TextCommand):\n def run(self, edit):\n paredit.splice_sexp_killing_backward(self.view, edit)\n\n\nclass TutkainPareditForwardKillFormCommand(TextCommand):\n def run(self, edit):\n paredit.kill_form(self.view, edit, True)\n\n\nclass TutkainPareditBackwardKillFormCommand(TextCommand):\n def run(self, edit):\n paredit.kill_form(self.view, edit, False)\n\n\nclass TutkainPareditBackwardMoveFormCommand(TextCommand):\n def run(self, edit):\n paredit.backward_move_form(self.view, edit)\n\n\nclass TutkainPareditForwardMoveFormCommand(TextCommand):\n def run(self, edit):\n paredit.forward_move_form(self.view, edit)\n\n\nclass TutkainPareditThreadFirstCommand(TextCommand):\n def run(self, edit):\n paredit.thread_first(self.view, edit)\n\n\nclass TutkainPareditThreadLastCommand(TextCommand):\n def run(self, edit):\n paredit.thread_last(self.view, edit)\n\n\nclass TutkainCycleCollectionTypeCommand(TextCommand):\n def run(self, edit):\n sexp.cycle_collection_type(self.view, edit)\n\n\nclass TutkainReplHistoryListener(EventListener):\n def on_deactivated(self, view):\n if view.settings().get(\"tutkain_repl_input_panel\"):\n history.index = None\n\n\nclass TutkainNavigateReplHistoryCommand(TextCommand):\n def run(self, edit, forward=False):\n history.navigate(self.view, edit, forward=forward)\n\n\nclass TutkainClearTestMarkersCommand(TextCommand):\n def run(self, edit):\n self.view.erase_regions(test.region_key(self.view, \"passes\"))\n self.view.erase_regions(test.region_key(self.view, \"failures\"))\n self.view.erase_regions(test.region_key(self.view, \"errors\"))\n\n\nclass TutkainOpenDiffWindowCommand(TextCommand):\n def run(self, edit, reference=\"\", actual=\"\"):\n self.view.window().run_command(\"new_window\")\n\n window = sublime.active_window()\n window.set_tabs_visible(False)\n window.set_minimap_visible(False)\n window.set_status_bar_visible(False)\n window.set_sidebar_visible(False)\n window.set_menu_visible(False)\n\n view = window.new_file()\n view.set_name(\"Tutkain: Diff\")\n view.assign_syntax(\"Clojure (Tutkain).sublime-syntax\")\n view.set_scratch(True)\n view.set_reference_document(reference)\n view.run_command(\"append\", {\"characters\": actual})\n 
view.set_read_only(True)\n\n # Hackity hack to try to ensure that the inline diff is open when the diff window opens.\n #\n # I have no idea why this works, or whether it actually even works.\n view.run_command(\"next_modification\")\n view.show(0)\n\n view.run_command(\"toggle_inline_diff\")\n\n\nclass TutkainShowUnsuccessfulTestsCommand(TextCommand):\n def get_preview(self, region):\n line = self.view.rowcol(region.begin())[0] + 1\n preview = self.view.substr(self.view.line(region)).lstrip()\n return f\"{line}: {preview}\"\n\n def run(self, args):\n view = self.view\n failures = test.regions(view, \"failures\")\n errors = test.regions(view, \"errors\")\n regions = failures + errors\n\n if regions:\n regions.sort()\n\n def goto(i):\n view.set_viewport_position(view.text_to_layout(regions[i].begin()))\n\n view.window().show_quick_panel(\n [self.get_preview(region) for region in regions],\n goto,\n flags=sublime.MONOSPACE_FONT,\n on_highlight=goto,\n )\n","sub_path":"package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":37557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"395810901","text":"import csv\nimport numpy as np\nimport pandas as pd\n#from uszipcode import SearchEngine\n\n\n# get LAT LONG from zipcodes:\n# zipcode = ZCDB[54115]\n# zipcode.latitude, zipcode.longitude\n\n#ZCDB = ZipCodeDatabase()\n\n\ndata = np.matrix([1, 2, 3, 4])\n\nwith open('../data/predictions.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n new = np.array([row[0], row[-3], row[-2], row[-1]])\n data = np.vstack((data, new))\n line_count += 1\n print(f'Processed {line_count} lines.')\n\ndata = np.delete(data, (0), axis=0)\ndataset = pd.DataFrame(data, columns=['name', 'zip', 'p', 'prediction'])\n#search = SearchEngine()\n\n\nimport plotly\nimport plotly.figure_factory as ff\n\ndf_sample = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/laucnty16.csv')\ndf_sample['State FIPS Code'] = df_sample['State FIPS Code'].apply(lambda x: str(x).zfill(2))\ndf_sample['County FIPS Code'] = df_sample['County FIPS Code'].apply(lambda x: str(x).zfill(3))\ndf_sample['FIPS'] = df_sample['State FIPS Code'] + df_sample['County FIPS Code']\n\nout = [0]*len(df_sample['Unemployment Rate (%)'])\n\nzip2fip = pd.read_csv(\"../data/zip_fips.csv\")\n\n\nfips = np.array(df_sample['FIPS'].astype(str).tolist())\n\nfor row in dataset.itertuples():\n S = zip2fip.loc[zip2fip['zip'] == int(row.zip), 'county']\n if not(S.empty):\n L = list(np.where(fips == str(S.iloc[0])))\n if L[0] != None and row.prediction == 'True':\n out[int(L[0])] += 1\n\n\ncolorscale = [\"#f7fbff\",\"#ebf3fb\",\"#deebf7\",\"#d2e3f3\",\"#c6dbef\",\"#b3d2e9\",\"#9ecae1\",\n \"#85bcdb\",\"#6baed6\",\"#57a0ce\",\"#4292c6\",\"#3082be\",\"#2171b5\",\"#1361a9\",\n \"#08519c\",\"#0b4083\",\"#08306b\"]\n\nendpts = list(np.linspace(0.001, np.max(out), len(colorscale) - 1))\nfips = df_sample['FIPS'].tolist()\nvalues = out\n\nfig = ff.create_choropleth(\n fips=fips, values=values, scope=['usa'],\n binning_endpoints=endpts, colorscale=colorscale,\n #show_state_data=False,\n #county_outline={'color': 'rgb(15, 15, 55)', 'width': 0.01},\n state_outline={'color': 'rgb(155, 155, 155)', 'width': 0.2},\n show_hover=True, centroid_marker={'opacity': 0},\n asp=2.9, title='USA by Total Likelihood of Bank Failure',\n legend_title='Total 
Failure'\n)\nfig['layout']['dragmode'] = 'pan'\nfig['layout']['margin']['b'] = 5\nfig['layout']['width'] = fig['layout']['width'] * 2\nfig['layout']['height'] = fig['layout']['height'] * 2\nfig['layout']['xaxis']['fixedrange'] = False\nfig['layout']['yaxis']['fixedrange'] = False\nplotly.offline.plot(fig, filename='choropleth_full_usa.html')\n\nkys = list(fig['layout'].keys())\nprint(kys)\nprint(list(fig['layout']['xaxis'].keys()))\nprint(list(fig['layout']['yaxis'].keys()))","sub_path":"bank_failure/interact.py","file_name":"interact.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"41665362","text":"def main():\n c, l = [int(x) for x in input().split()]\n matriz = [[10000000 for i in range(c)] for j in range(c)] #matriz de adjacência\n\n for _ in range(l):\n u, v, p = [int(x) for x in input().split()]\n\n if p < matriz[u][v]:\n matriz[u][v] = p\n matriz[v][u] = p\n\n Froyd_warshall(matriz, c)\n\n resposta = None\n for u in range(c):\n davez = max(matriz[u]) #pega a cidade mais distante da cidade 'u'\n if resposta is None or davez < resposta:\n resposta = davez\n\n print(resposta)\n\ndef Froyd_warshall(matriz, n):\n '''devolve a distancia de cada cidade para todas as outras'''\n for k in range(n):\n matriz[k][k] = 0\n for i in range(n):\n for j in range(n):\n if matriz[i][k] + matriz[k][j] < matriz[i][j]:\n matriz[i][j] = matriz[i][k] + matriz[k][j]\nmain()\n","sub_path":"Python/Aulas/tap/treino_1/2372.py","file_name":"2372.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"579771379","text":"import os\nimport time\nfrom io import StringIO, BytesIO\nfrom xml.dom.minidom import Document\nimport os\nfrom pathlib import Path\nfrom PIL import Image\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.db.models import Avg, Sum\nfrom django.shortcuts import render, redirect\nimport datetime\n# Create your views here.\nfrom textwrap import wrap\n\nfrom django.http import HttpResponse, HttpResponsePermanentRedirect\n\nfrom django.contrib.auth.decorators import login_required\n\n# restrict access to logged in users with an check if he is authenticated\n# see django docs\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.template.response import TemplateResponse\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.lib.units import cm\nfrom reportlab.lib.utils import ImageReader\n\nfrom .models import requirement, testcase, testrun, testcase_schritt, note, testrun_schritt, user_erweitern, projekt\n\nfrom django.views import generic\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse, reverse_lazy\n\nfrom .forms import RequirementForm, TestCaseForm, TestRunForm, TestCase_Schritt_Form, TestCase_Schritt_Form2, GroupForm, \\\n Note_Form, SignUpForm\nfrom .choices import *\nfrom django.shortcuts import render\n\n# relative import of forms\nfrom .forms import TestCase_Schritte_Form\n\n# importing formset_factory\nfrom django.forms import modelformset_factory, inlineformset_factory\nfrom django.db.models import Avg, Max, Min\n\n#Ende der Imports\n########################################################################################################################\n\n@login_required()\ndef 
view_dashboard(request):\n #Variable, um Fehler anzuzeigen, die man noch beheben sollte vor dem Erzeugen der PDF.\n check_for_errors = []\n\n #eingeloggten User und seine Gruppennummer erhalten\n #danach alle User mit derselben Gruppennummer speichern\n user = User.objects.get(username=request.user.username)\n user_gruppennummer = user.user_erweitern.gruppennummer\n all_users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n\n #Code um die Elemente der eigenen Gruppe zu bekommen\n #users ist wie all_users und wird zur Sicherheit bei allen ELementen definiert\n #Requirements\n ########################################################################################################################\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n req_for_usergroup = requirement.objects.filter(req_fk_ersteller__in=users)\n num_req = req_for_usergroup.count()\n\n #eigene Anzahl an Requirements\n num_req_eigene = req_for_usergroup.filter(req_fk_ersteller=request.user)\n num_req_eigene = num_req_eigene.count()\n\n #abgedeckte Requirements = Requiremtens mit TestCase\n lis_no_testcase = []\n lis_yes_testcase = []\n for req in req_for_usergroup:\n if req.testcase_set.all().exists():\n #wenn es mindestens 1 TestCase gibt\n lis_yes_testcase.append(req)\n else:\n #wenn es keinen TestCase gibt\n lis_no_testcase.append(req)\n\n num_yes_testcase = len(lis_yes_testcase)\n num_no_testcase = len(lis_no_testcase)\n\n if int(num_no_testcase) > 0:\n check_for_errors.append(\"Es gibt noch Requirements ohne TestCases!\")\n\n #TestCases\n ########################################################################################################################\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n testc_for_usergroup = testcase.objects.filter(testc_fk_ersteller__in=users)\n num_testc = testc_for_usergroup.count()\n\n #eigene Anzahl an TestCases\n num_testc_eigene = testc_for_usergroup.filter(testc_fk_ersteller=request.user)\n num_testc_eigene = num_testc_eigene.count()\n\n #abgedeckte TestCases = TestCases mit TestRuns\n lis_no_testrun_or_no_run = [] #enthält TestCases ohne TestRun\n lis_yes_testrun_run_failed = []\n lis_yes_testrun_run_passed = []\n\n for testc in testc_for_usergroup:\n if testc.testrun_set.all().exists():\n #bei beiden Fällen wird der letzte Run überprüft\n if testc.testrun_set.all().latest('testr_datum_aenderung').testr_status == 'f': #failed\n lis_yes_testrun_run_failed.append(testc)\n elif testc.testrun_set.all().latest('testr_datum_aenderung').testr_status == 'p': #passed\n lis_yes_testrun_run_passed.append(testc)\n else: #wenn es kein TestRun gibt\n lis_no_testrun_or_no_run.append(testc)\n\n num_lis_no_testrun_or_no_run = len(lis_no_testrun_or_no_run)\n num_lis_yes_testrun_run_failed = len(lis_yes_testrun_run_failed)\n num_lis_yes_testrun_run_passed = len(lis_yes_testrun_run_passed)\n\n if int(num_lis_no_testrun_or_no_run) > 0:\n check_for_errors.append(\"Es gibt noch TestCases ohne TestRuns!\")\n\n if int(num_lis_yes_testrun_run_failed) > 0:\n check_for_errors.append(\"Es gibt noch TestCases mit failed TestRuns!\")\n\n #TestRuns\n ########################################################################################################################\n users = 
User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n testr_for_usergroup = testrun.objects.filter(testr_fk_ersteller__in=users)\n num_testr = testr_for_usergroup.count()\n\n #eigene Anzahl an TestRuns\n num_testr_eigene = testr_for_usergroup.filter(testr_fk_ersteller=request.user)\n num_testr_eigene = num_testr_eigene.count()\n\n num_lis_no_testrun = testr_for_usergroup.filter(testr_status='n')\n num_lis_failed_testrun = testr_for_usergroup.filter(testr_status='f')\n num_lis_passed_testrun = testr_for_usergroup.filter(testr_status='p')\n\n num_lis_no_testrun = len(num_lis_no_testrun)\n num_lis_failed_testrun = len(num_lis_failed_testrun)\n num_lis_passed_testrun = len(num_lis_passed_testrun)\n\n if int(num_lis_no_testrun) > 0:\n check_for_errors.append(\"Es gibt noch TestRuns ohne Runs!\")\n\n #der Professor hat noch ein Form mit dem die Gruppennummer geändert werden kann\n if request.user.user_erweitern.rolle == 'p':\n #Professor Seite\n if request.method == 'POST':\n form = GroupForm(request.POST)\n if form.is_valid():\n user.user_erweitern.gruppennummer = form.cleaned_data['group_form_group']\n user.user_erweitern.save()\n\n return HttpResponseRedirect(reverse('aut:view_dashboard'))\n else:\n form = GroupForm(initial={'group_form_group': user.user_erweitern.gruppennummer,\n })\n #Context übergeben mit dem Form\n context = {\n 'user_gruppennummer': user_gruppennummer,\n 'all_users': all_users,\n\n 'num_req': num_req,\n 'num_req_eigene': num_req_eigene,\n 'num_yes_testcase': num_yes_testcase,\n 'num_no_testcase': num_no_testcase,\n\n 'num_testc': num_testc,\n 'num_testc_eigene': num_testc_eigene,\n 'num_lis_no_testrun_or_no_run': num_lis_no_testrun_or_no_run,\n 'num_lis_yes_testrun_run_failed': num_lis_yes_testrun_run_failed,\n 'num_lis_yes_testrun_run_passed': num_lis_yes_testrun_run_passed,\n\n 'num_testr': num_testr,\n 'num_testr_eigene': num_testr_eigene,\n 'num_lis_no_testrun': num_lis_no_testrun,\n 'num_lis_failed_testrun': num_lis_failed_testrun,\n 'num_lis_passed_testrun': num_lis_passed_testrun,\n\n 'check_for_errors': check_for_errors,\n\n 'form': form,\n }\n return render(request, 'aut/010_dashboard.html', context=context)\n\n #Context ohne Form, da der Student das nicht hat\n context = {\n 'user_gruppennummer': user_gruppennummer,\n 'all_users': all_users,\n\n 'num_req': num_req,\n 'num_req_eigene': num_req_eigene,\n 'num_yes_testcase': num_yes_testcase,\n 'num_no_testcase': num_no_testcase,\n\n 'num_testc': num_testc,\n 'num_testc_eigene': num_testc_eigene,\n 'num_lis_no_testrun_or_no_run': num_lis_no_testrun_or_no_run,\n 'num_lis_yes_testrun_run_failed': num_lis_yes_testrun_run_failed,\n 'num_lis_yes_testrun_run_passed': num_lis_yes_testrun_run_passed,\n\n 'num_testr': num_testr,\n 'num_testr_eigene': num_testr_eigene,\n 'num_lis_no_testrun': num_lis_no_testrun,\n 'num_lis_failed_testrun': num_lis_failed_testrun,\n 'num_lis_passed_testrun': num_lis_passed_testrun,\n\n 'check_for_errors': check_for_errors,\n }\n return render(request, 'aut/010_dashboard.html', context=context)\n\n#Ende des Dashboards\n########################################################################################################################\n\ndef view_requirement(request):\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer)\n req_for_usergroup = requirement.objects.filter(req_fk_ersteller__in=users).order_by('-req_pk_requirementid')\n\n context = {\n 'requirements': 
req_for_usergroup,\n }\n return render(request, 'aut/010_requirement.html', context=context)\n########################################################################################################################\n\ndef edit_requirement(request, pk=None):\n #das aktuelle Requirement\n requ_instance, created = requirement.objects.get_or_create(req_pk_requirementid=pk)\n if created == True:\n requ_instance.req_fk_ersteller = request.user\n requ_instance.save()\n\n #Alle TestCases zum Auswählen\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n testc_for_usergroup = testcase.objects.filter(testc_fk_ersteller__in=users)\n\n if request.method == 'POST':\n form = RequirementForm(request.POST,tecs=testc_for_usergroup)\n\n if request.POST.get(\"delete_requirement\"):\n name = \"ID:\" + str(requ_instance.req_pk_requirementid) + \" Name: \" + str(requ_instance.req_name)\n requ_instance.delete()\n return HttpResponse(name + \" wurde gelöscht\")\n\n if form.is_valid():\n requ_instance.req_kommentar = form.cleaned_data['req_form_kommentar']\n requ_instance.req_name = form.cleaned_data['req_form_name']\n requ_instance.req_beschreibung = form.cleaned_data['req_form_beschreibung']\n\n #Für alle TestCases das jetzige Requirement speichern als Foreign Key\n #das hier ist die andere Seite der Many-to-Many Relationship\n a = form.cleaned_data['req_form_fk_testcase']\n if a:\n for b in a:\n b.testc_fk_requirement.add(requ_instance)\n else:\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n testc_for_usergroup = testcase.objects.filter(testc_fk_ersteller__in=users)\n for b in testc_for_usergroup:\n b.testc_fk_requirement.remove(requ_instance)\n\n\n requ_instance.save()\n return HttpResponseRedirect(reverse('aut:requirement_change', kwargs={'pk': pk}))\n else:\n form = RequirementForm(initial={'req_form_kommentar': requ_instance.req_kommentar,\n 'req_form_name': requ_instance.req_name,\n 'req_form_beschreibung': requ_instance.req_beschreibung,\n 'req_form_fk_testcase': requ_instance.testcase_set.all(),\n\n }, tecs = testc_for_usergroup)\n\n context = {\n 'form': form,\n 'requ_instance': requ_instance,\n 'testc_for_usergroup':testc_for_usergroup\n }\n\n return render(request, 'aut/020_requirement_anpassen.html', context)\n#Ende der Requirements\n########################################################################################################################\n\ndef view_testcase(request):\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n testc_for_usergroup = testcase.objects.filter(testc_fk_ersteller__in=users).order_by('-testc_pk_testcaseid')\n\n context = {\n 'testcases': testc_for_usergroup,\n }\n return render(request, 'aut/010_testcase.html', context=context)\n########################################################################################################################\n\ndef edit_testcase(request, pk=None):\n testc_instance, created = testcase.objects.get_or_create(testc_pk_testcaseid=pk)\n if created == True:\n testc_instance.testc_fk_ersteller = request.user\n testc_instance.save()\n\n #oder einfach testcase instance\n tc = testcase.objects.get(testc_pk_testcaseid=testc_instance.testc_pk_testcaseid)\n\n TestCase_Schritt_FormSet = inlineformset_factory(testcase, testcase_schritt,\n fields=('schritt_schritte', 
'schritt_erwartetesergebnis'),\n can_delete=True, extra=1)\n\n\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n req_for_usergroup = requirement.objects.filter(req_fk_ersteller__in=users)\n\n #Die TestCases sollen die zugehörigen TestRuns anzeigen können\n #alle TestRuns außer die, die noch nicht liefen\n testruns = testc_instance.testrun_set.exclude(testr_status='n')\n\n if request.method == 'POST':\n if request.POST.get(\"delete_testcase\"):\n testruns = testrun.objects.filter(testr_fk_testcaseid=testc_instance)\n for run in testruns:\n run.delete()\n testc_schritte = testcase_schritt.objects.filter(schritt_fk_testcase=testc_instance)\n for schritt in testc_schritte:\n schritt.delete()\n\n name = \"ID:\" + str(testc_instance.testc_pk_testcaseid) + \" Name: \" + str(testc_instance.testc_name)\n testc_instance.delete()\n return HttpResponse(name + \" wurde gelöscht\")\n\n form = TestCaseForm(request.POST, reqs=req_for_usergroup)\n schritt_form = TestCase_Schritt_Form(request.POST) #das kann weg\n formset = TestCase_Schritt_FormSet(request.POST, instance=tc)\n\n if formset.is_valid():\n formset.save()\n\n\n if form.is_valid():\n testc_instance.testc_vorbedingung = form.cleaned_data['testc_form_vorbedingung']\n testc_instance.testc_kommentar = form.cleaned_data['testc_form_kommentar']\n testc_instance.testc_name = form.cleaned_data['testc_form_name']\n testc_instance.testc_beschreibung = form.cleaned_data['testc_form_beschreibung']\n testc_instance.testc_fk_requirement.set(form.cleaned_data['testc_form_fk_requirement'])\n\n testc_instance.testc_fk_ersteller = request.user\n\n testc_instance.save()\n\n return HttpResponseRedirect(reverse('aut:testcase_change', kwargs={'pk': testc_instance.testc_pk_testcaseid}))\n else:\n\n formset = TestCase_Schritt_FormSet(instance=tc)\n schritt_form = TestCase_Schritt_Form()\n form = TestCaseForm(initial={'testc_form_name': testc_instance.testc_name,\n 'testc_form_beschreibung': testc_instance.testc_beschreibung,\n 'testc_form_kommentar': testc_instance.testc_kommentar,\n 'testc_form_vorbedingung': testc_instance.testc_vorbedingung,\n 'testc_form_fk_requirement': requirement.objects.filter(testcase=testc_instance),\n\n }, reqs=req_for_usergroup)\n\n schritte_instance = testcase_schritt.objects.filter(schritt_fk_testcase=testc_instance)\n context = {\n 'form': form,\n 'schritt_form': schritt_form,\n 'testc_instance': testc_instance,\n 'schritte_instance': schritte_instance,\n 'testruns': testruns,\n 'formset': formset,\n 'req_for_usergroup':req_for_usergroup\n }\n\n return render(request, 'aut/020_testcase_anpassen.html', context)\n#Ende der TestCases\n########################################################################################################################\n\ndef view_testrun(request):\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n testr_for_usergroup = testrun.objects.filter(testr_fk_ersteller__in=users).order_by('-testr_pk_testrunid')\n\n context = {\n 'testruns': testr_for_usergroup,\n }\n return render(request, 'aut/010_testrun.html', context=context)\n########################################################################################################################\n\ndef edit_testrun(request, pk=None):\n testr_instance, created = testrun.objects.get_or_create(testr_pk_testrunid=pk)\n if created == True:\n testr_instance.testr_fk_ersteller = request.user\n 
testr_instance.testr_status = 'n'\n testr_instance.save()\n\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n testc_for_usergroup = testcase.objects.filter(testc_fk_ersteller__in=users)\n req_for_usergroup = requirement.objects.filter(req_fk_ersteller__in=users)\n testc_with_requirement = testc_for_usergroup.filter(testc_fk_requirement__in=req_for_usergroup).filter(testcase_schritt__isnull=False).distinct()\n\n\n if request.method == 'POST':\n form = TestRunForm(request.POST,tecs=testc_with_requirement)\n\n if request.POST.get(\"delete_testrun\"):\n name = \"ID:\" + str(testr_instance.testr_pk_testrunid) + \" Name: \" + str(testr_instance.testr_name)\n testr_instance.delete()\n return HttpResponse(name + \" wurde gelöscht\")\n\n if form.is_valid():\n testr_instance.testr_name = form.cleaned_data['testr_form_name']\n testr_instance.testr_kommentar = form.cleaned_data['testr_form_kommentar']\n testr_instance.testr_beschreibung = form.cleaned_data['testr_form_beschreibung']\n\n testr_instance.testr_fk_testcaseid = form.cleaned_data['testr_form_fk_testcase']\n testr_instance.testr_fk_ersteller = request.user\n testr_instance.save()\n\n if request.POST.get(\"save_and_testrun\"):\n link = reverse('aut:testrun_run', kwargs={'pk': pk})\n html = \"\"\n return HttpResponse(html)\n\n return HttpResponseRedirect(reverse('aut:testrun_change', kwargs={'pk': pk}))\n else:\n form = TestRunForm(initial={'testr_form_name': testr_instance.testr_name,\n 'testr_form_kommentar': testr_instance.testr_kommentar,\n 'testr_form_beschreibung': testr_instance.testr_beschreibung,\n 'testr_form_status': testr_instance.testr_status,\n 'testr_form_fk_testcase':testr_instance.testr_fk_testcaseid\n\n },tecs=testc_with_requirement)\n\n\n testc_schritte = testcase_schritt.objects.filter(schritt_fk_testcase=testr_instance.testr_fk_testcaseid)\n testr_schritte = testrun_schritt.objects.filter(schritt_fk_testrun=testr_instance).order_by('schritt_pk_id')\n for schritt in testr_schritte:\n print(schritt.schritt_schritte)\n\n context = {\n 'form': form,\n 'testr_instance': testr_instance,\n 'testc_schritte':testc_schritte,\n 'testr_schritte':testr_schritte\n\n }\n\n return render(request, 'aut/020_testrun_anpassen.html', context)\n########################################################################################################################\n\ndef testrun_run(request, pk):\n testr_instance = get_object_or_404(testrun, testr_pk_testrunid=pk)\n\n testc_schritte = testcase_schritt.objects.filter(schritt_fk_testcase=testr_instance.testr_fk_testcaseid)\n\n\n zeilen = testc_schritte.count()\n TestRun_Schritt_FormSet = inlineformset_factory(testrun, testrun_schritt,fields=('schritt_tatsaechlichesergebnis', 'schritt_ergebnis'),can_delete=False, widgets={'schritt_ergebnis': forms.RadioSelect(choices=RUN_STATUS)}, extra=zeilen)\n\n\n\n #Zeit speichern bei Abgabe\n if request.method == 'POST':\n formset = TestRun_Schritt_FormSet(request.POST, instance=testr_instance)\n\n if formset.is_valid():\n formset.save()\n list_mit_Ergebnissen = []\n for form in formset:\n if form.cleaned_data:\n dran = form.cleaned_data['schritt_ergebnis']\n else:\n dran = 'f'\n\n list_mit_Ergebnissen.append(dran)\n form.save()\n formset.save()\n # Generator\n if all(item == list_mit_Ergebnissen[0] == 'p' for item in list_mit_Ergebnissen):\n testr_instance.testr_status = 'p'\n testr_instance.save()\n else:\n testr_instance.testr_status = 'f'\n testr_instance.save()\n\n if 
request.POST.get(\"TIME\"):\n testr_instance.testr_dauer = int(request.POST.get(\"TIME\"))\n testr_instance.save()\n\n testc = testcase.objects.get(testc_pk_testcaseid=testr_instance.testr_fk_testcaseid.testc_pk_testcaseid)\n testr_instance.testr_testc_datum = testc.testc_datum_aenderung\n testr_instance.save()\n\n # hier die Zuordnung der Schritte\n testr_schritte = testrun_schritt.objects.filter(schritt_fk_testrun=testr_instance)\n testc_schritte = testc_schritte.order_by('schritt_pk_id')\n\n aktueller_testc = testc_schritte.first()\n\n for schritt in testr_schritte:\n schritt.schritt_schritte = aktueller_testc.schritt_schritte\n schritt.schritt_erwartetesergebnis = aktueller_testc.schritt_erwartetesergebnis\n\n schritt.schritt_fk_testcase_schritt = aktueller_testc\n schritt.save()\n aktueller_testc = testc_schritte.filter(schritt_pk_id__gt=aktueller_testc.schritt_pk_id).order_by( 'schritt_pk_id').first()\n\n #pass\n #testr_schritte = testrun_schritt.objects.filter(schritt_fk_testrun=testr_instance)\n #for schritt in testc_schritte:\n # schritt.schritt_schritte = testcase schritt.get(foreign kes vom testr schritt) . die Sache\n # schritt.schritt_fk_testcase_schritt\n # noch die Zuordnung vob den Teytschritten zuerienander\n #\n #testr_schritte.schritt_schritte = testc_schritte.schritt_schritte\n #testr_schritte.schritt_erwartetesergebnis = testc_schritte.schritt_erwartetesergebnis\n #testr_schritte.save()\n #print(testr_schritte)\n #pass\n\n\n\n return HttpResponseRedirect(reverse('aut:view_testrun'))\n #return HttpResponseRedirect(reverse('aut:testrun_change', kwargs={'pk': pk}))\n\n else:\n\n formset = TestRun_Schritt_FormSet(instance=testr_instance, )\n\n if testr_instance.testrun_schritt_set.exists:\n testr_schritte = testrun_schritt.objects.filter(schritt_fk_testrun=testr_instance)\n print(testr_schritte)\n liste_mit_dictios = []\n for schritt in testr_schritte:\n dictio = {}\n dictio['schritt_tatsaechlichesergebnis'] = schritt.schritt_tatsaechlichesergebnis\n dictio['schritt_ergebnis'] = schritt.schritt_ergebnis\n liste_mit_dictios.append(dictio)\n\n print(liste_mit_dictios)\n\n formset.initial = liste_mit_dictios\n\n\n context = {\n 'testr_instance': testr_instance,\n 'testc_schritte': testc_schritte,\n 'formset': formset,\n\n }\n\n return render(request, 'aut/020_testrun_run.html', context)\n#Ende der TestRuns\n########################################################################################################################\n\ndef view_statistik(request):\n #TestCase Coverage: Requriements mit TestCase / alle Requirements\n #Für die Gruppe die Requirements\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n req_for_usergroup = requirement.objects.filter(req_fk_ersteller__in=users)\n\n int_req_with_testcase = 0\n for req in req_for_usergroup:\n if req.testcase_set.all().exists():\n int_req_with_testcase += 1\n num_req = req_for_usergroup.count()\n if num_req != 0:\n TestCase_Coverage = int_req_with_testcase/num_req * 100 #Für Prozent\n elif num_req == 0:\n TestCase_Coverage = 0\n\n #TestRun Coverage: Requirement mit erfolgreichem TestRun / alle Requirements\n #Für die Gruppe die Requirements\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n req_for_usergroup = requirement.objects.filter(req_fk_ersteller__in=users)\n\n int_req_with_testrun = 0\n for req in req_for_usergroup:\n all_passed = True\n if 
req.testcase_set.all().exists(): #pro Req die TestCases TEST\n for testc in req.testcase_set.all(): #pro Req die TestCases\n if testc.testrun_set.all().exists():\n if testc.testrun_set.all().latest('testr_datum_aenderung').testr_status == 'f' or testc.testrun_set.all().latest('testr_datum_aenderung').testr_status == 'n':\n all_passed = False\n else:\n all_passed = False\n else:\n all_passed = False\n\n if all_passed == True:\n int_req_with_testrun += 1\n\n num_req = req_for_usergroup.count()\n if num_req != 0:\n TestRun_Coverage = int_req_with_testrun/num_req * 100 #Für Prozent\n elif num_req == 0:\n TestRun_Coverage = 0\n\n #Alle ELemente für eine Gruppe an Studenten\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n req_for_usergroup = requirement.objects.filter(req_fk_ersteller__in=users)\n testc_for_usergroup = testcase.objects.filter(testc_fk_ersteller__in=users)\n testr_for_usergroup = testrun.objects.filter(testr_fk_ersteller__in=users)\n\n #Projektstatisitk über Studenten der gleichen Gruppe:\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n\n #Zeiten der Testruns:\n min_testrun = testr_for_usergroup.aggregate(Min('testr_dauer'))\n max_testrun = testr_for_usergroup.aggregate(Max('testr_dauer'))\n durchschnitt_testrun = testr_for_usergroup.aggregate(Avg('testr_dauer'))\n summe_testrun = testr_for_usergroup.aggregate(Sum('testr_dauer'))\n\n #Usergruppe:\n usergruppe = request.user.user_erweitern.gruppennummer\n\n\n context = {\n 'all_requirements': req_for_usergroup,\n 'all_testcases': testc_for_usergroup,\n 'all_testruns': testr_for_usergroup,\n 'TestCase_Coverage': TestCase_Coverage,\n 'TestRun_Coverage': TestRun_Coverage,\n 'users': users,\n 'min_testrun': min_testrun,\n 'max_testrun': max_testrun,\n 'durchschnitt_testrun': durchschnitt_testrun,\n 'summe_testrun':summe_testrun,\n 'usergruppe': usergruppe\n }\n\n return render(request, 'aut/010_statistik.html', context)\n\n\ndef req_desc(request):\n return render(request, 'aut/040_req_desc.html')\ndef testc_desc(request):\n return render(request, 'aut/040_testc_desc.html')\ndef testr_desc(request):\n return render(request, 'aut/040_testr_desc.html')\n\n#Für die TestCases die Schritte\ndef formset_view(request, id):\n tc = testcase.objects.get(testc_pk_testcaseid=id)\n\n # creating a formset\n TestCase_Schritt_FormSet = inlineformset_factory(testcase, testcase_schritt, fields=('schritt_schritte', 'schritt_erwartetesergebnis'), can_delete=True, extra=1)\n\n if request.method == 'POST':\n formset = TestCase_Schritt_FormSet(request.POST, instance=tc)\n if formset.is_valid():\n formset.save()\n return redirect('aut:schritte', id=tc.testc_pk_testcaseid)\n\n formset = TestCase_Schritt_FormSet(instance=tc)\n return render(request, \"aut/home.html\", {'formset': formset})\n\n#Für die TestRuns die Schritte\ndef formset_view_run(request, id):\n tc = testcase.objects.get(testc_pk_testcaseid=id)\n\n # creating a formset\n TestCase_Schritt_FormSet = inlineformset_factory(testcase, testrun_schritt, fields=('schritt_tatsaechlichesergebnis', 'schritt_ergebnis'), can_delete=False, extra=0)\n\n if request.method == 'POST':\n formset = TestCase_Schritt_FormSet(request.POST, instance=tc)\n if formset.is_valid():\n formset.save()\n return redirect('aut:schritte_run', id=tc.testc_pk_testcaseid)\n\n formset = TestCase_Schritt_FormSet(instance=tc)\n return render(request, 
\"aut/schritte_run.html\", {'formset': formset})\n\n\n#NotePad als Spielerei\ndef form_note(request):\n #get über den foreign key vom usre, wenn der schon eins hat dann ok, wenn nciht machent\n note_instance, created = note.objects.get_or_create(note_fk_ersteller=request.user)\n\n if request.method == 'POST':\n form = Note_Form(request.POST)\n\n if form.is_valid():\n note_instance.notes = form.cleaned_data['note_form']\n note_instance.save()\n return HttpResponseRedirect(reverse('aut:notepad'))\n else:\n form = Note_Form(initial={'note_form': note_instance.notes,})\n\n context = {\n 'form': form,\n 'note_instance': note_instance\n }\n\n return render(request, 'aut/030_special_notepad_inner.html', context)\n\n\nimport io\nfrom django.http import FileResponse\nfrom reportlab.pdfgen import canvas\n\ndef TestDocument(request):\n\n\n buffer = io.BytesIO()\n\n # Create the PDF object, using the buffer as its \"file.\"\n p = canvas.Canvas(buffer, pagesize=A4)\n p.setFont(\"Times-Roman\", 12)\n\n # Draw things on the PDF. Here's where the PDF generation happens.\n # See the ReportLab documentation for the full list of functionality.\n users = User.objects.filter(user_erweitern__gruppennummer=request.user.user_erweitern.gruppennummer).filter(user_erweitern__rolle='s')\n\n text = p.beginText(1 * cm, 29* cm)\n text.setFont(\"Times-Roman\", 12)\n text.textLine(str(request.user.user_erweitern.gruppennummer))\n text.textLine(\"Gruppenmitglieder\")\n\n for user in users:\n text.textLine(str(user))\n\n p.drawText(text)\n # ###################################\n # 3) Draw a line\n p.line(0* cm, 25 * cm, 30*cm, 25 * cm)\n\n\n\n\n text = p.beginText(1*cm, 24*cm)\n text.setFont(\"Times-Roman\", 12)\n text.textLine(\"Requirements:\")\n p.drawText(text)\n\n\n\n text = p.beginText(1 * cm, 23 * cm)\n text.setFont(\"Times-Roman\", 12)\n i = 0 #nur 4 Elemente auf 1 Seite\n req_for_usergroup = requirement.objects.filter(req_fk_ersteller__in=users)\n for req in req_for_usergroup:\n\n text.textLines(\"Name: \" + \"\\n\".join(wrap(str(req), 80)))\n text.textLines(\"Beschreibung: \" + \"\\n\".join(wrap(str(req.req_beschreibung), 80)))\n text.textLines(\"Kommentar: \" + \"\\n\".join(wrap(str(req.req_kommentar), 80)))\n text.textLine(\"Ersteller: \" + str(req.req_fk_ersteller))\n text.textLine(\"Erstelldatum: \" + str(req.req_datum_erstellung))\n text.textLine(\"Änderungsdatum: \" + str(req.req_datum_aenderung))\n\n text.textLine(\"\")\n i = i + 1\n if i > 3:\n p.drawText(text)\n p.showPage() #neue Seite\n text = p.beginText(1 * cm, 23 * cm)\n text.setFont(\"Times-Roman\", 12)\n i = 0\n p.drawText(text)\n p.showPage()\n\n text = p.beginText(1*cm, 24*cm)\n text.setFont(\"Times-Roman\", 12)\n text.textLine(\"TestCases:\")\n p.drawText(text)\n\n text = p.beginText(1 * cm, 23 * cm)\n text.setFont(\"Times-Roman\", 12)\n i = 0 # nur 4 Elemente auf 1 Seite\n testc_for_usergroup = testcase.objects.filter(testc_fk_ersteller__in=users)\n for testc in testc_for_usergroup:\n\n text.textLines(\"Name: \" + \"\\n\".join(wrap(str(testc), 80)))\n text.textLines(\"Beschreibung: \" + \"\\n\".join(wrap(str(testc.testc_beschreibung), 80)))\n text.textLines(\"Vorbedingung: \" + \"\\n\".join(wrap(str(testc.testc_vorbedingung), 80)))\n text.textLines(\"Kommentar: \" + \"\\n\".join(wrap(str(testc.testc_kommentar), 80)))\n reqsliste = []\n\n for a in testc.testc_fk_requirement.all():\n reqsliste.append(str(a))\n\n text.textLines(\"Requirements: \" + \"\\n\".join(wrap(str(reqsliste), 80)))\n\n text.textLine(\"Ersteller: \" + 
str(req.req_fk_ersteller))\n text.textLine(\"Erstelldatum: \" + str(req.req_datum_erstellung))\n text.textLine(\"Änderungsdatum: \" + str(req.req_datum_aenderung))\n\n text.textLine(\"\")\n i = i + 1\n if i > 3:\n p.drawText(text)\n p.showPage() # neue Seite\n text = p.beginText(1 * cm, 23 * cm)\n text.setFont(\"Times-Roman\", 12)\n i = 0\n p.drawText(text)\n p.showPage()\n\n\n text = p.beginText(1 * cm, 24 * cm)\n text.setFont(\"Times-Roman\", 12)\n text.textLine(\"TestRuns:\")\n p.drawText(text)\n\n text = p.beginText(1 * cm, 23 * cm)\n text.setFont(\"Times-Roman\", 12)\n i = 0 # nur 4 Elemente auf 1 Seite\n testr_for_usergroup = testrun.objects.filter(testr_fk_ersteller__in=users)\n for testr in testr_for_usergroup:\n text.textLines(\"Name: \" + \"\\n\".join(wrap(str(testr), 80)))\n text.textLines(\"Beschreibung: \" + \"\\n\".join(wrap(str(testr.testr_beschreibung), 80)))\n text.textLines(\"Kommentar: \" + \"\\n\".join(wrap(str(testr.testr_kommentar), 80)))\n\n text.textLines(\"Testcase: \" + \"\\n\".join(wrap(str(testr.testr_fk_testcaseid), 80)))\n\n text.textLine(\"Status: \" + str(testr.testr_status))\n\n if testr.testr_dauer:\n text.textLine(\"Dauer: \" + str(datetime.timedelta(seconds=int(testr.testr_dauer))))\n\n\n text.textLine(\"Ersteller: \" + str(testr.testr_fk_ersteller))\n text.textLine(\"Erstelldatum: \" + str(testr.testr_datum_erstellung))\n text.textLine(\"Änderungsdatum: \" + str(testr.testr_datum_aenderung))\n\n text.textLine(\"\")\n i = i + 1\n if i > 3:\n p.drawText(text)\n p.showPage() # neue Seite\n text = p.beginText(1 * cm, 23 * cm)\n text.setFont(\"Times-Roman\", 12)\n i = 0\n p.drawText(text)\n\n\n # Close the PDF object cleanly, and we're done.\n p.save()\n\n # FileResponse sets the Content-Disposition header so that browsers\n # present the option to save the file.\n buffer.seek(0)\n return FileResponse(buffer, as_attachment=True, filename='anfundtest.pdf')\n return response\n\n\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.shortcuts import render, redirect\n\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n user_instance_erweitern, created = user_erweitern.objects.get_or_create(user=user)\n user_instance_erweitern.gruppennummer = projekt.objects.get(gruppennummer=form.data.get('gruppennummer'))\n user_instance_erweitern.rolle = 's'\n user_instance_erweitern.save()\n login(request, user)\n return HttpResponseRedirect(reverse('aut:view_dashboard'))\n\n\n else:\n form = SignUpForm()\n\n projekte = projekt.objects.all()\n return render(request, 'aut/signup.html', {'form': form, 'projekte': projekte})\n\n\n\ndef easteregg(request):\n return redirect('https://www.youtube.com/watch?v=DLzxrzFCyOs&ab_channel=AllKindsOfStuff')","sub_path":"aut/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":36681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"251845040","text":"f = open('text.enc','r')\ntext = f.read()\nf.close()\nalph = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\n\ndef decrypt(t):\n #print(t[1:10])\n\n\n alphs = []\n\n\n for i in range(0,len(alph)):\n alphs.append(alph[i:len(alph)+1]+alph[0:i])\n\n key = 'крипто'\n print(' key: '+key)\n for rt in range(0,len(alph)):\n ROT = rt\n i = -1\n 
decrypt = ''\n for sym in t:\n i = (i+1)%len(key)\n alphInd = alph.find(sym)\n SymInd = alphs[(alphInd+ROT)%len(alph)].find(key[i])\n decrSym = alph[SymInd]\n decrypt = decrypt+decrSym\n print(str(rt)+': '+decrypt[0:30])\n if(decrypt.find('флаг') != -1):\n print(' '+str(rt)+': флаг')\n #print(rt)\n f = open(str(rt)+'decr.txt','w')\n f.write(decrypt)\n f.close()\n\ndef analyze(t,symbols):\n t2 = t[symbols:len(t)]+t[0:symbols]\n #print(t2)\n n = 0\n for i in range(0,len(t)):\n if t[i] == t2[i]: n = n+1\n if n/len(t)>0.05:\n print(str(n)+'/'+str(len(t))+' = '+str(n/len(t)))\n #print(' const = 0,0553')\n\n#for a in range(1,100):\n# print(str(a)+' ')\n# analyze(text,a)\n# 17\n\n#text = '1234567890'\ngrps = []\ngrpsRot = []\nkeyLen = 17\nfor grp in range(0,keyLen):\n grps.append('')\n grpsRot.append('')\n ind = grp\n while indmaxCh[grpNum]: maxCh[grpNum]=ch[sym]\n for sym in alph:\n if maxCh[grpNum]-ch[sym]<30:\n f.write(sym+': '+str(ch[sym])+'\\n')\n for sym in ch:\n if maxCh[grpNum]==ch[sym]:\n maxCh[grpNum]=sym\n break\n\nindO = alph.find('о')\n#print(indO)\nfor grp in range(0,len(grps)):\n Rot = alph.find(maxCh[grp])-indO\n if grp==0:\n Rot= Rot+15\n if grp==6:\n Rot= Rot+10\n #print(Rot)\n for i in range(0,len(grps[grp])):\n s = alph[(alph.find(grps[grp][i])-Rot)%len(alph)]\n grpsRot[grp] = grpsRot[grp]+s\n #grps[grp][i] =\n\ndecrypt = ''\nfor sym in range(0,len(grpsRot[0])):\n for grp in range(0,len(grpsRot)):\n if sym>>> ' + self.path)\n url = urlparse(self.path)\n qls = parse_qsl(url.query)\n\n # self.log_message(str(url))\n # self.log_message(str(qls))\n\n # sys.stderr.write(url.path + \"\\n\")\n\n if url.path == '/login/google':\n self.handle_google_login(dict(qls))\n else:\n self.not_found()\n\n def not_found(self):\n self.send_response(404)\n self.end_headers()\n self.wfile.write('NOT FOUND'.encode(ENCODING_UTF8))\n # self.server_shutdown()\n\n# def server_shutdown(self):\n# x = threading.Thread(target=self.server.shutdown)\n# x.start()\n\n def handle_google_login(self, data):\n\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n if data.get('error'):\n errormessage = data.get('error')\n self.wfile.write(errormessage.encode(ENCODING_UTF8))\n # self.server_shutdown()\n\n else:\n c = Client(token_endpoint=TOKEN_ENDPOINT,\n resource_endpoint=RESOURCE_ENDPOINT,\n client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n token_transport=transport_headers)\n\n redirect_uri = REDIRECT_URI % self.server.server_port\n\n c.request_token(code=data['code'],\n redirect_uri=redirect_uri)\n\n save_refresh_token(self.server.mailbox, c.refresh_token)\n\n self.wfile.write(\"AUTHENTICATION SUCCEEDED\".encode(ENCODING_UTF8))\n\n self.server.data = c.request('/mail/feed/atom', parser=lambda c: c)\n","sub_path":"mailindicator/gmailoauth2.py","file_name":"gmailoauth2.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"228658548","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom collections import defaultdict\r\nimport nltk\r\nnltk.download(\"stopwords\")\r\nfrom nltk.corpus import stopwords,wordnet\r\nfrom nltk import pos_tag,WordNetLemmatizer\r\nimport re\r\nfrom tqdm import tqdm\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nimport heapq\r\nfrom sklearn.decomposition import 
PCA\r\n\r\ndf = pd.read_csv(\"news_data.csv\")\r\ndf = df.drop([\"Unnamed: 0\"],axis=1)\r\ndf.isnull().sum()\r\n\r\nsns.countplot(x = 'category',data = df)\r\n\r\ntext= []\r\nfor i in tqdm(range(len(df))):\r\n text.append(df[\"headline\"][i])\r\n\r\n\r\ncat_map = {}\r\nfor i in tqdm(range(len(df))):\r\n if df[\"category\"][i] not in cat_map:\r\n cat_map[df[\"category\"][i]] = [i]\r\n else:\r\n cat_map[df[\"category\"][i]].append(i)\r\n\r\ndef text_preproccesing(text):\r\n hm = defaultdict(lambda: wordnet.NOUN)\r\n hm['J'] = wordnet.ADJ\r\n hm['V'] = wordnet.VERB\r\n hm['R'] = wordnet.ADV\r\n for i in tqdm(range(len(text))):\r\n text[i] = re.sub('[^a-zA-Z]',' ',text[i])\r\n text[i] = text[i].split()\r\n text[i] = [word for word in text[i] if not word in set(stopwords.words(\"english\"))]\r\n lemm = WordNetLemmatizer()\r\n sent = \"\"\r\n for token,tag in pos_tag(text[i]):\r\n word = lemm.lemmatize(token,hm[tag[0]])\r\n sent += word+ \" \"\r\n sent = sent[:-1]\r\n text[i] = sent.lower()\r\n return text\r\n\r\ntext = text_preproccesing(text)\r\n \r\ndef Vectorization(corpus):\r\n vector = TfidfVectorizer(max_features = 3500)\r\n X = vector.fit_transform(corpus).toarray()\r\n return X,vector\r\n\r\nX,vector = Vectorization(text)\r\n\r\n#only execute when required\r\ndef elbow_curve(X):\r\n wcss = []\r\n for i in tqdm(range(1,81)):\r\n kmeans = KMeans(n_clusters = i, init = 'k-means++',max_iter = 300, n_init = 80,random_state = 21 ,n_jobs = -1)\r\n kmeans.fit(X)\r\n wcss.append(kmeans.inertia_)\r\n plt.plot(range(1,81),wcss)\r\n plt.xlabel(\"Iteration\")\r\n plt.ylabel(\"WCSS\")\r\n\r\nelbow_curve(X)\r\n\r\n \r\nkmeans = KMeans(n_clusters = 7, init = \"k-means++\", max_iter = 300, n_init = 10, random_state = 21)\r\nkmeans.fit(X)\r\ny = kmeans.predict(X)\r\n\r\ndef pcaPlot(X,y):\r\n pca = PCA(n_components = 3)\r\n comp = pca.fit_transform(X)\r\n comp = pd.DataFrame(comp)\r\n comp['label'] = pd.DataFrame(y)\r\n sns.pairplot(data = comp,hue = 'label') \r\n \r\npcaPlot(X,y)\r\n\r\ndef Mapping(kmeans):\r\n label = kmeans.labels_\r\n map = {}\r\n for l in tqdm(range(len(label))):\r\n if label[l] not in map:\r\n map[label[l]] = [l]\r\n else:\r\n map[label[l]].append(l)\r\n return map\r\n\r\nmap = Mapping(kmeans)\r\n \r\nglobal X\r\nglobal y\r\nglobal map\r\nglobal cat_map\r\n\r\ndef getReccomendation(index,df):\r\n #recc by Model\r\n prediction = y[index]\r\n vector_arr= []\r\n for i in map[prediction]:\r\n if i!= index:\r\n vector_arr.append(X[i])\r\n vector_arr = np.array(vector_arr)\r\n sim_array = cosine_similarity([X[index]],vector_arr)\r\n sim_array = list(sim_array[0])\r\n \r\n #general recc. 
from category\r\n cat = df[\"category\"][index]\r\n new_vect_arr =[]\r\n for i in cat_map[cat]:\r\n if i!= index:\r\n new_vect_arr.append(X[i])\r\n new_vect_arr = np.array(new_vect_arr)\r\n cat_sim = cosine_similarity([X[index]],new_vect_arr)\r\n cat_sim = list(cat_sim[0])\r\n K=10\r\n gen_rec= []\r\n heap =[]\r\n cat_sim.extend(sim_array)\r\n new_arr = list(set(cat_sim))\r\n for i in range(0,len(new_arr)):\r\n heapq.heappush(heap,(new_arr[i],i))\r\n if len(heap)>K:\r\n heapq.heappop(heap)\r\n score=[]\r\n while(len(heap)):\r\n t = heapq.heappop(heap)\r\n score.append(t[0])\r\n gen_rec.append(t[1])\r\n return gen_rec,score\r\n\r\n\r\ndef reccomendations(index, df):\r\n t,score = getReccomendation(index, df)\r\n t,score = t[::-1],score[::-1]\r\n X = df.iloc[:,:].values\r\n arr =[]\r\n for i in t:\r\n arr.append(X[i][:])\r\n arr = pd.DataFrame(arr)\r\n arr['score'] =score\r\n return arr\r\n \r\nrec_df = reccomendations(7,df)","sub_path":"minor/project/project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"425832662","text":"from getIndexes import getIndexes\nfrom getCoefficientMatrix import getCoefficientMatrix\nfrom getSolutionVect import getSolutionVect\nfrom reconstructImg import reconstructImg\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2 as cv\nfrom numpy.linalg import inv\nfrom scipy import signal\n\ndef maskImage(img):\n mask = np.array(img.convert('L'))\n high_i,high_j = np.where(mask>40)\n low_i,low_j = np.where(mask<=40)\n mask[high_i,high_j] = 1\n mask[low_i,low_j] = 0\n return mask\n\ndef seamlessCloningPoisson(sourceImg, targetImg, mask, offsetX, offsetY):\n source = np.array(sourceImg)/255\n target = np.array(targetImg)/255\n sourceH,sourceW = mask.shape[0],mask.shape[1]\n targetH, targetW = target.shape[0],target.shape[1]\n indexes = getIndexes(mask, targetH, targetW, offsetX, offsetY)\n coeffA = getCoefficientMatrix(indexes)\n source_R = source[0:sourceH,0:sourceW,0]\n source_G = source[0:sourceH,0:sourceW,1]\n source_B = source[0:sourceH,0:sourceW,2]\n target_R = target[0:targetH,0:targetW,0]\n target_G = target[0:targetH,0:targetW,1]\n target_B = target[0:targetH,0:targetW,2]\n res_R = getSolutionVect(indexes, source_R, target_R, offsetX, offsetY)\n res_G = getSolutionVect(indexes, source_G, target_G, offsetX, offsetY)\n res_B = getSolutionVect(indexes, source_B, target_B, offsetX, offsetY)\n coeffA_inv = inv(coeffA)\n red = np.dot(coeffA_inv, res_R)\n green = np.dot(coeffA_inv, res_G)\n blue = np.dot(coeffA_inv, res_B)\n red = np.clip(red,0,1)\n green = np.clip(green,0,1)\n blue = np.clip(blue,0,1)\n red *= 255\n green *= 255\n blue *= 255\n copy = np.array(targetImg)\n resultImg = reconstructImg(indexes, red, green, blue, copy)\n return resultImg\n\nsourceImg = Image.open('mia.jpg')\ntargetImg = Image.open('call_me_by_your_name.jpg')\nimg_mask = Image.open('mia_mask.jpg')\nmask = maskImage(img_mask)\noffsetX = 400\noffsetY = 200\n\nresultImg = seamlessCloningPoisson(sourceImg, targetImg, mask, offsetX, offsetY)\nplt.imshow(resultImg, interpolation='nearest')\nplt.show()","sub_path":"image_blending/Python/seamlessCloningPoisson.py","file_name":"seamlessCloningPoisson.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"41477793","text":"# -*- coding: utf-8 -*-\n\"\"\"Function used to compute the 
loss.\"\"\"\nimport numpy as np\n\ndef compute_loss(y, tx, w, error_type='mae'):\n\te = y - tx.dot(w)\n\n\tif error_type == 'mse':\n\t\treturn (1.0 / (2 * len(y))) * e.T.dot(e)\n\telif error_type == 'mae':\n\t\treturn (1.0 / len(y)) * np.sum(np.abs(e))\n\telse:\n\t\traise ValueError(\"error_type must be 'mse' or 'mae'\")","sub_path":"labs/ex02/template/costs.py","file_name":"costs.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"216960903","text":"from iowaHomes.predict.predictModel.run_prediction import run_prediction\nfrom iowaHomes.predict.templates.predict.feature_element_def import elements\nfrom iowaHomes.predict.predictModel.main import engineered_features\nfrom iowaHomes.predict.predictModel.score_models import score_models\n\nimport pickle, os, random\n\ndef test_run_prediction(n=10):\n \"\"\"\n Runs the run_predicition function n number of times, using a random value\n for the user_input every time the function runs\n\n Expected result: Function runs N times with no errors\n\n :return None:\n \"\"\"\n\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n FOPATH = os.path.join(BASE_DIR, 'iowaHomes/predict/predictModel/predictionModels/training_data/featureOrder.sav')\n\n featureOrder = pickle.load(open(FOPATH, 'rb'))\n\n target_features = []\n for feat in featureOrder:\n target_features.append(feat)\n for e_feat in engineered_features:\n if e_feat['name'] == feat:\n target_features.remove(feat)\n for dep in e_feat['dependencies']:\n target_features.append(dep)\n\n for x in range(n):\n args = {}\n for feat in target_features:\n if elements[feat]['type'] == \"dropdown\":\n argsFeatFieldsLen = elements[feat]['fields'].__len__()\n args[feat] = elements[feat]['fields'][random.randint(0,argsFeatFieldsLen-1)]['value']\n elif elements[feat]['type'] == \"slider\":\n min = int(elements[feat]['min'])\n max = int(elements[feat]['max'])\n args[feat] = random.randint(min, max)\n\n print(run_prediction(args))\n\ndef test_score_models():\n \"\"\"\n Runs the score_models function that outputs the result of different\n metrics that measure the accuracy of all the models\n\n Expected result: Function runs and prints the results of all the models\n\n :return None:\n \"\"\"\n score_models()\n\ndef run_tests():\n test_score_models()\n test_run_prediction(15)\n\nrun_tests()","sub_path":"automated_tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"357571280","text":"_base_ = [\n '../_base_/datasets/s3dis_seg-3d-13class.py',\n '../_base_/models/paconv_ssg.py', '../_base_/schedules/seg_cosine_150e.py',\n '../_base_/default_runtime.py'\n]\n\n# data settings\nclass_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',\n 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter')\nnum_points = 4096\ntrain_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=False,\n with_seg_3d=True),\n dict(\n type='PointSegClassMapping',\n valid_cat_ids=tuple(range(len(class_names))),\n max_cat_id=13),\n dict(\n type='IndoorPatchPointSample',\n num_points=num_points,\n block_size=1.0,\n use_normalized_coord=True,\n num_try=10000,\n enlarge_size=None,\n 
min_unique_num=num_points // 4,\n eps=0.0),\n dict(type='NormalizePointsColor', color_mean=None),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0.0, 6.283185307179586], # [0, 2 * pi]\n scale_ratio_range=[0.8, 1.2],\n translation_std=[0, 0, 0]),\n dict(\n type='RandomJitterPoints',\n jitter_std=[0.01, 0.01, 0.01],\n clip_range=[-0.05, 0.05]),\n dict(type='RandomDropPointsColor', drop_ratio=0.2),\n dict(type='DefaultFormatBundle3D', class_names=class_names),\n dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])\n]\n\ndata = dict(samples_per_gpu=8, train=dict(pipeline=train_pipeline))\nevaluation = dict(interval=1)\n\n# model settings\nmodel = dict(\n decode_head=dict(\n num_classes=13, ignore_index=13,\n loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight\n test_cfg=dict(\n num_points=4096,\n block_size=1.0,\n sample_rate=0.5,\n use_normalized_coord=True,\n batch_size=12))\n","sub_path":"configs/paconv/paconv_ssg_8x8_cosine_150e_s3dis_seg-3d-13class.py","file_name":"paconv_ssg_8x8_cosine_150e_s3dis_seg-3d-13class.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"505049665","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_one_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/redshift/delete-hsm-configuration.html\nif __name__ == '__main__':\n \"\"\"\n\tcreate-hsm-configuration : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/redshift/create-hsm-configuration.html\n\tdescribe-hsm-configurations : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/redshift/describe-hsm-configurations.html\n \"\"\"\n\n parameter_display_string = \"\"\"\n # hsm-configuration-identifier : The identifier of the Amazon Redshift HSM configuration to be deleted.\n \"\"\"\n add_option_dict = {}\n\n #######################################################################\n # parameter display string\n add_option_dict[\"parameter_display_string\"] = parameter_display_string\n # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n write_one_parameter(\"redshift\", \"delete-hsm-configuration\", \"hsm-configuration-identifier\", add_option_dict)\n\n\n\n\n\n","sub_path":"redshift_write_1/hsm-configuration_delete.py","file_name":"hsm-configuration_delete.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"62908603","text":"import csv\nimport time\nfrom datetime import datetime\nf1=open(\"/Users/StanleyLIn/Desktop/專題研究/2013.csv\")\nf2=open(\"2013Fre_H_data_Five.csv\",'w')\nf3=open(\"2013Fre_L_data_Five.csv\",'w')\n#f4=open(\"/Users/StanleyLIn/Desktop/2014.csv\")\nID_NO_Duplicate=dict()\ncount1=0\nfor row in csv.DictReader(f1):\n\tcount1+=1\n\tprint(count1)\n\tif(row[\"DEP_TIME\"]!=\"\"):\n\t\tif(row[\"CUSTOMER_ID\"] not in ID_NO_Duplicate.keys()):\n\t\t\tID_NO_Duplicate[row[\"CUSTOMER_ID\"]]=1\n\t\telse:\n\t\t\tID_NO_Duplicate[row[\"CUSTOMER_ID\"]]+=1\nwriter_HighF= csv.writer(f2, delimiter=',',dialect='excel')\nwriter_LowF= csv.writer(f3, delimiter=',',dialect='excel')\ncount2=0\nprint(ID_NO_Duplicate)\nf1.close()\ncount_high=0\ncount_low=0\nf1=open(\"/Users/StanleyLIn/Desktop/專題研究/2013.csv\")\nfor row in 
csv.DictReader(f1):\n\n\tcount2+=1\n\tprint(count2)\n\tif(count2==1):\n\t\t#print(row)\n\t\twriter_HighF.writerow(row)\n\t\twriter_LowF.writerow(row)\n\t#print(row)\n\tif(row[\"DEP_TIME\"]!=\"\"):\n\t\t#print(ID_NO_Duplicate)\n\t\tif(ID_NO_Duplicate[row[\"CUSTOMER_ID\"]]>=5):\n\n\t\t#\tprint(str(count2)+str(Count[ID_NO_Duplicate.index(row[\"CUSTOMER_ID\"])]))\n\t\t\twriter_HighF.writerow(row.values())\n\t\t\tcount_high+=1\n\t\tif(ID_NO_Duplicate[row[\"CUSTOMER_ID\"]]<5):\n\t\t\twriter_LowF.writerow(row.values())\n\t\t\tcount_low+=1\n\nprint(count_high)\nprint(count_low)\n\t","sub_path":"First Semester/Frequency2013.py","file_name":"Frequency2013.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"252067547","text":"\r\nimport sys\r\nimport copy\r\nimport math\r\n\"\"\"\r\nDON'T FORGET TO ATTRIBUTE CODE\r\n\"\"\"\r\n\r\nNUM_PIECES = 4\r\nMAX_DIST = 7\r\nBOARDDIM = 3\r\nNUM_PLAYERS = 3\r\n\r\nMAX_DEPTH = 3\r\n\r\n_FINISHING_HEXES = {\r\n 'red': {(3, -3), (3, -2), (3, -1), (3, 0)},\r\n 'green': {(-3, 3), (-2, 3), (-1, 3), (0, 3)},\r\n 'blue': {(-3, 0), (-2, -1), (-1, -2), (0, -3)},\r\n}\r\n_ADJACENT_STEPS = [(-1, +0), (+0, -1), (+1, -1), (+1, +0), (+0, +1), (-1, +1)]\r\n\r\nRAN = range(-BOARDDIM, BOARDDIM + 1)\r\nHEXES = [(q, r) for q in RAN for r in RAN if -q - r in RAN]\r\nCOLOURS = ['red', 'green', 'blue']\r\nCOLOUR_DICT = {'red': 0, 'green': 1, 'blue': 2}\r\nNEXT_COLOUR = {'red': 'green', 'green': 'blue', 'blue': 'red'}\r\n\r\ndef maxn(colour, score, board):\r\n return recur_maxn(game_tree.root, COLOUR_DICT[colour], 0)[1]\r\n\r\ndef recur_maxn(board_state, depth):\r\n if depth == MAX_DEPTH:\r\n return board_state.eval_scores(), board_state.action\r\n else:\r\n max_eval = - NUM_PIECES * MAX_DIST - 1\r\n best_score_dict = {}\r\n best_action = None\r\n for next_state in board_state.available_actions():\r\n eval_score_dict = recur_maxn(next_state, depth+1)[0]\r\n eval_score = eval(eval_score_dict, COLOURS[(colour_index + depth) % NUM_PLAYERS])\r\n if eval_score > max_eval:\r\n# if ((eval_score > max_eval) or\r\n# (eval_score == max_eval and (best_action and best_action[0] != \"JUMP\" and child.boardstate.action == \"JUMP\"))):\r\n max_eval = eval_score\r\n best_score_dict = eval_score_dict\r\n best_action = child.boardstate.action\r\n return best_score_dict, best_action\r\n\r\n\r\ndef eval(eval_score_dict, curr_colour):\r\n min_opposition = NUM_PIECES * MAX_DIST\r\n for colour in COLOURS:\r\n if colour != curr_colour and eval_score_dict[colour] < min_opposition:\r\n min_opposition = eval_score_dict[colour]\r\n return min_opposition - eval_score_dict[curr_colour]\r\n# return - eval_score_dict[curr_colour]\r\n\r\nclass GameNode:\r\n def __init__(self, board_state):\r\n self.boardstate = board_state # the board state\r\n # the three tuple of distance from end\r\n self.value = {'red': 0, 'green': 0, 'blue': 0}\r\n# self.parent = parent # a node reference\r\n self.children = [] # a list of nodes\r\n\r\n def add_child(self, child_node):\r\n self.children.append(child_node)\r\n\r\n def __str__(self, level=0):\r\n cells = []\r\n for qr in HEXES:\r\n cells.append(_DISPLAY[self.boardstate.board[qr]])\r\n ret = \"\\t\" * level + _TEMPLATE_DEBUG.format(self.value, *cells) + \"\\n\"\r\n for child in self.children:\r\n ret += child.__str__(level + 1)\r\n return ret\r\n\r\n def __repr__(self):\r\n return ''\r\n\r\n\r\nclass GameTree:\r\n def __init__(self, my_colour):\r\n self.root = None\r\n self.colourindex = 
COLOUR_DICT[my_colour]\r\n\r\n def build_tree(self, board):\r\n \"\"\"\r\n :param data_list: Take data in list format\r\n :return: Parse a tree from it\r\n \"\"\"\r\n self.root = GameNode(BoardState(board))\r\n self.parse_subtree(self.root, 0)\r\n\r\n def parse_subtree(self, curr_node, depth):\r\n # base case\r\n if depth == MAX_DEPTH:\r\n curr_node.value = curr_node.boardstate.eval_scores()\r\n return\r\n else:\r\n next_board_states = self.create(curr_node.boardstate, COLOURS[(self.colourindex + depth) % NUM_PLAYERS])\r\n depth += 1\r\n for next_board_state in next_board_states:\r\n child = GameNode(next_board_state)\r\n # print(str(tree_node))\r\n curr_node.add_child(child)\r\n self.parse_subtree(child, depth)\r\n\r\n def create(self, board_state, colour):\r\n all_board_states = []\r\n for qr in HEXES:\r\n if board_state.board[qr] == colour:\r\n if qr in _FINISHING_HEXES[colour]:\r\n action = (\"EXIT\", qr)\r\n all_board_states.append(self.change(board_state, action, colour))\r\n q, r = qr\r\n for dq, dr in _ADJACENT_STEPS:\r\n for i, atype in [(1, \"MOVE\"), (2, \"JUMP\")]:\r\n tqr = q + dq * i, r + dr * i\r\n if tqr in HEXES:\r\n if board_state.board[tqr] == ' ':\r\n action = (atype, (qr, tqr))\r\n all_board_states.append(\r\n self.change(board_state, action, colour))\r\n break\r\n if not all_board_states:\r\n action = (\"PASS\", None)\r\n all_board_states.append(self.change(board_state, action, colour))\r\n # print(all_boards)\r\n return all_board_states\r\n\r\n def change(self, board_state, action, colour):\r\n new_board = copy.copy(board_state.board)\r\n new_score = copy.copy(board_state.score)\r\n atype, aargs = action\r\n if atype == \"MOVE\":\r\n qr_a, qr_b = aargs\r\n new_board[qr_a] = ' '\r\n new_board[qr_b] = colour\r\n elif atype == \"JUMP\":\r\n qr_a, qr_b = (q_a, r_a), (q_b, r_b) = aargs\r\n qr_c = (q_a + q_b) // 2, (r_a + r_b) // 2\r\n new_board[qr_a] = ' '\r\n new_board[qr_b] = colour\r\n new_board[qr_c] = colour\r\n elif atype == \"EXIT\":\r\n qr = aargs\r\n new_board[qr] = ' '\r\n new_score[colour] += 1\r\n else: # atype == \"PASS\":\r\n pass\r\n\r\n return BoardState(new_board, action, new_score)\r\n\r\n\r\nclass BoardState:\r\n def __init__(self, board, colour, action=None, score={'red': 0, 'green': 0, 'blue': 0}):\r\n self.board = board\r\n self.colour = colour\r\n self.action = action\r\n self.score = score\r\n\r\n def piece_lists(self):\r\n piecelists = {'red': set(), 'green': set(), 'blue': set()}\r\n for qr in HEXES:\r\n if self.board[qr] != ' ':\r\n piecelists[self.board[qr]].add(qr)\r\n return piecelists\r\n\r\n def eval_scores(self):\r\n \"\"\"\r\n Since four pieces must exit, we sum the individual distances of\r\n the best four pieces from exiting, with a distance of zero if a piece\r\n has already exited. 
If there are not enough remaining pieces on the\r\n board, add on 7 (the max distance possible)\r\n \"\"\"\r\n eval_score = {'red': 0, 'green': 0, 'blue': 0}\r\n for colour in eval_score:\r\n exit_dists = []\r\n for qr in self.piece_lists()[colour]:\r\n exit_dists.append((exit_dist(qr, colour)))\r\n for i in range(NUM_PIECES - self.score[colour] - len(exit_dists)):\r\n exit_dists.append(MAX_DIST)\r\n eval_score[colour] = sum(sorted(exit_dists)[:(NUM_PIECES - self.score[colour])])\r\n return eval_score\r\n\r\n def available_actions(self):\r\n all_board_states = []\r\n for qr in HEXES:\r\n if self.board[qr] == colour:\r\n if qr in _FINISHING_HEXES[colour]:\r\n action = (\"EXIT\", qr)\r\n all_board_states.append(self.change(action))\r\n q, r = qr\r\n for dq, dr in _ADJACENT_STEPS:\r\n for i, atype in [(1, \"MOVE\"), (2, \"JUMP\")]:\r\n tqr = q + dq * i, r + dr * i\r\n if tqr in HEXES:\r\n if self.board[tqr] == ' ':\r\n action = (atype, (qr, tqr))\r\n all_board_states.append(self.change(action))\r\n break\r\n if not all_board_states:\r\n action = (\"PASS\", None)\r\n all_board_states.append(self.change(action))\r\n return all_board_states\r\n\r\n def change(self, action):\r\n new_board = copy.copy(self.board)\r\n new_score = copy.copy(self.score)\r\n atype, aargs = action\r\n if atype == \"MOVE\":\r\n qr_a, qr_b = aargs\r\n new_board[qr_a] = ' '\r\n new_board[qr_b] = colour\r\n elif atype == \"JUMP\":\r\n qr_a, qr_b = (q_a, r_a), (q_b, r_b) = aargs\r\n qr_c = (q_a + q_b) // 2, (r_a + r_b) // 2\r\n new_board[qr_a] = ' '\r\n new_board[qr_b] = colour\r\n new_board[qr_c] = colour\r\n elif atype == \"EXIT\":\r\n qr = aargs\r\n new_board[qr] = ' '\r\n new_score[colour] += 1\r\n else: # atype == \"PASS\":\r\n pass\r\n\r\n return BoardState(new_board, NEXT_COLOUR[colour], action, new_score)\r\n\r\n\r\ndef exit_dist(qr, colour):\r\n \"\"\"how many HEXES away from a coordinate is the nearest exiting hex?\"\"\"\r\n q, r = qr\r\n if colour == 'red':\r\n return BOARDDIM - q\r\n if colour == 'green':\r\n return BOARDDIM - r\r\n if colour == 'blue':\r\n return BOARDDIM - (-q - r)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Killer_Pythons1/gametree.py","file_name":"gametree.py","file_ext":"py","file_size_in_byte":9013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"531103806","text":"import itertools\nimport numpy as np\nimport spotipy.util\nimport sklearn.cluster\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef add_uris(fetched):\n for item in fetched['tracks']['items']:\n uris.add(item['track']['uri'])\n\ndef features_to_vector(item):\n return np.array([item[key] for key in FEATURE_VECTOR])\n\n# Gets an X-matrix given data as 2-element tuples with IDs and vectors.\ndef get_x(values):\n return np.vstack([x[1] for x in values])\n\n# Given an object with a .transform(), apply it to the data vectors.\ndef apply_transform(transformer, data):\n return [(x[0], transformer.transform(x[1].reshape(1, -1))) for x in data]\n\ndef train_and_apply(transformer, data):\n X = get_x(data)\n transformer.fit(X)\n return apply_transform(transformer, data)\n\n\n# privileged_song = input(\"Enter a song URL bit\")\nprivileged_song = '0UqShk7xMPzDWsJB9s0eFF'\n\n# Create your own Spotify app to get the ID and secret.\n# https://beta.developer.spotify.com/dashboard/applications\nCLIENT_ID = ''\nCLIENT_SECRET = ''\n\n# Put your regular Spotify username here.\nUSERNAME = ''\n\nREDIRECT_URI = 'https://www.google.com/'\nSCOPE = 
'user-library-read playlist-modify-public'\n\n# Create a Spotify client that can access my saved song information.\ntoken = spotipy.util.prompt_for_user_token(USERNAME,\n SCOPE,\n client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n redirect_uri=REDIRECT_URI)\n\nsp = spotipy.Spotify(auth=token)\n\n# Get the Spotify URIs of each of my saved songs.\nuris = set([])\n\n\nplaylist = 'https://open.spotify.com/playlist/3PP4DLeNOFKlK0QwI5P7rf'\nresults = sp.playlist(playlist)\n# results = sp.current_user_saved_tracks()\nadd_uris(results)\nuris.add(privileged_song)\n\nwhile results['tracks']['next']:\n results = sp.next(results)\n add_uris(results)\n\n# Function that returns the next n elements from the iterator. Used because\n# Spotify limits how many items you can group into each of its API calls.\ndef grouper(n, iterable):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n yield chunk\n\n# Get the audio features of each of the URIs fetched above.\nuris_to_features = {}\nfor group in grouper(50, uris):\n res = sp.audio_features(tracks=group)\n for item in res:\n uris_to_features[item['uri']] = item\n\nFEATURE_VECTOR = [\n 'acousticness',\n 'danceability',\n 'duration_ms',\n 'energy',\n 'instrumentalness',\n 'key',\n 'liveness',\n 'loudness',\n 'mode',\n 'speechiness',\n 'tempo',\n 'time_signature',\n 'valence'\n]\n\n\n\nvectors = [(x[0], features_to_vector(x[1])) for x in uris_to_features.items()]\n\n\nscaled = train_and_apply(sklearn.preprocessing.StandardScaler(), vectors)\n\nRUN_ON = scaled\n\nNUM_CLUSTERS = 4\nPLAYLIST_NAME_FMT = 'Version {}: Cluster {}'\nVERSION = 7\n\nmodel = sklearn.cluster.KMeans(n_clusters=NUM_CLUSTERS,\n n_jobs=-1)\nmodel.fit(get_x(RUN_ON))\nclassified = {}\ndistance_vals = {}\ndistance_counts = {}\nfinal_playlist_to_check = -1\nfinal_distance = -1\nlowest_dist = 100\nhighest_dist = 0\nplaylists = {}\nfor i in range(0, NUM_CLUSTERS):\n distance_vals[i] = 0\n distance_counts[i] = 0\n\n\nj=0\nfor x in RUN_ON:\n rval1, rval2 = model.predict(x[1])\n playlist = rval1[0]\n distance = rval2[0]\n\n playlists[j] = playlist\n j+=1\n\n if(distance > highest_dist):\n highest_dist = distance\n elif(distance < lowest_dist):\n lowest_dist = distance\n\n classified[x[0]] = model.predict(x[1])\n distance_vals[playlist] += distance\n distance_counts[playlist] += 1\n if(privileged_song in x[0]):\n final_playlist_to_check = playlist\n final_distance = distance\n\n\nif final_playlist_to_check >= 0:\n avg_dist = (distance_vals[final_playlist_to_check]) / (distance_counts[final_playlist_to_check])\n n1 = round(avg_dist, 2)\n n2 = round(final_distance, 2)\n n3 = round(highest_dist, 2)\n n4 = round(lowest_dist, 2)\n\n # print(\"AVG score: {} yours: {} highest: {} lowest {}\".format(n1, n2, n3, n4))\n\n labels = ['AVG score', 'Your score', 'Highest score', 'Lowest score', 'acousticness', 'danceability',\n 'duration', 'energy', 'instrumentalness', 'key', 'liveness', 'loudness', 'mode', 'speechiness',\n 'tempo', 'time signature', 'valence']\n\n\n attrs = []\n for i in range(0, 13):\n # attrs[i] = 0\n attrs.append(0)\n\n j = 0\n n_songs = 0\n for i in playlists.keys():\n if(playlists[i] == final_playlist_to_check):\n j = 0\n for attr in scaled[i][1][0]:\n attrs[j] += attr\n j+=1\n n_songs += 1\n\n\n for j in range(0, 13):\n attrs[j] /= n_songs\n attrs[j] *= 10\n\n\n men_means = [n1, n2, n3, n4]\n\n disp_list = men_means + attrs\n\n x = np.arange(len(labels)) # the label locations\n\n plt.figure(figsize=(20, 10))\n plt.bar(x, disp_list, 
align='center', alpha=0.5)\n plt.xticks(x, labels)\n plt.ylabel('Distance from most \\'average\\' song in sub-genre')\n plt.title('Song analysis results')\n\n ax = plt.gca()\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(6)\n\n plt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"82929344","text":"import numpy\nimport cv2\nimport random\nimport math\n\nWINDOW = \"RRT_Widnow\"\n\n\nDISPLAY = True\nSHOWREWIRE = True\nDISPINT = 1\nMAXNODES = 2000\n\nWHITE = (255,255,255)\nBLACK = (0,0,0)\nRED = (0,0,255)\nGREEN = (0,255,0)\nBLUE = (255,0,0)\nMAGENTA = (255,0,255)\nYELLOW = (0,255,255)\nCYAN = (255,255,0)\n\n\n\nBGCOLOR = BLACK\nNODECOLOR = CYAN\nLINECOLOR = CYAN\nPATHCOLOR = RED\n\nMAPHEIGHT = 1000\nMAPWIDTH = MAPHEIGHT\n\nSTART = (100,500) \nEND = (MAPWIDTH-1,MAPHEIGHT-1)\n\nTEXTLOC = (5+0,MAPHEIGHT-5)\n\nLINESIZE = 2\nNODESIZE = (LINESIZE + 1)\n\nSTEPSIZE = (MAPHEIGHT + MAPWIDTH)/2 * .5\n\ndef destroyImage(image):\n cv2.imshow(WINDOW, image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\ndef draw(image, nodes, pathed,redraw):\n \n cv2.circle(image, END, int(STEPSIZE), PATHCOLOR)\n\n if SHOWREWIRE:\n for place in redraw:\n cv2.line(image, place, redraw[place], WHITE, LINESIZE)\n cv2.imshow(WINDOW, image)\n cv2.waitKey(1)\n for place in redraw:\n cv2.line(image,place,redraw[place],BGCOLOR,LINESIZE)\n\n redraw.clear() \n\n for place in nodes:\n\n cv2.circle(image,place, NODESIZE, NODECOLOR,-1)\n cv2.line(image,place,nodes[place],LINECOLOR,LINESIZE)\n\n if pathed == 1:\n pathedDraw(image,nodes)\n \n cv2.circle(image, START, NODESIZE+2, PATHCOLOR, -1)\n cv2.circle(image, END, NODESIZE+2, PATHCOLOR, -1)\n cv2.putText(image, \"N: \" + str(len(nodes)-DISPINT), TEXTLOC, 5, 1, BGCOLOR, 1)\n cv2.putText(image, \"N: \" + str(len(nodes)), TEXTLOC, 5, 1, RED, 1)\n\n cv2.imshow(WINDOW,image)\n cv2.waitKey(1)\n\ndef pathedDraw(image, nodes):\n #print(\"display path\")\n node = END\n #counter = 0\n \n while node is not START:\n #counter +=1\n #print(\"counter\" + str(counter))\n #print(\"node\" + str(node))\n #print(\"parent\" + str(nodes[node]))\n \n \n cv2.line(image,node,nodes[node],RED,LINESIZE)\n cv2.circle(image, node, NODESIZE, RED, -1)\n node = nodes[node]\n #if counter > 75:\n #cv2.imshow(WINDOW, image)\n #cv2.waitKey(0)\n #counter = \"hello\"\n #cv2.circle(image, node, NODESIZE+1, BGCOLOR, -1)\n\n\ndef euclDist(n1,n2):\n #returns euclidian distance\n return math.sqrt( ((n1[0] - n2[0])**2) + ((n1[1] - n2[1])**2) )\ndef distanceSortLambda(origin):\n '''\n returns a lambda to sort a function by distance from a point\n \n :param origin: the point to find distance from\n :return lambda: function that finds distance between origin and x\n ------stolen from James----------\n '''\n return lambda x: euclDist(origin, x)\ndef rewire(newNode,nodelist,costs,image):\n redraw = {}\n for node in nodelist:\n if node == newNode:\n continue\n if euclDist(node, newNode) > STEPSIZE:\n continue\n newCost = costs[newNode] + euclDist(node, newNode)\n if newCost < costs[node]:\n cv2.line(image, node, nodelist[node], BGCOLOR, LINESIZE)\n redraw[node]= nodelist[node]\n if node == nodelist[node]:\n print(\"5\")\n return\n nodelist[node] = newNode\n if node == newNode:\n print(\"6\")\n return\n costs[node] = newCost\n return redraw \n\ndef finalPath(image, nodes):\n if END not in nodes:\n print(\"END NOT FOUND (try increasing MAXNODES)\")\n return\n path = 
[END]\n place = nodes[END]\n while place is not START:\n print(\"place: \" + str(place))\n path.append(place)\n place = nodes[place]\n path.append(place)\n path = path[::-1]\n\n\n for i in range(0,len(path)-1):\n current = path[i]\n next = path[i+1]\n\n x,y = current\n xf,yf = next\n\n dx = xf-x\n dy = yf-y\n\n d = dx\n\n if d == dx:\n while x != xf:\n cv2.circle(image, (x,y), LINESIZE+5, MAGENTA, -1)\n cv2.imshow(WINDOW, image)\n cv2.waitKey(2)\n dx = xf-x\n dy = yf-y\n x+=1\n y+=int(dy/dx)\n '''elif d ==dy:\n while y != yf:\n cv2.circle(image, (x,y), LINESIZE+2, CYAN, -1)\n cv2.imshow(WINDOW, image)\n cv2.waitKey(2)\n y+=1\n x+=int(dx/dy)'''\n\n\n\n cv2.imshow(WINDOW, image)\n\ndef main():\n endFound = 0\n cv2.namedWindow(WINDOW, cv2.WINDOW_NORMAL)\n scale = 800/MAPHEIGHT\n cv2.resizeWindow(WINDOW, 800, 800)\n\n #make the map\n map = numpy.zeros((MAPWIDTH,MAPHEIGHT,3))\n map[:][:] = BGCOLOR\n\n nodelist = {START:START}\n costs = {}\n\n costs[START] = 0\n redraw = {}\n while len(nodelist) < MAXNODES:\n #if len(nodelist) % DISPINT == 0:\n #print(len(nodelist))\n newX = random.randint(0,MAPWIDTH)\n newY = random.randint(0,MAPHEIGHT)\n newNode = (newX,newY)\n\n #set default distance and parent, in case the start is the least dist\n parent = START\n\n neighbors = sorted( nodelist, key=distanceSortLambda(newNode))\n parent = neighbors[0]\n minCost = costs[parent] + euclDist(newNode, parent)\n\n for place in neighbors[0:4]:\n compCost = costs[place] + euclDist(newNode, place)\n if compCost < minCost:\n minCost = compCost\n parent = place\n dist = euclDist(newNode, parent)\n if nodelist.has_key(newNode):\n continue\n if newNode == END:\n continue\n if dist > STEPSIZE:\n continue\n\n\n costs[newNode] = minCost\n \n nodelist[newNode] = parent\n if newNode == parent:\n print(\"1\")\n return\n redraw.update(rewire(newNode, nodelist, costs, map))\n\n\n if euclDist(newNode, END) < STEPSIZE:\n if endFound == 1:\n if euclDist(newNode, END) < euclDist(nodelist[END], END):\n redraw[END] = nodelist[END]\n if END == nodelist[END]:\n print(\"2\")\n return\n nodelist[END]=newNode\n if END == newNode:\n print(\"3\")\n return\n costs[END]= costs[newNode] + euclDist(END, newNode)\n endFound = 1\n #print(\"FOUND THE END\")\n else:\n nodelist[END]=newNode\n if END == newNode:\n print(\"4\")\n return\n costs[END]= costs[newNode] + euclDist(END, newNode)\n endFound = 1\n\n \n\n #if the newly added node is close enough to the end, go for it!\n '''\n if euclDist(newNode, END) < STEPSIZE:\n dist = euclDist(newNode, END)\n for place in nodelist:\n testDist = euclDist(place, END)\n if testDist > dist:\n nodelist[END]=newNode\n '''\n \n if len(nodelist) % DISPINT == 0 and DISPLAY:\n draw(map,nodelist,endFound,redraw)\n draw(map, nodelist, endFound, redraw)\n #finalPath(map,nodelist)\n destroyImage(map)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"RRTStar.py","file_name":"RRTStar.py","file_ext":"py","file_size_in_byte":7392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"589711743","text":"import pickle\r\n\r\nX_user = [2018, \"Luzon\", \"Solar\"]\r\n\r\ngrid = [\"Luzon\", \"Mindanao\", \"Visayas\"]\r\ngrid_val = [0, 0, 0]\r\n\r\nsubtype = [\"Biomass\", \"Geothermal\", \"Hydro\", \"Solar\", \"Wind\", \"Coal\", \"Combined\", \"Diesel\", \"Gas Turbine\", \"Natural Gas\", \"Oil Thermal\"]\r\nsubtype_val = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\nfor g in grid:\r\n if X_user[1] == g:\r\n grid_val[grid.index(g)] = 1\r\nsubtype_val[subtype.index(X_user[2])] = 
1\r\nX_test = [X_user[0]] + grid_val + subtype_val\r\n# print(X_test)\r\n\r\nloaded_model = pickle.load(open(\"ML_C_rf.pkl\", \"rb\"))\r\nresult = loaded_model.predict([X_test])\r\n# print(result)\r\n","sub_path":"ML Load C DOE.py","file_name":"ML Load C DOE.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"471795443","text":"import lib.parser as parser\nfrom lib.base import Atom, RelationCall\n\na = RelationCall(id='uio',\n args=[Atom(name='abcd',\n args=[Atom('defg'),\n Atom('xyz',\n args=[\n Atom(\n 'vbn')])])])\n\nb = RelationCall(id='lop',\n args=[\n Atom(name='abcd',\n args=[Atom(\n 'defg'),\n Atom(\n 'xyz')])])\n\nc = RelationCall(id='kek', args=[])\n\nd = RelationCall(id='cdef', args=[Atom(name='abcd')])\n\ne = RelationCall(id='hjk',\n args=[Atom(name='abcd', args=[Atom('defg')])])\n\n\ndef test_rel_call():\n assert parser.rel_call.parse('uio (abcd {defg, xyz {vbn}})') == a\n assert parser.rel_call.parse('lop (abcd {defg, xyz})') == b\n assert parser.rel_call.parse('kek ()') == c\n assert parser.rel_call.parse('cdef (abcd)') == d\n assert parser.rel_call.parse('hjk (abcd {defg})') == e\n\n\ndef test_rel_call_repr():\n assert 'CALL(uio, [ATOM(CONS, abcd, [ATOM(VAR, defg), ATOM(CONS, xyz, [ATOM(VAR, vbn)])])])' == str(a)\n assert 'CALL(lop, [ATOM(CONS, abcd, [ATOM(VAR, defg), ATOM(VAR, xyz)])])' == str(b)\n assert 'CALL(kek, [])' == str(c)\n assert 'CALL(cdef, [ATOM(VAR, abcd)])' == str(d)\n assert 'CALL(hjk, [ATOM(CONS, abcd, [ATOM(VAR, defg)])])' == str(e)\n","sub_path":"tests/test_rel_call.py","file_name":"test_rel_call.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"222326951","text":"import os\n\nfrom UCTB.utils import multiple_process\n\n\ndef task_func(share_queue, locker, data, parameters):\n\n print('Child process %s with pid %s' % (parameters[0], os.getpid()))\n\n for task in data:\n print('Child process', parameters[0], 'running', task)\n exec_str = 'python HMM.py --Dataset %s --City %s ' % (task[0], task[1])\n if task[2] != '':\n exec_str += task[2]\n os.system(exec_str)\n\n locker.acquire()\n share_queue.put(None)\n locker.release()\n\n\nif __name__ == '__main__':\n\n task_list = [\n ['Bike', 'NYC', ''],\n ['Bike', 'Chicago', ''],\n ['Bike', 'DC', ''],\n ['Metro', 'Chongqing', ''],\n ['Metro', 'Shanghai', ''],\n ['DiDi', 'Chengdu', ''],\n ['DiDi', 'Xian', ''],\n ['ChargeStation', 'Beijing', '']\n ]\n\n n_jobs = 2\n\n multiple_process(distribute_list=task_list,\n partition_func=lambda data, i, n_job: [data[e] for e in range(len(data)) if e % n_job == i],\n task_func=task_func, n_jobs=n_jobs,\n reduce_func=lambda x, y: None, parameters=[])\n\n","sub_path":"Experiments/HMM/trials.py","file_name":"trials.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"589343492","text":"\"\"\"\r\nExposes logging and debugging operations.\r\n\r\nUse the 'debug', 'info', 'warning', 'error', or 'critial' methods on the 'log'\r\nobject to send messages to the stderr (which appear in the console in Sublime).\r\n\r\nA log file is also created in the plugin folder for messages at the level set\r\nby the properties below.\r\n\"\"\"\r\n\r\nimport logging\r\n\r\nlog = logging.getLogger('BillsSTP')\r\nlog.setLevel(logging.DEBUG)\r\n\r\n_logFile = None\r\n_logCons = None\r\n\r\n\r\ndef 
configure_logging(console_level, file_level, file_path):\r\n \"\"\"Configures logging for the plugin\r\n\r\n console_level and file_level should be string values such as 'error',\r\n 'warn', 'info', or 'debug'.\r\n\r\n file_path is only used on the first call to set the log file location.\r\n \"\"\"\r\n\r\n global log, _logFile, _logCons\r\n\r\n # Get the numeric values from the logging module based on string names\r\n cons_level_value = logging.__dict__[console_level.upper()]\r\n file_level_value = logging.__dict__[file_level.upper()]\r\n\r\n # Only create & attach the handlers on the first call\r\n if not _logFile:\r\n # If this is not set duplicate entries appear in the console\r\n log.propagate = False\r\n\r\n pattern = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')\r\n\r\n _logFile = logging.FileHandler(file_path, mode='w')\r\n _logFile.setFormatter(pattern)\r\n log.addHandler(_logFile)\r\n\r\n _logCons = logging.StreamHandler()\r\n _logCons.setFormatter(pattern)\r\n log.addHandler(_logCons)\r\n\r\n _logCons.setLevel(cons_level_value)\r\n _logFile.setLevel(file_level_value)\r\n","sub_path":"stplib/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"585937963","text":"\r\n#\r\nfrom .zhichat import zhiChat\r\nfrom .basebot import basebot\r\nfrom ..zhimsg import async_send\r\n\r\nimport logging\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\n\r\nclass dingbot(basebot):\r\n\r\n def check(self, request, data):\r\n if data['chatbotUserId'] in self.conf:\r\n return True\r\n return super().check(request, data)\r\n\r\n def config_done(self, data):\r\n self.conf.append(data['chatbotUserId'])\r\n\r\n def config_desc(self, data):\r\n return \"钉钉群“%s”的“%s”正在试图访问“%s”。\\n\\nchatbotUserId: %s\" % (data['conversationTitle'], data['senderNick'], data['text']['content'], data['chatbotUserId'])\r\n\r\n async def async_handle(self, data):\r\n query = data['text']['content'].strip()\r\n if self.name:\r\n return await async_send(self.name, query)\r\n else:\r\n return await zhiChat(self.hass, query)\r\n\r\n def response(self, answer):\r\n return self.json({'msgtype': 'text', 'text': {'content': answer}})\r\n","sub_path":"custom_components/zhibot/dingbot.py","file_name":"dingbot.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"204230517","text":"from flask import Flask\n# from flask import render_template\nfrom flask.ext.bootstrap import Bootstrap\nfrom flask import request\n\nimport demo.predict as predict\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'hard to guess string'\nbootstrap = Bootstrap(app)\n\n\n@app.route('/add', methods=['GET'])\ndef service():\n a = request.args.get(\"a\")\n b = request.args.get(\"b\")\n return str(predict.predict(float(a), float(b))[0][0])\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"demo/predict_web.py","file_name":"predict_web.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"87296741","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n\n\"\"\" \n@version: v1.0 \n@author: 330mlcc \n@Software: PyCharm\n@license: Apache Licence \n@Email : mlcc330@hotmail.com\n@contact: 3323202070@qq.com\n@site: \n@software: PyCharm \n@file: dictComprehension.py\n@time: 18-8-30 下午10:54 \nDescription:\n字典T\n\"\"\"\n\nimport 
sys\nimport re\nimport collections\n\ndef handleDictComprehension():\n '''\n 字典推导式的例子\n :return:\n '''\n DIAL_CODES = [\n (86, 'china'),\n (91, 'India'),\n (1, 'United States'),\n (62, 'Indonesia'),\n (55, 'Brazil'),\n (92, 'Pakistan'),\n (880, 'Bangladesh'),\n (234, 'Nigeria'),\n (7, 'Russia'),\n (81, 'Japan'),\n ]\n\n country_code = {country: code for code, country in DIAL_CODES}\n print(country_code)\n\n results = {code: str(country).upper() for country, code in DIAL_CODES}\n print(results)\n\n print('DIAL_CODES.__iter__() is : ',DIAL_CODES.__iter__())\n\n pass\n\ndef countWordInFileUseSetdefault():\n \"\"\"\n 使用setdefault处理找不到的键\n :return:\n \"\"\"\n WORD_CONTENT = re.compile(r'\\w+')\n index = {}\n\n with open(sys.argv[1],encoding='utf-8') as fp:\n for line_no, line in enumerate(fp,1):\n # print(line_no,\" : \",line)\n for match in WORD_CONTENT.finditer(line):\n word = match.group()\n # print('word is : ',word)\n column_no = match.start() + 1\n # print('column_no is : ',column_no)\n location = (line_no,column_no)\n\n # 以下这是一种非常不好的实现\n # 提取word出现的情况,如果没有记录就返回空\n # occurrences = index.get(word,[])\n # 把单词新出现的位置添加到列表后面\n # occurrences.append(location)\n # 把新的列表放回到字典种,多了一次查询操作\n # index[word] = occurrences\n\n # 建议使用好的方法替换以上的逻辑\n # 如果单词不存在,把单词和空列表放进映射,然后返回这个空列表,可以不进行二次查找就可更新列表\n index.setdefault(word,[]).append(location)\n\n for word in sorted(index,key=str.upper):\n # sorted函数的key=参数没有调用Str.upper《而是把这个方法的引用传递给Sorted函数,排序时,单词被规范成统一格式\n print(word,index[word])\n\ndef countWordInFileUseDefaultdict():\n WORD_CONTENT = re.compile(r'\\w+')\n # 把List构造方法作为Default_factory来创建一个Defaultdict\n index = collections.defaultdict(list)\n\n with open(sys.argv[1], encoding='utf-8') as fp:\n for line_no, line in enumerate(fp, 1):\n for match in WORD_CONTENT.finditer(line):\n word = match.group()\n # print('word is : ',word)\n column_no = match.start() + 1\n # print('column_no is : ',column_no)\n location = (line_no, column_no)\n # 如果index没有word的记录,那么default_Factory会被调用\n index[word].append(location)\n\n for word in sorted(index, key=str.upper):\n print(word, index[word])\n\ndef main():\n # handleDictComprehension()\n countWordInFileUseSetdefault()\n\nif __name__ == '__main__':\n main()\n pass\n","sub_path":"src/reading/fluentpython/charpt3/dictComprehension.py","file_name":"dictComprehension.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"443900881","text":"import jinja2\nimport pandas as pd\n\nfrom eurocalliopelib import filters\n\nimport util\n\nTEMPLATE = \"\"\"\noverrides:\n {% for scenario in [\"2030_current\", \"2030_neutral\", \"2050_current\", \"2050_neutral\"] %}\n {{ scenario }}:\n group_constraints:\n {% for row_id, row in emissions_targets.iterrows() %}\n systemwide_co2_max_{{ row_id }}:\n locs: {{ per_target_regions[row_id] }}\n cost_max.co2: {{ (row[starting_point] - less_coal[row_id]) * (1 - row[scenario]) * 1e6 * scaling_factors.co2_cost }} # {{ (1 / scaling_factors.co2_cost) | unit(\"tCO2\") }}\n {% endfor %}\n {% endfor %}\n\"\"\"\n\n\ndef generate_emissions_scenarios(path_to_emissions_targets, path_to_regions, path_to_annual_demand, scaling_factors, year, projection_year, path_to_result):\n \"\"\"Generate a file that represents links in Calliope.\"\"\"\n emissions_targets = pd.read_csv(path_to_emissions_targets, header=0)\n regions = pd.read_csv(path_to_regions, header=0, index_col=0, squeeze=True)\n annual_demand = util.read_tdf(path_to_annual_demand).xs(year, level=\"year\")\n 
per_target_regions = {}\n less_coal = {}\n if projection_year == \"current\":\n starting_point = \"1990_energy_mtCO2eq\"\n elif projection_year in [\"2050\", 2050]:\n starting_point = \"1990_energy_steel_chemical_mtCO2eq\"\n for _idx in emissions_targets.index:\n per_target_regions[_idx] = [\n i for i in regions.index\n if regions.loc[i] in emissions_targets.loc[_idx, \"region\"].split(\",\")\n ]\n try:\n less_coal[_idx] = (\n annual_demand.xs(\n (\"industry_demand\", \"industry\", \"coal\"),\n level=(\"dataset\", \"cat_name\", \"end_use\")\n )\n .droplevel(\"unit\")\n .loc[per_target_regions[_idx]]\n .mul(0.034) # emissions factor MtCO2/0.1TWh\n .sum()\n )\n except KeyError:\n less_coal[_idx] = 0\n env = jinja2.Environment(lstrip_blocks=True, trim_blocks=True)\n env.filters[\"unit\"] = filters.unit\n\n scenarios = env.from_string(TEMPLATE).render(\n emissions_targets=emissions_targets,\n scaling_factors=scaling_factors,\n per_target_regions=per_target_regions,\n starting_point=starting_point,\n less_coal=less_coal\n )\n with open(path_to_result, \"w\") as result_file:\n result_file.write(scenarios)\n\n\nif __name__ == \"__main__\":\n generate_emissions_scenarios(\n path_to_emissions_targets=snakemake.input.emissions_targets,\n path_to_regions=snakemake.input.regions,\n path_to_annual_demand=snakemake.input.annual_demand,\n scaling_factors=snakemake.params.scaling_factors,\n year=snakemake.params.year,\n projection_year=snakemake.params.projection_year,\n path_to_result=snakemake.output[0],\n )\n","sub_path":"src/construct/template_emissions.py","file_name":"template_emissions.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"652703771","text":"from flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\n\ndb = SQLAlchemy()\nma = Marshmallow()\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(50))\n password = db.Column(db.String(80))\n display_name = db.Column(db.String(150))\n review = db.relationship('Feedback', backref='user_detail')\n\n def __init__(self, username, password, display_name):\n self.username = username\n self.password = password\n self.display_name = display_name\n\nclass Places(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), unique=True)\n location = db.Column(db.String(255))\n lat = db.Column(db.Float)\n long = db.Column(db.Float)\n description = db.Column(db.String(255))\n image_path = db.Column(db.String(255), unique=True)\n review = db.relationship('Feedback', backref='place_detail')\n\nclass Image(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n place_id = db.Column(db.Integer, db.ForeignKey('places.id'))\n image_path = db.Column(db.String(255), unique=True)\n content_description = db.Column(db.String(255))\n place_name = db.relationship('Places', backref='image_url')\n\nclass Feedback(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n place_id = db.Column(db.Integer, db.ForeignKey('places.id'))\n rating = db.Column(db.Float)\n desc = db.Column(db.String(255))\n date = db.Column(db.Date)\n name = db.relationship('Places', backref='reviews')\n user = db.relationship('User', backref='reviewer')\n\nclass UserSchema(ma.Schema):\n class Meta:\n fields = ('id', 'username')\n\nclass Wishlist(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n user_id = 
db.Column(db.Integer, db.ForeignKey('user.id'))\n place_id = db.Column(db.Integer, db.ForeignKey('places.id'))\n place_detail = db.relationship('Places', backref='place_detail')\n\nclass PlacesDetail(ma.Schema):\n class Meta:\n fields = ('id', 'name', 'location', 'lat', 'long', 'description', 'image_path', 'links')\n\nclass FeedbackSchema(ma.Schema):\n class Meta:\n fields = ('id', 'user_id', 'place_id', 'rating', 'desc', 'date', 'place_detail', 'user_detail')\n \n place_detail = ma.Nested(PlacesDetail, only=(\"id\", \"name\"))\n user_detail = ma.Nested(UserSchema)\n \nclass WishlistSchema(ma.Schema):\n class Meta:\n fields = ('id', 'user_id', 'place_id', 'place_detail', 'links')\n \n place_detail = ma.Nested(PlacesDetail, only=(\"id\", \"name\", \"image_path\"))\n links = ma.Hyperlinks(\n {\n 'next': ma.URLFor('place', values=dict(id=\"\"))\n }\n )\n\nclass ImageSchema(ma.Schema):\n class Meta:\n fields = ('id', 'place_id', 'image_path', 'content_description')\n\nclass PlacesSchema(ma.Schema):\n class Meta:\n fields = ('id', 'name', 'location', 'lat', 'long', 'description', 'image_path', 'image_url', 'reviews', 'links')\n \n image_url = ma.Nested(ImageSchema, many=True, only=(\"image_path\", \"content_description\"))\n reviews = ma.Nested(FeedbackSchema, many=True, exclude=['place_id', 'user_id',])\n links = ma.Hyperlinks(\n {\n 'next': ma.URLFor('place', values=dict(id=\"\"))\n }\n )","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"478053042","text":"#!/usr/bin/env python\n# -* coding: utf-8 *-\nfrom urllib import urlencode\n\nfrom .handshaker import Handshaker\n\n\nclass OAuth(object):\n RESPONSE_CODE = 'code'\n\n host = None\n authorize_url = None\n access_token = None\n\n def __init__(self, client_id, client_secret=None, redirect_uri=None):\n self.client_id = client_id\n self.client_secret = client_secret\n self.redirect_uri = redirect_uri\n\n def get_login_url(self, scope=None):\n \"\"\"Get actual login url\"\"\"\n params = {\n \"client_id\": self.client_id,\n \"redirect_uri\": self.redirect_uri,\n \"response_type\": OAuth.RESPONSE_CODE,\n }\n\n if scope:\n params.update(scope=' '.join(scope))\n\n return Handshaker.get_login_url(authorize_url=self.authorize_url,\n params=params)\n","sub_path":"paraleech/auth/OAuth.py","file_name":"OAuth.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"539633675","text":"import os\nimport re\nimport functools\nimport itertools\nimport nltk\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom operator import mul\nfrom string import punctuation\nfrom collections import Counter, defaultdict\n\n\nPUNTUACION = re.compile(r'[{}¡¿…«»—]'.format(punctuation))\n\n\ndef _obtener_ubicacion_absoluta(ubicacion):\n directorio_actual = os.getcwd()\n if '..' in ubicacion:\n lista_ubicacion = ubicacion.split('/')\n lista_directorio_actual = directorio_actual.split('/')\n ubicacion_real = '{}'.format('/'.join(lista_directorio_actual[:-lista_ubicacion.count('..')] + lista_ubicacion[lista_ubicacion.count('..'):]))\n elif '.' 
in ubicacion:\n ubicacion_real = '{}/{}'.format(directorio_actual, ubicacion)\n else:\n ubicacion_real = '{}/{}'.format(directorio_actual, ubicacion)\n return ubicacion_real\n\n##### MÉTODOS DE TF-IDF Y DISTANCIA COSENO DEBERÍAN APLICARSE CON SCIPY O SKLEARN\ndef _similitud_coseno(vector1, vector2):\n similitud = np.dot(vector1, vector2) / (np.linalg.norm(vector1)*np.linalg.norm(vector2))\n return similitud\n\n##### MÉTODOS DE TF-IDF Y DISTANCIA COSENO DEBERÍAN APLICARSE CON SCIPY O SKLEARN\ndef calcular_similitudes(matriz):\n documentos = matriz.columns\n pares = itertools.combinations(documentos, 2)\n similitudes = {'documento_1':[], 'documento_2':[], 'similitud':[]}\n for doc1, doc2 in pares:\n vector1 = matriz[doc1].values\n vector2 = matriz[doc2].values\n similitudes['documento_1'].append(doc1)\n similitudes['documento_2'].append(doc2)\n valor_similitud = _similitud_coseno(vector1, vector2)\n similitudes['similitud'].append(valor_similitud)\n similitudes = pd.DataFrame(similitudes)\n return similitudes\n\n##### MÉTODOS DE TF-IDF Y DISTANCIA COSENO DEBERÍAN APLICARSE CON SCIPY O SKLEARN\ndef calcular_similitud(matriz, documento1, documento2):\n vector1 = matriz[documento1].values\n vector2 = matriz[documento2].values\n similitud = _similitud_coseno(vector1, vector2)\n return similitud\n\n\nclass Documento:\n \n def __init__(self, **kwargs):\n self.texto = None\n self.metadata = dict()\n self.conteo = None\n self.vocabulario = None\n for atributo, valor in kwargs.items():\n setattr(self, atributo, valor)\n \n def _contar_palabras_texto(funcion_lectura_texto):\n def contar_palabras_texto(self, *args):\n if hasattr(self, 'tokenizador_palabras'):\n self.conteo = Counter(self.tokenizador_palabras.tokenize(funcion_lectura_texto(self, *args)))\n else:\n self.conteo = Counter(funcion_lectura_texto(self, *args).split())\n self.vocabulario = list(self.conteo.keys())\n if hasattr(self, 'tokenizador_oraciones'):\n self.oraciones = self.tokenizador_oraciones.tokenize(self.texto)\n else:\n self.oraciones = nltk.tokenize.sent_tokenize(self.texto)\n self.cantidad_types = len(self.vocabulario)\n self.cantidad_tokens = sum(self.conteo.values())\n return contar_palabras_texto\n \n def _extraer_n_gramas_oracion(self, oracion, n, remover_puntuacion = True, minusculas = True):\n inicio = (n-1) * ['']\n final = (n-1) * ['']\n if minusculas:\n oracion = oracion.lower()\n if remover_puntuacion:\n oracion = PUNTUACION.sub('', oracion)\n if hasattr(self, 'tokenizador_palabras'): \n tokens = self.tokenizador_palabras.tokenize(oracion)\n else:\n tokens = oracion.split()\n cadena = inicio + tokens + final\n for indice in range(len(tokens) + n - 1):\n yield tuple(cadena[indice:indice+n])\n\n @_contar_palabras_texto\n def leer_archivo_texto(self, ubicacion, codificacion = 'utf8'):\n with open(ubicacion, encoding = codificacion) as archivo:\n texto = '\\n'.join(archivo.readlines())\n self.texto = texto\n ubicacion_real = _obtener_ubicacion_absoluta(ubicacion)\n self.metadata['archivo de origen'] = ubicacion_real\n return texto\n \n @_contar_palabras_texto\n def leer_variable_texto(self, variable):\n self.texto = variable\n return texto\n \n def calcular_n_gramas(self, n, remover_puntuacion = True, minusculas = True):\n n_gramas = Counter()\n for oracion in self.oraciones:\n n_gramas += Counter(self._extraer_n_gramas_oracion(oracion, n, remover_puntuacion = remover_puntuacion, minusculas = minusculas))\n return n_gramas\n \n def incorporar_metadata(self, **kwargs):\n for campo, valor in kwargs.items():\n 
self.metadata[campo] = valor\n \n def calcular_zipf(self):\n recuento = pd.Series(self.conteo)\n ranking = (\n recuento.reset_index(name = 'cantidad')\n .rename({'index':'palabra'}, axis = 1)\n .sort_values('cantidad', ascending = False)\n .reset_index(drop = 'true')\n )\n ranking['orden'] = ranking.index + 1\n return ranking\n \n def graficar_zipf(self, log = True, dimension = (10,10), **kwargs):\n datos = self.calcular_zipf()\n plt.figure(figsize = dimension)\n sns.set_style('whitegrid')\n if log:\n plt.xscale('log')\n plt.yscale('log')\n plt.tight_layout()\n sns.lineplot(x='orden', y='cantidad',data=datos, **kwargs)\n plt.show()\n\n \nclass Corpus:\n \n def __init__(self, **kwargs):\n self.documentos = dict()\n self.metadata = pd.DataFrame()\n self.conteo = Counter()\n self.vocabulario = self.conteo.keys()\n self.frecuencia_en_documentos = Counter()\n for atributo, valor in kwargs.items():\n setattr(self, atributo, valor)\n \n def actualizar_metadata(self):\n self.metadata = pd.DataFrame()\n for nombre_documento, documento in self.documentos.items():\n self.metadata = pd.concat([self.metadata, pd.DataFrame(documento.metadata, [0])], sort = True).reset_index(drop = True)\n \n def _actualizar(funcion_lectura_texto):\n def actualizar(self, *args):\n estado_previo = set(self.documentos.items())\n funcion_lectura_texto(self, *args)\n estado_nuevo = set(self.documentos.items())\n diferencia = set(estado_nuevo).difference(set(estado_previo))\n for nombre_documento, valores_documento in diferencia:\n self.conteo += self.documentos[nombre_documento].conteo\n self.metadata = pd.concat([self.metadata, pd.DataFrame(self.documentos[nombre_documento].metadata, [0])], sort = True)\n self.frecuencia_en_documentos += Counter(valores_documento.vocabulario)\n self.vocabulario = self.conteo.keys()\n self.cantidad_tokens = sum(self.conteo.values())\n self.cantidad_types = len(self.vocabulario)\n self.cantidad_documentos = len(self.documentos.items())\n self.metadata = self.metadata.reset_index(drop = True)\n return actualizar\n \n def _graficar(funcion_para_armar_grafico):\n def graficar(self, dimension = (10,10), mostrar = True, ubicacion = False, *args, **kwargs):\n sns.set_style('whitegrid')\n plt.figure(figsize = dimension)\n funcion_para_armar_grafico(self, *args, **kwargs)\n if ubicacion:\n plt.savefig(ubicacion, bbox_inches = 'tight')\n if mostrar:\n plt.show()\n else:\n plt.close()\n return graficar\n \n @_actualizar\n def leer_objeto_documento(self, objeto_documento):\n if 'archivo de origen' in objeto_documento.metadata.keys():\n self.documentos[objeto_documento.metadata['archivo de origen']] = objeto_documento\n else:\n self.documentos['documento_{}'.format(len(self.documentos.keys()))] = objeto_documento\n \n @_actualizar\n def leer_archivo(self, ubicacion, codificacion = 'utf8'):\n doc = Documento()\n if hasattr(self, 'tokenizador'):\n doc.tokenizador_palabras = self.tokenizador_palabras\n doc.leer_archivo_texto(ubicacion, codificacion)\n self.documentos[doc.metadata['archivo de origen']] = doc\n \n @_actualizar\n def leer_lista_de_archivos(self, lista, codificacion = 'utf8'):\n for item in lista:\n doc = Documento()\n if hasattr(self, 'tokenizador_palabras'):\n doc.tokenizador_palabras = self.tokenizador_palabras\n doc.leer_archivo_texto(item, codificacion)\n self.documentos[doc.metadata['archivo de origen']] = doc\n \n def leer_directorio(self, ubicacion, codificacion = 'utf8'):\n contenido = os.listdir(ubicacion)\n lista = ['{}/{}'.format(ubicacion, archivo) for archivo in contenido]\n 
self.leer_lista_de_archivos(lista)\n \n def consultar_documento(self, documento):\n return self.documentos[documento]\n \n def consultar_datos_documentos(self):\n datos_documentos = pd.DataFrame()\n for documento, datos in self.documentos.items():\n registro = pd.DataFrame(\n {\n 'archivo de origen':self.documentos[documento].metadata['archivo de origen'],\n 'cantidad de types': self.documentos[documento].cantidad_types,\n 'cantidad de tokens': self.documentos[documento].cantidad_tokens\n }, index = [0]\n )\n datos_documentos = pd.concat([datos_documentos, registro], sort = True)\n datos_documentos = datos_documentos.reset_index(drop = True).merge(self.metadata)\n return datos_documentos\n \n # FUNCIÓN PELIGROSA - Sólo sirve con etiquetadores que tengan un método 'tag' y funcionen sobre oraciones tokenizadas\n def etiquetar_corpus(self, etiquetador, nombre_etiqueta = None):\n if not nombre_etiqueta:\n nombre_etiqueta = etiquetador.__repr__\n for documento, datos in self.documentos.items():\n def etiquetar():\n for oracion in datos.oraciones:\n if hasattr(self, 'tokenizador_palabras'):\n tokenizada = self.tokenizador_palabras.tokenize(oracion)\n else:\n tokenizada = oracion.split()\n etiquetada = etiquetador.tag(tokenizada)\n yield etiquetada\n datos_etiquetados = list(etiquetar())\n setattr(datos, str(nombre_etiqueta), datos_etiquetados)\n \n # FUNCIÓN PELIGROSA - Si el tokenizador no es el mismo que el del tagger, los resultados van a dar sí o sí mal\n def parear_etiquetas(self, nombre_etiqueta, prefijo = 'etiqueta_'):\n for documento, datos in self.documentos.items():\n pares = zip(datos.oraciones, getattr(datos, nombre_etiqueta))\n setattr(datos, '{}{}'.format(prefijo, nombre_etiqueta), list(pares))\n \n def contar_etiquetas_por_documento(self, nombre_etiqueta, prefijo = 'conteo_'):\n for documento, datos in self.documentos.items():\n lista_etiquetada = getattr(datos, nombre_etiqueta)\n conteo_documento = Counter()\n for oracion in lista_etiquetada:\n conteo_oracion = Counter([item[1] for item in oracion])\n conteo_documento += conteo_oracion\n setattr(datos, '{}{}'.format(prefijo, nombre_etiqueta), conteo_documento)\n \n def contar_etiquetas(self, nombre_etiqueta, prefijo = 'conteo_'):\n self.contar_etiquetas_por_documento(nombre_etiqueta, prefijo)\n total = Counter()\n for documento, datos in self.documentos.items():\n total += getattr(datos, '{}{}'.format(prefijo, nombre_etiqueta))\n total = pd.Series(total).reset_index(name = 'cantidad').sort_values('cantidad', ascending = False)\n return total\n \n @_graficar\n def graficar_etiquetas_por_frecuencia(self, nombre_etiqueta, prefijo = 'conteo_', **kwargs):\n datos = self.contar_etiquetas(nombre_etiqueta, prefijo)\n sns.barplot(x='cantidad',y='index', data = datos, palette='viridis')\n \n def calcular_n_gramas(self, n, remover_puntuacion = True, minusculas = True):\n total = Counter()\n for documento, datos in self.documentos.items():\n total += datos.calcular_n_gramas(n, remover_puntuacion = True, minusculas = True)\n return total\n \n ##### MÉTODOS DE TF-IDF Y DISTANCIA COSENO DEBERÍAN APLICARSE CON SCIPY O SKLEARN\n def calcular_matriz_termino_documento(self):\n matriz = pd.DataFrame()\n for documento, datos in self.documentos.items():\n cantidades = pd.Series(datos.conteo).T\n cantidades.name = documento\n matriz = matriz.append(cantidades)\n matriz = matriz.T.fillna(0)\n return matriz\n \n ##### MÉTODOS DE TF-IDF Y DISTANCIA COSENO DEBERÍAN APLICARSE CON SCIPY O SKLEARN\n ##### EL CÁLCULO DE TF-IDF ACÁ ES DUDOSO\n def 
calcular_matriz_tf_idf(self):\n matriz = pd.DataFrame()\n termino_documento = self.calcular_matriz_termino_documento()\n for termino in termino_documento.index:\n idf = np.log(len(self.documentos.items())/self.frecuencia_en_documentos[termino])\n if idf == 0:\n idf = .1\n tfs = termino_documento.loc[termino]\n tfidf = tfs / idf\n tfidf.name = termino\n matriz = matriz.append(tfidf)\n matriz = matriz.fillna(0)\n return matriz\n \n def calcular_zipf(self):\n recuento = pd.Series(self.conteo)\n ranking = (\n recuento.reset_index(name = 'cantidad')\n .rename({'index':'palabra'}, axis = 1)\n .sort_values('cantidad', ascending = False)\n .reset_index(drop = 'true')\n )\n ranking['orden'] = ranking.index + 1\n return ranking\n \n @_graficar\n def graficar_zipf(self, log = True, **kwargs):\n datos = self.calcular_zipf()\n if log:\n plt.xscale('log')\n plt.yscale('log')\n plt.tight_layout()\n sns.lineplot(x='orden', y='cantidad',data=datos, **kwargs)\n \n @_graficar\n def graficar_heaps(self, log = True, **kwargs):\n datos = self.consultar_datos_documentos()\n if log:\n plt.xscale('log')\n plt.yscale('log')\n plt.tight_layout()\n sns.lineplot(x='cantidad de tokens' ,y='cantidad de types', data=datos, **kwargs)\n sns.scatterplot(x='cantidad de tokens' ,y='cantidad de types', data=datos, **kwargs, s = 200)\n \n def seleccionar_subconjunto(self, lista_documentos):\n subconjunto = Corpus()\n for documento in lista_documentos:\n subconjunto.leer_objeto_documento(documento)\n return subconjunto\n \n \nclass NGramas: #### TODA ESTA CLASE ESTÁ MEDIO MOCHA\n \n def __init__(self, corpus, n):\n self.n = n\n setattr(self, '_{}_gramas'.format(n), dict(corpus.calcular_n_gramas(n)))\n setattr(self, '_{}_gramas'.format(n-1), dict(corpus.calcular_n_gramas(n-1)))\n setattr(self, 'probabilidad_{}_gramas'.format(n), sum(getattr(self, '_{}_gramas'.format(n)).values()))\n setattr(self, 'probabilidad_{}_gramas'.format(n-1), sum(getattr(self, '_{}_gramas'.format(n-1)).values()))\n \n \n #### ESTA FUNCIÓN ANDA LISA Y LLANAMENTE MAL. REVISAR. 
\n def calcular_probabilidad_oracion(self, oracion, tokenizador = None):\n inicio = (self.n-1) * ['']\n final = (self.n-1) * ['']\n oracion = oracion.lower()\n if tokenizador: \n tokens = tokenizador.tokenize(oracion)\n else:\n tokens = oracion.split()\n cadena = inicio + tokens + final\n def probabilidad_ngramas_oracion():\n for indice in range(len(tokens) + self.n):\n if not tuple(cadena[indice:indice+self.n]) in getattr(self, '_{}_gramas'.format(self.n)):\n frecuencia_cadena = 0.01\n else:\n frecuencia_cadena = getattr(self, '_{}_gramas'.format(self.n))[tuple(cadena[indice:indice+self.n])]\n if not tuple(cadena[indice:indice+self.n-1]) in getattr(self, '_{}_gramas'.format(self.n-1)):\n frecuencia_previos = 0.01\n else:\n frecuencia_previos = getattr(self, '_{}_gramas'.format(self.n-1))[tuple(cadena[indice:indice+self.n-1])]\n probabilidad_cadena = frecuencia_cadena/getattr(self, 'probabilidad_{}_gramas'.format(self.n))\n probabilidad_previos = frecuencia_previos/getattr(self, 'probabilidad_{}_gramas'.format(self.n-1))\n yield probabilidad_cadena/probabilidad_previos\n probabilidad_oracion = functools.reduce(mul, probabilidad_ngramas_oracion())\n return probabilidad_oracion\n \n def generar_tupla(self, inicio):\n posibilidades = [(tupla,getattr(self, '_{}_gramas'.format(self.n))[tupla]) for tupla in getattr(self, '_{}_gramas'.format(self.n)) if tuple(tupla[:self.n-1]) == inicio]\n valores_muestreo = np.cumsum([valor[1] for valor in posibilidades])\n pares_muestreo = zip(valores_muestreo, [valor[0] for valor in posibilidades])\n tabla_muestreo = {valor:tupla for valor,tupla in pares_muestreo}\n muestra = np.random.randint(max(tabla_muestreo.keys()))\n if muestra in tabla_muestreo.keys():\n tupla = tabla_muestreo[muestra]\n else:\n resto = [valor for valor in tabla_muestreo.keys() if valor > muestra]\n tupla = tabla_muestreo[min(resto)]\n return tupla[-1]\n \n def generar_texto(self, oracion = None):\n if not oracion:\n oracion = tuple((self.n-1) * [''])\n nueva_palabra = self.generar_tupla(tuple(oracion[-(self.n-1):]))\n while nueva_palabra != '':\n nueva_palabra = self.generar_tupla(tuple(oracion[-(self.n-1):]))\n oracion = list(oracion) + [nueva_palabra]\n oracion_final = ' '.join(oracion[self.n:-1]).capitalize() +'.'\n return oracion_final","sub_path":"ejercicio_final/herramientas_corpora.py","file_name":"herramientas_corpora.py","file_ext":"py","file_size_in_byte":18344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"303001592","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/10/24 11:07\n# @Author : LCH\n# @Site : \n# @File : config.py\n# @Software: PyCharm\n# oralce 连接字段\nuser = \"psmuser\" #用户名\npwd = \"zaq12WSX\" #密码\nhost = \"10.159.3.34\" #主机\n# host = \"localhost\"\nport =\"1521\" #端口\nsid = \"de3db\" #sid\ntable = 'identifyobject' #要查询的表名","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"457625850","text":"import re\r\nimport sys\r\nimport sqlite3\r\nimport codecs\r\n\t\t\r\n#db = Db(sqlite3.connect('dbtest2' + '.db'), Sql())\r\n\t\t\r\n\r\n'''\r\ndb = sqlite3.connect('dbtest1.db')\r\ncur = db.cursor()\r\n\r\n##add data file into database\r\ncategory = \"Sports\"\r\nwith open(\"w2_.txt\", \"r\") as sports:\r\n lines = sports.readlines()\r\n\r\nfor line in lines:\r\n # Split the line on whitespace\r\n data = line.split()\r\n number = data[0]\r\n word1 = 
data[1]\r\n print data\r\n cur.execute(INSERT INTO users(person_id, category, type)\r\n\t\t\tVALUES(?,?,?), (number, category, word1))\r\n\r\ndb.commit()\r\ndb.close()\t\r\n\t\t\r\ncategory = \"Sports\"\r\nwith open(\"w2_.txt\", \"r\") as sports:\r\n lines = sports.readlines()\r\n\r\nfor line in lines:\r\n # Split the line on whitespace\r\n data = line.split()\r\n number = data[0]\r\n word1 = data[1]\r\nprint \"end\"\t'''\r\n############### CREATE DB ##########################\r\n#db = sqlite3.connect('dbtest16.db')\r\n#cursor = db.cursor()\r\n#cursor.execute('''\r\n # CREATE TABLE word(word1 TEXT,\r\n # word2 TEXT,word3 TEXT, count INTEGER)''')\r\n#cursor.execute('''\r\n# CREATE TABLE param(name TEXT,\r\n# value INTEGER)''')\r\n\r\n\t\t\t\t\t \r\n#db.commit()\r\n#db.close()\r\n#################INSERT DB #########################\r\ndb = sqlite3.connect('dbtest16.db')\r\ncursor = db.cursor()\r\n\r\nimport codecs\r\n\r\n\t\r\nwith open(\"w3.txt\", \"r\") as sports:\r\n lines = sports.readlines()\r\n\t\r\nfor line in lines:\r\n # Split the line on whitespace\r\n data = line.split()\r\n number = data[0]\r\n word1 = data[1]\r\n word2= data[2]\r\n word3 = data[3]\r\n name ='depth'\r\n value = 2\r\n #print word2\r\n cursor.execute('''INSERT INTO word(word1,word2,word3,count)\r\n\t\tVALUES(?,?,?,?)''', (word1,word2,word3,number))\t\r\n\t\t\r\n cursor.execute('''INSERT INTO param(name,value)\r\n\t\tVALUES(?,?)''', (name,value))\t\r\n#print \"end\"\t\r\n \r\ndb.commit()\r\ndb.close()\r\n","sub_path":"db_from_txt.py","file_name":"db_from_txt.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"198832452","text":"\"\"\"\r\nCSCI-603: Graphs\r\nAuthor: Sean Strout @ RIT CS\r\nAuthor: Pavan Prabhakar Bhat (pxb8715@rit.edu)\r\n\r\nAn implementation of a graph data structure as an adjacency list.\r\n\r\nCode taken from the online textbook and modified:\r\n\r\nhttp://interactivepython.org/runestone/static/pythonds/Graphs/Implementation.html\r\n\"\"\"\r\n\r\nimport sys # argv\r\nimport collections\r\nimport os.path\r\nimport math\r\n\r\nmaxScore = 0\r\nscore = 0\r\ncolor = 0\r\ncows = []\r\ncows1 = []\r\ncowColors = []\r\ncowColors1 = []\r\ncurrent = []\r\n\r\nclass Graph:\r\n \"\"\"\r\n A graph implemented as an adjacency list of vertices.\r\n\r\n :slot: vertList (dict): A dictionary that maps a vertex key to a Vertex\r\n object\r\n :slot: numVertices (int): The total number of vertices in the graph\r\n \"\"\"\r\n\r\n __slots__ = 'vertList', 'numVertices', 'srcFile'\r\n\r\n def __init__(self, srcFile):\r\n \"\"\"\r\n Initialize the graph\r\n :return: None\r\n \"\"\"\r\n self.vertList = {}\r\n self.numVertices = 0\r\n self.srcFile = srcFile\r\n\r\n def addVertex(self, key):\r\n \"\"\"\r\n Add a new vertex to the graph.\r\n :param key: The identifier for the vertex (typically a string)\r\n :return: Vertex\r\n \"\"\"\r\n # count this vertex if not already present\r\n if self.getVertex(key) == None:\r\n self.numVertices += 1\r\n vertex = Vertex(key)\r\n self.vertList[key] = vertex\r\n return vertex\r\n\r\n def getVertex(self, key):\r\n \"\"\"\r\n Retrieve the vertex from the graph.\r\n :param key: The vertex identifier\r\n :return: Vertex if it is present, otherwise None\r\n \"\"\"\r\n if key in self.vertList:\r\n return self.vertList[key]\r\n else:\r\n return None\r\n\r\n def __contains__(self, key):\r\n \"\"\"\r\n Returns whether the vertex is in the graph or not. 
This allows the\r\n user to do:\r\n\r\n key in graph\r\n\r\n :param key: The vertex identifier\r\n :return: True if the vertex is present, and False if not\r\n \"\"\"\r\n return key in self.vertList\r\n\r\n def addEdge(self, src, dest, cost=0):\r\n \"\"\"\r\n Add a new directed edge from a source to a destination of an edge cost.\r\n :param src: The source vertex identifier\r\n :param dest: The destination vertex identifier\r\n :param cost: The edge cost (defaults to 0)\r\n :return: None\r\n \"\"\"\r\n if src not in self.vertList:\r\n self.addVertex(src)\r\n if dest not in self.vertList:\r\n self.addVertex(dest)\r\n self.vertList[src].addNeighbor(self.vertList[dest], cost)\r\n\r\n def getVertices(self):\r\n \"\"\"\r\n Return the collection of vertex identifiers in the graph.\r\n :return: A list of vertex identifiers\r\n \"\"\"\r\n return self.vertList.keys()\r\n\r\n def __iter__(self):\r\n \"\"\"\r\n Return an iterator over the vertices in the graph. This allows the\r\n user to do:\r\n\r\n for vertex in graph:\r\n ...\r\n\r\n :return: A list iterator over Vertex objects\r\n \"\"\"\r\n return iter(self.vertList.values())\r\n\r\n def beginSimulation(self, keys, values, start=\"\"):\r\n \"\"\"\r\n This function is used to begin simulation of the graph.\r\n :param keys: Holds the paint balls used to paint the cows or to trigger different paint balls\r\n :param values: Holds the cows and other colors that can be triggered\r\n :param start: Starting value from which the triggering of a paint ball will begin\r\n :return: Score as to how many cows were painted by a single paint ball\r\n \"\"\"\r\n # global constants\r\n global score, cows, cowColors, maxScore, cows1, cowColors1, current\r\n\r\n for i in range(len(keys)):\r\n if start == keys[i].vertexName and values[i].vertexType == 'cow':\r\n print('\\t', values[i].vertexName, 'is painted', start + '!')\r\n cows.append(values[i].vertexName)\r\n cowColors.append(start)\r\n score += 1\r\n elif start == keys[i].vertexName:\r\n print('\\t', values[i].vertexName, 'paint ball is triggered by', start, 'paint ball')\r\n print(current, values[i].vertexName)\r\n if values[i].vertexName not in current:\r\n self.beginSimulation(keys, values, values[i].vertexName)\r\n return score\r\n\r\nclass Vertex:\r\n \"\"\"\r\n An individual vertex in the graph.\r\n\r\n :slots: id: The identifier for this vertex (user defined, typically\r\n a string)\r\n :slots: connectedTo: A dictionary of adjacent neighbors, where the key is\r\n the neighbor (Vertex), and the value is the edge cost (int)\r\n \"\"\"\r\n\r\n __slots__ = 'id', 'connectedTo'\r\n\r\n def __init__(self, key):\r\n \"\"\"\r\n Initialize a vertex\r\n :param key: The identifier for this vertex\r\n :return: None\r\n \"\"\"\r\n self.id = key\r\n self.connectedTo = {}\r\n\r\n def addNeighbor(self, nbr, weight=0):\r\n \"\"\"\r\n Connect this vertex to a neighbor with a given weight (default is 0).\r\n :param nbr (Vertex): The neighbor vertex\r\n :param weight (int): The edge cost\r\n :return: None\r\n \"\"\"\r\n self.connectedTo[nbr] = weight\r\n\r\n def __str__(self):\r\n \"\"\"\r\n Return a string representation of the vertex and its direct neighbors:\r\n\r\n vertex-id connectedTo [neighbor-1-id, neighbor-2-id, ...]\r\n\r\n :return: The string\r\n \"\"\"\r\n return str(self.id) + ' connectedTo: ' + str([str(x.id) for x in self.connectedTo])\r\n\r\n def getConnections(self):\r\n \"\"\"\r\n Get the neighbor vertices.\r\n :return: A list of Vertex neighbors\r\n \"\"\"\r\n return self.connectedTo.keys()\r\n\r\n def 
getWeight(self, nbr):\r\n \"\"\"\r\n Get the edge cost to a neighbor.\r\n :param nbr (Vertex): The neighbor vertex\r\n :return: The weight (int)\r\n \"\"\"\r\n return self.connectedTo[nbr]\r\n\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n A main function for the Graph class.\r\n :return: None\r\n \"\"\"\r\n # Checks if the number of arguments entered through the command line are appropriate or exits\r\n if len(sys.argv) != 3:\r\n print('Usage: python3 holicow.py source-file.txt')\r\n sys.exit(0)\r\n\r\n # Holds temporary values of the vertices\r\n tempVertices = []\r\n # Holds the index of the list which is read from the file\r\n count = 0\r\n # Holds the details of the vertices temporarily before inputting the named tuple\r\n list1 = []\r\n # A temporary list that holds the vertices in the form of a named tuple\r\n vertex = collections.namedtuple('Vertices', ['vertexType', 'vertexName', 'x', 'y', 'splatterRadius'])\r\n # Holds the paint balls that are supposed to trigger\r\n keys = []\r\n # Holds the color or the cow which is being triggered by another color held in keys\r\n values = []\r\n # Holds a list of names of cows on the field\r\n listOfCows = []\r\n # Global constants used\r\n global maxScore, score, color, cows, cows1, cowColors, cowColors1, current\r\n # object of class Graph\r\n graph = Graph(sys.argv[2])\r\n try:\r\n # File contains the appropriate file contents\r\n file = open(sys.argv[2])\r\n except IOError:\r\n # Handles the exception if the file is not found\r\n print(\"File Not Found: \", sys.argv[2])\r\n sys.exit(0)\r\n\r\n print('Field of Dreams')\r\n print('---------------')\r\n for l in file:\r\n if l.find(\"paintball\") :\r\n s = l.split()\r\n list1.append(s)\r\n # Contains the list of cows in the field\r\n listOfCows.append(list1[count][1])\r\n # Contains cows\r\n tempVertices.append(vertex(list1[count][0], list1[count][1], float(list1[count][2]), \\\r\n float(list1[count][3]), float(0)))\r\n count += 1\r\n elif l.find(\"cow\"):\r\n s = l.split()\r\n list1.append(s)\r\n # Contains paint balls\r\n tempVertices.append(vertex(list1[count][0], list1[count][1], float(list1[count][2]), \\\r\n float(list1[count][3]), float(list1[count][4])))\r\n count += 1\r\n else:\r\n print('Unknown input')\r\n\r\n for i in range(len(tempVertices)):\r\n if(tempVertices[i].vertexType == 'paintball'):\r\n for j in range(len(tempVertices)):\r\n if ( i is not j):\r\n # Distance formula to calculate the distance from the neighbouring vertices\r\n distance = math.sqrt(((tempVertices[j][2] - tempVertices[i][2]) ** 2) \\\r\n + ((tempVertices[j][3] - tempVertices[i][3]) ** 2))\r\n if(distance <= tempVertices[i][4]):\r\n # adds the edges of the graph\r\n graph.addEdge(tempVertices[i].vertexName, tempVertices[j].vertexName)\r\n # appends the paint balls and the colors and cows triggered by it\r\n keys.append(tempVertices[i])\r\n values.append(tempVertices[j])\r\n # Displays the field through the connections of each vertice\r\n for i in graph:\r\n print(i)\r\n # contains the paint balls to be triggered\r\n paintballs = []\r\n # contains the score of the paint balls\r\n paintballScore = []\r\n a = 0\r\n\r\n print()\r\n print('Beginning simulation...')\r\n for j in range(len(tempVertices)):\r\n score = 0\r\n if(tempVertices[j].vertexType == 'paintball'):\r\n print('Triggering', tempVertices[j].vertexName, 'paint ball...')\r\n current[:] = []\r\n current.append(tempVertices[j].vertexName)\r\n paintballs.append(tempVertices[j].vertexName)\r\n start = tempVertices[j].vertexName\r\n 
paintballScore.append(graph.beginSimulation(keys, values, start))\r\n # check for the list of cows to be colored\r\n if len(cows) > len(cows1):\r\n cows1 = cows\r\n cowColors1 = cowColors\r\n cows = []\r\n cowColors = []\r\n\r\n\r\n temp = max(paintballScore)\r\n\r\n for i in range(len(paintballScore)):\r\n if temp == paintballScore[i]:\r\n a = i\r\n break\r\n\r\n print()\r\n print('Results:')\r\n if paintballScore[a] == 0:\r\n print(\"No cows were painted by any starting paint ball!\")\r\n else:\r\n print('Triggering the', paintballs[a], 'paint ball is the best choice with', paintballScore[a],\\\r\n 'total paint on the cows:')\r\n cowColor = {}\r\n\r\n # forms a dictionary of cows and their paints\r\n\r\n for i in range(len(cows1)):\r\n if cows1[i] not in cowColor:\r\n cowColor[cows1[i]] = [cowColors1[i]]\r\n elif cowColors1[i] not in cowColor[cows1[i]]:\r\n cowColor[cows1[i]].append(cowColors1[i])\r\n\r\n\r\n for j in cowColor.items():\r\n print('\\t'+j[0]+\"'s colors: {\" + str(j[1])[1: len(str(j[1]))-1] + \"}\")\r\n\r\n for l in listOfCows:\r\n\r\n if l not in [k[0] for k in cowColor.items()]:\r\n print('\\t' + l + \"'s colors: {\" + \"}\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"holicow.py","file_name":"holicow.py","file_ext":"py","file_size_in_byte":11294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"335411901","text":"# _*_ coding: utf-8 _*_\n\"\"\"This file is a Jobomas spider created on top of the ATSSpider\nscrapy crawl jobomas -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://br.jobomas.com/Emprego-Pais-Brasil\"\nsample url:\n http://br.jobomas.com/Emprego-Pais-Brasil\n\"\"\"\nfrom urlparse import urljoin\nfrom zlib import crc32\nfrom scrapy.conf import settings\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, Prefix, NormalizedJoin, Replace\n\n\nclass Jobomas(ATSSpider):\n\n name = 'jobomas'\n\n # invalid company name ,This value should be removed from company name\n company_remove_string = \"Vagas de emprego relacionadas\"\n\n def __init__(self, *args, **kwargs):\n super(Jobomas, self).__init__(*args, **kwargs)\n # due to blocking\n settings.overrides['USER_AGENT'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0'\n self.location_xpath = [\n \"//*[@class='caja_interior_detalle' or @id='datos-vacante']//*[contains(text(),'%s')]/following-sibling::td//text()\" % unicode('Cidade/Bairro obrigatório:', 'utf-8'),\n \"//*[@class='caja_interior_detalle' or @id='datos-vacante']//*[contains(text(), '%s')]/following-sibling::td[1]//text()\" % unicode('Região/Estado:', 'utf-8'),\n \"//*[@class='caja_interior_detalle' or @id='datos-vacante']//*[contains(text(),'%s')]/following-sibling::td//text()\" % unicode('País:', 'utf-8')\n ]\n self.company_xpath = \"//*[contains(text(), '%s')]/following-sibling::td//text()\" % unicode('Empresa:', 'utf-8')\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath(\n \"//div[@class='cont_listado_vacantes']/div\"\n )\n for job in jobs:\n job_link = job.xpath(\".//p[@class='tituloListado']/a/@href\").extract()\n if job_link:\n job_url = urljoin(response.url, job_link[0])\n meta = {\n 'title': job.xpath(\".//p[@class='tituloListado']/a/text()\").extract(),\n }\n yield Request(\n job_url, meta=meta, callback=self.parse_job_callback()\n 
)\n\n next_page = sel.xpath(\n \"//div[@class='paginate']/span[contains(@class, 'cur-page')]/following-sibling::a[1]/@href\"\n ).extract()\n if next_page:\n next_url = urljoin(response.url, next_page[0])\n yield Request(next_url, callback=self.parse)\n\n def parse_job(self, response):\n description_xpaths = [\n \"//div[@itemprop='description']/node()\",\n \"//tr/td/p[@itemprop='description']\",\n \"//div/p[@itemprop='description']\",\n ]\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('apply_url', response.url)\n loader.add_value('title', response.meta['title'])\n loader.add_xpath('location', self.location_xpath, NormalizedJoin(\", \"))\n for description_xpath in description_xpaths:\n loader.add_xpath(\n 'description',\n description_xpath\n )\n if loader.get_output_value('description'):\n break\n loader.add_xpath(\n 'referencenumber',\n str(crc32(response.url)),\n Prefix(self.name+\"-\")\n )\n loader.add_xpath(\n 'company', self.company_xpath,\n Replace(self.company_remove_string, \"\")\n )\n loader.add_xpath(\n 'date',\n \"//tr/*[contains(text(), '%s')]/following-sibling::td[1]//text()\" % unicode('Data de publicação:', 'utf-8'),\n ConvertDateString('%m/%d/%Y')\n )\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/jobomas.py","file_name":"jobomas.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"553998123","text":"#!/usr/bin/env python2\n\n# $Date$\n# $Revision$\n# $Author$\n# $HeadURL$\n# $Id$\n\n\nimport sys, re, os\n\nfrom subprocess import Popen, PIPE\n\nclass SVN(object):\n\n cmd_status = 'svn status %s %' # options, files\n cmd_diff = 'svn diff %s %s' # options, files\n\n def __init__(self):\n\n tokens = {\n 'A' : 'Green',\n 'M' : 'Orange',\n '\\?' 
: 'Purple',\n 'D' : 'Red',\n 'I' : 'Teal',\n 'C' : 'Blue',\n }\n self.replacements = {} \n for key in tokens.keys():\n colour = tokens[key]\n p = re.compile('^(%s.*)$'%key)\n #print p.pattern\n self.replacements[p] = colour\n\n\n process = Popen(cmd,shell=True,stdout=PIPE)\n #prettyPrint(process)\n \n while True:\n line = process.stdout.readline()\n if not line:\n break\n line = line.rstrip('\\n')\n line = line.rstrip('\\r')\n skip = False\n for p in replacements.keys():\n m = p.match(line)\n if m:\n if args.ignore and line[0]=='A':\n skip = True\n break\n line = '%s%s%s'%(mycolours[replacements[p]],m.group(1),mycolours['Off'])\n break\n if skip:\n continue\n output.write('%s%s'%(line,cr))\n\n del process\n\n if args.html:\n output.write('\\n')\n output.close()\n \n return\n\nif __name__ == '__main__': main()\n","sub_path":"svn.py","file_name":"svn.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"417555544","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_exempt\nimport sys\nfrom .base import APIView, Resp404\n\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse, Http404, HttpResponseBadRequest\n# from celery_tasks import run, run_cmd, terraform_plan\nimport json\n\nfrom rest_framework.views import APIView as drf_APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom ..models import Stack, Template\nfrom ..consts import StackStatus\n# from ..engine.terraform import terraform_show\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\n@method_decorator(require_http_methods(['POST', 'GET']), name='dispatch')\nclass TestAPIView(APIView):\n\n def get(self, request, *args, **kwargs):\n return JsonResponse(\n {\"hello\": \"world\"}\n )\n\n def post(self, request, *args, **kwargs):\n data = request.body.decode() if request.body else '{}'\n data = json.loads(data) or {}\n uuid = data.get('uuid', None)\n stack = Stack.objects.get(owner=request.user, uuid=uuid)\n\n if not stack:\n return Resp404('Stack %s is not found.' % uuid)\n\n rc = ''\n try:\n task = terraform_plan(stack)\n print(task.id)\n print(task.status)\n print(task.result)\n\n except FileNotFoundError as err:\n log.error(str(err))\n return JsonResponse(\n {\"error\": \"Stack work folder is not found.\",\n \"msg\": \"You probably removed the stack working folder by mistake. 
\"\n \"Please set stack status to '%s' and ran again.\" % StackStatus.get_text(StackStatus.NEW)},\n status=500,\n # reason='Stack work folder is not found.'\n )\n except Exception as err:\n log.exception(err)\n return JsonResponse(\n {\"error\": str(err), \"msg\": rc},\n status=500,\n # reason=str(err)\n )\n\n return JsonResponse({\n \"task\": task.id,\n \"result\": task.result,\n \"status\": task.status\n })\n\n\n# @method_decorator(csrf_exempt, name='dispatch')\n# @method_decorator(require_http_methods(['POST', 'GET']), name='dispatch')\nclass Test1APIView(drf_APIView):\n permission_classes = (IsAuthenticated, )\n def get(self, request, format=None):\n return Response(\n {\"hello\": \"world\"}\n )\n","sub_path":"cpsapi/views/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"445790999","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport jsontableschema\nfrom tabulator import Stream\nfrom jsontableschema import Schema, validate\nfrom ..register import preset\nfrom ..spec import spec\n\n\n# Module API\n\n@preset('table')\ndef table(source, schema=None, **options):\n errors = []\n tables = []\n\n # Prepare schema\n if schema is not None:\n descriptor = schema\n try:\n # https://github.com/frictionlessdata/jsontableschema-py/issues/113\n from jsontableschema.helpers import load_json_source\n loaded_descriptor = load_json_source(schema)\n validate(loaded_descriptor, no_fail_fast=True)\n schema = Schema(loaded_descriptor)\n except jsontableschema.exceptions.MultipleInvalid as exception:\n for error in exception.errors:\n # Error message should contain schema source (often it's path)\n message = spec['errors']['jsontableschema-error']['message']\n message = message.format(\n error_message='{problem} [{source}]'.format(\n problem=str(error).splitlines()[0],\n source=str(descriptor)))\n errors.append({\n 'code': 'jsontableschema-error',\n 'message': message,\n 'row-number': None,\n 'column-number': None,\n })\n\n # Add table\n if not errors:\n options.setdefault('headers', 1)\n tables.append({\n 'source': str(source),\n 'stream': Stream(source, **options),\n 'schema': schema,\n 'extra': {},\n })\n\n return errors, tables\n","sub_path":"goodtables/presets/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"204161410","text":"import sys\nimport matplotlib\n#matplotlib.use('Agg')\n#matplotlib.use('macosx')\n#matplotlib.use('Qt4Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.cm as cm\nfrom colorsys import hls_to_rgb\n\n#https://stackoverflow.com/questions/37299142/how-to-set-a-colormap-which-can-give-me-over-20-distinct-colors-in-matplotlib\n# User Neocortex\ndef get_distinct_colors(n):\n colors = []\n for i in np.arange(0., 360., 360. / n):\n h = i / 360.\n l = (50 + np.random.rand() * 10) / 100.\n s = (90 + np.random.rand() * 10) / 100.\n colors.append(hls_to_rgb(h, l, s))\n return colors\n\nmy_cols = get_distinct_colors(17)\nprint(my_cols)\n\n'''\n The data you are drawing from is stacked in set Pe_B planes. So,\n rows 1-11 (that's 11 rows) are all binary values which indicate\n phase separation ( 0 = no, 1 = yes) at Pe_B = 0. Thus, rows \n 12-22 are for Pe_B = 10 etc. etc. 
until Pe_B = 150 (16 total\n planes).\n \n Ultimately, Mathematica was better at this, check out \n \"exp_theory_overlay.nb\"\n'''\n\n#file = str(sys.argv[1])\nfile = \"/Users/kolbt/Desktop/phase_3D.txt\"\ndata = np.loadtxt(file, dtype=np.int8)\n# start with 1D array\npeb = np.zeros((16), dtype=np.ndarray)\nvar = 0\nfor i in range(0,16):\n peb[i] = data[var:var+11, :16]\n var += 11\n\n# for loop the fuck outta this\nphase_3D = np.zeros((16,16,11), dtype=np.int8)\nfor j in range(0,16):\n r=0\n c=0\n for i in range(0,len(data)):\n phase_3D[c][j][r] = data[i][j]\n r += 1\n if r == 11:\n r = 0\n c += 1\n\n# add together set points over pb (min = 0, max = 16)\nplane_heatmap = np.zeros((16, 11), dtype = np.int)\nfor iii in range(0,16):\n for jjj in range(0,11):\n for lll in range(0,16):\n plane_heatmap[iii][jjj] += phase_3D[iii][lll][jjj]\n\nprint(plane_heatmap)\n\nplt.imshow(plane_heatmap.T, origin='lower', cmap='gnuplot_r', vmin=0, vmax=16)\nplt.colorbar()\nplt.show()\n\n## start plotting the data as voxels\n#fig = plt.figure()\n#ax = fig.add_subplot(111, projection='3d')\n#\n#for idx in range(0,16):\n# for idy in range(0,16):\n# for idz in range(0,11):\n# if phase_3D[idx,idy,idz] != 0:\n# ax.scatter(idx, idy, idz, c=my_cols[idz], s=40, alpha=0.8, depthshade=True)\n\n","sub_path":"phase_diagrammer/plane_heatmap.py","file_name":"plane_heatmap.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"360572862","text":"# -*- coding: utf-8 -*-\n\"\"\"\nEdit Toolbar middleware\n\"\"\"\nfrom cms.cms_toolbar import CMSToolbar\nfrom django import template\nfrom django.core.urlresolvers import reverse, NoReverseMatch\nfrom django.http import HttpResponse\nfrom django.template.context import RequestContext\nfrom django.template.loader import render_to_string\nimport re\nimport warnings\n\nHTML_TYPES = ('text/html', 'application/xhtml+xml')\n\ntry:\n ADMIN_BASE = reverse(\"admin:index\")\nexcept NoReverseMatch:\n ADMIN_BASE = None\n\nBODY_RE = re.compile(r'', re.IGNORECASE)\nBACKWARDS_COMPAT_TEMPLATE = template.Template(\n \"{% load cms_tags %}{{ pre|safe }}{% cms_toolbar %}{{ post|safe }}\"\n)\n\ndef toolbar_plugin_processor(instance, placeholder, rendered_content, original_context):\n data = {\n 'instance': instance,\n 'rendered_content': rendered_content\n }\n return render_to_string('cms/toolbar/placeholder_wrapper.html', data)\n\ndef _patch(data, request):\n match = BODY_RE.search(data)\n if not match:\n return data\n warnings.warn(\"You have to use the {% cms_toolbar %} tag in your templates \"\n \"if you use the cms.middleware.toolbar.ToolbarMiddleware.\",\n DeprecationWarning)\n end = match.end()\n ctx = RequestContext(request)\n ctx['pre'] = data[:end]\n ctx['post'] = data[end:]\n return BACKWARDS_COMPAT_TEMPLATE.render(ctx)\n\nclass ToolbarMiddleware(object):\n \"\"\"\n Middleware to set up CMS Toolbar.\n \"\"\"\n\n def should_show_toolbar(self, request):\n \"\"\"\n Check if we should show the toolbar for this request or not.\n \"\"\"\n if ADMIN_BASE and request.path.startswith(ADMIN_BASE):\n return False\n # check session\n if request.session.get('cms_edit', False):\n return True\n # check GET\n if 'edit' in request.GET:\n request.session['cms_edit'] = True\n return True\n return False\n\n def process_request(self, request):\n \"\"\"\n If we should show the toolbar for this request, put it on\n request.toolbar. 
Then call the request_hook on the toolbar.\n \"\"\"\n if self.should_show_toolbar(request):\n request.toolbar = CMSToolbar()\n response = request.toolbar.request_hook(request)\n if isinstance(response, HttpResponse):\n return response\n \n def process_response(self, request, response):\n \"\"\"\n For backwards compatibility, will be removed in 2.3\n \"\"\"\n \n if not getattr(request, 'toolbar', False):\n return response\n if getattr(request, '_cms_toolbar_tag_used', False):\n return response\n if not response['Content-Type'].startswith(HTML_TYPES):\n return response\n response.content = _patch(response.content, request)\n return response","sub_path":"cms/middleware/toolbar.py","file_name":"toolbar.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"466414484","text":"import numpy as np\nimport cv2\nimport pickle\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import svm\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.multiclass import OneVsOneClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import GaussianNB\n\n\nwith open(\"a_hue\",\"r\") as f:\n\tl0 = pickle.load(f)\n\nwith open(\"b_hue\",\"r\") as f:\n\tl1 = pickle.load(f)\n\nwith open(\"c_hue\",\"r\") as f:\n\tl2 = pickle.load(f)\n\nwith open(\"d_hue\",\"r\") as f:\n\tl3 = pickle.load(f)\n\nwith open(\"e_hue\",\"r\") as f:\n\tl4 = pickle.load(f)\n\nwith open(\"f_hue\",\"r\") as f:\n\tl5 = pickle.load(f)\n\nwith open(\"i_hue\",\"r\") as f:\n\tl6 = pickle.load(f)\n\nwith open(\"k_hue\",\"r\") as f:\n\tl7 = pickle.load(f)\n\nwith open(\"l_hue\",\"r\") as f:\n\tl8 = pickle.load(f)\n\nwith open(\"n_hue\",\"r\") as f:\n\tl9 = pickle.load(f)\n\nwith open(\"r_hue\",\"r\") as f:\n\tl10 = pickle.load(f)\n\nwith open(\"s_hue\",\"r\") as f:\n\tl11 = pickle.load(f)\n\nwith open(\"t_hue\",\"r\") as f:\n\tl12 = pickle.load(f)\n\nwith open(\"u_hue\",\"r\") as f:\n\tl13 = pickle.load(f)\n\nwith open(\"v_hue\",\"r\") as f:\n\tl14 = pickle.load(f)\n\nwith open(\"w_hue\",\"r\") as f:\n\tl15 = pickle.load(f)\n\nwith open(\"x_hue\",\"r\") as f:\n\tl16 = pickle.load(f)\n\nwith open(\"y_hue\",\"r\") as f:\n\tl17 = pickle.load(f)\n\nX = np.concatenate((l0, l1), axis=0)\nX = np.concatenate((X, l2), axis=0)\nX = np.concatenate((X, l3), axis=0)\nX = np.concatenate((X, l4), axis=0)\nX = np.concatenate((X, l5), axis=0)\nX = np.concatenate((X, l6), axis=0)\nX = np.concatenate((X, l7), axis=0)\nX = np.concatenate((X, l8), axis=0)\nX = np.concatenate((X, l9), axis=0)\nX = np.concatenate((X, l10), axis=0)\nX = np.concatenate((X, l11), axis=0)\nX = np.concatenate((X, l12), axis=0)\nX = np.concatenate((X, l13), axis=0)\nX = np.concatenate((X, l14), axis=0)\nX = np.concatenate((X, l15), axis=0)\nX = np.concatenate((X, l16), axis=0)\nX = np.concatenate((X, l17), axis=0)\n\n\nprint(len(X))\n\n\n\nscalar = StandardScaler()\nx=scalar.fit(X)\n\nX= scalar.transform(X)\n\n\na = np.zeros((1,3000))\n\nb= np.ones((1,3000))\n\nc = np.full((1,3000), 2)\n\nd = np.full((1,3000), 3)\n\ne = np.full((1,3000), 4)\n\nf = np.full((1,3000), 5)\n\ni = np.full((1,3000), 6)\n\nk = np.full((1,3000), 7)\n\nl = np.full((1,3000), 8)\n\nn= np.full((1,3000), 9)\n\nr = np.full((1,3000), 10)\n\ns = np.full((1,3000), 11)\n\nt = np.full((1,3000), 12)\n\nu = np.full((1,3000), 13)\n\nv = 
np.full((1,3000), 14)\n\nw = np.full((1,3000), 15)\n\nx = np.full((1,3000), 16)\n\ny = np.full((1,3000), 17)\n\n\n\nY = np.append(a, b)\nY = np.append(Y, c)\nY = np.append(Y, d)\nY = np.append(Y, e)\nY = np.append(Y, f)\nY = np.append(Y, i)\nY = np.append(Y, k)\nY = np.append(Y, l)\nY = np.append(Y, n)\nY = np.append(Y, r)\nY = np.append(Y, s)\nY = np.append(Y, t)\nY = np.append(Y, u)\nY = np.append(Y, v)\nY = np.append(Y, w)\nY = np.append(Y, x)\nY = np.append(Y, y)\n\n\nprint(len(Y))\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)\n\ngnb = GaussianNB()\n\nmodel = gnb.fit(X_train, y_train)\n\ntuple_objects = (scalar,model)\npickle.dump(tuple_objects,open(\"tuple.pkl\",'wb'))\n\n\n\n\ne = X_test[0]\ny_predict = model.predict(X_test)\nacc = accuracy_score(y_test, y_predict)\nprint(\"accuracy=\")\nprint(acc)\n\nprint(\"printing l\")\n\nmat = confusion_matrix(y_test, y_predict, labels=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17])\nprint(mat)\n\nprint(model.predict([X_test[0]]))\nprint(y_test[0])\n\n\n\n\n\n\n","sub_path":"CNN_LOGISTIC_SVM_NAIVE/naive.py","file_name":"naive.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"548266073","text":"#!/usr/bin/env python\n\nimport os\nfrom pathlib import Path\n\nimport pydub\nimport librosa.display\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef cqt(wav_file):\n # make CQT and save\n plt.figure(figsize=(7.5, 3.75))\n y, sr = librosa.load(wav_file)\n C = librosa.cqt(y, sr=sr)\n librosa.display.specshow(librosa.amplitude_to_db(C, ref=np.max),\n sr=sr)\n plt.axis('off')\n plt.savefig(wav_file.replace(\".wav\", \".png\"), bbox_inches=\"tight\")\n plt.close('all')\n\ndef set_file_to_1channel_wav( filename):\n if filename is None:\n return None\n my_sound = None\n if filename.endswith('.mp3'):\n my_sound = pydub.AudioSegment.from_mp3(filename)\n elif filename.endswith('wav'):\n my_sound = pydub.AudioSegment.from_wav(filename)\n my_sound = my_sound.set_channels(1)\n return my_sound\n\ndef split_wav(mp3_file, target_dir, start_miliseconds, end_miliseconds, i, newAudio): #ms\n song_name = os.path.split(mp3_file)[-1][:-4]\n newAudio_length_miliseconds = newAudio.duration_seconds * 1000\n newAudio2 = newAudio[start_miliseconds : min(newAudio_length_miliseconds, end_miliseconds)]\n save_name = os.path.join(target_dir, song_name + f\"_{i}.wav\")\n newAudio2.export(save_name, format=\"wav\")\n cqt(save_name)\n\ndef split_midi(mid_file, target_dir, default_tempo=500000, target_segment_len=1.0):\n\n import mido\n from mido import MidiFile, MidiTrack, Message, MetaMessage\n song_name = os.path.split(mid_file)[-1][:-4]\n mid = MidiFile(mid_file)\n\n # identify the meta messages\n metas = []\n tempo = default_tempo\n for msg in mid:\n if msg.type is 'set_tempo':\n tempo = msg.tempo\n if msg.is_meta:\n metas.append(msg)\n for meta in metas:\n meta.time = int(mido.second2tick(meta.time, mid.ticks_per_beat, tempo))\n\n target = MidiFile()\n track = MidiTrack()\n track.extend(metas)\n target.tracks.append(track)\n prefix = 0\n time_elapsed = 0\n absolute_time = 0\n for msg in mid:\n # Skip non-note related messages\n if msg.is_meta:\n continue\n time_elapsed += msg.time\n if msg.type is not 'end_of_track':\n msg.time = int(mido.second2tick(msg.time, mid.ticks_per_beat, tempo))\n track.append(msg)\n #print(f'{msg} time1: {time_elapsed}')\n if msg.type is 'end_of_track' or time_elapsed >= target_segment_len:\n\n 
track.append(MetaMessage('end_of_track'))\n Path(os.path.join(target_dir + '_mid')).mkdir(parents=True, exist_ok=True)\n target.save(os.path.join(target_dir + '_mid', song_name + f'_{prefix}.mid'))\n # print(f\"END OF A TRACK {absolute_time} {(absolute_time + time_elapsed)}\")\n newAudio = set_file_to_1channel_wav(mid_file.replace(\".mid\", \".mp3\"))\n\n Path(os.path.join(target_dir + '_wav')).mkdir(parents=True, exist_ok=True)\n split_wav(mid_file.replace(\".mid\", \".mp3\"),\n target_dir + '_wav', absolute_time * 1000, (absolute_time + time_elapsed) * 1000, prefix, newAudio)\n absolute_time += time_elapsed\n\n target = MidiFile()\n track = MidiTrack()\n track.extend(metas)\n target.tracks.append(track)\n time_elapsed = 0\n prefix += 1\n\ndef main():\n target = \"train/train_quad\"\n directory = f'Magisterka/www.audiolabs-erlangen.de/content/resources/MIR/SMD/02-midi/data'\n for filename in os.listdir(directory):\n if filename.endswith(\".mid\"):\n midfile = os.path.join(directory, filename)\n print(midfile)\n split_midi(midfile, target, target_segment_len=0.250)\n\n# main()\n","sub_path":"playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"37335782","text":"#-----------------------------------------------------------------------------------------------------------------------\r\n#introdução a Programação de Computadores\r\n#Prof. Jucimar Jr.\r\n#Adham Lucas da Silva Oliveira 1715310001\r\n#Erik Atilio Silva Rey 1715310059\r\n#Enrique Leão Barbosa Izel 1715310048\r\n#Guilherme Silva de Oliveira 1715310034\r\n#Lukas Michel Souza Mota 1715310018\r\n#Ulisses Antonio Antonino da Costa 1515090555\r\n#\r\n# Faça um programa que receba dois números,\r\n# clacule e mostre a subtração do primeiro pelo segundo\r\n# ----------------------------------------------------------\r\n\r\nnumber_1 = int(input(\"Digite o valor de a:\"))\r\nnumber_2 = int(input(\"Digite o valor de b:\"))\r\n\r\nprint(\"a - b = %d\" % (number_1 - number_2))\r\n","sub_path":"lista1.5/lista1.5_questao01.py","file_name":"lista1.5_questao01.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"92544082","text":"'''\n 가지고온 이미지들을 cv2\n cv2.imread 하게되면..numpy 배열로....\n numpy 나온걸 이웃알고리즘....\n cv2.imread predict 하게되면... 어떤...\n'''\nimport numpy as np\n\nimport cv2\nGREEN = 0\nBLUE = 1\nRED = 2\n\ndef resized20(img):\n resized = cv2.resize(img,(20,20))\n return resized.reshape(-1,400).astype(np.float32)\n\ndef getcolors(img, color):\n other_1 = (color + 1) % 3\n other_2 = (color + 2) % 3\n # 불리언 인덱싱\n indexes = img[:, :, other_1] == 255\n img[indexes] = [0, 0, 0]\n # cv2.imshow('image', img)\n # cv2.waitKey(0)\n indexes = img[:, :, other_2] == 255\n img[indexes] = [0, 0, 0]\n # cv2.imshow('image', img)\n # cv2.waitKey(0)\n indexes = img[:, :, color] < 170\n img[indexes] = [0, 0, 0]\n # cv2.imshow('image', img)\n # cv2.waitKey(0)\n indexes = img[:, :, color] != 0\n img[indexes] = [255, 255, 255]\n # cv2.imshow('image', img)\n # cv2.waitKey(0)\n return img\n\ndef extract_chars(img):\n chars = []\n colors = [BLUE,GREEN,RED]\n for color in colors:\n imgs = getcolors(img.copy(),color)\n gray_imgs = cv2.cvtColor(imgs,cv2.COLOR_BGR2GRAY) #흑백처리\n ret, thre_imgs = cv2.threshold(gray_imgs,127,255,cv2.THRESH_BINARY) #쓰레스홀드.. 한계점.. 
127이상 255 127미만 0처리\n # 외곽선을 찾아라\n contours, _ = cv2.findContours(thre_imgs,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n for contour in contours:\n #cv2.drawContours(gray_imgs, contour,0,(0,0,255),2)\n # cv2.imshow('text',gray_imgs)\n # cv2.waitKey(0)\n area = cv2.contourArea(contour)\n #cv2.contourArea() 외곽선이 감싸는 영역의 면적을 반환합니다.\n # print('area',area)\n if area > 50:\n x,y,width,height = cv2.boundingRect(contour)\n roi = gray_imgs[y:y+height, x:x+width]\n chars.append((x,roi))\n # cv2.imshow('roi',roi)\n # cv2.waitKey(0)\n\n chars = sorted(chars, key = lambda char:char[0])\n return chars\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"0712img/0712Image/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"81107813","text":"#!/usr/bin/python\nfrom os import listdir\nfrom os.path import isfile, join\n\nimgpath = './img/'\n\nimgs = [f for f in listdir(imgpath) if isfile(join(imgpath, f))]\n\n# imgs = ['4871539221530_.pic.jpg', '4881539221531_.pic.jpg']\n\nfor img in imgs:\n print('\"hi\"\\n' % img)\n\nprint('\"hi\"\\n')\n","sub_path":"alpaca.py","file_name":"alpaca.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"414753512","text":"#!usr/bin/env python\n\n# Kyle Lueptow\n# Summer 2013\n\n################\n# ROS IMPORTS: #\n################\nimport roslib\nroslib.load_manifest('nxr_baxter')\nimport rospy\nfrom std_msgs.msg import UInt16\n\n####################\n# RETHINK IMPORTS: #\n####################\nimport baxter_interface\n\n##################\n# OTHER IMPORTS: #\n##################\nimport math\nimport Queue\nimport threading\nimport os\nimport traceback\n\n\nclass PanHead():\n \"\"\"\n PanHead class controls how Baxter \"watches\" people as they walk by\n \"\"\"\n def __init__(self):\n self.head = baxter_interface.Head()\n \n def pan(self, headx, headz, timeout=15.0):\n \"\"\"\n Calculates the angle at which Baxter's head should look\n using the x and z coordinates of the person's head\n \"\"\"\n theta = math.atan(headx / headz)\n self.head.set_pan(theta, 50, 5)\n \n \nclass PickandPlace():\n \"\"\"\n PickandPlace class controls Baxter's idle pick and place operations,\n as he moves two objects regularly through three different positions.\n \"\"\"\n def __init__(self):\n left = baxter_interface.Gripper('left')\n left.reboot()\n self.left_arm = baxter_interface.Limb('left')\n self.pub_rate = rospy.Publisher('/robot/joint_state_publish_rate', UInt16)\n self.pub_rate.publish(500)\n \"\"\"\n The following 6 dictionaries are positions for Baxter's left arm built such that\n each will put Baxter's arm over (for overposition dictionaries) one of the three\n bowls, or into (for inposition dictionaries) one of the bowls.\n \"\"\"\n self.overposition1 = dict(zip()) #We need to write all of these dictionaries.\n self.overposition2 = dict(zip())\n self.overposition3 = dict(zip())\n self.inposition1 = dict(zip())\n self.inposition2 = dict(zip())\n self.inposition3 = dict(zip())\n\n def idle(self, emptybowl, queue, timeout=15.0):\n \"\"\"\n Decides which bowl is empty to determine where it wil be picking\n and placing from and to.\n \"\"\"\n \n if emptybowl == 1:\n emptybowl = pickplace(3)\n elif emptybowl == 2:\n emptybowl = pickplace(1)\n elif emptybowl == 3:\n emptybowl = pickplace(2)\n else:\n emptybowl = pickplace(1)\n \n queue.put(None)\n return emptybowl\n \n def 
pickplace(self, pick):\n\n \"\"\"\n This checks which bowl is empty, and then assigns the pre-defined dictionaries\n to the two sets of two positions for positioning over the bowls, and then\n moving the grippers into the bowls.\n \"\"\"\n if pick == 1:\n overpickpos = self.overposition1\n overplacepos = self.overposition2\n pickpos = self.inposition1\n placepos = self.inposition2\n elif pick == 2:\n overpickpos = self.overposition2\n overplacepos = self.overposition3\n pickpos = self.inposition2\n placepos = self.inposition3\n elif pick == 3:\n overpickpos = self.overposition3\n overplacepos = self.overposition1\n pickpos = self.inposition3\n placepos = self.inposition1\n \n \"\"\"\n The following defines the order in which Baxter moves to positions to pick up\n the object, then place it in the next bowl. The function then returns the value\n of the now-empty bowl.\n \"\"\"\n\n self.left_arm.move_to_joint_positions(self.overpickpos)\n self.left_arm.move_to_joint_positions(self.pickpos)\n left.close()\n self.left_arm.move_to_joint_positions(self.overpickpos)\n self.left_arm.move_to_joint_positions(self.overplacepos)\n self.left_arm.move_to_joint_positions(self.placepos)\n left.open()\n self.left_arm.move_to_joint_positions(self.overplacepos)\n \n return pick","sub_path":"src/idle.py","file_name":"idle.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"573131745","text":"\"\"\"\r\nPeak Power Detect using API for RSA306\r\nAuthor: Morgan Allison\r\nDate created: 6/24/15\r\nDate edited: 11/18/15\r\nWindows 7 64-bit\r\nPython 2.7.9 64-bit (Anaconda 3.7.0)\r\nNumPy 1.8.1, MatPlotLib 1.3.1\r\nTo get Anaconda: http://continuum.io/downloads\r\nAnaconda includes NumPy and MatPlotLib\r\n\"\"\"\r\n\r\nfrom ctypes import *\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\n\"\"\"\r\n################################################################\r\nC:\\Tektronix\\RSA306 API\\lib\\x64 needs to be added to the\r\nPATH system environment variable\r\n################################################################\r\n\"\"\"\r\nos.chdir(\"E:/项目/洪(私)/pro/RSA_API/lib/x64\")\r\nrsa300 = WinDLL(\"RSA_API.dll\")\r\n\r\n#create Spectrum_Settings data structure\r\nclass Spectrum_Settings(Structure):\r\n _fields_ = [('span', c_double),\r\n ('rbw', c_double),\r\n ('enableVBW', c_bool),\r\n ('vbw', c_double),\r\n ('traceLength', c_int),\r\n ('window', c_int),\r\n ('verticalUnit', c_int),\r\n ('actualStartFreq', c_double),\r\n ('actualStopFreq', c_double),\r\n ('actualFreqStepSize', c_double),\r\n ('actualRBW', c_double),\r\n ('actualVBW', c_double),\r\n ('actualNumIQSamples', c_double)]\r\n\r\n#initialize variables\r\nspecSet = Spectrum_Settings()\r\nlongArray = c_long*10\r\ndeviceIDs = longArray()\r\ndeviceSerial = c_wchar_p('')\r\nnumFound = c_int(0)\r\nenable = c_bool(True) #spectrum enable\r\ncf = c_double(20e6) #center freq\r\nrefLevel = c_double(0) #ref level\r\nready = c_bool(False) #ready\r\ntimeoutMsec = c_int(500) #timeout\r\ntrace = c_int(0) #select Trace 1\r\ndetector = c_int(1) #set detector type to max\r\n\r\n#search the USB 3.0 bus for an RSA306\r\nret = rsa300.Search(deviceIDs, byref(deviceSerial), byref(numFound))\r\nif ret != 0:\r\n print('Error in Search: ' + str(ret))\r\nif numFound.value < 1:\r\n print('No instruments found. 
Exiting script.')\r\n exit()\r\nelif numFound.value == 1:\r\n print('One device found.')\r\n print('Device Serial Number: ' + deviceSerial.value)\r\nelse:\r\n print('2 or more instruments found.')\r\n #note: the API can only currently access one at a time\r\n\r\n#connect to the first RSA306\r\nret = rsa300.Connect(deviceIDs[0])\r\nif ret != 0:\r\n print('Error in Connect: ' + str(ret))\r\n\r\n#preset the RSA306 and configure spectrum settings\r\nrsa300.Preset()\r\nrsa300.SetCenterFreq(cf)\r\nrsa300.SetReferenceLevel(refLevel)\r\nrsa300.SPECTRUM_SetEnable(enable)\r\nrsa300.SPECTRUM_SetDefault()\r\nrsa300.SPECTRUM_GetSettings(byref(specSet))\r\n\r\n#configure desired spectrum settings\r\n#some fields are left blank because the default\r\n#values set by SPECTRUM_SetDefault() are acceptable\r\nspecSet.span = c_double(10e6)\r\nspecSet.rbw = c_double(300e3)\r\n#specSet.enableVBW =\r\n#specSet.vbw =\r\nspecSet.traceLength = c_int(801)\r\n#specSet.window =\r\nspecSet.verticalUnit = c_int(4)\r\nspecSet.actualStartFreq =c_double(8e8)\r\nspecSet.actualStopFreq = c_double(1.2e9)\r\n#specSet.actualFreqStepSize =c_double(50000.0)\r\n#specSet.actualRBW =\r\n#specSet.actualVBW =\r\n#specSet.actualNumIQSamples =\r\n\r\n#set desired spectrum settings\r\nrsa300.SPECTRUM_SetSettings(specSet)\r\nrsa300.SPECTRUM_GetSettings(byref(specSet))\r\n\r\n#uncomment this if you want to print out the spectrum settings\r\n\r\n#print out spectrum settings for a sanity check\r\nprint('Span: ' + str(specSet.span))\r\nprint('RBW: ' + str(specSet.rbw))\r\nprint('VBW Enabled: ' + str(specSet.enableVBW))\r\nprint('VBW: ' + str(specSet.vbw))\r\nprint('Trace Length: ' + str(specSet.traceLength))\r\nprint('Window: ' + str(specSet.window))\r\nprint('Vertical Unit: ' + str(specSet.verticalUnit))\r\nprint('Actual Start Freq: ' + str(specSet.actualStartFreq))\r\nprint('Actual End Freq: ' + str(specSet.actualStopFreq))\r\nprint('Actual Freq Step Size: ' + str(specSet.actualFreqStepSize))\r\nprint('Actual RBW: ' + str(specSet.actualRBW))\r\nprint('Actual VBW: ' + str(specSet.actualVBW))\r\n\r\n\r\n#initialize variables for GetTrace\r\ntraceArray = c_float * specSet.traceLength\r\ntraceData = traceArray()\r\noutTracePoints = c_int()\r\n\r\n#generate frequency array for plotting the spectrum\r\nfreq = np.arange(specSet.actualStartFreq,\r\n specSet.actualStartFreq + specSet.actualFreqStepSize*specSet.traceLength,\r\n specSet.actualFreqStepSize)\r\n\r\n#start acquisition\r\nrsa300.Run()\r\nwhile ready.value == False:\r\n rsa300.SPECTRUM_WaitForDataReady(timeoutMsec, byref(ready))\r\n\r\nrsa300.SPECTRUM_GetTrace(c_int(0), specSet.traceLength,\r\n byref(traceData), byref(outTracePoints))\r\nprint('Got trace data.')\r\n\r\n#convert trace data from a ctypes array to a numpy array\r\ntrace = np.ctypeslib.as_array(traceData)\r\n\r\n#Peak power and frequency calculations\r\npeakPower = np.amax(trace)\r\npeakPowerFreq = freq[np.argmax(trace)]\r\nprint('Peak power in spectrum: %4.3f dBm @ %d Hz' % (peakPower, peakPowerFreq))\r\n\r\n#plot the spectrum trace (optional)\r\nplt.plot(freq, traceData)\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Amplitude (dBm)')\r\nplt.title('Spectrum')\r\n\r\n#BONUS clean up plot axes\r\nxmin = np.amin(freq)\r\nxmax = np.amax(freq)\r\nplt.xlim(xmin,xmax)\r\nymin = np.amin(trace)-10\r\nymax = 
np.amax(trace)+10\r\n\r\nplt.show()\r\n#print(traceData)\r\nprint('Disconnecting.')\r\nrsa300.Disconnect()","sub_path":"part1/peak_power_detector.py","file_name":"peak_power_detector.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"360055434","text":"import platform\n\na = {\"a\": 1, \"b\": 2}\nprint(a)\n\n\ndef dic(*args, **kwargs):\n print(args)\n print(kwargs)\n if kwargs: # 判断参数是否有值\n items = kwargs.items()\n for item in items:\n print(item)\n print(\"true\")\n else:\n print(\"false\")\n\n\narray = platform.architecture()\nfor arr in array:\n print(arr)\n\n# dic(\"q\") # false\ndic(\"q\", a=1, b=2) # true\n\ntup = (1, 2, 3, 4)\nprint(list(tup))\n\nl1 = [\"a\", \"b\", \"c\"]\nl2 = [1, 2, 3]\nprint(dict)\n\nif __name__ == '__main__':\n dic((9,))\n","sub_path":"common_use/dic_test.py","file_name":"dic_test.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"262387242","text":"\n\n#calss header\nclass _BYLAW():\n\tdef __init__(self,): \n\t\tself.name = \"BYLAW\"\n\t\tself.definitions = [u'a law made by local government that only relates to its particular region', u'a rule that governs the members of an organization']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_bylaw.py","file_name":"_bylaw.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"551913924","text":"# Released under the MIT License. 
See LICENSE for details.\n#\n\"\"\"Stage assets for a build.\"\"\"\n\nfrom __future__ import annotations\n\nimport hashlib\nimport os\nimport sys\nimport subprocess\nfrom functools import partial\nfrom typing import TYPE_CHECKING\n\nfrom efrotools import PYVER\n\nif TYPE_CHECKING:\n pass\n\n# Suffix for the pyc files we include in stagings.\n# We're using deterministic opt pyc files; see PEP 552.\n# Note: this means anyone wanting to modify .py files in a build\n# will need to wipe out the existing .pyc files first or the changes\n# will be ignored.\nOPT_PYC_SUFFIX = 'cpython-' + PYVER.replace('.', '') + '.opt-1.pyc'\n\n\nclass Config:\n \"\"\"Encapsulates command options.\"\"\"\n\n def __init__(self, projroot: str) -> None:\n self.projroot = projroot\n # We always calc src relative to this script.\n self.src = self.projroot + '/build/assets'\n self.dst: str | None = None\n self.serverdst: str | None = None\n self.win_extras_src: str | None = None\n self.win_platform: str | None = None\n self.win_type: str | None = None\n self.include_audio = True\n self.include_meshes = True\n self.include_collision_meshes = True\n self.include_scripts = True\n self.include_python = True\n self.include_textures = True\n self.include_fonts = True\n self.include_json = True\n self.include_pylib = False\n self.pylib_src_name: str | None = None\n self.include_payload_file = False\n self.tex_suffix: str | None = None\n self.is_payload_full = False\n self.debug: bool | None = None\n\n def _parse_android_args(self, args: list[str]) -> None:\n # On Android we get nitpicky with what\n # we want to copy in since we can speed up\n # iterations by installing stripped down\n # apks.\n self.dst = 'assets/ballistica_files'\n self.pylib_src_name = 'pylib-android'\n self.include_payload_file = True\n self.tex_suffix = '.ktx'\n self.include_audio = False\n self.include_meshes = False\n self.include_collision_meshes = False\n self.include_scripts = False\n self.include_python = False\n self.include_textures = False\n self.include_fonts = False\n self.include_json = False\n self.include_pylib = False\n for arg in args:\n if arg == '-full':\n self.include_audio = True\n self.include_meshes = True\n self.include_collision_meshes = True\n self.include_scripts = True\n self.include_python = True\n self.include_textures = True\n self.include_fonts = True\n self.include_json = True\n self.is_payload_full = True\n self.include_pylib = True\n elif arg == '-none':\n pass\n elif arg == '-meshes':\n self.include_meshes = True\n self.include_collision_meshes = True\n elif arg == '-python':\n self.include_python = True\n self.include_pylib = True\n elif arg == '-textures':\n self.include_textures = True\n elif arg == '-fonts':\n self.include_fonts = True\n elif arg == '-scripts':\n self.include_scripts = True\n elif arg == '-audio':\n self.include_audio = True\n\n def _parse_win_platform(self, platform: str, args: list[str]) -> None:\n \"\"\"Parse sub-args in the windows platform string.\"\"\"\n winempty, wintype, winplt, wincfg = platform.split('-')\n self.win_platform = winplt\n self.win_type = wintype\n assert winempty == ''\n self.tex_suffix = '.dds'\n\n if wintype == 'win':\n self.dst = args[-1]\n elif wintype == 'winserver':\n self.dst = os.path.join(args[-1], 'dist')\n self.serverdst = args[-1]\n self.include_textures = False\n self.include_audio = False\n self.include_meshes = False\n else:\n raise RuntimeError(f'Invalid wintype: \"{wintype}\"')\n\n if winplt == 'Win32':\n self.win_extras_src = self.projroot + 
'/build/assets/windows/Win32'\n elif winplt == 'x64':\n self.win_extras_src = self.projroot + '/build/assets/windows/x64'\n else:\n raise RuntimeError(f'Invalid winplt: \"{winplt}\"')\n\n if wincfg == 'Debug':\n self.debug = True\n elif wincfg == 'Release':\n self.debug = False\n else:\n raise RuntimeError(f'Invalid wincfg: \"{wincfg}\"')\n\n def parse_args(self, args: list[str]) -> None:\n \"\"\"Parse args and apply to the cfg.\"\"\"\n if len(args) < 1:\n raise RuntimeError('Expected a platform argument.')\n platform = args[0]\n if platform == '-android':\n self._parse_android_args(args)\n elif platform.startswith('-win'):\n self._parse_win_platform(platform, args)\n elif platform == '-cmake':\n self.dst = args[1]\n self.tex_suffix = '.dds'\n elif '-cmakeserver' in args:\n self.dst = os.path.join(args[-1], 'dist')\n self.serverdst = args[-1]\n self.include_textures = False\n self.include_audio = False\n self.include_meshes = False\n\n # Require either -debug or -release in args.\n # FIXME: should require this for all platforms for consistency.\n if '-debug' in args:\n self.debug = True\n assert '-release' not in args\n elif '-release' in args:\n self.debug = False\n else:\n raise RuntimeError(\n \"Expected either '-debug' or '-release' in args.\"\n )\n elif '-xcode-mac' in args:\n self.src = os.environ['SOURCE_ROOT'] + '/build/assets'\n self.dst = (\n os.environ['TARGET_BUILD_DIR']\n + '/'\n + os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']\n )\n self.include_pylib = True\n self.pylib_src_name = 'pylib-apple'\n self.tex_suffix = '.dds'\n elif '-xcode-ios' in args:\n self.src = os.environ['SOURCE_ROOT'] + '/build/assets'\n self.dst = (\n os.environ['TARGET_BUILD_DIR']\n + '/'\n + os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']\n )\n self.include_pylib = True\n self.pylib_src_name = 'pylib-apple'\n self.tex_suffix = '.pvr'\n else:\n raise RuntimeError('No valid platform arg provided.')\n\n\ndef md5sum(filename: str) -> str:\n \"\"\"Generate an md5sum given a filename.\"\"\"\n md5 = hashlib.md5()\n with open(filename, mode='rb') as infile:\n for buf in iter(partial(infile.read, 1024), b''):\n md5.update(buf)\n return md5.hexdigest()\n\n\ndef _run(cmd: str, echo: bool = False) -> None:\n \"\"\"Run an os command; raise Exception on non-zero return value.\"\"\"\n if echo:\n print(cmd)\n result = os.system(cmd)\n if result != 0:\n raise RuntimeError(f\"Error running cmd: '{cmd}'.\")\n\n\ndef _write_payload_file(assets_root: str, full: bool) -> None:\n if not assets_root.endswith('/'):\n assets_root = assets_root + '/'\n\n # Now construct a payload file if we have any files.\n file_list = []\n payload_str = ''\n for root, _subdirs, fnames in os.walk(assets_root):\n for fname in fnames:\n if fname.startswith('.'):\n continue\n if fname == 'payload_info':\n continue\n fpath = os.path.join(root, fname)\n fpathshort = fpath.replace(assets_root, '')\n if ' ' in fpathshort:\n raise RuntimeError(\n f\"Invalid filename (contains spaces): '{fpathshort}'\"\n )\n payload_str += fpathshort + ' ' + md5sum(fpath) + '\\n'\n file_list.append(fpathshort)\n\n payload_path = assets_root + '/payload_info'\n if file_list:\n # Write the file count, whether this is a 'full' payload, and finally\n # the file list.\n payload_str = (\n str(len(file_list))\n + '\\n'\n + ('1' if full else '0')\n + '\\n'\n + payload_str\n )\n with open(payload_path, 'w', encoding='utf-8') as outfile:\n outfile.write(payload_str)\n else:\n # Remove the payload file; this will cause the game to completely\n # skip the payload processing 
step.\n if os.path.exists(payload_path):\n os.unlink(payload_path)\n\n\ndef _sync_windows_extras(cfg: Config) -> None:\n # pylint: disable=too-many-branches\n assert cfg.win_extras_src is not None\n assert cfg.win_platform is not None\n assert cfg.win_type is not None\n if not os.path.isdir(cfg.win_extras_src):\n raise RuntimeError(\n \"Win extras src dir not found: '{cfg.win_extras_src}'.\"\n )\n\n # Ok, lets do full syncs on each subdir we find so we\n # properly delete anything in dst that disappeared from src.\n # Lastly we'll sync over the remaining top level files.\n # Note: technically it'll be possible to leave orphaned top level\n # files in dst, so when building packages/etc. we should always start\n # from scratch.\n assert cfg.dst is not None\n assert cfg.debug is not None\n if cfg.debug:\n pyd_rules = \"--include '*_d.pyd'\"\n else:\n pyd_rules = \"--exclude '*_d.pyd' --include '*.pyd'\"\n\n for dirname in ('DLLs', 'Lib'):\n # EWW: seems windows python currently sets its path to ./lib but it\n # comes with Lib. Windows is normally case-insensitive but this messes\n # it up when running under WSL. Let's install it as lib for now.\n dstdirname = 'lib' if dirname == 'Lib' else dirname\n _run(f'mkdir -p \"{cfg.dst}/{dstdirname}\"')\n cmd = (\n 'rsync --recursive --update --delete --delete-excluded '\n ' --prune-empty-dirs'\n \" --include '*.ico' --include '*.cat'\"\n f\" --include '*.dll' {pyd_rules}\"\n \" --include '*.py' --include '*.\" + OPT_PYC_SUFFIX + \"'\"\n \" --include '*/' --exclude '*' \\\"\"\n + os.path.join(cfg.win_extras_src, dirname)\n + '/\" '\n '\"' + cfg.dst + '/' + dstdirname + '/\"'\n )\n _run(cmd)\n\n # Now sync the top level individual files that we want.\n # We could technically copy everything over but this keeps staging\n # dirs a bit tidier.\n dbgsfx = '_d' if cfg.debug else ''\n # Note: Below needs updating when Python version changes (currently 3.11)\n toplevelfiles: list[str] = [f'python311{dbgsfx}.dll']\n\n if cfg.win_type == 'win':\n toplevelfiles += [\n 'libvorbis.dll',\n 'libvorbisfile.dll',\n 'ogg.dll',\n 'OpenAL32.dll',\n 'SDL2.dll',\n ]\n elif cfg.win_type == 'winserver':\n toplevelfiles += [f'python{dbgsfx}.exe']\n\n # Include debug dlls so folks without msvc can run them.\n if cfg.debug:\n if cfg.win_platform == 'x64':\n toplevelfiles += [\n 'msvcp140d.dll',\n 'vcruntime140d.dll',\n 'vcruntime140_1d.dll',\n 'ucrtbased.dll',\n ]\n else:\n toplevelfiles += [\n 'msvcp140d.dll',\n 'vcruntime140d.dll',\n 'ucrtbased.dll',\n ]\n\n # Include the runtime redistributables in release builds.\n if not cfg.debug:\n if cfg.win_platform == 'x64':\n toplevelfiles.append('vc_redist.x64.exe')\n elif cfg.win_platform == 'Win32':\n toplevelfiles.append('vc_redist.x86.exe')\n else:\n raise RuntimeError(f'Invalid win_platform {cfg.win_platform}')\n\n cmd2 = (\n ['rsync', '--update']\n + [os.path.join(cfg.win_extras_src, f) for f in toplevelfiles]\n + [cfg.dst + '/']\n )\n subprocess.run(cmd2, check=True)\n\n # If we're running under WSL we won't be able to launch these .exe files\n # unless they're marked executable, so do that here.\n # Update: gonna try simply setting this flag on the source side.\n # _run(f'chmod +x {cfg.dst}/*.exe')\n\n\ndef _sync_pylib(cfg: Config) -> None:\n assert cfg.pylib_src_name is not None\n assert cfg.dst is not None\n _run(f'mkdir -p \"{cfg.dst}/pylib\"')\n cmd = (\n f'rsync --recursive --update --delete --delete-excluded '\n f' --prune-empty-dirs'\n f\" --include '*.py' --include '*.{OPT_PYC_SUFFIX}'\"\n f\" --include '*/' --exclude 
'*'\"\n f' \"{cfg.src}/{cfg.pylib_src_name}/\" '\n f'\"{cfg.dst}/pylib/\"'\n )\n _run(cmd)\n\n\ndef _sync_standard_game_data(cfg: Config) -> None:\n assert cfg.dst is not None\n _run('mkdir -p \"' + cfg.dst + '/ba_data\"')\n cmd = (\n 'rsync --recursive --update --delete --delete-excluded'\n ' --prune-empty-dirs'\n )\n\n if cfg.include_scripts:\n cmd += (\n f\" --include '*.py' --include '*.pem'\"\n f\" --include '*.{OPT_PYC_SUFFIX}'\"\n )\n\n if cfg.include_textures:\n assert cfg.tex_suffix is not None\n cmd += \" --include '*\" + cfg.tex_suffix + \"'\"\n\n if cfg.include_audio:\n cmd += \" --include '*.ogg'\"\n\n if cfg.include_fonts:\n cmd += \" --include '*.fdata'\"\n\n if cfg.include_json:\n cmd += \" --include '*.json'\"\n\n if cfg.include_meshes:\n cmd += \" --include '*.bob'\"\n\n if cfg.include_collision_meshes:\n cmd += \" --include '*.cob'\"\n\n cmd += (\n \" --include='*/' --exclude='*' \\\"\"\n + cfg.src\n + '/ba_data/\" \"'\n + cfg.dst\n + '/ba_data/\"'\n )\n _run(cmd)\n\n\ndef _sync_server_files(cfg: Config) -> None:\n assert cfg.serverdst is not None\n assert cfg.debug is not None\n modeval = 'debug' if cfg.debug else 'release'\n\n # NOTE: staging these directly from src; not build.\n stage_server_file(\n projroot=cfg.projroot,\n mode=modeval,\n infilename=f'{cfg.projroot}/src/assets/server_package/'\n 'ballisticakit_server.py',\n outfilename=os.path.join(\n cfg.serverdst,\n 'ballisticakit_server.py'\n if cfg.win_type is not None\n else 'ballisticakit_server',\n ),\n )\n stage_server_file(\n projroot=cfg.projroot,\n mode=modeval,\n infilename=f'{cfg.projroot}/src/assets/server_package/README.txt',\n outfilename=os.path.join(cfg.serverdst, 'README.txt'),\n )\n stage_server_file(\n projroot=cfg.projroot,\n mode=modeval,\n infilename=f'{cfg.projroot}/src/assets/server_package/'\n 'config_template.yaml',\n outfilename=os.path.join(cfg.serverdst, 'config_template.yaml'),\n )\n if cfg.win_type is not None:\n fname = 'launch_ballisticakit_server.bat'\n stage_server_file(\n projroot=cfg.projroot,\n mode=modeval,\n infilename=f'{cfg.projroot}/src/assets/server_package/{fname}',\n outfilename=os.path.join(cfg.serverdst, fname),\n )\n\n\ndef _write_if_changed(\n path: str, contents: str, make_executable: bool = False\n) -> None:\n changed: bool\n try:\n with open(path, encoding='utf-8') as infile:\n existing = infile.read()\n changed = contents != existing\n except FileNotFoundError:\n changed = True\n if changed:\n with open(path, 'w', encoding='utf-8') as outfile:\n outfile.write(contents)\n if make_executable:\n subprocess.run(['chmod', '+x', path], check=True)\n\n\ndef stage_server_file(\n projroot: str, mode: str, infilename: str, outfilename: str\n) -> None:\n \"\"\"Stage files for the server environment with some filtering.\"\"\"\n import batools.build\n from efrotools import replace_exact\n\n if mode not in ('debug', 'release'):\n raise RuntimeError(\n f\"Invalid server-file-staging mode '{mode}';\"\n f\" expected 'debug' or 'release'.\"\n )\n\n print(f'Building server file: {os.path.basename(outfilename)}')\n\n os.makedirs(os.path.dirname(outfilename), exist_ok=True)\n\n basename = os.path.basename(infilename)\n if basename == 'config_template.yaml':\n # Inject all available config values into the config file.\n _write_if_changed(\n outfilename,\n batools.build.filter_server_config(str(projroot), infilename),\n )\n\n elif basename == 'ballisticakit_server.py':\n # Run Python in opt mode for release builds.\n with open(infilename, encoding='utf-8') as infile:\n lines = 
infile.read().splitlines()\n if mode == 'release':\n lines[0] = replace_exact(\n lines[0],\n f'#!/usr/bin/env python{PYVER}',\n f'#!/usr/bin/env -S python{PYVER} -O',\n )\n _write_if_changed(\n outfilename, '\\n'.join(lines) + '\\n', make_executable=True\n )\n elif basename == 'README.txt':\n with open(infilename, encoding='utf-8') as infile:\n readme = infile.read()\n _write_if_changed(outfilename, readme)\n elif basename == 'launch_ballisticakit_server.bat':\n # Run Python in opt mode for release builds.\n with open(infilename, encoding='utf-8') as infile:\n lines = infile.read().splitlines()\n if mode == 'release':\n lines[1] = replace_exact(\n lines[1],\n ':: Python interpreter.',\n ':: Python interpreter.'\n ' (in opt mode so we use bundled .opt-1.pyc files)',\n )\n lines[2] = replace_exact(\n lines[2],\n 'dist\\\\\\\\python.exe ballisticakit_server.py',\n 'dist\\\\\\\\python.exe -O ballisticakit_server.py',\n )\n else:\n # In debug mode we use the bundled debug interpreter.\n lines[2] = replace_exact(\n lines[2],\n 'dist\\\\\\\\python.exe ballisticakit_server.py',\n 'dist\\\\\\\\python_d.exe ballisticakit_server.py',\n )\n\n _write_if_changed(outfilename, '\\n'.join(lines) + '\\n')\n else:\n raise RuntimeError(f\"Unknown server file for staging: '{basename}'.\")\n\n\ndef main(projroot: str, args: list[str] | None = None) -> None:\n \"\"\"Stage assets for a build.\"\"\"\n\n if args is None:\n args = sys.argv\n\n cfg = Config(projroot)\n cfg.parse_args(args)\n\n # Ok, now for every top level dir in src, come up with a nice single\n # command to sync the needed subset of it to dst.\n\n # We can now use simple speedy timestamp based updates since\n # we no longer have to try to preserve timestamps to get .pyc files\n # to behave (hooray!)\n\n # Do our stripped down pylib dir for platforms that use that.\n if cfg.include_pylib:\n _sync_pylib(cfg)\n else:\n if cfg.dst is not None and os.path.isdir(cfg.dst + '/pylib'):\n subprocess.run(['rm', '-rf', cfg.dst + '/pylib'], check=True)\n\n # Sync our server files if we're doing that.\n if cfg.serverdst is not None:\n _sync_server_files(cfg)\n\n # On windows we need to pull in some dlls and this and that\n # (we also include a non-stripped-down set of python libs).\n if cfg.win_extras_src is not None:\n _sync_windows_extras(cfg)\n\n # Standard stuff in ba_data\n _sync_standard_game_data(cfg)\n\n # On Android we need to build a payload file so it knows\n # what to pull out of the apk.\n if cfg.include_payload_file:\n assert cfg.dst is not None\n _write_payload_file(cfg.dst, cfg.is_payload_full)\n","sub_path":"tools/batools/assetstaging.py","file_name":"assetstaging.py","file_ext":"py","file_size_in_byte":19823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"621445437","text":"# start.py\n# Timur Jaganov\n# andeniel@gmail.com\n# created 22.05.2017\n# File Handle, read, write\n\nfileHandle = open(\"files/example.txt\", \"r\")\n\nprint(type(fileHandle))\n\ncontent = fileHandle.read()\nprint(content)\n\nfileHandle.seek(0)\nlines = fileHandle.readlines()\nprint(lines)\nlines = [i.rstrip(\"\\n\") for i in lines]\nprint(lines)\n\nfileHandle.close()\n","sub_path":"section05/start1.py","file_name":"start1.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"170771753","text":"import os\n\nimport tensorflow as tf\nimport numpy as np\n\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.layers 
import Conv2D, MaxPool2D, Flatten, Dense, Input, Lambda\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.keras.optimizers import Adam, SGD\n\nfrom FaceLoader import FaceLoader\n\nclass SiameseNetwork:\n\n\n def __init__(self, dataset_path, learning_rate, batch_size, use_augmentation,\n l2_param, tensorboard_log_path):\n\n self.input_shape = (105, 105, 1)\n self.model = []\n\n self.learning_rate = learning_rate\n self.batch_size = batch_size # u want to have a image loader here that takes images as pair from # DEBUG:\n self.summary_writer = tf.summary.create_file_writer(tensorboard_log_path)\n self.face_loader = FaceLoader(dataset_path=dataset_path, use_augmentation=use_augmentation, batch_size=batch_size)\n self._construct_network(l2_param)\n\n def _construct_network(self, l2_param):\n\n\n encoder = Sequential()\n encoder.add(Conv2D(filters=64, kernel_size=(10, 10),\n activation='relu', input_shape=self.input_shape,\n kernel_regularizer=l2(l2_param['Conv1']),\n kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=10**-2, seed=None),\n bias_initializer=tf.random_normal_initializer(mean=0.5, stddev=10**-2, seed=None),\n name='Conv1'))\n encoder.add(MaxPool2D())\n\n encoder.add(Conv2D(filters=128, kernel_size=(7, 7),\n activation='relu',\n kernel_regularizer=l2(l2_param['Conv2']),\n kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=10**-2, seed=None),\n bias_initializer=tf.random_normal_initializer(mean=0.5, stddev=10**-2, seed=None),\n name='Conv2'))\n encoder.add(MaxPool2D())\n\n encoder.add(Conv2D(filters=128, kernel_size=(4, 4),\n activation='relu',\n kernel_regularizer=l2(l2_param['Conv3']),\n kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=10**-2, seed=None),\n bias_initializer=tf.random_normal_initializer(mean=0.5, stddev=10**-2, seed=None),\n name='Conv3'))\n encoder.add(MaxPool2D())\n\n encoder.add(Conv2D(filters=256, kernel_size=(4, 4),\n activation='relu',\n kernel_regularizer=l2(l2_param['Conv4']),\n kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=10**-2, seed=None),\n bias_initializer=tf.random_normal_initializer(mean=0.5, stddev=10**-2, seed=None),\n name='Conv4'))\n # encoder.add(MaxPool2D())\n\n encoder.add(Flatten())\n encoder.add(Dense(units=4096, activation='sigmoid',\n kernel_regularizer=l2(l2_param['Dense1']),\n kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=2*(10**-1), seed=None),\n bias_initializer=tf.random_normal_initializer(mean=0.5, stddev=10**-2, seed=None),\n name='Dense1'))\n\n input_img_1 = Input(self.input_shape)\n input_img_2 = Input(self.input_shape)\n\n encoded_img_1 = encoder(input_img_1)\n encoded_img_2 = encoder(input_img_2)\n\n l1_distance_layer = Lambda(lambda tensors: K.abs(tensors[0]-tensors[1]))\n l1_distance = l1_distance_layer([encoded_img_1, encoded_img_2])\n\n prediction = Dense(units=1,\n kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=2*(10**-1), seed=None),\n bias_initializer=tf.random_normal_initializer(mean=0.5, stddev=10**-2, seed=None),\n activation='sigmoid')(l1_distance)\n\n self.model = Model(inputs=[input_img_1, input_img_2], outputs=prediction)\n\n optimizer = SGD(lr=self.learning_rate, momentum=0.5,name=\"SGD\")\n # optimizer = Adam(lr=self.learning_rate)\n self.model.compile(loss='binary_crossentropy', metrics=['binary_accuracy'], optimizer=optimizer)\n\n def _write_logs_to_tensorboard(self, current_iteration, train_losses,\n train_accuracies, validation_accuracy,\n 
evaluate_each):\n \"\"\" Writes the logs to a tensorflow log file\n\n This allows us to see the loss curves and the metrics in tensorboard.\n If we wrote every iteration, the training process would be slow, so\n instead we write the logs every evaluate_each iteration.\n\n Arguments:\n current_iteration: iteration to be written in the log file\n train_losses: contains the train losses from the last evaluate_each\n iterations.\n train_accuracies: the same as train_losses but with the accuracies\n in the training set.\n validation_accuracy: accuracy in the current one-shot task in the\n validation set\n evaluate each: number of iterations defined to evaluate the one-shot\n tasks.\n \"\"\"\n\n # Write to log file the values from the last evaluate_every iterations\n\n with self.summary_writer.as_default():\n for index in range(0, evaluate_each):\n tf.summary.scalar(\"train_loss\", train_losses[index], step=current_iteration - evaluate_each + index + 1)\n tf.summary.scalar(\"train_acc\", train_accuracies[index], step=current_iteration - evaluate_each + index + 1)\n if index == (evaluate_each - 1):\n tf.summary.scalar(\"one_shot_val_acc\", validation_accuracy, step=current_iteration - evaluate_each + index + 1)\n\n self.summary_writer.flush()\n\n def train_network(self, number_of_iterations, support_set_size,\n final_momentum, momentum_slope, evaluate_each,\n model_name):\n\n self.face_loader.split_train_dataset()\n\n train_losses = np.zeros(shape=(evaluate_each))\n train_accuracies = np.zeros(shape=(evaluate_each))\n count = 0\n earrly_stop = 0\n # Stop criteria variables\n best_validation_accuracy = 0.0\n best_accuracy_iteration = 0\n validation_accuracy = 0.0\n\n for iteration in range(number_of_iterations):\n images, labels = self.face_loader.get_train_batch()\n train_loss, train_accuracy = self.model.train_on_batch(images, labels)\n\n\n # Decay learning rate 1 % per 500 iterations (in the paper the decay is\n # 1% per epoch). 
Also update linearly the momentum (starting from 0.5 to 1)\n if (iteration + 1) % 500 == 0:\n K.set_value(self.model.optimizer.lr, K.get_value(self.model.optimizer.lr) * 0.99)\n if K.get_value(self.model.optimizer.momentum) < final_momentum:\n K.set_value(self.model.optimizer.momentum, K.get_value(self.model.optimizer.momentum) + momentum_slope)\n\n train_losses[count] = train_loss\n train_accuracies[count] = train_accuracy\n\n # validation set\n count += 1\n print('Iteration %d/%d: Train loss: %f, Train Accuracy: %f, lr = %f' %\n (iteration + 1, number_of_iterations, train_loss, train_accuracy, K.get_value(\n self.model.optimizer.lr)))\n\n if (iteration + 1) % evaluate_each == 0:\n number_of_runs_per_person = 40\n # use a support set size equal to the number of character in the alphabet\n validation_accuracy = self.face_loader.one_shot_test(self.model, support_set_size, number_of_runs_per_person, is_validation=True)\n\n self._write_logs_to_tensorboard(iteration, train_losses, train_accuracies, validation_accuracy, evaluate_each)\n count = 0\n\n # Some hyperparameters lead to 100%, although the output is almost the same in\n # all images.\n if (validation_accuracy == 1.0 and train_accuracy == 0.5):\n print('Early Stopping: Gradient Explosion')\n print('Validation Accuracy = ' + str(best_validation_accuracy))\n return 0\n elif train_accuracy == 0.0:\n return 0\n else:\n # Save the model\n if validation_accuracy > best_validation_accuracy:\n best_validation_accuracy = validation_accuracy\n best_accuracy_iteration = iteration\n\n model_json = self.model.to_json()\n\n if not os.path.exists('./models'):\n os.makedirs('./models')\n with open('models/' + model_name + '.json', \"w\") as json_file:\n json_file.write(model_json)\n self.model.save_weights('models/' + model_name + '.h5')\n\n # If accuracy does not improve for 10000 batches stop the training\n if iteration - best_accuracy_iteration > 20000:\n print('Early Stopping: validation accuracy did not increase for 20000 iterations')\n print('Best Validation Accuracy = ' + str(best_validation_accuracy))\n print('Validation Accuracy = ' + str(best_validation_accuracy))\n break\n\n print('Trained Ended!')\n return best_validation_accuracy\n","sub_path":"SiameseNetwork.py","file_name":"SiameseNetwork.py","file_ext":"py","file_size_in_byte":9751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"8690954","text":"#!/usr/bin/python3\n'''\nScript : main.py\n\nCopyright: LiKneu 2019\n'''\n\nimport os\nimport sys\nimport glob\nfrom PyQt5.Qt import Qt\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import QEvent\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWidgets import QMainWindow\nfrom PyQt5.QtWidgets import QSystemTrayIcon\nfrom PyQt5.QtWidgets import QMenu\n\n# Create a class of the main window to allow all widgets that are placed on it\n# to communicate with each other\nclass MyWindow(QMainWindow):\n '''Class of the Main Window'''\n\n def __init__(self):\n '''Initializes the Main Window'''\n super(MyWindow, self).__init__()\n self.setGeometry(800, 200, 300, 167)\n self.setWindowTitle('g-i-s v.2020-01-10')\n self.init_ui()\n self.user_command = ''\n self.user_input = ''\n\n def init_ui(self):\n '''Initializes the elements on the Main Window'''\n # Tray icon\n self.tray_icon = QSystemTrayIcon(QIcon('./config_files/Cubitus.ico'), parent=self)\n self.tray_icon.setToolTip('get-it-slim')\n self.tray_icon.show()\n self.tray_menu = QMenu()\n \n 
self.tray_menu_show = self.tray_menu.addAction('show')\n # ~ self.tray_menu_show.triggered.connect(self.show_gis)\n self.tray_menu_hide = self.tray_menu.addAction('hide')\n # ~ self.tray_menu_show.triggered.connect(self.hide_gis)\n self.tray_icon.setContextMenu(self.tray_menu)\n \n # Line Edit\n self.le_input = QtWidgets.QLineEdit(self)\n self.le_input.setGeometry(2, 0, 296, 30)\n self.le_input.setPlaceholderText(' ')\n self.le_input.setClearButtonEnabled(True)\n self.le_input.installEventFilter(self)\n self.le_input.textChanged.connect(self.input_changed)\n\n # List Widget\n self.lst = QtWidgets.QListWidget(self)\n self.lst.setSortingEnabled(True)\n self.lst.setAlternatingRowColors(True)\n self.lst.setGeometry(2, 35, 296, 130)\n self.lst.installEventFilter(self)\n\n def eventFilter(self, obj, event):\n '''Event filter needed to catch the change of focus.'''\n if event.type() == QEvent.FocusIn:\n if obj == self.lst:\n print('List Widget')\n self.FOCUS = 'List Widget'\n if obj == self.le_input:\n print('Line Edit')\n self.FOCUS = 'Line Edit'\n # Deselect all list items if List Widget looses the focus\n self.lst.setCurrentRow(0)\n self.lst.clearSelection()\n\n return super(MyWindow, self).eventFilter(obj, event)\n\n def keyPressEvent(self, event):\n '''Detect that the RETURN key was pressed.'''\n if event.key() == Qt.Key_Return:\n self.return_pressed()\n elif event.key() == Qt.Key_Enter:\n print('Enter')\n\n def input_changed(self):\n '''Handling of the users input is mainly done here.'''\n\n # Try to split the user input into the parts 'command' and 'user_input'.\n # The delimiter is a space. Thus it's possible to have commands of arbi-\n # trary length.\n try:\n self.user_command, self.user_input = self.le_input.text().split(' ', 1)\n print(f'\\tUser input: <{self.user_command}>, <{self.user_input}>')\n except:\n # Reset user command and input to empty strings\n self.user_command = ''\n self.user_input = ''\n \n # List Widget has to be cleared if no valid user input is available\n self.lst.clear()\n self.main_win_small()\n return\n\n # Handling of bookmarks\n if self.user_command == 'b' and self.user_input == '':\n my_bookmarks = self.read_bookmark_files()\n self.lst.clear()\n for bm in my_bookmarks:\n self.lst.addItem(bm)\n self.main_win_large()\n elif self.user_command == 'b' and self.user_input:\n my_bookmarks = self.read_bookmark_files()\n self.lst.clear()\n for bm in my_bookmarks:\n if self.user_input.lower() in bm.lower():\n self.lst.addItem(bm)\n self.main_win_large()\n # No known command\n else:\n self.lst.clear()\n self.main_win_small()\n\n def main_win_large(self):\n '''Set dimensions of main window so that all widgets can be seen.'''\n self.resize(300, 167)\n\n def main_win_small(self):\n '''Set dimensions of main window so that just Line Edit can be seen.'''\n self.resize(300, 32)\n\n def return_pressed(self):\n print('return_pressed', self.FOCUS)\n if self.FOCUS == 'List Widget':\n self.list_return()\n elif self.FOCUS == 'Line Edit':\n if self.user_command == 'b':\n self.line_edit_return()\n elif self.user_command == 'p' and self.user_input:\n settings = self.read_config_file()\n cmd = settings['PCR-URL']\n cmd = cmd + self.user_input\n self.run_command(cmd)\n # Handle CSCs\n elif self.user_command == 'c' and self.user_input:\n settings = self.read_config_file()\n cmd = settings['CSC-URL']\n cmd = cmd + self.user_input\n self.run_command(cmd)\n else:\n print('Error in RETURN handler')\n\n def line_edit_return(self):\n '''Returns the first item of the list on RETURN in line 
edit field.'''\n\n # Get the text of the topmost item\n if self.lst.item(0):\n bookmark_key = self.lst.item(0).text()\n # TODO: remove print statement\n print('\\tBookmark title:', bookmark_key)\n my_bookmarks = self.read_bookmark_files()\n # TODO: remove print statement\n print('\\tBookmark command:', my_bookmarks[bookmark_key])\n self.run_command(my_bookmarks[bookmark_key])\n\n def list_return(self):\n '''Returns the selected list item on RETURN'''\n if self.lst.currentItem():\n bookmark_key = self.lst.currentItem().text()\n # TODO: remove print statement\n print('\\tBookmark title:', bookmark_key)\n my_bookmarks = self.read_bookmark_files()\n # TODO: remove print statement\n print('\\tBookmark command:', my_bookmarks[bookmark_key])\n self.run_command(my_bookmarks[bookmark_key])\n\n def run_command(self, command):\n '''Runs the command provided by Line Edit or List Widget.'''\n my_os = os.name\n # TODO: Remove print statement\n print('OS:', my_os)\n print(f'Try to run command: {command}')\n # FIXIT: In case the app runs on Linux add '&' to the command\n if my_os == 'posix':\n command = command + ' &'\n os.system(command)\n elif my_os == 'windows':\n command = 'start ' + command\n os.system(command)\n else:\n print(f'Unknown OS: {my_os}. Script might not work as expected.')\n\n def read_bookmark_files(self, filetype='_bookmarks.txt'):\n \"\"\"Returns a list of lists containing the bookmarks titles and URLs.\"\"\"\n\n lines = {}\n for entry in glob.glob('./config_files/bookmarks/*'):\n if filetype in entry:\n with open(entry) as cf:\n # Read the file content line by line\n for line in cf:\n # Ensure the line is not empty\n if '|' in line:\n # Remove whitespace from the line\n line = line.strip()\n # Split the string into Title and Command\n title, command = line.split('|')\n # Append Title and Command to the others\n lines[title] = command\n return lines\n\n def read_config_file(self):\n '''Returns configuration settings in a dict'''\n settings = {}\n cfg_file = './config_files/get-it-slim_conf.txt'\n with open(cfg_file) as cfg:\n for line in cfg:\n if '|' in line:\n line = line.strip()\n key, value = line.split('|', 1)\n settings[key] = value\n return settings\n\ndef window():\n '''Main function'''\n app = QApplication(sys.argv)\n win = MyWindow()\n win.show()\n sys.exit(app.exec_())\n\nwindow()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"54299567","text":"from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget , QFrame , QHBoxLayout, QSplitter , QLineEdit\nimport sys\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import Qt\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n\n self.title=\"Frame Window\"\n self.top=100\n self.left=100\n self.width=400\n self.height=120\n self.iconName = \"logo.png\"\n\n self.setWindowTitle(self.title)\n self.setWindowIcon(QtGui.QIcon(self.iconName))\n self.setGeometry(self.left, self.top, self.width, self.height)\n \n hbox = QHBoxLayout()\n left = QFrame()\n left.setFrameShape(QFrame.StyledPanel)\n\n bottom = QFrame()\n bottom.setFrameShape(QFrame.StyledPanel)\n\n splitter1 = QSplitter(Qt.Horizontal)\n \n lineedit1 = QLineEdit()\n splitter1.addWidget(left)\n splitter1.addWidget(lineedit1)\n splitter1.setSizes([200,200])\n\n splitter2 = QSplitter(Qt.Vertical)\n splitter2.addWidget(splitter1)\n splitter2.addWidget(bottom)\n\n hbox.addWidget(splitter2)\n\n self.setLayout(hbox)\n\n\n 
self.show()\n\nif __name__ == \"__main__\":\n App = QApplication(sys.argv)\n window = Window()\n sys.exit(App.exec())","sub_path":"qsplitter.py","file_name":"qsplitter.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"160141869","text":"\n\n\nclass OrthoGroup:\n \"\"\"Sets up an orthoGroup object when given a Dict of form: KEYS:, VALUES:\n Manages the searching of self with TAMO motif object, and modifies 'hitList' attribute in self\n to reflect k-mer searched and positive fraction.\"\"\"\n \n\n\n \n \n \n def __init__(self, orthoDict):\n \n # object data \n self.hitList = []\n self.orthoDict = orthoDict\n \n self._validate()\n \n def _validate(self):\n assert type(self.orthoDict) == type({}), \\\n '''orthoSet.orthoDict must be a dictionary.\n You provided:%s which was %s'''\\\n % (self.orthoDict, type(self.orthoDict))\n \n \n def searchMotif(self,TAMO_motif_obj,scoreFactor=0.75):\n \"\"\"Scans all seqs in self using TAMO motif obj and scoreFactor. Records k-mer string and fraction\n of positive seqs in self into self.hitlist.\"\"\"\n \n assert type(scoreFactor) == type(1) or type(scoreFactor) == type(0.1), \\\n '''orthoSet.searchMotif argument \"scoreFactor\" must be of type int or float.\n You provided: %s which was %s'''\\\n % (scoreFactor,type(scoreFactor))\n \n hits = 0\n for seq in self.orthoDict:\n if TAMO_motif_obj.scan(self.orthoDict[seq],factor=scoreFactor)[0]: # ALWAYS returns a tuple, so check first index\n hits+=1\n \n # update hitList with: [kMerString, fracOfPositives, scoreFactorUsed]\n self.hitList.append([TAMO_motif_obj.oneletter, hits/float(len(self.orthoDict)), scoreFactor])\n\n\n\n\n\n\nchangeLog = \\\n\"\"\"2009-03-28 -- creation\n2009-03-28 -- added orthoSet.__init__()\n2009-03-28 -- added orthoSet.searchMotif()\n2009-03-28 -- Changed orthoSet class name to 'OrthoGroup' bc it is not a PySetObj and I tend to use that convention\n\n\"\"\"\n\n\n","sub_path":"gusPyCode/MDOSX_proj/MDOSX_defs/MDOSX_classes.py","file_name":"MDOSX_classes.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"537460262","text":"from BSFC.apps.item.models import Item\nfrom BSFC.apps.cost.models import Cost\nfrom BSFC.apps.revenue.models import Revenue\nfrom BSFC.processing.timezone_convert import (\n convert_utc_to_pacific, convert_pacific_to_utc\n)\nfrom BSFC.processing.validation import validate_item\nfrom BSFC.processing.validation import validate_line_item\nfrom BSFC.processing.api_data import get_api_data\nimport time\nfrom datetime import datetime, timedelta\nimport logging\nfrom BSFC.processing.constants import PACIFIC_TIMEZONE\nimport pytz\nlog = logging.getLogger('django')\n\ndef adjust_for_refunds(dates):\n \"\"\"\n After database is initally populated in :func:`~BSFC.BSFC.BSFC.processing.population.populate_date_range()`, this function is called to account for refunds by subtracting amount refunded from the relevant field in each item's :class:`~BSFC.BSFC.BSFC.apps.revenue.models.Revenue` attribute.\n\n :param dates: List of dates for which to adjust for refunds. 
\n :type dates: list\n \"\"\"\n for idx, date in enumerate(dates):\n todays_datetime = date\n tz = pytz.timezone(PACIFIC_TIMEZONE)\n todays_datetime_pacific = tz.localize(\n datetime(todays_datetime.year, todays_datetime.month,\n todays_datetime.day, todays_datetime.hour, todays_datetime.minute,\n todays_datetime.second, todays_datetime.microsecond),\n is_dst=None\n )\n pacific_datetime_beginning_of_day = tz.localize(\n datetime(todays_datetime.year, todays_datetime.month, todays_datetime.day, 0, 0, 0, 0),\n is_dst=None\n )\n pacific_datetime_end_of_day = tz.localize(\n datetime(todays_datetime.year, todays_datetime.month, todays_datetime.day, 23, 59, 59, 999999),\n is_dst=None\n )\n utc_datetime_beginning_of_day = convert_pacific_to_utc(pacific_datetime_beginning_of_day)\n utc_datetime_end_of_day = convert_pacific_to_utc(pacific_datetime_end_of_day)\n utc_datetime_beginning_of_day_ms = int(time.mktime(utc_datetime_beginning_of_day.timetuple())) * 1000\n utc_datetime_end_of_day_ms = int(time.mktime(utc_datetime_end_of_day.timetuple())) * 1000\n todays_refunds = get_api_data(\n 'refunds',\n filterItems=['createdTime>' + str(utc_datetime_beginning_of_day_ms), 'createdTime<' + str(utc_datetime_end_of_day_ms)],\n expandItems=['lineItems']\n )\n for refund in todays_refunds:\n line_item_dict = refund['lineItems']['elements']\n for refunded_item_dict in line_item_dict:\n item_qs = Item.objects.filter(name=refunded_item_dict['name'], created_at__gte=pacific_datetime_beginning_of_day, created_at__lte=pacific_datetime_end_of_day)\n if item_qs.exists():\n if 'unitQty' in refunded_item_dict:\n item_qs[0].revenue.update_revenue_field('SOLD', -1*refunded_item_dict['unitQty']/1000.00)\n else:\n item_qs[0].revenue.update_revenue_field('SOLD', -1)\n\n else:\n item_dict = validate_line_item(refunded_item_dict) #returns None if refunded_item_dict is validated properly; in that case proceed to make REST API call\n if item_dict == None:\n item_dict = get_api_data(\n 'items/' + str(line_item_dict['item']['id']),\n expandItems=['categories']\n\t )\n revenue_object = Revenue()\n if 'unitQty' in refunded_item_dict:\n\t revenue_object.update_revenue_field('SOLD', -1*refunded_item_dict['unitQty']/1000.00)\n else:\n\t revenue_object.update_revenue_field('SOLD', -1)\n revenue_object.save()\n item_dict = validate_item(item_dict)\n cost = Cost.objects.create(item_cost=item_dict['cost'])\n Item.objects.create(\n name=item_dict['name'],\n cost=cost,\n price=item_dict['price'],\n price_type=item_dict['priceType'],\n unit_name=item_dict['unitName'] if 'unitName' in item_dict else None,\n category=item_dict['categories']['elements'][0]['name'],\n revenue=revenue_object,\n created_at=todays_datetime_pacific\n\t )\n","sub_path":"BSFC/BSFC/processing/refunds_adjustment.py","file_name":"refunds_adjustment.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"10946181","text":"import pygame\r\nfrom pygame.locals import *\r\nimport sys, os\r\nimport bullet\r\nfrom enemy import area\r\nfrom datetime import datetime, timedelta\r\n\r\nmy_plane_image1 = '.\\\\images\\\\me1.png'\r\nmy_plane_image2 = '.\\\\images\\\\me2.png'\r\n\r\nme_down1 = '.\\\\images\\\\me_destroy_1.png'\r\nme_down2= '.\\\\images\\\\me_destroy_2.png'\r\nme_down3 = '.\\\\images\\\\me_destroy_3.png'\r\nme_down4 = '.\\\\images\\\\me_destroy_4.png'\r\n\r\nfire_sound = '.\\\\sound\\\\bullet.wav'\r\ndown_sound = '.\\\\sound\\\\me_down.wav'\r\n\r\nclass 
MyPlane(object):\r\n\r\n fire_cool = 5\r\n \r\n def __init__(self, world):\r\n self.score = 0\r\n self.life = 500\r\n self.bullet = bullet.bullet1\r\n self.position = (230, 500)\r\n self.image1 = pygame.image.load(my_plane_image1).convert_alpha()\r\n self.image2 = pygame.image.load(my_plane_image2).convert_alpha()\r\n self.image = self.image1\r\n self.world = world\r\n self.world.I_join_the_battle(self)\r\n self.alive = True\r\n self.fire_sound = pygame.mixer.Sound(fire_sound)\r\n self.down_sound = pygame.mixer.Sound(down_sound)\r\n\r\n w, h = self.image.get_size()\r\n x, y = self.position\r\n self.range = area((x-w/2, x+w/2), (y-h/2, y+h/2))\r\n\r\n down1 = pygame.image.load(me_down1).convert_alpha()\r\n down2 = pygame.image.load(me_down2).convert_alpha()\r\n down3 = pygame.image.load(me_down3).convert_alpha()\r\n down4 = pygame.image.load(me_down4).convert_alpha()\r\n self.down_order = (down1, down2, down3, down4)\r\n self.down_now = 0\r\n\r\n self.fire_count = 5\r\n\r\n self.bullet2_endure = 0\r\n\r\n def process(self, time):\r\n if self.life > 0:\r\n x, y = pygame.mouse.get_pos()\r\n new_x = x if 0 <= x <= self.world.size[0] else self.position[0]\r\n new_y = y if 0 <= y <= self.world.size[1] else self.position[1]\r\n self.position = (new_x, new_y)\r\n w, h = self.image.get_size()\r\n self.range = area((new_x-w/2, new_x+w/2), (new_y-h/2, new_y+h/2))\r\n if self.image == self.image1:\r\n self.image = self.image2\r\n else:\r\n self.image = self.image1\r\n self.fire()\r\n self.get_hit()\r\n self.get_supply()\r\n if type(self.bullet) != type:\r\n self.bullet2_endure += 1\r\n if self.bullet2_endure > 1200:\r\n self.bullet = bullet.bullet1\r\n self.bullet2_endure = 0\r\n else:\r\n if self.down_now == 0:\r\n self.down_sound.play()\r\n if self.down_now < len(self.down_order):\r\n self.image = self.down_order[self.down_now]\r\n else:\r\n self.alive = False\r\n self.down_now += 1\r\n\r\n def render(self, surface):\r\n x, y = self.position\r\n w, h = self.image.get_size()\r\n surface.blit(self.image, (x-w/2, y-h/2))\r\n\r\n def fire(self):\r\n if self.fire_count >= self.fire_cool:\r\n x, y = self.position\r\n w, h = self.image.get_size()\r\n head = y - h/2\r\n self.world.add_bullet(self.bullet((x, head)))\r\n self.fire_count = 0\r\n self.fire_sound.play()\r\n else:\r\n self.fire_count += 1\r\n\r\n def get_hit(self):\r\n x, y = self.position\r\n left, right = max(x-85, 0), min(x+85+1, 480+1)\r\n for p in self.world.map.Plane[left: right]:\r\n while p.next:\r\n p = p.next\r\n plane = p.value\r\n if self.range.if_over_stack(plane.range):\r\n if plane.life > 0:\r\n p_life = plane.life\r\n plane.life -= self.life\r\n self.life -= p_life\r\n if p_life > 0 and plane.life <= 0:\r\n self.world.score += plane.score \r\n\r\n def get_supply(self):\r\n if self.world.supplies:\r\n for supply in self.world.supplies:\r\n if self.range.if_over_stack(supply.range):\r\n supply.get = True\r\n supply.get_sound.play()\r\n\r\n def switch_bullet(self):\r\n #self.bullet2_endure = 0\r\n self.bullet = bullet.bullet2\r\n","sub_path":"MyPlane.py","file_name":"MyPlane.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"198557084","text":"from numpy import shape, zeros\r\n\r\n\r\ndef divdif(x, y):\r\n size = shape(y)[0]\r\n table = zeros([size, size])\r\n table[:, 0] = y # первая колонна таблицы = y\r\n\r\n for j in range(1, size):\r\n for i in range(size - j):\r\n dif_y = table[i + 1][j - 1] - table[i][j - 1]\r\n dif_x = x[i + j] 
- x[i]\r\n table[i][j] = dif_y / dif_x\r\n\r\n return table[0] # возвращаем первую строку\r\n\r\n\r\ndef poly_from(b, xi):\r\n polynom = \"\"\r\n for i in range(len(b)):\r\n polynom += \"+ b[\" + str(i) + \"] \"\r\n for j in range(i):\r\n polynom += \" * (x - xi[\" + str(j) + \"]) \"\r\n\r\n def compute(x):\r\n b\r\n xi\r\n return eval(polynom)\r\n\r\n return compute\r\n\r\n","sub_path":"newton.py","file_name":"newton.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"62144358","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom PyQt5.QtWidgets import QDialog, QCheckBox, QVBoxLayout, QLineEdit, QComboBox,QPushButton, QMessageBox, QToolButton, QTextEdit\nfrom PyQt5.QtGui import QIcon\n# from qtconsole.qt import QtCore\n\nimport libpg\nimport model_tags\nimport qlib as qc\nimport data_sets\nimport liblogos\nimport parameters as gl\nimport brands\n\n\nclass ModelEdit(QDialog):\n def __init__(self, md_id, brand_id, brand_name, parent=None):\n super(ModelEdit, self).__init__(parent)\n self.resize(600, 400)\n # vars\n self.model_id = md_id\n self.brand_id = brand_id\n \n self.setWindowTitle('Ficha do Modelo')\n # self.tabuladorTabWidget = QTabWidget()\n masterLayout = QVBoxLayout(self)\n self.md_id = QLineEdit()\n self.md_id.setMaximumWidth(80)\n # self.md_id.setReadOnly(True)\n # self.md_id.setStyleSheet(\"background-color: #dbf3ff;\")\n # masterLayout.addLayout(qc.addHLayout(['ID', self.md_id, True], lw=150))\n self.md_name = QLineEdit()\n self.md_name.setMaxLength(40)\n masterLayout.addLayout(qc.addHLayout(['Modelo:', self.md_name], lw=150))\n self.md_brand_id = QLineEdit()\n self.md_brand_id.setReadOnly(True)\n self.md_brand_id.setMinimumWidth(200)\n self.md_brand_id.setMaximumWidth(200)\n self.md_brand_id.setReadOnly(True)\n self.md_brand_id.setStyleSheet(\"background-color: #dbf3ff;\")\n brandChangeTBtn = QToolButton()\n brandChangeTBtn.setText('☢️')\n \n brandChangeTBtn.clicked.connect(self.change_brand)\n \n masterLayout.addLayout(qc.addHLayout(['Marca:', self.md_brand_id, True, brandChangeTBtn], lw=150))\n self.hw_type_name = QComboBox()\n self.hw_type_name.setMaximumWidth(250)\n self.hw_type_name.addItems(gl.hardware_types)\n masterLayout.addLayout(qc.addHLayout(['Tipo:', self.hw_type_name, True], lw=150))\n self.md_ref_oem = QLineEdit()\n self.md_ref_oem.setMinimumWidth(150)\n self.md_ref_oem.setMaxLength(30)\n masterLayout.addLayout(qc.addHLayout(['Ref. Fabricante:', self.md_ref_oem], lw=150))\n self.md_ref_gc = QLineEdit()\n self.md_ref_gc.setMaxLength(30)\n masterLayout.addLayout(qc.addHLayout(['Ref. 
Gestao Comercial:', self.md_ref_gc], lw=150))\n self.lifeSpanCbox = QComboBox()\n self.lifeSpanCbox.addItems(['6 meses', '12 meses', '24 meses', '36 meses', '48 meses', '60 meses'])\n self.disabelChk = QCheckBox('Descontinuado')\n masterLayout.addLayout(qc.addHLayout(['Ciclo de Vida:', self.lifeSpanCbox, True, self.disabelChk], lw=150))\n self.add_tag = QToolButton()\n self.add_tag.setIcon(QIcon('.//img//add2.png'))\n self.add_tag.clicked.connect(self.add_tags_click)\n \n masterLayout.addLayout(qc.addHLayout([self.add_tag, True]))\n self.md_tags_txt = QTextEdit()\n masterLayout.addLayout(qc.addVLayout([self.md_tags_txt]))\n masterLayout.addStretch()\n save_Btn = QPushButton('Grava')\n save_Btn.clicked.connect(self.update_model)\n \n masterLayout.addWidget(save_Btn)\n if self.model_id > 0:\n self.model_dict = data_sets.model_to_dict(self.model_id)\n self.tags_stack = self.get_tags()\n # self.lifeSpanCbox.setEnabled(False)\n self.refresh_form()\n else:\n self.md_brand_id.setText(brand_name)\n \n def refresh_form(self):\n self.md_id.setText(str(self.model_dict['md_id']))\n self.md_ref_oem.setText(self.model_dict['md_ref_oem'])\n self.md_name.setText(self.model_dict['md_name'])\n self.md_ref_gc.setText(self.model_dict['md_ref_gc'])\n self.md_brand_id.setText(self.model_dict['br_name'])\n self.md_tags_txt.setText(self.model_dict['md_tags'])\n self.hw_type_name.setCurrentText(self.model_dict['hw_type_name'])\n self.lifeSpanCbox.setCurrentIndex(self.model_dict['md_lifespan'])\n\n def insert_model(self):\n sql = '''INSERT INTO models( md_name, md_ref_gc, md_brand_id, md_ref_oem, md_tags, md_hardware_type, md_lifespan)\n VALUES (%s,%s,%s,%s,%s, (SELECT hw_type_id from hardware_types where lower(hw_type_name)=%s), %s)'''\n data = (self.md_name.text().upper(),self.md_ref_gc.text().upper(), self.brand_id,self.md_ref_oem.text().upper(),\n self.md_tags_txt.toPlainText(), self.hw_type_name.currentText().lower(),self.lifeSpanCbox.currentIndex())\n libpg.execute_query(sql, data)\n a = libpg.sql_query('''Select max(md_id)+1 as t from models''')[0][0]\n if not self.md_tags_txt.toPlainText():\n pass\n else:\n liblogos.update_model_tags(a, self.md_tags_txt.toPlainText().lower())\n self.close()\n\n def update_model(self):\n if self.model_id > 0:\n sql = '''UPDATE models\n SET md_name=%s, md_ref_gc=%s, md_ref_oem=%s, md_tags=%s, md_lifespan= %s,\n md_hardware_type = (select hw_type_id from hardware_types where lower(hw_type_name) = %s)\n WHERE md_id= %s'''\n data = (self.md_name.text(), self.md_ref_gc.text().upper(), self.md_ref_oem.text(), self.md_tags_txt.toPlainText().lower(),self.lifeSpanCbox.currentIndex(),\n self.hw_type_name.currentText().lower(), self.model_dict['md_id'])\n libpg.execute_query(sql, data)\n if self.tags_stack != self.md_tags_txt.toPlainText():\n liblogos.update_model_tags(self.model_dict['md_id'], self.md_tags_txt.toPlainText())\n else:\n self.insert_model()\n self.close()\n \n def get_tags(self):\n sql = '''SELECT tags_model.tam_name FROM tag_md_refs, tags_model WHERE\n tag_md_refs.tam_tag_id = tags_model.tam_id and tam_model_id=%s;'''\n a = libpg.query_many(sql, (self.model_dict['md_id'],))\n tags = ''\n for n in a:\n tags += n[0].lower() + ','\n return tags.rstrip(',')\n \n def add_tags_click(self):\n self.save_warning = True\n form = model_tags.ModelTagsBrowser()\n form.exec_()\n if form.tag_list[0] == 0:\n self.md_tags_txt.setPlainText(form.tag_list[1])\n elif form.tag_list[0] == 1:\n self.md_tags_txt.setPlainText(self.md_tags_txt.toPlainText() + ',' + form.tag_list[1])\n\n def 
change_brand(self):\n form = brands.BrandsBrowser()\n form.exec_()\n if form.ret['brand_id'] > 0:\n sql = 'update models set md_brand_id = %s where md_id = %s'\n libpg.execute_query(sql, (form.ret['brand_id'], self.model_dict['md_id']))\n self.model_dict = data_sets.model_to_dict(self.model_id)\n self.tags_stack = self.get_tags()\n self.refresh_form()\n\n\nclass AddBrand(QDialog):\n def __init__(self, parent=None):\n super(AddBrand, self).__init__(parent)\n self.setWindowTitle('Adiciona Marca')\n masterLayout = QVBoxLayout(self)\n self.text_1 = QLineEdit()\n masterLayout.addLayout(qc.addHLayout(['Marca', self.text_1, True], lw=100))\n ok_Btn = QPushButton('Adiciona')\n ok_Btn.clicked.connect(self.save_click)\n cancela_Btn = QPushButton('Cancela')\n cancela_Btn.clicked.connect(self.cancel_btn_click)\n masterLayout.addLayout(qc.addHLayout([ok_Btn, cancela_Btn]))\n \n def cancel_btn_click(self):\n self.close()\n \n def save_click(self):\n a = libpg.query_one('select br_name from brands where lower(br_name)= %s', (self.text_1.text().lower(),))\n if a is None:\n libpg.execute_query('insert into brands (br_name) VALUES (%s)', (str(self.text_1.text()),))\n self.close()\n else:\n QMessageBox.warning(None, \"Marca Duplicada\", \"Esta marca já existe!\")\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"models_lib.py","file_name":"models_lib.py","file_ext":"py","file_size_in_byte":7915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"90331692","text":"#!/usr/bin/env python\n\nimport smtplib\n\nSERVER = \"mail.ldmailmasters.com\"\nFROM = \"ldsupport@ldmailmasters.com\"\nTO = [\"spencerrathbun@gmail.com\"]\n\nSUBJECT = \"Test of smtp\"\nTEXT = \"This is a test of the smtp protocol to an outside email.\"\n\n# Prepare actual message\nmessage = \"\"\"From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\n\\\n\n%s\n\"\"\" % (FROM, \", \".join(TO), SUBJECT, TEXT)\n\n# Send the mail\nserver = smtplib.SMTP(SERVER)\nserver.sendmail(FROM, TO, message)\nserver.quit()\n","sub_path":"mailTest.py","file_name":"mailTest.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"223065164","text":"# -*- coding: utf-8 -*-\r\n#根据单张图片的xml文件,在图片上标注出目标\r\nimport xml.etree.ElementTree as ET\r\nimport cv2\r\n\r\n\r\n\r\n\r\nxml_file = 'voc_data/1.xml'\r\ntree = ET.parse(xml_file)\r\nroot = tree.getroot()\r\nimgfile = 'images/1.jpg'\r\nim = cv2.imread(imgfile)\r\nfor object in root.findall('object'):\r\n object_name = object.find('name').text\r\n print(object_name)\r\n Xmin = int(object.find('bndbox').find('xmin').text)\r\n Ymin = int(object.find('bndbox').find('ymin').text)\r\n Xmax = int(object.find('bndbox').find('xmax').text)\r\n Ymax = int(object.find('bndbox').find('ymax').text)\r\n color = (4, 250, 7)\r\n cv2.rectangle(im, (Xmin, Ymin), (Xmax, Ymax), color, 2)\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n cv2.putText(im, object_name, (Xmin, Ymin - 7), font, 0.5, (6, 230, 230), 2)\r\n # cv2.imshow('1', im)\r\n # cv2.waitKey(100)\r\ncv2.imwrite('1.jpg', im)\r\n","sub_path":"工作内容/作品著作权/test/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"557741397","text":"import numpy as np\nimport operator\n\nfrom galry import *\n\n\n__all__ = ['SpikeDataOrganizer', 'HighlightManager']\n\n\nclass SpikeDataOrganizer(object):\n def __init__(self, *args, **kwargs):\n # set data\n 
self.set_data(*args, **kwargs)\n # reorder data\n self.reorder()\n \n def set_data(self, data, clusters=None, cluster_colors=None, masks=None,\n nchannels=None, spike_ids=None):\n \"\"\"\n Arguments:\n * data: a Nspikes x ?? (x ??) array\n * clusters: a Nspikes array, dtype=int, absolute indices\n * cluster_colors: as a function of the RELATIVE index\n \"\"\"\n # get the number of spikes from the first dimension of data\n self.nspikes = data.shape[0]\n self.ndim = data.ndim\n \n # check arguments\n if nchannels is None:\n raise TypeError(\"The number of channels should be specified.\")\n \n # default arguments\n if clusters is None:\n clusters = np.zeros(self.nspikes, dtype=np.int)\n if masks is None:\n masks = np.ones((self.nspikes, self.nchannels))\n if spike_ids is None:\n spike_ids = np.arange(self.nspikes)\n \n self.data = enforce_dtype(data, np.float32)\n self.clusters = enforce_dtype(clusters, np.int32)\n self.masks = enforce_dtype(masks, np.float32)\n self.cluster_colors = enforce_dtype(cluster_colors, np.float32)\n \n # unique clusters\n self.clusters_unique = np.unique(clusters)\n self.clusters_unique.sort()\n self.nclusters = len(self.clusters_unique)\n \n if cluster_colors is None:\n cluster_colors = np.ones((self.nclusters, 3))\n \n # same as clusters, but with relative indexing instead of absolute\n clusters_rel = np.arange(self.clusters_unique.max() + 1)\n clusters_rel[self.clusters_unique] = np.arange(self.nclusters)\n self.clusters_rel = clusters_rel[self.clusters]\n \n def get_reordering(self):\n # regroup spikes from the same clusters, so that all data from\n # one cluster are contiguous in memory (better for OpenGL rendering)\n # permutation contains the spike indices in successive clusters\n self.permutation = []\n self.cluster_sizes_dict = {}\n self.cluster_sizes_cum = {}\n counter = 0\n for cluster in self.clusters_unique:\n # spike indices in the current cluster\n ids = np.nonzero(self.clusters == cluster)[0]\n # size of the current cluster\n size = len(ids)\n # record the size\n self.cluster_sizes_dict[cluster] = size\n # record the total number of spikes before the first spike in the\n # current cluster\n self.cluster_sizes_cum[cluster] = counter\n # create the spike permutation to regroup those in the same clusters\n self.permutation.append(ids)\n counter += size\n self.permutation = np.hstack(self.permutation)\n return self.permutation\n \n def reorder(self, permutation=None):\n if permutation is None:\n permutation = self.get_reordering()\n # reorder data\n if self.ndim == 1:\n self.data_reordered = self.data[permutation]\n elif self.ndim == 2:\n self.data_reordered = self.data[permutation,:]\n elif self.ndim == 3:\n self.data_reordered = self.data[permutation,:,:]\n \n # reorder masks\n self.masks = self.masks[permutation,:]\n self.clusters = self.clusters[permutation,:]\n self.clusters_rel = self.clusters_rel[permutation,:]\n \n # array of cluster sizes as a function of the relative index\n self.cluster_sizes = np.array(map(operator.itemgetter(1),\n sorted(self.cluster_sizes_dict.iteritems(),\n key=operator.itemgetter(0))))\n \n return self.data_reordered\n\n\nclass HighlightManager(object):\n \n highlight_rectangle_color = (0.75, 0.75, 1., .25)\n \n def initialize(self):\n self.highlight_box = None\n self.paint_manager.ds_highlight_rectangle = \\\n self.paint_manager.create_dataset(RectanglesTemplate,\n coordinates=(0., 0., 0., 0.),\n color=self.highlight_rectangle_color,\n is_static=True,\n visible=False)\n \n def highlight(self, enclosing_box):\n # get the 
enclosing box in the window relative coordinates\n x0, y0, x1, y1 = enclosing_box\n \n # set the highlight box, in window relative coordinates, used\n # for displaying the selection rectangle on the screen\n self.highlight_box = (x0, y0, x1, y1)\n \n # paint highlight box\n self.paint_manager.set_data(visible=True,\n coordinates=self.highlight_box,\n dataset=self.paint_manager.ds_highlight_rectangle)\n \n # convert the box coordinates in the data coordinate system\n x0, y0 = self.interaction_manager.get_data_coordinates(x0, y0)\n x1, y1 = self.interaction_manager.get_data_coordinates(x1, y1)\n \n self.highlighted((x0, y0, x1, y1))\n \n def highlighted(self, box):\n pass\n\n def cancel_highlight(self):\n # self.set_highlighted_spikes([])\n if self.highlight_box is not None:\n self.paint_manager.set_data(visible=False,\n dataset=self.paint_manager.ds_highlight_rectangle)\n self.highlight_box = None\n \n\n","sub_path":"spiky/views/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"218695167","text":"import json\n\nfrom rest_framework import status\n\nfrom .base_test import BaseEndPointTest\n\n\nclass EmployeeEndPointTest(BaseEndPointTest):\n\n def test_get_list_employee(self):\n \"\"\"GET /employee/ must return status code 200\"\"\"\n response = self.client.get(self.url('employee-list'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n def test_get_number_of_registers(self):\n \"\"\"GET /employee/ must return 3 itens\"\"\"\n response = self.client.get(self.url('employee-list'))\n itens = self.create_json(response)\n self.assertEqual(3, itens['count'])\n\n def test_get_detail_employee(self):\n \"\"\"GET /employee/1/ must return Arnaldo Pereira employee\"\"\"\n response = self.client.get(self.url('employee-detail', 1))\n employee = self.create_json(response)\n self.assertEqual(employee['name'], 'Arnaldo Pereira')\n\n def test_put_employee(self):\n \"\"\"PUT /employee/1/ must return status code 200\"\"\"\n response = self.client.put(\n self.url('employee-detail', 1),\n data=json.dumps(\n {\n 'name': 'Shimira Cheng',\n 'email': 'shimira@cheng.com', \n 'department': 'Mobile',\n }\n ),\n content_type='application/json'\n )\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n employee = self.create_json(response)\n self.assertEqual(employee['name'], 'Shimira Cheng')\n self.assertEqual(employee['email'], 'shimira@cheng.com')\n self.assertEqual(employee['department'], 'Mobile')\n\n def test_patch_employee(self):\n \"\"\"PATCH /employee/1/ must return status code 200\"\"\"\n response = self.client.patch(\n self.url('employee-detail', 2),\n data=json.dumps(\n {'name': 'Ryoga Kamishiro', 'email': 'ryoga@kamishiro.com'}\n ),\n content_type='application/json'\n )\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n employee = self.create_json(response)\n self.assertEqual(employee['name'], 'Ryoga Kamishiro')\n self.assertEqual(employee['email'], 'ryoga@kamishiro.com')\n\n def test_delete_employee(self):\n \"\"\"DELETE /employee/1/ must return status code 204\"\"\"\n response = self.client.delete(self.url('employee-detail', 3))\n self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code)\n","sub_path":"apps/employee/test/test_employee_endpoint.py","file_name":"test_employee_endpoint.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} 
+{"seq_id":"68814371","text":"from neuroevolution.controllers import ProblemController\nfrom xor_example.xor_thread import Xor\nimport numpy as np\n\n\nclass XorController(ProblemController):\n def __init__(self, network_controller, demo):\n super(XorController, self).__init__(network_controller, demo)\n self.problem = Xor(demo)\n\n def prepare(self):\n pass\n\n def run_test(self):\n self.problem.start()\n while True:\n problem = self.problem.out_queue.get(block=True)\n if problem is None:\n break\n network_output = self.network_controller.process_input(np.array(problem))\n for output_index in range(len(network_output)):\n if network_output[output_index] is None:\n network_output[output_index] = 0\n answer = (problem, [network_output[0]])\n self.problem.in_queue.put(answer)\n self.problem.join()\n self.score = self.problem.score\n","sub_path":"xor_example/xor_controller.py","file_name":"xor_controller.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"271157665","text":"from itertools import combinations\nfrom math import sqrt\n\nclass fourpointpoly():\n def __init__(self, p1='q', x1y1 =(0,0), p2='u', x2y2=(1,1), p3='a', x3y3=(2,2), p4='d', x4y4=(3,3)):\n self.__p1 = {p1:x1y1}\n self.__p2 = {p2:x2y2}\n self.__p3 = {p3:x3y3}\n self.__p4 = {p4:x4y4}\n self.__pnt_nm = [p1, p2, p3, p4]\n self.__pnt_val = [x1y1, x2y2, x3y3, x4y4]\n self.__segments = {}\n nm = list(combinations(self.__pnt_nm,2))\n val = list(combinations(self.__pnt_val,2))\n for i in range(len(nm)):\n seg = nm[i][0]+nm[i][1]\n self.__segments[seg] = val[i]\n\n def __calc_pnt_dist(self):\n self.__euc_dist = {}\n for k,v in self.__segments.items():\n a, b = self.__segments.get(k)\n xa, ya = a\n xb, yb = b\n dis = sqrt (\n (xa - xb)**2 +\n (ya - yb)**2\n )\n self.__euc_dist[k] = dis\n return self.__euc_dist\n\n def __calc_slope(self):\n self.__slope = {}\n for k,v in self.__segments.items():\n a, b = self.__segments.get(k)\n xa, ya = a\n xb, yb = b\n slope = (\n (yb - ya) / (xb-xa) \n )\n self.__slope[k] = slope\n return self.__slope\n\n def __calc_adj_mid_pnt(self):\n __p1 = [k for k in self.__p1.keys()][0]\n __p2 = [k for k in self.__p2.keys()][0]\n __p3 = [k for k in self.__p3.keys()][0]\n __p4 = [k for k in self.__p4.keys()][0]\n x1, y1 = [v for v in self.__p1.values()][0] \n x3, y3 = [v for v in self.__p3.values()][0]\n x2, y2 = [v for v in self.__p2.values()][0]\n x4, y4 = [v for v in self.__p4.values()][0]\n \n adj_mid1 = (\n ((x1 + x3) / 2) , ((y1 + y3) / 2)\n )\n\n adj_mid2 = (\n ((x2 + x4) / 2) , ((y2 + y4) / 2)\n )\n self.__adj_mid_pid = {\n (__p1 + __p3) : {\n ((x1, y1), (x3, y3)): adj_mid1\n }, \n (__p2 + __p4) : {\n ((x2,y2), (x4,y4)): adj_mid2\n }\n }\n return self.__adj_mid_pid\n\n def print_dist(self):\n for k,v in self.__calc_pnt_dist().items():\n print(f'The distance between segment {k} (points {self.__segments.get(k)[0]} and {self.__segments.get(k)[1]}) is {v:.3g}')\n\n def print_slope(self):\n for k,v in self.__calc_slope().items():\n print(f'The slope of segment {k} (points {self.__segments.get(k)[0]} and {self.__segments.get(k)[1]}) is {v:.3g}')\n\n def print_mid_pnt(self):\n for k1,v1 in self.__calc_adj_mid_pnt().items():\n for k2,v2 in v1.items():\n print(f'The mid point of segment {k1} (points {k2[0]}, {k2[1]}) is {v2} ')\n\n\np1='Q'\np2='U'\np3='A'\np4='D'\nx1y1 = (0,-5)\nx2y2 = (2,-3)\nx3y3 = (1, 3)\nx4y4 = (-1, 1)\nx = fourpointpoly(p1, x1y1, p2, x2y2, p3, x3y3, p4, 
x4y4)\nx.print_dist()\nx.print_slope()\nx.print_mid_pnt()","sub_path":"annemarie2.py","file_name":"annemarie2.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"11697132","text":"# -*- python -*-\n\n# This software was produced by NIST, an agency of the U.S. government,\n# and by statute is not subject to copyright in the United States.\n# Recipients of this software assume all responsibilities associated\n# with its operation, modification and maintenance. However, to\n# facilitate maintenance we ask that before distributing modified\n# versions of this software, you first contact the authors at\n# oof_manager@nist.gov. \n\n# Menu commands for manipulating PixelGroups\n\nfrom ooflib.SWIG.common import burn\nfrom ooflib.SWIG.common import config\nfrom ooflib.SWIG.common import ooferror\nfrom ooflib.SWIG.common import pixelgroup\nfrom ooflib.SWIG.common import progress\nfrom ooflib.SWIG.common import statgroups\nfrom ooflib.SWIG.common import switchboard\nfrom ooflib.common import debug\nfrom ooflib.common import enum\nfrom ooflib.common import parallel_enable\nfrom ooflib.common import primitives\nfrom ooflib.common import runtimeflags\nfrom ooflib.common import utils\nfrom ooflib.common.IO import automatic\nfrom ooflib.common.IO import microstructureIO\nfrom ooflib.common.IO import parameter\nfrom ooflib.common.IO import reporter\nfrom ooflib.common.IO import whoville\nfrom ooflib.common.IO import xmlmenudump\nfrom ooflib.common.IO.mainmenu import OOF\nfrom ooflib.common.IO.oofmenu import OOFMenuItem\nfrom ooflib.common.IO.pixelgroupparam import PixelGroupParameter\nimport ooflib.common.microstructure # a local variable is named 'microstructure'\n\nif parallel_enable.enabled():\n from ooflib.common.IO import pixelgroupIPC\n\nBooleanParameter = parameter.BooleanParameter\nAutomaticNameParameter = parameter.AutomaticNameParameter\nStringParameter = parameter.StringParameter\n\npixgrpmenu = OOF.addItem(OOFMenuItem(\n 'PixelGroup', cli_only=1,\n help='Create and manipulate pixel groups.',\n discussion=xmlmenudump.loadFile('DISCUSSIONS/common/menu/pixelgroup.xml')\n ))\n\n##########################\n\n\n# PixelGroup menu items are responsible for issuing the appropriate\n# switchboard notifications when pixel group memberships change, so\n# that the skeletons etc. can recompute their homogeneity. In\n# particular, the \"changed pixel group\" signal is emitted in these\n# menu items, outside of the microstructure lock. 
This is so that\n# switchboard callbacks don't have to worry about locking issues.\n\ndef newPixelGroup(menuitem, name, microstructure):\n if parallel_enable.enabled():\n pixelgroupIPC.ipcpixgrpmenu.New(name=name,\n microstructure=microstructure)\n return\n if name and microstructure:\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n mscontext.begin_writing()\n try:\n if ms:\n (grp, newness) = ms.getGroup(name) \n finally:\n mscontext.end_writing()\n\n if newness:\n switchboard.notify(\"new pixel group\", grp)\n return grp\n \n reporter.report(\"Failed to create group\", name, \"in microstructure\",\n microstructure)\n\ndef pixelGroupNameResolver(param, startname):\n if param.automatic():\n basename = 'pixelgroup'\n else:\n basename = startname\n msname = param.group['microstructure'].value\n ms = ooflib.common.microstructure.getMicrostructure(msname)\n return ms.uniqueGroupName(basename)\n\npixgrpmenu.addItem(OOFMenuItem(\n 'New',\n callback=newPixelGroup,\n params=parameter.ParameterGroup(\n AutomaticNameParameter('name', value=automatic.automatic,\n resolver=pixelGroupNameResolver,\n tip=\"Group name.\"),\n whoville.WhoParameter('microstructure', whoville.getClass('Microstructure'),\n tip=\n \"Microstructure in which to create this PixelGroup.\")\n ),\n help='Create a new PixelGroup in the given Microstructure.',\n discussion=\"\"\"\n\n Create a new &pixelgroup;. The name of the\n group must be unique within the µ. If it is not unique, a\n suffix of the form <x> will be\n appended, for some integer x.\n\n \"\"\"))\n\n#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#\n\n# AutoGroup uses a statistical method to create groups. Each group\n# is assumed to contain a distribution of pixel values. A pixel value\n# is compared to the mean and deviation of each existing group, and\n# the pixel added to the group to which it's the fewest deviations\n# from the mean. If it's not close enough to any group, a new group\n# is created. Adding a pixel to a group changes the group's mean and\n# deviation. If two groups get close to one another, they are merged.\n\n# After all pixels have been added to groups, each group is split into\n# disconnected regions. Regions containing fewer than minsize pixels\n# are merged into adjacent groups, pixel by pixel. If a pixel is\n# adjacent to more than one group, its put into the group with more\n# neighbors. 
If the pixel is adjacent to the same number of neighbors\n# in more than one group, it's put into the group with the closest\n# mean.\n\n# The standard deviation, sigma0, to be used for a group containing a\n# single pixel is set in the PixelGrouperParameter, not in AutoGroup,\n# because its default value depends on which PixelGrouper is selected.\n\ndef autoPixelGroup(menuitem, grouper, delta, gamma, minsize, contiguous,\n name_template, clear):\n ms = grouper.mscontext.getObject()\n if \"%n\" not in name_template:\n name_template = name_template + \"%n\"\n prog = progress.getProgress('AutoGroup', progress.DEFINITE)\n prog.setMessage('Grouping pixels...')\n grouper.mscontext.begin_writing()\n newgrpname = None\n try:\n newgrpname = statgroups.statgroups(ms, grouper.cobj, delta, gamma,\n minsize,\n contiguous,\n name_template, clear);\n finally:\n prog.finish()\n grouper.mscontext.end_writing()\n if newgrpname:\n switchboard.notify(\"new pixel group\", ms.findGroup(newgrpname))\n switchboard.notify(\"changed pixel groups\", ms.name())\n switchboard.notify(\"redraw\")\n\npixgrpmenu.addItem(OOFMenuItem(\n \"AutoGroup\",\n callback=autoPixelGroup,\n params=[\n statgroups.PixelGrouperParameter(\n 'grouper',\n tip=\"Which pixel values to use, and how to compute\"\n \" the difference between them.\"),\n parameter.FloatParameter(\n 'delta', value=2.0,\n tip=\"Pixels within this many standard deviations of a group's mean\"\n \" will be added to the group.\"),\n parameter.FloatParameter(\n 'gamma',\n value=2.0,\n tip=\"Groups within this many standard deviations of each other's\"\n \" means will be merged.\"),\n parameter.IntParameter(\n 'minsize', value=0,\n tip=\"Don't create groups or isolated parts of groups with fewer\"\n \" than this many pixels. Instead, assign pixels to the nearest\"\n \" large group. Set minsize=0 to skip this step.\"),\n parameter.BooleanParameter(\n 'contiguous', value=True,\n tip=\"Create only contiguous groups. Similar pixels that aren't\"\n \" connected to one another will be put into separate groups.\"),\n parameter.StringParameter(\n \"name_template\",\n value=\"group_%n\",\n tip=\"Name for the new pixel groups.\"\n \" '%n' will be replaced by an integer.\"),\n parameter.BooleanParameter(\n \"clear\", value=True,\n tip=\"Clear pre-existing groups before adding pixels to them.\"\n \" This will NOT clear groups to which no pixels are being added.\")\n ],\n help=\"Put all pixels into pixel groups, sorted by color or orientation.\",\n discussion=xmlmenudump.loadFile('DISCUSSIONS/common/menu/autogroup.xml')\n))\n\n \n \n \n\n##########################\n\ndef renamePixelGroup(menuitem, microstructure, group, new_name):\n if parallel_enable.enabled():\n pixelgroupIPC.ipcpixgrpmenu.Rename(microstructure=microstructure,\n group=group,\n new_name=new_name)\n return\n\n # \"group\" arg is the old group name.\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n mscontext.begin_writing()\n renamed = False\n try:\n grp = ms.findGroup(group)\n # Don't just say \"if grp\" here. 
PixelGroup has a __len__\n # function, so empty groups evaluate to \"false\".\n if grp is not None:\n ms.renameGroup(group, new_name)\n renamed = True\n if config.dimension() == 2 and runtimeflags.surface_mode:\n interfacemsplugin=ms.getPlugIn(\"Interfaces\")\n interfacemsplugin.renameGroup(group, new_name)\n else:\n raise ooferror.PyErrUserError(\"There is no pixel group named %s!\"\n % group)\n finally:\n mscontext.end_writing()\n\n if renamed:\n switchboard.notify('renamed pixel group', grp, group, new_name)\n\npixgrpmenu.addItem(OOFMenuItem(\n 'Rename', callback=renamePixelGroup,\n params=[\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n PixelGroupParameter('group', tip='PixelGroup to be renamed.'),\n StringParameter('new_name', \n tip='New name for the group, in quotation marks.')\n ],\n help='Rename an existing PixelGroup in the given Microstructure.',\n discussion=\"\"\"\n\n Assign a new name to a &pixelgroup;. The\n new_name must be unique, just as it must be for\n a new group.\n\n \"\"\"))\n\n##########################\n\ndef copyPixelGroup(menuitem, microstructure, group, name):\n if parallel_enable.enabled():\n pixelgroupIPC.ipcpixgrpmenu.Copy(microstructure=microstructure,\n group=group,\n name=name)\n return\n if group != name:\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n mscontext.begin_writing()\n newness = False\n try:\n oldgroup = ms.findGroup(group)\n if oldgroup is not None:\n (newgroup, newness) = ms.getGroup(name)\n newgroup.addWithoutCheck(oldgroup.members())\n else:\n raise ooferror.PyErrUserError(\"There is no pixel group named %s!\"\n % group)\n finally:\n mscontext.end_writing()\n \n if newness:\n switchboard.notify(\"new pixel group\", newgroup)\n switchboard.notify(\"changed pixel group\", newgroup, microstructure)\n \n \npixgrpmenu.addItem(OOFMenuItem(\n 'Copy', callback=copyPixelGroup,\n params=parameter.ParameterGroup(\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n PixelGroupParameter('group', tip='PixelGroup to be copied.'),\n AutomaticNameParameter('name', value=automatic.automatic,\n resolver=pixelGroupNameResolver,\n tip=\"Group name.\")\n ),\n help='Make a copy of an existing pixel group',\n discussion=\"\"\"\n\n Copy an exisiting &pixelgroup;. 
The name must\n be unique, just as it must be for a new group.\n\n \"\"\"))\n\n##########################\n\ndef destroyPixelGroup(menuitem, microstructure, group):\n if parallel_enable.enabled():\n pixelgroupIPC.ipcpixgrpmenu.Delete(microstructure=microstructure,\n group=group)\n return\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n mscontext.begin_writing()\n try:\n # Need the group object for the switchboard signal.\n grp = ms.findGroup(group)\n ms.removeGroup(group) \n finally:\n mscontext.end_writing()\n\n if grp is not None:\n switchboard.notify(\"destroy pixel group\", grp, microstructure)\n switchboard.notify('redraw')\n\n\n\npixgrpmenu.addItem(OOFMenuItem(\n 'Delete',\n callback=destroyPixelGroup,\n params=[\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n PixelGroupParameter('group', tip='PixelGroup to be destroyed.')\n ],\n help='Delete the selected Pixel Group.',\n discussion=\"Remove a &pixelgroup; completely from a µ.\"\n ))\n\ndef destroyAllPixelGroups(menuitem, microstructure):\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n mscontext.begin_writing()\n try:\n ms.removeAllGroups()\n finally:\n mscontext.end_writing()\n switchboard.notify(\"destroy pixel group\", None, microstructure)\n\npixgrpmenu.addItem(OOFMenuItem(\n 'DeleteAll',\n callback=destroyAllPixelGroups,\n params=[\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n ],\n help='Delete all Pixel Groups.',\n discussion=\"Remove all &pixelgroups; from a µ.\"\n ))\n \n##########################\n\ndef meshablePixelGroup(menuitem, microstructure, group, meshable):\n if parallel_enable.enabled():\n pixelgroupIPC.ipcpixgrpmenu.Meshable(microstructure=microstructure,\n group=group,\n meshable=meshable)\n return\n\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n mscontext.begin_writing()\n try:\n grp = ms.findGroup(group)\n if grp is not None:\n grp.set_meshable(meshable)\n ms.recategorize()\n else:\n raise ooferror.PyErrUserError(\"There is no pixel group named %s!\"\n % group)\n finally:\n mscontext.end_writing()\n\n switchboard.notify('redraw')\n if grp is not None:\n switchboard.notify(\"changed pixel group\", grp, microstructure)\n \npixgrpmenu.addItem(OOFMenuItem(\n 'Meshable',\n callback=meshablePixelGroup,\n params=[\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n PixelGroupParameter('group', tip=\"Pixel group.\"),\n BooleanParameter('meshable', tip=\"1 (true) for meshable and 0 (false) for non-meshable.\")],\n help=\"Should adaptive Skeletons follow the boundaries of the given group?\",\n discussion=\"\"\"\n\n If a &pixelgroup; is meshable, then the\n boundaries of the group are respected by the &skel; modification\n (adaptive meshing) tools. That is, the tools attempt to create\n &skels; that resolve the meshable group\n boundaries as well as the &material; boundaries. 
By default,\n new\n &pixelgroups; are meshable.\n\n \"\"\"))\n \n\n##########################\n\ndef addSelection(menuitem, microstructure, group):\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n ms.pixelselection.begin_reading()\n try:\n sel = ms.pixelselection.getObject()\n pxls = sel.members()\n finally:\n ms.pixelselection.end_reading()\n mscontext.begin_writing()\n try:\n grp = ms.findGroup(group)\n grp.add(pxls)\n finally:\n mscontext.end_writing()\n\n if grp is not None:\n switchboard.notify(\"changed pixel group\", grp, microstructure)\n switchboard.notify('redraw')\n\n\npixgrpmenu.addItem(OOFMenuItem(\n 'AddSelection',\n callback=addSelection,\n params=[\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n PixelGroupParameter('group',\n tip=\"Group to which to add the selected pixels.\")\n ],\n help='Add the currently selected pixels to the given PixelGroup.',\n discussion=\"\"\"\n The pixels that are currently selected\n will be added to the given &pixelgroup;.\n \"\"\"))\n \n#########################\n\ndef removeSelection(menuitem, microstructure, group):\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n ms.pixelselection.begin_reading()\n try:\n sel = ms.pixelselection.getObject()\n pxls = sel.members()\n finally:\n ms.pixelselection.end_reading()\n \n mscontext.begin_writing()\n try:\n grp = ms.findGroup(group)\n grp.remove(pxls) # calls ms.recategorize(), which\n # increments the timestamp of ms AND\n # issues \"changed pixel group\" signal.\n finally:\n mscontext.end_writing()\n\n if grp is not None:\n switchboard.notify(\"changed pixel group\", grp, microstructure)\n switchboard.notify('redraw') \n \n\npixgrpmenu.addItem(OOFMenuItem(\n 'RemoveSelection',\n callback=removeSelection,\n params=[\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n PixelGroupParameter('group',\n tip=\"Group from which to remove the selected pixels.\")\n ],\n help='Remove the currently selected pixels from the given PixelGroup.',\n discussion=\"\"\"\n\n Any pixels that are currently selected\n and belong to the given &pixelgroup; will be removed from the\n group.\n\n \"\"\"))\n\n#########################\n\ndef clearGroup(menuitem, microstructure, group):\n if parallel_enable.enabled():\n pixelgroupIPC.ipcpixgrpmenu.Clear(microstructure=microstructure,\n group=group)\n return\n\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n mscontext.begin_writing()\n try:\n grp = ms.findGroup(group)\n grp.clear() \n finally:\n mscontext.end_writing()\n\n if grp is not None:\n switchboard.notify(\"changed pixel group\", grp, microstructure)\n switchboard.notify('redraw')\n \npixgrpmenu.addItem(OOFMenuItem(\n 'Clear',\n callback=clearGroup,\n params=[\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n PixelGroupParameter('group', tip='Group from which to remove all pixels.')\n ],\n help=\"Remove all pixels from the given PixelGroup.\",\n discussion=\"Empty the selected &pixelgroup;.\"))\n\ndef queryGroup(menuitem, microstructure, group):\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n mscontext.begin_reading()\n try:\n grp = ms.findGroup(group)\n nop = len(grp)\n 
areaOfGroup = nop*ms.areaOfPixels()\n finally:\n mscontext.end_reading()\n reporter.report(\">>> \", nop, \" pixels, \", \"area = \", areaOfGroup)\n\npixgrpmenu.addItem(OOFMenuItem(\n 'Query',\n callback=queryGroup,\n params=[\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n PixelGroupParameter('group', tip='Get information on this group.')\n ],\n help=\"Query the given PixelGroup.\",\n discussion=\"Print some information about the given &pixelgroup;.\"))\n \n \n############################\n\n# Functions for reading and writing pixelgroups in a data file. \n\ndef _writeData(self, dfile, microstructure, pixel):\n grpnames = pixelgroup.pixelGroupNames(microstructure, pixel)\n if grpnames:\n dfile.argument('groups', grpnames)\n return 1\n return 0\n\npixelgroup.PixelGroupAttributeRegistration.writeData = _writeData\n\ndef _readPixelGroups(menuitem, microstructure, category, groups):\n mscontext = ooflib.common.microstructure.microStructures[microstructure]\n ms = mscontext.getObject()\n mscontext.begin_writing()\n new_group_list = []\n all_group_dict = {}\n try:\n pixels = microstructureIO.getCategoryPixels(microstructure, category)\n for groupname in groups:\n (grp, newness) = ms.getGroup(groupname)\n grp.add(pixels)\n all_group_dict[grp]=1\n if newness:\n new_group_list.append(grp)\n finally:\n mscontext.end_writing()\n \n for g in all_group_dict:\n switchboard.notify(\"changed pixel group\", g, microstructure)\n for g in new_group_list:\n switchboard.notify(\"new pixel group\", g)\n\nmicrostructureIO.categorymenu.addItem(OOFMenuItem(\n pixelgroup.attributeReg.name(),\n callback=_readPixelGroups,\n params=[\n whoville.WhoParameter('microstructure',\n ooflib.common.microstructure.microStructures,\n tip=parameter.emptyTipString),\n parameter.IntParameter('category', tip=\"Pixel category.\"),\n parameter.ListOfStringsParameter('groups', tip=\"List of names of pixel groups.\")\n ],\n help=\"Assign pixel groups to pixel categories. 
Used internally in data files.\",\n discussion=xmlmenudump.loadFile('DISCUSSIONS/common/menu/pgroupcategory.xml')\n ))\n","sub_path":"SRC/common/IO/pixelgroupmenu.py","file_name":"pixelgroupmenu.py","file_ext":"py","file_size_in_byte":22599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"556817967","text":"#!/usr/bin/env python\nimport rospy as ros;\nimport tf_conversions as tfConversions;\nimport tf2_ros as tfRos;\nimport geometry_msgs.msg as geoMsg;\nfrom geometry_msgs.msg import Twist;\nfrom std_msgs.msg import Int16;\nimport math;\n\nx = 0;\ny = 0;\nth = 0.0;\n\ndef handleZumoHeading(headingMsg):\n\tglobal th;\n\tprint(headingMsg.data);\n\tth = headingMsg.data * math.pi / 180;\n\tprint(th);\n\ndef handleZumoPos(velMsg):\n\tglobal x;\n\tglobal y;\n\tglobal th;\n\tbr = tfRos.TransformBroadcaster()\n\tt = geoMsg.TransformStamped();\n\n\tprint(\"x: \", x);\n\tprint(\"y: \", y);\n\tprint(\"t: \", th);\n\n\tvx = 0;\n\n\tif (velMsg.linear.x == 1.0):\n\t\tvx = 0.1;\n\telif (velMsg.linear.x == 1.0):\n\t\tvx = -0.1;\n\n\tdeltaX = vx * math.cos(th);\n\tdeltaY = vx * math.sin(th);\n\tdeltaTh = 0;\n\n\tx += deltaX;\n\ty += deltaY;\n\tth += deltaTh;\n\n\tt.header.stamp = ros.Time.now();\n\tt.header.frame_id = \"world\";\n\tt.child_frame_id = \"zumo\";\n\n\tt.transform.translation.x = x;\n\tt.transform.translation.y = y;\n\tt.transform.translation.z = 0.0;\n\n\tq = tfConversions.transformations.quaternion_from_euler(0, 0, th);\n\tt.transform.rotation.x = q[0];\n\tt.transform.rotation.y = q[1];\n\tt.transform.rotation.z = q[2];\n\tt.transform.rotation.w = q[3];\n\n\tbr.sendTransform(t);\n\nif __name__ == '__main__':\n\tros.init_node('ZumoTFBroadcaster');\n\tros.Subscriber('/zumo/cmd_vel', Twist, handleZumoPos);\n\tros.Subscriber('/zumo/heading', Int16, handleZumoHeading);\n\tros.spin();\n\n","sub_path":"src/sit310_lab7/scripts/ZumoTFBroadcasterCompass.py","file_name":"ZumoTFBroadcasterCompass.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"27884187","text":"import sys\nsys.path.append('../..')\nfrom pathlib import Path\n\nfrom vimms.Chemicals import ChemicalCreator\nfrom vimms.MassSpec import IndependentMassSpectrometer\nfrom vimms.Controller import SimpleMs1Controller\nfrom vimms.Environment import Environment\nfrom vimms.Common import *\n\n\ndef simple_ms1_processor():\n print('#'*10, 'Load previously trained spectral feature database and the list of extracted metabolites, \\\n created in 01. Download Data')\n #-----------------\n mypath = 'documents/simple_ms1/example_data'\n #-----------------\n base_dir = os.path.abspath(mypath)\n ps = load_obj(Path(base_dir, 'peak_sampler_mz_rt_int_19_beers_fullscan.p'))\n hmdb = load_obj(Path(base_dir, 'hmdb_compounds.p'))\n\n # set_log_level_debug()\n out_dir = Path(base_dir, 'results', 'MS1_single')\n # the list of ROI sources created in the previous notebook '01. 
Download Data.ipynb'\n ROI_Sources = [str(Path(base_dir,'DsDA', 'DsDA_Beer', 'beer_t10_simulator_files'))]\n\n # minimum MS1 intensity of chemicals\n min_ms1_intensity = 1.75E5\n\n # m/z and RT range of chemicals\n rt_range = [(0, 1440)]\n mz_range = [(0, 1050)]\n\n # the number of chemicals in the sample\n n_chems = 6500\n\n # maximum MS level (we do not generate fragmentation peaks when this value is 1)\n ms_level = 1\n\n chems = ChemicalCreator(ps, ROI_Sources, hmdb)\n dataset = chems.sample(mz_range, rt_range, min_ms1_intensity, n_chems, ms_level)\n save_obj(dataset, Path(out_dir, 'dataset.p'))\n\n for chem in dataset[0:10]:\n print(chem)\n print('#'*10, 'Run MS1 controller on the samples and generate .mzML files')\n min_rt = rt_range[0][0]\n max_rt = rt_range[0][1]\n\n mass_spec = IndependentMassSpectrometer(POSITIVE, dataset, ps)\n controller = SimpleMs1Controller()\n\n # create an environment to run both the mass spec and controller\n env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True)\n\n # set the log level to WARNING so we don't see too many messages when environment is running\n set_log_level_warning()\n\n # run the simulation\n env.run()\n set_log_level_debug()\n mzml_filename = 'ms1_controller.mzML'\n env.write_mzML(out_dir, mzml_filename)\n return str(Path(mypath, 'results', 'MS1_single')) + '/' + mzml_filename","sub_path":"university_counselor/a5267vimms_django/vimms_django/vimms_app/processor_simple_ms1.py","file_name":"processor_simple_ms1.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"232845678","text":"def main(**keys):\n\n\n Lesson = keys['Lesson']\n Massenger = keys ['Massenger']\n Admin = keys['Admin']\n Role = keys['Role']\n Option = keys['Option'] \n\n\n\n\n \n _lessons_list={ # Lessons_List\n #\"example\" : {'small_name':'xmp','name':'تست' },\n \"math\" : {'small_name':'mth','name':'ریاضی' },\n \"chemistry\" : {'small_name':'chm','name':'شیمی' },\n }\n\n\n _massenger_list={\n \"soroush+\" : {'sender':'_soroush+_driver'}\n }\n\n\n _admin_list=[\n \"1UGgWYRWoxeafE2VPZTzUZ-YFgheBOWeypJSddLi5Fyo-_qUGl_eAKOF9Jc\",\n \"1uxTbAgywOEtM3SfU5YtuXA6Hf115TOPj9ku6p9vMlw5xfSLRpET16N8SnQ\",\n ]\n\n\n _role_list=[\n {\"name\":'admin.math' ,\"Fname\":'سرگروه ریاضی' ,\"options\":[Option._give('adm_mth'),Option._give('stu')]} ,\n {\"name\":'admin.chemistry' ,\"Fname\":'سرگروه شیمی' ,\"options\":[Option._give('adm_chm'),Option._give('stu')]} ,\n ]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n for nm ,info in list(_lessons_list.items()):\n Lesson.add(nm,**info)\n\n\n\n\n\n\n for nm ,info in list(_massenger_list.items()):\n Massenger.add(nm,**info)\n\n\n\n\n\n\n for q in _admin_list :\n Admin._give(q)\n\n\n\n\n\n for info in _role_list:\n Role._give(**info)\n\n\n\n\n\n","sub_path":"SDimServer/Damasanj/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"410242260","text":"from ucb import main, trace, interact\nfrom scheme_tokens import tokenize_lines, DELIMITERS\nfrom buffer import Buffer, InputReader\n\n# Pairs and Scheme lists\n\nclass Pair(object):\n \"\"\"A pair has two instance attributes: first and second. For a Pair to be\n a well-formed list, second is either a well-formed list or nil. 
Some\n methods only apply to well-formed lists.\n\n >>> s = Pair(1, Pair(2, nil))\n >>> s\n Pair(1, Pair(2, nil))\n >>> print(s)\n (1 2)\n >>> len(s)\n 2\n >>> s[1]\n 2\n >>> print(s.map(lambda x: x+4))\n (5 6)\n \"\"\"\n def __init__(self, first, second):\n self.first = first\n self.second = second\n\n def __repr__(self):\n return \"Pair({0}, {1})\".format(repr(self.first), repr(self.second))\n\n def __str__(self):\n s = \"(\" + str(self.first)\n second = self.second\n while isinstance(second, Pair):\n s += \" \" + str(second.first)\n second = second.second\n if second is not nil: # The second is still a number, so it's an ill-formed list.\n s += \" . \" + str(second)\n return s + \")\"\n\n def __len__(self):\n n, second = 1, self.second\n while isinstance(second, Pair):\n n += 1\n second = second.second\n if second is not nil: # When the while loop stopped, second is supposed to be a nil.\n raise TypeError(\"length attempted on improper list\")\n return n\n\n def __getitem__(self, k):\n if k < 0:\n raise IndexError(\"negative index into list\")\n y = self\n for _ in range(k): # using k times the expression: y = y.second.\n if y.second is nil:\n raise IndexError(\"list index out of bounds\")\n elif not isinstance(y.second, Pair):\n raise TypeError(\"ill-formed list\")\n y = y.second\n return y.first\n\n def map(self, fn):\n \"\"\"Return a Scheme list after mapping Python function FN to SELF.\"\"\"\n mapped = fn(self.first)\n if self.second is nil or isinstance(self.second, Pair):\n return Pair(mapped, self.second.map(fn)) # map fn over nil is still nil.\n else:\n raise TypeError(\"ill-formed list\") # Only two situations or Error.\n\nclass nil(object):\n \"\"\"The empty list\"\"\"\n\n def __repr__(self):\n return \"nil\"\n\n def __str__(self):\n return \"()\"\n\n def __len__(self):\n return 0\n\n def __getitem__(self, k):\n if k < 0:\n raise IndexError(\"negative index into list\")\n raise IndexError(\"list index out of bounds\")\n\n def map(self, fn): # fn map nil returns nil.\n return self\n\nnil = nil() # Assignment hides the nil class; there is only one instance\n\n\n# Scheme list parser, without quotation or dotted lists.\n\ndef scheme_read(src):\n \"\"\"Read the next expression from src, a Buffer of tokens.\n\n >>> lines = ['(+ 1 ', '(+ 23 4)) (']\n >>> src = Buffer(tokenize_lines(lines))\n >>> print(scheme_read(src))\n (+ 1 (+ 23 4))\n \"\"\"\n if src.current() is None:\n raise EOFError\n val = src.pop() # each time you read just one value, and the index++ in the src.\n if val == 'nil':\n return nil\n elif val not in DELIMITERS: # ( ) ' . 
paranthesis or quotation or dot.\n return val # get the numbers or symbols as the base case.\n elif val == \"(\":\n return read_tail(src) # To find the tail ')'\n else:\n raise SyntaxError(\"unexpected token: {0}\".format(val))\n\ndef read_tail(src): # This is a Pair constructor.\n \"\"\"Return the remainder of a list in src, starting before an element or ).\n\n >>> read_tail(Buffer(tokenize_lines([')'])))\n nil\n >>> read_tail(Buffer(tokenize_lines(['2 3)'])))\n Pair(2, Pair(3, nil))\n >>> read_tail(Buffer(tokenize_lines(['2 (3 4))'])))\n Pair(2, Pair(Pair(3, Pair(4, nil)), nil))\n \"\"\"\n if src.current() is None:\n raise SyntaxError(\"unexpected end of file\")\n if src.current() == \")\":\n src.pop() # Everytime we read src once, we need to pop() it, which means the index++, just like move one step of the pointer.\n return nil\n first = scheme_read(src)\n rest = read_tail(src) # if in the self.current_line there's no ')', then the index will point to the last element in self.current_line. then go to src.pop(), go to self.current, then go to next(self.source), then go to next(tokenize_lines), go to next(input), finally go to the InputReader, input('next_line--> ') means print('next_line--> ) and then wait user to input.\n return Pair(first, rest) # if there's '(', then calls read_tail, which returns a Pair(first, rest) constructor.\n\n\n# Interactive loop\n\ndef buffer_input():\n return Buffer(tokenize_lines(InputReader('read> '))) # 'read> ' is an initial prompt in InputReader Class, everytime we call buffer_input, we first print('> ') and then wait for user to input.\n\n@main # This means the main function automatically run.\ndef read_print_loop():\n \"\"\"Run a read-print loop for Scheme expressions.\"\"\"\n while True: # This is a while loop runs forever, even if we have an Error, we can get back to buffer_input() to input again, with a starter '> '\n try:\n src = buffer_input() # src is a Buffer of an iterator-->[[line1], [line2],...]\n while src.more_on_line: # continue to read element in current_line.\n expression = scheme_read(src) # each time read just one element in self.current_line.\n print(str(expression))\n print(repr(expression))\n except (SyntaxError, ValueError) as err:\n print(type(err).__name__ + ':', err)\n except (KeyboardInterrupt, EOFError): # -D, etc.\n return # this is the end of while True loop.\n","sub_path":"functions/interpreter/scalc/scheme_reader.py","file_name":"scheme_reader.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"253766469","text":"import itertools\nfrom typing import (\n TYPE_CHECKING,\n Any,\n List,\n Mapping,\n NoReturn,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n)\n\nfrom graphql import (\n GraphQLAbstractType,\n GraphQLNamedType,\n GraphQLResolveInfo,\n GraphQLType,\n GraphQLTypeResolver,\n GraphQLUnionType,\n)\n\nfrom strawberry.annotation import StrawberryAnnotation\nfrom strawberry.exceptions import (\n InvalidUnionType,\n UnallowedReturnTypeForUnion,\n WrongReturnTypeForUnion,\n)\nfrom strawberry.type import StrawberryOptional, StrawberryType\n\n\nif TYPE_CHECKING:\n from strawberry.schema.types.concrete_type import TypeMap\n from strawberry.types.types import TypeDefinition\n\n\nclass StrawberryUnion(StrawberryType):\n def __init__(\n self,\n name: Optional[str] = None,\n type_annotations: Tuple[\"StrawberryAnnotation\", ...] 
= tuple(),\n description: Optional[str] = None,\n ):\n self.graphql_name = name\n self.type_annotations = type_annotations\n self.description = description\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, StrawberryType):\n if isinstance(other, StrawberryUnion):\n return (\n self.graphql_name == other.graphql_name\n and self.type_annotations == other.type_annotations\n and self.description == other.description\n )\n return False\n\n return super().__eq__(other)\n\n def __hash__(self) -> int:\n # TODO: Is this a bad idea? __eq__ objects are supposed to have the same hash\n return id(self)\n\n def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:\n if other is None:\n # Return the correct notation when using `StrawberryUnion | None`.\n return StrawberryOptional(of_type=self)\n\n # Raise an error in any other case.\n # There is Work in progress to deal with more merging cases, see:\n # https://github.com/strawberry-graphql/strawberry/pull/1455\n raise InvalidUnionType(other)\n\n @property\n def types(self) -> Tuple[StrawberryType, ...]:\n return tuple(\n cast(StrawberryType, annotation.resolve())\n for annotation in self.type_annotations\n )\n\n @property\n def type_params(self) -> List[TypeVar]:\n def _get_type_params(type_: StrawberryType):\n if hasattr(type_, \"_type_definition\"):\n parameters = getattr(type_, \"__parameters__\", None)\n\n return list(parameters) if parameters else []\n\n return type_.type_params\n\n # TODO: check if order is important:\n # https://github.com/strawberry-graphql/strawberry/issues/445\n return list(\n set(itertools.chain(*(_get_type_params(type_) for type_ in self.types)))\n )\n\n @property\n def is_generic(self) -> bool:\n return len(self.type_params) > 0\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> StrawberryType:\n if not self.is_generic:\n return self\n\n new_types = []\n for type_ in self.types:\n new_type: Union[StrawberryType, type]\n\n if hasattr(type_, \"_type_definition\"):\n type_definition: TypeDefinition = type_._type_definition # type: ignore\n\n if type_definition.is_generic:\n new_type = type_definition.copy_with(type_var_map)\n if isinstance(type_, StrawberryType) and type_.is_generic:\n new_type = type_.copy_with(type_var_map)\n else:\n new_type = type_\n\n new_types.append(new_type)\n\n return StrawberryUnion(\n type_annotations=tuple(map(StrawberryAnnotation, new_types)),\n description=self.description,\n )\n\n def __call__(self, *_args, **_kwargs) -> NoReturn:\n \"\"\"Do not use.\n\n Used to bypass\n https://github.com/python/cpython/blob/5efb1a77e75648012f8b52960c8637fc296a5c6d/Lib/typing.py#L148-L149\n \"\"\"\n raise ValueError(\"Cannot use union type directly\")\n\n def get_type_resolver(self, type_map: \"TypeMap\") -> GraphQLTypeResolver:\n def _resolve_union_type(\n root: Any, info: GraphQLResolveInfo, type_: GraphQLAbstractType\n ) -> str:\n assert isinstance(type_, GraphQLUnionType)\n\n from strawberry.types.types import TypeDefinition\n\n # If the type given is not an Object type, try resolving using `is_type_of`\n # defined on the union's inner types\n if not hasattr(root, \"_type_definition\"):\n for inner_type in type_.types:\n if inner_type.is_type_of is not None and inner_type.is_type_of(\n root, info\n ):\n return inner_type.name\n\n # Couldn't resolve using `is_type_of``\n raise WrongReturnTypeForUnion(info.field_name, str(type(root)))\n\n return_type: Optional[GraphQLType]\n\n # Iterate over all of our known types and find the first 
concrete type that\n # implements the type\n for possible_concrete_type in type_map.values():\n possible_type = possible_concrete_type.definition\n if not isinstance(possible_type, TypeDefinition):\n continue\n if possible_type.is_implemented_by(root):\n return_type = possible_concrete_type.implementation\n break\n else:\n return_type = None\n\n # Make sure the found type is expected by the Union\n if return_type is None or return_type not in type_.types:\n raise UnallowedReturnTypeForUnion(\n info.field_name, str(type(root)), set(type_.types)\n )\n\n # Return the name of the type. Returning the actual type is now deprecated\n if isinstance(return_type, GraphQLNamedType):\n # TODO: Can return_type ever _not_ be a GraphQLNamedType?\n return return_type.name\n else:\n # todo: check if this is correct\n return return_type.__name__ # type: ignore\n\n return _resolve_union_type\n\n\nTypes = TypeVar(\"Types\", bound=Type)\n\n\n# We return a Union type here in order to allow to use the union type as type\n# annotation.\n# For the `types` argument we'd ideally use a TypeVarTuple, but that's not\n# yet supported in any python implementation (or in typing_extensions).\n# See https://www.python.org/dev/peps/pep-0646/ for more information\ndef union(\n name: str, types: Tuple[Types, ...], *, description: str = None\n) -> Union[Types]:\n \"\"\"Creates a new named Union type.\n\n Example usages:\n\n >>> @strawberry.type\n ... class A: ...\n >>> @strawberry.type\n ... class B: ...\n >>> strawberry.union(\"Name\", (A, Optional[B]))\n \"\"\"\n\n # Validate types\n if len(types) == 0:\n raise TypeError(\"No types passed to `union`\")\n\n for _type in types:\n if not isinstance(_type, TypeVar) and not hasattr(_type, \"_type_definition\"):\n raise InvalidUnionType(\n f\"Type `{_type.__name__}` cannot be used in a GraphQL Union\"\n )\n\n union_definition = StrawberryUnion(\n name=name,\n type_annotations=tuple(StrawberryAnnotation(type_) for type_ in types),\n description=description,\n )\n\n return union_definition # type: ignore\n","sub_path":"strawberry/union.py","file_name":"union.py","file_ext":"py","file_size_in_byte":7581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"574206395","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint\n\nfrom flask_mongorest.methods import *\n\n\ndef register_class(app, klass, **kwargs):\n # Construct a url based on a 'name' kwarg with a fallback to the\n # view's class name. 
Note that the name must be unique.\n name = kwargs.pop(\"name\", klass.__name__)\n view_func = klass.as_view(name)\n url = kwargs.pop(\"url\", None)\n if not url:\n document_name = klass.resource.document.__name__.lower()\n url = f\"/{document_name}/\"\n\n # Insert the url prefix, if it exists\n url_prefix = kwargs.pop(\"url_prefix\", \"\")\n if url_prefix:\n url = f\"{url_prefix}{url}\"\n\n # Add url rules\n klass_methods = set(klass.methods)\n if Create in klass_methods and BulkCreate in klass_methods:\n raise ValueError(\"Use either Create or BulkCreate!\")\n\n for x in klass_methods & {Fetch, Update, Delete}:\n endpoint = view_func.__name__ + x.__name__\n app.add_url_rule(\n f\"{url}/\",\n defaults={\"short_mime\": None},\n view_func=view_func,\n methods=[x.method],\n endpoint=endpoint,\n **kwargs,\n )\n\n for x in klass_methods & {Create, BulkFetch, BulkCreate, BulkUpdate, BulkDelete}:\n endpoint = view_func.__name__ + x.__name__\n app.add_url_rule(\n url,\n defaults={\"pk\": None, \"short_mime\": None},\n view_func=view_func,\n methods=[x.method],\n endpoint=endpoint,\n **kwargs,\n )\n\n if Download in klass.methods:\n endpoint = view_func.__name__ + Download.__name__\n app.add_url_rule(\n f\"{url}download//\",\n defaults={\"pk\": None, \"short_mime\": \"gz\"},\n view_func=view_func,\n methods=[Download.method],\n endpoint=endpoint,\n **kwargs,\n )\n\n\nclass MongoRest(object):\n def __init__(self, app, **kwargs):\n self.app = app\n self.url_prefix = kwargs.pop(\"url_prefix\", \"\")\n app.register_blueprint(\n Blueprint(self.url_prefix, __name__, template_folder=\"templates\")\n )\n\n def register(self, **kwargs):\n def decorator(klass):\n register_class(self.app, klass, **kwargs)\n return klass\n\n return decorator\n","sub_path":"flask_mongorest/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"344508635","text":"from random import randint\n\n\"\"\"Create die and roll function\"\"\"\nclass Die:\n\n def __init__(self, sides=6):\n self.sides = sides\n\n def roll_die(self):\n result = randint(1, self.sides)\n return result\n\ndie1 = Die()\ndie2 = Die()\nnumbs = []\n\n\"\"\"Roll two dice\"\"\"\noutcome1 = die1.roll_die()\nnumbs.append(outcome1)\noutcome2 = die2.roll_die()\nnumbs.append(outcome2)\n\nprint(\"Numbers rolled: \")\nprint(numbs)\n","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"91660479","text":"\n# 去掉两个最大,两个最小 算平均lots\ndef AvgLotL(bookLL):\n ll = CleanLotL(bookLL[:])\n cnt,tt = 0,0\n for v in ll[:]:\n cnt = cnt + 1\n tt = tt+ v\n avg = tt/cnt\n return avg\n\n# 去掉最大最小值后的lots\ndef CleanLotL(bookLL):\n lotL = []\n for b in bookLL[0]: #只需要交易量的数组\n lotL.append(b[1])\n for b in bookLL[1]:\n lotL.append(b[1])\n DelMin(lotL)\n DelMax(lotL)\n return lotL\n\ndef DelMin(li):\n li.pop(li.index(min(li)))\n li.pop(li.index(min(li)))\n\n\ndef DelMax(li):\n li.pop(li.index(max(li)))\n li.pop(li.index(max(li)))\n\n\n# 遍历转类型\ndef TupleToList(tu):\n rst = []\n for t in tu:\n rst.append(t)\n return rst","sub_path":"Alchemy/bookut/utlist.py","file_name":"utlist.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"358734248","text":"from app_jiao_ben import AppReadBase\nimport random\nimport uiautomator2\nimport time\n\n\nclass 
JinRiTouTiao(AppReadBase):\n def __init__(self, phone_serial, pp):\n super(JinRiTouTiao, self).__init__(phone_serial, pp)\n # self.pp = uiautomator2.connect_usb()\n self.pp.watcher('tip1').when('我知道了').click()\n self.pp.watcher('tip5').when('以后再说').click()\n self.pp.watcher('tip2').when(xpath='//*[@resource-id=\"com.ss.android.article.lite:id/b6y\"]').click()\n self.pp.watcher('tip3').when(xpath='//*[@resource-id=\"com.ss.android.article.lite:id/a0j\"]').click()\n self.pp.watcher('tip4').when(xpath='//*[@text=\"0x+wcp2R1bM4bU8gAAAABJRU5ErkJggg==\"]').click()\n self.pp.watcher.start(0.5)\n\n def sign_in(self):\n self.logger.info(f'开始签到')\n self.pp(text='任务').wait()\n self.pp(text='任务').click(offset=(random.random(), random.random()))\n time.sleep(random.random() + 5)\n if self.pp.xpath('//android.app.Dialog/android.view.View[1]/android.view.View[2]/android.view.View[5]').exists:\n self.click_random_position(self.pp.xpath('//android.app.Dialog/android.view.View[1]/android.view.View[2]/'\n 'android.view.View[5]').get().bounds)\n\n def _adjust_lan_mu(self):\n self.logger.info(f'开始调整栏目')\n lan_mu_num_end = len(self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/a66\"]/'\n 'android.widget.LinearLayout[1]//android.widget.FrameLayout').all()) - 1\n if lan_mu_num_end <= 5:\n return\n if self.pp(resourceId=\"com.ss.android.article.lite:id/a_u\").exists:\n self.pp(resourceId=\"com.ss.android.article.lite:id/a_u\").click(\n offset=(random.uniform(0.5, 0.9), random.random()))\n time.sleep(random.random() + 1)\n self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/a3h\"]/android.view.View[3]').wait()\n self.pp.click(0.928, 0.107)\n time.sleep(random.random() + 1)\n lan_mu_num = 2\n for j in reversed(self.pp(resourceId='com.ss.android.article.lite:id/aa0')):\n j.click_exists()\n time.sleep(random.random() + 1)\n for j in reversed(self.pp(resourceId='com.ss.android.article.lite:id/awh')):\n if j.get_text()[-2:] not in ['抗疫', '视频', '图片', '值点', '小说', '音频', '娱乐'] and \\\n random.random() < 0.5 and lan_mu_num < lan_mu_num_end:\n j.click_exists()\n lan_mu_num += 1\n time.sleep(random.random() + 1)\n self.pp.click(0.928, 0.107)\n time.sleep(random.random() + 1)\n self.pp.press('back')\n time.sleep(random.random() + 1)\n\n def read_issue(self, duration, target_coin):\n self.logger.info(f'开始阅读文章')\n time.sleep(random.random() + 1)\n self.click_random_position(self.pp.xpath('//*[@resource-id=\"android:id/tabs\"]/android.widget.RelativeLayout[1]')\n .get().bounds)\n time.sleep(random.random() + 1)\n # 看情况调整栏目\n if self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/a66\"]/android.widget.LinearLayout[1]/'\n 'android.widget.FrameLayout[last()]').get().bounds[2] > \\\n self.pp(resourceId=\"com.ss.android.article.lite:id/a_u\").bounds()[0]:\n self._adjust_lan_mu()\n # 获取栏目\n lan_mu_num = len(self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/a66\"]/'\n 'android.widget.LinearLayout[1]//android.widget.FrameLayout').all())\n random_list = [x for x in range(1, lan_mu_num)]\n random.shuffle(random_list)\n # 看文章统计\n p = 0\n for j in random_list:\n t = time.time()\n self.click_random_position(self.pp.xpath(f'//*[@resource-id=\"com.ss.android.article.lite:id/a66\"]/'\n f'android.widget.LinearLayout[1]//'\n f'android.widget.FrameLayout[{j + 1}]').get().bounds)\n time.sleep(random.random() + 1)\n for i in range(random.randint(8, 12)): # 每个栏目下滑随机次\n # 每个栏目下的文章标题\n for title in self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/'\n 'bz\" or 
@resource-id=\"com.ss.android.article.lite:id/km\"]').all():\n # 需要满足看文章概率\n if random.random() >= self.probability_read_issue:\n continue\n self.click_random_position(title.bounds)\n time.sleep(random.random() + 2)\n # 如果是搜索按钮点进去的,那就跳过\n if self.pp(description='返回').exists:\n self.pp.press('back')\n time.sleep(random.random() + 1)\n # 没有奖励的就跳过不看了\n # if not (self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/aak\"]').exists or\n # self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/l9\"]').exists) or\n # self.pp.xpath('//*[@resource-id=\"com.ss.android.newugc:id/round_write_button\"]').exists or\n # self.pp.xpath('//*[@resource-id=\"com.ss.android.newugc:id/wenda_detail_title_image\"]').exists:\n # self.pp.press('back')\n # time.sleep(random.random() + 1)\n # continue\n issue_time_start = time.time() # 开始计时\n read_issue_time = random.randrange(5, 125) # 看文章的随机时间\n read_video_time = random.randrange(5, 185) # 看视频的随机时间\n # 按照设定的关注概率,随机关注\n if self.pp(text=\"关注\").exists and random.random() < self.probability_focus:\n self.pp(text=\"关注\").click(offset=(random.random(), random.random()))\n time.sleep(random.random() + 1)\n # 看下是视频还是文章,视频就停着看,文章就下滑看\n if self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/a0h\"]').exists:\n while not (self.pp(text='重播').exists or time.time() - issue_time_start > read_video_time):\n if self.pp(text='关闭广告').exists:\n self.pp(text='关闭广告').click(offset=(random.random(), random.random()))\n time.sleep(1)\n else:\n while time.time() - issue_time_start <= read_issue_time:\n time.sleep(random.uniform(3, 5))\n self.scroll_read_issue()\n self.pp(scrollable=True).scroll.toEnd(steps=10)\n # 看文章数加 1\n p += 1\n # 按照设定的点赞概率,随机点赞\n if self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/d\"]').exists and \\\n random.random() < self.probability_thumb_up:\n self.click_random_position(self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/'\n 'd\"]').get().bounds)\n time.sleep(random.random() + 1)\n # 按照设定的评论概率,随机评论\n if self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/d5\"]').exists and \\\n random.random() < self.probability_commit:\n self.click_random_position(self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/'\n 'd5\"]').get().bounds)\n time.sleep(random.random() + 1)\n self.pp(resourceId='com.ss.android.article.lite:id/b88').wait()\n self.pp(resourceId='com.ss.android.article.lite:id/b88') \\\n .set_text(random.choice(self.commit))\n time.sleep(random.random() + 1)\n self.pp(text='发布').click(offset=(random.random(), random.random()))\n time.sleep(random.random() + 1)\n # 阅读完文章返回\n while not self.pp(text='我的').exists:\n self.pp.press('back')\n time.sleep(random.random() + 1)\n time.sleep(random.random() + 1)\n if time.time() - t > duration or p >= 30:\n self.logger.info(f'今日阅读时间超过了{duration}秒,不再阅读了')\n return\n # 随机下滑1-4次\n for k in range(random.randint(1, 2)):\n self.pp.swipe(random.uniform(0.3, 0.6), random.uniform(0.7, 0.8), random.uniform(0.3, 0.6),\n random.uniform(0.2, 0.3), steps=random.randint(20, 60))\n time.sleep(random.random())\n time.sleep(random.random() + 1)\n coin_len = self.today_coin()\n if coin_len < target_coin:\n self.click_random_position(self.pp.xpath('//*[@resource-id=\"android:id/tabs\"]/'\n 'android.widget.RelativeLayout[1]').get().bounds)\n time.sleep(random.random() + 1)\n else:\n self.logger.info(f'今日已经获取超过 {target_coin} 个金币,不再阅读了')\n return\n self.logger.info('看完这个栏目了,换个栏目')\n\n def today_coin(self):\n self.logger.info('获取今日金币数量')\n 
self.pp(text='我的').click(offset=(random.random(), random.random()))\n self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/z8\"]').wait()\n coin = self.pp.xpath('//*[@resource-id=\"com.ss.android.article.lite:id/z8\"]').get_text()\n time.sleep(random.random() + 1)\n if 'w' in coin:\n coin = int(float(coin.replace('w', '')) * 10000)\n else:\n coin = int(coin)\n self.logger.info(f'今日已经获取金币 {coin}')\n return coin\n\n def clean_cache(self):\n self.logger.info(f'开始清理缓存')\n self.pp(text='我的').click(offset=(random.random(), random.random()))\n self.pp(text=\"系统设置\").wait()\n self.pp(text=\"系统设置\").click(offset=(random.random(), random.random()))\n self.pp(text=\"清除缓存\").wait()\n self.pp(text=\"清除缓存\").click(offset=(random.random(), random.random()))\n self.pp(text=\"确认\").wait()\n self.pp(text=\"确认\").click(offset=(random.random(), random.random()))\n\n def main_do(self, duration, target_coin, cash_out):\n # raise\n self.app_start('今日头条极速版')\n self.pp(text='我的').wait(timeout=30)\n self.sign_in()\n coin = self.today_coin()\n if coin < target_coin:\n self.read_issue(duration, target_coin)\n self.clean_cache()\n self.app_end()\n","sub_path":"read_phone_project/app_jiao_ben/jin_ri_tou_tiao.py","file_name":"jin_ri_tou_tiao.py","file_ext":"py","file_size_in_byte":11562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"148639271","text":"dead_ends = 0\nmummies = 0 \n\nprint (\"escaping the tomb...\")\n\nfor adventure in range (0, 4, 1):\n print (\"what lies before me?\")\n \n response_1 = input()\n\n if (response_1 == \"a dead end\"):\n dead_ends = dead_ends + 1 \n print (\"Time to turn back\")\n elif (response_1 == \"a mummy\"):\n mummies = mummies + 1 \n print (\"better find another way.\")\n else:\n print (\"lets move around it.\")\n\nprint (\"encounted\" + str(dead_ends) + \" dead ends and\" + str(mummies) + \"mummies\" )","sub_path":"testing/playing around.py","file_name":"playing around.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"131182566","text":"def early_stopping_main(args, model, train_loader, val_loader):\n\n use_cuda = not args['no_cuda'] and torch.cuda.is_available()\n device = torch.device('cuda' if use_cuda else 'cpu')\n\n model = model.to(device)\n optimizer = optim.SGD(model.parameters(),\n lr=args['lr'],\n momentum=args['momentum'])\n\n best_acc = 0.0\n best_epoch = 0\n\n # Number of successive epochs that you want to wait before stopping training process\n patience = 20\n\n # Keps track of number of epochs during which the val_acc was less than best_acc\n wait = 0\n\n val_acc_list, train_acc_list = [], []\n for epoch in tqdm(range(args['epochs'])):\n\n # train the model\n train(args, model, device, train_loader, optimizer)\n\n # calculate training accuracy\n train_acc = test(model, device, train_loader)\n\n # calculate validation accuracy\n val_acc = test(model, device, val_loader)\n\n if (val_acc > best_acc):\n best_acc = val_acc\n best_epoch = epoch\n best_model = copy.deepcopy(model)\n wait = 0\n else:\n wait += 1\n\n if (wait > patience):\n print(f'early stopped on epoch: {epoch}')\n break\n\n train_acc_list.append(train_acc)\n val_acc_list.append(val_acc)\n\n return val_acc_list, train_acc_list, best_model, best_epoch\n\n\nargs = {\n 'epochs': 200,\n 'lr': 5e-4,\n 'momentum': 0.99,\n 'no_cuda': False,\n}\n\nmodel = AnimalNet()\n\n## Uncomment to test\nval_acc_earlystop, train_acc_earlystop, _, best_epoch = 
early_stopping_main(args, model, train_loader, val_loader)\nprint(f'Maximum Validation Accuracy is reached at epoch: {best_epoch:2d}')\nwith plt.xkcd():\n early_stop_plot(train_acc_earlystop, val_acc_earlystop, best_epoch)","sub_path":"tutorials/W1D5_Regularization/solutions/W1D5_Tutorial1_Solution_dd5edfb8.py","file_name":"W1D5_Tutorial1_Solution_dd5edfb8.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"514470308","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\n#Nagentes(i), \"|\", dados(i, 1) / real(s), \"|\", dados(i, 2) / real(s), \"|\", s \ndados_names = ['Nv','custo','nviz','amostras']\n\nfolder = \"/home/paulo/Dropbox/Profissional/usp/Mobilidade/resultados/dinamica8/res30abcdef/\"\n\ndados30a = pd.read_csv(folder+'dados_30a.txt', header = 1, sep = '|')\ndados30a.columns = dados_names\n\ndados30b = pd.read_csv(folder+'dados_30b.txt', header = 1, sep = '|')\ndados30b.columns = dados_names\n\n\ndados30c = pd.read_csv(folder+'dados_30c.txt', header = 1, sep = '|')\ndados30c.columns = dados_names\n\ndados30d = pd.read_csv(folder+'dados_30d.txt', header = 1, sep = '|')\ndados30d.columns = dados_names\n\ndados30e = pd.read_csv(folder+'dados_30e.txt', header = 1, sep = '|')\ndados30e.columns = dados_names\n\ndados30f = pd.read_csv(folder+'dados_30f.txt', header = 1, sep = '|')\ndados30f.columns = dados_names\n\nfolderK4 = \"/home/paulo/Dropbox/Profissional/usp/Mobilidade/Calibracao/referencia/K4/\"\nnome = ['0','01', '03', '05b']\nNn = len(nome)\ncopy =[0.0, 0.1, 0.3, 0.5] \ntipo = ['or', '^g', 'vb', 'sk']\nlista_L, lista_cost = [[] for i2 in range(Nn)], [[] for i2 in range(Nn)]\nfor i1 in range(Nn):\n\tfile_p0 = open(folderK4+'custo_'+nome[i1]+'.txt',\"r\")\n\tfor linha in file_p0:\n\t\ta1, b1 = linha.split()\n\t\tlista_L[i1].append(int(a1))\n\t\tlista_cost[i1].append(float(b1))\n\tfile_p0.close()\n\nvetorNv = pd.read_csv('vetorNv14.txt', header = None, sep = '|')\nN = 12\nsizeL = np.array(vetorNv)\nNl = len(sizeL)\nct = np.zeros(Nl, dtype = float)\nct2 = np.zeros(Nl, dtype = float)\nlambdaN = 0.99978\nfor i3 in range(Nl):\n\tct[i3] = sizeL[i3]/(2**N*(1-lambdaN**sizeL[i3]))\n\tct2[i3] = sizeL[i3]/(2**N)\n\n\nplt.loglog(dados30a.Nv,dados30a.custo,'-sg', ms = 4, label = r'$v_0 = 5 $')\nplt.loglog(dados30b.Nv,dados30b.custo,'--og', ms = 4, label = r'$v_0 = 0 $')\nplt.loglog(dados30c.Nv,dados30c.custo,'-sr', ms = 4, label = r'$v_0 = 5 $')\nplt.loglog(dados30d.Nv,dados30d.custo,'--or', ms = 4, label = r'$v_0 = 0 $')\nplt.loglog(dados30e.Nv,dados30e.custo,'-sb', ms = 4, label = r'$v_0 = 5 $')\nplt.loglog(dados30f.Nv,dados30f.custo,'--ob', ms = 4, label = r'$v_0 = 0 $')\n\n#plt.loglog(sizeL,ct2,'--', color = 'gray',lw = 2, label = r'$L/2^N$')\n#plt.loglog(lista_L[3],lista_cost[3],'-', color = 'gray', label = 'static')\n#plt.loglog(sizeL,ct,'-g',lw = 2, label = 'Eq. 
4')\nplt.loglog(sizeL,ct2,'--k',lw = 2, label = r'$L/2^N$')\nplt.loglog(lista_L[3],lista_cost[3],'-k', label = 'static')\nplt.legend(loc='upper left',fontsize = 10)\nplt.xlabel(r'Number of agents $M$',fontsize = 16)\nplt.ylabel(r'Computational cost $C$',fontsize = 16)\n#plt.title(r'$K=4$, $p = 0.5$, $\\delta = 2.0$.',fontsize=16)\nplt.grid(True)\nplt.xticks(size=14)\nplt.yticks(size=14)\naxes = plt.gca()\naxes.set_xlim([2,6e3])\naxes.set_ylim([6e-1,1.9])\nplt.text(120,1.7,r'$v_i(t)=v_0 R \\Phi_i(t)$',fontsize = 15, color = 'g', backgroundcolor = 'w')\nplt.text(120,1.5,r'$v(t)=v_0 R \\Phi_M(t)$',fontsize = 15, color = 'r', backgroundcolor = 'w')\nplt.text(140,1.3,r'$v=v_0 R \\Phi_g$',fontsize = 15, color = 'b', backgroundcolor = 'w')\nplt.savefig('figura_3.pdf',dpi = 300, bbox_inches='tight') \nplt.close()\n","sub_path":"Mobilidade/figura_3.py","file_name":"figura_3.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"250478527","text":"import re\n\n\nclass FindSpam:\n rules = [\n {'regex': \"(?i)\\\\b(baba(ji)?|nike|vashikaran|porn)\\\\b\", 'all': True,\n 'sites': [], 'reason': \"Bad keyword detected\"},\n {'regex': \"\\\\+\\\\d{10}|\\\\+?\\\\d{2}[\\\\s\\\\-]?\\\\d{8,11}\", 'all': True, \n 'sites': [\"patents.stackexchange.com\"], 'reason': \"Phone number detected\"},\n {'regex': \"(?i)\\\\b([Nn]igga|[Nn]igger|niga|[Aa]sshole|crap|fag|[Ff]uck|idiot|[Ss]hit|[Ww]hore)s?\\\\b\", 'all': True,\n 'sites': [], 'reason': \"Offensive title detected\",'insensitive':True},\n {'regex': \"^[A-Z0-9\\\\(\\\\)\\\\.\\\\-\\\\?\\\\s'\\\"]*$\", 'all': True, 'sites': [], 'reason': \"All-caps title\"}\n ]\n\n @staticmethod\n def testpost(title, site):\n result = [];\n for rule in FindSpam.rules:\n if rule['all'] != (site in rule['sites']):\n if re.compile(rule['regex']).search(title):\n result.append(rule['reason'])\n return result\n\n @staticmethod\n def testtitle(title):\n regexes=[\"\\\\b(baba(ji)?|vashikaran|fashion|here is|porn)\\\\b\",\"\\\\+\\\\d{10}\",\"\\\\+?\\\\d{2}\\\\s?\\\\d{8}\",\"\\\\b(asshole|crap|fag|fuck|idiot|shit|whore)s?\\\\b\"]\n result = []\n p = [not not re.compile(s).search(title) for s in regexes]\n if 'vashikaran' in title or 'baba' in title or True in p:\n result.append('Possible spam')\n # magic if matches word\n return result\n","sub_path":"findspam.py","file_name":"findspam.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"554625699","text":"from django.contrib import messages\nfrom django.shortcuts import redirect\nfrom selection.models import Office\nfrom offices.models import Reservation\nfrom datetime import datetime\n\n\ndef is_overlap(request, req_start_date, req_end_date):\n user_reservations = list(Reservation.objects.filter(user_id=request.user).values())\n\n for booking in user_reservations:\n if (booking['start_date'] <= datetime.strptime(req_start_date, '%Y-%m-%d').date() <=\n booking['end_date']) or \\\n (datetime.strptime(req_start_date, '%Y-%m-%d').date() <= booking['start_date'] <=\n datetime.strptime(req_end_date, '%Y-%m-%d').date()):\n return False\n return True\n\n\ndef check_availability(reservation, start_date, end_date):\n start_y, start_m, start_d = (int(start_date.split('-')[0]),\n int(start_date.split('-')[1]),\n int(start_date.split('-')[2]))\n end_y, end_m, end_d = (int(end_date.split('-')[0]), int(end_date.split('-')[1]), int(end_date.split('-')[2]))\n 
booked_offices = Reservation.objects.filter(end_date__gte=datetime(start_y, start_m, start_d), start_date__lte=datetime(end_y, end_m, end_d)).values_list('office_id', flat=True)\n return Office.objects.exclude(id__in=booked_offices)\n\n","sub_path":"offices/booking_functions/availability.py","file_name":"availability.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"605010399","text":"import gc\nimport argparse\nimport tempfile\nimport aiohttp\nimport asyncio\nimport logging\nimport vapoursynth\nfrom config import config\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot\nfrom functools import partial\nfrom handle_messages import private_msg_file, private_msg, delete_user_message\nfrom cmd_manager.decorators import register_command, add_argument\n\ncore = vapoursynth.core\ncore.add_cache = False\ncore.accept_lowercase = True\nimwri = getattr(core, \"imwri\", getattr(core, \"imwrif\", None))\n\n\nclass GetNative:\n user_cooldown = set()\n\n def __init__(self, msg_author, img_url=None, filename=None, kernel=None, b=None, c=None, taps=None, ar=None,\n approx=None, min_h=None, max_h=None):\n self.plotScaling = 'log'\n self.min_h = min_h\n self.max_h = max_h\n self.ar = ar\n self.msg_author = msg_author\n self.img_url = img_url\n self.b = b\n self.c = c\n self.taps = taps\n self.approx = approx\n self.kernel = kernel\n self.txt_output = \"\"\n self.resolutions = []\n self.filename = filename\n self.tmp_dir = tempfile.TemporaryDirectory()\n self.path = self.tmp_dir.name\n\n async def run(self):\n self.user_cooldown.add(self.msg_author)\n asyncio.get_event_loop().call_later(60, lambda: self.user_cooldown.discard(self.msg_author))\n\n image = await self.get_image()\n if image is None:\n return True, \"Can't load image. 
Pls try it again later.\"\n\n src = imwri.Read(image)\n if self.ar is 0:\n self.ar = src.width / src.height\n\n matrix_s = '709' if src.format.color_family == vapoursynth.RGB else None\n src_luma32 = core.resize.Point(src, format=vapoursynth.YUV444PS, matrix_s=matrix_s)\n src_luma32 = core.std.ShufflePlanes(src_luma32, 0, vapoursynth.GRAY)\n src_luma32 = core.std.Cache(src_luma32)\n\n # descale each individual frame\n resizer = descale_approx if self.approx else descale_accurate\n clip_list = []\n for h in range(self.min_h, self.max_h + 1):\n clip_list.append(resizer(src_luma32, self.getw(h), h, self.kernel, self.b, self.c, self.taps))\n full_clip = core.std.Splice(clip_list, mismatch=True)\n full_clip = upscale(full_clip, self.getw(src.height), src.height, self.kernel, self.b, self.c, self.taps)\n if self.ar != src.width / src.height:\n src_luma32 = upscale(src_luma32, self.getw(src.height), src.height, self.kernel, self.b, self.c, self.taps)\n expr_full = core.std.Expr([src_luma32 * full_clip.num_frames, full_clip], 'x y - abs dup 0.015 > swap 0 ?')\n full_clip = core.std.CropRel(expr_full, 5, 5, 5, 5)\n full_clip = core.std.PlaneStats(full_clip)\n full_clip = core.std.Cache(full_clip)\n\n tasks_pending = set()\n futures = {}\n vals = []\n for frame_index in range(len(full_clip)):\n fut = asyncio.ensure_future(asyncio.wrap_future(full_clip.get_frame_async(frame_index)))\n tasks_pending.add(fut)\n futures[fut] = frame_index\n while len(tasks_pending) >= core.num_threads * (2 if self.approx else 1) + 2:\n tasks_done, tasks_pending = await asyncio.wait(\n tasks_pending, return_when=asyncio.FIRST_COMPLETED)\n vals += [(futures.pop(task), task.result().props.PlaneStatsAverage) for task in tasks_done]\n\n tasks_done, _ = await asyncio.wait(tasks_pending)\n vals += [(futures.pop(task), task.result().props.PlaneStatsAverage) for task in tasks_done]\n vals = [v for _, v in sorted(vals)]\n ratios, vals, best_value = self.analyze_results(vals)\n self.save_plot(vals)\n self.txt_output += 'Raw data:\\nResolution\\t | Relative Error\\t | Relative difference from last\\n'\n for i, error in enumerate(vals):\n self.txt_output += f'{i + self.min_h:4d}\\t\\t | {error:.10f}\\t\\t\\t | {ratios[i]:.2f}\\n'\n\n with open(f\"{self.path}/{self.filename}.txt\", \"w\") as file_open:\n file_open.writelines(self.txt_output)\n\n return False, best_value\n\n def getw(self, h, only_even=True):\n w = h * self.ar\n w = int(round(w))\n if only_even:\n w = w // 2 * 2\n\n return w\n\n def analyze_results(self, vals):\n ratios = [0.0]\n for i in range(1, len(vals)):\n last = vals[i - 1]\n current = vals[i]\n ratios.append(current and last / current)\n sorted_array = sorted(ratios, reverse=True) # make a copy of the array because we need the unsorted array later\n max_difference = sorted_array[0]\n\n differences = [s for s in sorted_array if s - 1 > (max_difference - 1) * 0.33][:5]\n\n for diff in differences:\n current = ratios.index(diff)\n # don't allow results within 20px of each other\n for res in self.resolutions:\n if res - 20 < current < res + 20:\n break\n else:\n self.resolutions.append(current)\n\n bicubic_params = self.kernel == 'bicubic' and f'Scaling parameters:\\nb = {self.b:.2f}\\nc = {self.c:.2f}\\n' or ''\n best_values = f\"{'p, '.join([str(r + self.min_h) for r in self.resolutions])}p\"\n self.txt_output += f\"Resize Kernel: {self.kernel}\\n{bicubic_params}Native resolution(s) (best guess): \" \\\n f\"{best_values}\\nPlease check the graph manually for more accurate results\\n\\n\"\n\n return ratios, vals, 
f\"Native resolution(s) (best guess): {best_values}\"\n\n def save_plot(self, vals):\n matplotlib.pyplot.style.use('dark_background')\n matplotlib.pyplot.plot(range(self.min_h, self.max_h + 1), vals, '.w-')\n matplotlib.pyplot.title(self.filename)\n matplotlib.pyplot.ylabel('Relative error')\n matplotlib.pyplot.xlabel('Resolution')\n matplotlib.pyplot.yscale(self.plotScaling)\n matplotlib.pyplot.savefig(f'{self.path}/{self.filename}.png')\n matplotlib.pyplot.clf()\n\n async def get_image(self):\n with aiohttp.ClientSession() as sess:\n async with sess.get(self.img_url) as resp:\n if resp.status != 200:\n return None\n with open(f\"{self.path}/{self.filename}\", 'wb') as f:\n f.write(await resp.read())\n return f\"{self.path}/{self.filename}\"\n\n\ndef upscale(src, width, height, kernel, b, c, taps):\n resizer = getattr(src.resize, kernel.title())\n if not resizer:\n return src.fmtc.resample(width, height, kernel=kernel, a1=b, a2=c, taps=taps)\n if kernel == 'bicubic':\n resizer = partial(resizer, filter_param_a=b, filter_param_b=c)\n elif kernel == 'lanczos':\n resizer = partial(resizer, filter_param_a=taps)\n\n return resizer(width, height)\n\n\ndef descale_accurate(src, width, height, kernel, b, c, taps):\n descale = getattr(src, 'descale_getnative', None)\n if descale is None:\n descale = getattr(src, 'descale')\n descale = getattr(descale, 'De' + kernel)\n if kernel == 'bicubic':\n descale = partial(descale, b=b, c=c)\n elif kernel == 'lanczos':\n descale = partial(descale, taps=taps)\n\n return descale(width, height)\n\n\ndef descale_approx(src, width, height, kernel, b, c, taps):\n return src.fmtc.resample(width, height, kernel=kernel, taps=taps, a1=b, a2=c, invks=True, invkstaps=taps)\n\n\ndef to_float(str_value):\n if set(str_value) - set(\"0123456789./\"):\n raise argparse.ArgumentTypeError(\"Invalid characters in float parameter\")\n try:\n return eval(str_value) if \"/\" in str_value else float(str_value)\n except (SyntaxError, ZeroDivisionError, TypeError, ValueError):\n raise argparse.ArgumentTypeError(\"Exception while parsing float\") from None\n\n\n@register_command('getnative', description='Find the native resolution(s) of upscaled material (mostly anime)')\n@add_argument('--kernel', '-k', dest='kernel', type=str.lower, default='bilinear', help='Resize kernel to be used')\n@add_argument('--bicubic-b', '-b', dest='b', type=to_float, default=\"1/3\", help='B parameter of bicubic resize')\n@add_argument('--bicubic-c', '-c', dest='c', type=to_float, default=\"1/3\", help='C parameter of bicubic resize')\n@add_argument('--lanczos-taps', '-t', dest='taps', type=int, default=3, help='Taps parameter of lanczos resize')\n@add_argument('--aspect-ratio', '-ar', dest='ar', type=to_float, default=0, help='Force aspect ratio. 
Only useful for anamorphic input')\n@add_argument('--approx', '-ap', dest=\"approx\", action=\"store_true\", help='Use fmtc instead of descale [faster, loss of accuracy]')\n@add_argument('--min-height', '-min', dest=\"min_h\", type=int, default=500, help='Minimum height to consider')\n@add_argument('--max-heigth', '-max', dest=\"max_h\", type=int, default=1000, help='Maximum height to consider [max 1080 atm]')\nasync def getnative(client, message, args):\n if not message.attachments:\n await delete_user_message(message)\n return await private_msg(message, \"Picture as attachment is needed.\")\n elif \"width\" not in message.attachments[0]:\n await delete_user_message(message)\n return await private_msg(message, \"Filetype is not allowed!\")\n\n if message.author.id in GetNative.user_cooldown:\n await delete_user_message(message)\n return await private_msg(message, \"Pls use this command only every 1min.\")\n\n width = message.attachments[0][\"width\"]\n height = message.attachments[0][\"height\"]\n if width * height > 8300000:\n await delete_user_message(message)\n return await private_msg(message, \"Picture is too big.\")\n elif args.min_h >= height:\n return await private_msg(message, f\"Picture is to small or equal for min height {args.min_h}.\")\n elif args.min_h >= args.max_h:\n return await private_msg(message, f\"Your min height is bigger or equal to max height.\")\n elif args.max_h - args.min_h > 1000:\n return await private_msg(message, f\"Max - min height bigger than 1000 is not allowed\")\n elif args.max_h > height:\n await private_msg(message, f\"Your max height cant be bigger than your image dimensions. New max height is {height}\")\n args.max_h = height\n\n if args.approx:\n try:\n core.fmtc.resample(core.std.BlankClip(), kernel=args.kernel)\n except vapoursynth.Error:\n return await private_msg(message, 'fmtc: Invalid kernel specified.')\n else:\n if args.kernel not in ['spline36', 'spline16', 'lanczos', 'bicubic', 'bilinear']:\n return await private_msg(message, f'descale: {args.kernel} is not a supported kernel. 
Try -ap for approximation.')\n\n delete_message = await client.send_file(message.channel, config.PICTURE.spam + \"tenor_loading.gif\")\n\n kwargs = args.__dict__.copy()\n del kwargs[\"command\"]\n img_url = message.attachments[0][\"url\"]\n filename = message.attachments[0][\"filename\"]\n kwargs[\"img_url\"] = img_url\n kwargs[\"filename\"] = filename\n\n msg_author = message.author.id\n import time\n starttime = time.time()\n getnative = GetNative(msg_author, **kwargs)\n try:\n forbidden_error, best_value = await getnative.run()\n except BaseException as err:\n forbidden_error = True\n best_value = \"Error in Getnative, can't process your picture.\"\n logging.info(f\"Error in getnative: {err}\")\n gc.collect()\n print('done in {:.2f} s'.format(time.time() - starttime))\n\n if not forbidden_error:\n content = ''.join([\n f\"Output:\"\n f\"\\nKernel: {getnative.kernel} \",\n f\"AR: {getnative.ar:.2f} \",\n f\"B: {getnative.b:.2f} C: {getnative.c:.2f} \" if getnative.kernel == \"bicubic\" else \"\",\n f\"Taps: {getnative.taps} \" if getnative.kernel == \"lanczos\" else \"\",\n f\"\\n{best_value}\",\n f\"\\n[approximation]\" if getnative.approx else \"\",\n ])\n await private_msg_file(message, f\"{getnative.path}/{filename}.txt\", \"Output from getnative.\")\n await client.send_file(message.channel, getnative.path + f'/{filename}', content=f\"Input\\n{message.author}: \\\"{message.content}\\\"\")\n await client.send_file(message.channel, getnative.path + f'/{filename}.png', content=content)\n else:\n await private_msg(message, best_value)\n\n await delete_user_message(message)\n await delete_user_message(delete_message)\n getnative.tmp_dir.cleanup()\n","sub_path":"commands/getnative.py","file_name":"getnative.py","file_ext":"py","file_size_in_byte":12461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"125808862","text":"import cv2\nimport numpy\nfrom sklearn import svm\nfrom sklearn.externals import joblib\n\nwin_x=40\nwin_y=120\ndef loadImages(fileName):\n\tf=open(fileName)\n\tlines=f.readlines()\n\timgList=[]\n\tfor line in lines:\n#\t\tprint \"loading\",line\n\t\tline=line.strip()\n\t\tif len(line)>0:\n\t\t\timg=cv2.imread(line)\n\t\t\tif img != None:\n\t\t\t\timgList.append(img)\n#\t\t\tcv2.imshow(\"img\",cv2.imread(line.strip()))\n#\t\t\tcv2.waitKey(1)\n\treturn imgList\n\n\ndef getHog(imgList):\n\thog = cv2.HOGDescriptor(_winSize=(win_x,win_y), _blockSize=(16,16), _blockStride=(8,8), _cellSize=(8,8), _nbins=9)\n\tdescriptors=numpy.ndarray([len(imgList),hog.getDescriptorSize()],dtype='float')\n\n\tfor i in range(len(imgList)):\n\t\tdes=hog.compute(imgList[i])\n\t\tdescriptors[i,:]=des.transpose()\n\treturn descriptors\n\n\n\ndef detect(trainedsvm,img):\n\trect=[]\n\tfor y in range(0,img.shape[0],8):\n\t\tfor x in range(0,img.shape[1],8):\n\t\t\ty1=y\n\t\t\ty2=y1+win_y\n\t\t\tx1=x\n\t\t\tx2=x1+win_x\n\t\t\tif(y2>img.shape[0] or x2 > img.shape[1]):\n\t\t\t\tcontinue\n\t\t\ttmpImg=img[y1:y2,x1:x2]\n\t\t\tdes=getHog([tmpImg])\n\t\t\tresult=trainedsvm.predict_proba(des[0,:])\n\t\t\t#print result\n\t\t\tif (result[0][1]>0.8) :\n\t\t\t\trect.append([(x1,y1),(x2,y2)])\n\t\t\t\t#cv2.imshow(\"pedestrian\",tmpImg)\n\t\t\t\t#cv2.rectangle(imgorig,(x1*4,y1*4),(x2*4,y2*4),(0,255,0))\n\t\t\t\t#cv2.waitKey(0)\n\treturn rect\n\ndef detectMultiScale(trainedsvm,Image,level):\n\tdetectedRect=[]\n\ttmpImg=Image\n\tscale=2\n\tfor l in 
range(1,level+1):\n\t\ttmpImg=cv2.pyrDown(tmpImg)#,dstsize=(int(tmpImg.shape[0]/scale)+1,int(tmpImg.shape[1]/scale)+1))\n\t#\tprint tmpImg.shape\n\t\trectangles=detect(trainedsvm,tmpImg)\n\n\t\tif(len(rectangles)>0):\n\t\t\t#print l, scale\n\t\t\tfor rect in rectangles:\n\t\t\t\tcv2.rectangle(tmpImg, rect[0],rect[1],(0,255,0))\n\t\t\t\tdetectedRect.append([tuple( r*((scale**l)) for r in rect[0]),tuple( r*((scale**l)) for r in rect[1])])\n\t\t#cv2.imshow(\"img\",tmpImg)\n\t\t\n\t\tcv2.waitKey(0)\n\treturn detectedRect\n\t\n\ndef mergeRectangles(rectList):\n\trect=[]\n\tgroupby_x=[]\n\tfor r in rectList:\n\t\trect.append([r[0][0],r[0][1],r[1][0],r[1][1]])\n\tsorted_x=rect #sorted(rect,key=lambda x:x[0])\n\tfor s_x in sorted_x:\n\t\tisIntersect=False\n\t\tcount=0\n\t\tfor gp_x in groupby_x:\n\t\t\tfor g_x in gp_x: \n\t\t\t\tinter_left=max(g_x[0],s_x[0]) \n\t\t\t\tinter_top= max(g_x[1],s_x[1]) \n\t\t\t\tinter_right=min(g_x[2],s_x[2]) \n\t\t\t\tinter_bottom=min(g_x[3],s_x[3]) \n\t\t\t\tarea1=(g_x[2]-g_x[0]) *(g_x[3]-g_x[1])\n\t\t\t\tarea2= (s_x[2]-s_x[0]) *(s_x[3]-s_x[1])\n\t\t\t\tarea=max(area1,area2)\n\t\t\t\tif (inter_right >inter_left and inter_bottom >inter_top ):\n\t\t\t\t\tinter_area= (inter_right-inter_left) * (inter_bottom-inter_top)\n\t\t\t\t\tif inter_area/(area*1.0) > 0.7:\n\t\t\t\t\t\t#print \"Intersection found\"\n\t\t\t\t\t\tisIntersect=True\n\t\t\t\t\tbreak\n\t\t\tif isIntersect==True:\n\t\t\t\tgroupby_x[count].append(s_x)\n\t\t\t\tbreak\n\t\t\tcount=count+1\n\t\tif isIntersect==False:\n\t\t\t\n\t\t\tgroupby_x.append([s_x])\n\n\tgrouped=[]\n\tfor gp in groupby_x:\n\t\t#print len(gp)\n\t\tavg_left=0\n\t\tavg_top=0\n\t\tavg_right=0\n\t\tavg_bottom=0\n\t\tcount=0\n\t\tfor rec in gp:\n\t\t\tavg_left=avg_left+rec[0]\n\t\t\tavg_top=avg_top+rec[1]\n\t\t\tavg_right=avg_right+rec[2]\n\t\t\tavg_bottom=avg_bottom+rec[3]\n\t\t\tcount=count+1\n\t\tavg_left=int(avg_left/count)\n\t\tavg_top=int(avg_top/count)\n\t\tavg_right=int(avg_right/count)\n\t\tavg_bottom=int(avg_bottom/count)\n\t\tgrouped.append([(avg_left,avg_top),(avg_right,avg_bottom)])\n\treturn grouped\n\n\n\ndef cropImages(imgFile,outFolder):\n#imgFile=\"/home/juned/Code/pycon2013/database/negative/image_001115.jpg\"\n#outFolder=\"/home/juned/Code/pycon2013/database/negative/patches\"\n\timg=cv2.imread(imgFile);\n#\twin_x=64\n#\twin_y=128\n\ttmpImg=img[0:128,0:64]\n\tcounter=950\n\tfor y in range(0,img.shape[0],8):\n\t\tfor x in range(0,img.shape[1],8):\n\t\t\ty1=y\n\t\t\ty2=y1+win_y\n\t\t\tx1=x\n\t\t\tx2=x1+win_x\n\t\t\tif(y2>img.shape[0] or x2 > img.shape[1]):\n\t\t\t\tcontinue\n\t\t\tcounter=counter+1\n\t\t\ttmpImg=img[y1:y2,x1:x2]\n\t\t\tcv2.imwrite(outFolder+str(counter)+\".jpg\",tmpImg)\n\t\t\t\t","sub_path":"04-ObjectDetection/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"536421056","text":"from flask import Flask, session, request, redirect, render_template\nimport random\napp=Flask(__name__)\napp.secret_key=\"9824hfiuwenfiwenfoe\"\nprint('\\n','= = = starting server = = =')\n\n@app.route('/')\ndef index():\n print('-=-=--=-=-=-= in index =-=-=-=-=-=- ')\n return render_template('index.html')\n\n@app.route('/process', methods=['post'])\ndef process():\n print('\\n','-=-=- in /process -=--=-=-')\n # print('form data ====> ', request.form)\n\n if 'total_gold' not in session:\n session['total_gold'] = 0\n if 'msg' not in session:\n session['msg'] = ''\n \n if request.form['building'] == 
'farm':\n        fgold = random.randrange(5,10)\n        session['total_gold'] += fgold\n        session['msg'] = \" you got \"+str(fgold)+\" gold from FARM \"+session['msg']\n\n    if request.form['building'] == 'casino':\n        cgold = random.randrange(-50,50)\n        if cgold < 0:\n            session['total_gold'] += cgold\n            session['msg'] = \" you LOST \"+str(cgold)+\" gold from CASINO \"+session['msg']\n        if cgold >= 0:\n            session['total_gold'] += cgold\n            session['msg'] = \" LUCKY *** you WON \"+str(cgold)+\" gold from CASINO
\"+session['msg']\n return redirect('/')\n\n@app.route('/reset')\ndef reset():\n session.clear()\n return redirect('/')\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"flask_fundamentals/ninja-gold-redo/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"73851162","text":"import datetime\nimport json\nfrom enum import Enum\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom pydantic import BaseModel, validator\nfrom qcelemental.models import ComputeError\n\nfrom .common_models import ObjectId\n\n\nclass DBRef(BaseModel):\n ref: str\n id: ObjectId\n\n\nclass TaskStatusEnum(str, Enum):\n running = \"RUNNING\"\n waiting = \"WAITING\"\n error = \"ERROR\"\n complete = \"COMPLETE\"\n\n\nclass ManagerStatusEnum(str, Enum):\n active = 'ACTIVE'\n inactive = 'INACTIVE'\n\nclass PriorityEnum(int, Enum):\n HIGH = 2\n NORMAL = 1\n LOW = 0\n\n\nclass BaseResultEnum(str, Enum):\n result = \"result\"\n procedure = \"procedure\"\n\n\nclass PythonComputeSpec(BaseModel):\n function: str\n args: List[Any]\n kwargs: Dict[str, Any]\n\n\nclass TaskRecord(BaseModel):\n\n id: ObjectId = None\n\n spec: PythonComputeSpec\n parser: str\n status: TaskStatusEnum = \"WAITING\"\n\n # Compute blockers and prevention\n program: str\n procedure: Optional[str] = None\n manager: Optional[str] = None\n\n # Sortables\n priority: PriorityEnum = PriorityEnum.NORMAL\n tag: Optional[str] = None\n\n # Link back to the base Result\n base_result: Union[DBRef, int]\n error: Optional[ComputeError] = None\n\n # Modified data\n modified_on: datetime.datetime = None\n created_on: datetime.datetime = None\n\n\n def __init__(self, **data):\n\n # Set datetime defaults if not present\n dt = datetime.datetime.utcnow()\n data.setdefault(\"modified_on\", dt)\n data.setdefault(\"created_on\", dt)\n\n super().__init__(**data)\n\n class Config:\n extra = \"forbid\"\n\n @validator('priority', pre=True)\n def munge_priority(cls, v):\n if isinstance(v, str):\n v = PriorityEnum[v.upper()]\n elif v is None:\n v = PriorityEnum.NORMAL\n return v\n\n @validator('program')\n def check_program(cls, v):\n return v.lower()\n\n @validator('procedure')\n def check_procedure(cls, v):\n return v.lower()\n\n def json_dict(self, *args, **kwargs):\n return json.loads(self.json(*args, **kwargs))","sub_path":"qcfractal/interface/models/task_models.py","file_name":"task_models.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"573220097","text":"import torch\nfrom torch.utils.data import ConcatDataset, DataLoader\n\nfrom . 
import collate_fn\nfrom .datasets import *\n\ncityscapes_images_dir = '/data7/lufficc/cityscapes/leftImg8bit'\nfoggy_cityscapes_images_dir = '/data7/lufficc/cityscapes/leftImg8bit_foggy'\n\nDATASETS = {\n 'cityscapes_train': {\n 'ann_file': '/data7/lufficc/cityscapes/cityscapes_coco_train.json',\n 'root': cityscapes_images_dir,\n },\n\n 'cityscapes_val': {\n 'ann_file': '/data7/lufficc/cityscapes/cityscapes_coco_val.json',\n 'root': cityscapes_images_dir,\n },\n\n 'cityscapes_test': {\n 'ann_file': '/data7/lufficc/cityscapes/cityscapes_coco_test.json',\n 'root': cityscapes_images_dir,\n },\n\n 'foggy_cityscapes_train': {\n 'ann_file': '/data7/lufficc/cityscapes/foggy_cityscapes_coco_train.json',\n 'root': foggy_cityscapes_images_dir,\n },\n\n 'foggy_cityscapes_val': {\n 'ann_file': '/data7/lufficc/cityscapes/foggy_cityscapes_coco_val.json',\n 'root': foggy_cityscapes_images_dir,\n },\n\n 'foggy_cityscapes_train_0.02': {\n 'ann_file': '/data7/lufficc/cityscapes/foggy_cityscapes_coco_train.json',\n 'root': foggy_cityscapes_images_dir,\n 'betas': 0.02,\n },\n\n 'foggy_cityscapes_val_0.02': {\n 'ann_file': '/data7/lufficc/cityscapes/foggy_cityscapes_coco_val.json',\n 'root': foggy_cityscapes_images_dir,\n 'betas': 0.02,\n },\n\n 'foggy_cityscapes_test': {\n 'ann_file': '/data7/lufficc/cityscapes/foggy_cityscapes_coco_test.json',\n 'root': foggy_cityscapes_images_dir,\n },\n \"coco_2017_train\": {\n \"ann_file\": \"/data7/lufficc/coco/annotations/instances_train2017.json\",\n \"root\": \"/data7/lufficc/coco/train2017\",\n },\n \"coco_2017_val\": {\n \"ann_file\": \"/data7/lufficc/coco/annotations/instances_val2017.json\",\n \"root\": \"/data7/lufficc/coco/val2017\",\n },\n\n 'voc_2007_trainval': {\n 'root': '/data7/lufficc/voc/VOCdevkit/VOC2007',\n 'split': 'trainval',\n },\n\n 'voc_2012_trainval': {\n 'root': '/data7/lufficc/voc/VOCdevkit/VOC2012',\n 'split': 'trainval',\n },\n\n 'voc_2007_test': {\n 'root': '/data7/lufficc/voc/VOCdevkit/VOC2007',\n 'split': 'test',\n },\n\n # -----------watercolor----------\n 'watercolor_voc_2012_trainval': {\n 'root': '/data7/lufficc/voc/VOCdevkit/VOC2012',\n 'split': 'trainval',\n },\n\n 'watercolor_voc_2007_trainval': {\n 'root': '/data7/lufficc/voc/VOCdevkit/VOC2007',\n 'split': 'trainval',\n },\n 'watercolor_train': {\n 'root': '/data7/lufficc/cross_domain_detection/watercolor',\n 'split': 'train',\n },\n 'watercolor_test': {\n 'root': '/data7/lufficc/cross_domain_detection/watercolor',\n 'split': 'test',\n },\n\n # -----------clipart----------\n 'voc_clipart_train': {\n 'root': '/data7/lufficc/cross_domain_detection/clipart',\n 'split': 'train',\n },\n 'voc_clipart_test': {\n 'root': '/data7/lufficc/cross_domain_detection/clipart',\n 'split': 'test',\n },\n 'voc_clipart_traintest': {\n 'root': '/data7/lufficc/cross_domain_detection/clipart',\n 'split': 'traintest',\n },\n\n # -----------sim10k----------\n 'sim10k': {\n 'root': '/data7/lufficc/cross_domain_detection/sim10k/repro_10k_images/',\n 'split': 'all',\n },\n 'cityscapes_car_train': {\n 'ann_file': '/data7/lufficc/cityscapes/cityscapes_coco_train.json',\n 'root': cityscapes_images_dir,\n },\n\n 'cityscapes_car_val': {\n 'ann_file': '/data7/lufficc/cityscapes/cityscapes_coco_val.json',\n 'root': cityscapes_images_dir,\n },\n\n 'car_city_val': {\n 'ann_file': '/data7/lufficc/cityscapes/annotations/instances_car_only_filtered_gtFine_val.json',\n 'root': cityscapes_images_dir + '/val',\n },\n\n '6cats_city_val': {\n 'ann_file': '/data7/lufficc/cityscapes/cityscapes_6cats_coco_val.json',\n 'root': 
cityscapes_images_dir,\n },\n\n 'foggy_cityscapes_car_train': {\n 'ann_file': '/data7/lufficc/cityscapes/foggy_cityscapes_coco_train.json',\n 'root': foggy_cityscapes_images_dir,\n },\n\n 'foggy_cityscapes_car_val': {\n 'ann_file': '/data7/lufficc/cityscapes/foggy_cityscapes_coco_val.json',\n 'root': foggy_cityscapes_images_dir,\n },\n\n # -----------kitti----------\n 'kitti_train': {\n 'root': '/data7/lufficc/cross_domain_detection/kitti/VOC2012/',\n 'split': 'train',\n },\n\n 'vkitti': {\n 'ann_file': '/data8/lufficc/datasets/VirtualKITTI-InstanceSeg-COCO.json',\n 'root': '/data8/lufficc/datasets/VKITTI/vkitti_1.3.1_rgb',\n },\n\n 'SYNTHIA_mask': {\n 'ann_file': '/data8/lufficc/datasets/RAND_CITYSCAPES-COCO.json',\n 'root': '/data8/lufficc/datasets/SYNTHIA/RAND_CITYSCAPES/RGB',\n },\n}\n\n\ndef build_datasets(names, transforms, is_train=True):\n assert len(names) > 0\n datasets = []\n for name in names:\n cfg = DATASETS[name].copy()\n cfg['dataset_name'] = name\n cfg['train'] = is_train\n cfg['transforms'] = transforms\n if 'watercolor' in name:\n dataset = WatercolorDataset(**cfg)\n elif 'cityscapes_car' in name:\n dataset = CityscapeCarDataset(**cfg)\n elif 'sim10k' in name:\n dataset = Sim10kDataset(**cfg)\n elif 'vkitti' in name:\n dataset = VKITTI(**cfg)\n elif 'kitti' in name:\n dataset = KITTIDataset(**cfg)\n elif 'cityscapes' in name:\n dataset = CityscapeDataset(**cfg)\n elif 'coco' in name:\n dataset = MSCOCODataset(**cfg)\n elif 'voc' in name:\n dataset = CustomVocDataset(**cfg)\n elif 'car_city_val' in name:\n dataset = CityscapeDataset(**cfg)\n elif '6cats_city_val' in name:\n dataset = CityscapeDataset(**cfg)\n elif 'SYNTHIA_mask' in name:\n dataset = SYNTHIAMask(**cfg)\n else:\n raise NotImplementedError\n datasets.append(dataset)\n if is_train:\n return datasets if len(datasets) == 1 else [ConcatDataset(datasets)]\n return datasets\n\n\ndef build_data_loaders(names, transforms, is_train=True, distributed=False, batch_size=1, num_workers=8):\n datasets = build_datasets(names, transforms=transforms, is_train=is_train)\n data_loaders = []\n for dataset in datasets:\n if distributed:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n elif is_train:\n sampler = torch.utils.data.RandomSampler(dataset)\n else:\n sampler = torch.utils.data.SequentialSampler(dataset)\n if is_train:\n batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size, drop_last=False)\n loader = DataLoader(dataset, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn)\n else:\n loader = DataLoader(dataset, batch_size=1, sampler=sampler, num_workers=num_workers, collate_fn=collate_fn)\n\n data_loaders.append(loader)\n\n if is_train:\n assert len(data_loaders) == 1, 'When training, only support one dataset.'\n return data_loaders[0]\n return data_loaders\n","sub_path":"detection/data/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":7262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"577512964","text":"import robinhoodwrapper\nimport logging\nimport inspect\nimport pandas as pd\nimport commonqueries\nimport numpy as np\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nimport pandas_market_calendars as mcal\nimport pytz\nimport os\nimport configwrapper\n\nclass TradeRobinhood():\n\tdef __init__(self,config_file):\n\t\tself.config = 
configwrapper.ConfigWrapper(config_file=config_file)\n\t\tdata_collections=self.build_collections('FINANCIALDATA_COLLECTIONS')\n\t\tuser_collections=self.build_collections('USERS_COLLECTIONS')\n\t\tself.data_collections=data_collections\n\t\tself.user_collections=user_collections\n\t\tself.data_cq=commonqueries.CommonQueries(port=self.config.get_int('FINANCIALDATA_MONGO','port'),host=self.config.get_string('FINANCIALDATA_MONGO','host'), username=self.config.get_string('FINANCIALDATA_MONGO','username'), password=self.config.get_string('FINANCIALDATA_MONGO','password'), dbname=self.config.get_string('FINANCIALDATA_MONGO','dbname'),collections=data_collections)\n\t\tself.user_cq=commonqueries.CommonQueries(port=self.config.get_int('USERS_MONGO','port'),host=self.config.get_string('USERS_MONGO','host'), username=self.config.get_string('USERS_MONGO','username'), password=self.config.get_string('USERS_MONGO','password'), dbname=self.config.get_string('USERS_MONGO','dbname'),collections=user_collections)\n\t\treturn\n\tdef get_trade_now(self):\n\t\tx=mcal.get_calendar('NYSE').schedule(start_date=datetime.now().date()-relativedelta(days=7),end_date=datetime.now().date()+relativedelta(days=7))\n\t\tnow = pytz.utc.localize(datetime.utcnow())\n\t\ttoday=now.date()\n\t\tx=x[pd.to_datetime(x['market_open'])>=now]\n\t\ttime_until_market_open=float((x['market_open'].iloc[0]-now).total_seconds())\n\t\tmax_time_between_close_and_open=float(17.5*60*60) #4:00pm until 9:30 the next day, is 7.5 hours\n\t\ttradenow=True\n\t\tif time_until_market_open>max_time_between_close_and_open:\n\t\t\tlogging.info('more than 7.5 hours until the next market open, not trading now')\n\t\t\ttradenow=False\n\t\treturn tradenow\n\tdef build_collections(self,section='FINANCIALDATA_COLLECTIONS'):\n\t\tself.user_collections={}\n\t\tfor option in self.config.get_options(section):\n\t\t\tself.user_collections[option]=self.config.get_string(section,option)\n\t\treturn self.user_collections\n\tdef trade_robinhood(self):\n\n\n\t\trecommended_portfolio=pd.DataFrame(list(self.data_cq.mongo.db[self.data_collections['quantative_value_recommended']].find({},{'_id':0})))\n\t\t#calculate aech companyies empercenftage.\n\t\tfor row, company in recommended_portfolio.iterrows():\n\t\t\tempercentage=self.data_cq.get_percent_greater_than(self.data_collections['metrics'],self.data_cq.ticker2cik(company['ticker']),'emyield')\n\t\t\trecommended_portfolio.loc[row,'empercentage']=1-empercentage\n\t\tmin_empercentage=float(recommended_portfolio['empercentage'].min()) #the value where we will sell any stock less than this number\n\n\t\tuser_df = pd.DataFrame(list(self.user_cq.mongo.db[self.user_collections['robinhood_users']].find()))\n\t\tuser_df = user_df.sort_values('username')\n\t\tuser_df = user_df.drop_duplicates('username') # has the usernames and passwords of all robinhood users\n\n\t\trh_generic = robinhoodwrapper.RobinHoodWrapper(instruments=self.data_cq.get_robinhood_instruments())\n\n\t\tfor row, data in recommended_portfolio.iterrows():\n\t\t\trecommended_portfolio.loc[row, 'robinhood_price'] = rh_generic.get_last_price(data['ticker'])\n\t\t\trecommended_portfolio.loc[row, 'instrument'] = rh_generic.symbol2instrument(data['ticker'])\n\n\t\tif (recommended_portfolio['price'] != recommended_portfolio['robinhood_price']).any():\n\t\t\tlogging.error('pricemismatch')\n\t\t\tlogging.error(str(recommended_portfolio[recommended_portfolio['price'] != 
recommended_portfolio['robinhood_price']]))\n\t\t\trecommended_portfolio.to_csv('recommended_portfolio.csv')\n\t\t\tif len(recommended_portfolio[recommended_portfolio['price'] != recommended_portfolio['robinhood_price']]) >= .1 * float(len(recommended_portfolio)): # if more than 10% of the companies dont match\n\t\t\t\tlogging.error('more than 10 percent of the companies dont match, dont trade, something is wrong')\n\t\t\t\treturn\n\t\trecommended_portfolio=recommended_portfolio[pd.notnull(recommended_portfolio['price'])]\n\t\trecommended_portfolio=recommended_portfolio[pd.notnull(recommended_portfolio['robinhood_price'])]\n\t\trecommended_portfolio['price']=recommended_portfolio['price'].round(2)\n\t\trecommended_portfolio['robinhood_price']=recommended_portfolio['robinhood_price'].round(2)\n\t\trecommended_portfolio['weight']=recommended_portfolio['weight']/(recommended_portfolio['weight'].sum())\n\t\trecommended_portfolio=recommended_portfolio.set_index('ticker',drop=False)\n\t\tif len(recommended_portfolio)==0:\n\t\t\tlogging.error('empty trade dataframe')\n\t\t\treturn\n\t\trecommended_portfolio_orig = recommended_portfolio.copy(deep=True)\n\t\tfor index,account in user_df.iterrows():\n\t\t\trh_user=robinhoodwrapper.RobinHoodWrapper(username=account['username'],password=account['password'],instruments=self.data_cq.get_robinhood_instruments())\n\n\t\t\t#get all the options from the user\n\t\t\tuser_trade_options=account['trade']\n\t\t\tshould_trade_now=self.get_trade_now()\n\t\t\tlive_trade=user_trade_options['live_trade']\n\t\t\toptions_trade=user_trade_options['options_trade']\n\t\t\tcan_trade_options=rh_user.can_trade_options()\n\t\t\tmaster_options_trade=self.config.get_bool('TRADING','trade_options')\n\t\t\tmaster_live_trade=self.config.get_bool('TRADING','live_trade')\n\n\t\t\tif master_options_trade is False or not can_trade_options or not options_trade:\n\t\t\t\toptions_trade=False\n\t\t\tif not live_trade or not should_trade_now or master_live_trade is False:\n\t\t\t\tlive_trade=False\n\t\t\tif float(rh_user.get_accounts()[0]['cash'])==0:\n\t\t\t\tlogging.info('we have no money to trade today')\n\t\t\t\tcontinue\n\n\t\t\t#FIRST WE DO THE BUYS\n\t\t\trecommended_portfolio=recommended_portfolio_orig.copy(deep=True)\n\n\t\t\t#filter out wash sale symbols, this way we are always fully invested as we are able\n\t\t\twashsalesymboles=rh_user.get_wash_sale_symbols()\n\t\t\trecommended_portfolio=recommended_portfolio[~recommended_portfolio['ticker'].isin(washsalesymboles)]\n\t\t\trecommended_portfolio['weight']=recommended_portfolio['weight']/(recommended_portfolio['weight'].sum())\n\t\t\tcurrent_positions=rh_user.get_positions()\n\t\t\trecommended_portfolio['desired_value']=recommended_portfolio['weight']*(float(rh_user.get_total_portfolio_value())+float(rh_user.get_accounts()[0]['cash']))\n\t\t\tcurrent_positions=current_positions[current_positions['instrument'].isin(recommended_portfolio['instrument'])] #filter our current positions so we only look at positions we have that we also want to buy\n\t\t\trecommended_portfolio['current_value']=float(0)\n\t\t\tfor index,row in current_positions.iterrows():\n\t\t\t\trecommended_portfolio.loc[rh_user.instrument2symbol(row['instrument']),'current_value']=float(row['quantity'])*float(row['last_trade_price'])\n\n\t\t\t#we need to see if we have any current put option positions and take this into account and modify the current_value\n\t\t\tif options_trade is True:\n\t\t\t\tcurrent_options_positions=rh_user.get_options_positions()\n\t\t\t\tif 
current_options_positions is not None and len(current_options_positions)>0:\n\t\t\t\t\t#todo 6/28 we still need to adjust hte current value of positions with outstanding options, both call and put\n\t\t\t\t\tcurrent_options_positions=current_options_positions[current_options_positions['type']=='put']\n\t\t\t\t\tif len(current_options_positions)>0:\n\t\t\t\t\t\tlogging.error('we need to do something with the optoins we have in our account because we now actually have put options')\n\t\t\t\t\t\tcurrent_options_positions.to_csv('current_options_positions.csv')\n\t\t\t\t\t\texit()\n\n\t\t\trecommended_portfolio['new_value']=recommended_portfolio['desired_value']-recommended_portfolio['current_value']\n\t\t\trecommended_portfolio=recommended_portfolio[recommended_portfolio['new_value']>0] #we only take buys, we dont worry about that we are overallocated to\n\t\t\trecommended_portfolio['new_weight']=recommended_portfolio['new_value']/(recommended_portfolio['new_value'].sum())\n\t\t\trecommended_portfolio['today_value_add']=recommended_portfolio['new_weight']*float(rh_user.get_accounts()[0]['cash'])\n\t\t\trecommended_portfolio['shares']=recommended_portfolio['today_value_add']/(recommended_portfolio['price'])\n\t\t\trecommended_portfolio['max_shares']=np.floor(recommended_portfolio['new_value']/(recommended_portfolio['price'])) #the maximum number of shares we would want to purchase today\n\t\t\trecommended_portfolio=recommended_portfolio.sort_values('shares',ascending=False)\n\n\t\t\twhile any(recommended_portfolio['shares']<1) and len(recommended_portfolio)>0:\n\t\t\t\trecommended_portfolio=recommended_portfolio[:-1]\n\t\t\t\trecommended_portfolio['new_weight']=recommended_portfolio['today_value_add']/(recommended_portfolio['today_value_add'].sum())\n\t\t\t\trecommended_portfolio['today_value_add']=recommended_portfolio['new_weight']*float(rh_user.get_accounts()[0]['cash'])\n\t\t\t\trecommended_portfolio['shares']=recommended_portfolio['today_value_add']/(recommended_portfolio['price']) #we will only purchase at this limit price\n\t\t\t\trecommended_portfolio=recommended_portfolio.sort_values('shares',ascending=False)\n\t\t\tif len(recommended_portfolio)==0:\n\t\t\t\tlogging.info('empty recommended df after filtering for shares')\n\t\t\t\tcontinue\n\t\t\trecommended_portfolio['shares']=np.floor(recommended_portfolio['shares'])\n\t\t\trecommended_portfolio['shares']=recommended_portfolio[['shares','max_shares']].min(axis=1) #take the minimum of what we are going to by, and the max we should, this will ensure that we never overallocate\n\n\t\t\tif live_trade:\n\t\t\t\trh_user.cancel_all_orders() #ONLY REMOVE THE open stock orders, we really should not NEED to cancel, we can work it into our calculations\n\t\t\t\tif options_trade is True:\n\t\t\t\t\trh_user.cancel_all_options_orders() #removes all current option orders\n\t\t\tlogging.info(recommended_portfolio)\n\t\t\tfor symbol,order in recommended_portfolio.iterrows():\n\t\t\t\tif options_trade is True:\n\t\t\t\t\tif float(order['shares'])>100:\n\t\t\t\t\t\toption_chain=self.data_cq.convert_option_chain_rh2td(symbol=symbol,stock_price=rh_user.get_last_price(symbol),option_chain=rh_user.get_options_instrument_data(symbol=symbol))\n\t\t\t\t\t\tbest_put_to_sell=self.data_cq.get_best_put_to_sell(symbol,option_chain=option_chain,exercise_fee=0,trading_fee=0,contract_fee=0)\n\t\t\t\t\t\tif pd.notnull(best_put_to_sell):\n\t\t\t\t\t\t\tlogging.error('prehaps we want to sell a put option?')\n\t\t\t\t\t\t\tlogging.error('we also need to change the 
robinhoodwrapper get_positions_by_odrers to incorporate options events...')\n\t\t\t\t\t\t\texit()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif live_trade:\n\t\t\t\t\t\t\t\trh_user.submitt_order(symbol=symbol, quantity=order['shares'], price=float(order['price']))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif live_trade:\n\t\t\t\t\t\t\trh_user.submitt_order(symbol=symbol,quantity=order['shares'],price=float(order['price']))\n\t\t\t\telse:\n\t\t\t\t\tif live_trade:\n\t\t\t\t\t\trh_user.submitt_order(symbol=symbol, quantity=order['shares'], price=float(order['price']))\n\t\t\tif options_trade is True:\n\t\t\t\t#see if we have to sell any calls, this will later go inside of the if statement, we need to also see if we have any calls already\n\t\t\t\tpositions=rh_user.get_positions()\n\t\t\t\tpositions['shares_to_sell']=positions['quantity'].astype('float')-positions['shares_held'].astype('float')\n\t\t\t\tpositions=positions[positions['shares_to_sell']>=100]\n\t\t\t\tfor row,position in positions.iterrows():\n\t\t\t\t\toption_chain=self.data_cq.convert_option_chain_rh2td(symbol=position['symbol'],stock_price=rh_user.get_last_price(position['symbol']),option_chain=rh_user.get_options_instrument_data(symbol=position['symbol']))\n\t\t\t\t\tpositions.loc[row,'call_to_sell_symbol']=self.data_cq.get_best_call_to_sell(position['symbol'],option_chain=option_chain,exercise_fee=0,trading_fee=0,contract_fee=0)\n\t\t\t\t\tpositions.loc[row,'num_calls_to_sell']=np.floor(float(position['shares_to_sell'])/100)\n\t\t\t\t\tpositions.loc[row,'valid_trade']=self.data_cq.is_valid_trade(cik=self.data_cq.ticker2cik(position['symbol']))\n\t\t\t\t\tpositions.loc[row, 'has_split']=self.data_cq.has_split(cik=self.data_cq.ticker2cik(position['symbol']))\n\t\t\t\tpositions=positions[pd.notnull(positions['call_to_sell_symbol'])]\n\t\t\t\tpositions=positions[positions['num_calls_to_sell']>0]\n\t\t\t\tpositions = positions[positions['valid_trade'] == True]\n\t\t\t\tpositions = positions[positions['has_split'] == False]\n\n\t\t\t\tpositions=positions[['symbol','quantity','num_calls_to_sell','call_to_sell_symbol']]\n\n\t\t\t\tpositions['num_calls_to_sell']=(positions['num_calls_to_sell'].astype('int'))*-1\n\t\t\t\tfor index,position in positions.iterrows():\n\t\t\t\t\toption_chain=self.data_cq.convert_option_chain_rh2td(symbol=position['symbol'],stock_price=rh_user.get_last_price(position['symbol']),option_chain=rh_user.get_options_instrument_data(symbol=position['symbol']))\n\t\t\t\t\tcall_option_to_sell=option_chain[option_chain['url']==position['call_to_sell_symbol']].iloc[0].to_dict()\n\t\t\t\t\tif live_trade:\n\t\t\t\t\t\trh_user.submitt_option_order(position_effect=\"open\",options_instrument_url_id=position['call_to_sell_symbol'],order_type=\"limit\",quantity=position['num_calls_to_sell'],price=call_option_to_sell['bid_price'],account=None,time_in_force='gfd')\n\t\t\telse:\n\t\t\t\tpass\n\n\t\t\t#NOW WE DO THE SELLS\n\t\t\tsell_items=rh_user.get_sell_items()\n\t\t\tlogging.info(sell_items)\n\t\t\tfor symbol,value in sell_items.iteritems():\n\t\t\t\tlast_price=rh_user.get_last_price(symbol=symbol)\n\t\t\t\tvalid_trade=self.data_cq.is_valid_trade(cik=self.data_cq.ticker2cik(symbol))\n\t\t\t\thas_split=self.data_cq.has_split(cik=self.data_cq.ticker2cik(symbol))\n\n\t\t\t\tempercentage=self.data_cq.get_percent_greater_than(self.data_collections['metrics'],self.data_cq.ticker2cik(symbol),'emyield')\n\t\t\t\tif empercentage is None:\n\t\t\t\t\tempercentage=1 #if we can't find one, then we sell because assume that everyone is greater than 
us\n\t\t\t\tempercentage=1-empercentage\n\n\t\t\t\tnext_release=self.data_cq.get_next_release(self.data_cq.ticker2cik(ticker=symbol))\n\t\t\t\tif next_release is None:\n\t\t\t\t\tnext_release=datetime.now()+relativedelta(years=1) #this will ensure that we sell the stock\n\t\t\t\tdays_until_release=self.data_cq.get_days_until_date(date=next_release)\n\n\t\t\t\tif empercentage1 and valid_trade and has_split is False:\n\t\t\t\t\tlogging.info('we are selling stock:'+symbol+\":\"+str(value))\n\t\t\t\t\tif live_trade:\n\t\t\t\t\t\trh_user.submitt_order(symbol=symbol, quantity=int(value), price=float(last_price))\n\t\t\trh_user.logout() #we probably dont actually need to logout\n\t\treturn\ndef main(config_file):\n\tt=TradeRobinhood(config_file=config_file)\n\tt.trade_robinhood()\n\treturn\nif __name__ == '__main__':\n\tif os.path.exists(inspect.stack()[0][1].replace('py','log')):\n\t\tos.remove(inspect.stack()[0][1].replace('py','log'))\n\tlogging.basicConfig(filename=inspect.stack()[0][1].replace('py','log'),level=logging.INFO,format='%(asctime)s:%(levelname)s:%(message)s')\n\tmain(config_file='finance_cfg.cfg')","sub_path":"src/traderobinhood.py","file_name":"traderobinhood.py","file_ext":"py","file_size_in_byte":14759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"305141274","text":"import numpy as np \nimport torch\nimport time\nimport os\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\nfrom tensorboardX import SummaryWriter\n\nclass Solver(object):\n default_adam_args ={ 'lr': 1e-4,\n 'betas': (0.9, 0.999),\n 'eps': 1e-8,\n 'weight_decay': 0.0\n }\n def __init__(self, optim=torch.optim.Adam, optim_args={}, \n loss_func=torch.nn.L1Loss(reduction='sum')):\n\n optim_args_merged = self.default_adam_args.copy()\n optim_args_merged.update(optim_args)\n\n self.optim_args = optim_args_merged\n self.optim = optim\n self.loss_func = loss_func\n\n self._reset_histories()\n\n self.writer = SummaryWriter('/home/huiminzeng/Desktop/Code/Recovery/TensorBoardX')\n\n self.best_model = None\n self.best_valloss = 10000000\n self.lr = self.optim_args['lr']\n\n \n def _reset_histories(self):\n self.train_loss_history = []\n self.val_loss_history = []\n self.train_loss_plot = []\n self.val_loss_plot = []\n self.model = []\n \n def train(self, model, train_loader, val_loader, num_epochs=10, log_nth=0):\n optim = self.optim(model.parameters(), **self.optim_args)\n self._reset_histories()\n iter_per_epoch = len(train_loader)\n\n model.cuda()\n\n print(\"TRAINING START!!!!!!!!!!\")\n print()\n\n for epoch in range(num_epochs):\n \n if epoch < 7:\n lr = self.lr\n optim = self.optim(model.parameters(), lr)\n elif epoch < 15:\n lr = self.lr * 0.5\n optim = self.optim(model.parameters(), lr) \n else:\n lr = self.lr * 0.25\n optim = self.optim(model.parameters(), lr)\n print('learning rate: ', lr)\n \n\n #TRAINING\n epo_start = time.time()\n model.train()\n for i, (x_train,y_train) in enumerate(train_loader):\n\n #if overfitting\n if i > 0:\n break\n model.train()\n optim.zero_grad()\n\n iter_start = time.time()\n\n if torch.cuda.is_available():\n x_train = x_train.cuda()\n y_train = y_train.cuda()\n\n mask = x_train[:, [-1]].eq(-1).float()\n output_train = model(x_train)\n\n output_masked = output_train*mask\n y_masked = y_train*mask\n \n loss = self.loss_func(output_masked, y_masked) \n #print('loss: ', loss.data.cpu().numpy())\n forward_end = time.time()\n\n #print(\"forward pass:\", forward_end - 
iter_start)\n loss.backward()\n backward_end = time.time()\n optim.step()\n #print(\"back propagation: \", backward_end-forward_end)\n loss_value = loss.data.cpu().numpy()/(len(train_loader))\n\n self.writer.add_scalar('train_loss/iteration', loss_value, i+epoch*iter_per_epoch)\n\n\n self.train_loss_history.append(loss_value)\n \n \n if log_nth and i % log_nth == 0 and i!=0:\n print(i)\n print()\n if i == 0:\n print(\"TRAINING VISUALIZATION!!!!!!!!!!\")\n v_time1 = time.time()\n voxels_train = self.get_voxel(x_train, y_train, output_train)\n self.voxel_plot(voxels_train[0], voxels_train[1], voxels_train[2])\n v_time2 = time.time()\n print(\"time: \", v_time2-v_time1)\n \n print()\n print('ITERATION TIME CONSUMPTION! ', backward_end-iter_start)\n\n last_log_nth_losses = np.mean(self.train_loss_history[-log_nth:])\n\n self.writer.add_scalar('train_log/iteration', last_log_nth_losses, i+epoch*iter_per_epoch)\n\n self.train_loss_plot.append(last_log_nth_losses)\n train_loss = last_log_nth_losses\n print('logged: [Iteration %d/%d] TRAIN LOSS: %.3f' \n % ((i + iter_per_epoch * epoch) , (num_epochs * iter_per_epoch), train_loss))\n\n \n self.validation(model, val_loader, i, epoch, iter_per_epoch)\n\n epo_end = time.time()\n epo_time = epo_end - epo_start\n print('[EPOCH %d] time: %.3f' % (epoch, epo_time))\n\n def validation(self, model, val_loader, iteration, epoch, iter_per_epoch):\n model.eval()\n val_time1 = time.time()\n\n for step, (x_val,y_val) in enumerate(val_loader):\n\n \n x_val = x_val.cuda()\n y_val = y_val.cuda()\n\n mask = x_val[:, [-1]].eq(-1).float()\n\n output_val = model(x_val)\n\n #close1 = output_val[:,[0]] <= 2\n #close2 = y_val[:,[0]] <= 2\n\n #mask = (unknown & (close1 | close2)).float()\n\n output_masked = output_val*mask\n y_masked = y_val*mask\n \n loss = self.loss_func(output_masked, y_masked)/6250 \n self.val_loss_history.append(loss.data.cpu().numpy())\n\n\n\n val_time2 = time.time()\n print(\"VAL TIME: \", val_time2 - val_time1)\n val_loss = np.mean(self.val_loss_history[-len(val_loader):])\n\n self.writer.add_scalar('val_log/iteration', val_loss, iteration + iter_per_epoch * epoch)\n\n if val_loss < self.best_valloss:\n self.best_model = model\n self.best_valloss = val_loss\n torch.save(self.best_model, 'models/best_model')\n\n self.val_loss_plot.append(val_loss)\n print('logged: [Iteration %d/Epoch %d] VAL LOSS: %.3f' % ((iteration + iter_per_epoch * epoch), epoch, self.val_loss_plot[-1])) \n\n \n print(\"VALIDATION VISUALIZATION!!!!!!!!!!\")\n voxels_val = self.get_voxel(x_val, y_val, output_val)\n self.voxel_plot(voxels_val[0], voxels_val[1], voxels_val[2])\n \n\n def get_voxel(self,x ,y , output):\n voxel_input = x[3][0].cpu().detach().numpy()\n #print(voxel_input.shape)\n voxel_target = y[3].cpu().detach().numpy().squeeze()\n #print(voxel_target.shape)\n voxel_output = output[3].cpu().detach().numpy().squeeze()\n #print(voxel_output.shape)\n return voxel_input, voxel_target, voxel_output\n \n def voxel_plot(self, voxel_input, voxel_target, voxel_output):\n x, y, z = np.indices((32, 32, 32))\n\n vinput = voxel_input < 0.7\n vtarget = voxel_target < 0.7\n voutput = voxel_output < 0.7\n\n\n fig = plt.figure(figsize=plt.figaspect(0.3))\n\n #===============\n # First subplot\n #===============\n # set up the axes for the first plot\n ax = fig.add_subplot(1, 3, 1, projection='3d')\n colors = np.empty(voxel_input.shape, dtype=object)\n plt.title(\"input\")\n ax.voxels(vinput, facecolors=colors, edgecolor='k')\n\n ax = fig.add_subplot(1, 3, 2, projection='3d')\n 
plt.title(\"prediction\")\n ax = fig.gca(projection='3d')\n ax.voxels(voutput, facecolors=colors, edgecolor='k')\n\n ax = fig.add_subplot(1, 3, 3, projection='3d')\n plt.title(\"target\")\n ax = fig.gca(projection='3d')\n ax.voxels(vtarget, facecolors=colors, edgecolor='k')\n \n plt.show()\n\n def test(self, test_loader):\n test_prediction = []\n test_loss_history = []\n model = self.best_model\n model.eval()\n model.cuda()\n for i, (x_test,y_test) in enumerate(test_loader, 1):\n if torch.cuda.is_available():\n x_test = x_test.cuda()\n y_test = y_test.cuda()\n\n output_test = model(x_test)\n test_prediction.append(output_test.cpu().detach().numpy())\n #print(test_prediction[i-1].shape)\n \n loss = self.loss_func(output_test, y_test) \n test_loss_history.append(loss.data.cpu().numpy())\n\n overall_test_loss = np.mean(test_loss_history)\n return overall_test_loss, test_prediction\n\n\n","sub_path":"solver_gan.py","file_name":"solver_gan.py","file_ext":"py","file_size_in_byte":8370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"45770685","text":"# -*- coding: utf-8 -*-\n\"\"\"\nЗадание 5.2a\n\nВсё, как в задании 5.2, но, если пользователь ввел адрес хоста, а не адрес сети,\nнадо преобразовать адрес хоста в адрес сети и вывести адрес сети и маску,\nкак в задании 5.2.\n\nПример адреса сети (все биты хостовой части равны нулю):\n* 10.0.1.0/24\n* 190.1.0.0/16\n\nПример адреса хоста:\n* 10.0.1.1/24 - хост из сети 10.0.1.0/24\n* 10.0.5.195/28 - хост из сети 10.0.5.192/28\n\nЕсли пользователь ввел адрес 10.0.1.1/24, вывод должен быть таким:\n\nNetwork:\n10 0 1 0\n00001010 00000000 00000001 00000000\n\nMask:\n/24\n255 255 255 0\n11111111 11111111 11111111 00000000\n\n\nПроверить работу скрипта на разных комбинациях хост/маска, например:\n 10.0.5.195/28, 10.0.1.1/24\n\nВывод сети и маски должен быть упорядочен также, как в примере:\n- столбцами\n- ширина столбца 10 символов (в двоичном формате\n надо добавить два пробела между столбцами\n для разделения октетов между собой)\n\n\nПодсказка:\nЕсть адрес хоста в двоичном формате и маска сети 28. 
Адрес сети это первые 28 бит\nадреса хоста + 4 ноля.\nТо есть, например, адрес хоста 10.1.1.195/28 в двоичном формате будет\nbin_ip = \"00001010000000010000000111000011\"\n\nА адрес сети будет первых 28 символов из bin_ip + 0000 (4 потому что всего\nв адресе может быть 32 бита, а 32 - 28 = 4)\n00001010000000010000000111000000\n\nОграничение: Все задания надо выполнять используя только пройденные темы.\n\n\"\"\"\n\n# answer\nnetwork = input('Введите адрес сети в формате ip/mask: ')\n# разделяем ip и mask\nip = (network.split('/')[0]).split('.')\nmask = int(network.split('/')[1])\n# преобразуем ip в bin\ntemplate0 = \"\"\"\n{0:08b}{1:08b}{2:08b}{3:08b}\n\"\"\"\nipbin = template0.format(int(ip[0]), int(ip[1]), int(ip[2]), int(ip[3]))\n# делаем срез ip по маске и заменяем вырезанное нулями (эквивалентно ip & mask) + удаляем пробельные символы\nipbin = (ipbin[:(mask + 1)] + str('0' * (32 - mask))).strip()\n# выводим Network\ntemplate1 = \"\"\"\nNetwork:\n{0:<10}{1:<10}{2:<10}{3:<10}\n{0:08b} {1:08b} {2:08b} {3:08b}\n\"\"\"\nprint(template1.format(int(ipbin[0:8], 2), int(ipbin[8:16], 2), int(ipbin[16:24], 2), int(ipbin[24:32], 2)))\n# выводим Mask\nmaskbin = '1' * mask + '0' * (32 - mask)\ntemplate2 = \"\"\"\nMask:\n/{0:}\n{1:<10}{2:<10}{3:<10}{4:<10}\n{1:08b} {2:08b} {3:08b} {4:08b}\n\"\"\"\nprint(template2.format(int(mask), int(maskbin[0:8], 2), int(maskbin[8:16], 2), int(maskbin[16:24], 2),\n int(maskbin[24:32], 2)))\n","sub_path":"exercises/05_basic_scripts/task_5_2a.py","file_name":"task_5_2a.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"535731594","text":"# Copyright 2020 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Creates service account, custom role and builds DS API image and deploys to Cloud Run.\"\"\"\n\n\ndef GenerateConfig(context):\n \"\"\"Generate YAML resource configuration.\"\"\"\n\n cmd = \"https://github.com/GoogleCloudPlatform/datashare-toolkit.git\"\n git_release_version = \"master\"\n namespace = \"datashare-apis\"\n cluster_name = \"datashare\"\n cluster_location = context.properties['gkeZone']\n cloud_run_deploy_name = context.properties['cloudRunDeployName']\n container_tag = context.properties['containerTag']\n region = context.properties['region']\n service_acct_name = context.properties['serviceAccountName']\n service_acct_descr = context.properties['serviceAccountDesc']\n custom_role_name = context.properties['customRoleName']\n delete_timeout = '120s'\n general_timeout = context.properties['timeout']\n # admin_sa = context.properties['adminServiceAccount']\n if context.properties['datashareGitReleaseTag'] != None:\n git_release_version = context.properties['datashareGitReleaseTag']\n\n steps = [\n { # Create a service account\n 'name': 'gcr.io/google.com/cloudsdktool/cloud-sdk',\n 'entrypoint': '/bin/bash',\n 'args': ['-c',\n 'gcloud iam service-accounts create ' + service_acct_name + ' --display-name=\"' + 
service_acct_descr + '\" --format=disable || exit 0'\n ]\n },\n { # Clone the Datashare repository\n 'name': 'gcr.io/cloud-builders/git',\n 'dir': 'ds', # changes the working directory to /workspace/ds/\n 'args': ['clone', cmd]\n },\n { # Create the custom role\n 'name': 'gcr.io/google.com/cloudsdktool/cloud-sdk',\n 'entrypoint': '/bin/bash',\n 'args': ['-c', 'gcloud iam roles create ' + custom_role_name + ' --project=$PROJECT_ID --file=config/ds-api-mgr-role-definition.yaml --format=disable || exit 0'],\n 'dir': 'ds/datashare-toolkit/api' # changes the working directory to /workspace/ds/datashare-toolkit/api\n },\n { # Assign the service account to the custom role\n 'name': 'gcr.io/google.com/cloudsdktool/cloud-sdk',\n 'entrypoint': '/bin/bash',\n 'args': ['-c', 'gcloud projects add-iam-policy-binding $PROJECT_ID --member=serviceAccount:' + service_acct_name + '@$PROJECT_ID.iam.gserviceaccount.com --role=\"projects/$PROJECT_ID/roles/' + custom_role_name + '\" --format=disable'\n ]\n },\n { # Submit the build configuration to Cloud Build to be the Datashare API container image only if the ds-api:dev image does not exist\n 'name': 'gcr.io/google.com/cloudsdktool/cloud-sdk',\n 'dir': 'ds/datashare-toolkit',\n 'entrypoint': 'bash',\n 'args': ['-c', 'if ! gcloud container images describe gcr.io/$PROJECT_ID/ds-api:dev; then ' + \n 'gcloud builds submit . --config=api/v1alpha/cloudbuild.yaml --substitutions=TAG_NAME=' + container_tag + \n '; else exit 0; fi'\n ]\n },\n { # Deploy the container image to Cloud Run\n 'name': 'gcr.io/cloud-builders/gcloud',\n 'dir': 'ds/datashare-toolkit',\n 'entrypoint': 'gcloud'\n }\n ]\n # select the correct deploy command based on whether deployToGke is True or False\n if context.properties['deployToGke'] is False or context.properties['deployToGke'] == \"false\":\n steps[5]['args'] = [\n 'run',\n 'deploy',\n cloud_run_deploy_name,\n '--image=gcr.io/$PROJECT_ID/' + cloud_run_deploy_name + ':' + container_tag,\n '--region=' + region,\n '--allow-unauthenticated',\n '--platform=managed',\n '--service-account=' + service_acct_name + '@$PROJECT_ID.iam.gserviceaccount.com'\n ]\n else:\n steps[5]['args'] = [\n 'alpha',\n 'run',\n 'deploy',\n cloud_run_deploy_name,\n '--cluster=' + cluster_name,\n '--cluster-location=' + cluster_location,\n '--namespace=' + namespace,\n '--min-instances=1',\n '--image=gcr.io/$PROJECT_ID/' + cloud_run_deploy_name + ':' + container_tag, \n '--platform=gke',\n '--service-account=' + service_acct_name\n ]\n\n git_release = { # Checkout the correct release\n 'name': 'gcr.io/cloud-builders/git',\n 'dir': 'ds/datashare-toolkit', # changes the working directory to /workspace/ds/datashare-toolkit\n 'args': ['checkout', git_release_version]\n }\n\n if git_release_version != \"master\":\n steps.insert(2, git_release) # insert the git checkout command into after the git clone step\n\n resources = None\n # include the dependsOn property if we are deploying all the components\n use_runtime_config_waiter = context.properties['useRuntimeConfigWaiter']\n if use_runtime_config_waiter:\n waiter_name = context.properties['waiterName']\n resources = [{\n 'name': 'ds-api-build',\n 'action': 'gcp-types/cloudbuild-v1:cloudbuild.projects.builds.create',\n 'metadata': {\n 'runtimePolicy': ['UPDATE_ALWAYS'],\n 'dependsOn': [waiter_name]\n },\n 'properties': {\n 'steps': steps,\n 'timeout': general_timeout\n }\n }]\n else:\n resources = [{\n 'name': 'ds-api-build',\n 'action': 'gcp-types/cloudbuild-v1:cloudbuild.projects.builds.create',\n 'metadata': {\n 
'runtimePolicy': ['UPDATE_ALWAYS']\n },\n 'properties': {\n 'steps': steps,\n 'timeout': general_timeout\n }\n }]\n\n # Listen for delete events and delete the API\n delete_action = {\n 'name': 'delete-api',\n 'action': 'gcp-types/cloudbuild-v1:cloudbuild.projects.builds.create',\n 'metadata': {\n 'dependsOn': ['ds-api-build'],\n 'runtimePolicy': ['DELETE']\n },\n 'properties': {\n 'steps': [{\n 'name': 'gcr.io/google.com/cloudsdktool/cloud-sdk',\n 'entrypoint': '/bin/bash',\n 'args': ['-c', 'gcloud run services delete ' + cloud_run_deploy_name + ' --platform=gke --cluster=datashare' +\n ' --cluster-location=' + region + ' --quiet || exit 0'\n ]\n }],\n 'timeout': delete_timeout\n }\n }\n if context.properties['deployToGke'] is False or context.properties['deployToGke'] == \"false\":\n delete_action['properties']['steps'][0]['args'][1] = 'gcloud run services delete ' + cloud_run_deploy_name + ' --platform=managed --region=' + region + ' --quiet || exit 0'\n resources.append(delete_action)\n\n return {'resources': resources}\n","sub_path":"api/deploy_ds_api.py","file_name":"deploy_ds_api.py","file_ext":"py","file_size_in_byte":7712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"173352546","text":"# Task 4.2\n\nmy_list = [0, -4, 2, 0, -4, -5, 0, 4, 13, 18, -55]\n\n# 1) Use while\ni = 0\nmy_new_list = []\n\nwhile i < len(my_list):\n if my_list[i] % 2 == 0:\n my_new_list.append(my_list[i])\n i += 1\n\n# 2) Use for\nmy_new_list2 = [n for n in my_list if n % 2 == 0]\n\nprint(\"Исходный список:\", my_list)\nprint(\"Список четных чисел (используя while):\", my_new_list)\nprint(\"Список четных чисел (используя for):\", my_new_list2)\n\n","sub_path":"students/shloma/004_homework_04/task_4_2.py","file_name":"task_4_2.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"442237058","text":"n = int(input())\narr = map(int, input().split())\narr = list(arr)\nc = False\n\nfor i in range(1,n):\n if arr[i]*arr[i-1] > 0:\n print(\"YES\")\n c = True\n break\nif not c:\n print(\"NO\")\n","sub_path":"week10/informatics/4/4E.py","file_name":"4E.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"10874383","text":"#!/usr/bin/env python\n\n'''\n@author: Santiago Villarreal\nCreated on October, 2019\n\nDescription:\nLaunches the SAM template of pod-slack-notifier lambda function, for each config file exists in the env to process, so that it gets integrated to the related Slack channel\n\nInputs:\n Parameter 1: the directory to process (dev, it, prod, non-prod). 
It should match with the folder inside the config directory of the pod-slack-alert-notifier cloned repo\n Parameter 2: the path of the pod-slack-alert-notifier cloned repo\n Parameter 3: s3 location of the pod-slack-notifier artifact\n\nNOTE 1: Check that an session to awssso is opened\nNOTE 2: Before running, clone the following repo: git@github.platforms.engineering:POD-Inc/pod-slack-alert-notifier.git\n\nExample 1 (Will find all the configuration files for all the channels in the dev environment):\n python slack_notifiers.py dev /Users/svill4/dev/repos/pod-slack-alert-notifier s3://vcis-lambda-github-source-infra/badb7daac156e951c522a52c879ea548\n\nExample 2 - If we want to run the same but for prod, just change the folder:\n python slack_notifiers.py prod /Users/svill4/dev/repos/pod-slack-alert-notifier s3://vcis-lambda-github-source-infra/badb7daac156e951c522a52c879ea548\n'''\n\nimport boto3\nimport sys\nimport json\nimport glob\nfrom botocore.exceptions import ClientError\nfrom colorama import Fore, Style, Style\nfrom ruamel.yaml import YAML\nimport os\nimport atexit\n\nsam_location = sys.argv[2] + '/saml.yaml'\n\ndef exit_handler():\n print(Style.RESET_ALL + 'The stack creator has ended!')\n os.remove(sam_location + '.tmp')\natexit.register(exit_handler)\n\ncf = boto3.client('cloudformation', region_name='us-east-1')\n\ndef stack_exists(name):\n try:\n data = cf.describe_stacks(StackName = name)\n except ClientError:\n return False\n return data['Stacks'][0]['StackStatus'] == 'CREATE_COMPLETE' or data['Stacks'][0]['StackStatus'] == 'ROLLBACK_COMPLETE'\n\ndef parse_parameters(parameters):\n with open(parameters) as parameter_fileobj:\n parameter_data = json.load(parameter_fileobj)\n return parameter_data\n\ndef parse_sam_template(template):\n yaml=YAML()\n yaml.preserve_quotes = True\n with open(template) as template_fileobj:\n cf_yaml_obj = yaml.load(template_fileobj)\n\n # Set the S3 location of the lambda function implementation\n cf_yaml_obj['Resources']['LogProccesor']['Properties']['CodeUri'] = sys.argv[3]\n\n with open(template + '.tmp', 'w') as fp:\n yaml.dump(cf_yaml_obj, fp)\n\n with open(template + '.tmp') as template_fileobj:\n template_data = template_fileobj.read()\n\n cf.validate_template(TemplateBody=template_data)\n\n return template_data\n\ndef _create_stack(stack_name, template_data, parameter_data):\n print('\\tStack is being created..')\n cf.create_stack(\n StackName=stack_name,\n TemplateBody=template_data,\n Parameters=parameter_data,\n Capabilities=['CAPABILITY_IAM','CAPABILITY_AUTO_EXPAND']\n )\n print('\\t... 
Waiting for the stack to be ready...')\n waiter = cf.get_waiter('stack_create_complete')\n waiter.wait(StackName=stack_name)\n print(Fore.GREEN)\n print('\\tStack has just been created.')\n\ndef _delete_stack(stack_name):\n print('\\tStack is being deleted..')\n cf.delete_stack(\n StackName=stack_name\n )\n waiter = cf.get_waiter('stack_delete_complete')\n waiter.wait(StackName=stack_name)\n print(Fore.GREEN)\n print('\\tStack has been deleted.')\n\ndef main():\n template_data = parse_sam_template(sam_location)\n config_files = glob.glob(sys.argv[2] + '/config/' + sys.argv[1] + '/*')\n for config_file in config_files:\n parameter_data = parse_parameters(config_file)\n file_name = config_file.split('/')[-1]\n stack_name = file_name.replace('-params.json', '')\n print(Style.RESET_ALL + Fore.BLUE + '------------------------------------------------------------------------------------------')\n print('Processing stack ' + stack_name)\n print('------------------------------------------------------------------------------------------' + Style.DIM)\n try:\n if not stack_exists(stack_name):\n print('\\tStack does not exists')\n _create_stack(stack_name, template_data, parameter_data)\n else:\n print('\\tStack already exists, deleting..')\n _delete_stack(stack_name)\n _create_stack(stack_name, template_data, parameter_data)\n\n except Exception as e:\n print(Fore.RED + '\\t' + str(e))\n print('\\tCould not create the stack, skipping...')\n print(Style.RESET_ALL)\n print(Style.RESET_ALL)\nif __name__ == '__main__':\n main()\n","sub_path":"vcis-app-cloudformation-templates/templates/foundations/scripts/slack-notifier/slack_notifiers.py","file_name":"slack_notifiers.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"642515462","text":"\"\"\"\r\nAssignment #5, Part 3a\r\nJun Seob Shim\r\n15/10/2020\r\nIntro to Programming Section 012\r\nPrime Number Finder\r\n\"\"\"\r\nimport math\r\n\r\ninvalidnum = True\r\n\r\n#ask for number to test\r\nwhile invalidnum:\r\n num = int(input(\"Enter a positive number to test: \"))\r\n\r\n if num > 0:\r\n invalidnum = False\r\n else:\r\n print(\"Invalid, try again\")\r\n\r\n#account for 1 and 2\r\nif num == 1:\r\n print(\"1 is technically not a prime number.\")\r\nelif num == 2:\r\n print(\"2 is a prime number!\")\r\n \r\n#test whether number is prime\r\nelse:\r\n #for determining whether number is prime at the end \r\n prime = True\r\n\r\n #divide by integers up to the square root of the number until a divisor is found \r\n for i in range(2,math.ceil(num**(1/2))+1):\r\n #if i is not a factor\r\n if num % i != 0:\r\n print(i,\"is NOT a divisor of\",num,\"... continuing\")\r\n \r\n #if i is a factor\r\n else:\r\n print(i,\"is a divisor of\",num,\"... 
stopping\")\r\n prime = False\r\n break\r\n\r\n print()\r\n \r\n #print if prime or not\r\n if prime == True:\r\n print(num,\"is a prime number!\")\r\n else:\r\n print(num,\"is not a prime number.\")\r\n","sub_path":"Intro to Programming/Assignments/Assignment 5/ShimJunSeob_assign5_part3a.py","file_name":"ShimJunSeob_assign5_part3a.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"254699145","text":"from socket import *\nfrom threading import *\nimport time\nfrom random import *\nfrom math import floor\n\n#alpha = 0.1\n#sendingTime = 0.0\n\n#resultsFile = open(\"ClientDynamicData.txt\", \"w\")\n\nMTU = 1400\nN = MTU\nNMutex = Lock()\nn = 0.5\npacketsOut = 0\npacketNumber = 0\n\nserverName = '127.0.0.1'\nserverPort = 12001\naddr = (serverName, serverPort)\n\n\na = []\nfor i in range(4*MTU): a += str(randint(0,9))\ndata = ''.join(a)\n\n\nclass GetN(Thread):\n def __init__(self, message):\n self.message = message\n Thread.__init__(self)\n self.start() \n \n def run(self):\n global N\n global MTU\n global packetsOut\n \n for chunk in self.message.split():\n print (\"Interarrival time received is: \",chunk)\n # decrement the packets that are out since we just received the response\n packetsOut -= 1\n #print(\"Interarrival time is: \",self.message,'\\n------\\n')\n iarrTime = float(chunk.split(',')[0])\n packNum = int(chunk.split(',')[1])\n\n print(\"Received Ack from packet %i\"%packNum)\n \n if iarrTime <= 0:\n N = MTU\n else:\n N = floor(min(MTU*3,max(60,MTU*(0.001/iarrTime - packetsOut)) ))\n print(\"Max N calculated is: %i and packets out is %i\"%(N,packetsOut))\n \n \nclass SendData():\n def __init__(self, data, grace):\n self.data = data\n self.grace = grace\n self.N = len(data)\n Thread.__init__(self)\n self.work()\n \n def work(self):\n global packetsOut\n global packetNumber\n global n\n global addr\n \n while self.N > 0:\n if self.N <= MTU:\n clientSocket.sendto((self.data+\",%f,%i\"%(self.grace,packetNumber)).encode(), addr)\n packetNumber += 1\n packetsOut += 1\n #print(\"sending last packet\")\n break\n else:\n clientSocket.sendto((self.data[:MTU]+\",%f,%i\"%(self.grace,packetNumber)).encode(), addr)\n packetNumber += 1\n self.N -= MTU\n self.data = self.data[MTU:]\n #print(\"snding intermediate packet\")\n packetsOut += 1\n \n self.grace = n/5\n time.sleep(self.grace)\n \n \nclass ReceiveIarrTime(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.start()\n \n def run(self):\n while(1):\n message = clientSocket.recv(1024).decode()\n GetN(message)\n\n\n#********\n# main:\n\nclientSocket = socket(AF_INET, SOCK_DGRAM)\nclientSocket.bind(('', serverPort+1))\n\ntry:\n #clientSocket.connect((serverName, serverPort))\n ReceiveIarrTime()\n sleepTime = 0;\n \n while 1:\n #sendingTime = time.time()\n #data = str(line[:-1]) + \",\" + \"%.9f\" % n\n start = time.time()\n \n SendData(data[:N], sleepTime)\n \n finish = time.time() \n sleepTime = n-(finish - start)\n # to simulate sending data to the monitoring center every n seconds\n time.sleep(sleepTime)\n\nexcept Exception as e:\n print(e)\n\nfinally:\n clientSocket.close()\n","sub_path":"UDPClient.py","file_name":"UDPClient.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"101252016","text":"import os\nimport sqlite3\n\nimport flask\nimport flask_sqlalchemy\nimport flask_praetorian\nimport flask_cors\nimport sqlalchemy\nfrom 
collections import defaultdict\n\nimport sys\nimport csv\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.tri as tri\nfrom flask import request, jsonify ,send_file\n\ndb = flask_sqlalchemy.SQLAlchemy()\nguard = flask_praetorian.Praetorian()\ncors = flask_cors.CORS()\n\n\n# User model\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(80), unique=True, nullable=False)\n password = db.Column(db.Text)\n roles = db.Column(db.Text)\n\n @classmethod\n def lookup(cls, username):\n return cls.query.filter_by(username=username).one_or_none()\n\n @property\n def rolenames(self):\n try:\n return self.roles.split(',')\n except Exception:\n return []\n\n @classmethod\n def identify(cls, id):\n return cls.query.get(id)\n\n @property\n def identity(self):\n return self.id\n\n\n# initialize app\napp = flask.Flask(__name__)\napp.debug = True\napp.config['SECRET_KEY'] = 'GFRAC'\napp.config['JWT_ACCESS_LIFESPAN'] = {'hours': 0.5}\napp.config['JWT_REFRESH_LIFESPAN'] = {'days': 1}\n\n# flask praetorian instance\nguard.init_app(app, User)\n\n# initialize local DB\napp.config['SQLALCHEMY_DATABASE_URI'] = f\"sqlite:///{os.path.join(os.getcwd(), 'database.db')}\"\ndb.init_app(app)\n\n# intialize cors\n\ncors.init_app(app)\n\n# add admin user\nwith app.app_context():\n db.create_all()\n if db.session.query(User).filter_by(username='admin').count() < 1:\n db.session.add(User(\n username='admin',\n password=guard.hash_password('abcd1234'),\n roles='admin',\n ))\n db.session.commit()\n\n\n# Custom Exception\nclass NoEntryFoundException(Exception):\n pass\n\n\n# API's\n\n@app.route('/api/login', methods=['POST'])\ndef login():\n print(flask.request)\n \"\"\"\n Logs a user in by parsing a POST request containing user credentials and\n issues a JWT token.\n .. example::\n $ curl http://localhost:5000/api/login -X POST \\\n -d '{\"username\":\"admin\",\"password\":\"abcd1234\"}'\n \"\"\"\n request = flask.request.get_json(force=True)\n username = request.get('username', None)\n password = request.get('password', None)\n user = guard.authenticate(username, password)\n u = User.query.filter_by(username=username).first()\n ret = {'access_token': guard.encode_jwt_token(user), 'role': u.roles}\n\n # returning access token , status\n return ret, 200\n\n\n@app.route('/api/refresh', methods=['POST'])\ndef refresh():\n \"\"\"\n Refreshes an existing JWT by creating a new one with new expiration time\n .. example::\n $ curl http://localhost:5000/refresh -X GET \\\n -H \"Authorization: Bearer \"\n \"\"\"\n print(\"refresh request\")\n old_token = flask.request.get_data()\n new_token = guard.refresh_jwt_token(old_token)\n ret = {'access_token': new_token}\n return ret, 200\n\n\n@app.route('/api/protected')\n@flask_praetorian.auth_required\ndef protected():\n \"\"\"\n A protected endpoint. The auth_required decorator will require a header\n containing a valid JWT\n .. example::\n $ curl http://localhost:5000/api/protected -X GET \\\n -H \"Authorization: Bearer \"\n \"\"\"\n return {\"message\": f'protected endpoint (allowed user {flask_praetorian.current_user().username})'}\n\n#\n\n\n@app.route('/api/addnewuser', methods=['POST'])\n@flask_praetorian.auth_required\n@flask_praetorian.roles_required(\"admin\")\ndef new_user():\n \"\"\"\n adds a new user to the database\n .. 
example::\n $ curl http://localhost:5000/api/addnewuser -X POST \\\n -d '{\"username\":\"a\",\"password\":\"a\"}' -H \"Authorization: Bearer \"\n \"\"\"\n print(\"a new user request\")\n request = flask.request.get_json(force=True)\n username = request.get('username', None)\n password = request.get('password', None)\n roles = 'guest'\n guest = User(username=username,\n password=guard.hash_password(password), roles=roles)\n db.session.add(guest)\n try:\n db.session.commit()\n except sqlalchemy.exc.SQLAlchemyError as err:\n return {\"message\": \"Not created, User already exists with same username\"}, 500\n return {\"message\": \"user created\"}, 200\n\n\n@app.route('/api/deleteuser', methods=['DELETE'])\n@flask_praetorian.auth_required\ndef delete_user():\n \"\"\"\n Delete the user details with the given user name\n .. example::\n $ curl http://localhost:5000/api/deleteuser -X POST \\\n -d '{\"username\":\"a\"}' -H \"Authorization: Bearer \"\n \"\"\"\n print(\"a new user request\")\n request = flask.request.get_json(force=True)\n username = request.get('username', None)\n #password = request.get('password', None)\n #roles = 'guest';\n user = User.query.filter_by(username=username).first()\n #guest = User(username = username,password=guard.hash_password(password),roles=roles);\n if user:\n db.session.delete(user)\n db.session.commit()\n return {\"message\": \"User account deleted\"}, 200\n else:\n return {\"message\": \"User doesn't exist\"}, 500\n\n\n@app.route('/api/users', methods=['GET'])\n@flask_praetorian.roles_required(\"admin\")\ndef get_all_users():\n \"\"\"\n A protected endpoint that requires a role. The roles_required decorator\n will require that the supplied JWT includes the required roles\n .. example::\n $ curl http://localhost:5000/api/users -X GET \\\n -H \"Authorization: Bearer \"\n \"\"\"\n users = User.query.all()\n user_details = []\n for user in users:\n user_d = {}\n user_d[\"username\"] = user.username\n user_d[\"role\"] = user.roles\n user_details.append(user_d)\n return {'users': user_details}, 200\n\n@app.route('/api/cartesianPlot', methods=['POST'])\ndef CartesianGraph():\n request = flask.request.get_json(force=True)\n fileName = request.get('fileName', None)\n print(\"FileName is :\", fileName)\n if fileName is None:\n return \"Error: No fileName field provided. Please specify an fileName.\"\n file = 'C:/R3D/HF_PST_PROCESSING_OUTPUT_FILES/' + fileName + '.dat'\n # file = '/home/nithivarn/Downloads/' + fileName + '.dat'\n data = pd.read_csv(file, header=None, skiprows=3)\n data1 = data[0].str.split(expand=True)\n X, Y, Z = data1[0].astype('float').values, data1[1].astype('float').values, data1[2].astype('float').values\n fig = plt.figure()\n\n triang = tri.Triangulation(X, Y)\n tcf = plt.tricontourf(triang, Z, cmap='jet')\n plt.title(fileName, fontsize=14, fontweight='bold')\n plt.xlabel('X', fontsize=14, fontweight='bold')\n plt.ylabel('Y', fontsize=14, fontweight='bold')\n plt.colorbar()\n plt.savefig(fileName+'.png')\n return send_file(fileName+\".png\", mimetype='image/gif')\n\n@app.route('/api/plot', methods=['POST'])\ndef LinePlot():\n request = flask.request.get_json(force=True)\n fileName = request.get('fileName', None)\n fig = plt.figure()\n\n if fileName is None:\n return \"Error: No fileName field provided. 
Please specify an fileName.\"\n fig = plt.figure()\n\n file = 'C:/R3D/HF_PROJECT_DATA/'+fileName+'.dat'\n # file = '/home/nithivarn/Downloads/' + fileName + '.dat'\n data = pd.read_csv(file, skiprows=4, delim_whitespace=True, names=['x', 'y'])\n X, Y = data['x'].values, data['y'].values\n\n if (\"Aper_at_injec_Frac\" in fileName):\n xaxis = \"time(min)\"\n yaxis = \"Fracture Aperture(m) at Injection Point\"\n if (\"Flowrate_Cluster\" in fileName):\n xaxis = \"time(min)\"\n yaxis = \"Flow rate (bpm)\"\n if (\"Length_Frac\" in fileName):\n xaxis = \"time(min)\"\n yaxis = \"Fracture length (m)\"\n if (\"Perforation_Fric_Cluster\" in fileName):\n xaxis = \"time(min)\"\n yaxis = \"Perforation friction (MPa)\"\n\n plt.plot(X, Y)\n plt.xlabel(xaxis)\n plt.ylabel(yaxis)\n # plt.show()\n plt.savefig(fileName+'.png')\n return send_file(fileName+\".png\", mimetype='image/gif')\n\n@app.route('/api/examples', methods=['POST'])\ndef examples_files():\n file = 'D:/R3D_New_Examples.rar'\n return send_file(file, as_attachment=True)\n\n# Run the example\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"backend/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"346437340","text":"def solution(number, k):\n target = len(number) - k\n if len(number) == 1:\n return number\n number = list(number)\n answer = []\n index = 0\n while True:\n temp_index = 0\n max_v = \"-1\"\n if k == 0:\n answer.extend(number[index:])\n break\n for i in range(index, index + k + 1):\n if max_v < number[i]:\n max_v = number[i]\n temp_index = i\n if number[i] == \"9\":\n break\n answer.append(max_v)\n if len(answer) == target:\n break\n k -= temp_index - index\n index = temp_index + 1\n answer = \"\".join(answer)\n return answer\n\n\ntc = [\n [\"1924\", 2],\n [\"1231234\", 3],\n [\"4177252841\", 4],\n [\"9999999999999999999999999999999999999989999\", 40],\n]\nfor n, c in tc:\n print(solution(n, c))\n","sub_path":"programmers/making_biggest_num_programmers.py","file_name":"making_biggest_num_programmers.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"435429589","text":"import pytest\nfrom src import create_app\nfrom datetime import datetime\n\nclass Test_Prequestion_Question_Relationship:\n\n app = create_app(\n mode=\"development\",\n static_path=\"../static\",\n templates_path=\"../templates\",\n instance_path=\"../instance\"\n )\n\n def test_create_relationship(self):\n with self.app.app_context():\n from src.database.db import init_db, distroy_db, get_db\n from src.models.question_model import Question, PreQuestion\n\n distroy_db(self.app)\n init_db(self.app)\n\n current_transaction = get_db().transaction\n\n with current_transaction:\n test_question_1 = Question(\n question=\"Test Question 1\",\n slug=\"test_question_1\",\n language=\"en\"\n )\n test_question_1.save()\n test_prequestion_1 = PreQuestion(\n text = \"This is an example prequestion\",\n slug = \"test_prequestion_1\",\n language = \"en\"\n )\n test_prequestion_1.save()\n\n rel = test_prequestion_1.questions.connect(test_question_1)\n\n pytest.test_question_1 = test_question_1\n pytest.test_prequestion_1 = test_prequestion_1\n pytest.test_prequestion_question_rel_1 = 
rel\n","sub_path":"app/tests/model_tests/relationship_tests/test_prequestion_question_relationship.py","file_name":"test_prequestion_question_relationship.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"315080759","text":"from __future__ import absolute_import, division\n\nimport os\nimport sys\nimport logging\nimport numpy as np\nfrom wikipedia2vec import Wikipedia2Vec\n\n# Set PATHs\nPATH_TO_SENTEVAL = '../'\nPATH_TO_DATA = '../data'\n# Download Russian model from https://wikipedia2vec.github.io/wikipedia2vec/pretrained/ to folder 'word2vec_ru'\nPATH_TO_MODEL = os.path.join('word2vec_ru', 'ruwiki_20180420_300d.pkl')\n\n# import SentEval\nsys.path.insert(0, PATH_TO_SENTEVAL)\nimport senteval\n\n\n# SentEval prepare and batcher\ndef prepare(params, samples):\n # Load model\n if not os.path.exists(PATH_TO_MODEL):\n raise Exception(\"There are no pretrained model in \\\"\" + PATH_TO_MODEL + \"\\\"\")\n\n params.model = Wikipedia2Vec.load(PATH_TO_MODEL)\n return\n\n\ndef get_sentence_embedding(sentence, params):\n embedding = np.zeros((300,), dtype=np.float32)\n for token in sentence:\n token = token.lower()\n if params.model.dictionary.get_word(token) is not None:\n embedding += params.model.get_word_vector(token)\n embedding = embedding / len(sentence)\n return embedding\n\n\ndef batcher(params, batch):\n batch = [sent if sent != [] else [''] for sent in batch]\n embeddings = [get_sentence_embedding(sentence, params) for sentence in batch]\n return embeddings\n\n\n# Set params for SentEval\nparams_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': False, 'kfold': 5, 'batch_size': 128,\n 'classifier': {'nhid': 0, 'optim': 'rmsprop', 'tenacity': 3, 'epoch_size': 2}}\n\n# Set up logger\nlogging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)\n\nif __name__ == \"__main__\":\n se = senteval.engine.SE(params_senteval, batcher, prepare)\n transfer_tasks = ['SICKEntailment_RU', 'SST2_RU', 'SST3_RU', 'TREC_RU', 'MRPC_RU'\n 'STSBenchmark_RU', 'SICKRelatedness_RU'\n ]\n results = se.eval(transfer_tasks)\n print(results)\n","sub_path":"RU_EN_examples/word2vec_ru.py","file_name":"word2vec_ru.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"84207340","text":"def max_crossing_sum(arr, low, mid, high):\n sums = 0\n left_sum = float('-inf')\n\n for i in range(mid, 0, -1):\n sums = sums + arr[i]\n\n if(sums > left_sum):\n left_sum = sums\n\n right_sum = float('-inf')\n sums = 0\n\n for j in range(mid + 1, high + 1):\n sums = sums + arr[j]\n\n if(sums > right_sum):\n right_sum = sums\n\n return max(left_sum + right_sum, left_sum, right_sum)\n\n\ndef max_subarray(arr, low, high):\n if(high == low):\n return(arr[high])\n\n mid = (low + high)/2\n\n return max(\n max_subarray(arr, low, mid),\n max_subarray(arr, mid +1, high),\n max_crossing_sum(arr, low, mid, high)\n )\n\na = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]\n\nprint(max_subarray(a, 0, len(a) - 1))\n\n\n# O(n log n)\n","sub_path":"max_subarray_sum_divide_and_conquer.py","file_name":"max_subarray_sum_divide_and_conquer.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"61113016","text":"from flask import Blueprint, request, jsonify\nfrom tools.auth import authorize\nfrom models.task import Task\nfrom database 
import db\n\n\ntasks = Blueprint(\"tasks\", __name__)\n\n\n@tasks.route(\"/tasks\", methods=[\"POST\", \"GET\"])\n@authorize\ndef add_or_list(user):\n if request.method == \"POST\":\n task = Task(**request.get_json(), user=user)\n db.session.add(task)\n db.session.commit()\n\n return jsonify(task=task.for_task), 200\n else:\n tasks = Task.query.filter_by(user_id=user.id).all()\n list_of_tasks = [task.for_task for task in tasks]\n\n return jsonify(tasks=list_of_tasks), 200\n\n\n@tasks.route(\"/tasks/\", methods=[\"PUT\", \"DELETE\"])\n@authorize\ndef delete_task(user, task_id):\n task = Task.query.get(task_id)\n if not task:\n return \"Task doesn\\'t exist\", 400\n\n if task.user_id != user.id:\n return \"You don\\'t own this task!\", 401\n\n if request.method == \"PUT\":\n new_body = request.get_json()\n if task.body != new_body[\"body\"]:\n task['body'] = new_body[\"body\"]\n db.session.commit()\n\n return jsonify(task=task.for_task), 200\n\n else:\n db.session.delete(task)\n db.session.commit()\n return jsonify(Status=\"OK\"), 200\n","sub_path":"views/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"214704662","text":"'''\nCreated on Aug 6, 2012\n\n@author: cristi\n'''\n\ndef read_list(filename):\n f_in = open(filename, 'r')\n input_list = []\n d = {}\n\n for line in f_in:\n if line == '\\n':\n input_list.append(d)\n d = {}\n else:\n tokens = line.split()\n key = tokens[0]\n value = int(tokens[1])\n d[key] = value\n else:\n if d != {}:\n input_list.append(d)\n\n f_in.close()\n return input_list\n\n\ndef write_list(filename, lst):\n f_out = open(filename, 'w')\n f_out.close()\n\n\ndef write_indices(filename, lst):\n f_out = open(filename, 'w')\n \n for index in lst:\n f_out.write(str(index) + ' ')\n \n f_out.close()\n","sub_path":"plp/beginner/p3/util/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"595113224","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 28 16:25:29 2019\n\n@author: drumm\n\"\"\"\nfrom neuron import Neuron\n#inputs are the data you feed it the neuron inputs are the inputs from other neurons\ninputs = [[-1,0,0],[-1,0,1],[-1,1,0],[-1,1,1]]\nn1= Neuron(3,[-.05,-.02,.02],0,.25)\n\n#loop for this many running through inputs\nloopNum= 20\nfor x in range(0,loopNum):\n if(x%len(inputs) == 0):\n print(\"RUN: \" + str(int(x/len(inputs))) + \"\\n==================================\\n\")\n n1.setInput(inputs[x%len(inputs)])\n if inputs[x%len(inputs)] == [-1,0,0]:\n n1.setTarget(0)\n else:\n n1.setTarget(1)\n print(\"input was: \" + str(inputs[x%len(inputs)]) + \" |\" + str(n1.output()) + \"\\nactivation was: \" + str(n1.total))\n\nn1.getWeights()","sub_path":"MachineLearning/Perceptron Learning/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"469427818","text":"def login_attemp(n, s, a):\n asize = len(a)\n b = 0\n t = []\n for i in s:\n curr = ''\n lens = len(i)\n for j in range(lens):\n curr += a[b + j]\n if b + lens - 1 < asize:\n if i == curr:\n b += lens\n t.append(i)\n if b == asize :\n for i in t:\n print(s[i])\n\n\n\n '''\n for i in range(n):\n comp = ''\n size = len(s[i])\n for p in range(size):\n print(b)\n comp += a[b + p]\n print(comp)\n if b + size - 1 < asize:\n if s[i] == comp:\n 
b+=size\n t.append(i)\n i=-1\n '''\n if b==asize:\n for i in range(len(t)):\n print(s[t[i]], end=\" \")\n else:\n print(\"WRONG PASSWORD\")\n return None\n'''\nfor _ in range(int(input())):\n n = int(input())\n s = [x for x in input().split()]\n a = input()\n'''\nlogin_attemp(3, [\"ab\", \"abcd\", \"cd\"], \"abcd\")\n\n\n\n\n\n\n\n\n\n\n","sub_path":"codechef/C002_guess.py","file_name":"C002_guess.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"90843516","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\nprint('Please Wait...')\n\nimport serial as serial\nimport face_recognition\nimport cv2\nimport numpy as np\nimport firebase as firebase\nimport glob\nimport os\nfrom datetime import datetime\nimport serial as serial\nimport pandas as pd\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow, QLabel,QDateEdit, QGridLayout,QTimeEdit,QWidget,QFrame,QErrorMessage,QFileDialog,QAction,QToolBar,QComboBox, qApp, QApplication,QWidget, QSlider, QLineEdit, QLabel, QPushButton, QScrollArea,QHBoxLayout,QVBoxLayout\nfrom PyQt5.QtCore import QSize,Qt, QTimer\nfrom PyQt5.QtGui import *\nimport sys\nimport math\nimport os\nfrom PyQt5.QtPrintSupport import *\nfrom PyQt5.QtMultimedia import *\nfrom PyQt5.QtMultimediaWidgets import *\nimport cv2\nimport numpy as np\nimport glob\nimport random\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\nfrom matplotlib.figure import Figure\nimport time\n\n\n# In[2]:\n\n\n\n\nclass PageWindow(QtWidgets.QMainWindow):\n gotoSignal = QtCore.pyqtSignal(str)\n\n def goto(self, name):\n self.gotoSignal.emit(name)\n \n \n\n\n# In[3]:\n\n\nclass MainWindow(PageWindow):\n def __init__(self):\n super().__init__()\n #self.ser = serial.Serial(port='COM3', baudrate=115200, bytesize=8, parity='N', stopbits=1)\n \n self.seperator_vertical = QVSeperationLine()\n self.seperator_horizontal = QHSeperationLine()\n self.initUI()\n self.setWindowTitle(\"MainWindow\")\n\n def initUI(self):\n self.homeUI()\n \n \n def homeUI(self):\n self.preview_widget = QWidget()\n self.footer_widget = QWidget()\n self.home = True\n self.viewfinder = QCameraViewfinder()\n self.viewfinder.show()\n self.setCentralWidget(self.viewfinder)\n self.cap = None # -capture <-> +cap\n self.horizontalLayout = QHBoxLayout()\n self.horizontalLayout2 = QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.verticalLayout = QVBoxLayout()\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.image_label = QLabel()\n self.image_label.setText(\"\")\n self.image_label.setObjectName(\"image_label\")\n \n self.temp = QLabel()\n self.temp.setText(\"Temperature:\")\n self.temp.setObjectName(\"temp\")\n \n self.temp_reading = QLabel()\n self.temp_reading.setText(\"temp_reading\")\n self.temp_reading.setObjectName(\"temp_reading\")\n \n self.temp2 = QLabel()\n self.temp2.setText(\"Temperature 2:\")\n self.temp2.setObjectName(\"temp2\")\n \n self.temp2_reading = QLabel()\n self.temp2_reading.setText(\"Temperature2 Reading\")\n self.temp2_reading.setObjectName(\"temp2_reading\")\n \n self.image_label.setScaledContents(True)\n \n self.matplotlibWidget = MatplotlibWidget(self)\n self.threadSample = ThreadSample(self)\n self.threadSample.newSample.connect(self.on_threadSample_newSample)\n self.threadSample.finished.connect(self.on_threadSample_finished)\n \n self.gridLayout = QGridLayout(self) \n self.gridLayout.addWidget(self.temp,0,0)\n 
self.gridLayout.addWidget(self.temp_reading,0,1)\n self.gridLayout.addWidget(self.temp2,1,0)\n self.gridLayout.addWidget(self.temp2_reading,1,1)\n \n \n self.horizontalLayout.addWidget(self.image_label)\n self.horizontalLayout.addWidget(self.seperator_vertical)\n self.horizontalLayout.addLayout(self.gridLayout)\n \n self.preview_widget.setLayout(self.horizontalLayout)\n self.preview_widget.setMinimumHeight(200)\n self.preview_widget.setMaximumHeight(200)\n self.preview_widget.setMinimumWidth(600)\n self.preview_widget.setMaximumWidth(600)\n \n \n \n \n self.horizontalLayout2.addWidget(self.matplotlibWidget)\n self.horizontalLayout2.addWidget(self.seperator_vertical)\n #self.horizontalLayout2.addWidget(self.clock)\n self.clock(self.horizontalLayout2)\n \n self.footer_widget.setLayout(self.horizontalLayout2)\n self.footer_widget.setMinimumHeight(250)\n self.footer_widget.setMaximumHeight(250)\n self.footer_widget.setMinimumWidth(600)\n self.footer_widget.setMaximumWidth(600)\n \n self.verticalLayout.addWidget(self.preview_widget)\n self.verticalLayout.addWidget(self.seperator_horizontal)\n self.verticalLayout.addWidget(self.footer_widget)\n #self.verticalLayout.addWidget(self.image_label2)\n \n \n self.timer = QTimer(self, interval=5)\n self.timer.timeout.connect(self.update_frame)\n self._image_counter = 0\n centralWidget = QWidget(self) \n self.setCentralWidget(centralWidget) \n self.centralWidget().setLayout(self.verticalLayout)\n# self.setCentralWidget(self.scroll)\n self.setGeometry( 300, 300, 400, 700 )\n\n def read_temp(self):\n temp = []\n while True:\n self.ser.write(b'0x55,0xAA,5,1,4')\n response = self.ser.readline()\n #print(str(response))\n if 'body' in str(response):\n temp.append(str(response))\n #print(\"temp-\"+str(response))\n #print(temp)\n elif 'Vbat' in str(response):\n if len(temp)!=0:\n print(\"Done-\"+ ' '.join(temp))\n self.start_webcam()\n self.update_frame(self.filter(''.join(temp)))\n temp = []\n def filter(self,text):\n \n text = text.replace('bTbody','body')\n text = text.replace('\\'','')\n \n text = text.replace('\\\\r\\n\\'b\\'Tbody','-')\n text = text.replace('\\\\r','')\n text = text.replace('\\r','')\n text = text.replace('\\\\xa8','')\n text = text.replace('\\\\xa1\\\\xe6','')\n text = text.replace('\\\\n','-')\n text = text.replace(' ','')\n text = text.replace(', ',',')\n text = text.replace('=','_')\n text = text.replace(',','-')\n return text \n \n \n @QtCore.pyqtSlot()\n def start_webcam(self):\n if self.cap is None:\n self.cap = cv2.VideoCapture(0)\n self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n self.timer.start()\n \n def closeEvent(self, event):\n print(\"closing PyQtTest\")\n self.cap.close()\n\n @QtCore.pyqtSlot()\n def update_frame(self,file_name):\n ret, image = self.cap.read()\n self.face_detect(image,file_name)\n simage = cv2.flip(image, 1)\n self.displayImage(image, True)\n\n def face_detect(self,image,file_name):\n frame = image\n face_locations = []\n face_encodings = []\n face_names = []\n process_this_frame = True\n i = 0\n face_detect = True\n # Resize frame of video to 1/4 size for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n self.dirname = 'd:'\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings in the current frame of 
video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n # See if the face is a match for the known face(s)\n face_detect=False\n name = \"Unknown\"\n cv2.imwrite(os.path.abspath(os.path.join(self.dirname,(datetime.today().strftime('%Y-%m-%d')+'-'+file_name+'-'+str(++i)+'.png'))),frame)\n #self.storage.upload(self.dirname,(datetime.today().strftime('%Y-%m-%d')+'-'+file_name+'-'+str(i)+'.png'))\n\n\n i = i+1\n \n\n print(\"I see someone named {}!\".format(name))\n # # If a match was found in known_face_encodings, just use the first one.\n # if True in matches:\n # first_match_index = matches.index(True)\n # name = known_face_names[first_match_index]\n\n # Or instead, use the known face with the smallest distance to the new face\n \n\n process_this_frame = not process_this_frame\n\n\n \n @staticmethod\n @QtCore.pyqtSlot()\n def capture_image(self):\n flag, frame = self.cap.read()\n timestamp = time.strftime(\"%d-%b-%Y-%H_%M_%S\")\n \n self.save_seq += 1\n\n path = self.save_path # \n if flag:\n QtWidgets.QApplication.beep()\n name = \"my_image.jpg\"\n cv2.imwrite(os.path.join(self.save_path, \"%s-%04d-%s.jpg\" % (\n self.current_camera_name,\n self.save_seq,\n timestamp\n )), frame)\n self._image_counter += 1\n\n \n def displayImage(self, img, window=True):\n qformat = QImage.Format_Indexed8\n if len(img.shape)==3 :\n if img.shape[2]==4:\n qformat = QImage.Format_RGBA8888\n else:\n qformat = QImage.Format_RGB888\n outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)\n outImage = outImage.rgbSwapped()\n if window:\n self.image_label.setStyleSheet(\"\"\"\n QLabel {\n height:300px !important;\n \n }\n \"\"\")\n \n self.image_label.setPixmap(QPixmap.fromImage(outImage))\n \n def clock(self,layout):\n self.verticalLayoutClock = QVBoxLayout(self)\n self.dateEdit = QDateEdit(self)\n self.dateEdit.setDisplayFormat(\"MMM dd yyyy\")\n self.dateEdit.setDisabled(True) \n self.verticalLayoutClock.addWidget(self.dateEdit)\n self.timeEdit = QTimeEdit(self)\n self.timeEdit.setDisplayFormat(\"hh:mm:ss AP\")\n self.timeEdit.setDisabled(True) \n self.verticalLayoutClock.addWidget(self.timeEdit)\n self.updateTime()\n self.timer = QTimer(self)\n self.timer.timeout.connect(self.updateTime)\n self.timer.start(1000)\n layout.addLayout(self.verticalLayoutClock)\n \n @QtCore.pyqtSlot(list)\n def on_threadSample_newSample(self, sample):\n self.matplotlibWidget.axis.plot(sample)\n self.matplotlibWidget.canvas.draw()\n\n @QtCore.pyqtSlot()\n def on_threadSample_finished(self):\n self.samples += 1\n if self.samples <= 2:\n self.threadSample.start()\n \n @QtCore.pyqtSlot()\n def on_pushButtonPlot_clicked(self):\n self.samples = 0\n self.matplotlibWidget.axis.clear()\n self.threadSample.start()\n \n def updateTime(self):\n current = QtCore.QDateTime.currentDateTime()\n self.dateEdit.setDate(current.date())\n self.timeEdit.setTime(current.time())\n \n \n\n\n# In[4]:\n\n\nclass GalleryWindow(PageWindow):\n def __init__(self):\n super().__init__()\n self.initUI()\n def initUI(self):\n self.scrollView()\n \n def scrollView(self):\n self.home = False\n self.scroll = QScrollArea() # Scroll Area which contains the widgets, set as the centralWidget\n self.widget = QWidget() \n self.widget_image = QWidget() # Widget that contains the collection of Vertical Box\n self.hbox = QHBoxLayout() # The Vertical Box that contains the Horizontal Boxes of labels 
and buttons\n self.vbox = QVBoxLayout() \n \n self.gridLayout_gallery = QGridLayout(self) \n \n \n for img in self.getImage():\n widget_image = QWidget()\n hbox = QHBoxLayout()\n gridLayout_gallery = QGridLayout(self)\n object = QLabel(\"TextLabel\")\n temp = QLabel(\"Temp:\")\n temp_reading = QLabel(\"Reading\")\n temp2 = QLabel(\"Temp2:\")\n temp2_reading = QLabel(\"Reading\")\n \n time = QLabel(\"Time:\")\n time_reading = QLabel(\"Reading\")\n \n date = QLabel(\"Date:\")\n date_reading = QLabel(\"Reading\")\n \n object.setPixmap(QPixmap(img))\n hbox.addWidget(object)\n \n \n gridLayout_gallery.addWidget(time,0,0)\n gridLayout_gallery.addWidget(time_reading,0,1)\n gridLayout_gallery.addWidget(date,1,0)\n gridLayout_gallery.addWidget(date_reading,1,1)\n gridLayout_gallery.addWidget(temp,2,0)\n gridLayout_gallery.addWidget(temp_reading,2,1)\n gridLayout_gallery.addWidget(temp2,3,0)\n gridLayout_gallery.addWidget(temp2_reading,3,1)\n \n \n hbox.addLayout(gridLayout_gallery)\n widget_image.setLayout(hbox)\n widget_image.setMaximumHeight(250)\n widget_image.setMinimumHeight(250)\n self.vbox.addWidget(widget_image)\n \n \n\n self.widget.setLayout(self.vbox)\n self.widget.setMaximumWidth(350)\n self.widget.setMinimumWidth(350)\n\n #Scroll Area Properties\n self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.scroll.setWidgetResizable(True)\n self.scroll.setWidget(self.widget)\n self.setGeometry( 300, 300, 400, 700 )\n self.setCentralWidget(self.scroll)\n \n def getImage(self):\n img_path = []\n for img in glob.glob(\"d:/*.jpg\"):\n\n img_path.append(img)\n return img_path\n \n\n \n\n\n# In[5]:\n\n\nclass MatplotlibWidget(QtWidgets.QWidget):\n def __init__(self, parent=None):\n super(MatplotlibWidget, self).__init__(parent)\n\n self.figure = Figure()\n self.canvas = FigureCanvasQTAgg(self.figure)\n\n self.axis = self.figure.add_subplot(111)\n \n self.layoutVertical = QtWidgets.QVBoxLayout(self)#QVBoxLayout\n self.layoutVertical.addWidget(self.canvas)\n\nclass ThreadSample(QtCore.QThread):\n newSample = QtCore.pyqtSignal(list)\n\n def __init__(self, parent=None):\n super(ThreadSample, self).__init__(parent)\n\n def run(self):\n randomSample = random.sample(range(0, 10), 10)\n\n self.newSample.emit(randomSample)\n\n\n# In[6]:\n\n\nclass QHSeperationLine(QFrame):\n '''\n a horizontal seperation line\\n\n '''\n def __init__(self):\n super().__init__()\n self.setMinimumWidth(1)\n self.setFixedHeight(20)\n self.setFrameShape(QtWidgets.QFrame.HLine)\n self.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)\n return\n\nclass QVSeperationLine(QFrame):\n '''\n a vertical seperation line\\n\n '''\n def __init__(self):\n super().__init__()\n self.setFixedWidth(20)\n self.setMinimumHeight(1)\n self.setFrameShape(QtWidgets.QFrame.VLine)\n self.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)\n return\n\n\n# In[7]:\n\n\nclass Overlay(QWidget):\n\n def __init__(self, parent = None):\n \n QWidget.__init__(self, parent)\n palette = QPalette(self.palette())\n palette.setColor(palette.Background, Qt.transparent)\n self.setPalette(palette)\n \n def paintEvent(self, event):\n \n painter = QPainter()\n painter.begin(self)\n painter.setRenderHint(QPainter.Antialiasing)\n painter.fillRect(event.rect(), QBrush(QColor(255, 255, 255, 127)))\n painter.setPen(QPen(Qt.NoPen))\n \n for i in 
range(6):\n if (self.counter / 5) % 6 == i:\n painter.setBrush(QBrush(QColor(127 + (self.counter % 5)*32, 127, 127)))\n else:\n painter.setBrush(QBrush(QColor(127, 127, 127)))\n painter.drawEllipse(\n self.width()/2 + 30 * math.cos(2 * math.pi * i / 6.0) - 10,\n self.height()/2 + 30 * math.sin(2 * math.pi * i / 6.0) - 10,\n 20, 20)\n \n painter.end()\n \n \n \n \n def showEvent(self, event):\n \n self.timer = self.startTimer(50)\n self.counter = 0\n \n def timerEvent(self, event):\n \n self.counter += 1\n self.update()\n if self.counter == 600:\n self.killTimer(self.timer)\n self.hide()\n\n\n# In[8]:\n\n\n\nclass ARCWindow(QMainWindow):\n def __init__(self):\n self.home = True\n QMainWindow.__init__(self)\n self.seperator_vertical = QVSeperationLine()\n self.seperator_horizontal = QHSeperationLine()\n #self.homeUI()\n self.setMinimumSize(QSize(640, 480)) \n self.setWindowTitle(\"ARC\") \n self.statusmessage(\"Ready\")\n self.available_cameras = QCameraInfo.availableCameras()\n if not self.available_cameras:\n pass #quit\n\n# self.status = QStatusBar()\n# self.setStatusBar(self.status)\n\n self.stacked_widget = QtWidgets.QStackedWidget()\n self.setCentralWidget(self.stacked_widget)\n\n self.m_pages = {}\n\n self.register(MainWindow(), \"main\")\n self.register(GalleryWindow(), \"gallery\")\n\n self.goto()\n\n \n \n\n\n self.save_path = \"\"\n\n \n\n # Set the default camera.\n #self.select_camera(0)\n \n# menuBar\n \n \n self.menubarUI()\n \n \n \n\n \n \n# Toolbar\n\n self.toolbarUI()\n \n # scrollview\n #self.scrollView()\n \n \n \n #gridLayout = QGridLayout(self) \n #centralWidget.setLayout(gridLayout) \n \n #time = QLabel(\"Time:\", self)\n #temp = QLabel(\"temp:\", self)\n #image = QLabel(\"Image:\", self)\n \n #time.setAlignment(QtCore.Qt.AlignRight) \n \n #temp.setAlignment(QtCore.Qt.AlignRight) \n #gridLayout.addWidget(time, 0, 1)\n #gridLayout.addWidget(temp, 1, 1)\n #gridLayout.addWidget(temp, 0, 0,2,2)\n self.overlay = Overlay(self.centralWidget())\n self.overlay.hide()\n \n def register(self, widget, name):\n self.m_pages[name] = widget\n self.stacked_widget.addWidget(widget)\n if isinstance(widget, PageWindow):\n widget.gotoSignal.connect(self.goto)\n\n \n def goto(self):\n #name = 'main' if self.home else 'gallery'\n if self.home:\n self.name = 'main'\n self.home = False\n else:\n self.name = 'gallery'\n self.home=True\n if self.name in self.m_pages:\n widget = self.m_pages[self.name]\n self.stacked_widget.setCurrentWidget(widget)\n #self.setWindowTitle(widget.windowTitle())\n \n \n \n\n \n @QtCore.pyqtSlot()\n def capture_image(self):\n flag, frame = self.cap.read()\n timestamp = time.strftime(\"%d-%b-%Y-%H_%M_%S\")\n self.current_camera_name = 0\n self.save_seq += 1\n \n path = self.save_path # \n if flag:\n QtWidgets.QApplication.beep()\n name = \"my_image.jpg\"\n cv2.imwrite(os.path.join(self.save_path, \"%s-%04d-%s.jpg\" % (\n self.current_camera_name,\n self.save_seq,\n timestamp\n )), frame)\n self._image_counter += 1\n\n \n \n \n \n def toolbarUI(self):\n \n \n plotGraph = QAction(QIcon(os.path.join('icon', 'chart_line.jpg')), 'Plot', self)\n plotGraph.setShortcut('Ctrl+P')\n \n plotGraph.setStatusTip(\"Plot Graph\")\n #self.pushButtonPlot.setText(\"Plot\")\n # plotGraph.triggered.connect(self.on_pushButtonPlot_clicked)\n #self.pushButtonPlot.clicked.connect(self.on_pushButtonPlot_clicked)\n \n \n\n exitAct = QAction(QIcon(os.path.join('icon', 'exit.jpg')), 'Exit', self)\n exitAct.setShortcut('Ctrl+Q')\n exitAct.setStatusTip(\"Exit\")\n 
exitAct.triggered.connect(qApp.quit)\n\n msg = 'Gallery' if self.home else 'Home'\n \n \n \n \n changePageAct = QAction(QIcon(os.path.join('icon', 'gallery.png')), msg, self)\n \n changePageAct.triggered.connect(self.goto)\n changePageAct.setStatusTip(msg)\n \n \n \n self.toolbar = self.addToolBar('tools')\n self.toolbar.setIconSize(QSize(22, 22))\n self.toolbar.addAction(exitAct)\n self.toolbar.addAction(plotGraph)\n self.toolbar.addAction(changePageAct)\n \n camera_toolbar = QToolBar(\"Camera\")\n camera_toolbar.setIconSize(QSize(22, 22))\n self.addToolBar(camera_toolbar)\n\n photo_action = QAction(QIcon(os.path.join('icon', 'camera-black.png')), \"Take photo...\", self)\n photo_action.setStatusTip(\"Take photo of current view\")\n #photo_action.triggered.connect(self.take_photo)\n photo_action.triggered.connect(MainWindow.capture_image)\n camera_toolbar.addAction(photo_action)\n\n change_folder_action = QAction(QIcon(os.path.join('icon', 'blue-folder-horizontal-open.png')), \"Change save location...\", self)\n change_folder_action.setStatusTip(\"Change folder where photos are saved.\")\n change_folder_action.triggered.connect(self.change_folder)\n camera_toolbar.addAction(change_folder_action)\n\n camera_selector = QComboBox()\n camera_selector.addItems([c.description() for c in self.available_cameras])\n camera_selector.currentIndexChanged.connect( self.select_camera )\n\n camera_toolbar.addWidget(camera_selector)\n \n def select_camera(self, i):\n self.camera = QCamera(self.available_cameras[i])\n self.camera.setViewfinder(self.viewfinder)\n self.camera.setCaptureMode(QCamera.CaptureStillImage)\n self.camera.error.connect(lambda: self.alert(self.camera.errorString()))\n self.camera.start()\n\n self.capture = QCameraImageCapture(self.camera)\n self.capture.error.connect(lambda i, e, s: self.alert(s))\n self.capture.imageCaptured.connect(lambda d, i: self.statusmessage(\"Image %04d captured\" % self.save_seq))\n\n self.current_camera_name = self.available_cameras[i].description()\n self.save_seq = 0\n \n \n\n \n \n \n \n def change_folder(self):\n path = QFileDialog.getExistingDirectory(self, \"Snapshot save location\", \"\")\n if path:\n self.save_path = path\n self.save_seq = 0\n \n \n \n def alert(self, s):\n \"\"\"\n Handle errors coming from QCamera dn QCameraImageCapture by displaying alerts.\n \"\"\"\n err = QErrorMessage(self)\n err.showMessage(s)\n \n \n def menubarUI(self):\n exitAct = QAction(QIcon('exit.png'), '&Exit', self)\n exitAct.setShortcut('Ctrl+Q')\n exitAct.setStatusTip('Exit application')\n exitAct.triggered.connect(qApp.quit)\n \n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&File')\n fileMenu.addAction(exitAct)\n \n def statusmessage(self,msg):\n self.statusBar().showMessage(msg)\n \n\n \n \n def resizeEvent(self, event):\n \n self.overlay.resize(event.size())\n event.accept()\n \n def take_photo(self):\n timestamp = time.strftime(\"%d-%b-%Y-%H_%M_%S\")\n self.capture.capture(os.path.join(self.save_path, \"%s-%04d-%s.jpg\" % (\n self.current_camera_name,\n self.save_seq,\n timestamp\n )))\n self.save_seq += 1\n \n \n \n\n\n \n\n \n def closeEvent(self, event):\n #self\n print('test')\n\n\n\ndef make_label(master, x, y, h, w, *args, **kwargs):\n f = Frame(master, height=h, width=w)\n f.pack_propagate(0) # don't shrink\n f.place(x=x, y=y)\n label = Label(f, *args, **kwargs)\n label.pack(fill=BOTH, expand=1)\n return label\n\n\n# In[ ]:\n\n\n\n \nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n app.setStyle(\"Fusion\")\n \n\n # Now 
use a palette to switch to dark colors:\n# palette = QPalette()\n# palette.setColor(QPalette.Window, QColor(53, 53, 53))\n# palette.setColor(QPalette.WindowText, Qt.white)\n# palette.setColor(QPalette.Base, QColor(25, 25, 25))\n# palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n# palette.setColor(QPalette.ToolTipBase, Qt.white)\n# palette.setColor(QPalette.ToolTipText, Qt.white)\n# palette.setColor(QPalette.Text, Qt.white)\n# palette.setColor(QPalette.Button, QColor(53, 53, 53))\n# palette.setColor(QPalette.ButtonText, Qt.white)\n# palette.setColor(QPalette.BrightText, Qt.red)\n# palette.setColor(QPalette.Link, QColor(42, 130, 218))\n# palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n# palette.setColor(QPalette.HighlightedText, Qt.black)\n# app.setPalette(palette)\n \n \n mainWin = ARCWindow()\n mainWin.show()\n sys.exit( app.exec_() )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nb'====================================\\r\\n'\nb'T Ambience = 33.372 C\\r\\n'\nb'1234.534\\r\\n'\nb'1229.298\\r\\n'\nb'vs = -5.236\\r\\n'\nb'vs = -1.632, calibrate modify\\r\\n'\nb'vs = -1.667, emissivity compensate\\r\\n'\nb'to1 = 32.360\\r\\n'\nb'to2 = 33.207\\r\\n'\nb'T Object = 32.671 C\\r\\n'\nb'T body = 32.557 C, ambience compensate\\r\\n'\nb'T body = 36.196 C, weak low\\r\\n'\nb'cfg.mode = 1\\r\\n'\n","sub_path":"home_screen.py","file_name":"home_screen.py","file_ext":"py","file_size_in_byte":26001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"421233344","text":"# coding: utf-8\n__author__ = 'liuyf'\n__date__ = '2018/1/15 18:33'\n\nfrom config.singleton import Singleton\nfrom kazoo.client import KazooClient\nfrom tools.get_dict_res import GetDictRes\nfrom config.configuration import CONFIG\n\nclass ZookeeperClient(Singleton): # 继承单例模式\n \"\"\"\n zookeeper数据模型接口操作 \n \"\"\"\n def __init__(self):\n if self._first_init:\n self.zk_start()\n self._first_init = False\n\n # def __init__(self):\n # self.zk_start()\n\n def zk_start(self):\n \"\"\"\n 与zookeeper建立连接\n \"\"\"\n self.zk = KazooClient(hosts=CONFIG[\"zk_host\"]+\":\"+CONFIG[\"zk_port\"])\n self.zk.start()\n\n def get_zk_client(self):\n return self.zk\n\n def zk_stop(self):\n \"\"\"\n 关闭zookeeper连接\n :return:\n \"\"\"\n self.zk.stop()\n\n def check_node_exists(self, node_path):\n \"\"\"\n 判断node是否存在\n :param node_path: str \n :return: dict_res\n \"\"\"\n dict_res = GetDictRes().get_dict_res()\n if self.zk.exists(node_path):\n dict_res['status'] = 0\n dict_res['result'] = node_path+\" exists.\"\n else:\n dict_res['status'] = 1\n dict_res['result'] = node_path+\" not exists.\"\n return dict_res\n\n def create_node_info(self, node_path, node_info, cover=False):\n \"\"\"\n 新建node,写入node_info\n :param node_path: str\n :param node_info: str\n :param cover: bool, 是否覆盖 \n :return: dict_res\n \"\"\"\n dict_res = self.check_node_exists(node_path)\n if dict_res['status'] == 0 and cover == False: # 是否覆盖\n dict_res['status'] = 1\n dict_res['result'] = node_path + \" already exists.\"\n return dict_res\n self.zk.ensure_path(node_path)\n self.zk.set(node_path, node_info)\n dict_res = GetDictRes().get_dict_res()\n dict_res['result'] = \"create node info, node path: \"+node_path+\\\n \", node_info: \"+node_info\n return dict_res\n\n def get_node_info(self, node_path):\n \"\"\"\n 获取node信息\n :param node_path: str\n :return: dict_res\n \"\"\"\n dict_res = self.check_node_exists(node_path)\n if dict_res['status'] == 0:\n node_info, stat = self.zk.get(node_path)\n dict_res['result'] = node_info\n return 
dict_res\n\n def delete_node_info(self, node_path, recursive=False):\n \"\"\"\n 删除node\n :param node_path: str\n :param recursive: bool\n :return: dict_res\n \"\"\"\n dict_res = self.check_node_exists(node_path)\n if dict_res['status'] == 0:\n try:\n self.zk.delete(node_path, recursive=recursive)\n dict_res['result'] = \"delete \"+node_path\n except: # 是否递归删除文��夹\n dict_res['status'] = 1\n dict_res['result'] = node_path+\" not empty.\"\n return dict_res\n\n def list_node_name(self, node_path):\n \"\"\"\n node名列表\n :param node_path: str\n :return: dict_res\n \"\"\"\n dict_res = self.check_node_exists(node_path)\n if dict_res['status'] == 0:\n node_name_list = self.zk.get_children(node_path)\n dict_res['result'] = node_name_list\n return dict_res\n\n def list_node_info(self, node_path):\n \"\"\"\n node信息列表\n :param node_path: str\n :return: dict_res\n \"\"\"\n dict_res = self.list_node_name(node_path)\n if dict_res['status'] == 0:\n file_name_list = dict_res['result']\n file_info_list = []\n for file_name in file_name_list:\n file_info = self.get_node_info(node_path+\"/\"+file_name)\n file_info = file_info['result']\n file_info_list.append(file_info)\n dict_res['result'] = file_info_list\n return dict_res\n\n # (list 递归列表)\n\n","sub_path":"jcs_proxy/metadata/zookeeper_client.py","file_name":"zookeeper_client.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"150872247","text":"#----------------\n# Evaluate on the test sequences\n#-----------------\nimport sys, os\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\n\nfrom dataset import DataSet\nfrom fine_tune_net import FineTuneNet\n\nimport logging\nimport argparse\nimport ast\nfrom PIL import Image\n\nclass DataSet(Dataset):\n def __init__(self, image_list, image_dir, device, transform=None):\n self.transform = transform\n self.imgs = pd.read_csv(image_list) \n self.image_dir = image_dir\n self.device = device\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, index):\n row = self.imgs.iloc[index, :]\n video = row[0]\n frames = ast.literal_eval(row[1])\n labels = ast.literal_eval(row[2])\n label = torch.tensor(float(1 in labels))\n files = [os.path.join(self.image_dir, video, \"%03d.jpg\"%x) for x in frames]\n #print (\"files\", files)\n imgs = [Image.open(fn) for fn in files]\n if self.transform is not None:\n imgs = [self.transform(i) for i in imgs]\n imgs = torch.stack(imgs)\n #print (imgs.shape)\n if self.device is not None:\n imgs = imgs.to(self.device)\n label = label.to(self.device)\n return {\"imgs\": imgs, \"label\": label, \"filename\": video + \"/%03d.jpg\"%frames[0]}\n\n\ndef eval(loader, model):\n model.eval()\n fns = [] \n y_true = []\n y_pred = []\n correct = 0\n FN = 0\n FP = 0 \n TN = 0\n TP = 0\n# the size length of \n for _, sequence in enumerate(loader):\n x_batch = torch.squeeze(sequence['imgs'])\n y_batch = sequence['label'] \n logit = model(x_batch) \n prob = torch.nn.functional.softmax(logit, dim=1)\n y_true_n = y_batch.data.cpu().numpy()\n y_pred_n = np.mean(prob.data.cpu().numpy()[:,1])\n y_true.append(y_true_n[0])\n #print (y_pred_n, y_pred)\n y_pred.append(y_pred_n)\n fns += sequence['filename'] \n #print (prob, y_batch.data)\n pred_label = int(y_pred_n > 0.5)\n #print(pred_label, y_batch)\n if pred_label == y_batch:\n correct += 1\n if pred_label == 1:\n TP 
+= 1\n else:\n TN += 1\n else:\n if pred_label == 1:\n FP += 1\n else:\n FN += 1\n results = {}\n results[\"probs\"] = y_pred\n results[\"ground_true\"] = y_true\n results[\"fns\"] = fns \n\n logging.info(\"TP/TN/FP/FN: {}/{}/{}/{}\".format(TP, TN, FP, FN))\n a = float(correct)/len(loader)\n logging.info(\"Accuracy: {}\".format(a))\n p = float(TP)/(TP + FP)\n r = float(TP)/ (TP + FN)\n f = 2 * p * r / (p + r)\n logging.info(\"Precision: {}\".format(p))\n logging.info(\"Recall: {}\".format(r))\n logging.info(\"F1: {}\".format(f))\n print(\"TP/TN/FP/FN: {}/{}/{}/{}\\n{}, {}, {}, {}\"\n .format(TP, TN, FP, FN, a, p ,r,f))\n return results\n \ndef _parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--img_dir\", type=str, default=\"data\", help=\"directory that contains image files\")\n parser.add_argument(\"--seq_length\", type=int, default=12, help=\"4 | 6 | 8 | 10 | 12\")\n parser.add_argument(\"--add_args\", type=str ,default=\"\", help = \"additional CSV description | SHORT | SINGLE\")\n parser.add_argument(\"--gpu_id\", type=int, default=0, help=\"gpu id\")\n parser.add_argument(\"--output_dir\", type=str, default=\"results\", help=\"log result filename\")\n\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n import time\n args = _parse_args()\n fmt = \"%(message)s\"\n log_fn = os.path.join(args.output_dir, \"valid_{}{}.log\".format(args.seq_length, args.add_args))\n fmt = \"%(message)s\"\n logging.basicConfig(filename=log_fn, format=fmt, level=logging.INFO)\n logging.info(args)\n\n device = None\n if torch.cuda.is_available():\n device = torch.device(\"cuda:\"+str(args.gpu_id))\n print ('loading model')\n model = torch.load(\"test_resnet.pth\") \n if device:\n model.to(device)\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n tran = transforms.Compose([\n transforms.Resize((model.input_size, model.input_size)),\n transforms.ToTensor(),\n normalize,\n ])\n \n csv_file = os.path.join(\"testCSVs\", \"valid3_{}frame{}.csv\".format(args.seq_length, args.add_args))\n\n print (\"Using CSV\", csv_file)\n dataset = DataSet(csv_file, args.img_dir, device, transform=tran) \n logging.info(\"Total Sample count: {}\".format(len(dataset)))\n\n\n logging.info('Starting evaluation at ' + time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))\n\n loader = DataLoader(dataset, 1, shuffle=False, drop_last=False, num_workers=0)\n result = eval(loader, model)\n\n result_df = pd.DataFrame(data=result)\n ppn = os.path.join(args.output_dir, \"valid_{}{}_result.csv\".format(args.seq_length, args.add_args)) \n result_df.to_csv(ppn)\n\n logging.info('Ending evaluation at ' + time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))\n\n","sub_path":"ResNet/eval_test.py","file_name":"eval_test.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"162474875","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom .base import * # noqa\n\n# don't use an unicode string\nlocaleID = 'it'\n\ndateSep = ['/', '-']\nusesMeridian = False\nuses24 = True\n\nWeekdays = [\n 'lunedì', 'martedì', 'mercoledì',\n 'giovedì', 'venerdì', 'sabato', 'domenica',\n]\nshortWeekdays = [\n 'lun', 'mar', 'mer',\n 'gio', 'ven', 'sab', 'dom',\n]\nMonths = [\n 'gennaio', 'febbraio', 'marzo',\n 'aprile', 'maggio', 'giugno',\n 'luglio', 'agosto', 'settembre',\n 'ottobre', 'novembre', 'dicembre',\n]\nshortMonths = [\n 'gen', 'feb', 'mar',\n 'apr', 
'mag', 'giu',\n 'lug', 'ago', 'set',\n 'ott', 'nov', 'dic',\n]\n\ndateFormats = {\n 'full': 'EEEE, dd MMMM yyyy',\n 'long': 'dd MMMM yyyy',\n 'medium': 'dd-MM-yyyy',\n 'short': 'dd-MM-yy',\n}\n\ntimeFormats = {\n 'full': 'HH:mm:ss v',\n 'long': 'HH:mm:ss z',\n 'medium': 'HH:mm:ss',\n 'short': 'HH:mm',\n}\n\ndp_order = ['d', 'm', 'y']\n\n# Used to parse expressions like \"in 5 hours\"\nnumbers = {\n 'zero': 0,\n 'uno': 1,\n 'una': 1,\n 'un': 1,\n 'un\\'': 1,\n 'due': 2,\n 'tre': 3,\n 'quattro': 4,\n 'cinque': 5,\n 'sei': 6,\n 'sette': 7,\n 'otto': 8,\n 'nove': 9,\n 'dieci': 10,\n 'undici': 11,\n 'tredici': 13,\n 'quattordici': 14,\n 'quindici': 15,\n 'sedici': 16,\n 'diciassette': 17,\n 'diciotto': 18,\n 'diciannove': 19,\n 'venti': 20,\n}\n\ndecimal_mark = ','\n\n# the short version would be a capital M,\n# as I understand it we can't distinguish\n# between m for minutes and M for months.\n\nunits = {\n 'seconds': ['secondo', 'secondi', 'sec', 's'],\n 'minutes': ['minuto', 'minuti', 'min', 'm'],\n 'hours': ['ora', 'ore', 'h'],\n 'days': ['giorno', 'giorni', 'g'],\n 'weeks': ['settimana', 'settimane'],\n 'months': ['mese', 'mesi'],\n 'years': ['anno', 'anni'],\n}\n\nre_values = re_values.copy()\nre_values.update({\n 'specials': 'in|alle|per le| all\\'| al',\n 'timeseparator': ':',\n 'rangeseparator': '-',\n 'daysuffix': '',\n 'qunits': 'h|m|s|d|w|m|j',\n 'now': ['adesso'],\n})\n\n# Used to adjust the returned date before/after the source\n# still looking for insight on how to translate all of them to german.\nModifiers = {\n 'da': 1,\n 'dalle': 1,\n 'prima': -1,\n 'dopo': 1,\n 'fa': -1,\n 'poi': 1,\n 'precendente': -1,\n 'fine di': 0,\n 'questo': 0,\n}\n\n# morgen/abermorgen does not work, see\n# http://code.google.com/p/parsedatetime/issues/detail?id=19\ndayOffsets = {\n 'dopodomani': 2,\n 'domani': 1,\n 'oggi': 0,\n 'ieri': -1,\n 'l\\'altro ieri': -2,\n}\n\n# special day and/or times, i.e. 
lunch, noon, evening\n# each element in the dictionary is a dictionary that is used\n# to fill in any value to be replace - the current date/time will\n# already have been populated by the method buildSources\nre_sources = {\n 'mezzogiorno': {'hr': 12, 'mn': 0, 'sec': 0},\n 'mezzanotte': {'hr': 0, 'mn': 0, 'sec': 0},\n}\n\nsmall = {\n 'zero': 0,\n 'uno': 1,\n 'un': 1,\n 'una': 1,\n 'un\\'': 1,\n 'due': 2,\n 'tre': 3,\n 'quattro': 4,\n 'cinque': 5,\n 'sei': 6,\n 'sette': 7,\n 'otto': 8,\n 'nove': 9,\n 'dieci': 10,\n 'undici': 11,\n 'dodici': 12,\n 'tredici': 13,\n 'quattordici': 14,\n 'quindici': 15,\n 'sedici': 16,\n 'diciassette': 17,\n 'diciotto': 18,\n 'diciannove': 19,\n 'venti': 20,\n 'trenta': 30,\n 'quaranta': 40,\n 'cinquanta': 50,\n 'sessanta': 60,\n 'settanta': 70,\n 'ottanta': 80,\n 'novanta': 90\n}\n","sub_path":"parsedatetime/pdt_locales/it.py","file_name":"it.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"390005085","text":"# Quantum Search\n\nfrom openql import openql as ql\nimport os\n\ncurdir = os.path.dirname(__file__)\noutput_dir = os.path.join(curdir, 'cqasm_files')\nql.set_option('output_dir', output_dir)\nql.set_option('write_qasm_files', 'yes')\n\nconfig_fn = os.path.join(curdir, 'config_qx.json')\nplatform = ql.Platform('platform_none', config_fn)\n\nnum_qubits = 3\np = ql.Program('exercise_qasm_004', platform, num_qubits)\n\nk1 = ql.Kernel(\"initialize\", platform, num_qubits)\nfor i in range(0, num_qubits):\n\tk1.gate('prep_z', [i])\t# Initialize all qubits to |0>\nfor i in range(0, num_qubits):\n\tk1.gate('h', [i])\t\t# Create full superposition\nk1.display()\np.add_kernel(k1)\n\nk2 = ql.Kernel(\"oracle\", platform, num_qubits)\n\nk2.gate('x', [2]) \nk2.gate('h', [2]) \nk2.gate('toffoli', [0,1,2])\nk2.gate('h', [2]) \nk2.gate('x', [2])\n\nk2.display() # Mark |011>\np.add_kernel(k2)\n\nk3 = ql.Kernel(\"amplify\", platform, num_qubits)\n\nfor i in range(0, num_qubits):\n\tk3.gate('h', [i])\nfor i in range(0, num_qubits):\n\tk3.gate('x', [i])\nk3.gate('h', [2]) \nk3.gate('toffoli', [0,1,2])\nk3.gate('h', [2])\nfor i in range(0, num_qubits):\n\tk3.gate('x', [i])\nfor i in range(0, num_qubits):\n\tk3.gate('h', [i])\n \nk3.display()\np.add_kernel(k3)\n\np.compile()\n\nqasm = p.qasm()\t\t\t# Get the cqasm generated by OpenQL\nprint(qasm)","sub_path":"tbd/exercise006_shor_factorization.py","file_name":"exercise006_shor_factorization.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"264158794","text":"\"\"\"Add tags association table\n\nRevision ID: 8af38383af50\nRevises: 0d94ad5ada4c\nCreate Date: 2019-05-10 22:50:38.654470\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8af38383af50'\ndown_revision = '0d94ad5ada4c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('tags_association',\n sa.Column('tag_id', sa.Integer(), nullable=False),\n sa.Column('project_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ),\n sa.ForeignKeyConstraint(['tag_id'], ['tags.id'], ),\n sa.PrimaryKeyConstraint('tag_id', 'project_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('tags_association')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/8af38383af50_add_tags_association_table.py","file_name":"8af38383af50_add_tags_association_table.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"622539477","text":"from klientpay.models import Sub, AutoTransactions, CashTransactions\nfrom django.db.models import Q\nimport datetime\nimport calendar\n\n\ndef get_all_subs_porch(sub):\n \"\"\"\n :param sub: Get Subscriber\n :return: All Subscriber living in a stairwell\n \"\"\"\n city, street, home, porch = sub.city, sub.street, sub.home, sub.porch\n return Sub.objects.filter(Q(city__iexact=city), Q(street__iexact=street), Q(home__iexact=home),\n Q(porch__iexact=porch))\n\n\ndef auto_connection_transaction(date, operator):\n \"\"\"\n :param date: Get payment date\n :param operator: Who make transaction\n Makes automatic debiting for all online subscribers on the basis of payment options\n \"\"\"\n subs = Sub.objects.filter(online=True)\n new_transaction = AutoTransactions(payment_date=date, operator=operator)\n new_transaction.save()\n for sub in subs:\n new_transaction.sub.add(sub)\n amount = sub.payment_type.license_fee\n new_payment = CashTransactions(sub=sub, transaction_date=datetime.datetime.now(), transaction_type='Авто',\n amount=amount, balance=sub.balance, operator=operator,\n comment='Авто за ' + date.strftime(\"%m/%Y\"))\n new_payment.save()\n sub.balance -= amount\n sub.save()\n\n\ndef add_months(source_date, months):\n \"\"\"\n :param source_date: Get date from start\n :param months: Get count add months\n :return: date\n \"\"\"\n month = source_date.month - 1 + months\n year = source_date.year + int(month / 12)\n month = month % 12 + 1\n day = min(source_date.day, calendar.monthrange(year,month)[1])\n return datetime.date(year, month, day)","sub_path":"klientpay/logics.py","file_name":"logics.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"466968530","text":"from bpy.utils import previews\nfrom os import path\nfrom .. 
import var\n\n\npreview_collections = {}\n\n\nicon_names = (\n\t'cut-round',\n\t'cut-oval',\n\t'cut-emerald',\n\t'cut-marquise',\n\t'cut-pear',\n\t'cut-baguette',\n\t'cut-square',\n\t'cut-asscher',\n\t'cut-cushion',\n\t'cut-princess',\n\t'cut-trillion',\n\t'cut-octagon',\n\t'cut-heart',\n\t'cut-radiant',\n\t'cut-flanders',\n\t'cut-trilliant',\n\t'cut-triangle',\n\n\t'tool-cut',\n\t'tool-single_prong',\n\t'tool-cutter',\n\t'tool-cutter_seat',\n\t'tool-imitation_3_prong',\n)\n\nicons = previews.new()\nload = icons.load\n\nfor name in icon_names:\n\tload(name.upper(), path.join(var.icons_path, name + '.png'), 'IMAGE')\n\n\npreview_collections['icons'] = icons\n","sub_path":"scripts/addons_extern/blender-addon-jewelcraft-master/modules/icons.py","file_name":"icons.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"594235232","text":"\n# āēêīōūů\n# ΔδϴθŽžZzYyKkLlMmNnǪǫPpRrSsTtFfXxX̌x̌CcȤȥČ芚ГɣГ̌ǯQqHhJǰðγʒj\n\n\n\nimport csv\n\nimport re\n\nimport os\n\nwith open('all_types.csv', newline='') as f:\n myreader = csv.reader(f, dialect='unix', delimiter='\\t')\n all = list(myreader)\n\n\nwith open('../data/chars_zarubin_vowels.txt', 'r') as f:\n VOWELS = f.read().strip()\n VOWELS = VOWELS.replace('\\n', '')\n\nwith open('../data/chars_zarubin_consonants.txt', 'r') as f:\n CONSONANTS = f.read().strip()\n CONSONANTS = CONSONANTS.replace('\\n', '')\n\nprint(VOWELS)\nprint(CONSONANTS)\n\n# AaĀāĒēÊêĘęIiĪīŌōUuŪūŮů\n# BbVvWwGgDdΔδϴθŽžZzYyKkLlMmNnǪǫPpRrSsTtFfXxX̌x̌CcȤȥČ芚ГɣГ̌ǯQqHhJǰðγʒj\n\n\n\ndef get_vowel(word):\n word = word.split(', ')\n word = word[0]\n\n if word.endswith('ōw'):\n word = word.rstrip('ōw')\n regexp = '.*?(āw|āy|êw|[{}])[{}]*$'.format(VOWELS, CONSONANTS)\n vowel = re.search(regexp, word, flags=re.U | re.DOTALL)\n if vowel:\n vowel = vowel.group(1)\n if vowel is None:\n vowel = 'x'\n return vowel\n\ndef get_vowel_no_diftongs(word):\n word = word.split(', ')\n word = word[0]\n\n if word.endswith('ōw'):\n word = word.rstrip('ōw')\n regexp = '.*?([{}])[{}]*$'.format(VOWELS, CONSONANTS)\n vowel = re.search(regexp, word, flags=re.U | re.DOTALL)\n if vowel:\n vowel = vowel.group(1)\n if vowel is None:\n vowel = 'x'\n return vowel\n\n# āēêīōūů\n# ΔδϴθŽžZzYyKkLlMmNnǪǫPpRrSsTtFfXxX̌x̌CcȤȥČ芚ГɣГ̌ǯQqHhJǰðγʒj\n\nc = 0\nd = {}\narr = []\ncs = set()\nfor row in all:\n for form in row[1].split(', '):\n if form.endswith('d'):\n c = form[-2]\n if c == '̌':\n c0 = form[-3]\n c = c0+c\n cs.add(c)\nprint(cs)\n\n\ncs2 = set()\nfor row in all:\n for form in row[1].split(', '):\n if form.endswith('t'):\n c = form[-2]\n if c == '̌':\n c0 = form[-3]\n c = c0+c\n cs2.add(c)\nprint(cs2)\nprint(cs&cs2)\n# with open('zarubin_karam.csv', 'w', newline='') as f:\n# mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# mywriter.writerows(arr)\n\n\n# for row in all:\n# if row[7] == '-':\n# continue\n# form = row[2]\n# if form.endswith('id') or form.endswith('êd'):\n# print(form)\n# print(row)\n\n# arr = []\n# count = 0\n#\n# bflag = False\n# for row in all:\n# if row[7] == '-':\n# continue\n# # if len(row[7]) == 5:\n# # continue\n# for el in row[:5]:\n# if bflag:\n# bflag = False\n# break\n# for form in el.split(', '):\n# if 'ē' in form:\n# print(form)\n# print(row)\n# arr.append(row)\n# bflag = True\n# break\n# print(count)\n# #\n# # with open('with_e_flat.csv', 'w', newline='') as f:\n# # mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# # mywriter.writerows(arr)\n\n# arr = []\n# count = 0\n# d = {}\n# for 
row in all:\n# for form in row[:5]:\n# try:\n# char = form[-1]\n# if char in VOWELS:\n# print(form)\n# print(row)\n# arr.append(row)\n# if char in d:\n# d[char] += 1\n# else:\n# d[char] = 1\n# break\n# except IndexError:\n# continue\n# print(count)\n# dlist = []\n# for key in d:\n# item = [key, d[key]]\n# dlist.append(item)\n# dlist = sorted(dlist, key=lambda x: x[1], reverse=True)\n# for item in dlist:\n# print(str(item[0]) + ' - ' + str(item[1]))\n# #\n# # with open('ends_with_i.csv', 'w', newline='') as f:\n# # mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# # mywriter.writerows(arr)\n\n\n\n# arr = []\n# arr2 = []\n# d = {}\n# count = 0\n# for row in all:\n# form = row[0]\n# m = re.search('[ēůūī][mn]', form, flags=re.U|re.DOTALL)\n# if m and row[7] != '-':\n# print(m.group(0))\n# print(row)\n#\n# print(count)\n\n# arr.extend(arr2)\n#\n# with open('probably_causatives.csv', 'w', newline='') as f:\n# mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# mywriter.writerows(arr)\n\n\n# caus = []\n# for key, value in d.items():\n# if value > 1:\n# caus.append(key)\n#\n# causd = {}\n# caus_rows = []\n# for row in all:\n# form = row[0]\n# v = get_vowel_no_diftongs(form)\n# i = form.rfind(v)\n# form = form[:i]+form[i+1:]\n# if form in caus:\n# caus_rows.append(row)\n# if form in causd:\n# causd[form].append(row)\n# else:\n# causd[form] = [row]\n#\n#\n# with open('causatives.csv', 'w', newline='') as f:\n# mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# mywriter.writerows(caus_rows)\n\n\n\n\n# with open('temp.csv', 'w', newline='') as f:\n# mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# mywriter.writerows(arr1)\n\n\n# arr1 = []\n# arr2 = []\n# d = {}\n# # for row in all:\n# # if row[7] == '-':\n# # continue\n# # for form in row[:5]:\n# # if 'ē' in form and 'ēw' not in form:\n# # print(row)\n# # break\n#\n# for row in all:\n# if len(row[7]) > 0:\n# continue\n# if row not in arr1:\n# arr2.append(row)\n#\n# print(len(arr1))\n# print(len(arr2))\n#\n#\n# # with open('have_multiple_forms.csv', 'w', newline='') as f:\n# # mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# # mywriter.writerows(arr1)\n# # #\n# with open('unrecognized.csv', 'w', newline='') as f:\n# mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# mywriter.writerows(arr2)\n\n\n\n# arr = []\n# d = {}\n# for row in all:\n# if row[6] == '-':\n# continue\n# v1 = get_vowel(row[0])\n# v2 = get_vowel(row[1])\n# v3 = get_vowel(row[4])\n# if len(v1) > 1 or len(v2) > 1 or len(v3) > 1:\n# # print(v1)\n# # print(v2)\n# # print(row)\n# arr.append(row)\n# elif ('1' in row[6] or '2' in row[6]) and len(row[6]) > 1:\n# arr.append(row)\n# elif row[0].endswith('i'):\n# print(row)\n#\n# print(len(arr))\n# with open('diftongoids.csv', 'w', newline='') as f:\n# mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# mywriter.writerows(arr)\n\n\n##find êw\n# arr = []\n# d = {}\n# for row in all:\n# if row[6] == '-':\n# continue\n# for i, form in enumerate(row):\n# if i > 4:\n# continue\n# v = get_vowel(form)\n# if v == 'êw':\n# print(row)\n# arr.append(row)\n# if row[5] in d:\n# d[row[5]] += 1\n# else:\n# d[row[5]] = 1\n#\n# dlist = []\n# for key in d:\n# item = [key, d[key]]\n# dlist.append(item)\n# dlist = sorted(dlist, key=lambda x: x[1], reverse=True)\n# for item in dlist:\n# print(str(item[0]) + ' - ' + str(item[1]))\n\n# arr = [list(x) for x in set(tuple(x) for x in arr)]\n\n# with open('ew.csv', 'w', newline='') as f:\n# mywriter = csv.writer(f, dialect='unix', delimiter='\\t')\n# 
mywriter.writerows(arr)\n\n\n# vs = set()\n# for row in all:\n# if row[6] == '-':\n# continue\n# for i, form in enumerate(row):\n# if i > 4:\n# continue\n# v = get_vowel(form)\n# # if i == 4:\n# # if v not in ['i', 'ī', 'ê', 'êw']:\n# # print(form)\n# # if i == 2:\n# # if v not in ['i', 'ī', 'ê', 'u', 'ū', 'ō']:\n# # print(v)\n# # print(form)\n# if i == 0:\n# if v not in ['i', 'ī', 'ê', 'a', 'ā', 'ō', 'êw', 'āw']:\n# print(v)\n# print(form)\n# vs.add(v)\n# print(vs)","sub_path":"new/find_all_columns.py","file_name":"find_all_columns.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"198798187","text":"# Ввод строки\nstring = input('Введите строку: ')\nprint('Вы ввели \"', string, '\"', sep='')\n\n# Приглашение можно не указывать:\n# string = input()\n# Результат, который возвращает любая функция,\n# можно не привязывать ни к какому имени.\n# Таким образом, следующая строка просто заставит\n# программу ждать, пока пользователь что-то\n# введёт или просто нажмёт Enter, а затем\n# она продолжит выполнение.\ninput()\n\n\n# введём два числа\nn = int(input('Введите первое число: '))\nm = int(input('Введите второе число: '))\nprint('{} + {} = {}'.format(n, m, n + m))","sub_path":"Starter/002_Examples/13-input.py","file_name":"13-input.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"446854158","text":"from toydist.misc import \\\n Extension\n\nfrom toydist.config_parser.parser import \\\n AST\n\nclass PackageDescription:\n def __init__(self, name, version=None, summary=None, url=None,\n author=None, author_email=None, maintainer=None,\n maintainer_email=None, license=None, description=None,\n platforms=None, packages=None, py_modules=None, extensions=None):\n # XXX: should we check that we have sequences when required\n # (py_modules, etc...) ?\n\n # Package metadata\n self.name = name\n\n if not version:\n # Distutils default\n self.version = '0.0.0'\n else:\n self.version = version\n\n self.summary = summary\n self.url = url\n self.author = author\n self.author_email = author_email\n self.maintainer = maintainer\n self.maintainer_email = maintainer_email\n self.license = license\n self.description = description\n\n if not platforms:\n self.platforms = []\n else:\n self.platforms = platforms\n\n # Package content\n if not packages:\n self.packages = []\n else:\n self.packages = packages\n\n if not py_modules:\n self.py_modules = []\n else:\n self.py_modules = py_modules\n\n if not extensions:\n self.extensions = []\n else:\n self.extensions = extensions\n\n def to_dict(self):\n \"\"\"Return a distutils.core.setup compatible dict.\"\"\"\n d = {'name': self.name,\n 'version': self.version,\n 'description': self.summary,\n 'url': self.url,\n 'author': self.author,\n 'author_email': self.author_email,\n 'maintainer': self.maintainer,\n 'maintainer_email': self.maintainer_email,\n 'license': self.license,\n 'long_description': self.description,\n 'platforms': self.platforms,\n 'py_modules': self.py_modules,\n 'ext_modules': self.extensions,\n 'packages': self.packages}\n\n return d\n\n\ndef _parse_static(cnt):\n \"\"\"Parse a static file. 
cnt is assumed to be the content of the static file\n in one string\"\"\"\n ast = AST()\n ast.parse_string(cnt)\n return PackageDescription(**ast.to_dict())\n\ndef parse_static(filename):\n f = open(filename)\n try:\n cnt = \"\\n\".join(f.readlines())\n return _parse_static(cnt)\n finally:\n f.close()\n\ndef static_representation(pkg):\n \"\"\"Return the static representation of the given PackageDescription\n instance as a string.\"\"\"\n r = []\n if pkg.name:\n r.append(\"Name: %s\" % pkg.name)\n if pkg.version:\n r.append(\"Version: %s\" % pkg.version)\n if pkg.summary:\n r.append(\"Summary: %s\" % pkg.summary)\n if pkg.url:\n r.append(\"Url: %s\" % pkg.url)\n if pkg.description:\n r.append(\"Description: %s\" % pkg.description)\n if pkg.author:\n r.append(\"Author: %s\" % pkg.author)\n if pkg.author_email:\n r.append(\"AuthorEmail: %s\" % pkg.author_email)\n if pkg.maintainer:\n r.append(\"Author: %s\" % pkg.maintainer)\n if pkg.maintainer_email:\n r.append(\"AuthorEmail: %s\" % pkg.maintainer_email)\n\n # Fix indentation handling instead of hardcoding it\n r.append(\"Library:\")\n\n if pkg.py_modules:\n r.append(\"\"\"\\\n Modules:\n %s\"\"\" % \" \\n,\".join(pkg.py_modules))\n if pkg.packages:\n r.append(\"\"\"\\\n Packages:\n %s\"\"\" % \" \\n,\".join(pkg.packages))\n\n if pkg.extensions:\n for e in pkg.extensions:\n r.append(\"\"\"\\\n Extension: %s\n sources:\n %s\"\"\" % (e.name, \" \\n,\".join(e.sources)))\n\n\n return \"\\n\".join(r)\n\n","sub_path":"toydist/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"146641276","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nimport shutil\nimport uuid\nimport os\nimport sys\n#import yaml\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.spatial as scsp\nimport time\nimport subprocess\nimport shlex\nimport dftbplus_utils as dftb\n\nHToEV=27.21138505\nAToBohr=1.889725989\nhbar_eVs=6.582e-16 # eV\ne=1.6e-19 # C\nm_to_A=1e-10\nc = 2.998e8 # m/s\nNA = 6.022e23 # g/mol\nelementmasses={\"C\":12.01115,\n \"H\":1.00797,\n \"O\":15.99940,\n \"N\":14.007\n }\n\n# def sampling(settings, coords, elements, hess, num = 100):\n# coords_init=[]\n# elements_init=[]\n# n = settings[\"n\"]\n# for idx in range(num):\n# clash = True\n# while clash:\n# c_here = np.copy(coords)\n# for v_idx in range(6,3*n):\n# r = np.random.randn()*0.1\n# c_here += r*hess[v_idx]\n# ds = np.sort(scsp.distance.cdist(c_here,c_here).flatten())[n:]\n# if min(ds)>0.07:\n# clash = False\n# coords_init.append(c_here)\n# elements_init.append(elements)\n#\n# return(coords_init, elements_init)\n\ndef sampling(settings, coords, elements, hess, num=100):\n # reduced_masses_np = settings[\"reduced_masses\"]\n wavenumbers_np = settings[\"vibspectrum\"][-17:] ## 1/cm\n # reduced_masses_g = reduced_masses_np / NA # g\n # reduced_masses_kg = reduced_masses_g * 0.001 # kg\n # forceconstants = 4.0 * np.pi ** 2 * c ** 2 * wavenumbers_np ** 2 * 100 ** 2.0 * reduced_masses_kg ## N/m or J/m^2\n # forceconstants = forceconstants / e * m_to_A ** 2.0 ## eV / A^2\n #\n # n = settings[\"n\"]\n # vectors = hess[-len(reduced_masses_np):]\n # # print(vectors[0])\n # # vectors = vectors.reshape(3*n-6,n*3)\n # # print(vectors[0])\n # # exit()\n # # generate mass weighted vectors that are orthogonal\n # mass_per_atom_vector = []\n # for element in elements:\n # for i in range(0, 3):\n # 
mass_per_atom_vector.append(elementmasses[element])\n # mass_per_atom_vector = np.array(mass_per_atom_vector)\n # vectors_massweighted = np.zeros((len(vectors), len(coords), 3))\n #\n # for vec_idx, vec in enumerate(vectors):\n # vectors_massweighted[vec_idx] = (vec.flatten() * mass_per_atom_vector ** 0.5).reshape((len(coords), 3))\n #\n # scalar_products_massweighted=np.zeros((len(vectors),len(vectors)))\n #\n # for idx1 in range(len(vectors)):\n # for idx2 in range(len(vectors)):\n # scalar_products_massweighted[idx1][idx2]=np.sum(vectors_massweighted[idx1].flatten()*vectors_massweighted[idx2].flatten())\n # #scalar_products_massweighted[idx1][idx2]=np.sum(vectors[idx1].flatten()*vectors[idx2].flatten())\n # #print([scalar_products_massweighted[i][i] for i in range(len(scalar_products_massweighted))])\n # #exit()\n #\n # # generate orthonormal vectors: they were used by ANI authors\n # vectors_orthonormal = np.zeros((len(vectors), len(coords), 3))\n # for vec_idx, vec in enumerate(vectors_massweighted):\n # vectors_orthonormal[vec_idx] = np.copy(vec) / np.linalg.norm(vec.flatten())\n # # some parameters from the paper\n Nf = len(wavenumbers_np)\n # Na = float(len(coords))\n # T = 400.0\n # kBT_eV = 0.025 / 300.0 * T\n # # the non-stochastic part of the coefficients\n # Rs0 = np.sqrt((3.0 * Na * kBT_eV) / (forceconstants))\n\n coords_init=[]\n elements_init=[]\n n = settings[\"n\"]\n for idx in range(num):\n clash = True\n while clash:\n old=True\n if old:\n c_here = np.copy(coords)\n for v_idx in range(6,3*n):\n r = np.random.randn()*settings[\"amplitude\"]\n c_here += r*hess[v_idx]\n else:\n # get random numbers with sum 1\n cs=get_cs(Nf, n)\n # get random signs\n signs=(np.random.randint(0,2,size=Nf)*2-1)\n # get the coefficients\n # Rs=signs*Rs0*np.sqrt(cs)#/2.0**0.5\n # calculate the coordinates of the new conformer\n c_here=np.copy(coords)\n # for R_idx,R in enumerate(Rs):\n # c_here+=R*np.copy(vectors_orthonormal[R_idx])\n ds = np.sort(scsp.distance.cdist(c_here,c_here).flatten())[n:]\n if min(ds)>0.07:\n clash = False\n coords_init.append(c_here)\n elements_init.append(elements)\n return(coords_init, elements_init)\n\ndef get_cs(n_frequencies, n_atoms):\n # sequential generation of random numbers\n cs=np.zeros((n_frequencies))\n s=0.0\n order=np.array(range(n_frequencies))\n np.random.shuffle(order)\n #c_max=1.2\n cs_sum=np.random.random()*(float(int(n_atoms)))**0.5#*2.0#**0.5\n\n for idx in order:\n c_new=100.0\n while c_new>cs_sum:\n c_new=np.abs(np.random.normal(scale=1.0))/float(n_frequencies)\n cs[idx]=c_new*(cs_sum-s)#np.exp(-1.0/(1.0-s)))#0.5*(1.0-s))\n s=np.sum(cs)\n cs=np.abs(cs)\n return(cs)\n\n\ndef do_sampling(settings, name, num):\n outdir = settings[\"outdir\"]\n if os.path.exists(\"%s/%s.xyz\"%(outdir, name)) and not settings[\"overwrite\"]:\n coords_sampled, elements_sampled = dftb.readXYZs(\"%s/%s.xyz\"%(outdir, name))\n num = len(coords_sampled)\n print(\" --- load %i %s points\"%(num, name))\n else:\n print(\" --- generate %i %s points\"%(num, name))\n coords_sampled, elements_sampled = sampling(settings, settings[\"coords\"], settings[\"elements\"], settings[\"hess\"], num)\n dftb.exportXYZs(coords_sampled, elements_sampled, \"%s/%s.xyz\"%(outdir, name))\n return(coords_sampled, elements_sampled)\n\n\n\n\n\n# def vibrations(settings, vib = 0):\n# outdir = settings[\"outdir\"]\n# if os.path.exists(\"%s/vib.xyz\"%(outdir)) and not settings[\"overwrite\"]:\n# print(\" --- load vibration mode\")\n# coords_vib, elements_vib = 
dftb.readXYZs(\"%s/vib.xyz\"%(outdir))\n# else:\n# print(\" --- test vibration mode\")\n# coords_vib=[]\n# elements_vib=[]\n# for idx in range(-100,100):\n# coords_vib.append(settings[\"coords\"]+idx/100*settings[\"hess\"][vib])\n# elements_vib.append(settings[\"elements\"])\n# dftb.exportXYZs(coords_vib, elements_vib, \"{}/vib.xyz\".format(outdir))\n\ndef vibrations(settings):\n amplitude = settings[\"amplitude\"] # at a vibration of 100 1/cm\n outdir = settings[\"outdir\"]\n vibs = settings[\"vibspectrum\"]\n for vib_idx, vib in enumerate(vibs):\n if abs(vib)<20:\n continue\n filename=\"%s/vib_%i_%.0f.xyz\"%(outdir, vib_idx, vib)\n if os.path.exists(filename) and not settings[\"overwrite\"]:\n print(\" --- load vibration mode %i (%.0f 1/cm)\"%(vib_idx, vib))\n coords_vib, elements_vib = dftb.readXYZs(filename)\n else:\n print(\" --- test vibration mode %i (%.0f 1/cm)\"%(vib_idx, vib))\n coords_vib=[]\n elements_vib=[]\n prefactor = amplitude#/(vib/100.0)\n for idx in range(0,100):\n coords_vib.append(settings[\"coords\"]+idx/100.0*settings[\"hess\"][vib_idx]*prefactor)\n elements_vib.append(settings[\"elements\"])\n rmsd = np.mean((coords_vib[-1]-coords_vib[0])**2.0)\n print(rmsd)\n #for idx in range(0,100)[::-1]:\n # coords_vib.append(settings[\"coords\"]+idx/100.0*settings[\"hess\"][vib_idx]*prefactor)\n # elements_vib.append(settings[\"elements\"])\n dftb.exportXYZs(coords_vib, elements_vib, filename)\n\ndef prep_dirs(settings):\n outdir = \"output\"\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n if not os.path.exists(\"%s/models\"%(outdir)):\n os.makedirs(\"%s/models\"%(outdir))\n outdir_test = \"output_test\"\n if not os.path.exists(outdir_test):\n os.makedirs(outdir_test)\n settings[\"outdir\"] = outdir\n settings[\"outdir_test\"] = outdir_test\n return(outdir, outdir_test)\n\n\n\n\n","sub_path":"generate_data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"614535199","text":"import datetime as dt\n\nimport scrapy\n\nfrom gazette.items import Gazette\nfrom gazette.spiders.base import BaseGazetteSpider\n\n\nclass ApMacapaSpider(BaseGazetteSpider):\n\n name = \"ap_macapa\"\n allowed_domains = [\"macapa.ap.gov.br\"]\n start_date = None\n\n TERRITORY_ID = \"1600303\"\n\n def __init__(self, start_date=None, end_date=None, *args, **kwargs):\n self.start_date = dt.date(2018, 1, 1)\n self.end_date = dt.date.today()\n\n super(ApMacapaSpider, self).__init__(start_date, end_date)\n\n self.logger.debug(\n \"Start date is {date}\".format(date=self.start_date.isoformat())\n )\n self.logger.debug(\"End date is {date}\".format(date=self.end_date.isoformat()))\n\n def start_requests(self):\n base_url = \"https://macapa.ap.gov.br/\"\n\n target_date = self.start_date\n data = {\n \"s\": \"\",\n \"post_type\": \"official_diaries\",\n \"search\": \"official_diaries\",\n \"official_diary_number\": \"\",\n }\n while target_date <= self.end_date:\n formatted_date = target_date.strftime(\"%d/%m/%Y\")\n data.update(\n {\n \"official_diary_initial_date\": formatted_date,\n \"official_diary_final_date\": formatted_date,\n }\n )\n\n yield scrapy.FormRequest(\n url=base_url, formdata=data, method=\"GET\", meta={\"date\": target_date}\n )\n target_date = target_date + dt.timedelta(days=1)\n\n def parse(self, response):\n # Extract Items\n links = response.xpath('//i[@class=\"fa fa-file-pdf-o\"]/parent::a')\n links = links.xpath(\"@href\").getall()\n\n gazette_date = 
response.meta[\"date\"]\n\n if len(links) == 0:\n self.logger.warning(\n \"No gazettes found for date {date}\".format(date=gazette_date)\n )\n\n for index, file_url in enumerate(links):\n yield Gazette(\n date=gazette_date,\n file_urls=[file_url],\n is_extra_edition=(index > 0),\n power=\"executive_legislative\",\n )\n","sub_path":"data_collection/gazette/spiders/ap_macapa.py","file_name":"ap_macapa.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"236057193","text":"# -*- coding: utf-8 -*-\nfrom flask import g, request, abort, jsonify, url_for\nfrom flask_restful import fields, marshal, reqparse, marshal_with\nfrom flask_restful import Resource\nimport flask_restful as restful\nfrom server.app.extensions import db, bcrypt, auth\nfrom server.app.models import Route\nfrom sqlalchemy import and_\n\n\nroute_fields = {\n 'id': fields.Integer,\n 'district_id': fields.Integer,\n 'route_name': fields.String,\n}\n\ndistrict_parser = reqparse.RequestParser()\n\ndistrict_parser.add_argument(\n 'district_name', dest='districName',\n type=str, location='form',\n required=True, help='The district name'\n)\n\n\nclass RouteList(Resource):\n decorators = [auth.login_required]\n\n @marshal_with(route_fields)\n def get(self):\n routes = Route.get_routes()\n if not routes:\n abort(404)\n return routes\n\n\nclass RouteAPI(Resource):\n decorators = [auth.login_required, marshal_with(route_fields)]\n\n # @marshal_with(route_fields)\n def get(self, id):\n route = Route.get(id)\n if not route:\n abort(404)\n return route\n\n def post(self):\n district_id = request.json.get('districtId')\n route_name = request.json.get('routeName')\n if not route_name or district_id:\n abort(400)\n\n route = Route(district_id, route_name)\n db.session.add(route)\n db.session.commit()\n return route, 201.\n\n def put(self, id):\n district_id = request.json.get('districtId')\n route_name = request.json.get('routeName')\n route = Route.get(id)\n if not route:\n abort(404)\n route.district_id = district_id\n route.route_name = route_name\n\n db.session.add(route)\n db.session.commit()\n\n return route, 201\n\n @auth.login_required\n def delete(self, id):\n route = Route.get(id)\n\n if route:\n if route.buses:\n return jsonify({'resCode': '30', 'resMsg': 'theere is some buses index for it'})\n else:\n db.session.delete(route)\n db.session.commit()\n return jsonify({\"resCode\": '20', 'resMsg': 'delete success'})\n\n else:\n return jsonify({\"resCode\": '30', 'resMsg': 'route is not exit'})\n\n","sub_path":"server/app/resources/route_resources.py","file_name":"route_resources.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"278256089","text":"import requests\n\nfrom cartmigration.libs.utils import *\nimport optparse\n\nfrom cartmigration.models.basecart import LeBasecart\nclass Command():\n\tdef __init__(self, action):\n\t\tself._action = action\n\n\tdef set_max_migration_id(self, migration_id):\n\t\tpath = get_root_path() + '/cartmigration/etc/'\n\t\tfile_name = path + 'max_id.ini'\n\t\tcheck_exist = False\n\t\tif os.path.isfile(file_name):\n\t\t\tcheck_exist = True\n\t\twith open(file_name, 'w') as log_file:\n\t\t\tlog_file.write('[max_id]\\n')\n\t\t\tlog_file.write('max_id=' + to_str(migration_id))\n\t\tif not check_exist and os.path.isfile(file_name):\n\t\t\tos.chmod(file_name, 0o777)\n\n\tdef get_max_migration_id(self):\n\t\treturn 
get_config_ini('max_id', 'max_id', 0, file = 'max_id.ini')\n\n\tdef map_customer_group(self, notice):\n\t\tif not notice['support']['customer_group_map']:\n\t\t\treturn response_success(dict())\n\t\tsrc_customer_group = notice['src']['customer_group']\n\t\tif not src_customer_group:\n\t\t\treturn response_error('Src Customer group is empty')\n\t\ttarget_customer_group = notice['target']['customer_group']\n\t\tif not target_customer_group:\n\t\t\treturn response_error('target Customer group is empty')\n\t\tmap_data = self.get_map_in_config('map-customer-group', src_customer_group, target_customer_group)\n\n\t\tgroup_default = next(iter(target_customer_group.keys()))\n\t\tfor src_group_id, src_group_label in src_customer_group.items():\n\t\t\tif src_group_id in map_data:\n\t\t\t\tcontinue\n\t\t\tcheck = False\n\t\t\tfor target_group_id, target_group_label in target_customer_group.items():\n\t\t\t\tif src_group_label.lower() == target_group_label.lower():\n\t\t\t\t\tmap_data[str(src_group_id)] = to_str(target_group_id)\n\t\t\t\t\tcheck = True\n\t\t\t\t\tbreak\n\t\t\tif check is False:\n\t\t\t\tmap_data[str(src_group_id)] = to_str(group_default)\n\t\treturn response_success(map_data)\n\n\tdef map_order_status(self, notice):\n\t\tif not notice['support']['order_status_map']:\n\t\t\treturn response_success(dict())\n\t\tsrc_order_status = notice['src']['order_status']\n\t\tif not src_order_status:\n\t\t\treturn response_success(dict())\n\t\ttarget_order_status = notice['target']['order_status']\n\t\tif not target_order_status:\n\t\t\treturn response_error('target order status is empty')\n\t\tmap_data = self.get_map_in_config('map-order-status', src_order_status, target_order_status)\n\t\tstatus_default = next(iter(target_order_status.keys()))\n\t\tfor src_group_id, src_group_label in src_order_status.items():\n\t\t\tif src_group_id in map_data:\n\t\t\t\tcontinue\n\t\t\tcheck = False\n\t\t\tfor target_group_id, target_group_label in target_order_status.items():\n\t\t\t\tif src_group_label.lower() == target_group_label.lower() \\\n\t\t\t\t\t\tor src_group_label.lower() == target_group_id.lower() \\\n\t\t\t\t\t\tor src_group_id.lower() == target_group_id.lower() \\\n\t\t\t\t\t\tor src_group_id.lower() == target_group_label.lower():\n\t\t\t\t\tmap_data[src_group_id] = target_group_id\n\t\t\t\t\tcheck = True\n\t\t\t\t\tbreak\n\t\t\tif check is False:\n\t\t\t\tmap_data[src_group_id] = status_default\n\t\treturn response_success(map_data)\n\n\tdef map_language_data(self, notice):\n\t\tif not notice['support']['language_map']:\n\t\t\treturn response_success()\n\t\t# if to_len(notice['src']['languages']) > to_len(notice['target']['languages']):\n\t\t# \treturn response_error(\"Number languages src > Number languages target\")\n\t\tsrc_languages = notice['src']['languages']\n\t\tif not src_languages:\n\t\t\treturn response_error('Src language is empty')\n\t\ttarget_languages = notice['target']['languages']\n\t\tif not target_languages:\n\t\t\treturn response_error('target language is empty')\n\t\ttarget_keys = list(target_languages.keys())\n\t\tkey_default = 0\n\t\tlen_target = to_len(target_languages)\n\t\tkey_uses = list()\n\t\tmap_data = self.get_map_in_config('map-language', src_languages, target_languages)\n\t\tfor src_language_id, src_language_label in src_languages.items():\n\t\t\tif src_language_id in map_data:\n\t\t\t\tcontinue\n\t\t\tcheck = False\n\t\t\tcheck_break = False\n\t\t\tfor target_language_id, target_language_label in target_languages.items():\n\t\t\t\tif src_language_label.lower() == 
target_language_label.lower():\n\t\t\t\t\tmap_data[str(src_language_id)] = to_str(target_language_id)\n\t\t\t\t\tkey_uses.append(target_language_id)\n\t\t\t\t\tcheck = True\n\t\t\t\t\tbreak\n\t\t\tif check is False:\n\t\t\t\twhile key_default < to_len(target_keys) - 1:\n\t\t\t\t\tif key_default not in key_uses:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tkey_default += 1\n\t\t\t\tif key_default >= len_target:\n\t\t\t\t\tcheck_break = True\n\t\t\t\t\tbreak\n\t\t\t\tmap_data[str(src_language_id)] = target_keys[key_default]\n\t\t\t\tkey_uses.append(target_keys[key_default])\n\t\t\t\tkey_default += 1\n\t\t\tif check_break:\n\t\t\t\tbreak\n\t\treturn response_success(map_data)\n\n\tdef get_map_in_config(self, key, src_data, target_data):\n\t\tmap_data = dict()\n\t\tmap_in_config = get_config_ini(key)\n\t\tif map_in_config:\n\t\t\tfor src_group, target_group in map_in_config.items():\n\t\t\t\tmap_key = None\n\t\t\t\tmap_value = None\n\t\t\t\tfor src_key, src_label in src_data.items():\n\t\t\t\t\tif src_group == src_key or src_group == src_label:\n\t\t\t\t\t\tmap_key = src_key\n\t\t\t\t\t\tbreak\n\t\t\t\tif not map_key:\n\t\t\t\t\tcontinue\n\t\t\t\tfor target_key, target_label in target_data.items():\n\t\t\t\t\tif target_group == target_key or target_group == target_label:\n\t\t\t\t\t\tmap_value = target_key\n\t\t\t\t\t\tbreak\n\t\t\t\tif not map_value:\n\t\t\t\t\tcontinue\n\t\t\t\tmap_data[map_key] = map_value\n\t\treturn map_data\n\tdef get_custom_headers(self):\n\t\ttime_request = to_str(to_int(time.time()))\n\t\tprivate_key = get_config_ini('local', 'private_key')\n\t\thmac = hash_hmac('sha256', time_request, private_key)\n\t\tcustom_headers = dict()\n\t\tcustom_headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0'\n\t\tcustom_headers['Authorization'] = time_request + \":\" + hmac\n\t\treturn custom_headers\n\n\tdef call_server(self, path, data):\n\t\tcustom_headers = self.get_custom_headers()\n\t\tif data and isinstance(data, dict):\n\t\t\tdata['test'] = True\n\t\ttry:\n\t\t\tip_host = socket.gethostbyname(socket.gethostname()) # Default to any avialable network interface\n\t\texcept Exception:\n\t\t\tip_host = '127.0.0.1'\n\t\turl = 'http://' + ip_host + ':' + get_config_ini('local', 'port') + '/api/v1/' + path\n\t\treturn self.call(url, data, custom_headers)\n\n\tdef call(self, url, data, custom_headers = None):\n\t\tif isinstance(data, list) or isinstance(data, dict):\n\t\t\tdata = json_encode(data)\n\t\tif not custom_headers:\n\t\t\tcustom_headers = dict()\n\t\t\tcustom_headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0'\n\t\telif isinstance(custom_headers, dict) and not custom_headers.get('User-Agent'):\n\t\t\tcustom_headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0'\n\n\t\tres = False\n\t\ttry:\n\t\t\tr = requests.post(url, data, headers = custom_headers)\n\n\t\t\tres = r.text\n\t\t\tr.raise_for_status()\n\t\texcept requests.exceptions.HTTPError as errh:\n\t\t\tlog(\"Http Error:\" + to_str(errh) + \" : \" + to_str(res), type_error = 'test_exp')\n\t\texcept requests.exceptions.ConnectionError as errc:\n\t\t\tlog(\"Error Connecting:\" + to_str(errc) + \" : \" + to_str(res), type_error = 'test_exp')\n\t\texcept requests.exceptions.Timeout as errt:\n\t\t\tlog(\"Timeout Error:\" + to_str(errt) + \" : \" + to_str(res), type_error = 'test_exp')\n\t\texcept requests.exceptions.RequestException as err:\n\t\t\tlog(\"OOps: Something Else\" + to_str(err) + 
\" : \" + to_str(res), type_error = 'test_exp')\n\t\treturn res\n\n\tdef setup(self, migration_id = None, option_args = None):\n\t\tinfo = {\n\t\t\t'src': dict(get_config_ini('src', file = 'test.ini')),\n\t\t\t'target': dict(get_config_ini('target', file = 'test.ini')),\n\t\t}\n\t\tdata = dict()\n\t\tif migration_id:\n\t\t\tdata['migration_id'] = migration_id\n\t\tfor _key, value in info.items():\n\t\t\tfor data_key, data_value in value.items():\n\t\t\t\tif \"[\" not in data_key:\n\t\t\t\t\tdata[_key + '_' + data_key] = data_value\n\t\t\t\telse:\n\t\t\t\t\tkey_path = re.split(r\"[\\[\\]]\", data_key.strip(']'))\n\t\t\t\t\tif _key + '_' + key_path[0] not in data:\n\t\t\t\t\t\tdata[_key + '_' + key_path[0]] = dict()\n\t\t\t\t\tdata[_key + '_' + key_path[0]][key_path[1]] = data_value\n\t\tdata['test'] = True\n\t\tsetup_info = self.call_server('action/setup_cart', data)\n\t\tif isinstance(setup_info, str):\n\t\t\tsetup_info = json_decode(setup_info)\n\t\tif setup_info['result'] != 'success':\n\t\t\treturn setup_info\n\t\tnotice = setup_info['data']\n\t\tmigration_data = {\n\t\t\t'notice': json_encode(notice),\n\t\t}\n\t\tmodel = LeBasecart()\n\t\tmigration_data['migration_id'] = migration_id\n\t\tif not migration_id:\n\t\t\tmigration_id = to_int(self.get_max_migration_id()) + 1\n\t\t\tmodel.set_migration_id(migration_id)\n\t\t\tself.call_server('action/clear_previous_data', {'migration_id': migration_id, 'test': True})\n\t\t\tnotice['migration_id'] = migration_id\n\t\t\tmigration_data = {\n\t\t\t\t'notice': json_encode(notice),\n\t\t\t}\n\t\t\tmigration_data['migration_id'] = migration_id\n\t\t\tmodel.get_db(test = True).set_migration_id(migration_id)\n\t\t\tmigration = model.insert_obj(TABLE_MIGRATION, data = migration_data, insert_id = True)\n\t\t\tif migration['result'] != 'success':\n\t\t\t\treturn response_error()\n\t\t\tself.set_max_migration_id(migration_id)\n\t\telse:\n\t\t\tmigration = model.update_obj(TABLE_MIGRATION, migration_data, {'migration_id': migration_id})\n\t\t\tif migration['result'] != 'success':\n\t\t\t\treturn response_error()\n\t\treturn response_success(migration_id)\n\n\tdef get_source_cart(self, notice):\n\t\trouter = get_model('basecart')\n\t\tsource_cart_type = notice['src']['cart_type']\n\t\ttarget_cart_type = notice['target']['cart_type']\n\t\tspecial_type = source_cart_type == target_cart_type\n\t\tcart_version = notice['src']['config']['version']\n\t\tcart_name = getattr(router, 'get_cart')(source_cart_type, cart_version, special_type)\n\t\tsource_cart = get_model(cart_name)\n\t\tif not source_cart:\n\t\t\treturn None\n\t\tgetattr(source_cart, 'set_migration_id')(notice['migration_id'])\n\t\tgetattr(source_cart, 'set_type')('src')\n\t\tgetattr(source_cart, 'set_notice')(notice)\n\t\tgetattr(source_cart, 'set_is_test')(True)\n\n\t\treturn source_cart\n\n\tdef display_upload(self, migration_id, notice):\n\t\tpath_file = get_config_ini('src', 'file', file = 'test.ini')\n\t\tfull_path_file = get_pub_path() + '/' + DIR_UPLOAD + '/' + path_file\n\t\tif not path_file or not os.path.isdir(full_path_file):\n\t\t\treturn response_error(\"Don't file path\")\n\t\tsource_cart = self.get_source_cart(notice)\n\t\tgetattr(source_cart, 'set_migration_id')(migration_id)\n\t\tsource_cart.get_db(test = True).set_migration_id(migration_id)\n\t\tfile_info = getattr(source_cart, 'get_file_info')()\n\t\tconfig_ini = dict(get_config_ini('config', file = 'test.ini'))\n\t\tconfig_data = dict()\n\t\tfor config_key, config_value in config_ini.items():\n\t\t\tif 
notice['support'].get(config_key) and to_bool(config_value):\n\t\t\t\tconfig_data[config_key] = to_bool(config_value)\n\t\tupload_res = dict()\n\t\tfor info_key, info_label in file_info.items():\n\t\t\tfile_details = getattr(source_cart, 'get_default_file_details')()\n\t\t\tupload_name = getattr(source_cart, 'get_upload_file_name')(info_key)\n\t\t\tcheck = False\n\t\t\tparents = info_label['parents']\n\t\t\tfor parent in parents:\n\t\t\t\tif notice['support'].get(parent) and to_bool(get_config_ini('config', parent, file = 'test.ini')):\n\t\t\t\t\tif os.path.isfile(full_path_file + '/' + upload_name):\n\t\t\t\t\t\tcheck = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\treturn response_error(\"Not found file \" + upload_name)\n\t\t\tif check:\n\t\t\t\ttry:\n\t\t\t\t\tfile_details['upload'] = True\n\t\t\t\t\tfile_details['name'] = upload_name\n\t\t\t\t\tfile_details['storage'] = False\n\t\t\t\t\tnotice['src']['config']['file'][info_key] = file_details\n\t\t\t\t\tif info_key not in upload_res:\n\t\t\t\t\t\tupload_res[info_key] = dict()\n\t\t\t\t\tupload_res[info_key]['result'] = 'success'\n\t\t\t\t\tupload_res[info_key]['name'] = upload_name\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tlog_traceback(migration_id)\n\t\t\t\t\tfile_details['upload'] = False\n\t\t\t\t\tupload_res[info_key]['result'] = 'error'\n\t\t\t\t\tnotice['src']['config']['file'][info_key] = file_details\n\t\t\telse:\n\t\t\t\tfile_details['upload'] = False\n\t\t\t\tnotice['src']['config']['file'][info_key] = file_details\n\t\tgetattr(source_cart, 'set_notice')(notice)\n\t\tprepare_display_upload = getattr(source_cart, 'prepare_display_upload')(config_data)\n\t\tdisplay_upload = getattr(source_cart, 'display_upload')(upload_res)\n\t\tif display_upload['result'] != 'success':\n\t\t\treturn display_upload\n\t\tupload_res = display_upload['msg']\n\t\tfor type_file, res in upload_res.items():\n\t\t\tif res['result'] != 'success':\n\t\t\t\treturn response_error('File ' + res['name'] + ': ' + res['msg'])\n\t\tnotice = getattr(source_cart, 'get_notice')()\n\t\tsource_cart.update_obj(TABLE_MIGRATION, {'notice': json_encode(notice)}, {'migration_id': migration_id})\n\n\t\treturn response_success(notice)\n\tdef config(self, migration_id = None, option_args = None):\n\t\tdata_test = {\n\t\t\t'migration_id': migration_id\n\t\t}\n\t\tnotice = self.call_server('action/get_migration_info', data_test)\n\t\tif not notice:\n\t\t\treturn response_error()\n\t\tif isinstance(notice, str):\n\t\t\tnotice = json_decode(notice)\n\t\tif notice['src']['setup_type'] == 'file':\n\t\t\tdisplay_upload = self.display_upload(migration_id, notice)\n\t\t\tif display_upload['result'] != 'success':\n\t\t\t\treturn display_upload\n\t\t\tnotice = display_upload['data']\n\t\tconfig_ini = dict(get_config_ini('config', file = 'test.ini'))\n\t\tconfig_data = dict()\n\t\tfor config_key, config_value in config_ini.items():\n\t\t\tif notice['support'].get(config_key) and to_bool(config_value):\n\t\t\t\tconfig_data[config_key] = to_bool(config_value)\n\t\tmap_language = self.map_language_data(notice)\n\t\tif map_language['result'] != 'success':\n\t\t\treturn map_language\n\t\tconfig_data['languages'] = map_language['data']\n\t\tconfig_data['languages_select'] = dict()\n\t\tfor src_id, target_id in config_data['languages'].items():\n\t\t\tconfig_data['languages_select'][str(src_id)] = 'on'\n\t\tconfig_data['migration_id'] = migration_id\n\t\tcustomer_group_map = self.map_customer_group(notice)\n\t\tif customer_group_map['result'] != 'success':\n\t\t\treturn 
customer_group_map\n\t\tconfig_data['customer_group'] = customer_group_map['data']\n\t\torder_status_map = self.map_order_status(notice)\n\t\tif order_status_map['result'] != 'success':\n\t\t\treturn order_status_map\n\t\tconfig_data['order_status'] = order_status_map['data']\n\t\tconfig = self.call_server('action/config', config_data)\n\t\tif isinstance(config, str):\n\t\t\tconfig = json_decode(config)\n\t\treturn config\n\n\tdef full(self, migration_id = None, option_args = None):\n\t\tsetup_info = self.setup(migration_id)\n\t\tif setup_info['result'] != 'success':\n\t\t\tsetup_info['step'] = 'setup'\n\t\t\treturn setup_info\n\t\tmigration_id = setup_info['data']\n\t\tconfig_info = self.config(migration_id)\n\t\tif config_info['result'] == 'success':\n\t\t\treturn self.start(migration_id, option_args)\n\t\telse:\n\t\t\tconfig_info['step'] = 'config'\n\t\t\treturn config_info\n\n\tdef start(self, migration_id, option_args = None):\n\t\tstart = self.call_server('start/' + to_str(migration_id), {'migration_id': migration_id})\n\t\tstart_decode = json_decode(start)\n\t\tif not start_decode or start_decode['result'] != 'success':\n\t\t\treturn start\n\t\treturn \"Start migration id: {}. Run \\033[94m\\033[1m watch cat {}/log/{}/notice.log\\033[0m to view process migration\".format(migration_id, get_pub_path(), migration_id)\n\n\tdef reset(self, migration_id, option_args = None):\n\t\treset = self.call_server('action/reset_migration', {'migration_id': migration_id})\n\t\treset_decode = json_decode(reset)\n\t\tif not reset_decode or reset_decode['result'] != 'success':\n\t\t\treturn reset\n\t\treturn \"Start migration id: {}. Run \\033[94m\\033[1m watch cat {}/log/{}/notice.log\\033[0m to view process migration\".format(migration_id, get_pub_path(), migration_id)\n\n\tdef stop_loop(self, migration_id, option_args = None):\n\t\treturn self.call_server('action/kill_end_loop_migration', {'migration_id': migration_id})\n\n\tdef stop(self, migration_id, option_args = None):\n\t\treturn self.call_server('action/kill_migration', {'migration_id': migration_id})\n\n\tdef recent(self, migration_id, option_args = None):\n\t\trecent = self.call_server('recent/' + to_str(migration_id), {'migration_id': migration_id})\n\t\tif recent['result'] != 'success':\n\t\t\treturn recent\n\t\treturn self.start(migration_id, option_args)\n\n\tdef change_mode(self, migration_id, option_args):\n\t\tmode = option_args.mode\n\t\tmodel = LeBasecart()\n\t\tmodel.set_migration_id(migration_id)\n\t\tmodel.get_db(test = True).set_migration_id(migration_id)\n\t\tmigration = model.select_row(TABLE_MIGRATION, {'migration_id': migration_id})\n\t\tif not migration:\n\t\t\treturn\n\t\tupdate = dict()\n\t\tupdate['mode'] = MIGRATION_FULL if mode == 'full' else MIGRATION_DEMO\n\t\tnotice = json_decode(migration['notice'])\n\t\tnotice['mode'] = MIGRATION_FULL if mode == 'full' else MIGRATION_DEMO\n\t\tupdate['notice'] = json_encode(notice)\n\t\tmodel.update_obj(TABLE_MIGRATION, update, {'migration_id': migration_id})\n\t\treturn\n\n\tdef private_key(self, migration_id = None, option_args = None):\n\t\tconfig_file = get_root_path() + '/cartmigration/etc/config.ini'\n\t\tif not os.path.isfile(config_file):\n\t\t\tprint(\"Not found config file\")\n\t\t\treturn\n\t\tconfig = configparser.ConfigParser()\n\t\tconfig.read(config_file)\n\t\tif not config.has_section('local'):\n\t\t\tprint(\"Not found section local in config file\")\n\t\t\treturn\n\t\tprivate_key = md5(to_str(to_int(time.time())))\n\t\tconfig['local']['private_key'] = 
md5(to_str(to_int(time.time())))\n\t\twith open(config_file, 'w') as configfile: # save\n\t\t\tconfig.write(configfile)\n\t\treturn private_key\n\n\tdef run(self, option_args = None):\n\t\tmigration_id = option_args.migration_id\n\t\tif not hasattr(self, self._action):\n\t\t\tprint('Action invalid')\n\t\t\treturn\n\t\tmode = option_args.mode\n\t\tif mode and mode in ['full', 'demo']:\n\t\t\tself.change_mode(migration_id, option_args)\n\t\taction = getattr(self, self._action)(migration_id, option_args)\n\t\tprint(action)\ndef get_list_action():\n\treturn {\n\t\t'setup': 'Setup migration',\n\t\t'config': 'Config migration',\n\t\t'start': 'Start migration',\n\t\t'full': 'auto setup => config => start',\n\t\t'stop': 'stop migration',\n\t\t'stop_loop': 'stop end loop',\n\t\t'reset': 'reset migration',\n\t\t'change_mode': 'Change mode',\n\t\t'recent': 'recent Migration',\n\t\t'private_key': 'Generate private key',\n\t}\ndef get_help_action():\n\taction = get_list_action()\n\tdesc = list()\n\tfor action_key, action_desc in action.items():\n\t\tdesc.append('[' + action_key + ':' + action_desc.capitalize() + ']')\n\treturn \"\\n\".join(desc)\n\nparser = optparse.OptionParser()\nparser.add_option('-m', '--migration_id', help=\"Migration Id\", default=\"\")\nparser.add_option('-a', '--action', help=get_help_action(), default=\"\")\nparser.add_option('-o', '--mode', help=\"Mode: full or demo\", default=\"\")\n\noptions, args = parser.parse_args()\nparam_migration_id = options.migration_id\nparam_action = options.action\nlist_action = get_list_action()\nif param_action not in list(list_action.keys()):\n\tprint('Action ' + param_action + ' invalid')\n\tsys.exit()\n\nif param_action not in ['setup', 'full', 'private_key'] and not param_migration_id:\n\tprint('Action ' + param_action + ' required migration_id')\n\tsys.exit()\ntest = Command(param_action)\ntest.run(options)\n\n\n","sub_path":"v32/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":18647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"446980130","text":"#!/usr/bin/python3\n\n\"\"\"\n== Лото ==\n\nПравила игры в лото.\n\nИгра ведется с помощью специальных карточек, на которых отмечены числа, \nи фишек (бочонков) с цифрами.\n\nКоличество бочонков — 90 штук (с цифрами от 1 до 90).\n\nКаждая карточка содержит 3 строки по 9 клеток. В каждой строке по 5 случайных цифр, \nрасположенных по возрастанию. Все цифры в карточке уникальны. Пример карточки:\n\n--------------------------\n 9 43 62 74 90\n 2 27 75 78 82\n 41 56 63 76 86 \n--------------------------\n\nВ игре 2 игрока: пользователь и компьютер. Каждому в начале выдается \nслучайная карточка. 
\n\nКаждый ход выбирается один случайный бочонок и выводится на экран.\nТакже выводятся карточка игрока и карточка компьютера.\n\nПользователю предлагается зачеркнуть цифру на карточке или продолжить.\nЕсли игрок выбрал \"зачеркнуть\":\n\tЕсли цифра есть на карточке - она зачеркивается и игра продолжается.\n\tЕсли цифры на карточке нет - игрок проигрывает и игра завершает��я.\nЕсли игрок выбрал \"продолжить\":\n\tЕсли цифра есть на карточке - игрок проигрывает и игра завершается.\n\tЕсли цифры на карточке нет - игра продолжается.\n\t\nПобеждает тот, кто первый закроет все числа на своей карточке.\n\nПример одного хода:\n\nНовый бочонок: 70 (осталось 76)\n------ Ваша карточка -----\n 6 7 49 57 58\n 14 26 - 78 85\n23 33 38 48 71 \n--------------------------\n-- Карточка компьютера ---\n 7 87 - 14 11 \n 16 49 55 88 77 \n 15 20 - 76 -\n--------------------------\nЗачеркнуть цифру? (y/n)\n\nПодсказка: каждый следующий случайный бочонок из мешка удобно получать \nс помощью функции-генератора.\n\nПодсказка: для работы с псевдослучайными числами удобно использовать \nмодуль random: http://docs.python.org/3/library/random.html\n\n\"\"\"\nimport random\n\n\nclass LotoBag:\n def __init__(self):\n self.barrels = [i for i in range(1,91)]\n def pull_barrel(self):\n b = random.choice(self.barrels)\n self.barrels.remove(b)\n print('Новый бочонок: {} (осталось {})'.format(b,len(self.barrels)))\n return b\n\n\nclass LotoCard:\n def __init__(self,name):\n self.status = 0\n self.name = name\n self.field = ['' for _ in range(9*3)]\n positions = []\n for x in range(3):\n positions.extend(random.sample(range(x*9,(x+1)*9),5))\n numbers = random.sample(range(1,91),15)\n for i in positions:\n self.field[i] = numbers.pop(0)\n \n def card_info(self):\n print(self.name)\n for i in range(3):\n print('%2s %2s %2s %2s %2s %2s %2s %2s %2s ' %\n tuple(self.field[i*9:(i+1)*9]))\n print('--------------------------')\n \n def play(self, b, answer=None):\n if answer:\n if (answer=='y') and (b in self.field):\n self.field[self.field.index(b)]='-'\n elif (answer=='y') and (b not in self.field):\n self.status = -1\n elif (answer=='n') and (b not in self.field):\n pass\n elif (answer=='n') and (b in self.field):\n self.status = -1\n else:\n print('Не верное значение')\n self.status = -1\n else:\n if b in self.field:\n self.field[self.field.index(b)]='-'\n if(int not in list(map(type, self.field))):\n self.status = 1\n\n\nLotoBagOne = LotoBag()\n\nLotoCardUser = LotoCard('------ Ваша карточка -----')\nLotoCardPC = LotoCard('-- Карточка компьютера ---')\n\nwhile abs(LotoCardUser.status)+abs(LotoCardPC.status) == 0:\n b = LotoBagOne.pull_barrel() \n LotoCardUser.card_info()\n LotoCardPC.card_info()\n LotoCardUser.play(b, input('Зачеркнуть цифру? 
(y/n):'))\n# LotoCardUser.play(b)\n LotoCardPC.play(b)\n \nif LotoCardUser.status == 1:\n if LotoCardPC.status == 1:\n print('Ничья')\n else:\n print('Вы выйграли')\nelse:\n print('Вы проиграли')\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"lesson07/home_work/loto.py","file_name":"loto.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"315300404","text":"class Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n intervals = list(sorted(intervals, key=lambda x: x[0]))\n p = 0\n while p < len(intervals) - 1:\n a, b = intervals[p], intervals[p + 1]\n if a[1] >= b[0]:\n intervals[p] = [a[0], max(a[1], b[1])]\n intervals.pop(p + 1)\n else:\n p += 1\n return intervals\n","sub_path":"Week08/56.合并区间.py","file_name":"56.合并区间.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"495052083","text":"__AUTHOR__ = 'Reverier Xu'\n\nimport re\n\nproperties = {\n 'name': '字符串分割(正则)',\n 'categories': '字符串操作',\n 'input': {0: '输入'},\n 'output': {0: '前', 1: '后'},\n 'properties': {\n '表达式': str\n }\n}\n\ndefaults = {\n '表达式': '[$|#]'\n}\n\n\ndef main(inp: dict, settings: dict):\n out = {}\n pattern = settings['表达式']\n outs = re.split(pattern, inp[0], 1)\n out[0] = outs[0]\n try:\n out[1] = outs[1]\n except BaseException:\n out[1] = ''\n return out\n","sub_path":"Modules/DataFlow/StringCutRegexModule.py","file_name":"StringCutRegexModule.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"383346442","text":"import RPi.GPIO as gpio \r\nimport math\r\nimport time\r\nimport datetime\r\nimport csv\r\n\r\nclass enco:\r\n\r\n def __init__(self, pin_A, pin_B, pin_signal, diameter, resolution, goal):\r\n\r\n self.pin_A = pin_A\r\n self.pin_B = pin_B\r\n self.pin_signal = pin_signal\r\n self.count = 0\r\n self.precount = 0\r\n self.sign = 0\r\n self.lastB = 0\r\n self.currentB = 0\r\n lim_rot = int(goal / (math.pi * diameter) * 1000) # diameter[mm]\r\n self.lim_pul = lim_rot * resolution\r\n\r\n gpio.setwarnings(False)\r\n gpio.setmode(gpio.BCM)\r\n gpio.setup(self.pin_A, gpio.IN, pull_up_down=gpio.PUD_UP)\r\n gpio.setup(self.pin_B, gpio.IN, pull_up_down=gpio.PUD_UP)\r\n gpio.setup(self.pin_signal, gpio.OUT, initial=gpio.LOW)\r\n\r\n dt = datetime.datetime.now()\r\n file_name = \"encLog_\" + str(dt.year) + \".\" + str(dt.month) + \".\" + str(dt.day + 4) + \"_\" + str(dt.hour + 20) + \".\" + str(dt.minute) + \".csv\"\r\n self.f = open(file_name, \"a\")\r\n self.writer = csv.writer(self.f, lineterminator=\"\\n\")\r\n\r\n # datasize 2*20000\r\n self.log = [[0.0, 0]]\r\n for i in range(19999):\r\n self.log.append([0.0, 0])\r\n\r\n time.sleep(0.5)\r\n print(\">> setup of encoder is done.\")\r\n print(\">> lim_rot:%d, lim_pul = %d\" % (lim_rot, self.lim_pul))\r\n\r\n def deal(self):\r\n self.lastB = gpio.input(self.pin_B)\r\n\r\n while not(gpio.input(self.pin_A)):\r\n self.currentB = gpio.input(self.pin_B)\r\n self.sign = 1\r\n\r\n if self.sign == 1:\r\n if self.lastB == 0 and self.currentB == 1:\r\n self.count += 1\r\n if self.lastB == 1 and self.currentB == 0:\r\n self.count -= 1\r\n self.sign = 0\r\n\r\n def go(self):\r\n\r\n initial_time = time.time()\r\n now_time = time.time()\r\n goal_time = 0.0\r\n num = 0\r\n sig = 0\r\n\r\n while 
True:\r\n\r\n self.precount = self.count\r\n self.deal()\r\n\r\n # main for counting\r\n if self.precount != self.count:\r\n now_time = time.time() - initial_time\r\n self.log[num] = [now_time, self.count]\r\n print(self.count)\r\n num += 1\r\n\r\n # write data if no moving for 3 sec and num > 10000\r\n if (time.time() - initial_time > now_time + 3) and (num > 10000):\r\n print(\">> writing data so far...\")\r\n self.writer.writerows([i for i in self.log if not (i == [0.0, 0])]) # write data except element [0.0, 0]\r\n for i in range(20000):\r\n self.log[i] = [0.0, 0] # reset datalog\r\n print(\">> ok, all done.\")\r\n num = 0\r\n\r\n # output signal\r\n if (sig == 0) and (self.count >= self.lim_pul):\r\n gpio.output(self.pin_signal, gpio.HIGH)\r\n sig == 1\r\n goal_time = time.time() - initial_time\r\n\r\n # stop signal\r\n if (sig == 1) and (time.time() - initial_time > goal_time + 3.0):\r\n gpio.output(self.pin_signal, gpio.LOW)\r\n\r\n if time.time() - initial_time > 30 * 60:\r\n break\r\n\r\n def end(self):\r\n self.writer.writerows([i for i in self.log if not (i == [0.0, 0])])\r\n gpio.cleanup([self.pin_A, self.pin_B])\r\n self.f.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n try:\r\n enc = enco(20, 21, 5, 25, 25, 90)\r\n enc.go()\r\n except KeyboardInterrupt:\r\n enc.end()\r\n finally:\r\n enc.end()","sub_path":"src/roten.py","file_name":"roten.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"457729174","text":"from django.db import models\n\nclass TypUslugi(models.Model):\n typ_uslugi = models.CharField(primary_key=True, max_length=50)\n\n class Meta:\n app_label='globalnewartosci'\n verbose_name_plural = 'typ uslugi'\n\n def __unicode__(self):\n return self.typ_uslugi\n\nclass Stopa_rynkowa(models.Model):\n st_rynkowa=models.FloatField(primary_key=True)\n\n class Meta:\n app_label='globalnewartosci'\n verbose_name_plural = 'stopa rynkowa'\n# Create your models here.\n","sub_path":"DjangoProj/globalnewartosci/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"431580023","text":"import argparse\n\nfrom stests.core.utils import factory\nfrom stests.core.utils import logger\nfrom stests.core.utils.args_factory import get_argparser_for_generator\nfrom stests.generators.wg_110 import constants\nfrom stests.generators.wg_110.args import Arguments\nfrom stests.orchestration.predicates import is_run_locked\n\n\n\n# Set command line arguments.\nARGS = get_argparser_for_generator(f\"Executes {constants.DESCRIPTION} workflow.\")\n\n# CLI argument: initial CLX balance.\nARGS.add_argument(\n \"--faucet-initial-clx-balance\",\n help=f\"Initial CLX balance of faucet account. Default={constants.FAUCET_INITIAL_CLX_BALANCE}\",\n dest=\"faucet_initial_clx_balance\",\n type=int,\n default=constants.FAUCET_INITIAL_CLX_BALANCE\n )\n\n# CLI argument: initial CLX balance.\nARGS.add_argument(\n \"--contract-initial-clx-balance\",\n help=f\"Initial CLX balance of contract account. Default={constants.CONTRACT_INITIAL_CLX_BALANCE}\",\n dest=\"contract_initial_clx_balance\",\n type=int,\n default=constants.CONTRACT_INITIAL_CLX_BALANCE\n )\n\n# CLI argument: user accounts.\nARGS.add_argument(\n \"--user-accounts\",\n help=f\"Number of user accounts to generate. 
Default={constants.USER_ACCOUNTS}\",\n dest=\"user_accounts\",\n type=int,\n default=constants.USER_ACCOUNTS\n )\n\n# CLI argument: initial CLX balance.\nARGS.add_argument(\n \"--user-initial-clx-balance\",\n help=f\"Initial CLX balance of user accounts. Default={constants.USER_INITIAL_CLX_BALANCE}\",\n dest=\"user_initial_clx_balance\",\n type=int,\n default=constants.USER_INITIAL_CLX_BALANCE\n )\n\n\ndef main(args: argparse.Namespace):\n \"\"\"Entry point.\n \n \"\"\"\n # Import initialiser to setup upstream services / actors.\n import stests.initialiser\n\n # Unpack args.\n network_id = factory.create_network_id(args.network_name)\n node_id = factory.create_node_id(network_id, args.node_index)\n\n # Set execution context.\n ctx = factory.create_run_info(\n args=Arguments.create(args),\n loop_count=args.loop_count,\n loop_interval=args.loop_interval,\n network_id=network_id,\n node_id=node_id,\n run_index=args.run_index,\n run_type=constants.TYPE,\n use_stored_contracts=True\n )\n\n # Abort if a run lock cannot be acquired.\n if is_run_locked(ctx):\n logger.log_warning(f\"{constants.TYPE} :: run {args.run_index} aborted as it is currently executing.\")\n \n # Start run.\n else:\n from stests.orchestration.actors import do_run\n do_run.send(ctx)\n logger.log(f\"{constants.TYPE} :: run {args.run_index} started\")\n\n\n# Invoke entry point.\nmain(ARGS.parse_args())\n","sub_path":"stests/generators/wg_110/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"237737163","text":"# pylint: disable=C0103\n\nimport sys\nimport time\nfrom hashlib import sha1\nfrom datetime import datetime, timedelta\nfrom collections import defaultdict\n\nfrom . import database, version, archiver_listeners\nfrom .configs import Config\n\nconfig = Config()\n\nSUPPORTED_TIMESTAMP_FORMATS = (\n \"%Y%m%d %H:%M:%S.%f\",\n \"%Y-%m-%d %H:%M:%S.%fZ\",\n \"%Y-%m-%d %H:%M:%S.%f\",\n \"%Y-%m-%d %H:%M:%SZ\",\n \"%Y-%m-%d %H:%M:%S\",\n \"%Y%m%dT%H:%M:%S.%f\",\n \"%Y-%m-%dT%H:%M:%S.%f\",\n \"%Y-%m-%dT%H:%M:%S.%fZ\",\n \"%Y-%m-%dT%H:%M:%S.%f\",\n \"%Y-%m-%dT%H:%M:%SZ\",\n \"%Y-%m-%dT%H:%M:%S\",\n \"%Y-%m-%dT%H:%M:%S.%f%z\",\n )\n\n\nclass TimeAdjust:\n def __init__(self, secs, adjust_to_system):\n self._time_adjust_secs = secs\n self._adjust_to_system = adjust_to_system\n\n def secs(self):\n secs = self._time_adjust_secs\n if self._adjust_to_system:\n if time.daylight != 0 and time.localtime().tm_isdst:\n secs = secs + time.altzone\n else:\n secs = secs + time.timezone\n return secs\n\n\nclass TestItem:\n def __init__(self, archiver):\n self.archiver = archiver\n\n def parent_suite(self):\n for item in reversed(self.archiver.stack):\n if isinstance(item, Suite):\n return item\n return None\n\n def parent_test(self):\n for item in reversed(self.archiver.stack):\n if isinstance(item, Test):\n return item\n return None\n\n def _parent_item(self):\n return self.archiver.stack[-1] if self.archiver.stack else None\n\n def test_run_id(self):\n return self.archiver.test_run_id\n\n\nclass FingerprintedItem(TestItem):\n def __init__(self, archiver, name, class_name=None):\n super(FingerprintedItem, self).__init__(archiver)\n self.name = name\n self.parent_item = self._parent_item()\n if class_name:\n self.full_name = '.'.join([class_name, name])\n elif not self.parent_item or not self.parent_item.full_name:\n self.full_name = self.name\n else:\n parent_prefix = self.parent_item.full_name + '.' 
if self.parent_item else ''\n self.full_name = parent_prefix + self.name\n self.id = None\n\n self.status = None\n self.setup_status = None\n self.execution_status = None\n self.teardown_status = None\n self.failed_by_teardown = False\n\n self.start_time = None\n self.end_time = None\n self.elapsed_time = None\n self.elapsed_time_setup = None\n self.elapsed_time_execution = None\n self.elapsed_time_teardown = None\n self.critical = None\n\n self.kw_type = None\n self.kw_call_depth = 0\n self.library = None\n self.arguments = []\n self.tags = []\n self.metadata = {}\n self._last_metadata_name = None\n\n self.child_test_ids = []\n self.child_suite_ids = []\n\n self.subtree_fingerprints = []\n self.subtree_statuses = []\n self.fingerprint = None\n self.setup_fingerprint = None\n self.execution_fingerprint = None\n self.teardown_fingerprint = None\n\n self._execution_path = None\n self._child_counters = defaultdict(lambda: 0)\n\n def insert_results(self):\n raise NotImplementedError()\n\n def update_status(self, status, start_time, end_time, elapsed=None, critical=None):\n if status == 'NOT_RUN':\n # If some keyword is not executed the execution was a dryrun\n self.archiver.output_from_dryrun = True\n self.status = status\n self.start_time = start_time\n self.end_time = end_time\n if self.start_time and self.end_time:\n start = adjusted_timestamp_to_datetime(self.start_time, self.archiver.time_adjust.secs())\n end = adjusted_timestamp_to_datetime(self.end_time, self.archiver.time_adjust.secs())\n self.elapsed_time = int((end - start).total_seconds()*1000)\n elif elapsed is not None:\n self.elapsed_time = elapsed\n self.critical = critical\n\n def _hashing_name(self):\n return self.full_name\n\n def finish(self):\n self.execution_path() # Make sure this is called before exiting any item\n self.handle_child_statuses()\n if not self.status:\n if self.execution_status:\n self.status = self.execution_status\n else:\n self.status = 'PASS'\n if not self.elapsed_time:\n self.elapsed_time = (self.elapsed_time_setup if self.elapsed_time_setup else 0\n + self.elapsed_time_execution if self.elapsed_time_execution else 0\n + self.elapsed_time_teardown if self.elapsed_time_teardown else 0)\n self.calculate_fingerprints()\n self.propagate_fingerprints_status_and_elapsed_time()\n self.insert_results()\n\n def calculate_fingerprints(self):\n \"\"\"Calculate identification fingerprints using sha1 hashing.\"\"\"\n # sha1 is not considered secure anymore but in this use case\n # it is not used for any security functionality.\n # sha1() lines marked nosec for Bandit linter to ignore.\n\n if self.subtree_fingerprints:\n execution = sha1() # nosec\n for child in self.subtree_fingerprints:\n execution.update(child.encode('utf-8'))\n self.execution_fingerprint = execution.hexdigest()\n\n fingerprint = sha1() # nosec\n fingerprint.update(self._hashing_name().encode('utf-8'))\n fingerprint.update(str(self.setup_fingerprint).encode('utf-8'))\n fingerprint.update(str(self.execution_fingerprint).encode('utf-8'))\n fingerprint.update(str(self.teardown_fingerprint).encode('utf-8'))\n fingerprint.update(str(self.status).encode('utf-8'))\n fingerprint.update(str(self.arguments).encode('utf-8'))\n self.fingerprint = fingerprint.hexdigest()\n\n def handle_child_statuses(self):\n if self.subtree_statuses:\n if 'FAIL' in self.subtree_statuses:\n # Single child failure will fail the execution\n self.execution_status = 'FAIL'\n elif 'PASS' in self.subtree_statuses:\n # Single passing child execution and item is not considered to be 
skipped\n self.execution_status = 'PASS'\n else:\n self.execution_status = 'SKIPPED'\n\n def propagate_fingerprints_status_and_elapsed_time(self):\n if self.kw_type == 'setup':\n self.parent_item.setup_fingerprint = self.fingerprint\n self.parent_item.setup_status = self.status\n self.parent_item.elapsed_time_setup = self.elapsed_time\n elif self.kw_type == 'teardown':\n self.parent_item.teardown_fingerprint = self.fingerprint\n self.parent_item.teardown_status = self.status\n self.parent_item.elapsed_time_teardown = self.elapsed_time\n else:\n if self.parent_item:\n self.parent_item.subtree_fingerprints.append(self.fingerprint)\n self.parent_item.subtree_statuses.append(self.status)\n if self.elapsed_time:\n if self.parent_item.elapsed_time_execution:\n self.parent_item.elapsed_time_execution += self.elapsed_time\n else:\n self.parent_item.elapsed_time_execution = self.elapsed_time\n\n def status_and_fingerprint_values(self):\n return {'status': self.status,\n 'setup_status': self.setup_status,\n 'execution_status': self.execution_status,\n 'teardown_status': self.teardown_status,\n 'start_time': adjusted_timestamp(self.start_time, self.archiver.time_adjust.secs())\n if self.start_time else None,\n 'elapsed': self.elapsed_time,\n 'setup_elapsed': self.elapsed_time_setup,\n 'execution_elapsed': self.elapsed_time_execution,\n 'teardown_elapsed': self.elapsed_time_teardown,\n 'fingerprint': self.fingerprint,\n 'setup_fingerprint': self.setup_fingerprint,\n 'execution_fingerprint': self.execution_fingerprint,\n 'teardown_fingerprint': self.teardown_fingerprint}\n\n def fail_children(self):\n for suite_id in self.child_suite_ids:\n key_values = {'suite_id': suite_id, 'test_run_id': self.test_run_id()}\n self.archiver.db.update('suite_result', {'status': 'FAIL'}, key_values)\n for test_id in self.child_test_ids:\n key_values = {'test_id': test_id, 'test_run_id': self.test_run_id()}\n self.archiver.db.update('test_result', {'status': 'FAIL'}, key_values)\n\n def set_execution_path(self, execution_path):\n self._execution_path = execution_path\n\n @staticmethod\n def _execution_path_identifier():\n return ''\n\n def child_counter(self, execution_path_identifier):\n self._child_counters[execution_path_identifier] += 1\n return self._child_counters[execution_path_identifier]\n\n def execution_path(self):\n if not self._execution_path:\n identifier = self._execution_path_identifier()\n if self.parent_item:\n identifier += str(self.parent_item.child_counter(self._execution_path_identifier()))\n else:\n identifier += '1'\n if self.parent_item and self.parent_item.execution_path():\n self._execution_path = (self.parent_item.execution_path() + '-' + identifier)\n else:\n self._execution_path = identifier\n return self._execution_path\n\n\nclass TestRun(FingerprintedItem):\n def __init__(self, archiver, archived_using, generated, generator, rpa, dryrun):\n super(TestRun, self).__init__(archiver, '')\n data = {'archived_using': archived_using,\n 'archiver_version': version.ARCHIVER_VERSION,\n 'generated': adjusted_timestamp(generated, self.archiver.time_adjust.secs())\n if generated else None,\n 'generator': generator,\n 'rpa': rpa,\n 'dryrun': dryrun,\n 'schema_version': self.archiver.db.current_schema_version()}\n try:\n self.id = self.archiver.db.insert_and_return_id('test_run', data)\n except database.IntegrityError:\n raise database.IntegrityError(\n 'ERROR: Unable to insert results. Probably the test archive schema is not '\n 'compatible with the version of TestArchiver you are using. 
'\n 'Consider updating to 2.0 or later.')\n\n def execution_path(self):\n return ''\n\n def insert_results(self):\n raise NotImplementedError()\n\n\nclass Suite(FingerprintedItem):\n def __init__(self, archiver, name, repository):\n super(Suite, self).__init__(archiver, name)\n data = {'full_name': self.full_name, 'name': name, 'repository': repository}\n self.id = self.archiver.db.return_id_or_insert_and_return_id('suite', data,\n ['repository', 'full_name'])\n\n @staticmethod\n def _execution_path_identifier():\n return 's'\n\n def insert_results(self):\n data = {'suite_id': self.id, 'test_run_id': self.test_run_id(),\n 'execution_path': self.execution_path()}\n data.update(self.status_and_fingerprint_values())\n if self.id not in self.parent_item.child_suite_ids:\n try:\n self.archiver.db.insert('suite_result', data)\n except database.IntegrityError:\n print(\"ERROR: database.IntegrityError: these results have already been archived!\")\n sys.exit(1)\n self.insert_metadata()\n if self.failed_by_teardown:\n self.fail_children()\n if self.parent_item:\n self.parent_item.child_suite_ids.append(self.id)\n self.parent_item.child_suite_ids.extend(self.child_suite_ids)\n self.parent_item.child_test_ids.extend(self.child_test_ids)\n\n else:\n print(\"WARNING: duplicate results for suite '{}' are ignored\".format(self.full_name))\n\n def insert_metadata(self):\n # If the top suite add/override metadata with metadata given to archiver\n if isinstance(self.parent_item, TestRun):\n if self.archiver.additional_metadata:\n for name in self.archiver.additional_metadata:\n self.metadata[name] = self.archiver.additional_metadata[name]\n if self.archiver.config.time_adjust_secs != 0:\n self.metadata[\"time_adjust_secs\"] = self.archiver.config.time_adjust_secs\n if self.archiver.config.time_adjust_with_system_timezone:\n self.metadata[\"time_adjust_secs_total\"] = self.archiver.time_adjust.secs()\n\n for name in self.metadata:\n content = self.metadata[name]\n data = {'name': name, 'value': content,\n 'suite_id': self.id, 'test_run_id': self.test_run_id()}\n self.archiver.db.insert('suite_metadata', data)\n if name.startswith('series'):\n if '#' in content:\n series_name, build_number = content.split('#')\n else:\n series_name, build_number = content, None\n self.archiver.test_series[series_name] = build_number\n elif name == 'team':\n self.archiver.team = content\n\n def register_metadata(self, name=None, value=None):\n if name:\n self._last_metadata_name = name\n if value:\n self.metadata[self._last_metadata_name] = value\n\n\nclass Test(FingerprintedItem):\n def __init__(self, archiver, name, class_name):\n super(Test, self).__init__(archiver, name, class_name)\n data = {'full_name': self.full_name, 'name': name, 'suite_id': self.parent_item.id}\n self.id = self.archiver.db.return_id_or_insert_and_return_id('test_case', data,\n ['suite_id', 'full_name'])\n\n @staticmethod\n def _execution_path_identifier():\n return 't'\n\n def insert_results(self):\n if self.id not in self.parent_item.child_test_ids:\n data = {'test_id': self.id, 'test_run_id': self.test_run_id(), 'critical': self.critical,\n 'execution_path': self.execution_path()}\n data.update(self.status_and_fingerprint_values())\n self.archiver.db.insert('test_result', data)\n if self.subtree_fingerprints and self.archiver.config.archive_keywords:\n data = {'fingerprint': self.execution_fingerprint, 'keyword': None, 'library': None,\n 'status': self.execution_status, 'arguments': self.arguments}\n self.archiver.db.insert_or_ignore('keyword_tree', 
data, ['fingerprint'])\n if self.archiver.config.archive_keywords:\n self.insert_subtrees()\n self.insert_tags()\n self.parent_item.child_test_ids.append(self.id)\n else:\n print(\"WARNING: duplicate results for test '{}' are ignored\".format(self.full_name))\n\n def insert_tags(self):\n for tag in self.tags:\n data = {'tag': tag, 'test_id': self.id, 'test_run_id': self.test_run_id()}\n self.archiver.db.insert('test_tag', data)\n\n def insert_subtrees(self):\n call_index = 0\n for subtree in self.subtree_fingerprints:\n data = {'fingerprint': self.execution_fingerprint,\n 'subtree': subtree, 'call_index': call_index}\n key_values = ['fingerprint', 'subtree', 'call_index']\n self.archiver.db.insert_or_ignore('tree_hierarchy', data, key_values)\n call_index += 1\n\n\nclass Keyword(FingerprintedItem):\n def __init__(self, archiver, name, library, kw_type, arguments):\n super(Keyword, self).__init__(archiver, name)\n self.library = library\n self.kw_type = kw_type\n self.kw_call_depth = self.parent_item.kw_call_depth + 1\n if arguments:\n self.arguments.extend(arguments)\n\n @staticmethod\n def _execution_path_identifier():\n return 'k'\n\n def insert_results(self):\n if self.kw_type == 'teardown' and self.status == 'FAIL':\n self.parent_item.failed_by_teardown = True\n if self.archiver.config.archive_keywords:\n data = {'fingerprint': self.fingerprint, 'keyword': self.name, 'library': self.library,\n 'status': self.status, 'arguments': self.arguments}\n self.archiver.db.insert_or_ignore('keyword_tree', data, ['fingerprint'])\n self.insert_subtrees()\n if self.archiver.config.archive_keyword_statistics:\n self.update_statistics()\n\n def insert_subtrees(self):\n call_index = 0\n for subtree in self.subtree_fingerprints:\n data = {'fingerprint': self.fingerprint, 'subtree': subtree, 'call_index': call_index}\n key_values = ['fingerprint', 'subtree', 'call_index']\n self.archiver.db.insert_or_ignore('tree_hierarchy', data, key_values)\n call_index += 1\n\n def _hashing_name(self):\n return self.library + '.' 
+ self.name\n\n def update_statistics(self):\n if self.fingerprint in self.archiver.keyword_statistics:\n stat_object = self.archiver.keyword_statistics[self.fingerprint]\n stat_object['calls'] += 1\n if self.elapsed_time:\n if stat_object['max_execution_time'] is None:\n stat_object['max_execution_time'] = self.elapsed_time\n else:\n stat_object['max_execution_time'] = max(stat_object['max_execution_time'],\n self.elapsed_time)\n if stat_object['min_execution_time'] is None:\n stat_object['min_execution_time'] = self.elapsed_time\n else:\n stat_object['min_execution_time'] = min(stat_object['min_execution_time'],\n self.elapsed_time)\n if stat_object['cumulative_execution_time'] is None:\n stat_object['cumulative_execution_time'] = self.elapsed_time\n else:\n stat_object['cumulative_execution_time'] += self.elapsed_time\n stat_object['max_call_depth'] = max(stat_object['max_call_depth'], self.kw_call_depth)\n else:\n self.archiver.keyword_statistics[self.fingerprint] = {\n 'fingerprint': self.fingerprint,\n 'test_run_id': self.test_run_id(),\n 'calls': 1,\n 'max_execution_time': self.elapsed_time,\n 'min_execution_time': self.elapsed_time,\n 'cumulative_execution_time': self.elapsed_time,\n 'max_call_depth': self.kw_call_depth,\n }\n\n\nclass LogMessage(TestItem):\n def __init__(self, archiver, log_level, timestamp):\n super(LogMessage, self).__init__(archiver)\n self.parent_item = self._parent_item()\n self.log_level = log_level\n self.timestamp = timestamp\n self.id = None\n self._time_adjust = TimeAdjust(archiver.config.time_adjust_secs,\n archiver.config.time_adjust_with_system_timezone)\n\n def insert(self, content):\n if (not self.archiver.config.ignore_logs and\n not self.archiver.config.log_level_ignored(self.log_level)):\n message_length = config.max_log_message_length\n\n if message_length < 0:\n message = content[message_length:]\n elif message_length > 0:\n message = content[:message_length]\n else:\n message = content\n data = {'test_run_id': self.test_run_id(),\n 'timestamp': adjusted_timestamp(self.timestamp, self.archiver.time_adjust.secs()),\n 'log_level': self.log_level,\n 'message': message,\n 'test_id': self.parent_test().id if self.parent_test() else None,\n 'suite_id': self.parent_suite().id,\n 'execution_path': self.execution_path()}\n self.id = self.archiver.db.insert('log_message', data)\n\n def execution_path(self):\n return self.parent_item.execution_path()\n\n\ndef database_connection(configuration):\n return database.get_connection_and_check_schema(configuration)\n\n\nclass Archiver:\n def __init__(self, connection, configuration, build_number_cache=None):\n self.config = configuration\n self.test_type = None\n self.additional_metadata = self.config.metadata\n self.test_run_id = None\n self.test_series = {}\n self.team = self.config.team\n self.series = self.config.series\n self.repository = self.config.repository\n\n self.archived_using = None\n self.output_from_dryrun = False\n self.db = connection\n self.stack = []\n self.keyword_statistics = {}\n self.build_number_cache = build_number_cache or {}\n self.execution_context = self.config.execution_context\n self.changes = self.config.changes\n self.execution_id = self.config.execution_id\n\n self.time_adjust = TimeAdjust(self.config.time_adjust_secs,\n self.config.time_adjust_with_system_timezone)\n\n self.listeners = []\n if self.config.change_engine_url:\n self.listeners.append(\n archiver_listeners.ChangeEngineListener(self, self.config.change_engine_url))\n\n def current_item(self, expected_type=None):\n 
item = self.stack[-1] if self.stack else None\n if expected_type:\n if not isinstance(item, expected_type):\n print(\"PARSING ERROR - printing current stack:\")\n for item in self.stack:\n print(item.__class__.__name__)\n raise Exception(\"Expected to have '{}' but had '{}' currently in stack\".format(\n expected_type,\n item.__class__.__name__))\n return item\n\n def current_item_is_keyword(self):\n if isinstance(self.current_item(), Keyword):\n return True\n return False\n\n def current_item_is_test(self):\n if isinstance(self.current_item(), Test):\n return True\n return False\n\n def current_item_is_suite(self):\n if isinstance(self.current_item(), Suite):\n return True\n return False\n\n def current_suite(self):\n if self.current_item():\n return self.current_item().parent_suite()\n return None\n\n def current_suites(self):\n return [item for item in self.stack if isinstance(item, Suite)]\n\n def current_keyword(self):\n keyword = self.current_item(Keyword)\n return keyword\n\n def begin_test_run(self, archived_using, generated, generator, rpa, dryrun):\n test_run = TestRun(self, archived_using, generated, generator, rpa, dryrun)\n self.archived_using = archived_using\n self.test_run_id = test_run.id\n self.stack.append(test_run)\n\n def update_dryrun_status(self):\n data = {'dryrun': self.output_from_dryrun}\n self.db.update('test_run', data, {'id': self.test_run_id})\n\n def end_test_run(self):\n for content in self.config.series:\n if '#' in content:\n series_name, build_number = content.split('#')\n else:\n series_name, build_number = content, None\n self.test_series[series_name] = build_number\n for name in self.test_series:\n self.report_series(name, self.test_series[name])\n if not self.test_series:\n self.report_series('default series', None)\n self.report_series('All builds', None)\n if self.config.archive_keywords and self.config.archive_keyword_statistics:\n self.report_keyword_statistics()\n\n self.db.commit()\n for listener in self.listeners:\n listener.end_run()\n\n return self.build_number_cache\n\n def report_series(self, name, build_id):\n data = {'team': self.team if self.team else 'No team',\n 'name': name}\n series_id = self.db.return_id_or_insert_and_return_id('test_series', data, ['team', 'name'])\n if build_id:\n try:\n build_number = int(build_id)\n except ValueError:\n build_number = self._build_number_by_id(series_id, build_id)\n else:\n if series_id in self.build_number_cache:\n build_number = self.build_number_cache[series_id]\n else:\n previous_build_number = self.db.max_value('test_series_mapping', 'build_number',\n {'series': series_id})\n build_number = previous_build_number + 1 if previous_build_number else 1\n self.build_number_cache[series_id] = build_number\n data = {\n 'series': series_id,\n 'test_run_id': self.test_run_id,\n 'build_number': build_number,\n 'build_id': build_id,\n }\n self.db.insert('test_series_mapping', data)\n\n def _build_number_by_id(self, series_id, build_id):\n build_number = self.db.fetch_one_value('test_series_mapping', 'build_number',\n {'build_id': build_id, 'series': series_id})\n if not build_number:\n previous_build_number = self.db.max_value('test_series_mapping', 'build_number',\n {'series': series_id})\n build_number = previous_build_number + 1 if previous_build_number else 1\n return build_number\n\n def begin_suite(self, name, execution_path=None):\n suite = Suite(self, name, 'repo')\n suite.set_execution_path(execution_path)\n self.stack.append(suite)\n return suite\n\n def end_suite(self, attributes=None):\n if 
attributes:\n self.current_item(Suite).update_status(attributes['status'], attributes['starttime'],\n attributes['endtime'])\n self.current_item(Suite).metadata = attributes['metadata']\n self.current_item(Suite).finish()\n suite = self.stack.pop()\n for listener in self.listeners:\n listener.suite_result(suite)\n\n def begin_test(self, name, class_name=None, execution_path=None):\n test = Test(self, name, class_name)\n test.set_execution_path(execution_path)\n self.stack.append(test)\n return test\n\n def end_test(self, attributes=None):\n if attributes:\n critical = attributes['critical'] == 'yes' if 'critical' in attributes else None\n self.current_item(Test).update_status(attributes['status'], attributes['starttime'],\n attributes['endtime'], critical=critical)\n self.current_item(Test).tags = attributes['tags']\n self.current_item(Test).finish()\n test = self.stack.pop()\n for listener in self.listeners:\n listener.test_result(test)\n\n def begin_status(self, status, start_time=None, end_time=None, elapsed=None, critical=None):\n self.current_item().update_status(status, start_time, end_time, elapsed, critical)\n\n def update_status(self, status):\n self.current_item().status = status\n\n def begin_keyword(self, name, library, kw_type, arguments=None):\n keyword = Keyword(self, name, library, kw_type.lower(), arguments)\n self.stack.append(keyword)\n return keyword\n\n def end_keyword(self, attributes=None):\n if attributes:\n self.current_item(Keyword).update_status(attributes['status'], attributes['starttime'],\n attributes['endtime'])\n self.current_item(Keyword).finish()\n self.stack.pop()\n\n def keyword(self, name, library, kw_type, status, arguments=None):\n keyword = self.begin_keyword(name, library, kw_type, arguments)\n self.update_status(status)\n self.end_keyword()\n return keyword\n\n def update_arguments(self, argument):\n self.current_item(Keyword).arguments.append(argument)\n\n def update_tags(self, tag):\n self.current_item(Test).tags.append(tag)\n\n def metadata(self, name, content):\n self.begin_metadata(name)\n self.end_metadata(content)\n\n def begin_metadata(self, name):\n self.current_item(Suite).register_metadata(name=name)\n\n def end_metadata(self, content):\n self.current_item(Suite).register_metadata(value=content)\n\n def log_message(self, level, content, timestamp=None):\n self.begin_log_message(level, timestamp)\n self.end_log_message(content)\n\n def begin_log_message(self, level, timestamp=None):\n self.stack.append(LogMessage(self, level, timestamp))\n\n def end_log_message(self, content):\n self.current_item(LogMessage).insert(content)\n self.stack.pop()\n\n def report_keyword_statistics(self):\n for fingerprint in self.keyword_statistics:\n self.db.insert('keyword_statistics', self.keyword_statistics[fingerprint])\n\n\ndef timestamp_to_datetime(timestamp):\n for timestamp_format in SUPPORTED_TIMESTAMP_FORMATS:\n try:\n parsed_datetime = datetime.strptime(timestamp, timestamp_format)\n return parsed_datetime\n except ValueError:\n pass\n raise Exception(\"timestamp: '{}' is in unsupported format\".format(timestamp))\n\n\ndef adjusted_timestamp_to_datetime(timestamp, time_adjust_secs=0):\n adjusted_datetime = timestamp_to_datetime(timestamp)\n adjustment = abs(time_adjust_secs)\n if time_adjust_secs > 0:\n adjusted_datetime = adjusted_datetime + timedelta(seconds=adjustment)\n elif time_adjust_secs < 0:\n adjusted_datetime = adjusted_datetime - timedelta(seconds=adjustment)\n return adjusted_datetime\n\n\ndef adjusted_timestamp(timestamp, 
time_adjust_secs=0):\n adjusted_stamp = timestamp\n if timestamp and time_adjust_secs != 0:\n adjusted_datetime = adjusted_timestamp_to_datetime(timestamp, time_adjust_secs)\n adjusted_stamp = adjusted_datetime.isoformat(timespec='milliseconds')\n return adjusted_stamp\n","sub_path":"test_archiver/archiver.py","file_name":"archiver.py","file_ext":"py","file_size_in_byte":30819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"640507303","text":"from numpy.random import seed\nseed(5393)\nfrom tensorflow import set_random_seed\nset_random_seed(12011)\n\nimport os\n\nimport numpy as np\nimport pandas as pd \nfrom scipy import sparse\n\nfrom sklearn.preprocessing import LabelEncoder, LabelBinarizer\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\n\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\n\nimport logging\nlogging.basicConfig(level = logging.INFO)\n\nEMBED_DIM = 300\nVOCAB_SIZE = 5000\nmax_len = 1000\nbatch_size = 16\nn_folds = 5\nfold_dir = \"/data/victor/violence-workshop/batches/reversefolds\"\ndata_pkl = \"../../data/dataframe_with_scores_withdoc2vec.pkl\"\n\ndef pad_csr(a, newshape):\n \"\"\" Pads csr_matrix with zeros. Modifies a inplace. \"\"\"\n n, m = a.shape\n a._shape = newshape\n a.indptr = np.pad(a.indptr, (0, newshape[0] - n), 'edge')\n\ndef filter_nans(seq):\n \"\"\" Filters out floats (np.nan) from list \"\"\"\n return np.array([x for x in seq if not isinstance(x, float)])\n\ndef pad_or_trim(seq, max_len=1000):\n \"\"\" Pads or trims seq to have max_len rows \"\"\"\n n, m = seq.shape\n \n if n > max_len:\n seq = seq[-max_len:, :]\n elif n < max_len:\n if sparse.issparse(seq):\n pad_csr(seq, (max_len, m))\n else:\n seq = np.r_[seq, np.zeros((max_len - n, m))]\n return seq\n\ndef process_ngrams(batch_features, ngram_features):\n \"\"\" Transform batch_features into tensor of dims:\n (n, max_len, #features) where n is len(batch_features)\"\"\"\n n = batch_features.shape[0]\n\n batch_features = batch_features.apply(ngram_features.transform)\\\n .apply(pad_or_trim)\n\n batch_features = sparse.vstack(batch_features)\n\n batch_features = batch_features.toarray()\\\n .reshape(n, max_len, -1)\n\n return batch_features\n\ndef process_scores(X):\n \"\"\" Transforms X into tensor of dims:\n (n, max_len, #features) where n is len(X).\n \n This is a special case of process for lists of scores\"\"\"\n batch_scores = X.apply(np.array)\\\n .apply(lambda x: x.reshape(-1, 1))\\\n .apply(pad_or_trim)\n\n batch_scores = np.concatenate(batch_scores.values, axis = 0)\\\n .reshape(-1, max_len, 1)\n\n return batch_scores\n\n\n############################################################\n# Load Data\n############################################################\ndata = pd.read_pickle(data_pkl)\n\n# Encode genre\nlb_genre = LabelEncoder()\ndata['genre'] = lb_genre.fit_transform(data['genre'])\n\n\n############################################################\n# 3 to 5 chars w/ spaces\n# unigrams + bigrams\n############################################################\n# This defines the analyzer to be used with Countvectorizer\ndef char_ngram_tokenizer(text, ngram_range):\n def aux(text, ngram_size):\n for i in range(len(text) - ngram_size):\n yield text[i : i + ngram_size]\n\n for n in range(*ngram_range):\n for ngram in aux(text, n):\n yield ngram\n \nngram_features = FeatureUnion([\n 
(\"char_ngrams\", CountVectorizer(analyzer = lambda text: char_ngram_tokenizer(text, ngram_range=(3, 6)),\n max_features = VOCAB_SIZE)),\n (\"token_ngrams\", CountVectorizer(ngram_range=(1, 2),\n max_features=VOCAB_SIZE))\n ])\n\ntfidf_ = TfidfVectorizer(ngram_range=(1, 2), max_features=VOCAB_SIZE)\n\n############################################################\n# Batch generation\n############################################################\ndef process(X, Y, i, ngram_features, batch_dir, tfidf_transformer = None):\n # Features\n ## ngrams\n #logging.info(\"ngrams\")\n #batch_ngrams = process_ngrams(X['sentences'].iloc[i : i + batch_size], ngram_features)\n #np.savez(os.path.join(batch_dir, \"{}_ngrams\".format(i)),\n # features = batch_ngrams)\n #batch_ngrams = None\n \n ## tfidf\n #logging.info(\"tfidf\")\n #batch_tfidf = process_ngrams(X['sentences'].iloc[i : i + batch_size], tfidf_transformer)\n #np.savez(os.path.join(batch_dir, \"{}_tfidf\".format(i)),\n # features = batch_tfidf)\n #batch_tfidf = None\n\n # ## Word2vec\n #logging.info(\"word2vec\")\n #batch_word2vec = X['word2vec_sent_mean_vec'].iloc[i : i + batch_size]\\\n # .apply(filter_nans)\\\n # .apply(pad_or_trim)\n #np.savez(os.path.join(batch_dir, \"{}_word2vec\".format(i)),\n # features = batch_word2vec)\n #batch_word2vec = None\n\n # paragraph2vec\n logging.info(\"paragraph2vec\")\n batch_paragraph2vec = X['doc2vec_vectors'].iloc[i : i + batch_size]\\\n\t\t\t\t\t .apply(filter_nans)\\\n\t\t\t\t\t .apply(pad_or_trim)\n np.savez(os.path.join(batch_dir, \"{}_doc2vec\".format(i)),\n features = batch_paragraph2vec)\n batch_paragraph2vec = None\n\n # ## Lexicons\n #logging.info(\"Empath\")\n #batch_empath = X['empath_sentence'].iloc[i : i + batch_size]\\\n # .apply(np.array)\\\n # .apply(pad_or_trim)\n #np.savez(os.path.join(batch_dir, \"{}_empath\".format(i)),\n # empath = batch_empath)\n #logging.info(\"Lexicons\")\n #batch_lexicon = process_scores(X['abusive_scores'].iloc[i : i + batch_size])\n #batch_vader = process_scores(X['vader_scores'].iloc[i : i + batch_size])\n #batch_afinn = process_scores(X['afinn_scores'].iloc[i : i + batch_size])\n #batch_hatebase = X['hatebase_sentence'].iloc[i : i + batch_size].apply(pad_or_trim)\n #np.savez(os.path.join(batch_dir, \"{}_lexicon\".format(i)),\n # abusive_scores = batch_lexicon,\n # vader = batch_vader,\n # afinn = batch_afinn,\n # hatebase = batch_hatebase)\n\n # batch_lexicon = None\n #batch_vader = None\n #batch_afinn = None\n #batch_hatebase = None\n\n ## Save labels\n #logging.info(\"Labels\")\n #batch_labels = Y[i : i + batch_size]\n #np.savez(os.path.join(batch_dir, \"{}_labels\".format(i)),\n # labels = batch_labels)\n\n\n ## Save metadata\n #logging.info(\"Metadata\")\n #batch_genre = X['genre'][i : i + batch_size]\n #np.savez(os.path.join(batch_dir, \"{}_meta\".format(i)),\n # genre = batch_genre)\n\n logging.info(\"Done for {}\".format(i))\n\n\nskf = StratifiedKFold(n_splits = n_folds, random_state = 42)\nlb = LabelBinarizer()\nY = lb.fit_transform(data['violence_rating'])\n\nfor k, (train, test) in enumerate(skf.split(data.violence_rating, data.violence_rating)):\n \n train_dir = os.path.join(fold_dir, str(k), \"train\")\n test_dir = os.path.join(fold_dir, str(k), \"test\")\n eval_dir = os.path.join(fold_dir, str(k), \"eval\")\n\n for t in [train_dir, test_dir, eval_dir]:\n os.makedirs(t, exist_ok = True)\n\n X_train, X_test = data.iloc[train], data.iloc[test]\n Y_train, Y_test = Y[train], Y[test]\n X_train, X_eval, Y_train, Y_eval = train_test_split(X_train, Y_train, 
test_size = 64, random_state = 666)\n\n # Fit vocab\n ngram_features.fit(data.iloc[train]['text'], Y_train)\n tfidf_.fit(data.iloc[train]['text'], Y_train)\n\n # Create batches\n for i in tqdm(range(0, X_train.shape[0], batch_size)):\n process(X_train, Y_train, i, ngram_features = ngram_features, batch_dir = train_dir, tfidf_transformer = tfidf_)\n\n for i in tqdm(range(0, X_eval.shape[0], batch_size)):\n process(X_eval, Y_eval, i, ngram_features = ngram_features, batch_dir = eval_dir, tfidf_transformer = tfidf_)\n\n for i in tqdm(range(0, X_test.shape[0], batch_size)):\n process(X_test, Y_test, i, ngram_features = ngram_features, batch_dir = test_dir, tfidf_transformer = tfidf_)\n","sub_path":"experiments/createReverseBatches.py","file_name":"createReverseBatches.py","file_ext":"py","file_size_in_byte":7828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"354369375","text":"import django_filters\nfrom User.models import PlayUser,Agent\nfrom rest_framework import serializers\n\nclass PlayUserFilter(django_filters.rest_framework.FilterSet):\n \"\"\"\n http://127.0.0.1:8000/Api/User/playuser_api_search/?RegDate_gte_after=2018-07-18&RegDate_gte_before=2018-07-20\n \"\"\"\n RegDate_gte = django_filters.DateFromToRangeFilter(field_name='RegDate', lookup_expr='gte')\n class Meta:\n model = PlayUser\n #fields = ['UserName', 'Proxy__UserName','BankCardNum','ActualName','RegDate_gte',]\n fields = \"__all__\"\n\n\nclass AgentFilter(django_filters.rest_framework.FilterSet):\n class Meta:\n model = Agent\n fields = \"__all__\"\n #fields = ['UserName','ActualName',]","sub_path":"User/utils/core/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"239563287","text":"\nimport random\nzwierzeta = [\"tygrys\", \"lew\", \"pantera\", \"slon\", \"zyrafa\", \"gepard\", \"hipopotam\", \"nietoperz\", \"sowa\"]\n\n#wyswietlenie 2 elementu od końca\n#print(zwierzeta[-2])\n#exit(1)\n\n\n\n\n\ndef wyswietlenieSlow(lista_slow, a):\n #for liczba in lista_slow[0:a]:\n # print(liczba)\n for liczba in range(0, a):\n print(random.choice(lista_slow))\n\nwhile True:\n try:\n a = (int(input(\"Podaj liczbe od 1 do 9: \")))\n if a in range(1,10):\n print(a)\n wyswietlenieSlow(zwierzeta, a)\n break\n else:\n print(\"wrong number\")\n except ValueError:\n print(\"this is not a number\")","sub_path":"zajecia3/praca domowa4.py","file_name":"praca domowa4.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"48552078","text":"import time\nimport argparse\n\nfrom env import Env\nfrom examples import *\n\n\nWAIT_TIME = 5\n\ndef human2human(env, wait=WAIT_TIME):\n env.gui.render()\n while True:\n board, is_win = env.human_step()\n\n if is_win == 1 or is_win == -1:\n time.sleep(wait)\n break\n\ndef human2ai(env, agent, first=True, wait=WAIT_TIME):\n env.gui.render()\n if first:\n env.human_step()\n\n while True:\n pos = agent.get_action(env)\n board, is_win = env.ai_step(pos)\n if is_win == 1 or is_win == -1:\n time.sleep(wait)\n break\n\n board, is_win = env.human_step()\n if is_win == 1 or is_win == -1:\n time.sleep(wait)\n break\n\ndef ai2ai(env, agent_1, agent_2, render=True, wait=WAIT_TIME):\n if render:\n env.gui.render()\n while True:\n\n pos_1 = agent_1.get_action(env)\n board, is_win = env.ai_step(pos_1, render)\n\n if is_win == 1 or is_win == -1:\n 
time.sleep(wait)\n break\n\n pos_2 = agent_2.get_action(env)\n board, is_win = env.step(pos_2)\n if is_win == 1 or is_win == -1:\n time.sleep(wait)\n break\n\n\nif __name__ == \"__main__\":\n # don't change the\n parser = argparse.ArgumentParser()\n parser.add_argument('-size', type=int, default=15)\n args = parser.parse_args()\n env = Env(size=args.size)\n\n agent = Agent()\n human2ai(env, agent)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"131614179","text":"# Copyright 2013, Sandia Corporation. Under the terms of Contract\n# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain\n# rights in this software.\n\nimport slycat.web.server.database.couchdb\nimport slycat.web.server.database.scidb\nimport slycat.web.server.worker\n\nimport cherrypy\nimport json\nimport random\nimport Queue\n\nclass prototype(slycat.web.server.worker.prototype):\n \"\"\"Worker that serves up rectangular table \"chunks\", for interactive browsing\n of giant tables.\"\"\"\n def __init__(self, security, name):\n slycat.web.server.worker.prototype.__init__(self, security, name)\n self.request = Queue.Queue()\n self.response = Queue.Queue()\n\n def get_table_chunker_metadata(self, arguments):\n \"\"\"Called to retrieve metadata describing the underlying table.\"\"\"\n self.request.put((\"metadata\", None))\n return self.response.get()\n\n def get_table_chunker_search(self, arguments):\n \"\"\"Called to search for a column value.\"\"\"\n try:\n search = [spec.split(\":\") for spec in arguments[\"query\"].split(\",\")]\n search = [(int(column), float(value)) for column, value in search]\n except:\n raise cherrypy.HTTPError(\"400 Malformed search argument must match :,...\")\n\n self.request.put((\"search\", search))\n return self.response.get()\n\n def get_table_chunker_chunk(self, arguments):\n \"\"\"Called to retrieve the given chunk. Note that the returned chunk may\n contain a subset of the requested data, or no data at all.\"\"\"\n try:\n rows = [spec.split(\"-\") for spec in arguments[\"rows\"].split(\",\")]\n rows = [(int(spec[0]), int(spec[1]) if len(spec) == 2 else int(spec[0]) + 1) for spec in rows]\n rows = [row for begin, end in rows for row in range(begin, end)]\n except:\n raise cherrypy.HTTPError(\"400 Malformed rows argument must be a comma separated collection of row indices or half-open index ranges.\")\n\n try:\n columns = [spec.split(\"-\") for spec in arguments[\"columns\"].split(\",\")]\n columns = [(int(spec[0]), int(spec[1]) if len(spec) == 2 else int(spec[0]) + 1) for spec in columns]\n columns = [column for begin, end in columns for column in range(begin, end)]\n except:\n raise cherrypy.HTTPError(\"400 Malformed columns argument must be a comma separated collection of column indices or half-open index ranges.\")\n\n self.request.put((\"chunk\", (rows, columns)))\n return self.response.get()\n\n def put_table_chunker_sort(self, arguments):\n \"\"\"Called to sort the underlying table. 
The sorted results will be returned\n by subsequent calls to search() and chunk().\"\"\"\n try:\n sort = [(int(column), order) for column, order in arguments[\"order\"]]\n except:\n raise cherrypy.HTTPError(\"400 Malformed order argument must match [[column, order],[column, order],...]\")\n\n for column, order in sort:\n if order != \"ascending\" and order != \"descending\":\n raise cherrypy.HTTPError(\"400 Sort-order must be 'ascending' or 'descending'.\")\n\n self.request.put((\"sort\", sort))\n return self.response.get()\n\n def work(self):\n self.preload()\n\n while not self.stopped:\n try:\n # Process the next request ...\n request, parameters = self.request.get(timeout=1)\n if request == \"chunk\":\n rows, columns = parameters\n\n # Constrain the request to 0 <= index along both dimensions\n rows = [row for row in rows if row >= 0]\n columns = [column for column in columns if column >= 0]\n response = self.get_chunk(rows, columns)\n\n elif request == \"metadata\":\n response = self.get_metadata()\n\n elif request == \"search\":\n search = [(column, value) for column, value in parameters if column >= 0]\n response = self.get_search(search)\n\n elif request == \"sort\":\n sort = [(column, order) for column, order in parameters if column >= 0]\n response = self.put_sort(sort)\n\n self.response.put(response)\n\n except Queue.Empty:\n pass\n\n def preload(self):\n \"\"\"Implement this in derivatives to do any pre-loading of data before entering the main chunk-retrieval loop.\"\"\"\n raise NotImplementedError()\n\n def get_metadata(self):\n \"\"\"Implement this in derivatives to return metadata describing the underlying data.\"\"\"\n raise NotImplementedError()\n\n def get_search(self, search):\n \"\"\"Implement this in derivatives to search the underlying data.\"\"\"\n raise NotImplementedError()\n\n def get_chunk(self, rows, columns):\n \"\"\"Implement this in derivatives to fetch the given chunk.\"\"\"\n raise NotImplementedError()\n\n def put_sort(self, sort):\n \"\"\"Implement this in derivatives to sort the underlying data.\"\"\"\n raise NotImplementedError()\n\nclass test(prototype):\n \"\"\"Table chunker that creates an arbitrary-size table containing random data for testing.\"\"\"\n def __init__(self, security, row_count, column_count, generate_index, seed=12345):\n prototype.__init__(self, security, \"chunker.table.test\")\n self.row_count = row_count\n self.column_count = column_count\n self.seed = seed\n self.generate_index = generate_index\n\n def preload(self):\n generator = random.Random()\n generator.seed(self.seed)\n self.column_names = [\"column-%s\" % column for column in range(self.column_count)]\n self.column_types = [\"double\" for column in range(self.column_count)]\n self.columns = [[generator.randint(0, self.row_count / 10) for row in range(self.row_count)] for column in range(self.column_count)]\n\n if self.generate_index is not None:\n self.column_count += 1\n self.column_names.append(self.generate_index)\n self.column_types.append(\"int64\")\n self.columns.append(range(self.row_count))\n\n self.sort_index = range(self.row_count)\n self.sort_indices = {() : self.sort_index}\n self.set_message(\"Using %s x %s test data.\" % (self.row_count, self.column_count))\n\n def get_metadata(self):\n response = {\n \"row-count\" : self.row_count,\n \"column-count\" : self.column_count,\n \"column-names\" : self.column_names,\n \"column-types\" : self.column_types,\n \"column-min\" : [min(column) if len(column) else None for column in self.columns],\n \"column-max\" : 
[max(column) if len(column) else None for column in self.columns]\n }\n return response\n\n def get_search(self, search):\n search = [(column, value) for column, value in search if column < self.column_count]\n\n response = {\n \"search\" : search,\n \"matches\" : [[row for row in range(self.row_count) if self.columns[column][self.sort_index[row]] == value] for column, value in search]\n }\n\n return response\n\n def get_chunk(self, rows, columns):\n # Constrain end <= count along both dimensions\n rows = [row for row in rows if row < self.row_count]\n columns = [column for column in columns if column < self.column_count]\n\n response = {\n \"rows\" : rows,\n \"columns\" : columns,\n \"column-names\" : [\"column-%s\" % column for column in columns],\n \"data\" : [[self.columns[column][self.sort_index[row]] for row in rows] for column in columns],\n }\n return response\n\n def put_sort(self, sort):\n sort = [(column, order) for column, order in sort if column < self.column_count]\n sort_index_key = tuple(sort)\n\n if sort_index_key not in self.sort_indices:\n index = range(self.row_count)\n for column, order in reversed(sort):\n index = sorted(index, key=lambda x: self.columns[column][x], reverse = (False if order == \"ascending\" else True))\n self.sort_indices[sort_index_key] = index\n self.sort_index = self.sort_indices[sort_index_key]\n\n response = {\n \"sort\" : sort\n }\n return response\n\nclass file(prototype):\n \"\"\"Table chunker that returns data from a database \"file\" (typically, a model artifact).\"\"\"\n def __init__(self, security, mid, fid, generate_index):\n prototype.__init__(self, security, \"chunker.table.file\")\n self.mid = mid\n self.fid = fid\n self.column_names = None\n self.generate_index = generate_index\n\n def preload(self):\n database = slycat.web.server.database.couchdb.connect()\n data = json.load(database.get_attachment(self.mid, self.fid))\n\n if isinstance(data, dict):\n # Treat the data as a table perspective ...\n if \"type\" in data and data[\"type\"] == \"table\":\n self.column_names = data[\"column-names\"]\n self.column_types = data[\"column-types\"]\n self.column_count = data[\"column-count\"]\n self.row_count = data[\"row-count\"]\n self.columns = data[\"columns\"]\n # Treat the data as a dict containing name : [column] pairs ...\n else:\n # By default use every column, sorted by name ...\n self.column_names = sorted(data.keys())\n self.column_types = [\"string\" for name in self.column_names]\n self.column_count = len(self.column_names)\n self.row_count = min([len(data[name]) for name in self.column_names])\n self.columns = [data[name] for name in self.column_names]\n else:\n raise Exception(\"Can't parse data into a table.\")\n\n if self.generate_index is not None:\n self.column_count += 1\n self.column_names.append(self.generate_index)\n self.column_types.append(\"int64\")\n self.columns.append(range(self.row_count))\n\n self.sort_index = range(self.row_count)\n self.sort_indices = {() : self.sort_index}\n self.set_message(\"Loaded %s x %s file.\" % (self.row_count, self.column_count))\n\n def get_metadata(self):\n response = {\n \"row-count\" : self.row_count,\n \"column-count\" : self.column_count,\n \"column-names\" : self.column_names,\n \"column-types\" : self.column_types,\n \"column-min\" : [min(column) if len(column) else None for column in self.columns],\n \"column-max\" : [max(column) if len(column) else None for column in self.columns]\n }\n return response\n\n def get_search(self, search):\n search = [(column, value) for column, 
value in search if column < self.column_count]\n\n response = {\n \"search\" : search,\n \"matches\" : [[row for row in range(self.row_count) if self.columns[column][self.sort_index[row]] == value] for column, value in search]\n }\n\n return response\n\n def get_chunk(self, rows, columns):\n # Constrain end <= count along both dimensions\n rows = [row for row in rows if row < self.row_count]\n columns = [column for column in columns if column < self.column_count]\n\n response = {\n \"rows\" : rows,\n \"columns\" : columns,\n \"column-names\" : [self.column_names[column] for column in columns],\n \"data\" : [[self.columns[column][self.sort_index[row]] for row in rows] for column in columns]\n }\n\n return response\n\n def put_sort(self, sort):\n sort = [(column, order) for column, order in sort if column < self.column_count]\n sort_index_key = tuple(sort)\n\n if sort_index_key not in self.sort_indices:\n index = range(self.row_count)\n for column, order in reversed(sort):\n index = sorted(index, key=lambda x: self.columns[column][x], reverse = (False if order == \"ascending\" else True))\n self.sort_indices[sort_index_key] = index\n self.sort_index = self.sort_indices[sort_index_key]\n\n response = {\n \"sort\" : sort\n }\n return response\n\nclass artifact(prototype):\n \"\"\"Table chunker that returns data from a database \"file\" (typically, a model artifact).\"\"\"\n def __init__(self, security, model, artifact, generate_index):\n prototype.__init__(self, security, \"chunker.table.artifact\")\n self.model = model\n self.artifact = model[\"artifact:%s\" % artifact]\n self.generate_index = generate_index\n\n def preload(self):\n database = slycat.web.server.database.scidb.connect()\n\n columns = self.artifact[\"columns\"]\n column_names = self.artifact[\"column-names\"]\n\n with database.query(\"aql\", \"select name from %s\" % column_names) as results:\n self.column_names = [value.getString() for attribute in results for value in attribute]\n self.column_count = len(self.column_names)\n\n with database.query(\"aql\", \"select type_id from attributes(%s)\" % columns) as results:\n self.column_types = [value.getString() for attribute in results for value in attribute]\n\n low = database.query_value(\"aql\", \"select low from dimensions(%s)\" % columns).getInt64()\n high = database.query_value(\"aql\", \"select high from dimensions(%s)\" % columns).getInt64()\n self.row_count = high + 1 if high >= low else 0\n\n self.columns = []\n with database.query(\"aql\", \"select * from %s\" % columns) as results:\n attributes = iter(results)\n for column_type in self.column_types:\n if column_type == \"string\":\n self.columns.append([value.getString() for value in attributes.next()])\n elif column_type == \"double\":\n self.columns.append([value.getDouble() for value in attributes.next()])\n else:\n self.columns.append([None] * self.row_count)\n\n if self.generate_index is not None:\n self.column_count += 1\n self.column_names.append(self.generate_index)\n self.column_types.append(\"int64\")\n self.columns.append(range(self.row_count))\n\n self.sort_index = range(self.row_count)\n self.sort_indices = {() : self.sort_index}\n self.set_message(\"Loaded %s x %s file.\" % (self.row_count, self.column_count))\n\n def get_metadata(self):\n response = {\n \"row-count\" : self.row_count,\n \"column-count\" : self.column_count,\n \"column-names\" : self.column_names,\n \"column-types\" : self.column_types,\n \"column-min\" : [min(column) if len(column) else None for column in self.columns],\n \"column-max\" 
: [max(column) if len(column) else None for column in self.columns]\n }\n return response\n\n def get_search(self, search):\n search = [(column, value) for column, value in search if column < self.column_count]\n\n response = {\n \"search\" : search,\n \"matches\" : [[row for row in range(self.row_count) if self.columns[column][self.sort_index[row]] == value] for column, value in search]\n }\n\n return response\n\n def get_chunk(self, rows, columns):\n # Constrain end <= count along both dimensions\n rows = [row for row in rows if row < self.row_count]\n columns = [column for column in columns if column < self.column_count]\n\n response = {\n \"rows\" : rows,\n \"columns\" : columns,\n \"column-names\" : [self.column_names[column] for column in columns],\n \"data\" : [[self.columns[column][self.sort_index[row]] for row in rows] for column in columns]\n }\n\n return response\n\n def put_sort(self, sort):\n sort = [(column, order) for column, order in sort if column < self.column_count]\n sort_index_key = tuple(sort)\n\n if sort_index_key not in self.sort_indices:\n index = range(self.row_count)\n for column, order in reversed(sort):\n index = sorted(index, key=lambda x: self.columns[column][x], reverse = (False if order == \"ascending\" else True))\n self.sort_indices[sort_index_key] = index\n self.sort_index = self.sort_indices[sort_index_key]\n\n response = {\n \"sort\" : sort\n }\n return response\n\n","sub_path":"packages/slycat/web/server/worker/chunker/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":15126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"47693593","text":"#coding=utf-8\nimport ddt\nimport unittest\nimport sys\nsys.path.append('D:/JY_selenium')\nfrom unittest import TestSuite\nfrom business.register_business import RegisterBusiness\nfrom selenium import webdriver\nimport HTMLTestRunner\nimport os\nimport time\nfrom util.excel_until import ExcelUtil\nex = ExcelUtil()\ndata = ex.get_data()\n#邮箱、用户名、密码、验证码、错误信息定位元素、错误提示信息\n@ddt.ddt\nclass FirstDdtCase(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.driver.get('http://biz-test.jiutongpay.com.cn')\n self.driver.maximize_window()\n self.login = RegisterBusiness(self.driver)\n\n def tearDown(self):\n time.sleep(2)\n for method_name, error in self._outcome.errors:\n if error:\n case_name = self._testMethodName\n file_path = os.path.join(os.getcwd() + \"/report/\" + case_name + \".png\")\n self.driver.save_screenshot(file_path)\n self.driver.close()\n '''\n @ddt.data(\n ['111111','1@163.com','123123','code','user_name_error','用户名或密码错误'],\n ['222222','2@163.com','123123','code','user_name_error','用户名或密码错误'],\n ['JTZF800003','1@163.com','123123','code','user_name_error','用户名或密码错误']\n )\n\n @ddt.unpack\n '''\n @ddt.data(*data)\n def test_register_case(self,data):\n username, email, password, code, assertCode, assertText = data\n username_error = self.login.register_function(username,email,password,code,assertCode,assertText)\n self.assertFalse(username_error)\n\nif __name__ == '__main__':\n file_path = os.path.join(os.getcwd() + \"/report/\" + \"first_case1.html\")\n f = open(file_path, 'wb')\n suite = unittest.TestLoader().loadTestsFromTestCase(FirstDdtCase)\n runner = HTMLTestRunner.HTMLTestRunner(stream=f, title=\"report1\", description=u\"测试报告1\", verbosity=2)\n runner.run(suite)\n 
f.close()\n\n\n","sub_path":"case/first_ddt_case.py","file_name":"first_ddt_case.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"72727189","text":"from nltk.tokenize import sent_tokenize\n\n\ndef lines(a, b):\n similar_lines = []\n # split lines into array\n a = a.split('\\n')\n b = b.split('\\n')\n\n # check if sentence is similar and not a duplicate\n for _, sentence in enumerate(a):\n if sentence in b and sentence not in similar_lines:\n similar_lines.append(sentence)\n\n return similar_lines\n\n\ndef sentences(a, b):\n # sent_tokenize breaks strings into sentences\n a_sentences = sent_tokenize(a)\n b_sentences = sent_tokenize(b)\n similar_sentences = []\n\n # check if sentence is similar and not duplicate\n for _, sentence in enumerate(a_sentences):\n if sentence in b_sentences and sentence not in similar_sentences:\n similar_sentences.append(sentence)\n\n return similar_sentences\n\n\ndef substrings(a, b, n):\n similar_substr = []\n a_substrings = []\n b_substrings = []\n\n # break strings into substrings\n for i in range(0, len(a) - n + 1):\n a_substrings.append(a[i:i+n])\n\n for i in range(0, len(b) - n + 1):\n b_substrings.append(b[i:i+n])\n\n # check if substrings are similar and not duplicate\n for _, substr in enumerate(a_substrings):\n if substr in b_substrings and substr not in similar_substr:\n similar_substr.append(substr)\n\n for _, substr in enumerate(b_substrings):\n if substr in a_substrings and substr not in similar_substr:\n similar_substr.append(substr)\n\n return similar_substr","sub_path":"pset6/similarities/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"247688972","text":"import logging\nimport os\nimport os.path as osp\nimport torch\nfrom torch.cuda.amp import autocast, GradScaler\nimport mmcv\nimport time\nimport cv2\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom detectron2.utils.events import EventStorage\nfrom detectron2.checkpoint import PeriodicCheckpointer\nfrom detectron2.evaluation import (\n CityscapesInstanceEvaluator,\n CityscapesSemSegEvaluator,\n COCOEvaluator,\n COCOPanopticEvaluator,\n DatasetEvaluators,\n LVISEvaluator,\n PascalVOCDetectionEvaluator,\n SemSegEvaluator,\n)\n\nfrom detectron2.data.common import AspectRatioGroupedDataset\nfrom detectron2.data import MetadataCatalog\n\nfrom lib.utils.utils import dprint\nfrom lib.vis_utils.image import grid_show, vis_bbox_opencv\nfrom core.gdrn_selfocc_modeling.tools.torch_utils import ModelEMA\nfrom core.utils import solver_utils\nimport core.gdrn_selfocc_modeling.tools.my_comm as comm\nfrom core.utils.my_checkpoint import MyCheckpointer\nfrom core.utils.my_writer import MyCommonMetricPrinter, MyJSONWriter, MyTensorboardXWriter\nfrom core.utils.utils import get_emb_show\nfrom core.utils.data_utils import denormalize_image\nfrom core.gdrn_selfocc_modeling.datasets.data_loader import build_gdrn_train_loader, build_gdrn_test_loader\nfrom core.gdrn_selfocc_modeling.losses.crosstask_loss import CT_loss\nfrom core.gdrn_selfocc_modeling.losses.crosstask_projection_loss import CT_loss_projection\n\nfrom .engine_utils import batch_data, get_out_coor, get_out_mask\nfrom .gdrn_evaluator import gdrn_inference_on_dataset, GDRN_Evaluator\nfrom .gdrn_custom_evaluator import GDRN_EvaluatorCustom\nimport ref\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef 
get_evaluator(cfg, dataset_name, output_folder=None):\n \"\"\"Create evaluator(s) for a given dataset.\n\n This uses the special metadata \"evaluator_type\" associated with each\n builtin dataset. For your own dataset, you can simply create an\n evaluator manually in your script and do not have to worry about the\n hacky if-else logic here.\n \"\"\"\n if output_folder is None:\n output_folder = osp.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n if evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n if evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n\n _distributed = comm.get_world_size() > 1\n dataset_meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\n train_obj_names = dataset_meta.objs\n if evaluator_type == \"bop\":\n gdrn_eval_cls = GDRN_Evaluator if cfg.VAL.get(\"USE_BOP\", False) else GDRN_EvaluatorCustom\n return gdrn_eval_cls(\n cfg, dataset_name, distributed=_distributed, output_dir=output_folder, train_objs=train_obj_names\n )\n\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(dataset_name, evaluator_type)\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)\n\n\ndef do_test(cfg, model, epoch=None, iteration=None):\n results = OrderedDict()\n model_name = osp.basename(cfg.MODEL.WEIGHTS).split(\".\")[0]\n for dataset_name in cfg.DATASETS.TEST:\n if epoch is not None and iteration is not None:\n eval_out_dir = osp.join(cfg.OUTPUT_DIR, f\"inference_epoch_{epoch}_iter_{iteration}\", dataset_name)\n else:\n eval_out_dir = osp.join(cfg.OUTPUT_DIR, f\"inference_{model_name}\", dataset_name)\n evaluator = get_evaluator(cfg, dataset_name, eval_out_dir)\n data_loader = build_gdrn_test_loader(cfg, dataset_name, train_objs=evaluator.train_objs)\n results_i = gdrn_inference_on_dataset(cfg, model, data_loader, evaluator, amp_test=cfg.TEST.AMP_TEST)\n results[dataset_name] = results_i\n # if comm.is_main_process():\n # logger.info(\"Evaluation results for {} in csv format:\".format(dataset_name))\n # print_csv_format(results_i)\n if len(results) == 1:\n results = list(results.values())[0]\n return results\n\n\ndef get_tbx_event_writer(out_dir, backup=False):\n tb_logdir = osp.join(out_dir, \"tb\")\n mmcv.mkdir_or_exist(tb_logdir)\n if backup:\n old_tb_logdir = osp.join(out_dir, \"tb_old\")\n mmcv.mkdir_or_exist(old_tb_logdir)\n 
os.system(\"mv -v {} {}\".format(osp.join(tb_logdir, \"events.*\"), old_tb_logdir))\n\n tbx_event_writer = MyTensorboardXWriter(tb_logdir, backend=\"tensorboardX\")\n return tbx_event_writer\n\n\ndef do_train(cfg, args, model, optimizer, renderer=None, resume=False):\n model.train()\n\n # some basic settings =========================\n dataset_meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\n data_ref = ref.__dict__[dataset_meta.ref_key]\n obj_names = dataset_meta.objs\n\n # load data ===================================\n train_dset_names = cfg.DATASETS.TRAIN\n data_loader = build_gdrn_train_loader(cfg, train_dset_names)\n data_loader_iter = iter(data_loader)\n\n # load 2nd train dataloader if needed\n train_2_dset_names = cfg.DATASETS.get(\"TRAIN2\", ())\n train_2_ratio = cfg.DATASETS.get(\"TRAIN2_RATIO\", 0.0)\n if train_2_ratio > 0.0 and len(train_2_dset_names) > 0:\n data_loader_2 = build_gdrn_train_loader(cfg, train_2_dset_names)\n data_loader_2_iter = iter(data_loader_2)\n else:\n data_loader_2 = None\n data_loader_2_iter = None\n\n images_per_batch = cfg.SOLVER.IMS_PER_BATCH\n if isinstance(data_loader, AspectRatioGroupedDataset):\n dataset_len = len(data_loader.dataset.dataset)\n if data_loader_2 is not None:\n dataset_len += len(data_loader_2.dataset.dataset)\n iters_per_epoch = dataset_len // images_per_batch\n else:\n dataset_len = len(data_loader.dataset)\n if data_loader_2 is not None:\n dataset_len += len(data_loader_2.dataset)\n iters_per_epoch = dataset_len // images_per_batch\n max_iter = cfg.SOLVER.TOTAL_EPOCHS * iters_per_epoch\n dprint(\"images_per_batch: \", images_per_batch)\n dprint(\"dataset length: \", dataset_len)\n dprint(\"iters per epoch: \", iters_per_epoch)\n dprint(\"total iters: \", max_iter)\n\n bs_ref = cfg.SOLVER.get(\"REFERENCE_BS\", 64) # nominal batch size =========================\n accumulate_iter = max(round(bs_ref / cfg.SOLVER.IMS_PER_BATCH), 1) # accumulate loss before optimizing\n # NOTE: update lr every accumulate_iter\n scheduler = solver_utils.build_lr_scheduler(cfg, optimizer, total_iters=max_iter // accumulate_iter)\n\n AMP_ON = cfg.SOLVER.AMP.ENABLED\n logger.info(f\"AMP enabled: {AMP_ON}\")\n grad_scaler = GradScaler()\n\n # resume or load model ===================================\n checkpointer = MyCheckpointer(\n model,\n cfg.OUTPUT_DIR,\n optimizer=optimizer,\n scheduler=scheduler,\n gradscaler=grad_scaler,\n save_to_disk=comm.is_main_process(),\n )\n start_iter = checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get(\"iteration\", -1) + 1\n\n\n # Exponential moving average (NOTE: initialize ema after loading weights) ========================\n if comm.is_main_process() and cfg.MODEL.EMA.ENABLED:\n ema = ModelEMA(model, **cfg.MODEL.EMA.INIT_CFG)\n ema.updates = start_iter // accumulate_iter\n # save the ema model\n checkpointer.model = ema.ema.module if hasattr(ema.ema, \"module\") else ema.ema\n else:\n ema = None\n\n if comm._USE_HVD: # hvd may be not available, so do not use the one in args\n import horovod.torch as hvd\n\n # not needed\n # start_iter = hvd.broadcast(torch.tensor(start_iter), root_rank=0, name=\"start_iter\").item()\n\n # Horovod: broadcast parameters & optimizer state.\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n # Horovod: (optional) compression algorithm.\n compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none\n optimizer = hvd.DistributedOptimizer(\n optimizer,\n 
named_parameters=model.named_parameters(),\n op=hvd.Adasum if args.use_adasum else hvd.Average,\n compression=compression,\n ) # device_dense='/cpu:0'\n elif comm._USE_BPS:\n import byteps.torch as bps\n\n # BytePS: (optional) compression algorithm.\n compression = bps.Compression.fp16 if args.fp16_pushpull else bps.Compression.none\n\n # BytePS: wrap optimizer with DistributedOptimizer.\n optimizer = bps.DistributedOptimizer(\n optimizer, named_parameters=model.named_parameters(), compression=compression\n )\n\n # BytePS: broadcast parameters.\n bps.broadcast_parameters(model.state_dict(), root_rank=0)\n bps.broadcast_optimizer_state(optimizer, root_rank=0)\n\n if cfg.SOLVER.CHECKPOINT_BY_EPOCH:\n ckpt_period = cfg.SOLVER.CHECKPOINT_PERIOD * iters_per_epoch\n else:\n ckpt_period = cfg.SOLVER.CHECKPOINT_PERIOD\n periodic_checkpointer = PeriodicCheckpointer(\n checkpointer, ckpt_period, max_iter=max_iter, max_to_keep=cfg.SOLVER.MAX_TO_KEEP\n )\n\n # build writers ==============================================\n tbx_event_writer = get_tbx_event_writer(cfg.OUTPUT_DIR, backup=not cfg.get(\"RESUME\", False))\n tbx_writer = tbx_event_writer._writer # NOTE: we want to write some non-scalar data\n writers = (\n [MyCommonMetricPrinter(max_iter), MyJSONWriter(osp.join(cfg.OUTPUT_DIR, \"metrics.json\")), tbx_event_writer]\n if comm.is_main_process()\n else []\n )\n\n # compared to \"train_net.py\", we do not support accurate timing and\n # precise BN here, because they are not trivial to implement\n logger.info(\"Starting training from iteration {}\".format(start_iter))\n iter_time = None\n with EventStorage(start_iter) as storage:\n optimizer.zero_grad()\n for iteration in range(start_iter, max_iter):\n storage.iter = iteration\n epoch = iteration // iters_per_epoch + 1 # epoch start from 1\n storage.put_scalar(\"epoch\", epoch, smoothing_hint=False)\n\n if np.random.rand() < train_2_ratio:\n data = next(data_loader_2_iter)\n else:\n data = next(data_loader_iter)\n\n if iter_time is not None:\n storage.put_scalar(\"time\", time.perf_counter() - iter_time)\n iter_time = time.perf_counter()\n\n # if cfg.TRAIN.VIS:\n # vis_train_data(data, obj_names, cfg)\n\n # forward ============================================================\n batch = batch_data(cfg, data, renderer=renderer)\n with autocast(enabled=AMP_ON):\n out_dict, loss_dict = model(\n batch[\"roi_img\"],\n gt_xyz=batch.get(\"roi_xyz\", None),\n gt_xyz_bin=batch.get(\"roi_xyz_bin\", None),\n gt_mask_trunc=batch[\"roi_mask_trunc\"],\n gt_mask_visib=batch[\"roi_mask_visib\"],\n gt_mask_obj=batch[\"roi_mask_obj\"],\n gt_mask_erode=batch.get(\"roi_mask_erode\", None),\n gt_region=batch.get(\"roi_region\", None),\n gt_ego_rot=batch.get(\"ego_rot\", None),\n gt_trans=batch.get(\"trans\", None),\n gt_trans_ratio=batch[\"roi_trans_ratio\"],\n gt_points=batch.get(\"roi_points\", None),\n sym_infos=batch.get(\"sym_info\", None),\n roi_classes=batch[\"roi_cls\"],\n roi_cams=batch[\"roi_cam\"],\n roi_whs=batch[\"roi_wh\"],\n roi_centers=batch[\"roi_center\"],\n resize_ratios=batch[\"resize_ratio\"],\n roi_coord_2d=batch.get(\"roi_coord_2d\", None),\n roi_extents=batch.get(\"roi_extent\", None),\n do_loss=True,\n # selfocc parameters\n gt_q0=batch.get(\"roi_Q0\", None),\n gt_occmask=batch.get(\"occmask\", None),\n roi_extent=batch.get(\"roi_extent\", None), # 注意上面有一个带s的,其实是一个,重复了,懒得改了\n size_imW=batch.get(\"im_W\", 640),\n size_imH=batch.get(\"im_H\", 480),\n E_step=epoch,\n )\n '''\n # pop necessary loss here\n if iteration < max_iter * 
cfg.TRAIN.CT_START:\n loss_dict.pop(\"loss_ct\")\n if iteration < max_iter * cfg.TRAIN.CT_P_START:\n loss_dict.pop(\"loss_ct_pro\")\n '''\n # do test after reasonable iteration\n losses = sum(loss_dict.values())\n assert torch.isfinite(losses).all(), loss_dict\n\n loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n if comm.is_main_process():\n storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)\n\n # backward & optimize ======================================================\n if AMP_ON:\n grad_scaler.scale(losses).backward()\n\n # # Unscales the gradients of optimizer's assigned params in-place\n # grad_scaler.unscale_(optimizer)\n # # Since the gradients of optimizer's assigned params are unscaled, clips as usual:\n # torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)\n # optimize\n if iteration % accumulate_iter == 0:\n if comm._USE_HVD or comm._USE_BPS:\n optimizer.synchronize()\n with optimizer.skip_synchronize():\n grad_scaler.step(optimizer)\n grad_scaler.update()\n else:\n grad_scaler.step(optimizer)\n grad_scaler.update()\n else:\n losses.backward()\n # clip gradient\n torch.nn.utils.clip_grad_norm_(model.parameters(), 10)\n # optimize\n if iteration % accumulate_iter == 0:\n optimizer.step()\n\n if iteration % accumulate_iter == 0:\n optimizer.zero_grad()\n if ema is not None:\n ema.update(model)\n storage.put_scalar(\"lr\", optimizer.param_groups[0][\"lr\"], smoothing_hint=False)\n scheduler.step()\n\n if cfg.TEST.EVAL_PERIOD > 0 and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0 and iteration != max_iter - 1:\n if ema is not None:\n ema.update_attr(model)\n do_test(\n cfg,\n model=ema.ema.module if hasattr(ema.ema, \"module\") else ema.ema,\n epoch=epoch,\n iteration=iteration,\n )\n else:\n do_test(cfg, model, epoch=epoch, iteration=iteration)\n # Compared to \"train_net.py\", the test results are not dumped to EventStorage\n comm.synchronize()\n\n if iteration - start_iter > 5 and (\n (iteration + 1) % cfg.TRAIN.PRINT_FREQ == 0 or iteration == max_iter - 1 or iteration < 100\n ):\n for writer in writers:\n writer.write()\n # visualize some images ========================================\n if cfg.TRAIN.VIS_IMG:\n with torch.no_grad():\n vis_i = 0\n roi_img_vis = batch[\"roi_img\"][vis_i].cpu().numpy()\n roi_img_vis = denormalize_image(roi_img_vis, cfg).transpose(1, 2, 0).astype(\"uint8\")\n tbx_writer.add_image(\"input_image\", roi_img_vis, iteration)\n\n out_coor_x = out_dict[\"coor_x\"].detach()\n out_coor_y = out_dict[\"coor_y\"].detach()\n out_coor_z = out_dict[\"coor_z\"].detach()\n out_xyz = get_out_coor(cfg, out_coor_x, out_coor_y, out_coor_z)\n\n out_xyz_vis = out_xyz[vis_i].cpu().numpy().transpose(1, 2, 0)\n out_xyz_vis = get_emb_show(out_xyz_vis)\n tbx_writer.add_image(\"out_xyz\", out_xyz_vis, iteration)\n\n gt_xyz_vis = batch[\"roi_xyz\"][vis_i].cpu().numpy().transpose(1, 2, 0)\n gt_xyz_vis = get_emb_show(gt_xyz_vis)\n tbx_writer.add_image(\"gt_xyz\", gt_xyz_vis, iteration)\n\n out_mask = out_dict[\"mask\"].detach()\n out_mask = get_out_mask(cfg, out_mask)\n out_mask_vis = out_mask[vis_i, 0].cpu().numpy()\n tbx_writer.add_image(\"out_mask\", out_mask_vis, iteration)\n\n gt_mask_vis = batch[\"roi_mask\"][vis_i].detach().cpu().numpy()\n tbx_writer.add_image(\"gt_mask\", gt_mask_vis, iteration)\n periodic_checkpointer.step(iteration, epoch=epoch)\n\n\ndef vis_train_data(data, obj_names, cfg):\n for i, d in enumerate(data):\n # if i >= 1:\n # 
continue\n full_img = mmcv.imread(d[\"file_name\"], \"color\")\n # if \"000009/rgb/000047.png\" not in d[\"file_name\"]:\n # continue\n print(d[\"file_name\"])\n im_H, im_W = full_img.shape[:2]\n roi_cls = d[\"roi_cls\"]\n if roi_cls not in [0]:\n continue\n bbox_center = d[\"bbox_center\"]\n scale = d[\"scale\"]\n x1 = max(min(bbox_center[0] - scale / 2, im_W), 0)\n x2 = max(min(bbox_center[0] + scale / 2, im_W), 0)\n y1 = max(min(bbox_center[1] - scale / 2, im_H), 0)\n y2 = max(min(bbox_center[1] + scale / 2, im_H), 0)\n full_img_vis = vis_bbox_opencv(full_img, np.array([x1, y1, x2, y2]), fmt=\"xyxy\")\n\n bbox_ori = d[\"bbox\"]\n full_img_bbox = vis_bbox_opencv(full_img, bbox_ori, fmt=\"xyxy\")\n obj_name = obj_names[roi_cls]\n\n roi_img = d[\"roi_img\"].numpy()\n roi_img = denormalize_image(roi_img, cfg).transpose(1, 2, 0).astype(\"uint8\")\n\n roi_mask_trunc = d[\"roi_mask_trunc\"].numpy().astype(\"bool\")\n roi_mask_visib = d[\"roi_mask_visib\"].numpy().astype(\"bool\")\n roi_mask_obj = d[\"roi_mask_obj\"].numpy().astype(\"bool\")\n\n kernel = np.ones((3, 3), np.uint8)\n erode_mask_obj = cv2.erode(roi_mask_obj.astype(\"uint8\"), kernel, iterations=1)\n\n roi_xyz = d[\"roi_xyz\"].numpy().transpose(1, 2, 0)\n roi_xyz_show = get_emb_show(roi_xyz) * erode_mask_obj[:, :, None].astype(\"float32\")\n\n coord2d = d[\"roi_coord_2d\"].numpy().transpose(1, 2, 0)\n roi_h, roi_w = coord2d.shape[:2]\n zeros_1 = np.zeros((roi_h, roi_w, 1), dtype=\"float32\")\n coord2d_3 = np.concatenate([zeros_1, get_emb_show(coord2d)], axis=2)\n\n # yapf: disable\n vis_imgs = [\n full_img_vis[:, :, [2, 1, 0]], full_img_bbox[:, :, [2, 1, 0]], roi_img[:, :, [2, 1, 0]],\n roi_mask_trunc * erode_mask_obj, roi_mask_visib*erode_mask_obj, roi_mask_obj*erode_mask_obj,\n roi_xyz_show,\n coord2d_3,\n coord2d[:, :, 0], coord2d[:, :, 1]\n ]\n titles = [\n \"full_img\", \"ori_bbox\", f\"roi_img({obj_name})\",\n \"roi_mask_trunc\", \"roi_mask_visib\", \"roi_mask_obj\",\n \"roi_xyz\",\n \"roi_coord2d\",\n \"roi_coord2d_x\", \"roi_coord2d_y\"\n ]\n row = 3\n col = 4\n if \"roi_region\" in d:\n roi_region = d[\"roi_region\"].numpy() # (bh, bw)\n roi_region_3 = np.zeros((roi_h, roi_w, 3), dtype=\"float32\")\n for region_id in range(256):\n # if region_id == 0:\n # continue\n if region_id in roi_region:\n for _c in range(3):\n roi_region_3[:, :, _c][roi_region == region_id] = roi_xyz_show[:, :, _c][roi_region == region_id].mean()\n roi_region_3 = roi_region_3 * erode_mask_obj[:, :, None].astype(\"float32\")\n vis_imgs.append(roi_region_3)\n titles.append(\"roi_region\")\n if len(vis_imgs) > row * col:\n col += 1\n for _im, _name in zip(vis_imgs, titles):\n save_path = osp.join(cfg.OUTPUT_DIR, \"vis\", _name+'.png')\n mmcv.mkdir_or_exist(osp.dirname(save_path))\n if _im.shape[-1] == 3:\n _im = _im[:, :, [2,1,0]]\n if _im.max() < 1.1:\n _im = (_im * 255).astype(\"uint8\")\n print(save_path)\n mmcv.imwrite(_im, save_path)\n\n grid_show(vis_imgs, titles, row=row, col=col)\n\n # yapf: enable\n","sub_path":"core/gdrn_selfocc_modeling/engine/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":22205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"264575828","text":"#!/usr/bin/env python3\nfrom sys import exit\nfrom random import randint\n\nclass Scene:\n\n def enter(self):\n print(\"This scene is not yet configured. 
Subclass it and implement enter().\")\n        exit(1)\n\nclass Engine:\n\n    def __init__(self, scene_map):\n        self.scene_map = scene_map\n\n    def play(self):\n        current_scene = self.scene_map.opening_scene()\n\n        while True:\n            print(\"\\n------\")\n            next_scene_name = current_scene.enter()\n            current_scene = self.scene_map.next_scene(next_scene_name)\n\nclass Death(Scene):\n\n    def enter(self):\n        print(\"You have died! Better luck next time asshole!\")\n        play_again()\n\n\nclass CentralCorridor(Scene):\n    \n    def enter(self):\n        print(\"you have entered the Central Corridor! Beware!\")\n        print(\"you have the following options...choose wisely!\")\n        print(\"Shoot, tell a joke, run\")\n        print(\"what will it be?\")\n        print(\"1. shoot\")\n        print(\"2. run\")\n        print(\"3. tell a joke\")\n\n        action = input(\"> \")\n\n        if action == '1' or action == '2':\n            return 'death'\n        elif action == '3':\n            return 'laser_weapon_armory'\n\n\nclass LaserWeaponArmory(Scene):\n\n    def enter(self):\n        print(\"You have entered the Laser Weapon Armory! Beware!\")\n        print(\"you must enter a code to escape!\")\n        print(\"it is a 3 digit code! if you are really stuck maybe if you were l33t you would figure it out!\")\n        code = \"%d%d%d\" % (randint(1,9), randint(1,9), randint(1,9))\n        cheat_code = '1337'\n        guess = input(\"[keypad]> \")\n        guesses = 0\n\n        while guess != code and guesses < 9:\n            if guess != cheat_code:\n                print(\"Bzzzed!\")\n            if guess == cheat_code:\n                print(\"well aren't you smart...here is the code!\\nCode: \" + code)\n            guesses += 1\n            guess = input(\"[keypad]> \")\n        if guess == code:\n            return 'the_bridge'\n\n        \n\nclass TheBridge(Scene):\n\n    def enter(self):\n        print(\"You have entered the endless bridge! Beware!\")\n        Combat()\n        return 'escape_pod'\n\nclass Finished(Scene):\n\n    def enter(self):\n        print(\"You have escaped! Good luck to you out in the galaxy!\")\n        play_again()\n        \n\nclass EscapePod(Scene):\n\n    def enter(self):\n        print(\"You have entered the escape pod!\")\n        print(\"There are 5 pods but only one is good! you must decide which one!\")\n        good_pod = randint(1,5)\n        print(good_pod)\n        cheat_code = '1337'\n        guess = input(\"[Pod #]> \")\n\n        if int(guess) != good_pod:\n            print(\"you jumped into pod %s and hit the eject button.\" % guess)\n            return 'death'\n        else:\n            print(\"you jump into pod %s and hit the eject button.\" % guess)\n            return 'finished'\n\n\nclass Map:\n\n    scenes = {\n        'central_corridor': CentralCorridor(),\n        'laser_weapon_armory': LaserWeaponArmory(),\n        'the_bridge': TheBridge(),\n        'escape_pod': EscapePod(),\n        'death' : Death(),\n        'finished': Finished()\n    }\n\n    def __init__(self, start_scene):\n        self.start_scene = start_scene\n\n    def next_scene(self, scene_name):\n        return Map.scenes.get(scene_name)\n\n\n    def opening_scene(self):\n        return self.next_scene(self.start_scene)\n\nclass Player:\n    def __init__(self, health):\n        self.health = health\n\n    def get_health(self):\n        return self.health\n\n\n\nrogue = Player(100)\n\nclass Weapons:\n\n    @staticmethod\n    def sword():\n        print(\"you strike the Gothon with your sword dealing 5 damage!\")\n        sword_strike = 5\n        rogue.health -= sword_strike\n        print(rogue.health)\n\n    @staticmethod\n    def gun():\n        print(\"you shot the gothon for 25 damage!\")\n        gun_shot = 25\n        rogue.health -= gun_shot\n        print(rogue.health)\n\n    @staticmethod\n    def knife():\n        print(\"you stab the Gothon for 45 damage!\")\n        stab = 45\n        rogue.health -= stab\n        print(rogue.health)\n\n    \n\ndef Combat():\n    print(\"How will you fight back?\")\n    if rogue.health > 0:\n        print(\"choose a weapon to fight back with!\")\n        print(\"1. 
Sword\")\n print(\"2. Gun\")\n print(\"3. Knife\")\n choice = input(\"> \")\n if choice == '1':\n return Weapons.sword()\n elif choice == '2':\n return Weapons.gun()\n elif choice == '3':\n return Weapons.knife()\n return 'escape pod'\n\ndef play_again():\n print(\"would you like to play again?\")\n print(\"Yes or No?\")\n answer = input(\"> \")\n if answer == 'y':\n rogue.health = 100\n a_game.play()\n else:\n exit()\n\n\na_map = Map('central_corridor')\na_game = Engine(a_map)\na_game.play()\n\n\n\n\n\n\n\n\n\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"339061566","text":"\nfrom app.models import Task, Contract\n\nimport json\nimport app.constants as CONST\nimport app.bl.contract as contract_bl\n\n\ndef add_create_market_task(match):\n\t# get active contract\n\tcontract = contract_bl.get_active_smart_contract()\n\tif contract is None:\n\t\treturn None\n\n\t# add task\n\ttask = Task(\n\t\ttask_type=CONST.TASK_TYPE['REAL_BET'],\n\t\tdata=json.dumps(match.to_json()),\n\t\taction=CONST.TASK_ACTION['CREATE_MARKET'],\n\t\tstatus=-1,\n\t\tcontract_address=contract.contract_address,\n\t\tcontract_json=contract.json_name\n\t)\n\treturn task\n\n\ndef is_contract_support_report_for_creator_method(name):\n\tarr = name.split('v')\n\tif len(arr) == 2:\n\t\tnums = arr[1].split('_')\n\t\tnumber = int('{}{}'.format(nums[0], nums[1]))\n\t\tif number >= 14: #support version from 1.4\n\t\t\treturn True\n\n\treturn False\n\n\ndef can_admin_report_this_outcome(outcome):\n\tif outcome is None or outcome.result != CONST.RESULT_TYPE['PENDING'] or outcome.hid is None:\n\t\treturn False\n\t\n\tcontract = Contract.find_contract_by_id(outcome.contract_id)\n\tif contract is None:\n\t\treturn False\n\n\t# created by admin\n\tif outcome.created_user_id is None:\n\t\treturn True\n\n\tif is_contract_support_report_for_creator_method(contract.json_name) is False:\n\t\treturn False\n\n\tif outcome.match.grant_permission == 1 and \\\n\t\toutcome.match.creator_wallet_address is not None and \\\n\t\tlen(outcome.match.creator_wallet_address) > 0:\n\t\treturn True\n\n\treturn False","sub_path":"restapi/app/bl/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"100810249","text":"\n\nfrom xai.brain.wordbase.nouns._cinch import _CINCH\n\n#calss header\nclass _CINCHED(_CINCH, ):\n\tdef __init__(self,): \n\t\t_CINCH.__init__(self)\n\t\tself.name = \"CINCHED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"cinch\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_cinched.py","file_name":"_cinched.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"297019843","text":"import os\nimport json\nimport glob\nimport pickle\nimport numpy as np\nfrom decimal import Decimal\nfrom scipy.stats import spearmanr\nfrom collections import defaultdict\n\nimport torch\nimport torchvision.transforms as transforms\n\nimport viz\nimport utils\nfrom explainers import CASO, \\\n VanillaGrad, IntegratedGrad, SmoothGrad, \\\n BatchTuner, SmoothCASO, EigenCASO, \\\n NewExplainer, VATExplainer, Spectrum\nfrom attackers import EmptyAttack, GhorbaniAttack, ScaledNoiseAttack\nfrom imagenet1000_clsid_to_human import clsid_to_human\n\nimport matplotlib.pyplot as plt\nimport 
matplotlib.patches as patches\n\n\ntransf = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n ])\n\n# norm_tranf = transforms.Compose([\n# transforms.Resize((224, 224)),\n# transforms.ToTensor(),\n# transforms.Normalize(mean=[0.485, 0.456, 0.406],\n# std=[0.229, 0.224, 0.225])\n# ])\n\n\ndef setup_imagenet(batch_size=16, example_ids=None,\n n_batches=-1, n_examples=-1,\n shuffle=True, dump_filenames=None,\n arch='resnet50'):\n model = utils.load_model(arch)\n model.eval()\n model.cuda()\n print('model loaded')\n\n image_path = '/fs/imageNet/imagenet/ILSVRC_val/**/*.JPEG'\n image_files = list(glob.iglob(image_path, recursive=True))\n image_files = sorted(image_files, key=lambda x: os.path.basename(x))\n real_ids = [os.path.basename(x) for x in image_files]\n\n label_path = '/fs/imageNet/imagenet/ILSVRC2012_devkit_t12/' \\\n + 'data/ILSVRC2012_validation_ground_truth.txt'\n with open(label_path) as f:\n labels = [clsid_to_human[int(x)-1] for x in f.readlines()]\n\n if example_ids is not None:\n examples = {r: (r, m, l)\n for r, m, l in zip(real_ids, image_files, labels)}\n examples = [examples[x] for x in example_ids]\n else:\n examples = list(zip(real_ids, image_files, labels))\n\n if shuffle:\n np.random.seed(0)\n np.random.shuffle(examples)\n\n if n_examples > 0:\n examples = examples[:n_examples]\n elif n_batches > 0:\n examples = examples[:batch_size * n_batches]\n\n selected_files = sorted([x[0] for x in examples])\n if dump_filenames is not None:\n with open(dump_filenames, 'w') as f:\n f.write(json.dumps(selected_files))\n # print('\\n'.join(selected_files))\n\n def batch_loader(batch):\n batch = list(map(list, zip(*batch)))\n ids, xs, ys = batch\n return (ids, [viz.pil_loader(x) for x in xs], ys)\n\n batch_indices = list(range(0, len(examples), batch_size))\n batches = [examples[i: i + batch_size] for i in batch_indices]\n batches = map(batch_loader, batches)\n n_batches = len(batch_indices)\n print(n_batches, 'batches', n_batches * batch_size, 'images loaded')\n return model, batches, n_batches\n\n\ndef saliency_correlation(s1, s2):\n # s1 and s2 are batched\n assert s1.shape == s2.shape\n assert s1.ndim == 4\n batch_size = s1.shape[0]\n s1 = viz.agg_clip(s1)\n s2 = viz.agg_clip(s2)\n s1 = s1.reshape(batch_size, -1)\n s2 = s2.reshape(batch_size, -1)\n return [spearmanr(x1, x2).correlation for x1, x2 in zip(s1, s2)]\n\n\ndef saliency_overlap(s1, s2):\n assert s1.shape == s2.shape\n batch_size = s1.shape[0]\n s1 = viz.agg_clip(s1)\n s2 = viz.agg_clip(s2)\n s1 = s1.reshape(batch_size, -1)\n s2 = s2.reshape(batch_size, -1)\n scores = []\n K = 1000\n for x1, x2 in zip(s1, s2):\n x1 = set(np.argsort(-x1)[:K])\n x2 = set(np.argsort(-x2)[:K])\n scores.append(len(x1.intersection(x2)) / K)\n return scores\n\n\ndef get_prediction(model, batch, to_human=True):\n ys = model(batch).max(1)[1].data\n if to_human:\n return [clsid_to_human[y] for y in ys.tolist()]\n else:\n return ys\n\n\ndef plot_matrix(matrix, filename, fontsize=40, rects=[]):\n '''Each entry in the matrix should be a dictionary of:\n image: an image ready to be plotted by imshow\n cmap: color map or None\n text_top, text_bottom, text_left, text_right\n rects: rectangles around box (i, j)\n '''\n n_rows = len(matrix)\n n_cols = max(len(row) for row in matrix)\n plt.rc('text', usetex=True)\n f, ax = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 4 * n_rows))\n for i, row in enumerate(matrix):\n for j, c in enumerate(row):\n image = c.get('image', np.zeros((244, 244)))\n cmap = c.get('cmap', None)\n 
aax = ax[i, j] if len(matrix) > 1 else ax[j]\n aax.imshow(image, cmap=cmap)\n if 'text_top' in c:\n aax.text(0.5, 1.1, c['text_top'], ha='center', va='center',\n transform=aax.transAxes, fontsize=fontsize)\n if 'text_bottom' in c:\n aax.text(0.5, -0.1, c['text_bottom'], ha='center', va='center',\n transform=aax.transAxes, fontsize=fontsize)\n if 'text_left' in c:\n aax.text(-0.1, 0.5, c['text_left'], ha='center', va='center',\n rotation=90, transform=aax.transAxes,\n fontsize=fontsize)\n if 'text_right' in c:\n aax.text(1.1, 0.5, c['text_right'], ha='center', va='center',\n rotation=270, transform=aax.transAxes,\n fontsize=fontsize)\n aax.set_axis_off()\n for i, j in rects:\n aax = ax[i, j] if len(matrix) > 1 else ax[j]\n f.patches.extend([\n patches.Rectangle(\n (-0.01, -0.01), 1.02, 1.02, linewidth=6,\n edgecolor='#55e400', fill=False,\n transform=aax.transAxes)\n ])\n # f.tight_layout(pad=2.0, h_pad=2.0, w_pad=2.0)\n f.savefig(filename)\n plt.close('all')\n\n\ndef explain(model, batches, explainers):\n '''\n Returns a dictionary keyed by the id\n image: original input image\n label: predicted label\n The for each (explainer, attacker):\n perturbed: perturbed input\n saliency: saliency with perturbation\n '''\n results = defaultdict(dict)\n for batch_idx, batch in enumerate(batches):\n ids, images, labels = batch\n xs = torch.stack([transf(x) for x in images]).cuda()\n ys = get_prediction(model, xs)\n for explain_name, explainer in explainers:\n saliency_1 = explainer.explain(model, xs.clone()).cpu().numpy()\n for i, id in enumerate(ids):\n results[id]['image'] = images[i]\n results[id]['label'] = labels[i]\n results[id]['prediction'] = ys[i]\n results[id][explain_name] = {'saliency_1': saliency_1[i]}\n return results\n\n\ndef explain_attack_explain(model, batches, explainers, attackers):\n '''\n Returns a dictionary keyed by the id\n image: original input image\n label: predicted label\n for each explainer:\n saliency_1: saliency without perturbation\n for each (explainer, attacker):\n perturbed: perturbed input\n saliency_2: saliency with perturbation\n overlap: between saliency_1 and saliency_2\n '''\n results = defaultdict(dict)\n for batch_idx, batch in enumerate(batches):\n ids, images, labels = batch\n xs = torch.stack([transf(x) for x in images]).cuda()\n ys = get_prediction(model, xs)\n for explain_name, explainer in explainers:\n saliency_1 = explainer.explain(model, xs.clone()).cpu().numpy()\n for attack_name, attacker in attackers:\n perturbed = attacker.attack(model, xs.clone(), saliency_1)\n perturbed_np = perturbed.cpu().numpy()\n saliency_2 = explainer.explain(model, perturbed).cpu().numpy()\n overlap = saliency_overlap(saliency_1, saliency_2)\n for i, id in enumerate(ids):\n results[id]['image'] = images[i]\n results[id]['label'] = labels[i]\n results[id]['prediction'] = ys[i]\n results[id][explain_name] = {'saliency_1': saliency_1[i]}\n results[id][(explain_name, attack_name)] = {\n 'perturbed': perturbed_np[i],\n 'saliency_2': saliency_2[i],\n 'overlap': overlap[i],\n }\n return results\n\n\ndef plot_explainer(model, batches, n_batches, explainers,\n folder='figures/explain'):\n os.makedirs(folder, exist_ok=True)\n results = explain(model, batches, explainers)\n for i, (id, example) in enumerate(results.items()):\n image = transforms.Resize((224, 224))(example['image'])\n row = [{'image': image}]\n for explain_name, _ in explainers:\n saliency_1 = viz.agg_clip(example[explain_name]['saliency_1'])\n row.append({\n 'image': saliency_1,\n 'cmap': 'gray',\n 'text_top': 
explain_name,\n })\n with open('placeholder_{}.pkl'.format(i), 'wb') as f:\n example['id'] = id\n example['cafo'] = example['Grad']['saliency_1']\n example['caso'] = example['Grad']['saliency_1']\n example['grad'] = example['Grad']['saliency_1']\n example['integrated'] = example['Grad']['saliency_1']\n example.pop('Grad')\n pickle.dump(example, f)\n plot_matrix([row], f'{folder}/{id}.pdf', fontsize=15)\n\n\ndef plot_explainer_attacker(model, batches, n_batches, attackers, explainers,\n folder='figures/explain_attack'):\n '''For each example, generate a matrix of plots:\n rows are different inputs (original and perturbed)\n columns are different explainers.\n '''\n os.makedirs(folder, exist_ok=True)\n results = explain_attack_explain(model, batches, explainers, attackers)\n for id, example in results.items():\n row = []\n for explain_name, _ in explainers:\n saliency_1 = viz.agg_clip(example[explain_name]['saliency_1'])\n row.append({\n 'image': saliency_1,\n 'cmap': 'gray',\n 'text_top': explain_name,\n })\n matrix = [row]\n for attack_name, _ in attackers:\n image_row = []\n saliency_row = []\n for explain_name, _ in explainers:\n cell = example[(explain_name, attack_name)]\n perturbed = viz.img_rev(cell['perturbed'])\n image_row.append({\n 'image': perturbed,\n 'text_left': attack_name,\n })\n saliency_2 = viz.agg_clip(cell['saliency_2'])\n med_diff = viz.get_median_difference(saliency_2)\n text_top = '{:.3f}'.format(med_diff)\n saliency_row.append({\n 'image': saliency_2,\n 'cmap': 'gray',\n 'text_left': 'Saliency',\n 'text_top': text_top,\n })\n matrix.append(image_row)\n matrix.append(saliency_row)\n plot_matrix(matrix, f'{folder}/{id}.pdf', fontsize=15)\n\n\ndef plot_post_processing(model, batches, n_batches,\n folder='figures/post_processing'):\n '''Single image saliency mapping with four different post-processing\n methods.\n '''\n os.makedirs(folder, exist_ok=True)\n explainers = [\n ('CASO', CASO()),\n ('CAFO', CASO(lambda_t2=0)),\n ('Gradient', VanillaGrad()),\n ]\n results = explain(model, batches, explainers)\n for id, example in results.items():\n raw_image = transforms.Resize((224, 224))(example['image'])\n image_input = transf(example['image']).numpy()\n plt.rc('text', usetex=True)\n col0 = [\n {'image': raw_image, 'text_left': r'$\\Delta$'},\n {'image': raw_image, 'text_left': r'clip$(\\Delta)$'},\n {'image': raw_image, 'text_left': r'$\\Delta\\odot x$'},\n {'image': raw_image, 'text_left': r'clip$(\\Delta\\odot x)$'},\n ]\n col0 += [{'image': raw_image} for _ in range(3)]\n matrix = [col0]\n for mth_name, _ in explainers:\n col = []\n saliency = example[mth_name]['saliency_1']\n saliency_0 = viz.agg_default(saliency)\n saliency_1 = viz.agg_clip(saliency)\n saliency_2 = viz.agg_default(saliency * image_input)\n saliency_3 = viz.agg_clip(saliency * image_input)\n col.append({'image': saliency_0, 'cmap': 'gray',\n 'text_top': mth_name})\n col.append({'image': saliency_1, 'cmap': 'gray'})\n col.append({'image': saliency_2, 'cmap': 'gray'})\n col.append({'image': saliency_3, 'cmap': 'gray'})\n matrix.append(col)\n matrix = list(map(list, zip(*matrix)))\n plot_matrix(matrix, f'{folder}/{id}.pdf')\n\n\ndef to_decimal(x):\n x = '{:.0e}'.format(Decimal(x))\n x = x.replace('e+', 'e^{')\n x = x.replace('e-', 'e^{-') + '}'\n return x\n\n\ndef plot_steps(model, batches, n_batches, n_iters=None,\n folder='figures/steps'):\n os.makedirs(folder, exist_ok=True)\n if n_iters is None:\n n_iters = [1, 2, 3, 4, 5, 6]\n explainers = []\n for n in n_iters:\n explainers.append(\n (n, 
CASO(lambda_t2=0, n_iter=n)))\n results = explain(model, batches, explainers)\n for id, example in results.items():\n image = transforms.Resize((224, 224))(example['image'])\n row = [{'image': image}]\n for n in n_iters:\n # only show explainer label on top of the row of original\n # example without perturbation\n cell = example[n]\n saliency = viz.agg_clip(cell['saliency_1'])\n med_diff = viz.get_median_difference(saliency)\n text_top = ''\n row.append({\n 'image': saliency,\n 'cmap': 'gray',\n 'text_top': text_top,\n 'text_bottom': r'$\\eta={:.3f}$'.format(med_diff),\n })\n path = f'{folder}/{id}.pdf'\n plot_matrix([row], path, fontsize=30)\n\n\ndef plot_l1_l2(model, batches, n_batches, l1s=None, l2s=None,\n folder='figures/l1_l2'):\n os.makedirs(folder, exist_ok=True)\n if l1s is None:\n l1s = [1, 10, 50, 100, 200]\n if l2s is None:\n l2s = [1e2, 1e3, 1e4, 1e5, 1e6]\n explainers = []\n for l1 in l1s:\n # use the combination as name\n for l2 in l2s:\n explainers.append(\n ((l1, l2), EigenCASO(lambda_l1=l1, lambda_l2=l2, init='eig')))\n results = explain(model, batches, explainers)\n rect = (0, 0)\n best_median_diff = 0\n for id, example in results.items():\n matrix = []\n image = transforms.Resize((224, 224))(example['image'])\n for i, l2 in enumerate(l2s):\n text_left = r'$\\lambda_2={}$'.format(to_decimal(l2))\n row = [{'image': image, 'text_left': text_left}]\n for j, l1 in enumerate(l1s):\n # only show explainer label on top of the row of original\n # example without perturbation\n cell = example[(l1, l2)]\n saliency = viz.agg_clip(cell['saliency_1'])\n med_diff = viz.get_median_difference(saliency)\n text_top = ''\n if i == 0:\n text_top = r'$\\lambda_1={}$'.format(to_decimal(l1))\n row.append({\n 'image': saliency,\n 'cmap': 'gray',\n 'text_top': text_top,\n 'text_bottom': r'$\\eta={:.3f}$'.format(med_diff),\n })\n if med_diff > best_median_diff:\n rect = (i, j + 1)\n best_median_diff = med_diff\n matrix.append(row)\n path = f'{folder}/{id}.pdf'\n plot_matrix(matrix, path, fontsize=30, rects=[rect])\n\n\ndef task_goose():\n goose_id = 'ILSVRC2012_val_00045520.JPEG'\n model, batches, n_batches = setup_imagenet(example_ids=[goose_id])\n plot_post_processing(model, batches, n_batches,\n folder='figures/goose/post_processing')\n plot_l1_l2(model, batches, n_batches, folder='figures/goose/l1_l2')\n\n\ndef task_explain():\n model, batches, n_batches = setup_imagenet(batch_size=1, n_examples=2)\n explainers = [\n ('Grad', VanillaGrad()),\n # ('Spectrum', Spectrum()),\n # ('EigenCASO', EigenCASO(optim='adam', lambda_l1=0.1, n_iter=60)),\n ]\n plot_explainer(model, batches, n_batches, explainers)\n\n\ndef task_explain_attack():\n model, batches, n_batches = setup_imagenet(batch_size=16, n_examples=5)\n attackers = [\n ('Original', EmptyAttack()),\n ('Ghorbani', GhorbaniAttack()),\n ('Random', ScaledNoiseAttack()),\n ]\n explainers = [\n ('Grad', VanillaGrad()),\n ('Smooth', SmoothGrad()),\n ('Integrated', IntegratedGrad()),\n ]\n plot_explainer_attacker(model, batches, n_batches, attackers, explainers)\n\n\ndef task_l1_l2():\n model, batches, n_batches = setup_imagenet(n_examples=16)\n n_steps = 16\n l1_lo, l1_hi = 0.01, 2e5\n l2_lo, l2_hi = 1e2, 1e8\n l1s = np.geomspace(l1_lo, l1_hi, n_steps).tolist()\n l2s = np.geomspace(l2_lo, l2_hi, n_steps).tolist()\n plot_l1_l2(model, batches, n_batches, l1s=l1s, l2s=[10])\n\n\ndef task_steps():\n model, batches, n_batches = setup_imagenet(n_examples=16)\n plot_steps(model, batches, n_batches)\n\n\nif __name__ == '__main__':\n import argparse\n parser 
= argparse.ArgumentParser()\n parser.add_argument('--task', type=str)\n args = parser.parse_args()\n fs = {\n 'goose': task_goose,\n 'explain': task_explain,\n 'explain_attack': task_explain_attack,\n 'l1l2': task_l1_l2,\n 'steps': task_steps,\n }\n fs[args.task]()\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":17937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"602593221","text":"import numpy as np\nimport pandas as pd\nfrom minepy import MINE\nimport random\nimport math\nfrom multiprocessing import Pool\n\nimport sys\n\n# import statsmodels\n# from statsmodels.stats.multitest import fdrcorrection\n\ndef print_stats(mine):\n print (\"MIC\", mine.mic())\n print (\"MAS\", mine.mas())\n print (\"MEV\", mine.mev())\n print (\"MCN (eps=0)\", mine.mcn(0))\n print (\"MCN (eps=1-MIC)\", mine.mcn_general())\n print (\"GMIC\", mine.gmic())\n print (\"TIC\", mine.tic())\n\ndef filter_df(df):\n # process data\n # 1.filtered out genes that are expressed in less than 10% of the array spots \n # 2.and selected spots with at least ten total read counts\n\n # gene start from 1\n df_genes = df.columns.values\n df_genes = df_genes[1:]\n\n # obtain the cell spots\n cellcentroids = df['Unnamed: 0']\n\n # delete the corridinats\n df.drop(['Unnamed: 0'],axis=1,inplace=True)\n \n # obtain the cell spots after delete the spots less than ten total read counts\n # cellcentroids = cellcentroids.drop(df[df['Col_sum']<=10].index)\n cellcentroids = cellcentroids.values\n cell_x = []\n cell_y = []\n for cell_i in cellcentroids:\n cell_x.append(cell_i.split('x')[0])\n cell_y.append(cell_i.split('x')[1])\n\n return(np.array(cell_x),np.array(cell_y),df)\n\ndef background_correction(cell_x,cell_y):\n cell_x = [int(x) for x in cell_x]\n cell_y = [int(x) for x in cell_y]\n\n # plt.scatter(cell_x,cell_y, edgecolor='b', facecolor='none', alpha=0.5 )\n # plt.xlabel(\"x\"); plt.ylabel(\"y\")\n # plt.show()\n\n cell_x_copy = cell_x\n cell_y_copy = cell_y\n\n cell_x = [round(x) for x in cell_x]\n cell_y = [round(x) for x in cell_y]\n\n xMin = min(cell_x);xMax = max(cell_x);\n yMin = min(cell_y);yMax = max(cell_y);\n\n cell_x, cell_y = (list(t) for t in zip(*sorted(zip(cell_x,cell_y),key=lambda x: x[0])))\n corridinats = []\n for i in range(len(cell_x)):\n corridinats.append((cell_x[i],cell_y[i]))\n\n cell_x_noise = []\n cell_y_noise = []\n for x in range(xMin,xMax+1,1):\n for y in range(yMin,yMax+1,1):\n if (x,y) not in corridinats:\n cell_x_noise.append(x)\n cell_y_noise.append(y)\n\n cell_x_new = cell_x_copy + cell_x_noise\n cell_y_new = cell_y_copy + cell_y_noise\n\n # plt.scatter(cell_x_new,cell_y_new, edgecolor='b', facecolor='none', alpha=0.5 )\n # plt.xlabel(\"x\"); plt.ylabel(\"y\")\n # plt.show()\n\n df = pd.DataFrame({'cell_x_noise':cell_x_noise,'cell_y_noise':cell_y_noise})\n return df\n\ndef calculate_mic_gene_both(cell_x,cell_y,gene_count):\n x_count_list = []\n y_count_list = []\n index = np.nonzero(gene_count)\n count = gene_count[index]\n\n x = cell_x[index]\n y = cell_y[index]\n for count_index in range(len(count)):\n x_count_list.extend([x[count_index]]*count[count_index])\n y_count_list.extend([y[count_index]]*count[count_index])\n \n assert np.sum(gene_count) == len(x_count_list) == len(y_count_list)\n\n mine = MINE(alpha=0.6, c=15, est=\"mic_approx\")\n mine.compute_score(x_count_list, y_count_list)\n return(mine.mic(),mine.tic())\n\ndef calculate_mic_gene(cell_x,cell_y,gene_count,scoring):\n x_count_list = []\n 
y_count_list = []\n index = np.nonzero(gene_count)\n count = gene_count[index]\n\n x = cell_x[index]\n y = cell_y[index]\n for count_index in range(len(count)):\n x_count_list.extend([x[count_index]]*count[count_index])\n y_count_list.extend([y[count_index]]*count[count_index])\n \n assert np.sum(gene_count) == len(x_count_list) == len(y_count_list)\n\n mine = MINE(alpha=0.6, c=15, est=\"mic_approx\")\n mine.compute_score(x_count_list, y_count_list)\n\n if scoring==\"MIC\":\n return(mine.mic())\n elif scoring==\"TIC\":\n return(mine.tic())\n\ndef compute_genes_MIC(file_path):\n # preprocess the dataset\n pd_file = pd.read_csv(file_path, sep='\\t')\n cell_x,cell_y,pd_file = filter_df_TIC(pd_file)\n cortext_svz_counts = pd_file\n\n genes = cortext_svz_counts.columns.values\n print('total genes: ',len(genes))\n print('total spots: ',len(cell_x))\n\n # simulate cell location by poisson\n pd_BG = background_correction(cell_x,cell_y)\n cell_x_BG = pd_BG['cell_x_noise'].values\n cell_y_BG = pd_BG['cell_y_noise'].values\n print(pd_BG)\n\n # updata the location \n cell_x_new = np.hstack((cell_x,cell_x_BG)) \n cell_y_new = np.hstack((cell_y,cell_y_BG)) \n\n MIC_list = []\n TIC_list = []\n for gene in genes:\n print('gene:',gene)\n gene_count = cortext_svz_counts[gene].values\n\n Q1 = np.quantile(gene_count,0.25)\n Q2 = np.quantile(gene_count,0.5)\n Q3 = np.quantile(gene_count,0.75)\n EM = 0.5*Q2 + 0.25*(Q1+Q3)\n mean = int(EM)\n\n gene_count_noise = mean*np.ones(np.shape(cell_x_BG),dtype=np.int64)\n\n gene_count_new = np.hstack((gene_count + mean,gene_count_noise)) \n gene_count_new = gene_count_new.astype(np.int64)\n\n mic_score,tic_score = calculate_mic_gene_both(cell_x=cell_x_new,cell_y=cell_y_new,gene_count=gene_count_new)\n print('mic:',mic_score)\n print('tic:',tic_score)\n MIC_list.append(mic_score)\n TIC_list.append(tic_score)\n\n return(MIC_list,TIC_list,genes)\n\n\ndef compute_genes_pvalue(prefix,file_path,top_genes,method,num_perm,num_jobs):\n # preprocess the dataset\n pd_file = pd.read_csv(file_path, sep='\\t')\n # pd_file = pd.read_csv(file_path)\n cell_x,cell_y,pd_file = filter_df(pd_file)\n cortext_svz_counts = pd_file\n print(pd_file)\n\n # genes = cortext_svz_counts.columns.values\n genes = top_genes\n print('top genes:',len(genes))\n\n # simulate cell location by poisson\n BG_filepath = prefix + dataname + '/' + dataname + '_BG_spots.csv'\n pd_BG = pd.read_csv(BG_filepath)\n cell_x_noise = pd_BG['cell_x_noise'].values\n cell_y_noise = pd_BG['cell_y_noise'].values\n\n # updata the location \n cell_x_new = np.hstack((cell_x,cell_x_noise)) \n cell_y_new = np.hstack((cell_y,cell_y_noise)) \n\n p_values = []\n scores = []\n for gene in genes:\n print('gene:',gene)\n gene_count = cortext_svz_counts[gene].values\n gene_count_plot = gene_count.tolist()\n gene_count_plot.sort()\n print(gene_count_plot)\n\n gene_count_noise = random.sample(gene_count_plot,np.shape(cell_x_noise)[0])\n gene_count_noise.sort()\n print(gene_count_noise)\n\n gene_count_new = np.hstack((gene_count,np.array(gene_count_noise)))\n gene_count_new = gene_count_new.astype(np.int64)\n\n score, permutation_scores, pvalue = permutation_test(cell_x_new,cell_y_new,gene,gene_count_new,scoring=method,n_permutations=num_perm,\n n_jobs=num_jobs, random_state=1)\n print('pvalue:',pvalue)\n print('score:',score)\n p_values.append(pvalue)\n scores.append(score)\n return(p_values,scores,genes)\n\ndef permutation_test(cell_x,cell_y,gene,gene_count,scoring=\"TIC\",n_permutations=1000,\n n_jobs=None, random_state=0,verbose=0):\n 
score_true = _permutation_test_score(cell_x, cell_y,gene,gene_count,scoring)\n\n num_processor = n_jobs\n pool = Pool(num_processor)\n\n result = []\n split_num = n_permutations // num_processor\n left_num = n_permutations%num_processor\n process_list = [split_num] * num_processor\n for i in range(left_num):\n process_list[i] += 1\n\n # print(\"split_num:\", split_num)\n # print(\"left_num:\", left_num)\n\n def per_process_shuffle(a, repeat_num, i): \n rng = np.random.RandomState(i)\n res = []\n for _ in range(repeat_num):\n rng.shuffle(a)\n copy_a = a.copy()\n res.append(copy_a) \n return res\n\n for i in range(num_processor):\n result.append(\n pool.apply_async(\n _permutation_test_score_list, args=(cell_x, cell_y,gene, np.array(per_process_shuffle(gene_count.tolist(), process_list[i], i)),scoring)\n )\n )\n permutation_scores = []\n for i in result:\n for score in i.get():\n permutation_scores.append(score)\n \n pool.close()\n pool.join()\n\n permutation_scores = np.array(permutation_scores)\n pvalue = (np.sum(permutation_scores >= score_true) + 1.0) / (n_permutations + 1)\n return score_true, permutation_scores, pvalue\n\ndef _permutation_test_score(cell_x,cell_y,gene,gene_count,scoring):\n \"\"\"Auxiliary function for permutation_test_score\"\"\"\n return calculate_mic_gene(cell_x,cell_y,gene_count,scoring)\n\ndef _permutation_test_score_list(cell_x,cell_y,gene,gene_count_list,scoring):\n \"\"\"Auxiliary function for permutation_test_score\"\"\"\n score_list = []\n for gene_count in gene_count_list:\n score_list.append(calculate_mic_gene(cell_x,cell_y,gene_count,scoring))\n return score_list\n\ndef compute_FDR(df):\n genes = df['gene'].values\n pvalue = df['pvalue'].values\n tic_list = df['TIC'].values\n\n # sort\n pvalue, genes,tic_list= (list(t) for t in zip(*sorted(zip(pvalue,genes,tic_list),key=lambda x: x[0])))\n\n rejected,qvalue = fdrcorrection(np.array(pvalue), alpha=0.05, method='negcorr', is_sorted=True)\n return(genes,pvalue,qvalue,tic_list)\n\n\nif __name__ == '__main__': \n # dataname = sys.argv[1]\n # num_perm = sys.argv[2]\n # num_perm = int(num_perm)\n\n dataname = 'BC2'\n num_perm = 10000\n # load raw data\n prefix = 'data/ST/'\n file_path = prefix + dataname + '/' + dataname + '_count_matrix.tsv' # 151673\n\n ############################## compute TIC rank \n MIC_list, TIC_list, genes = compute_genes_MIC(file_path=file_path)\n MIC_list, TIC_list, genes = (list(t) for t in zip(*sorted(zip(MIC_list, TIC_list, genes),key=lambda x: x[1],reverse=True)))\n df = pd.DataFrame({'gene':genes,'mic':MIC_list,'tic':TIC_list})\n save_path = 'results/'+dataname+'/'+dataname+'_tic_rank.csv'\n df.to_csv(save_path,index=False)\n\n ###############\n # df = pd.read_csv('results/' + dataname + '/'+ dataname+'_mic_tic.csv') #[33538,4]\n genes = df['gene'].values.tolist()\n\n ############################## if perform permutation on top 10% genes:p-val\n top_genes = genes\n\n p_values, scores, genes = compute_genes_pvalue(prefix=prefix,file_path=file_path,top_genes = top_genes, method='TIC',num_perm = num_perm,num_jobs=40)\n scores_rank, p_values_rank, genes_rank, = (list(t) for t in zip(*sorted(zip(scores,p_values,genes),key=lambda x: x[1])))\n df = pd.DataFrame({'gene':genes_rank,'pvalue':p_values_rank,'TIC':scores_rank})\n save_path = 'results/'+dataname+'/'+dataname+'_pvals.csv'\n df.to_csv(save_path)\n 
\n","sub_path":"SPRI_ST_differentBG.py","file_name":"SPRI_ST_differentBG.py","file_ext":"py","file_size_in_byte":10661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"548942290","text":"import os, sys\nimport numpy as np\nimport gym\nimport tensorflow as tf\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow.contrib.eager as tfe\nfrom tensorflow.contrib.layers import flatten, conv2d, fully_connected\nfrom collections import deque, Counter\nimport random\nfrom datetime import datetime\n\nenv = gym.make('MsPacman-v0')\nn_outputs = env.action_space.n\ncolor = np.array([210, 164, 74]).mean()\n\ndef preprocess_observation(obs):\n\n\timg = obs[1:176:2, ::2]\n\timg = img.mean(axis=2)\n\timg[img==color] = 0\n\n\timg = ((img - 128)/128) - 1\n\n\treturn img.reshape(88, 80, 1)\n\ntf.reset_default_graph()\n\ndef q_network(X, name_scope):\n\tinitializer = tf.contrib.layers.variance_scaling_initializer()\n\n\twith tf.variable_scope(name_scope) as scope:\n\n\t\tlayer_1 = conv2d(X, num_outputs=32, kernel_size=(8, 8), stride=4, padding='SAME', weights_initializer=initializer)\n\t\ttf.summary.histogram('layer_1', layer_1)\n\n\t\tlayer_2 = conv2d(layer_1, num_outputs=64, kernel_size=(4, 4), stride=2, padding='SAME', weights_initializer=initializer)\n\t\ttf.summary.histogram('layer_2', layer_2)\n\n\t\tlayer_3 = conv2d(layer_2, num_outputs=64, kernel_size=(3, 3), stride=1, padding='SAME', weights_initializer=initializer)\n\t\ttf.summary.histogram('layer_3', layer_3)\n\n\t\tflat = flatten(layer_3)\n\t\tfc = fully_connected(flat, num_outputs=128, weights_initializer=initializer)\n\t\ttf.summary.histogram('fc', fc)\n\n\t\toutput = fully_connected(fc, num_outputs=n_outputs, activation_fn=None, weights_initializer=initializer)\n\t\ttf.summary.histogram('output', output)\n\n\t\tvariables = {v.name[len(name_scope):]:v for v in tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)}\n\n\t\treturn variables, output\n\nepsilon = 0.5\neps_min = 0.05\neps_max = 1.0\neps_decay_steps = 500000\n\ndef epsilon_greedy(action, step):\n\tp = np.random.random(1).squeeze()\n\tepsilon = max(eps_min, eps_max - (eps_max - eps_min)*step/eps_decay_steps)\n\n\tif np.random.random() < epsilon:\n\t\treturn np.random.randint(n_outputs)\n\telse:\n\t\treturn action\n\nbuffer_len = 20000\nexp_buffer = deque(maxlen=buffer_len)\n\ndef sample_memories(batch_size):\n\tperm_batch = np.random.permutation(len(exp_buffer))[:batch_size]\n\tmem = np.array(exp_buffer)[perm_batch]\n\n\treturn mem[:, 0], mem[:, 1], mem[:, 2], mem[:, 3], mem[:, 4]\n\nnum_episodes = 800\nbatch_size = 24\ninput_shape = (None, 88, 80, 1)\nlearning_rate = 0.001\nX_shape = input_shape\ndiscount_factor = 0.97\n\nglobal_step = 0\ncopy_steps = 100\nsteps_train = 4\nstart_steps = 200\nlogdir = 'DQN_logs'\n\n#placeholder for the screen input\nX = tf.placeholder(tf.float32, shape=X_shape)\nin_training_mode = tf.placeholder(tf.bool)\n\n#getting outputs for main training network and target network\nmainQ, mainQ_ouputs = q_network(X, 'mainQ')\ntargetQ, targetQ_outputs = q_network(X, 'targetQ')\n\n#defining placeholders for action and Q values \nX_action = tf.placeholder(tf.int32, shape=(None,))\nQ_action = tf.reduce_sum(targetQ_outputs * tf.one_hot(X_action, n_outputs), axis=-1, keepdims=True)\n\n#copying main Q network parameters to target Q network\ncopy_op = [tf.assign(var_values, targetQ[var_name]) for var_name, var_values in mainQ.items()]\ncopy_target_to_main = tf.group(*copy_op)\n\n#calculating 
loss\ny = tf.placeholder(tf.float32, shape=(None, 1))\nloss = tf.reduce_sum(tf.square(y - Q_action))\noptimizer = tf.train.AdamOptimizer(learning_rate)\ntraining_op = optimizer.minimize(loss)\n\nloss_summary = tf.summary.scalar('Loss', loss)\nmerge_summary = tf.summary.merge_all()\nfile_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n\tsess.run(init)\n\n\tfor i in range(num_episodes):\n\t\tdone = False\n\t\tobs = env.reset()\n\t\tepoch = 0\n\t\tepisodic_reward = 0\n\t\tactions_counter = Counter()\n\t\tepisodic_loss = []\n\n\t\tepisode_step = 0\n\t\twhile not done:\n\t\t\tepisode_step += 1\n\t\t\tsys.stdout.write('\\rEpisode: {}/{}, Step: {}'.format(i + 1, num_episodes, episode_step))\n\t\t\tsys.stdout.flush()\n\t\t\tobs = preprocess_observation(obs)\n\t\t\tactions = mainQ_ouputs.eval(feed_dict={X:[obs], in_training_mode:False})\n\t\t\taction = np.argmax(actions, axis=-1)\n\t\t\tactions_counter[str(action)] += 1\n\n\t\t\taction = epsilon_greedy(action, global_step)\n\n\t\t\tnext_obs, reward, done, _ = env.step(action)\n\n\t\t\texp_buffer.append([obs, action, preprocess_observation(next_obs), reward, done])\n\n\t\t\tif global_step%steps_train == 0 and global_step > start_steps:\n\t\t\t\to_obs, o_act, o_next_obs, o_rew, o_done = sample_memories(batch_size)\n\n\t\t\t\to_obs = [x for x in o_obs]\n\t\t\t\to_next_obs = [x for x in o_next_obs]\n\n\t\t\t\tnext_act = mainQ_ouputs.eval(feed_dict={X:o_next_obs, in_training_mode:False})\n\n\t\t\t\ty_batch = o_rew + discount_factor*np.max(next_act, axis=-1) * (1 - o_done)\n\n\t\t\t\tmrg_summry = merge_summary.eval(feed_dict={X:o_obs, y:np.expand_dims(y_batch, axis=-1), X_action:o_act, in_training_mode:False})\n\t\t\t\tfile_writer.add_summary(mrg_summry, global_step)\n\n\t\t\t\ttrain_loss, _ = sess.run([loss, training_op], feed_dict={X:o_obs, y:np.expand_dims(y_batch, axis=-1), X_action:o_act, in_training_mode:True})\n\t\t\t\tepisodic_loss.append(train_loss)\n\n\t\t\tif (global_step + 1) % copy_steps == 0 and global_step > start_steps:\n\t\t\t\tcopy_target_to_main.run()\n\n\t\t\tobs = next_obs\n\t\t\tepoch += 1\n\t\t\tglobal_step += 1\n\t\t\tepisodic_reward += reward\n\n\t\tprint(\"\\nEpoch: {}, Global Step: {}, Reward: {}\".format(epoch, global_step, episodic_reward))","sub_path":"atari_DQN.py","file_name":"atari_DQN.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"347728103","text":"# coding: utf8\n# cyrilchars.py\n# 4/30/2015 jichi\nfrom functools import partial\nfrom . 
import unichars\n\n\"\"\"\n@param unicode\n@return int\n\"\"\"\nfindcyril = partial(unichars.findrange, start=unichars.ORD_CYRIL_FIRST, stop=unichars.ORD_CYRIL_LAST)\n\n\"\"\"\n@param unicode\n@return bool\n\"\"\"\nanycyril = partial(unichars.ordany, start=unichars.ORD_CYRIL_FIRST, stop=unichars.ORD_CYRIL_LAST)\n\n\"\"\"\n@param unicode\n@return bool\n\"\"\"\nallcyril = partial(unichars.ordall, start=unichars.ORD_CYRIL_FIRST, stop=unichars.ORD_CYRIL_LAST)\n\n\"\"\"\n@param unicode\n@return bool\n\"\"\"\niscyril = partial(unichars.charinrange, start=unichars.ORD_CYRIL_FIRST, stop=unichars.ORD_CYRIL_LAST)\n\nRU_UK = ( # (unicode ru, unicode uk)\n ('э', 'е'),\n ('и', 'і'),\n ('й', 'ї'), # j\n ('г', 'ґ'),\n ('х', 'г'),\n)\ndef ru2uk(text):\n \"\"\"\n @param text unicode\n @return unicode\n \"\"\"\n for k,v in RU_UK:\n text = text.replace(k, v)\n text = text.replace(k.upper(), v.upper())\n return text\n\ndef ru2lang(text, lang):\n \"\"\"\n @param text unicode\n @param lang str\n @return unicode\n \"\"\"\n #if lang == 'bg':\n # return ru2bg(text)\n if lang == 'uk':\n return ru2uk(text)\n return text\n\ndef uk2ru(text):\n \"\"\"\n @param text unicode\n @return unicode\n \"\"\"\n for v,k in reversed(RU_UK):\n text = text.replace(k, v)\n text = text.replace(k.upper(), v.upper())\n return text\n\n_TRANSLIT_FIX = (\n ('c', 'с'),\n)\ndef en2ru(text):\n \"\"\"\n @param text unicode\n @return unicode\n \"\"\"\n from transliterate import translit\n text = translit(text, 'ru')\n for k,v in _TRANSLIT_FIX:\n text = text.replace(k, v)\n text = text.replace(k.upper(), v.upper())\n return text\ndef en2uk(text):\n \"\"\"\n @param text unicode\n @return unicode\n \"\"\"\n from transliterate import translit\n text = translit(text, 'uk')\n for k,v in _TRANSLIT_FIX:\n text = text.replace(k, v)\n text = text.replace(k.upper(), v.upper())\n return text\n\nCYRILLIC_LANGUAGES = 'ru', 'uk'\ndef latin2cyril(text, lang=CYRILLIC_LANGUAGES[0]):\n \"\"\"\n @param text unicode\n @return unicode\n \"\"\"\n if lang == 'ru':\n return en2ru(text)\n if lang == 'uk':\n return en2uk(text)\n return text\n\n# EOF\n","sub_path":"py/libs/unitraits/cyrilchars.py","file_name":"cyrilchars.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"395129204","text":"#Imports.\nimport sys\nimport random\nimport datetime\nimport time\n\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\n\n#DB Connection.\nclient = MongoClient('localhost', 27017)\ndb = client.trafficanalyser\ncardata = db.cardata\n\n#Generating a random number between selectected min and max. \ndef randomNumGen(min, max):\n return random.uniform(min, max)\n\n\n#Selecting a random type of vehicle from the given list of vehicletypes. \ndef randomCarType():\n types = ['van', 'truck', 'car', 'motorbike', 'bus']\n return random.choice(types)\n\n#Selecting a random road from the given list of roads.\ndef randomRoad():\n roads = ['Kongevej', 'Prinsevej', 'Janusvej', 'Abevej', '´Thomasvej',\n 'Princessevej', 'Elefantvej', 'Egevej', 'Kakaovej', 'Pinsevej',\n 'E20', 'E45', 'E39', 'E47', 'E55'\n ]\n return random.choice(roads)\n\n#Generating data with a specific amount. \ndef generateData(quantity):\n print('Generating {0} database entries...'.format(quantity))\n print(' ')\n\n #Generating an id for a specific unit. \n for i in range(quantity):\n\n print('Generating entry #{0}'.format(i))\n \n #Makes it sleep for a random time. 
This will make the timestamp to not be the same.\n time.sleep(randomNumGen(0.05, 1.5))\n \n #Picking a random vehicletype.\n vtype = randomCarType()\n\n #Picking a random roadname within the roadtype and sets the speed for the vehicle after.\n road = randomRoad()\n if(road == 'Kongevej' or road == 'Prinsevej' or road == 'Janusvej' or road == 'Abevej' or road == 'Thomasvej'):\n if(vtype == 'van'):\n speed = randomNumGen(20.0, 70.0)\n elif(vtype == 'truck'):\n speed = randomNumGen(20.0, 65.0)\n elif(vtype == 'car'):\n speed = randomNumGen(20.0, 80.0)\n elif(vtype == 'motorbike'):\n speed = randomNumGen(20.0, 80.0)\n elif(vtype == 'bus'):\n speed = randomNumGen(20.0, 65.0)\n roadSpeed = 50\n roadtype = \"villavej\"\n\n #Picking a random roadname within the roadtype and sets the speed for the vehicle after. \n elif(road == 'Princessevej' or road == 'Elefantvej' or road == 'Egevej' or road == 'Kakaovej' or road == 'Pinsevej'):\n if(vtype == 'van'):\n speed = randomNumGen(50.0, 105.0)\n elif(vtype == 'truck'):\n speed = randomNumGen(50.0, 100.0)\n elif(vtype == 'car'):\n speed = randomNumGen(50.0, 115.0)\n elif(vtype == 'motorbike'):\n speed = randomNumGen(50.0, 115.0)\n elif(vtype == 'bus'):\n speed = randomNumGen(50.0, 100.0)\n roadSpeed = 80\n roadtype = \"landevej\"\n\n #Picking a random roadname within the roadtype and sets the speed for the vehicle after. \n elif(road == 'E20' or road == 'E45' or road == 'E39' or road == 'E47' or road == 'E55'):\n if(vtype == 'van'):\n speed = randomNumGen(85.0, 150.0)\n elif(vtype == 'truck'):\n speed = randomNumGen(70.0, 100.0)\n elif(vtype == 'car'):\n speed = randomNumGen(85.0, 200.0)\n elif(vtype == 'motorbike'):\n speed = randomNumGen(85.0, 200.0)\n elif(vtype == 'bus'):\n speed = randomNumGen(70.0, 100.0)\n roadSpeed = 110\n roadtype = \"motorvej\"\n\n #A random distance to the next car is being set. \n dist = randomNumGen(1.0, 25.0)\n\n #A timestamp is being set. 
\n timestamp = datetime.datetime.utcnow().isoformat()\n\n #The data is formatted so it fits.\n data = {\"vehicletype\": \"{0}\".format(vtype), \"road\": \"{0}\".format(road), \"roadtype\": \"{0}\".format(roadtype), \"roadspeed\": \"{0}\".format(roadSpeed), \"speed\": \"{0}\".format(speed), \"distance\": \"{0}\".format(dist), \"timestamp\": \"{0}\".format(timestamp)}\n\n #The data is getting inserted to the database.\n cardata.insert_one(data)\n \n print(' ')\n print('Done!')\n \n#Calls the method with 1 system argument\ngenerateData(int(sys.argv[1]))","sub_path":"Semester Projekt 4 - Trafficanalyser/Tools/Mockdata Generator.py","file_name":"Mockdata Generator.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"647319981","text":"import datetime\nfrom time import strptime\nimport bs4 as bs\nimport urllib.request\nfrom models.fixture import UpcomingFixture\n\ndef add_countdown(soup, description):\n # Markers to find within sidebar for updating without overwriting manually input data\n begin_marker = '[](/beginCountdown)'\n end_marker = '[](/endCountdown)'\n clock_string = ''\n\n # Call get_next_fixture with soup argument\n next_fixture = get_next_fixture(soup)\n\n # Time delta\n td = next_fixture - datetime.datetime.utcnow()\n\n fixture_end = get_fixture_end(next_fixture)\n \n start_index = description.find(begin_marker) + len(begin_marker)\n end_index = description.find(end_marker)\n\n if (start_index == -1 or end_index == -1):\n return False\n elif (((((td.seconds // 60) % 60) <= 15) and (td.days == 0) and (td.seconds // 3600 == 0)) or (fixture_end > datetime.datetime.utcnow() and next_fixture < datetime.datetime.utcnow())):\n clock_string = '\\n####Match in progress'\n else:\n clock_string = '\\n####Next Match: ' + str(td.days) + 'd ' + str(td.seconds // 3600) + \\\n 'hr ' + str((td.seconds // 60) % 60) + 'min\\n'\n\n print(clock_string)\n new_description = description[:start_index] + clock_string + description[end_index:]\n return new_description\n\ndef get_next_fixture(soup):\n fixture = soup.find(class_=\"games-container\").find_next_sibling(\n ).find_next_sibling().find(class_=\"score-list upcoming\")\n\n if (len(fixture.find(class_=\"date\").get_text()) == 12):\n f_date = fixture.find(class_=\"date\").get_text()[0:6]\n else:\n f_date = fixture.find(class_=\"date\").get_text()[0:5]\n\n # Most recent errors need to be fixed here\n f_time = fixture.find(class_=\"time\").get_text()\n if (f_time.find(\"PM\") != -1):\n if (f_time[:2].find(\":\") != -1):\n f_time = datetime.time(int(f_time[0:1]) + 12, int(f_time[2:4]))\n else:\n if (f_time[0:2] == '12'):\n f_time = datetime.time(int(f_time[0:2]), int(f_time[3:5]))\n else:\n f_time = datetime.time(int(f_time[0:2]) + 12, int(f_time[3:5]))\n else:\n if (f_time[:2].find(\":\") != -1):\n f_time = datetime.time(int(f_time[0:1]), int(f_time[2:4]))\n else:\n f_time = datetime.time(int(f_time[0:2]), int(f_time[3:5]))\n\n temp_date = fixture.find(class_=\"date\").get_text()\n f_year = int(temp_date[len(temp_date) - 5:len(temp_date)])\n try:\n f_day = int(temp_date[4:6])\n except ValueError:\n f_day = int(temp_date[4:5])\n \n next_fixture = datetime.datetime(f_year, strptime(\n f_date[0:3], '%b').tm_mon, f_day, f_time.hour, f_time.minute)\n return next_fixture\n\ndef get_fixture_end(fixture):\n fixture_end = fixture + datetime.timedelta(0,6900)\n return 
fixture_end","sub_path":"fixture_countdown.py","file_name":"fixture_countdown.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"69113689","text":"from PyQt5.QtWidgets import QWidget, QLabel, QPushButton, QVBoxLayout, QHBoxLayout\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtCore import QSize\nfrom PyQt5 import QtGui, QtCore\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import QShortcut\n\nfrom gestione.cliente.certificati.controller_certificati.controller_certficati import controller_certificati\n\n#visualizzazione utente palestra\nclass view_visualizza_utente_palestra(QWidget):\n\n def __init__(self, controllore_abbonamento, parent=None):\n super(view_visualizza_utente_palestra, self).__init__(parent)\n\n self.controllore_abbonamento = controllore_abbonamento\n self.controllore_certificato = controller_certificati()\n\n self.v_layout = QVBoxLayout()\n\n # labels contenenti dati dell'utente iscritto in palestra\n self.create_label(\"Nome: \", self.controllore_abbonamento.get_nome_abbonamento())\n self.create_label(\"Cognome: \", self.controllore_abbonamento.get_cognome_abbonamento())\n self.create_label(\"Codice fiscale: \", self.controllore_abbonamento.get_codicefiscale_abbonamento())\n self.create_label(\"Nato a: \", self.controllore_abbonamento.get_nato_abbonamento())\n self.create_label(\"Data di nascita: \", self.controllore_abbonamento.get_data_abbonamento().strftime('%d/%m/%Y'))\n self.create_label(\"Residenza(via e città): \", self.controllore_abbonamento.get_residenza_abbonamento())\n self.create_label(\"Email: \", self.controllore_abbonamento.get_email_abbonamento())\n self.create_label(\"Cellulare: \", str(self.controllore_abbonamento.get_cellulare_abbonamento()))\n self.create_label(\"Struttura: \", self.controllore_abbonamento.get_struttura_abbonamento())\n self.create_label(\"Abbonamento: \", self.controllore_abbonamento.get_tipoabbonamento_abbonamento())\n self.create_label(\"Certificato valido fino al: \", self.controllore_certificato.get_certificato_by_codicefiscale(self.controllore_abbonamento.get_codicefiscale_abbonamento()).datafine.strftime('%d/%m/%Y'))\n self.create_label(\"Inizio abbonamento: \", self.controllore_abbonamento.get_attivazione_abbonamento().strftime('%d/%m/%Y') )\n self.create_label(\"Scadenza abbonamento: \", self.controllore_abbonamento.get_scadenza_abbonamento(self.controllore_abbonamento.get_tipoabbonamento_abbonamento(),self.controllore_abbonamento.get_attivazione_abbonamento()).strftime('%d/%m/%Y'))\n\n\n\n self.h_layout = QHBoxLayout()\n\n self.create_button(\"Chiudi\", self.mostra_indietro)\n\n self.shortcut_indietro = QShortcut(QKeySequence('Return'), self)\n self.shortcut_indietro.activated.connect(self.mostra_indietro)\n\n self.v_layout.addLayout(self.h_layout)\n\n self.setLayout(self.v_layout)\n\n self.setMinimumSize(700, 600)\n self.setMaximumSize(700, 600)\n self.setWindowTitle(\"Utente palestra\")\n self.setWindowIcon(QtGui.QIcon(\"images/immaginelogo1.png\"))\n\n # per lo sfondo\n oImage = QImage(\"images/immaginepesisfocata.jpeg\")\n sImage = oImage.scaled(QSize(700, 600))\n palette = QPalette()\n palette.setBrush(10, QBrush(sImage))\n self.setPalette(palette)\n\n #funzione per creare i vari label\n def create_label(self, testo, descrizione):\n h_layout = QHBoxLayout()\n\n label = QLabel(testo)\n label.setFont(QFont(\"Arial\",15))\n h_layout.addWidget(label)\n\n\n label_di_testo = QLabel(descrizione)\n 
label_di_testo.setFont(QFont(\"Arial\",15))\n h_layout.addWidget(label_di_testo)\n\n self.v_layout.addLayout(h_layout)\n\n #funzione per creare i vari bottoni\n def create_button(self, titolo, funzione):\n bottone = QPushButton(titolo)\n bottone.setFont(QFont(\"Yu Gothic UI Light\", 12))\n bottone.clicked.connect(funzione)\n self.h_layout.addWidget(bottone)\n bottone.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n\n #funzione per chiudere la pagina\n def mostra_indietro(self):\n self.close()","sub_path":"struttura/palestra/Iscritti/view_iscritti/view_visualizza_utente_palestra.py","file_name":"view_visualizza_utente_palestra.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"630434088","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ldig : Language Detector with Infinite-Gram\n# This code is available under the MIT License.\n# (c)2011 Nakatani Shuyo / Cybozu Labs Inc.\n\nimport os, sys, re, codecs, json\nimport optparse\nimport numpy\nimport subprocess\nimport da\nimport logging\nfrom typing import Tuple, List\nimport zipfile\nimport time\n\n# python2/3 import\ntry:\n import html.entities\nexcept ImportError:\n import html.entities as htmlentitydefs\n\nlogger = logging.getLogger(__name__)\n\nPATH_SCRIPT = os.path.dirname(os.path.realpath(__file__))\n\n\nclass ldig(object):\n def __init__(self, model_dir=os.path.join(PATH_SCRIPT, \"models/model.latin.20120315.zip\")):\n \"\"\"\n expect full path of model directory or zip file\n \"\"\"\n if (\".zip\" in model_dir):\n with zipfile.ZipFile(model_dir, \"r\") as model_compressed:\n # remove zip extension for path\n model_dir = model_dir[:-4]\n model_compressed.extractall(model_dir)\n self.features = os.path.join(model_dir, 'features')\n self.labels = os.path.join(model_dir, 'labels.json')\n self.default_labels = self.load_labels()\n self.param = os.path.join(model_dir, 'parameters.npy')\n self.default_param = numpy.load(self.param)\n self.doublearray = os.path.join(model_dir, 'doublearray.npz')\n self.default_trie = self.load_da()\n\n def load_da(self):\n trie = da.DoubleArray()\n trie.load(self.doublearray)\n return trie\n\n def load_features(self):\n features = []\n with codecs.open(self.features, 'rb', 'utf-8') as f:\n pre_feature = \"\"\n for n, s in enumerate(f):\n m = re.match(r'(.+)\\t([0-9]+)', s)\n if not m:\n sys.exit(\"irregular feature : '%s' at %d\" % (s, n + 1))\n if pre_feature >= m.groups(1):\n sys.exit(\"unordered feature : '%s' at %d\" % (s, n + 1))\n pre_feature = m.groups(1)\n features.append(m.groups())\n return features\n\n def load_labels(self):\n with open(self.labels, 'rb') as f:\n return json.load(f)\n\n def init(self, temppath, corpus_list, lbff, ngram_bound):\n \"\"\"\n Extract features from corpus and generate TRIE(DoubleArray) data\n - load corpus\n - generate temporary file for maxsubst\n - generate double array and save it\n - parameter: lbff = lower bound of feature frequency\n \"\"\"\n\n labels = []\n with codecs.open(temppath, 'wb', 'utf-8') as f:\n for corpus in corpus_list:\n with codecs.open(corpus, 'rb', 'utf-8') as g:\n for i, s in enumerate(g):\n label, text, _ = normalize_text(s)\n if label is None or label == \"\":\n sys.stderr.write(\"no label data at %d in %s \\n\" % (i + 1, corpus))\n continue\n if label not in labels:\n labels.append(label)\n f.write(text)\n f.write(\"\\n\")\n\n labels.sort()\n logger.info(\"labels: %d\" % len(labels))\n with open(self.labels, 'wb') as f:\n 
f.write(json.dumps(labels))\n\n logger.info(\"generating max-substrings...\")\n temp_features = self.features + \".temp\"\n maxsubst = \"./maxsubst\"\n if os.name == 'nt': maxsubst += \".exe\"\n subprocess.call([maxsubst, temp_path, temp_features])\n\n # count features\n M = 0\n features = []\n r1 = re.compile('.\\u0001.')\n r2 = re.compile('[A-Za-z\\u00a1-\\u00a3\\u00bf-\\u024f\\u1e00-\\u1eff]')\n with codecs.open(temp_features, 'rb', 'utf-8') as f:\n for line in f:\n i = line.index('\\t')\n st = line[0:i]\n c = int(line[i + 1:-1])\n if c >= lbff and len(st) <= ngram_bound and (not r1.search(st)) and r2.search(st) and (\n st[0] != '\\u0001' or st[-1] != '\\u0001'):\n M += 1\n features.append((st, line))\n logger.info(\"# of features = %d\" % M)\n\n features.sort()\n with codecs.open(self.features, 'wb', 'utf-8') as f:\n for s in features:\n f.write(s[1])\n\n # generate_doublearray(self.doublearray, [s[0] for s in features])\n\n # numpy.save(self.param, numpy.zeros((M, len(labels))))\n\n def shrink(self):\n features = self.load_features()\n param = numpy.load(self.param)\n\n list_element = (numpy.abs(param).sum(1) > 0.0000001)\n new_param = param[list_element]\n logger.info(\"# of features : %d => %d\" % (param.shape[0], new_param.shape[0]))\n\n numpy.save(self.param, new_param)\n new_features = []\n with codecs.open(self.features, 'wb', 'utf-8') as f:\n for i, x in enumerate(list_element):\n if x:\n f.write(\"%s\\t%s\\n\" % features[i])\n new_features.append(features[i][0])\n\n generate_doublearray(self.doublearray, new_features)\n\n def debug(self, arguments):\n features = self.load_features()\n trie = self.load_da()\n labels = self.load_labels()\n param = numpy.load(self.param)\n\n for st in arguments:\n _, text, _ = normalize_text(st)\n events = trie.extract_features(\"\\u0001\" + text + \"\\u0001\")\n logger.info(\"orig: '%s'\" % st)\n logger.info(\"norm: '%s'\" % text)\n sum_labels = numpy.zeros(len(labels))\n logger.info(\"id\\tfeat\\tfreq\\t%s\" % \"\\t\".join(labels))\n for id_feature in sorted(events, key=lambda id_feature: features[id_feature][0]):\n phi = param[id_feature,]\n sum_labels += phi * events[id]\n logger.info(\"%d\\t%s\\t%d\\t%s\" % (\n id_feature, features[id_feature][0], events[id_feature], \"\\t\".join([\"%0.2f\" % x for x in phi])))\n exp_w = numpy.exp(sum_labels - sum_labels.max())\n prob = exp_w / exp_w.sum()\n logger.info(\"\\t\\t\\t%s\" % \"\\t\".join([\"%0.2f\" % x for x in sum_labels]))\n logger.info(\"\\t\\t\\t%s\" % \"\\t\".join([\"%0.1f%%\" % (x * 100) for x in prob]))\n\n def learn(self, options, arguments):\n trie = self.load_da()\n param = numpy.load(self.param)\n labels = self.load_labels()\n\n logger.info(\"loading corpus... \" + time.strftime(\"%H:%M:%S\", time.localtime()))\n corpus, idlist = load_corpus(arguments, labels)\n logger.info(\"inference... \" + time.strftime(\"%H:%M:%S\", time.localtime()))\n inference(param, labels, corpus, idlist, trie, options)\n logger.info(\"finish... 
\" + time.strftime(\"%H:%M:%S\", time.localtime()))\n numpy.save(self.param, param)\n\n def detect_file(self, files_path):\n # typ List[str] -> List[Tuple[float,str]]\n trie = self.load_da()\n param = numpy.load(self.param)\n labels = self.load_labels()\n\n return likelihood_file(param, labels, trie, files_path)\n\n def detect_text(self, text):\n # type str -> Tuple[float,str]\n return likelihood_text(self.default_param, self.default_labels, self.default_trie, text)\n\n\n# from http://www.programming-magic.com/20080820002254/\nreference_regex = re.compile(r'&(#x?[0-9a-f]+|[a-z]+);', re.IGNORECASE)\nnum16_regex = re.compile(r'#x\\d+', re.IGNORECASE)\nnum10_regex = re.compile(r'#\\d+', re.IGNORECASE)\n\n\ndef htmlentity2unicode(text):\n result = ''\n i = 0\n while True:\n match = reference_regex.search(text, i)\n if match is None:\n result += text[i:]\n break\n\n result += text[i:match.start()]\n i = match.end()\n name = match.group(1)\n\n if name in list(html.entities.name2codepoint.keys()):\n result += chr(html.entities.name2codepoint[name])\n elif num16_regex.match(name):\n result += chr(int('0' + name[1:], 16))\n elif num10_regex.match(name):\n result += chr(int(name[1:]))\n return result\n\n\ndef normalize_twitter(text):\n \"\"\"normalization for twitter\"\"\"\n text = re.sub(r'(@|#)[^ ]+', '', text)\n # remove whole url is simpler too have more coherent result\n text = re.sub(r'(https|http)?:\\/\\/(\\w|\\.|\\/|\\?|\\=|\\&|\\%)*\\b', '', text)\n text = re.sub(r'(^| )[:;x]-?[\\(\\)dop]($| )', ' ', text) # facemark\n text = re.sub(r'(^| )(rt[ :]+)*', ' ', text)\n text = re.sub(r'([hj])+([aieo])+(\\1+\\2+){1,}', r'\\1\\2\\1\\2', text, re.IGNORECASE) # laugh\n text = re.sub(r' +(via|live on) *$', '', text)\n return text\n\n\nre_ignore_i = re.compile(r'[^I]')\nre_turkish_alphabet = re.compile('[\\u011e\\u011f\\u0130\\u0131]')\nvietnamese_norm = {\n '\\u0041\\u0300': '\\u00C0', '\\u0045\\u0300': '\\u00C8', '\\u0049\\u0300': '\\u00CC', '\\u004F\\u0300': '\\u00D2',\n '\\u0055\\u0300': '\\u00D9', '\\u0059\\u0300': '\\u1EF2', '\\u0061\\u0300': '\\u00E0', '\\u0065\\u0300': '\\u00E8',\n '\\u0069\\u0300': '\\u00EC', '\\u006F\\u0300': '\\u00F2', '\\u0075\\u0300': '\\u00F9', '\\u0079\\u0300': '\\u1EF3',\n '\\u00C2\\u0300': '\\u1EA6', '\\u00CA\\u0300': '\\u1EC0', '\\u00D4\\u0300': '\\u1ED2', '\\u00E2\\u0300': '\\u1EA7',\n '\\u00EA\\u0300': '\\u1EC1', '\\u00F4\\u0300': '\\u1ED3', '\\u0102\\u0300': '\\u1EB0', '\\u0103\\u0300': '\\u1EB1',\n '\\u01A0\\u0300': '\\u1EDC', '\\u01A1\\u0300': '\\u1EDD', '\\u01AF\\u0300': '\\u1EEA', '\\u01B0\\u0300': '\\u1EEB',\n\n '\\u0041\\u0301': '\\u00C1', '\\u0045\\u0301': '\\u00C9', '\\u0049\\u0301': '\\u00CD', '\\u004F\\u0301': '\\u00D3',\n '\\u0055\\u0301': '\\u00DA', '\\u0059\\u0301': '\\u00DD', '\\u0061\\u0301': '\\u00E1', '\\u0065\\u0301': '\\u00E9',\n '\\u0069\\u0301': '\\u00ED', '\\u006F\\u0301': '\\u00F3', '\\u0075\\u0301': '\\u00FA', '\\u0079\\u0301': '\\u00FD',\n '\\u00C2\\u0301': '\\u1EA4', '\\u00CA\\u0301': '\\u1EBE', '\\u00D4\\u0301': '\\u1ED0', '\\u00E2\\u0301': '\\u1EA5',\n '\\u00EA\\u0301': '\\u1EBF', '\\u00F4\\u0301': '\\u1ED1', '\\u0102\\u0301': '\\u1EAE', '\\u0103\\u0301': '\\u1EAF',\n '\\u01A0\\u0301': '\\u1EDA', '\\u01A1\\u0301': '\\u1EDB', '\\u01AF\\u0301': '\\u1EE8', '\\u01B0\\u0301': '\\u1EE9',\n\n '\\u0041\\u0303': '\\u00C3', '\\u0045\\u0303': '\\u1EBC', '\\u0049\\u0303': '\\u0128', '\\u004F\\u0303': '\\u00D5',\n '\\u0055\\u0303': '\\u0168', '\\u0059\\u0303': '\\u1EF8', '\\u0061\\u0303': '\\u00E3', '\\u0065\\u0303': '\\u1EBD',\n '\\u0069\\u0303': 
'\\u0129', '\\u006F\\u0303': '\\u00F5', '\\u0075\\u0303': '\\u0169', '\\u0079\\u0303': '\\u1EF9',\n '\\u00C2\\u0303': '\\u1EAA', '\\u00CA\\u0303': '\\u1EC4', '\\u00D4\\u0303': '\\u1ED6', '\\u00E2\\u0303': '\\u1EAB',\n '\\u00EA\\u0303': '\\u1EC5', '\\u00F4\\u0303': '\\u1ED7', '\\u0102\\u0303': '\\u1EB4', '\\u0103\\u0303': '\\u1EB5',\n '\\u01A0\\u0303': '\\u1EE0', '\\u01A1\\u0303': '\\u1EE1', '\\u01AF\\u0303': '\\u1EEE', '\\u01B0\\u0303': '\\u1EEF',\n\n '\\u0041\\u0309': '\\u1EA2', '\\u0045\\u0309': '\\u1EBA', '\\u0049\\u0309': '\\u1EC8', '\\u004F\\u0309': '\\u1ECE',\n '\\u0055\\u0309': '\\u1EE6', '\\u0059\\u0309': '\\u1EF6', '\\u0061\\u0309': '\\u1EA3', '\\u0065\\u0309': '\\u1EBB',\n '\\u0069\\u0309': '\\u1EC9', '\\u006F\\u0309': '\\u1ECF', '\\u0075\\u0309': '\\u1EE7', '\\u0079\\u0309': '\\u1EF7',\n '\\u00C2\\u0309': '\\u1EA8', '\\u00CA\\u0309': '\\u1EC2', '\\u00D4\\u0309': '\\u1ED4', '\\u00E2\\u0309': '\\u1EA9',\n '\\u00EA\\u0309': '\\u1EC3', '\\u00F4\\u0309': '\\u1ED5', '\\u0102\\u0309': '\\u1EB2', '\\u0103\\u0309': '\\u1EB3',\n '\\u01A0\\u0309': '\\u1EDE', '\\u01A1\\u0309': '\\u1EDF', '\\u01AF\\u0309': '\\u1EEC', '\\u01B0\\u0309': '\\u1EED',\n\n '\\u0041\\u0323': '\\u1EA0', '\\u0045\\u0323': '\\u1EB8', '\\u0049\\u0323': '\\u1ECA', '\\u004F\\u0323': '\\u1ECC',\n '\\u0055\\u0323': '\\u1EE4', '\\u0059\\u0323': '\\u1EF4', '\\u0061\\u0323': '\\u1EA1', '\\u0065\\u0323': '\\u1EB9',\n '\\u0069\\u0323': '\\u1ECB', '\\u006F\\u0323': '\\u1ECD', '\\u0075\\u0323': '\\u1EE5', '\\u0079\\u0323': '\\u1EF5',\n '\\u00C2\\u0323': '\\u1EAC', '\\u00CA\\u0323': '\\u1EC6', '\\u00D4\\u0323': '\\u1ED8', '\\u00E2\\u0323': '\\u1EAD',\n '\\u00EA\\u0323': '\\u1EC7', '\\u00F4\\u0323': '\\u1ED9', '\\u0102\\u0323': '\\u1EB6', '\\u0103\\u0323': '\\u1EB7',\n '\\u01A0\\u0323': '\\u1EE2', '\\u01A1\\u0323': '\\u1EE3', '\\u01AF\\u0323': '\\u1EF0', '\\u01B0\\u0323': '\\u1EF1',\n}\nre_vietnamese = re.compile(\n '[AEIOUYaeiouy\\u00C2\\u00CA\\u00D4\\u00E2\\u00EA\\u00F4\\u0102\\u0103\\u01A0\\u01A1\\u01AF\\u01B0][\\u0300\\u0301\\u0303\\u0309\\u0323]')\nre_latin_cont = re.compile('([a-z\\u00e0-\\u024f])\\\\1{2,}')\nre_symbol_cont = re.compile('([^a-z\\u00e0-\\u024f])\\\\1{1,}')\n\n\ndef normalize_text(org):\n m = re.match(r'([-A-Za-z]+)\\t(.+)', org)\n if m:\n label, org = m.groups()\n else:\n label = \"\"\n m = re.search(r'\\t([^\\t]+)$', org)\n if m:\n s = m.group(0)\n else:\n s = org\n s = htmlentity2unicode(s)\n s = re.sub('[\\u2010-\\u2015]', '-', s)\n s = re.sub('[0-9]+', '0', s)\n s = re.sub('[^\\u0020-\\u007e\\u00a1-\\u024f\\u0300-\\u036f\\u1e00-\\u1eff]+', ' ', s)\n s = re.sub(' +', ' ', s)\n\n # vietnamese normalization\n s = re_vietnamese.sub(lambda x: vietnamese_norm[x.group(0)], s)\n\n # lower case with Turkish\n s = re_ignore_i.sub(lambda x: x.group(0).lower(), s)\n # if re_turkish_alphabet.search(s):\n # s = s.replace(u'I', u'\\u0131')\n # s = s.lower()\n\n # Romanian normalization\n s = s.replace('\\u0219', '\\u015f').replace('\\u021b', '\\u0163')\n\n s = normalize_twitter(s)\n s = re_latin_cont.sub(r'\\1\\1', s)\n s = re_symbol_cont.sub(r'\\1', s)\n\n return label, s.strip(), org\n\n\n# load courpus\ndef load_corpus(filelist, labels):\n idlist = dict((x, []) for x in labels)\n corpus = []\n for filename in filelist:\n f = codecs.open(filename, 'rb', 'utf-8')\n for i, s in enumerate(f):\n label, text, org_text = normalize_text(s)\n if label not in labels:\n sys.exit(\"unknown label '%s' at %d in %s \" % (label, i + 1, filename))\n idlist[label].append(len(corpus))\n corpus.append((label, text, org_text))\n 
f.close()\n return corpus, idlist\n\n\ndef shuffle(idlist):\n n = max(len(idlist[lang]) for lang in idlist)\n list_element = []\n for lang in idlist:\n text_ids = idlist[lang]\n n_text = len(text_ids)\n list_element += text_ids * (n / n_text)\n numpy.random.shuffle(text_ids)\n list_element += text_ids[:n % n_text]\n numpy.random.shuffle(list_element)\n return list_element\n\n\n# prediction probability\ndef predict(param, events):\n import ipdb; ipdb.set_trace()\n sum_w = numpy.dot(numpy.array([param[ei] for ei in list(events.keys())]).T, numpy.array(list(events.values())))\n exp_w = numpy.exp(sum_w - sum_w.max())\n return exp_w / exp_w.sum()\n\n\n# inference and learning\ndef inference(param, labels, corpus, idlist, trie, options):\n K = len(labels)\n M = param.shape[0]\n\n shuffled_ids = shuffle(idlist)\n N = len(shuffled_ids)\n WHOLE_REG_INT = (N / options.n_whole_reg) + 1\n\n # learning rate\n eta = options.eta\n if options.reg_const:\n penalties = numpy.zeros_like(param)\n alpha = pow(0.9, -1.0 / N)\n uk = 0\n\n corrects = numpy.zeros(K, dtype=int)\n counts = numpy.zeros(K, dtype=int)\n for m, target in enumerate(shuffled_ids):\n label, text, _ = corpus[target]\n events = trie.extract_features(\"\\u0001\" + text + \"\\u0001\")\n label_k = labels.index(label)\n\n y = predict(param, events)\n predict_k = y.argmax()\n counts[label_k] += 1\n if label_k == predict_k:\n corrects[label_k] += 1\n\n # learning\n if options.reg_const:\n eta *= alpha\n uk += options.reg_const * eta / N\n y[label_k] -= 1\n y *= eta\n\n if options.reg_const:\n indexes = events\n if (N - m) % WHOLE_REG_INT == 1:\n logger.info(\"full regularization: %d / %d\" % (m, N))\n indexes = list(range(M))\n for id_index in indexes:\n prm = param[id_index]\n pnl = penalties[id_index]\n if id_index in events: prm -= y * events[id_index]\n\n for j in range(K):\n w = prm[j]\n if w > 0:\n w1 = w - uk - pnl[j]\n if w1 > 0:\n prm[j] = w1\n pnl[j] += w1 - w\n else:\n prm[j] = 0\n pnl[j] -= w\n elif w < 0:\n w1 = w + uk - pnl[j]\n if w1 < 0:\n prm[j] = w1\n pnl[j] += w1 - w\n else:\n prm[j] = 0\n pnl[j] -= w\n else:\n for id_event, freq in list(events.items()):\n param[id_event,] -= y * freq\n\n for lbl, crct, cnt in zip(labels, corrects, counts):\n if cnt > 0:\n logger.info(\"> %s = %d / %d = %.2f\" % (lbl, crct, cnt, 100.0 * crct / cnt))\n logger.info(\"> total = %d / %d = %.2f\" % (corrects.sum(), N, 100.0 * corrects.sum() / N))\n list_features = (numpy.abs(param).sum(1) > 0.0000001)\n logger.info(\"> # of relevant features = %d / %d\" % (list_features.sum(), M))\n\n\ndef likelihood_file(param, labels, trie, filelist):\n K = len(labels)\n corrects = numpy.zeros(K, dtype=int)\n counts = numpy.zeros(K, dtype=int)\n\n label_map = dict((x, i) for i, x in enumerate(labels))\n\n n_available_data = 0\n log_likely = 0.0\n prob_and_label = list()\n for filename in filelist:\n f = codecs.open(filename, 'rb', 'utf-8')\n for s in f:\n label, text, org_text = normalize_text(s)\n\n if label not in label_map:\n label_map[label] = -1\n label_k = label_map[label]\n\n events = trie.extract_features(\"\\u0001\" + text + \"\\u0001\")\n y = predict(param, events)\n predict_k = y.argmax()\n\n if label_k >= 0:\n log_likely -= numpy.log(y[label_k])\n n_available_data += 1\n counts[label_k] += 1\n if label_k == predict_k and y[predict_k] >= 0.6:\n corrects[predict_k] += 1\n\n predict_lang = labels[predict_k]\n if y[predict_k] < 0.6:\n predict_lang = \"\"\n logger.info(\"%s\\t%s\\t%s\" % (label, predict_lang, org_text))\n prob_and_label.append((y[predict_k], 
predict_lang))\n f.close()\n\n if n_available_data > 0:\n log_likely /= n_available_data\n\n for lbl, crct, cnt in zip(labels, corrects, counts):\n if cnt > 0:\n logger.info(\"> %s = %d / %d = %.2f\" % (lbl, crct, cnt, 100.0 * crct / cnt))\n logger.info(\n \"> total = %d / %d = %.2f\" % (corrects.sum(), n_available_data, 100.0 * corrects.sum() / n_available_data))\n logger.info(\"> average negative log likelihood = %.3f\" % log_likely)\n\n return prob_and_label\n\n\ndef likelihood_text(param, labels, trie, text):\n K = len(labels)\n corrects = numpy.zeros(K, dtype=int)\n counts = numpy.zeros(K, dtype=int)\n\n label_map = dict((x, i) for i, x in enumerate(labels))\n\n n_available_data = 0\n log_likely = 0.0\n label, text, org_text = normalize_text(text)\n if label not in label_map:\n # logger.warning(\"WARNING : unknown label '%s' (ignore the later same labels)\\n\" % (label))\n label_map[label] = -1\n label_k = label_map[label]\n\n events = trie.extract_features(\"\\u0001\" + text + \"\\u0001\")\n \n y = predict(param, events)\n predict_k = y.argmax()\n\n if label_k >= 0:\n log_likely -= numpy.log(y[label_k])\n n_available_data += 1\n counts[label_k] += 1\n if label_k == predict_k and y[predict_k] >= 0.6:\n corrects[predict_k] += 1\n\n predict_lang = labels[predict_k]\n if y[predict_k] < 0.6:\n predict_lang = \"\"\n logger.info(\"%s\\t%s\\t%s\" % (label, predict_lang, org_text))\n\n if n_available_data > 0:\n log_likely /= n_available_data\n\n for lbl, crct, cnt in zip(labels, corrects, counts):\n if cnt > 0:\n logger.info(\"> %s = %d / %d = %.2f\" % (lbl, crct, cnt, 100.0 * crct / cnt))\n logger.info(\n \"> total = %d / %d = %.2f\" % (corrects.sum(), n_available_data, 100.0 * corrects.sum() / n_available_data))\n logger.info(\"> average negative log likelihood = %.3f\" % log_likely)\n\n return log_likely, predict_lang\n\n\ndef generate_doublearray(file_to_save, features):\n trie = da.DoubleArray()\n trie.initialize(features)\n trie.save(file_to_save)\n\n\nif __name__ == '__main__':\n sys.stdout = codecs.getwriter('utf-8')(sys.stdout)\n\n parser = optparse.OptionParser()\n parser.add_option(\"-m\", dest=\"model\", help=\"model directory\")\n parser.add_option(\"--init\", dest=\"init\", help=\"initialize model\", action=\"store_true\")\n parser.add_option(\"--learning\", dest=\"learning\", help=\"learn model\", action=\"store_true\")\n parser.add_option(\"--shrink\", dest=\"shrink\", help=\"remove irrevant features\", action=\"store_true\")\n parser.add_option(\"--debug\", dest=\"debug\", help=\"detect command line text for debug\", action=\"store_true\")\n\n # for initialization\n parser.add_option(\"--ff\", dest=\"bound_feature_freq\", help=\"threshold of feature frequency (for initialization)\",\n type=\"int\", default=8)\n parser.add_option(\"-n\", dest=\"ngram_bound\", help=\"n-gram upper bound (for initialization)\", type=\"int\",\n default=99999)\n parser.add_option(\"-x\", dest=\"maxsubst\", help=\"max substring extractor\", default=\"./maxsubst\")\n\n # for learning\n parser.add_option(\"-e\", \"--eta\", dest=\"eta\", help=\"learning rate\", type=\"float\", default=0.1)\n parser.add_option(\"-r\", \"--regularity\", dest=\"reg_const\", help=\"regularization constant\", type=\"float\")\n parser.add_option(\"--wr\", dest=\"n_whole_reg\", help=\"number of whole regularizations\", type=\"int\", default=2)\n\n (options_user, args_user) = parser.parse_args()\n if not options_user.model: parser.error(\"need model directory (-m)\")\n\n detector = ldig(options_user.model)\n if 
options_user.init:\n if not os.path.exists(options_user.model):\n os.mkdir(options_user.model)\n if len(args_user) == 0:\n parser.error(\"need corpus\")\n else:\n if not os.path.exists(detector.features):\n parser.error(\"features file doesn't exist\")\n if not os.path.exists(detector.labels):\n parser.error(\"labels file doesn't exist\")\n if not os.path.exists(detector.param):\n parser.error(\"parameters file doesn't exist\")\n\n if options_user.init:\n temp_path = os.path.join(options_user.model, 'temp')\n detector.init(temp_path, args_user, options_user.bound_feature_freq, options_user.ngram_bound)\n\n elif options_user.debug:\n detector.debug(args_user)\n\n elif options_user.shrink:\n detector.shrink()\n\n elif options_user.learning:\n detector.learn(options_user, args_user)\n\n else:\n detector.detect_file(args_user)\n # import cProfile\n # cProfile.runctx('detector.detect(options, args)', globals(), locals(), 'ldig.profile')\n","sub_path":"ldig/ldig.py","file_name":"ldig.py","file_ext":"py","file_size_in_byte":23083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"409212614","text":"\"\"\"HasAffiliation Module for hasAffiliation relation in neo4j\"\"\"\n\nfrom person import Person\nfrom database import Database\nfrom transform import Transform\nfrom collections import OrderedDict\nfrom organization import Organization\nfrom datetime import datetime, timedelta\n\n__author__ = \"Jagjeet Goraya\"\n__version__ = \"0.1\"\n__status__ = \"development\"\n\nlog_file = 'hasAffiliation.py'\n\nclass HasAffiliation(Database):\n\t\"\"\"Creates and updates hasAffiliation relation in neo4j.\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Initialize object variables.\"\"\"\n\n\t\tDatabase.__init__(self)\n\n\t\tself.label = 'hasAffiliation'\n\t\tself.active_status = 1\n\t\tself.entity_type = 'relation'\n\t\tself.mysql_delta_query = 'select_delta'\n\t\tself.mysql_filter_query = 'select_filtered'\n\t\tself.mysql_mapped_organization_query = 'select_mapped'\n\t\tself.date = (datetime.now() - timedelta(days=9)).strftime(\"%Y-%m-%d\")\n\t\tself.merge_query = 'merge'\n\t\tself.delete_query = 'delete'\n\t\tself.unique_keys = ['Permalink', 'PeoplePermalink', 'PrimaryRole']\n\t\tself.to_integer = [\n\t\t\t\t\t\t\t'StartedOnYear',\n\t\t\t\t\t\t\t'StartedOnMonth'\n\t\t\t\t\t\t\t'EndedOnYear',\n\t\t\t\t\t\t\t'EndedOnMonth',\n\t\t\t\t\t\t\t'PeopleJobsId'\n\t\t\t\t\t\t\t]\n\t\tself.skip = []\n\t\t\n\n\t\tself.neo4j_mapping = self.get_neo4j_mapping(self.label)\n\t\tself.neo4jQuery = self.get_neo4j_queries(self.label, self.merge_query, self.entity_type)\n\t\t# self.neo4jDeleteQuery = self.get_neo4j_queries(self.label, self.delete_query, self.entity_type)\n\t\tself.neo4j_properties = self.get_neo4j_properties(\n\t\t\tself.active_status, self.label, self.entity_type\n\t\t\t)\n\n\tdef create_query_data(self, data):\n\t\t\"\"\"Creates dynamic string for node properties and match condition.\"\"\"\n\t\t\n\t\ttry:\n\t\t\tproperties = []\n\t\t\tfirst_key = []\n\t\t\tsecond_key = []\n\t\t\tsecond_label = []\n\n\t\t\tfor key, value in data.iteritems():\n\t\t\t\tkey = Transform.to_str(key)\n\t\t\t\tvalue = Transform.to_str(value)\n\t\t\n\t\t\t\tif key in self.unique_keys:\n\t\t\t\t\tif key == 'Permalink':\n\t\t\t\t\t\titem = '{}:\"{}\",Source:\"CB\"'.format('Permalink',value)\n\t\t\t\t\t\tsecond_key.append(item)\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\telif key == 'PeoplePermalink':\n\t\t\t\t\t\titem = 
'{}:\"{}\"'.format('Permalink',value)\n\t\t\t\t\t\tfirst_key.append(item)\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\telif key == 'PrimaryRole':\n\t\t\t\t\t\tsecond_label.append(value)\n\t\t\t\t\t\tcontinue\n\n\t\t\t\tif key in self.skip:\n\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\n\t\t\t\tif value != 0 and not value:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif isinstance(value, str) and '\"' in value:\n\t\t\t\t\tvalue = value.replace('\"','') \n\t\t\t\t\n\t\t\t\titem = '{}:\"{}\"'.format(key,value)\n\t\t\t\tproperties.append(item)\n\n\t\t\tproperty_string = ','.join(properties)\n\t\t\tfirst_key_string = ','.join(first_key)\n\t\t\tsecond_key_string = ','.join(second_key)\n\t\t\tsecond_label_string = ','.join(second_label)\n\n\t\texcept Exception as e:\n\t\t\tself.logg(debug_msg = 'Error while perparing properties.', \n\t\t\t\tinfo_msg = 'Function = create_query_data()', \n\t\t\t\twarning_msg = 'Data to properties string failed.', \n\t\t\t\terror_msg = 'Module = '+log_file, \n\t\t\t\tcritical_msg = str(e))\n\n\t\telse:\n\t\t\treturn first_key_string, second_label_string, second_key_string, property_string\n\n\tdef create_data_dictionary(self, row):\n\t\t\"\"\"Converts mysql data into dictionary of neo4j properties.\"\"\"\n\n\t\tproperties = OrderedDict()\n\n\t\ttoday_date = datetime.now().strftime(\"%Y-%m-%d\")\n\t\tdata = Transform.map_data(self.neo4j_mapping, row)\n\n\t\tfor key, value in self.neo4j_properties.iteritems():\n\t\t\ttry:\n\t\t\t\tif key == 'UpdatedDate':\n\t\t\t\t\tproperties.update({'UpdatedDate':today_date})\n\n\t\t\t\telif key == 'IsCurrent':\n\t\t\t\t\tflag = data.get('IsCurrent')\n\t\t\t\t\tif flag == 1:\n\t\t\t\t\t\tproperties.update({'IsCurrent':'true'})\n\n\t\t\t\t\telif flag == 0 :\n\t\t\t\t\t\tproperties.update({'IsCurrent':'false'})\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tproperties.update({'IsCurrent':None})\n\n\t\t\t\telif key == 'StartedOnYear':\n\t\t\t\t\tStartedOn = data.get('StartedOn')\n\t\t\t\t\tif StartedOn:\n\t\t\t\t\t\tyear = str(StartedOn.year)\n\t\t\t\t\t\tproperties.update({'StartedOnYear':year})\n\n\t\t\t\telif key == 'StartedOnMonth':\n\t\t\t\t\tStartedOn = data.get('StartedOn')\n\t\t\t\t\tif StartedOn:\n\t\t\t\t\t\tmonth = str(StartedOn.month)\n\t\t\t\t\t\tproperties.update({'StartedOnMonth':month})\n\n\t\t\t\telif key == 'EndedOnYear':\n\t\t\t\t\tEndedOn = data.get('EndedOn')\n\t\t\t\t\tif EndedOn:\n\t\t\t\t\t\tyear = str(EndedOn.year)\n\t\t\t\t\t\tproperties.update({'EndedOnYear':year})\n\n\t\t\t\telif key == 'EndedOnMonth':\n\t\t\t\t\tEndedOn = data.get('EndedOn')\n\t\t\t\t\tif EndedOn:\n\t\t\t\t\t\tmonth = str(EndedOn.month)\n\t\t\t\t\t\tproperties.update({'EndedOnMonth':month})\n\n\t\t\t\telif key == 'PrimaryRole':\n\t\t\t\t\trole = data.get('PrimaryRole')\n\t\t\t\t\tlabel = Transform.get_neo4j_label(role)\n\t\t\t\t\tif label:\n\t\t\t\t\t\tproperties.update({'PrimaryRole':label})\n\n\t\t\t\telse:\n\t\t\t\t\tval = data.get(key)\n\t\t\t\t\tproperties.update({key:val})\n\n\t\t\texcept Exception as e:\n\t\t\t\tself.logg(debug_msg = 'Error while perparing data dictionary.', \n\t\t\t\t\tinfo_msg = 'Function = create_data_dictionary()', \n\t\t\t\t\twarning_msg = 'Data transformation to key, value pair failed.', \n\t\t\t\t\terror_msg = 'Module = '+log_file, \n\t\t\t\t\tcritical_msg = str(e))\n\t\t\t\tcontinue\n\n\t\treturn properties\n\n\tdef check_node_existence(self, data):\n\t\t\"\"\"Checks if the nodes for the relationship exists, if not creates new nodes.\"\"\"\n\n\t\tif data:\n\t\t\tpermalink = Transform.to_str(data.get('Permalink'))\n\t\t\tlabel = 
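# --- Editor's note (not part of the original record) ------------------------
# create_query_data() above builds the MERGE statement by interpolating raw
# values into the Cypher text (stripping any double quotes first). An
# alternative that avoids manual escaping is to pass the values as query
# parameters and let the driver serialize them. Minimal sketch, assuming the
# official `neo4j` Python driver; the URI, credentials and node labels below
# are placeholders, not values taken from this module:
from neo4j import GraphDatabase

def merge_affiliation(uri, user, password, person_permalink, org_permalink, props):
    cypher = (
        "MERGE (p:Person {Permalink: $person}) "
        "MERGE (o:Organization {Permalink: $org, Source: 'CB'}) "
        "MERGE (p)-[r:hasAffiliation]->(o) "
        "SET r += $props"                    # props is a plain dict of relation properties
    )
    driver = GraphDatabase.driver(uri, auth=(user, password))
    with driver.session() as session:
        session.run(cypher, person=person_permalink, org=org_permalink, props=props)
    driver.close()
# -----------------------------------------------------------------------------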
Transform.to_str(data.get('PrimaryRole'))\n\t\t\tpeople_permalink = Transform.to_str(data.get('PeoplePermalink'))\n\n\t\t\ttry:\n\t\t\t\torganization = Organization()\n\t\t\t\torganization.check_create_organization_node(label, permalink)\n\t\t\t\tperson = Person()\n\t\t\t\tperson.check_create_person_node(people_permalink)\n\n\t\t\texcept Exception as e:\n\t\t\t\tpass\n\t\n\tdef create_update_hasAffiliation_relationship(self, data):\n\t\t\"\"\"Process raw data from mysql and executes neo4j queries.\"\"\"\n\t\t\n\t\tif data:\n\t\t\tfor row in data:\n\t\t\t\ttry:\n\t\t\t\t\tproperties_data = self.create_data_dictionary(row)\n\t\t\t\t\tself.check_node_existence(properties_data)\n\t\t\t\t\tfirst_key, second_label, second_key, properties = self.create_query_data(properties_data)\n\t\t\t\t\tif first_key and second_label and second_key and properties:\n\t\t\t\t\t\tquery = self.neo4jQuery.format(first_key, second_label, second_key, properties)\n\t\t\t\t\t\tself.execute_neo4j_merge_query(query)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself.logg(debug_msg = 'Error while creating neo4j node query.', \n\t\t\t\t\t\tinfo_msg = 'Function = create_update_hasAffiliation_relationship()',\n\t\t\t\t\t\twarning_msg = 'Error in creating dynamic node query.',\n\t\t\t\t\t\terror_msg = 'Module = '+log_file,\n\t\t\t\t\t\tcritical_msg = str(e))\n\n\n\tdef get_data(self, data_set):\n\t\t\"\"\"Fetch data from mysql w.r.t relation.\"\"\"\n\t\t\n\t\tif data_set == 'delta':\n\t\t\tdata = self.fetch_all_mysql_data(\n\t\t\t\tself.label, self.mysql_delta_query, self.entity_type, match=self.date\n\t\t\t\t)\n\t\t\treturn data\n\n\t\telif data_set == 'mapped':\n\t\t\tdata = self.fetch_all_mysql_data(\n\t\t\t\tself.label, self.mysql_mapped_organization_query, self.entity_type, match=None\n\t\t\t\t)\n\t\t\treturn data\n\n\t@classmethod\n\tdef run(cls):\n\t\t\"\"\".\"\"\"\n\n\t\ttry:\n\t\t\thasAffiliation = cls()\n\t\t\tdata = hasAffiliation.get_data(data_set='delta')\n\t\t\tif data:\n\t\t\t\thasAffiliation.create_update_hasAffiliation_relationship(data)\n\n\t\texcept Exception as e:\n\t\t\treturn False\n\n\t\telse:\n\t\t\treturn True\n\nif __name__ == '__main__':\n\t\"\"\".\"\"\"\n\n\tHasAffiliation.run()\n\n\n\n\n\n","sub_path":"neo4j_data_ingestion/etl/hasAffiliation.py","file_name":"hasAffiliation.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"76570876","text":"import code, copy, csv, json, math, os, re\n\n# pip install pyshp\nimport shapefile\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport os\n\npjoin = os.path.join\n\n# pip install Shapely\nfrom shapely import geometry\nfrom shapely.geometry import Point\n\nabbreviation_to_name = {\n\t\"AL\": \"Alabama\",\n\t\"AK\": \"Alaska\",\n\t\"AZ\": \"Arizona\",\n\t\"AR\": \"Arkansas\",\n\t\"CA\": \"California\",\n\t\"CO\": \"Colorado\",\n\t\"CT\": \"Connecticut\",\n\t\"DE\": \"Delaware\",\n\t\"DC\": \"District of Columbia\",\n\t\"FL\": \"Florida\",\n\t\"GA\": \"Georgia\",\n\t\"GU\": \"Guam\",\n\t\"HI\": \"Hawaii\",\n\t\"ID\": \"Idaho\",\n\t\"IL\": \"Illinois\",\n\t\"IN\": \"Indiana\",\n\t\"IA\": \"Iowa\",\n\t\"KS\": \"Kansas\",\n\t\"KY\": \"Kentucky\",\n\t\"LA\": \"Louisiana\",\n\t\"ME\": \"Maine\",\n\t\"MD\": \"Maryland\",\n\t\"MA\": \"Massachusetts\",\n\t\"MI\": \"Michigan\",\n\t\"MN\": \"Minnesota\",\n\t\"MS\": \"Mississippi\",\n\t\"MO\": \"Missouri\",\n\t\"MT\": \"Montana\",\n\t\"NE\": \"Nebraska\",\n\t\"NV\": 
\"Nevada\",\n\t\"NH\": \"New Hampshire\",\n\t\"NJ\": \"New Jersey\",\n\t\"NM\": \"New Mexico\",\n\t\"NY\": \"New York\",\n\t\"NC\": \"North Carolina\",\n\t\"ND\": \"North Dakota\",\n\t\"OH\": \"Ohio\",\n\t\"OK\": \"Oklahoma\",\n\t\"OR\": \"Oregon\",\n\t\"PA\": \"Pennsylvania\",\n\t\"RI\": \"Rhode Island\",\n\t\"SC\": \"South Carolina\",\n\t\"SD\": \"South Dakota\",\n\t\"TN\": \"Tennessee\",\n\t\"TX\": \"Texas\",\n\t\"UT\": \"Utah\",\n\t\"VT\": \"Vermont\",\n\t\"VI\": \"Virgin Islands\",\n\t\"VA\": \"Virginia\",\n\t\"WA\": \"Washington\",\n\t\"WV\": \"West Virginia\",\n\t\"WI\": \"Wisconsin\",\n\t\"WY\": \"Wyoming\",\n}\n\nfips_code_to_name = {\n\t\"01\": \"Alabama\",\n\t\"02\": \"Alaska\",\n\t\"04\": \"Arizona\",\n\t\"05\": \"Arkansas\",\n\t\"06\": \"California\",\n\t\"08\": \"Colorado\",\n\t\"09\": \"Connecticut\",\n\t\"10\": \"Delaware\",\n\t\"11\": \"District of Columbia\",\n\t\"12\": \"Florida\",\n\t\"13\": \"Georgia\",\n\t\"15\": \"Hawaii\",\n\t\"16\": \"Idaho\",\n\t\"17\": \"Illinois\",\n\t\"18\": \"Indiana\",\n\t\"19\": \"Iowa\",\n\t\"20\": \"Kansas\",\n\t\"21\": \"Kentucky\",\n\t\"22\": \"Louisiana\",\n\t\"23\": \"Maine\",\n\t\"24\": \"Maryland\",\n\t\"25\": \"Massachusetts\",\n\t\"26\": \"Michigan\",\n\t\"27\": \"Minnesota\",\n\t\"28\": \"Mississippi\",\n\t\"29\": \"Missouri\",\n\t\"30\": \"Montana\",\n\t\"31\": \"Nebraska\",\n\t\"32\": \"Nevada\",\n\t\"33\": \"New Hampshire\",\n\t\"34\": \"New Jersey\",\n\t\"35\": \"New Mexico\",\n\t\"36\": \"New York\",\n\t\"37\": \"North Carolina\",\n\t\"38\": \"North Dakota\",\n\t\"39\": \"Ohio\",\n\t\"40\": \"Oklahoma\",\n\t\"41\": \"Oregon\",\n\t\"42\": \"Pennsylvania\",\n\t\"44\": \"Rhode Island\",\n\t\"45\": \"South Carolina\",\n\t\"46\": \"South Dakota\",\n\t\"47\": \"Tennessee\",\n\t\"48\": \"Texas\",\n\t\"49\": \"Utah\",\n\t\"50\": \"Vermont\",\n\t\"51\": \"Virginia\",\n\t\"53\": \"Washington\",\n\t\"54\": \"West Virginia\",\n\t\"55\": \"Wisconsin\",\n\t\"56\": \"Wyoming\",\n\t\"60\": \"American Samoa\",\n\t\"66\": \"Guam\",\n\t\"69\": \"Northern Mariana Islands\",\n\t\"72\": \"Puerto Rico\",\n\t\"78\": \"Virgin Islands\",\n}\n\nnot_states = set([\n\t\"American Samoa\",\n\t\"Guam\",\n\t\"Northern Mariana Islands\",\n\t\"Puerto Rico\",\n\t\"Virgin Islands\",\n])\n\n# Maps formly independent cities to the counties they\n# now belong to. 
This way we can add the deaths from\n# these cities (which the CDC keeps separate, since its\n# data goes back to 1999) to the counties the cities\n# now belong to.\nformer_independent_cities_to_counties = {\n\t\"Virginia\": {\n\t\t\"clifton forge city\": \"alleghany county\",\n\t\t\"bedford city\": \"bedford county\",\n\t}\n}\n\nclass CountyNameMerger:\n\tkHardCoded = {\n\t\t\"Alabama\": {\n\t\t\t\"de kalb\": \"dekalb county\",\t\n\t\t},\n\t\t\"Alaska\": {\n\t\t\t\"anchorage borough\": \"anchorage municipality\",\n\t\t\t\"juneau borough\": \"juneau city and borough\",\n\t\t\t\"petersburg borough/census area\": \"petersburg borough\",\n\t\t\t\"sitka borough\": \"sitka city and borough\",\n\t\t\t\"skagway-hoonah-angoon census area\" : \"skagway municipality\",\n\t\t\t\"wrangell-petersburg census area\": \"wrangell city and borough\",\n\t\t\t\"yakutat borough\": \"yakutat city and borough\",\n\t\t\t# Formerly known as Wade Hampton Census Area\n\t\t\t\"wade hampton census area\": \"kusilvak census area\",\n\t\t\t# Renamed in 2008\n\t\t\t\"prince of wales-outer ketchikan census area\": \"prince of wales-hyder census area\",\n\n\t\t\t\"anchorage borough/municipality\": \"anchorage municipality\",\n\t\t\t\"juneau borough/city\": \"juneau city and borough\",\n\t\t\t\"sitka borough/city\": \"sitka city and borough\",\n\t\t\t\"wrangell borough/city\": \"wrangell city and borough\",\n\t\t\t\"yakutat borough/city\": \"yakutat city and borough\",\n\n\t\t\t\"municipality of anchorage\": \"anchorage municipality\",\n\t\t\t\"city and borough of juneau\": \"juneau city and borough\",\n\t\t\t\"petersburg census area\": \"petersburg borough\",\n\t\t},\n\t\t\"California\": {\n\t\t\t\"san francisco county/city\": \"san francisco county\",\n\t\t},\n\t\t\"Colorado\": {\n\t\t\t\"broomfield county/city\": \"broomfield county\",\n\t\t},\n\t\t\"District of Columbia\": {\n\t\t\t\"washington\": \"district of columbia\",\n\t\t\t\"district of columbia county\": \"district of columbia\",\n\t\t},\n\t\t\"Florida\": {\n\t\t\t\"de soto\": \"desoto county\",\n\t\t},\n\t\t\"Georgia\": {\n\t\t\t\"de kalb\": \"dekalb county\",\n\t\t},\n\t\t\"Idaho\": {\n\t\t\t\"fremont (includes yellowstone park)\": \"fremont county\"\n\t\t},\n\t\t\"Illinois\": {\n\t\t\t\"la salle\": \"lasalle county\",\n\t\t\t\"du page\": \"dupage county\",\n\t\t\t\"de kalb\": \"dekalb county\",\n\t\t},\n\t\t\"Indiana\": {\n\t\t\t\"de kalb\": \"dekalb county\",\n\t\t\t\"de kalb county\": \"dekalb county\",\n\t\t\t\"la porte county\": \"laporte county\",\n\t\t\t\"la porte\": \"laporte county\",\n\t\t\t\"de kalb\": \"dekalb county\",\n\t\t\t\"la grange\": \"lagrange county\",\n\t\t},\n\t\t\"Iowa\": {\n\t\t\t\"o brien\": \"o'brien county\",\n\t\t},\n\t\t\"Louisiana\": {\n\t\t\t\"la salle parish\": \"lasalle parish\",\n\t\t\t\"la salle\": \"lasalle parish\",\n\t\t},\n\t\t\"Maryland\": {\n\t\t\t\"baltimore (independent city)\": \"baltimore city\",\n\t\t\t\"baltimore city county\": \"baltimore city\",\n\t\t\t\"prince georges\": \"prince george's county\",\n\t\t\t\"queen annes\": \"queen anne's county\",\n\t\t\t\"st. marys\": \"st. mary's county\",\n\t\t},\n\t\t\"Mississippi\": {\n\t\t\t\"de soto\": \"desoto county\",\n\t\t},\n\t\t\"Missouri\": {\n\t\t\t\"jackson county (including other portions of kansas city)\": \"jackson county\",\n\t\t\t\"city of st. louis\": \"st. louis city\",\n\t\t\t\"st. louis city county\": \"st. 
louis city\",\n\t\t\t\"Jackson County (including other portions of Kansas City)\": \"Jackson County\",\n\t\t\t\"de kalb\": \"dekalb county\",\n\t\t},\n\t\t\"Nevada\": {\n\t\t\t\"carson city county\": \"carson city\"\n\t\t},\n\t\t\"New Mexico\": {\n\t\t\t\"debaca county\": \"de baca county\",\n\t\t\t\"dona ana county\": \"doña ana county\",\n\t\t\t\"dona ana\": \"doña ana county\",\n\t\t},\n\t\t\"North Dakota\": {\n\t\t\t\"la moure\": \"lamoure county\",\n\t\t},\n\t\t\"Pennsylvania\": {\n\t\t\t\"mc kean county\": \"mckean county\",\n\t\t},\n\t\t\"South Dakota\": {\n\t\t\t\"shannon county\": \"oglala lakota county\",\n\t\t\t\"shannon\": \"oglala lakota county\",\n\t\t},\n\t\t\"Tennessee\": {\n\t\t\t\"de kalb\": \"dekalb county\",\t\n\t\t},\n\t\t\"Texas\": {\n\t\t\t\"de witt\": \"dewitt county\",\n\t\t},\n\t\t\"Virginia\": {\n\t\t\t\"colonial heights cit\": \"colonial heights city\"\n\t\t}\n\t}\n\n\n# if county not in states[state] and county + ' county' in states[state]:\n# \tcounty = county + ' county'\n# if county not in states[state] and county + ' parish' in states[state]:\n# \tcounty = county + ' parish'\n# assert county in states[state], f'{county}, {state}'\n\n\n\tdef __init__(self):\n\t\twith open('base.json', 'r') as f:\n\t\t\tself.states = json.load(f)\n\n\tdef merge_state(self, state, list1, list2, allow_missing, missing):\n\t\tif (not allow_missing) and len(missing.get(state, {})) == 0:\n\t\t\tassert len(list1) == len(list2), f\"{state}\\n\\n{sorted(list1)}\\n\\n{sorted(list2)}\"\n\t\t\tassert len(set(list1)) == len(list1)\n\t\t\tassert len(set(list2)) == len(list2)\n\n\t\thardCoded = CountyNameMerger.kHardCoded.get(state, {})\n\n\t\tM = {}\n\n\t\tfoo = 'honolulu' in list1\n\n\t\ti = 0\n\t\twhile i < len(list1):\n\t\t\tcounty = list1[i].lower()\n\t\t\tif county[:3] == 'st ':\n\t\t\t\tcounty = 'st. 
' + county[3:]\n\n\t\t\tif county[-19:] == ' (independent city)':\n\t\t\t\tif county[-23:-18] != 'city ':\n\t\t\t\t\tcounty = county[:-19] + ' city'\n\t\t\t\telse:\n\t\t\t\t\tcounty = county[:-19]\n\n\t\t\tif county[-12:] == ' county/city':\n\t\t\t\tcounty = county[:-5]\n\t\t\tif county[-12:] == ' county/town':\n\t\t\t\tcounty = county[:-5]\n\n\t\t\tif county in hardCoded:\n\t\t\t\tj = list2.index(hardCoded[county])\n\t\t\t\tM[list1[i]] = list2[j]\n\t\t\t\tdel list1[i]\n\t\t\t\tdel list2[j]\n\t\t\telif county in list2:\n\t\t\t\tj = list2.index(county)\n\t\t\t\tM[list1[i]] = list2[j]\n\t\t\t\tdel list1[i]\n\t\t\t\tdel list2[j]\n\t\t\telif county + ' county' in list2:\n\t\t\t\tj = list2.index(county + ' county')\n\t\t\t\tM[list1[i]] = list2[j]\n\t\t\t\tdel list1[i]\n\t\t\t\tdel list2[j]\n\t\t\telif state == 'Louisiana' and county + ' parish' in list2:\n\t\t\t\tj = list2.index(county + ' parish')\n\t\t\t\tM[list1[i]] = list2[j]\n\t\t\t\tdel list1[i]\n\t\t\t\tdel list2[j]\n\t\t\telse:\n\t\t\t\ti += 1\n\n\t\tif state not in missing:\n\t\t\tassert len(list1) == 0, f\"{state}\\n\\n{list1}\\n\\n{list2}\"\n\t\telse:\n\t\t\tassert len([x for x in list1 if x not in missing[state]]) == 0\n\n\t\tif not allow_missing:\n\t\t\tassert len(list2) - len(missing.get(state, {})) == 0, list2\n\n\t\t# Assert mapping is not many-to-1\n\t\tassert len(M.values()) == len(set(M.values()))\n\n\t\treturn M\n\n\tdef merge(self, states, allow_missing=False, missing={}):\n\t\tif not allow_missing:\n\t\t\tassert len(states) == 51\n\t\tfor state in states:\n\t\t\tM = self.merge_state(\n\t\t\t\tstate,\n\t\t\t\tlist(states[state].keys()),\n\t\t\t\tlist(self.states[state].keys()),\n\t\t\t\tallow_missing=allow_missing,\n\t\t\t\tmissing=missing\n\t\t\t)\n\t\t\tfor county in M:\n\t\t\t\tself.add_to_json(\n\t\t\t\t\tself.states[state][M[county]],\n\t\t\t\t\tstates[state][county]\n\t\t\t\t)\n\n\tdef add_to_json(self, base, addition):\n\t\tfor k in addition:\n\t\t\tassert k not in base\n\t\t\tbase[k] = addition[k]\n\ndef get_geometry():\n\tstates = {}\n\n\tsf = shapefile.Reader(pjoin('data', 'tl_2017_us_county/tl_2017_us_county.shp'))\n\n\t# Add geometric data for countries.\n\tfor s in sf:\n\t\tstate = fips_code_to_name[s.record.STATEFP]\n\t\tif state in not_states:\n\t\t\tcontinue\n\t\tif state not in states:\n\t\t\tstates[state] = {}\n\t\tcounty_name = s.record.NAMELSAD.lower()\n\t\t# There is one county that crosses from negative\n\t\t# to positive longitudes. 
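# --- Editor's note (not part of the original record) ------------------------
# merge_state() above reconciles two county-name lists by normalizing one side
# (lower-casing, "st " -> "st. ", dropping "(independent city)") and then
# trying progressively looser matches (bare name, name + " county",
# name + " parish"), falling back to the kHardCoded aliases. A compact sketch
# of that matching idea, with made-up sample names rather than the real data:
def normalize_county(name):
    name = name.lower().strip()
    if name.startswith("st "):
        name = "st. " + name[3:]
    if name.endswith(" (independent city)"):
        name = name[:-len(" (independent city)")] + " city"
    return name

def match_county(name, candidates, suffixes=(" county", " parish")):
    name = normalize_county(name)
    if name in candidates:
        return name
    for suffix in suffixes:
        if name + suffix in candidates:
            return name + suffix
    return None                               # caller falls back to hard-coded aliases

candidates = {"st. louis county", "lasalle parish", "baltimore city"}
assert match_county("St Louis", candidates) == "st. louis county"
assert match_county("la salle", candidates) is None   # handled by kHardCoded in the original
# -----------------------------------------------------------------------------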
The easiest fix is to\n\t\t# subtract 360 degrees for positive longitudes.\n\t\tpoly = geometry.Polygon([(x - 360 if x > 0 else x, y) for x, y in s.shape.points])\n\t\tcenter = poly.convex_hull.centroid\n\t\tstates[state][county_name] = {\n\t\t\t\"area\": poly.convex_hull.area,\n\t\t\t# NOTE: we don't undo the \"- 360\" transformation\n\t\t\t# above, since most use cases probably *prefer*\n\t\t\t# not having to deal with the wrapping behavior.\n\t\t\t\"longitude\": center.x,\n\t\t\t\"latitude\": center.y,\n\t\t}\n\t\tstates[state][county_name][\"area\"] *= math.cos(states[state][county_name][\"latitude\"] * math.pi / 180)\n\n\treturn states\n\ndef get_zips():\n\tstates = {}\n\twith open(pjoin('data', 'zip_county_fips_2018_03.csv'), 'r') as f:\n\t\treader = csv.reader(f, delimiter=',')\n\t\theader = next(reader)\n\t\trows = [row for row in reader]\n\n\tfor zipcode, fips, city, state, county, _ in rows:\n\t\tif state in [\"PR\", \"GU\", \"VI\"]:\n\t\t\tcontinue\n\t\tstate = abbreviation_to_name[state]\n\t\tcounty = county.lower()\n\t\tif state not in states:\n\t\t\tstates[state] = {}\n\t\tif county not in states[state]:\n\t\t\tstates[state][county] = {\n\t\t\t\t'zip-codes': []\n\t\t\t}\n\t\tstates[state][county]['zip-codes'].append(zipcode)\n\n\tstates[\"Alaska\"][\"kusilvak census area\"] = {\n\t\t\"zip-codes\": [\n\t\t\t\"99554\", \"99563\", \"99581\", \"99585\", \"99604\", \"99620\", \"99632\", \"99650\", \"99657\", \"99658\", \"99662\", \"99666\"\n\t\t]\n\t}\n\tstates[\"South Dakota\"][\"oglala lakota county\"] = {\n\t\t\"zip-codes\": [\n\t\t\t\"57716\", \"57752\", \"57756\", \"57764\", \"57770\", \"57772\", \"57794\",\n\t\t]\n\t}\n\treturn states\n\ndef get_demographics():\n\tage_code_to_group = {\n\t 0: \"all\",\n\t\t1: \"0-4\",\n\t\t2: \"5-9\",\n\t\t3: \"10-14\",\n\t\t4: \"15-19\",\n\t\t5: \"20-24\",\n\t\t6: \"25-29\",\n\t\t7: \"30-34\",\n\t\t8: \"35-39\",\n\t\t9: \"40-44\",\n\t\t10: \"45-49\",\n\t\t11: \"50-54\",\n\t\t12: \"55-59\",\n\t\t13: \"60-64\",\n\t\t14: \"65-69\",\n\t\t15: \"70-74\",\n\t\t16: \"75-79\",\n\t\t17: \"80-84\",\n\t\t18: \"85+\"\n\t}\n\n\t# https://www2.census.gov/programs-surveys/popest/technical-documentation/file-layouts/2010-2018/cc-est2018-alldata.pdf\n\tyear_code_to_year = {\n\t\t \"1\": \"4/1/2010\",\n\t\t \"2\": \"4/1/2010\", # sic\n\t\t \"3\": \"7/1/2010\",\n\t\t \"4\": \"7/1/2011\",\n\t\t \"5\": \"7/1/2012\",\n\t\t \"6\": \"7/1/2013\",\n\t\t \"7\": \"7/1/2014\",\n\t\t \"8\": \"7/1/2015\",\n\t\t \"9\": \"7/1/2016\",\n\t\t\"10\": \"7/1/2017\",\n\t\t\"11\": \"7/1/2018\",\n\t}\n\t# After downloading this file you should open it with a text editor (\n\t# I use Sublime) and re-encode it as utf8.\n\n\tstates = {}\n\n\twith open(pjoin('data', 'cc-est2018-alldata.csv'), 'r') as f:\n\t\treader = csv.reader(f, delimiter=',')\n\t\theader = next(reader)\n\t\trows = [row for row in reader]\n\t\tassert header[:7] == ['SUMLEV', 'STATE', 'COUNTY', 'STNAME', 'CTYNAME', 'YEAR', 'AGEGRP']\n\t\tfor row in rows:\n\t\t\tstate = row[3]\n\t\t\tif state not in states:\n\t\t\t\tstates[state] = {}\n\n\t\t\tcounty = row[4].lower()\n\n\t\t\t# We only grab the latest year available and ignore the\n\t\t\t# other rows.\n\t\t\tif year_code_to_year[row[5]] != '7/1/2018':\n\t\t\t\tcontinue\n\n\t\t\tage_group = int(row[6])\n\t\t\tif age_group == 0:\n\t\t\t\t# age group \"0\" is everyone. We grab racial data from\n\t\t\t\t# this row. The racial break down done by the Census\n\t\t\t\t# Bureau is... intense, with 73 different columns. 
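# --- Editor's note (not part of the original record) ------------------------
# get_geometry() above measures each county's convex hull in squared degrees
# and multiplies by cos(latitude), so that a degree of longitude counts for as
# much ground as it actually covers at that latitude (an equirectangular
# approximation; the convex hull also over-estimates concave counties). The
# correction in isolation, on a synthetic one-degree square centered at 45 N:
import math
from shapely import geometry

square = geometry.Polygon([(-100, 44.5), (-99, 44.5), (-99, 45.5), (-100, 45.5)])
lat = square.convex_hull.centroid.y                       # 45.0
corrected_area = square.convex_hull.area * math.cos(lat * math.pi / 180)
assert abs(corrected_area - math.cos(math.radians(45))) < 1e-9   # ~0.707 "degree-squares"
# -----------------------------------------------------------------------------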
To\n\t\t\t\t# keep the file size reasonable I don't track them all.\n\t\t\t\t# Fortunately the code and data is freely available so\n\t\t\t\t# it is trivial for you to add more columns if you like!\n\n\t\t\t\t# We assume this is the first row we see.\n\t\t\t\tassert county not in states[state]\n\t\t\t\tstates[state][county] = {}\n\t\t\t\tstates[state][county]['race_demographics'] = {}\n\t\t\t\tstates[state][county]['age_demographics'] = {}\n\n\t\t\t\tstates[state][county]['male'] = int(row[8])\n\t\t\t\tstates[state][county]['female'] = int(row[9])\n\n\t\t\t\ttotal = int(row[7])\n\t\t\t\tstates[state][county]['population'] = total\n\n\t\t\t\tstates[state][county]['race_demographics']['non_hispanic_white_alone_male'] = int(row[34]) / total\n\t\t\t\tstates[state][county]['race_demographics']['non_hispanic_white_alone_female'] = int(row[35]) / total\n\n\t\t\t\tstates[state][county]['race_demographics']['black_alone_male'] = int(row[12]) / total\n\t\t\t\tstates[state][county]['race_demographics']['black_alone_female'] = int(row[13]) / total\n\n\t\t\t\tstates[state][county]['race_demographics']['asian_alone_male'] = int(row[16]) / total\n\t\t\t\tstates[state][county]['race_demographics']['asian_alone_female'] = int(row[17]) / total\n\n\t\t\t\tstates[state][county]['race_demographics']['hispanic_male'] = int(row[56]) / total\n\t\t\t\tstates[state][county]['race_demographics']['hispanic_female'] = int(row[57]) / total\n\n\t\t\telse:\n\t\t\t\tstates[state][county]['age_demographics'][age_code_to_group[int(row[6])]] = int(row[7]) / states[state][county]['population']\n\n\t\t\tassert county in states[state]\n\n\tfor state_name in states:\n\t\tfor county_name in states[state_name]:\n\t\t\tassert 'race_demographics' in states[state_name][county_name]\n\n\treturn states\n\ndef get_cdc_deaths():\n\tstates = {}\n\tfor varname, fn in zip(['suicides', 'firearm suicides', 'homicides'], [\"Compressed Mortality, 1999-2016 (all suicides).txt\", \"Compressed Mortality, 1999-2016 (firearm suicides).txt\", \"Compressed Mortality (assaults), 1999-2016.txt\"]):\n\t\twith open(pjoin('data', fn), 'r') as f:\n\t\t\treader = csv.reader(f, delimiter='\\t', quotechar='\"')\n\t\t\trows = [row for row in reader]\n\t\theader = rows[0]\n\t\trows = rows[1:]\n\t\trows = rows[:rows.index(['---']) - 1]\n\t\tformer_independent_cities = {}\n\t\tfor row in rows:\n\t\t\t_, county, _, deaths, _, _ = row\n\t\t\tcounty = county.lower()\n\t\t\tstate = abbreviation_to_name[county.split(', ')[-1].upper()]\n\t\t\tif state not in states:\n\t\t\t\tstates[state] = {}\n\t\t\tcounty = ', '.join(county.split(', ')[:-1])\n\n\t\t\t# These counties changed their names recently, and rows with\n\t\t\t# both the old names and new names are found in the CDC\n\t\t\t# dataset, so we simply ignore these names.\n\t\t\tif county in ['prince of wales-outer ketchikan census area', 'skagway-hoonah-angoon census area', \"wrangell-petersburg census area\"]:\n\t\t\t\tcontinue\n\n\t\t\tif deaths == 'Suppressed':\n\t\t\t\tdeaths = None\n\t\t\telse:\n\t\t\t\tdeaths = int(deaths)\n\n\t\t\tif state in former_independent_cities_to_counties and county in former_independent_cities_to_counties[state]:\n\t\t\t\tcounty = former_independent_cities_to_counties[state][county]\n\t\t\t\tif state not in former_independent_cities:\n\t\t\t\t\tformer_independent_cities[state] = {}\n\t\t\t\tformer_independent_cities[state][county] = deaths\n\t\t\t\tcontinue\n\n\t\t\tif county not in states[state]:\n\t\t\t\tstates[state][county] = {\n\t\t\t\t\t\"deaths\": {}\n\t\t\t\t}\n\t\t\tassert 
varname not in states[state][county]\n\t\t\tstates[state][county][\"deaths\"][varname] = deaths\n\n\t\t# Add formly independent cities to their respective counties.\n\t\tfor state in former_independent_cities:\n\t\t\tfor county in former_independent_cities[state]:\n\t\t\t\t# If either value was suppressed, we keep the concatenated\n\t\t\t\t# value as None.\n\t\t\t\tif states[state][county][\"deaths\"][varname] is None:\n\t\t\t\t\tcontinue\n\t\t\t\tif former_independent_cities[state][county] is None:\n\t\t\t\t\tcontinue\n\t\t\t\tstates[state][county][\"deaths\"][varname] += former_independent_cities[state][county]\n\n\t\tfor state in states:\n\t\t\tfor county in states[state]:\n\t\t\t\tassert varname in states[state][county][\"deaths\"]\n\n\treturn states\n\n# Labor force data\n# https://www.bls.gov/lau/#cntyaa\n\ndef get_labor_force():\n\tstates = {}\n\n\twith open(pjoin('data', 'laborforce.txt'), 'r') as f:\n\t\tlines = f.readlines()\n\t\tfor line in lines[6:]:\n\t\t\tline = line.strip()\n\t\t\tif len(line) == 0:\n\t\t\t\tbreak\n\t\t\tlaus_code, state_fips_code, county_fips_code, county_name, year, labor_force, employed, unemployed, unemployment_rate = re.sub(r\" +\", \" \", line).split(\" \")\n\n\t\t\tif county_name == \"District of Columbia\":\n\t\t\t\tstate = \"District of Columbia\"\n\t\t\t\tcounty_name = state.lower()\n\t\t\telse:\n\t\t\t\tstate = county_name.split(', ')[-1]\n\t\t\t\tif state not in abbreviation_to_name:\n\t\t\t\t\tcontinue\n\t\t\t\tstate = abbreviation_to_name[state]\n\t\t\t\tcounty_name = ', '.join(county_name.split(', ')[:-1]).lower()\n\n\t\t\tif state in not_states:\n\t\t\t\tcontinue\n\n\t\t\tif state not in states:\n\t\t\t\tstates[state] = {}\n\n\t\t\tcounty = {}\n\t\t\tcounty['labor_force'] = float(labor_force.replace(\",\",\"\"))\n\t\t\tcounty['employed'] = float(employed.replace(\",\",\"\"))\n\t\t\tcounty['unemployed'] = float(unemployed.replace(\",\",\"\"))\n\t\t\tcounty['unemployment_rate'] = float(unemployment_rate)\n\t\t\tassert county_name not in states[state]\n\t\t\tstates[state][county_name] = county\n\n\t# Missing county...\n\tassert \"kalawao county\" not in states[\"Hawaii\"]\n\tstates[\"Hawaii\"][\"kalawao county\"] = {\n\t\t\"labor_force\": None,\n\t\t\"employed\": None,\n\t\t\"unemployed\": None,\n\t\t\"unemployment_rate\": None\n\t}\n\n\treturn states\n\ndef get_fatal_police_shootings():\n\tstates = {}\n\n\tfor varname, fn in zip(\n\t\t['total', 'unarmed', 'fire-armed'],\n\t\t['shootings-by-county.json', 'unarmed-shootings-by-county.json', 'shootings-by-county-where-victim-had-firearm.json']):\n\t\twith open(pjoin('generated', fn), 'r') as f:\n\t\t\tshootings = json.load(f)\n\n\t\t\tfor k in shootings:\n\t\t\t\tstate_name = abbreviation_to_name[k[-2:].upper()]\n\t\t\t\tif state_name not in states:\n\t\t\t\t\tstates[state_name] = {}\n\t\t\t\tstate = states[state_name]\n\t\t\t\tcounty_name = k[:-4]\n\n\t\t\t\tif county_name not in state:\n\t\t\t\t\tstate[county_name] = {\n\t\t\t\t\t\t\"fatal_police_shootings\": {}\n\t\t\t\t\t}\n\n\t\t\t\tstate[county_name][\"fatal_police_shootings\"][varname] = shootings[k]\n\n\treturn states\n\ndef get_police_deaths():\n\tstates = {}\n\n\twith open(pjoin('data', 'police-deaths-2019.txt'), 'r') as f:\n\t\tlines = f.readlines()[8:]\n\tlines = [line.strip() for line in lines]\n\tF = {}\n\tfor i in range(0, len(lines), 5):\n\t\tname = lines[i + 0]\n\t\tcause = lines[i + 2]\n\t\tF[cause] = F.get(cause, 0) + 1\n\t\tlocation = lines[i + 3]\n\t\tstate = abbreviation_to_name[location[-2:]]\n\t\tcounty = 
location[:-4].lower()\n\t\tif state not in states:\n\t\t\tstates[state] = {}\n\t\tif county not in states[state]:\n\t\t\tstates[state][county] = {}\n\t\tstates[state][county]['police_deaths'] = states[state][county].get('police_deaths', 0) + 1\n\n\treturn states\n\ndef get_avg_income():\n\tstates = {}\n\tfor fn in os.listdir(pjoin('data', 'CAINC1')):\n\t\twith open(pjoin('data', 'CAINC1', fn), 'r', encoding='Latin-1') as f:\n\t\t\treader = csv.reader(f, delimiter=',')\n\t\t\theader = next(reader)\n\t\t\trows = [row for row in reader]\n\t\t\tfor row in rows[3:-1]:\n\t\t\t\tif len(row) < 7:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif row[6] != 'Per capita personal income (dollars) 2/':\n\t\t\t\t\tcontinue\n\n\t\t\t\t# This is an effective way to ensure the row is\n\t\t\t\t# sensible (and not a footer / footnote).\n\t\t\t\ttry:\n\t\t\t\t\tavg_income = int(row[-1])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tcontinue\n\n\t\t\t\tloc = row[1]\n\t\t\t\t# ignore fotenotes...\n\t\t\t\twhile loc[-1] == '*':\n\t\t\t\t\tloc = loc[:-1]\n\t\t\t\tassert loc[-4:-2] == ', '\n\t\t\t\tcounty = loc[:-4].lower()\n\t\t\t\tstate = abbreviation_to_name[loc[-2:]]\n\n\t\t\t\tif state not in states:\n\t\t\t\t\tstates[state] = {}\n\n\t\t\t\t# These counties are combined...\n\t\t\t\tif loc == 'Maui + Kalawao, HI':\n\t\t\t\t\tassert \"maui county\" not in states[state]\n\t\t\t\t\tassert \"kalawao county\" not in states[state]\n\t\t\t\t\tstates[state]['maui county'] = {\n\t\t\t\t\t\t\"avg_income\": avg_income\n\t\t\t\t\t}\n\t\t\t\t\tstates[state]['kalawao county'] = {\n\t\t\t\t\t\t\"avg_income\": avg_income\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Independent cities are merged with their surrounding counties.\n\t\t\t\t# We un-merge them here.\n\t\t\t\tif '+' in county:\n\t\t\t\t\tparts = [x.strip() for x in re.findall(r\"[^,\\+]+\", county)]\n\t\t\t\t\tassert parts[0] + ' county' not in states[state]\n\t\t\t\t\tstates[state][parts[0] + ' county'] = {\n\t\t\t\t\t\t\"avg_income\": avg_income\n\t\t\t\t\t}\n\t\t\t\t\tfor part in parts[1:]:\n\t\t\t\t\t\tif part[-5:] != ' city':\n\t\t\t\t\t\t\tpart += ' city'\n\t\t\t\t\t\tassert part not in states[state]\n\t\t\t\t\t\tstates[state][part] = {}\n\t\t\t\t\t\tstates[state][part]['avg_income'] = avg_income\n\t\t\t\t\tcontinue\n\n\t\t\t\tassert county not in states[state]\n\t\t\t\tstates[state][county] = {\n\t\t\t\t\t\"avg_income\": avg_income\n\t\t\t\t}\n\n\tfor state in states:\n\t\tfor county in states[state]:\n\t\t\tassert 'avg_income' in states[state][county]\n\n\treturn states\n\ndef get_covid():\n\tblacklist = {\n\t\t\"Alaska\": {\n\t\t\t# Ordinarily we'd map this to \"kusilvak census area\" but,\n\t\t\t# (I assume due to an oversight by either the CDC or\n\t\t\t# usafacts.org) this county represented twice (once for\n\t\t\t# each name). 
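# --- Editor's note (not part of the original record) ------------------------
# get_avg_income() above un-merges BEA rows such as "Albemarle + Charlottesville, VA",
# where an independent city is reported together with its surrounding county,
# by assigning the same per-capita figure to every part. The splitting rule on
# its own (the place name here is only an illustrative example):
import re

def split_combined_location(loc):
    """Return the county/city names hidden in a combined 'A + B, ST' location."""
    assert loc[-4:-2] == ', '
    parts = [x.strip() for x in re.findall(r"[^,\+]+", loc[:-4].lower())]
    names = [parts[0] + ' county']
    for part in parts[1:]:
        names.append(part if part.endswith(' city') else part + ' city')
    return names

assert split_combined_location("Albemarle + Charlottesville, VA") == \
    ["albemarle county", "charlottesville city"]
# -----------------------------------------------------------------------------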
Fortunately both death counts are zero, so\n\t\t\t# we just ignore it for now.\n\t\t\t\"wade hampton census area\": None,\n\t\t},\n\t\t\"California\": {\n\t\t\t\"grand princess cruise ship\": None,\n\t\t},\n\t}\n\n\tstates = {}\n\n\tfor varname, fn in zip(['deaths', 'confirmed'], ['covid_deaths_usafacts.csv', 'covid_confirmed_usafacts.csv']):\n\t\twith open(pjoin('data', fn), 'r') as f:\n\t\t\treader = csv.reader(f, delimiter=',')\n\t\t\theader = next(reader)\n\t\t\trows = [row for row in reader]\n\t\t\tfor date in ['6/3/20', '6/10/20', '6/17/20', '6/24/20', '7/1/20']:\n\t\t\t\tcolumn = header.index(date)\n\t\t\t\tnew_york_unallocated = 0\n\t\t\t\tfor row in rows:\n\t\t\t\t\tif row[1] == 'Statewide Unallocated':\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tcounty = row[1].lower()\n\t\t\t\t\tstate = abbreviation_to_name[row[2]]\n\n\t\t\t\t\tif state not in states:\n\t\t\t\t\t\tstates[state] = {}\n\n\t\t\t\t\t# Lacking any clear/easy alternatives, we simply dump\n\t\t\t\t\t# these in New York County at the end of the loop.\n\t\t\t\t\t# We assert that the number of unallocated deaths is\n\t\t\t\t\t# pretty small. If it ever becomes large (relative\n\t\t\t\t\t# to the New York counties) we may want to revisit\n\t\t\t\t\t# our approach.\n\t\t\t\t\tif county == \"new york city unallocated/probable\":\n\t\t\t\t\t\tnew_york_unallocated = int(row[column])\n\t\t\t\t\t\tassert new_york_unallocated < 500, new_york_unallocated\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t# Apparently this dataset isn't consistent between its own CSV files, so\n\t\t\t\t\t# we need to hard-code some fixes...\n\t\t\t\t\tif state == 'Colorado' and county == \"broomfield county and city\":\n\t\t\t\t\t\tcounty = 'broomfield county'\n\t\t\t\t\tif state == 'Virginia' and county == \"matthews county\":\n\t\t\t\t\t\tcounty = \"mathews county\"\n\n\n\t\t\t\t\tif state == 'Alaska':\n\t\t\t\t\t\tif county == \"wade hampton census area\":\n\t\t\t\t\t\t\twade_hampton = int(row[column])\n\t\t\t\t\t\telif county == \"kusilvak census area\":\n\t\t\t\t\t\t\tkusilvak = int(row[column])\n\n\t\t\t\t\tif state in blacklist and county in blacklist[state]:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif county not in states[state]:\n\t\t\t\t\t\tstates[state][county] = {}\n\n\t\t\t\t\tif f\"covid-{varname}\" not in states[state][county]:\n\t\t\t\t\t\tstates[state][county][f\"covid-{varname}\"] = {}\n\n\t\t\t\t\t# Add growth in covid deaths the week before lock downs started having an effect to estimate the\n\t\t\t\t\t# naive growth rate.\n\t\t\t\t\tif varname == 'deaths':\n\t\t\t\t\t\tif float(row[79-7]) > 0:\n\t\t\t\t\t\t\tstates[state][county][f\"covid-{varname}\"][f'growth-rate-est'] = float(row[79])/float(row[79-7])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tstates[state][county][f\"covid-{varname}\"][f'growth-rate-est'] = None\n\n\t\t\t\t\tassert date not in states[state][county][f\"covid-{varname}\"]\n\t\t\t\t\tstates[state][county][f\"covid-{varname}\"][date] = int(row[column])\n\n\t\t\t\t# We distribute \"unattributed New York deaths\" proportional to how the other\n\t\t\t\t# covid deaths are distributed.\n\t\t\t\ttotal = states['New York']['new york county'][f\"covid-{varname}\"][date] + states['New York']['bronx county'][f\"covid-{varname}\"][date] + states['New York']['kings county'][f\"covid-{varname}\"][date] + states['New York']['queens county'][f\"covid-{varname}\"][date] + states['New York']['richmond county'][f\"covid-{varname}\"][date]\n\t\t\t\tstates['New York']['new york county'][f\"covid-{varname}\"][date] += new_york_unallocated * (states['New York']['new york 
county'][f\"covid-{varname}\"][date] / total)\n\t\t\t\tstates['New York']['bronx county'][f\"covid-{varname}\"][date] += new_york_unallocated * (states['New York']['bronx county'][f\"covid-{varname}\"][date] / total)\n\t\t\t\tstates['New York']['kings county'][f\"covid-{varname}\"][date] += new_york_unallocated * (states['New York']['kings county'][f\"covid-{varname}\"][date] / total)\n\t\t\t\tstates['New York']['queens county'][f\"covid-{varname}\"][date] += new_york_unallocated * (states['New York']['queens county'][f\"covid-{varname}\"][date] / total)\n\t\t\t\tstates['New York']['richmond county'][f\"covid-{varname}\"][date] += new_york_unallocated * (states['New York']['richmond county'][f\"covid-{varname}\"][date] / total)\n\n\t# Wade Hampton and Kusilvak are the same cuonty but, for some reason, exist as two rows. Since both rows have zero\n\t# deaths we won't worry about this for now... but if the rows ever differ we may want to email the CDC and ask why\n\t# they have duplicate rows.\n\tassert wade_hampton == kusilvak, 'If this is ever violated, we need to revisit how we resolve these duplicate rows'\n\n\treturn states\n\ndef get_elections():\n\tstates = {}\n\tfips_to_county = {\n\t\t'08014': ('broomfield county', 'Colorado')\n\t}\n\twith open(pjoin('data', 'fips_to_county.txt'), 'r') as f:\n\t\tfor line in f.readlines():\n\t\t\tif len(line.strip()) == 0:\n\t\t\t\tcontinue\n\t\t\tcode, county, state = line.strip().split('\\t')\n\t\t\t# American Samoa, Northern Mariana Islands, Puerto Rico\n\t\t\tif state in ['AS', 'MP', 'PR']:\n\t\t\t\tcontinue\n\t\t\tfips_to_county[code] = (county.lower(), abbreviation_to_name[state])\n\n\twith open(pjoin('data', 'US_County_Level_Presidential_Results_08-16.csv'), 'r') as f:\n\t\treader = csv.reader(f, delimiter=',')\n\t\theader = next(reader)\n\t\tassert header == ['fips_code', 'county', 'total_2008', 'dem_2008', 'gop_2008', 'oth_2008', 'total_2012', 'dem_2012', 'gop_2012', 'oth_2012', 'total_2016', 'dem_2016', 'gop_2016', 'oth_2016']\n\t\trows = [row for row in reader]\n\t\tfor row in rows:\n\t\t\tcounty, state = fips_to_county[row[0]]\n\n\t\t\tif state not in states:\n\t\t\t\tstates[state] = {}\n\n\t\t\tall2008 = int(row[2])\n\t\t\tdem2008 = int(row[3])\n\t\t\tgop2008 = int(row[4])\n\n\t\t\tall2012 = int(row[6])\n\t\t\tdem2012 = int(row[7])\n\t\t\tgop2012 = int(row[8])\n\n\t\t\tall2016 = int(row[10])\n\t\t\tdem2016 = int(row[11])\n\t\t\tgop2016 = int(row[12])\n\n\t\t\tstates[state][county] = {\n\t\t\t\t\"elections\": {\n\t\t\t\t\t\"2008\": {\n\t\t\t\t\t\t\"total\": all2008,\n\t\t\t\t\t\t\"dem\": dem2008,\n\t\t\t\t\t\t\"gop\": gop2008,\n\t\t\t\t\t},\n\t\t\t\t\t\"2012\": {\n\t\t\t\t\t\t\"total\": all2012,\n\t\t\t\t\t\t\"dem\": dem2012,\n\t\t\t\t\t\t\"gop\": gop2012,\n\t\t\t\t\t},\n\t\t\t\t\t\"2016\": {\n\t\t\t\t\t\t\"total\": all2016,\n\t\t\t\t\t\t\"dem\": dem2016,\n\t\t\t\t\t\t\"gop\": gop2016,\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t\"fips\": row[0]\n\t\t\t}\n\n\t# Missing Alaska\n\tassert \"Alaska\" not in states\n\tstates[\"Alaska\"] = {}\n\n\treturn states\n\nif __name__ == '__main__':\n\tmerger = CountyNameMerger()\n\n\tmerger.merge(get_geometry())\n\tmerger.merge(get_zips())\n\tmerger.merge(get_demographics())\n\tmerger.merge(get_cdc_deaths())\n\tmerger.merge(get_labor_force())\n\n\t# Fatal police shootings are unique in that we don't have an\n\t# entry for every county, because the Washington Post tracks\n\t# stats by *shooting* rather than by county. 
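# --- Editor's note (not part of the original record) ------------------------
# get_covid() above spreads the "New York City Unallocated/Probable" counts
# over the five borough counties in proportion to the counts each borough
# already has. That proportional-allocation step in isolation, with made-up
# numbers:
def allocate_proportionally(unallocated, counts):
    """Distribute `unallocated` across the dict `counts` in proportion to its values."""
    total = sum(counts.values())
    if total == 0:
        return dict(counts)
    return {k: v + unallocated * (v / total) for k, v in counts.items()}

boroughs = {"new york": 100, "bronx": 50, "kings": 150, "queens": 150, "richmond": 50}
adjusted = allocate_proportionally(100, boroughs)
assert abs(sum(adjusted.values()) - 600) < 1e-9   # all 100 unallocated counts are kept
assert abs(adjusted["kings"] - 180.0) < 1e-9      # 150 + 100 * 150/500
# -----------------------------------------------------------------------------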
As a result, we\n\t# need to tolerate having missing counties.\n\tmerger.merge(get_fatal_police_shootings(), allow_missing=True)\n\t# After we merge, add zeros for all missing counties (which\n\t# aren't present in the Washington Post dataset, simply because\n\t# they had no fatal police shootings).\n\tfor state in merger.states:\n\t\tfor county in merger.states[state]:\n\t\t\tif \"fatal_police_shootings\" not in merger.states[state][county]:\n\t\t\t\tmerger.states[state][county][\"fatal_police_shootings\"] = {}\n\t\t\tif \"total\" not in merger.states[state][county][\"fatal_police_shootings\"]:\n\t\t\t\tmerger.states[state][county][\"fatal_police_shootings\"][\"total\"] = 0\n\t\t\tif \"unarmed\" not in merger.states[state][county][\"fatal_police_shootings\"]:\n\t\t\t\tmerger.states[state][county][\"fatal_police_shootings\"][\"unarmed\"] = 0\n\t\t\tif \"fire-armed\" not in merger.states[state][county][\"fatal_police_shootings\"]:\n\t\t\t\tmerger.states[state][county][\"fatal_police_shootings\"][\"fire-armed\"] = 0\n\n\t# We do the same thing for police deaths.\n\tmerger.merge(get_police_deaths(), allow_missing=True)\n\tfor state in merger.states:\n\t\tfor county in merger.states[state]:\n\t\t\tif 'police_deaths' not in merger.states[state][county]:\n\t\t\t\tmerger.states[state][county]['police_deaths'] = 0\n\n\tmerger.merge(get_avg_income())\n\tmerger.merge(get_covid())\n\n\t# We're missing election data for Alaska and Kalawao County, HI\n\tmerger.merge(get_elections(), missing={\n\t\t\"Alaska\": set(merger.states[\"Alaska\"].keys()),\n\t\t\"Hawaii\": {\"kalawao\"}\n\t})\n\n\twith open('states.json', 'w+') as f:\n\t\tjson.dump(merger.states, f, indent=2)\n\n","sub_path":"create_json.py","file_name":"create_json.py","file_ext":"py","file_size_in_byte":28918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"99723249","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\ndef split_data(filename):\r\n x = []\r\n y = []\r\n data = open(filename)\r\n while 1:\r\n line = data.readline()\r\n if line==\"\":\r\n break\r\n line = line[:-1].split(\"\\t\")\r\n line[0] = list(map(float, line[0].split()))\r\n x.append([1] + line[0])\r\n y.append(int(line[1]))\r\n #print(x, y)\r\n return np.array(x), np.array(y)\r\n\r\ndef pocket(w, x, y):\r\n w_temp = w.copy()\r\n indexs = list(range(len(x)))\r\n \r\n updates = 100\r\n while(updates>0):\r\n random.shuffle(indexs)\r\n for i in indexs:\r\n sign = 0\r\n if(np.dot(w_temp,x[i])>0):\r\n sign = 1\r\n else:\r\n sign = -1\r\n if(sign!=y[i]):\r\n w_temp += y[i]* x[i]\r\n updates -= 1\r\n error = test(w, x, y)\r\n error_temp = test(w_temp, x, y)\r\n if(error_temp0):\r\n sign = 1\r\n else:\r\n sign = -1\r\n if(sign!=y[i]):\r\n count += 1\r\n return count\r\n\r\nif __name__ == '__main__':\r\n train_x,train_y = split_data(\"hw1_7_train.dat\")\r\n test_x,test_y = split_data(\"hw1_7_test.dat\")\r\n \r\n error_list = []\r\n repeat = int(input(\"Repeat times: \"))\r\n for i in range(repeat):\r\n w = np.array([0,0,0,0,0],dtype=float)\r\n w_pocket = pocket(w, train_x, train_y)\r\n \r\n error_test = test(w_pocket, test_x, test_y)/len(test_x)\r\n print(i+1, \"times error:\", error_test)\r\n error_list.append(error_test)\r\n print(\"average error:\", sum(error_list)/repeat)\r\n plt.hist(error_list)\r\n plt.savefig(\"error_histogram.png\")\r\n 
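# --- Editor's note (not part of the original record) ------------------------
# The pocket() routine above applies PLA corrections for a fixed budget of 100
# updates and then keeps whichever of the final weights and the starting
# weights misclassifies fewer points. The textbook pocket algorithm instead
# checks against the best weights seen so far after every correction; a
# minimal self-contained sketch of that variant on a toy dataset:
import numpy as np

def pocket_pla(x, y, max_updates=100, seed=0):
    rng = np.random.default_rng(seed)
    w = np.zeros(x.shape[1])
    best_w, best_err = w.copy(), np.sum(np.sign(x @ w) != y)
    for _ in range(max_updates):
        wrong = np.where(np.sign(x @ w) != y)[0]
        if len(wrong) == 0:
            return w, 0
        i = rng.choice(wrong)
        w = w + y[i] * x[i]                          # PLA correction on one mistake
        err = np.sum(np.sign(x @ w) != y)
        if err < best_err:                           # pocket: remember the best weights
            best_w, best_err = w.copy(), err
    return best_w, best_err

x = np.array([[1, 2.0], [1, 1.0], [1, -1.0], [1, -2.0]])   # constant feature + one input
y = np.array([1, 1, -1, -1])
w, err = pocket_pla(x, y)
assert err == 0
# -----------------------------------------------------------------------------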
","sub_path":"機器學習基石/hw1/pocket.py","file_name":"pocket.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"470568586","text":"from tworaven_apps.solver_interfaces.util_search import (\n SearchAutoSklearn,\n SearchCaret,\n SearchH2O,\n SearchTPOT,\n SearchLudwig,\n SearchMLJarSupervised,\n SearchMLBox,\n SearchTwoRavens)\n\n\nclass Solve(object):\n def __init__(self, system, specification,\n callback_found: str, callback_arguments=None,\n system_params=None, search_id=None):\n self.system = system\n self.specification = specification\n self.system_params = system_params or {}\n self.search = {\n 'auto_sklearn': SearchAutoSklearn,\n 'h2o': SearchH2O,\n 'tpot': SearchTPOT,\n 'caret': SearchCaret,\n 'ludwig': SearchLudwig,\n 'mljar-supervised': SearchMLJarSupervised,\n 'mlbox': SearchMLBox,\n 'TwoRavens': SearchTwoRavens\n }[system](\n specification=self.specification['search'],\n callback_found=callback_found,\n callback_arguments=callback_arguments,\n system_params=self.system_params,\n search_id=search_id)\n\n def run(self):\n return self.search.run()\n\n","sub_path":"tworaven_apps/solver_interfaces/util_solve.py","file_name":"util_solve.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"341313125","text":"import tornado.ioloop\nimport tornado.web\nimport time\n\nclass SleepHandler(tornado.web.RequestHandler):\n @tornado.web.asynchronous\n def get(self):\n time.sleep(10)\n self.write('awake')\n\nclass ImmediateHandler(tornado.web.RequestHandler):\n def get(self):\n self.write('I am very fast')\n\napplication = tornado.web.Application([\n (r'/sleep', SleepHandler),\n (r'/immediate', ImmediateHandler)\n ])\n\nif __name__ == '__main__':\n application.listen(9000)\n tornado.ioloop.IOLoop.current().start()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"496105401","text":"##\n##\nclass NetworkNode():\n def __init__(self, name, ip, location):\n self.name = name\n self.ip = ip\n self.location = location\n\nnode001 = NetworkNode('rtr001', '192.168.1.1', '1st floor')\nnode002 = NetworkNode('rtr002', '192.168.1.2', '2nd floor')\nnode003 = NetworkNode('rtr003', '192.168.1.3', '3rd floor')\n\nprint(node001.name, node002.ip, node003.location)\nprint(node002.name, node003.ip, node001.location)\nprint(node003.name, node001.ip, node002.location)\n\n##\n## End of file...\n","sub_path":"TSHOOT/TSHOOT#9/good.myclass1.py","file_name":"good.myclass1.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"649278108","text":"import random\r\nfrom characters import *\r\n\r\nclass square:\r\n def __init__(self,x,y):\r\n self.x=x\r\n self.y=y\r\n self.occupy=False\r\n self.occupant=None\r\n \r\n def clear(self):\r\n self.occupy=False\r\n self.occupant=None\r\n \r\n def samesquare(self,sq):\r\n return (self.x==sq.x) and (self.y==sq.y)\r\n \r\n def beingattacked(self,damage):\r\n if self.occupy:\r\n self.occupant.hp=self.occupant.hp-(damage-self.occupant.armor)\r\n if self.occupant.hp<=0:\r\n self.occupy=False\r\n self.occupant=None\r\n \r\n \r\nclass character:\r\n def __init__(self,name,hp,hpmax,atk,armor,mobility,isgood,position):\r\n self.name=name\r\n self.hp=hp\r\n self.hpmax=hpmax\r\n 
self.atk=atk\r\n self.armor=armor\r\n self.mobility=mobility\r\n self.isgood=isgood\r\n self.position=position\r\n\r\nclass attack:\r\n def __init__(self,damage,target):\r\n self.damage=damage\r\n self.target=target\r\n \r\n def attackdir(self,dir):\r\n if dir==0:\r\n return attack(0,[])\r\n elif dir==1:\r\n return self\r\n elif dir==2:\r\n newtarget=[]\r\n for i in (self.target):\r\n newtarget.append((i[1],-i[0]))\r\n return attack(self.damage,newtarget)\r\n elif dir==3:\r\n newtarget=[]\r\n for i in (self.target):\r\n newtarget.append((-i[0],-i[1]))\r\n return attack(self.damage,newtarget)\r\n elif dir==4:\r\n newtarget=[]\r\n for i in (self.target):\r\n newtarget.append((-i[1],i[0]))\r\n return attack(self.damage,newtarget)\r\n else:\r\n return None\r\n \r\n\r\nclass board:\r\n def __init__(self):\r\n self.grid=[[square(i,j) for j in range (12)] for i in range(12)]\r\n \r\n def makemove(self,m):\r\n if m.start!=m.end:\r\n f=m.start\r\n t=m.end\r\n self.grid[t.x][t.y].occupy=True\r\n f.occupant.position=self.grid[t.x][t.y]\r\n self.grid[t.x][t.y].occupant=f.occupant\r\n self.grid[f.x][f.y].clear()\r\n \r\n #assume the move is valid\r\n \r\n \r\n def gamestatus(self):\r\n good=False\r\n bad=False\r\n for i in range (12):\r\n for j in range(12):\r\n if self.grid[i][j].occupy:\r\n if self.grid[i][j].occupant.isgood:\r\n good=True\r\n else:\r\n bad=True\r\n if (good and bad):\r\n return 0\r\n elif good:\r\n return 1\r\n else:\r\n return -1\r\n \r\n def display(self):\r\n for i in range (12):\r\n for j in range (12):\r\n if not self.grid[i][j].occupy:\r\n print (\"*\",end=\"\")\r\n else :\r\n print (self.grid[i][j].occupant.name[0],end=\"\")\r\n print (\"\")\r\n\r\n\r\nclass move:\r\n def __init__(self,start,end):\r\n self.start=start\r\n self.end=end\r\n \r\n def samemove(self,m):\r\n return (self.start.samesquare(m.start)) and (self.end.samesquare(m.end))\r\n \r\n def isin (self,ms):\r\n flag=False\r\n for i in ms:\r\n if self.samemove(i):\r\n flag=True\r\n return flag\r\n \r\n \r\n\r\nclass player:\r\n def __init__(self,name,control,board):\r\n self.name=name\r\n self.control=control\r\n self.thisturn=False\r\n self.board=board\r\n \r\n def getallmove(self):\r\n moves=[]\r\n if self.control!=None:\r\n current=(self.control.position.x,self.control.position.y)\r\n front=[current]\r\n fronttemp=[]\r\n mob=self.control.mobility\r\n while mob>=0:\r\n for i in front:\r\n j=(i[0]-1,i[1]) \r\n if (not j in front) and (not j in moves) and (not j in fronttemp) and j[0]>=0 and j[0]<12 and j[1]>=0 and j[1]<12:\r\n if self.board.grid[j[0]][j[1]].occupant==None:\r\n fronttemp.append(j)\r\n j=(i[0]+1,i[1])\r\n if (not j in front) and (not j in moves) and (not j in fronttemp) and j[0]>=0 and j[0]<12 and j[1]>=0 and j[1]<12:\r\n if self.board.grid[j[0]][j[1]].occupant==None:\r\n fronttemp.append(j) \r\n j=(i[0],i[1]-1)\r\n if (not j in front) and (not j in moves) and (not j in fronttemp) and j[0]>=0 and j[0]<12 and j[1]>=0 and j[1]<12:\r\n if self.board.grid[j[0]][j[1]].occupant==None:\r\n fronttemp.append(j) \r\n j=(i[0],i[1]+1)\r\n if (not j in front) and (not j in moves) and (not j in fronttemp) and j[0]>=0 and j[0]<12 and j[1]>=0 and j[1]<12:\r\n if self.board.grid[j[0]][j[1]].occupant==None:\r\n fronttemp.append(j)\r\n mob=mob-1\r\n moves=moves+front\r\n front=fronttemp\r\n fronttemp=[]\r\n out=[]\r\n moves=list(set(moves))\r\n for i in moves:\r\n out.append(move(self.board.grid[current[0]][current[1]],self.board.grid[i[0]][i[1]]))\r\n return out\r\n \r\n \r\n def selectmove(self):\r\n 
m=move(square(-1,-1),square(-1,-1))\r\n allm=self.getallmove()\r\n while not (m.isin(allm)):\r\n x=int(input(self.control.name+\"x>>\"))\r\n y=int(input(self.control.name+\"y>>\"))\r\n m=move(self.control.position,self.board.grid[x][y])\r\n return m\r\n \r\n def makeattack(self,dir):\r\n ts=self.control.atk.attackdir(dir).target\r\n targets=[]\r\n for i in ts:\r\n targets.append((self.control.position.x+i[0],self.control.position.y+i[1]))\r\n for k in targets:\r\n if (k[0]<0 or k[0]>11 or k[1]<0 or k[1]>11) or self.board.grid[k[0]][k[1]].occupy==False:\r\n targets.remove(k)\r\n for j in targets:\r\n self.board.grid[j[0]][j[1]].beingattacked(self.control.atk.damage)\r\n \r\n def selectattack(self):\r\n n=-1\r\n while n!=0 and n!=1 and n!=2 and n!=3 and n!=4:\r\n n=int(input(\"dir>>\"))\r\n return n\r\n \r\n def gettargets(self,dir):\r\n ts=self.control.atk.attackdir(dir).target\r\n targets=[]\r\n for i in ts:\r\n targets.append((self.control.position.x+i[0],self.control.position.y+i[1]))\r\n for j in targets:\r\n if (j[0]<0 or j[0]>11 or j[1]<0 or j[1]>11) or self.board.grid[j[0]][j[1]].occupy==False:\r\n targets.remove(j)\r\n return targets\r\n \r\n \r\n \r\n def aiattack(self):\r\n max=(0,0)\r\n for i in range (1,5):\r\n count=0\r\n lst=self.gettargets(i)\r\n for j in lst:\r\n t=self.board.grid[j[0]][j[1]]\r\n if t.occupy:\r\n if t.occupant.isgood:\r\n count+=1\r\n else:\r\n count+=-1\r\n if count>max[0]:\r\n max=(count,i)\r\n return max[0]\r\n \r\n def aimove(self):\r\n allm=self.getallmove()\r\n r=random.randint(0,len(allm)-1)\r\n return allm[r]\r\n \r\ndef createplayer(c,name):\r\n c.position.occupy=True\r\n c.position.occupant=c\r\n ps.append(player(name,c,b))\r\n\r\ndef main():\r\n createplayer(cavalry,\"Cavalry\")\r\n createplayer(minion,\"minion\")\r\n createplayer(skelton,\"skelton\")\r\n createplayer(skelton2,\"skelton2\")\r\n createplayer(skelton3,\"skelton3\")\r\n index=0\r\n b.display()\r\n while (b.gamestatus()==0):\r\n turn=ps[index]\r\n if turn.control.isgood:\r\n m=turn.selectmove()\r\n b.makemove(m)\r\n b.display()\r\n print (\"\")\r\n turn.makeattack(turn.selectattack()) \r\n else:\r\n m=turn.aimove()\r\n b.makemove(m)\r\n b.display()\r\n print (\"\")\r\n turn.makeattack(turn.aiattack())\r\n b.display()\r\n print (\"\") \r\n for i in ps:\r\n print (i.control.name,\"has\",i.control.hp,\"health\")\r\n index=(index+1)%len(ps)\r\n if (b.gamestatus==1):\r\n print (\"you wins\")\r\n else:\r\n print(\"gg\")\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n#def main2():\r\n# b=board()\r\n# c=character(\"g\",30,30,attack(10,[(1,0)]),5,2,True,b.grid[0][0])\r\n# e=character(\"b\",20,20,attack(20,[(1,1)]),5,2,False,b.grid[1][1])\r\n# b.grid[0][0].occupy=True\r\n# b.grid[0][0].occupant=c \r\n# b.grid[1][1].occupy=True\r\n# b.grid[1][1].occupant=e\r\n# p=player(\"ht\",c,b)\r\n# q=player(\"xx\",e,b)\r\n# turn=p \r\n# while (b.gamestatus()==0):\r\n# b.display()\r\n# print (\"player1 heath\",p.control.hp)\r\n# print (\"player2 heath\",q.control.hp)\r\n# if (turn==p):\r\n# print (\"player1 moves\")\r\n# else:\r\n# print (\"player2 moves\")\r\n# m=turn.selectmove()\r\n# b.makemove(m)\r\n# b.display()\r\n# turn.makeattack(turn.selectattack())\r\n# print (\"player1 heath\",p.control.hp)\r\n# print (\"player2 heath\",q.control.hp)\r\n# if (turn==p):\r\n# turn=q\r\n# else:\r\n# turn=p\r\n# if (b.gamestatus==1):\r\n# print (\"player1 wins\")\r\n# else:\r\n# print(\"player2 wins\")\r\n \r\n \r\n#b=board() 
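# --- Editor's note (not part of the original record) ------------------------
# attack.attackdir() above turns an attack's relative (dx, dy) target offsets
# in 90-degree steps: direction 2 maps (x, y) to (y, -x), 3 to (-x, -y), 4 to
# (-y, x), and direction 0 clears the target list. The same rotation rule as a
# tiny checkable sketch, using the Cavalry-style two-square thrust [(1, 0), (2, 0)]:
def rotate_offsets(offsets, direction):
    if direction == 0:
        return []
    turn = {1: lambda x, y: (x, y),
            2: lambda x, y: (y, -x),
            3: lambda x, y: (-x, -y),
            4: lambda x, y: (-y, x)}[direction]
    return [turn(x, y) for x, y in offsets]

thrust = [(1, 0), (2, 0)]
assert rotate_offsets(thrust, 2) == [(0, -1), (0, -2)]   # quarter turn
assert rotate_offsets(thrust, 3) == [(-1, 0), (-2, 0)]   # reversed
# -----------------------------------------------------------------------------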
\r\n#c=character(\"g\",100,100,attack(10,[(1,0)]),5,2,True,b.grid[7][5])\r\n#b.grid[7][5].occupy=True\r\n#b.grid[7][5].occupant=c\r\n#e=character(\"b\",50,50,attack(20,[(1,1)]),5,2,False,b.grid[8][5])\r\n#b.grid[8][5].occupy=True\r\n#b.grid[8][5].occupant=e\r\n#p=player(\"ht\",c,b)\r\n#b.display()\r\n#for i in range (5):\r\n# print (p.gettargets(i))\r\n# b.makemove(move(b.grid[5][5],b.grid[7][5]))\r\n# print (\"\")\r\n# b.display()\r\n# print (\"\")\r\n# p.makeattack(1)\r\n# print (b.grid[8][5].occupant.hp)\r\n# print (len(p.getallmove()))\r\nps = []\r\nb = board()\r\n\r\ncavalry = character(\"Cavalry\", 450, 450, attack(40, [(1,0),(2,0)]), 10, 5, True, b.grid[10][9])\r\nminion=character(\"minion\",100,100,attack(20,[(1,0)]),2,2,False,b.grid[1][0])\r\nminion2=character(\"minion2\",100,100,attack(20,[(1,0)]),2,2,False,b.grid[1][1])\r\nminion3=character(\"minion3\",100,100,attack(20,[(1,0)]),2,2,False,b.grid[1][2])\r\ndemon=character(\"demon\",1000,1000,attack(150,[(1,0),(1,1),(1,-1)]),15,3,False,b.grid[0][0])\r\nskelton=character(\"skelton\",50,50,attack(50,[(1,0)]),0,4,False,b.grid[1][3])\r\nskelton2=character(\"skelton2\",50,50,attack(50,[(1,0)]),0,4,False,b.grid[1][4])\r\nskelton3=character(\"skelton3\",50,50,attack(50,[(1,0)]),0,4,False,b.grid[1][5])\r\n\r\ncreateplayer(cavalry, \"Cavalry\")\r\ncreateplayer(minion, \"minion\")\r\ncreateplayer(skelton, \"skelton\")\r\ncreateplayer(skelton2, \"skelton2\")\r\ncreateplayer(skelton3, \"skelton3\")\r\n\r\n# main()\r\n\r\n\r\n\r\n ","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":11028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"352782291","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\n\nsys.path.append( os.path.join(sys.path[0], 'system/networks') )\nsys.path.append( os.path.join(sys.path[0], 'system/devices') )\n\nfrom deviceConstants import *\nimport deviceObject\n\nfrom networkConstants import *\n\nfrom modbusConstants import *\n# import modbusConstants\n\nimport copy\nfrom collections import OrderedDict\n\n\nimport logsystem\nlog = logsystem.getLogger()\n\ndeviceType = \"EVCO-EV3224\"\ndeviceTypeName = \"EVCO-EV3224\"\nexecutionType = deviceNetworkExecution\nexecutionTypeName = deviceNetworkExecutionText\n\nvalueDescriptions = OrderedDict()\n\n\n# Analog Inputs - Dedicated Register value \nvalueDescriptions[\"Probe 1\"] = {\"dataType\":dataTypeFloat,\"valueType\":valueTypeOutput,\"significantDigits\":1,\"displayName\":\"Probe 1\",\"unitType\":unitTypeTemperature, \"defaultLog\":\"true\"}\nvalueDescriptions[\"Probe 2\"] = {\"dataType\":dataTypeFloat,\"valueType\":valueTypeOutput,\"significantDigits\":1,\"displayName\":\"Probe 2\",\"unitType\":unitTypeTemperature, \"defaultLog\":\"true\"}\nvalueDescriptions[\"Probe 3\"] = {\"dataType\":dataTypeFloat,\"valueType\":valueTypeOutput,\"significantDigits\":1,\"displayName\":\"Probe 3\",\"unitType\":unitTypeTemperature, \"defaultLog\":\"true\"}\n\n#Digital Inputs 101\nvalueDescriptions[\"DoorDigitalInput\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"Open\", \"false\": \"Closed\"},\"valueType\":valueTypeOutput,\"displayName\":\"Door Status\", \"defaultLog\":\"true\"}\nvalueDescriptions[\"MultifunctionDigitalInput\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"Mute\", \"false\": \"-\"},\"valueType\":valueTypeOutput,\"displayName\":\"MultifunctionDigitalInput\"}\n\n# OUTPUT - Status Flag bits\n# 100 - Status 
flag\n#valueDescriptions[\"ALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"Alarm\"}\nvalueDescriptions[\"DEFROST Phase\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"0\":\"Off\", \"1\": \"On\"},\"valueType\":valueTypeOutput,\"displayName\":\"DEFROST Phase\", \"defaultLog\":\"true\"}\nvalueDescriptions[\"Status Light\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"0\":\"Off\", \"1\": \"On\"},\"valueType\":valueTypeOutput,\"displayName\":\"Status Light\", \"defaultLog\":\"true\"}\nvalueDescriptions[\"Status Aux\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"0\":\"Off\", \"1\": \"On\"},\"valueType\":valueTypeOutput,\"displayName\":\"Status Aux\", \"defaultLog\":\"true\"}\nvalueDescriptions[\"Status Resistors\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"0\":\"Off\", \"1\": \"On\"},\"valueType\":valueTypeOutput,\"displayName\":\"Status Resistors\", \"defaultLog\":\"true\"}\nvalueDescriptions[\"Set point\"] = {\"dataType\":dataTypeFloat,\"valueType\":valueTypeOutput,\"significantDigits\":1,\"displayName\":\"Probe 3\",\"unitType\":unitTypeTemperature, \"defaultLog\":\"true\"}\n\n# 101 - Alarm flag\n# valueDescriptions[\"T1PROBEALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"T1 Probe Alarm\",\"description\":\"T1 Probe Alarm\"}\n# valueDescriptions[\"T2PROBEALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"T2 Probe Alarm\",\"description\":\"T2 Probe Alarm\"}\n# valueDescriptions[\"T3PROBEALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"T3 Probe Alarm\",\"description\":\"T3 Probe Alarm\"}\n# valueDescriptions[\"HIGHTEMPALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"High Temp Alarm\",\"description\":\"High Temp Alarm\"}\n# valueDescriptions[\"LOWTEMPALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"Low Temp Alarm\",\"description\":\"Low Temp Alarm\"}\n# valueDescriptions[\"HIGHCONDALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"Generic Alarm\",\"description\":\"Generic Alarm\"}\n# valueDescriptions[\"HIGHPRESALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"High Pressure Alarm\",\"description\":\"High Pressure Alarm\"}\n# valueDescriptions[\"DOOROPENALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"Door Open Alarm\",\"description\":\"Door Open Alarm\"}\n# valueDescriptions[\"CONDCLEANALARM\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"Condenser Clean Alarm\",\"description\":\"Condenser Clean Alarm\"}\n\n# 181 - Output 
Digital flag\nvalueDescriptions[\"Output K1\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"Output K1\", \"defaultLog\":\"true\"}\nvalueDescriptions[\"Output K2\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"Output K2\", \"defaultLog\":\"true\"}\nvalueDescriptions[\"Output K3\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"Output K3\", \"defaultLog\":\"true\"}\nvalueDescriptions[\"Output K4\"] = {\"dataType\":dataTypeBool,\"unitType\":\"OnOff\",\"dataList\":{\"true\":\"On\", \"false\": \"Off\"},\"valueType\":valueTypeOutput,\"displayName\":\"Output K4\", \"defaultLog\":\"true\"}\n\n\n# Configuration Registers\n# valueDescriptions[\"IT\"] = {\"displayName\":\"IT\",\"description\":\"Integral Output Cycle time (10-500 sec)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"90\"}\n# valueDescriptions[\"AO1L\"] = {\"displayName\":\"AO1L\",\"description\":\"Econ 1 Volt Low Offset (1=0.04v)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"AO1H\"] = {\"displayName\":\"AO1H\",\"description\":\"Econ 1 Volt High Offset (1=0.04v)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"-10\"}\n# valueDescriptions[\"AO2L\"] = {\"displayName\":\"AO2L\",\"description\":\"VS Fan, 1 Volt Low Offset (1=0.04v)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"AO2H\"] = {\"displayName\":\"AO2H\",\"description\":\"VS FAn, 1 Volt High Offset (1=0.04v)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"-10\"}\n# valueDescriptions[\"OFST\"] = {\"displayName\":\"OFST\",\"description\":\"Built-In Space Temp 1 Offset (-18.0~18.0)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"0\"}\n# valueDescriptions[\"PB\"] = {\"displayName\":\"PB\",\"description\":\"Proportional Band (0.0~18.0)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeDeltaTemperature,\"significantDigits\":1,\"default\":\"0\"}\n# valueDescriptions[\"DIFF\"] = {\"displayName\":\"DIFF\",\"description\":\"Stage Differential (0.1~2.0)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeDeltaTemperature,\"significantDigits\":1,\"default\":\"1\"}\n# valueDescriptions[\"LOC\"] = {\"displayName\":\"LOC\",\"description\":\"Disabling Front Button Selection\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Unlock All Button\",\"249\":\"UP & Down Setpoint\",\"255\":\"Lock All Button\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"RE1\"] = {\"displayName\":\"RE1\",\"description\":\"Econ AO1 Direct/Reverse Control Output\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Direct\",\"1\":\"Reverse\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"RE2\"] = {\"displayName\":\"RE2\",\"description\":\"VS Fan AO2 Direct/Reverse Control Output\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Direct\",\"1\":\"Reverse\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"RS\"] = {\"displayName\":\"RS\",\"description\":\"Control Temperature Source\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Space 
Temp 1\",\"1,\":\"Space Temp\",\"2\":\"Average Space T 1 & 2\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"SP\"] = {\"displayName\":\"SP\",\"description\":\"Thermostat LCD Display Options\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Temp/Time\",\"1\":\"SP/Time\",\"2\":\"Temp/RH\",\"3\":\"SP/RH\",\"4\":\"Temp/Dewpt\",\"5\":\"Dewpt/Time\",\"6\":\"Temp/Time-RH\",\"7\":\"Supply Temp\",\"8\":\"Time/Rotate\"},\"valueType\":valueTypeConfig,\"default\":\"8\"}\n# valueDescriptions[\"BAUD\"] = {\"displayName\":\"BAUD\",\"description\":\"Modbus Baud Rate Speed\",\"dataType\":dataTypeList,\"dataList\":{\"1\":\"2400 bps\",\"2\":\"4800 bps\",\"3\":\"9600 bps\",\"5\":\"19200 bps\",\"6\":\"38400 bps\",\"7\":\"57600 bps\",\"8\":\"115200 bps\"},\"valueType\":valueTypeConfig,\"default\":\"5\"}\n# valueDescriptions[\"PRTY\"] = {\"displayName\":\"PRTY\",\"description\":\"Modbus Parity Data/Stop Bits\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Even81\",\"1\":\"Odd81\",\"2\":\"none82\",\"3\":\"none81\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"ID\"] = {\"displayName\":\"ID\",\"description\":\"Modbus Node ID\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1\"}\n# valueDescriptions[\"RHOF\"] = {\"displayName\":\"RHOF\",\"description\":\"Humidity Reading Offset (-30.0~30.0)\",\"dataType\":dataTypeFloat,\"unitType\": \"Percent\",\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"DST\"] = {\"displayName\":\"DST\",\"description\":\"USA Daylight Savings Enable\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Disable\",\"1\":\"Enable\"},\"valueType\":valueTypeConfig,\"default\":\"1\"}\n# valueDescriptions[\"FAN\"] = {\"displayName\":\"FAN\",\"description\":\"Fan Type Selection\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Single Speed\",\"1\":\"Variable Speed\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"HFAN\"] = {\"displayName\":\"HFAN\",\"description\":\"Fan Off Delay for Heat Mode (0~300 sec)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"120\"}\n# valueDescriptions[\"CFAN\"] = {\"displayName\":\"CFAN\",\"description\":\"Fan Off Delay for Cool Mode (0~300 sec)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"OFAN\"] = {\"displayName\":\"OFAN\",\"description\":\"Fan Mode For Occupied Period\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Automatic\",\"1\":\"Continuous\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"UFAN\"] = {\"displayName\":\"UFAN\",\"description\":\"Fan Mode For Unoccupied Period\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Automatic\",\"1\":\"Continuous\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"H1F\"] = {\"displayName\":\"H1F\",\"description\":\"Fan % Output for Heat Stage 1\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"40\"}\n# valueDescriptions[\"H2F\"] = {\"displayName\":\"H2F\",\"description\":\"Fan % Output for Heat Stage 2\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"80\"}\n# valueDescriptions[\"C1F\"] = {\"displayName\":\"C1F\",\"description\":\"Fan % Output for Cool Stage 1\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"60\"}\n# valueDescriptions[\"C2F\"] = {\"displayName\":\"C2F\",\"description\":\"Fan % Output for Cool Stage 2\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"100\"}\n# 
valueDescriptions[\"IDF\"] = {\"displayName\":\"IDF\",\"description\":\"Fan % Output for Idle Time\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"30\"}\n# valueDescriptions[\"DHF\"] = {\"displayName\":\"DHF\",\"description\":\"Fan% For Dehum When No Heat Or Cool\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"50\"}\n# valueDescriptions[\"HDLY\"] = {\"displayName\":\"HDLY\",\"description\":\"Inter-Stage Heat Delay (1~10 mins)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1\"}\n# valueDescriptions[\"CDLY\"] = {\"displayName\":\"CDLY\",\"description\":\"Inter-Stage Cool Delay (1~10 mins)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1\"}\n# valueDescriptions[\"RHOP\"] = {\"displayName\":\"RHOP\",\"description\":\"Dehumidification Control Options\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Monitor Only\",\"1\":\"Cool and Heat\",\"2\":\"Dehumidify\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"OHSP\"] = {\"displayName\":\"OHSP\",\"description\":\"RH Occupied Cut-In (0~100)\",\"dataType\":dataTypeFloat, \"unitType\": \"Percent\",\"valueType\":valueTypeConfig,\"default\":\"55\"}\n# valueDescriptions[\"OHDF\"] = {\"displayName\":\"OHDF\",\"description\":\"RH Occupied Cut-Out Differential (0~50)\",\"dataType\":dataTypeFloat, \"unitType\": \"Percent\",\"valueType\":valueTypeConfig,\"default\":\"5\"}\n# valueDescriptions[\"UHSP\"] = {\"displayName\":\"UHSP\",\"description\":\"RH Unoccupied Cut-In (0~100)\",\"dataType\":dataTypeFloat, \"unitType\": \"Percent\",\"valueType\":valueTypeConfig,\"default\":\"60\"}\n# valueDescriptions[\"UHDF\"] = {\"displayName\":\"UHDF\",\"description\":\"RH Unoccupied Cut-Out Differential (0~50)\",\"dataType\":dataTypeFloat, \"unitType\": \"Percent\",\"valueType\":valueTypeConfig,\"default\":\"5\"}\n# valueDescriptions[\"SPLY\"] = {\"displayName\":\"SPLY\",\"description\":\"Supply Temperature Sensor Enable\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Disable\",\"1\":\"Enable\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"ECO\"] = {\"displayName\":\"ECO\",\"description\":\"Economizer Function\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Disable\",\"1\":\"Enable\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"ECDO\"] = {\"displayName\":\"ECDO\",\"description\":\"Economizer Command\",\"dataType\":dataTypeList,\"dataList\":{\"0\":\"Off\",\"1\":\"On\"},\"valueType\":valueTypeConfig,\"default\":\"0\"}\n# valueDescriptions[\"ECON\"] = {\"displayName\":\"ECON\",\"description\":\"Econimizer ON Output Level (0~100%)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"80\"}\n# valueDescriptions[\"ECOF\"] = {\"displayName\":\"ECOF\",\"description\":\"Econimizer OFF Output Level (0~100%)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"10\"}\n# valueDescriptions[\"SPDT\"] = {\"displayName\":\"SPDT\",\"description\":\"Temporary Temp Setpt Range (2.0~20.0)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeDeltaTemperature,\"significantDigits\":1,\"default\":\"2\"}\n# valueDescriptions[\"SPT\"] = {\"displayName\":\"SPT\",\"description\":\"Temporary Temp Setpt Duration (15~120 mins)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"60\"}\n# valueDescriptions[\"HTSP\"] = {\"displayName\":\"HTSP\",\"description\":\"Dehumification Reheat Setpoint 
(32.0~122.0)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeDeltaTemperature,\"significantDigits\":1,\"default\":\"65\"}\n# valueDescriptions[\"RSOF\"] = {\"displayName\":\"RSOF\",\"description\":\"Remote Temperature Offset (-18.0~18.0)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeDeltaTemperature,\"significantDigits\":1,\"default\":\"0\"}\n# valueDescriptions[\"SSOF\"] = {\"displayName\":\"SSOF\",\"description\":\"Supply Temperature Offset (-18.0~18.0)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeDeltaTemperature,\"significantDigits\":1,\"default\":\"0\"}\n\n# Configuration Schedule Registers\n# Sunday\n# valueDescriptions[\"SUNOCC\"] = {\"displayName\":\"Sunday Occupied\",\"description\":\"Sunday Occupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"800\"}\n# valueDescriptions[\"SUNOCCSETCL\"] = {\"displayName\":\"Sunday OCC Cool\",\"description\":\"Sunday Occupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"26.1\"}\n# valueDescriptions[\"SUNOCCSETHT\"] = {\"displayName\":\"Sunday OCC Heat\",\"description\":\"Sunday Occupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"21.1\"}\n# valueDescriptions[\"SUNUNOCC\"] = {\"displayName\":\"Sunday Unoccupied\",\"description\":\"Sunday Unoccupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1800\"}\n# valueDescriptions[\"SUNUNOCCSETCL\"] = {\"displayName\":\"Sunday Unocc Cool\",\"description\":\"Sunday Unoccupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"29.4\"}\n# valueDescriptions[\"SUNUNOCCSETHT\"] = {\"displayName\":\"Sunday Unocc Heat\",\"description\":\"Sunday Unoccupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"16.6\"}\n# Monday\n# valueDescriptions[\"MONOCC\"] = {\"displayName\":\"Monday Occupied\",\"description\":\"Monday Occupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"800\"}\n# valueDescriptions[\"MONOCCSETCL\"] = {\"displayName\":\"Monday OCC Cool\",\"description\":\"Monday Occupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"26.1\"}\n# valueDescriptions[\"MONOCCSETHT\"] = {\"displayName\":\"Monday OCC Heat\",\"description\":\"Monday Occupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"21.1\"}\n# valueDescriptions[\"MONUNOCC\"] = {\"displayName\":\"Monday Unoccupied\",\"description\":\"Monday Unoccupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1800\"}\n# valueDescriptions[\"MONUNOCCSETCL\"] = {\"displayName\":\"Monday Unocc Cool\",\"description\":\"Monday Unoccupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"29.4\"}\n# valueDescriptions[\"MONUNOCCSETHT\"] = {\"displayName\":\"Monday Unocc Heat\",\"description\":\"Monday Unoccupied Heat 
(4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"16.6\"}\n# Tuesday\n# valueDescriptions[\"TUEOCC\"] = {\"displayName\":\"Tuesday Occupied\",\"description\":\"Tuesday Occupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"800\"}\n# valueDescriptions[\"TUEOCCSETCL\"] = {\"displayName\":\"Tuesday OCC Cool\",\"description\":\"Tuesday Occupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"26.1\"}\n# valueDescriptions[\"TUEOCCSETHT\"] = {\"displayName\":\"Tuesday OCC Heat\",\"description\":\"Tuesday Occupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"21.1\"}\n# valueDescriptions[\"TUEUNOCC\"] = {\"displayName\":\"Tuesday Unoccupied\",\"description\":\"Tuesday Unoccupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1800\"}\n# valueDescriptions[\"TUEUNOCCSETCL\"] = {\"displayName\":\"Tuesday Unocc Cool\",\"description\":\"Tuesday Unoccupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"29.4\"}\n# valueDescriptions[\"TUEUNOCCSETHT\"] = {\"displayName\":\"Tuesday Unocc Heat\",\"description\":\"Tuesday Unoccupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"16.6\"}\n# Wednesday\n# valueDescriptions[\"WEDOCC\"] = {\"displayName\":\"Wednesday Occupied\",\"description\":\"Wednesday Occupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"800\"}\n# valueDescriptions[\"WEDOCCSETCL\"] = {\"displayName\":\"Wednesday OCC Cool\",\"description\":\"Wednesday Occupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"26.1\"}\n# valueDescriptions[\"WEDOCCSETHT\"] = {\"displayName\":\"Wednesday OCC Heat\",\"description\":\"Wednesday Occupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"21.1\"}\n# valueDescriptions[\"WEDUNOCC\"] = {\"displayName\":\"Wednesday Unoccupied\",\"description\":\"Wednesday Unoccupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1800\"}\n# valueDescriptions[\"WEDUNOCCSETCL\"] = {\"displayName\":\"Wednesday Unocc Cool\",\"description\":\"Wednesday Unoccupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"29.4\"}\n# valueDescriptions[\"WEDUNOCCSETHT\"] = {\"displayName\":\"Wednesday Unocc Heat\",\"description\":\"Wednesday Unoccupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"16.6\"}\n# Thursday\n# valueDescriptions[\"THUOCC\"] = {\"displayName\":\"Thursday Occupied\",\"description\":\"Thursday Occupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"800\"}\n# valueDescriptions[\"THUOCCSETCL\"] = {\"displayName\":\"Thursday OCC Cool\",\"description\":\"Thursday Occupied Cool 
(5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"26.1\"}\n# valueDescriptions[\"THUOCCSETHT\"] = {\"displayName\":\"Thursday OCC Heat\",\"description\":\"Thursday Occupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"21.1\"}\n# valueDescriptions[\"THUUNOCC\"] = {\"displayName\":\"Thursday Unoccupied\",\"description\":\"Thursday Unoccupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1800\"}\n# valueDescriptions[\"THUUNOCCSETCL\"] = {\"displayName\":\"Thursday Unocc Cool\",\"description\":\"Thursday Unoccupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"29.4\"}\n# valueDescriptions[\"THUUNOCCSETHT\"] = {\"displayName\":\"Thursday Unocc Heat\",\"description\":\"Thursday Unoccupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"16.6\"}\n# Friday\n# valueDescriptions[\"FRIOCC\"] = {\"displayName\":\"Friday Occupied\",\"description\":\"Friday Occupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"800\"}\n# valueDescriptions[\"FRIOCCSETCL\"] = {\"displayName\":\"Friday OCC Cool\",\"description\":\"Friday Occupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"26.1\"}\n# valueDescriptions[\"FRIOCCSETHT\"] = {\"displayName\":\"Friday OCC Heat\",\"description\":\"Friday Occupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"21.1\"}\n# valueDescriptions[\"FRIUNOCC\"] = {\"displayName\":\"Friday Unoccupied\",\"description\":\"Friday Unoccupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1800\"}\n# valueDescriptions[\"FRIUNOCCSETCL\"] = {\"displayName\":\"Friday Unocc Cool\",\"description\":\"Friday Unoccupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"29.4\"}\n# valueDescriptions[\"FRIUNOCCSETHT\"] = {\"displayName\":\"Friday Unocc Heat\",\"description\":\"Friday Unoccupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"16.6\"}\n# Saturday\n# valueDescriptions[\"SATOCC\"] = {\"displayName\":\"Saturday Occupied\",\"description\":\"Saturday Occupied Time (0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"800\"}\n# valueDescriptions[\"SATOCCSETCL\"] = {\"displayName\":\"Saturday OCC Cool\",\"description\":\"Saturday Occupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"26.1\"}\n# valueDescriptions[\"SATOCCSETHT\"] = {\"displayName\":\"Saturday OCC Heat\",\"description\":\"Saturday Occupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"21.1\"}\n# valueDescriptions[\"SATUNOCC\"] = {\"displayName\":\"Saturday Unoccupied\",\"description\":\"Saturday Unoccupied Time 
(0~2359)\",\"dataType\":dataTypeInt,\"valueType\":valueTypeConfig,\"default\":\"1800\"}\n# valueDescriptions[\"SATUNOCCSETCL\"] = {\"displayName\":\"Saturday Unocc Cool\",\"description\":\"Saturday Unoccupied Cool (5~98.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"29.4\"}\n# valueDescriptions[\"SATUNOCCSETHT\"] = {\"displayName\":\"Saturday Unocc Heat\",\"description\":\"Saturday Unoccupied Heat (4~89.5)\",\"dataType\":dataTypeFloat,\"valueType\":valueTypeConfig,\"unitType\":unitTypeTemperature,\"significantDigits\":1,\"default\":\"16.6\"}\n\n\n# ALARMS\nalarmDescriptions = OrderedDict()\n\n\n\nclass Device(deviceObject.NetworkDeviceObject):\n def __init__(self, deviceManager, name, description, network, networkAddress, image, method = \"\"):\n deviceObject.NetworkDeviceObject.__init__(self, deviceManager, name, description, network, networkAddress, deviceType, deviceTypeName, image)\n\n self._valueDescriptions = valueDescriptions\n self._alarmDescriptions = alarmDescriptions\n\n self.loadValuesFromDatabase()\n self.loadAdvisoriesFromDatabase()\n\n self.configModbusList = []\n # [\"1\",\"IT\",7],\n # [\"1\",\"AO1L\",8],\n # [\"1\",\"AO1H\",9],\n # [\"2\",\"AO2L\",10],\n # [\"2\",\"AO2H\",11],\n # [\"2\",\"OFST\",12],\n # [\"3\",\"PB\",13],\n # [\"3\",\"DIFF\",14],\n # [\"3\",\"LOC\",15],\n # [\"4\",\"RE1\",16],\n # [\"4\",\"RE2\",17],\n # [\"4\",\"RS\",18],\n # [\"5\",\"SP\",19],\n # [\"5\",\"BAUD\",20],\n # [\"5\",\"PRTY\",21],\n # [\"6\",\"ID\",22],\n # [\"6\",\"RHOF\",23],\n # [\"6\",\"DST\",24],\n # [\"7\",\"FAN\",25],\n # [\"7\",\"HFAN\",26],\n # [\"7\",\"CFAN\",27],\n # [\"8\",\"OFAN\",28],\n # [\"8\",\"UFAN\",29],\n # [\"8\",\"H1F\",30],\n # [\"9\",\"H2F\",31],\n # [\"9\",\"C1F\",32],\n # [\"9\",\"C2F\",33],\n # [\"10\",\"IDF\",34],\n # [\"10\",\"DHF\",35],\n # [\"10\",\"HDLY\",36],\n # [\"11\",\"CDLY\",37],\n # [\"11\",\"RHOP\",38],\n # [\"11\",\"OHSP\",39],\n # [\"12\",\"OHDF\",40],\n # [\"12\",\"UHSP\",41],\n # [\"12\",\"UHDF\",42],\n # [\"13\",\"SPLY\",43],\n # [\"13\",\"ECO\",44],\n # [\"13\",\"ECDO\",45],\n # [\"14\",\"ECON\",46],\n # [\"14\",\"ECOF\",47],\n # [\"14\",\"SPDT\",48],\n # [\"15\",\"SPT\",49],\n # [\"15\",\"HTSP\",50],\n # [\"15\",\"RSOF\",51],\n # [\"16\",\"SSOF\",52],\n # [\"16\",\"SUNOCC\",61],\n # [\"16\",\"SUNOCCSETCL\",75],\n # [\"17\",\"SUNOCCSETHT\",76],\n # [\"17\",\"SUNUNOCC\",62],\n # [\"17\",\"SUNUNOCCSETCL\",77],\n # [\"18\",\"SUNUNOCCSETHT\",78],\n # [\"18\",\"MONOCC\",63],\n # [\"18\",\"MONOCCSETCL\",79],\n # [\"19\",\"MONOCCSETHT\",80],\n # [\"19\",\"MONUNOCC\",64],\n # [\"19\",\"MONUNOCCSETCL\",81],\n # [\"20\",\"MONUNOCCSETHT\",82],\n # [\"20\",\"TUEOCC\",65],\n # [\"20\",\"TUEOCCSETCL\",83],\n # [\"21\",\"TUEOCCSETHT\",84],\n # [\"21\",\"TUEUNOCC\",66],\n # [\"21\",\"TUEUNOCCSETCL\",85],\n # [\"22\",\"TUEUNOCCSETHT\",86],\n # [\"22\",\"WEDOCC\",67],\n # [\"22\",\"WEDOCCSETCL\",87],\n # [\"23\",\"WEDOCCSETHT\",88],\n # [\"23\",\"WEDUNOCC\",68],\n # [\"23\",\"WEDUNOCCSETCL\",89],\n # [\"24\",\"WEDUNOCCSETHT\",90],\n # [\"24\",\"THUOCC\",69],\n # [\"24\",\"THUOCCSETCL\",91],\n # [\"25\",\"THUOCCSETHT\",92],\n # [\"25\",\"THUUNOCC\",70],\n # [\"25\",\"THUUNOCCSETCL\",93],\n # [\"26\",\"THUUNOCCSETHT\",94],\n # [\"26\",\"FRIOCC\",71],\n # [\"26\",\"FRIOCCSETCL\",95],\n # [\"27\",\"FRIOCCSETHT\",96],\n # [\"27\",\"FRIUNOCC\",72],\n # [\"27\",\"FRIUNOCCSETCL\",97],\n # [\"28\",\"FRIUNOCCSETHT\",98],\n # [\"28\",\"SATOCC\",73],\n # [\"28\",\"SATOCCSETCL\",99],\n # 
[\"29\",\"SATOCCSETHT\",100],\n # [\"29\",\"SATUNOCC\",74],\n # [\"29\",\"SATUNOCCSETCL\",101],\n # [\"30\",\"SATUNOCCSETHT\",102]\n # ]\n\n\n def _prepareSetDeviceConfigurationTransactions(self):\n if self._newDeviceConfigurationValues is None:\n return None\n\n retval = []\n networkTransTag = \"\"\n networkTrans = None\n\n for configItem in self.configModbusList:\n # [0] = MessageGroup/Tag; [1] = actual parameter, [2] = register address\n if networkTransTag != configItem[0]:\n networkTransTag = configItem[0]\n networkTrans = NetworkTransaction(\"WriteConfig\" + networkTransTag)\n retval.append(networkTrans)\n\n key = configItem[1]\n\n newConfigValue = self._newDeviceConfigurationValues[key]\n\n # LEO sending data to Device\n if self._valueDescriptions[key][\"dataType\"] == dataTypeFloat:\n if self._valueDescriptions[key][\"unitType\"] == unitTypeTemperature:\n newConfigValue = self._convertC2F(newConfigValue)\n elif self._valueDescriptions[key][\"unitType\"] == unitTypeDeltaTemperature:\n newConfigValue = self._convertDeltaC2F(newConfigValue)\n newConfigValue = newConfigValue\n value = self._convertFromFloatValue(newConfigValue * 10)\n elif self._valueDescriptions[key][\"dataType\"] == dataTypeBool:\n value = 0 if newConfigValue == False else 1\n else:\n value = self._convertFromIntValue(newConfigValue)\n networkTrans.transactions.append(NetworkMessage(writeHoldingRegister(configItem[2], value), configItem[1]))\n return retval\n\n\n def _prepareUpdateDeviceConfigurationTransactions(self):\n retval = []\n networkTransTag = \"\"\n networkTrans = None\n\n for configItem in self.configModbusList:\n if networkTransTag != configItem[0]:\n networkTransTag = configItem[0]\n networkTrans = NetworkTransaction(\"ReadConfig\" + networkTransTag)\n retval.append(networkTrans)\n networkTrans.transactions.append(NetworkMessage(readHoldingRegisters(configItem[2], 1), configItem[1]))\n return retval\n\n def _prepareListOfTransactions(self, modbusList, tagPrefix):\n retval = []\n networkTransTag = \"\"\n networkTrans = None\n\n for item in modbusList:\n if networkTransTag != item[0]:\n networkTransTag = item[0]\n networkTrans = NetworkTransaction(tagPrefix + networkTransTag)\n retval.append(networkTrans)\n networkTrans.transactions.append(NetworkMessage(readHoldingRegisters(item[2], 1), item[1]))\n return retval\n\n\n def _prepareLoggingTransactions(self, valueToLog):\n if len(valueToLog) == 0:\n return None\n\n networkTrans = NetworkTransaction(\"Logging\")\n\n if \"ACTIVE SETPT\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(1, 1), \"ACTIVE SETPT\"))\n\n if \"CONTROL TEMP\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(0, 1), \"CONTROL TEMP\"))\n\n if \"COOL SETPT\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readHoldingRegisters(0, 1), \"COOL SETPT\"))\n\n if \"HEAT SETPT\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readHoldingRegisters(1, 1), \"HEAT SETPT\"))\n\n if \"SPACE TEMP 1\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(6, 1), \"SPACE TEMP 1\"))\n\n if \"SPACE TEMP 2\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(7, 1), \"SPACE TEMP 2\"))\n\n if \"SUPPLY TEMP\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(8, 1), \"SUPPLY TEMP\"))\n\n if \"ROOM RH%\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(4, 1), \"ROOM RH%\"))\n\n if 
\"DEW POINT\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(5, 1), \"DEW POINT\"))\n\n if \"MODE\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readDiscreteInputRegisters(0), \"MODE\"))\n\n if \"OCCUPANCY\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(9, 1), \"OCCUPANCY\"))\n\n if \"FAN STATUS\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readDiscreteInputRegisters(5), \"FAN STATUS\"))\n\n if \"VS FAN SPEED\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(3, 1), \"VS FAN SPEED\"))\n\n if \"HEAT STAGE1\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readDiscreteInputRegisters(3), \"HEAT STAGE1\"))\n\n if \"HEAT STAGE2\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readDiscreteInputRegisters(4), \"HEAT STAGE2\"))\n\n if \"COOL STAGE1\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readDiscreteInputRegisters(1), \"COOL STAGE1\"))\n\n if \"COOL STAGE2\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readDiscreteInputRegisters(2), \"COOL STAGE2\"))\n\n if \"DEHUM STATUS\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readDiscreteInputRegisters(6), \"DEHUM STATUS\"))\n\n if \"DAMPER POS\" in valueToLog:\n networkTrans.transactions.append(NetworkMessage(readInputRegisters(2, 1), \"DAMPER POS\"))\n\n return [ networkTrans ]\n\n\n def _prepareUpdateStatusTransactions(self):\n statusList = [\n [\"1\",\"Probe 1\",201],\n [\"1\",\"Probe 2\",202],\n [\"1\",\"Probe 3\",203],\n [\"2\",\"181_bitfield\",181],\n [\"2\",\"101_bitfield\",101],\n #[\"3\",\"DEFROST Phase\",557],\n [\"4\",\"Status Light\",502],\n [\"4\",\"Status Aux\",503],\n [\"5\",\"Status Resistors\",504],\n [\"5\",\"Set point\",558]\n ]\n return self._prepareListOfTransactions(statusList, \"Status\")\n\n\n def _executeTransaction(self, networkTrans):\n if not networkTrans.online:\n self._nullOutputValues()\n else:\n\n for transaction in networkTrans.transactions:\n with self.lock:\n if isinstance(transaction.response, readHoldingRegistersResponse):\n value = transaction.response.registers[0]\n\n if transaction.tag == \"101_bitfield\":\n # log.debug(\"Got Status Flag\")\n self._values[\"DoorDigitalInput\"] = ((value & 0x4) > 0)\n #self._alarm = self._values[\"ALARM\"]\n #self._values[\"MUTE\"] = ((value & 0x2) > 0)\n #self._values[\"DEFROST\"] = ((value & 0x4) > 0)\n self._values[\"MultifunctionDigitalInput\"] = ((value & 0x8) > 0)\n self.ProcessVirtualProps( transaction.tag )\n\n # elif transaction.tag == \"101_bitfield\":\n # # log.debug(\"Got Alarm Flag\")\n # self._values[\"DoorDigitalInput\"] = ((value & 0x1) > 0)\n # self.checkBooleanAdvisory(\"T1PROBEALARM\", self._values[\"T1PROBEALARM\"])\n # self._values[\"T2PROBEALARM\"] = ((value & 0x2) > 0)\n # self.checkBooleanAdvisory(\"T2PROBEALARM\", self._values[\"T2PROBEALARM\"])\n # self._values[\"T3PROBEALARM\"] = ((value & 0x4) > 0)\n # self.checkBooleanAdvisory(\"T3PROBEALARM\", self._values[\"T3PROBEALARM\"])\n # self._values[\"HIGHTEMPALARM\"] = ((value & 0x8) > 0)\n # self.checkBooleanAdvisory(\"HIGHTEMPALARM\", self._values[\"HIGHTEMPALARM\"])\n # self._values[\"LOWTEMPALARM\"] = ((value & 0x10) > 0)\n # self.checkBooleanAdvisory(\"LOWTEMPALARM\", self._values[\"LOWTEMPALARM\"])\n # self._values[\"HIGHCONDALARM\"] = ((value & 0x20) > 0)\n # self.checkBooleanAdvisory(\"HIGHCONDALARM\", self._values[\"HIGHCONDALARM\"])\n # 
self._values[\"HIGHPRESALARM\"] = ((value & 0x40) > 0)\n # self.checkBooleanAdvisory(\"HIGHPRESALARM\", self._values[\"HIGHPRESALARM\"])\n # self._values[\"DOOROPENALARM\"] = ((value & 0x80) > 0)\n # self.checkBooleanAdvisory(\"DOOROPENALARM\", self._values[\"DOOROPENALARM\"])\n # self._values[\"CONDCLEANALARM\"] = ((value & 0x100) > 0)\n # self.checkBooleanAdvisory(\"CONDCLEANALARM\", self._values[\"CONDCLEANALARM\"])\n # self.ProcessVirtualProps( transaction.tag )\n\n elif transaction.tag == \"181_bitfield\":\n # log.debug(\"Got Output Flag\")\n self._values[\"Output K1\"] = ((value & 0x10) > 0)\n self._values[\"Output K2\"] = ((value & 0x20) > 0)\n self._values[\"Output K3\"] = ((value & 0x40) > 0)\n self._values[\"Output K4\"] = ((value & 0x80) > 0)\n\n # elif transaction.tag == \"CFG_1\":\n # self._values[\"T2\"] = ((value & 0x1) > 0)\n # self._values[\"FID\"] = ((value & 0x20) > 0)\n # self._values[\"LOC\"] = ((value & 0x40) > 0)\n # self._values[\"STBY\"] = ((value & 0x80) > 0)\n\n # elif transaction.tag == \"CFG_2\":\n # self._values[\"DS\"] = ((value & 0x1) > 0)\n # self._values[\"C-H\"] = ((value & 0x2) > 0)\n # self._values[\"INP\"] = ((value & 0x40) > 0)\n # self._values[\"LIGHTS\"] = ((value & 0x20) > 0)\n # self._values[\"SB\"] = ((value & 0x80) > 0)\n\n # elif transaction.tag == \"DEFROST Phase\":\n # value1 = ((value & 0x2560) > 0)\n # value2 = ((value & 0x5120) > 0)\n # value3 = ((value & 0x10240) > 0)\n # value4 = ((value & 0x20480) > 0)\n # self._values[\"STANDBY\"] = ((value & 0x80) > 0)\n\n # elif transaction.tag == \"CMD_1\":\n # self._values[\"MDEF\"] = ((value & 0x4) > 0)\n\n else: # LEO reading up from Device\n key = transaction.tag\n if self._valueDescriptions[key][\"dataType\"] == dataTypeFloat:\n self._values[key] = self._convertToFloatValue(value)\n if int(self._values[\"SCL\"]) == 2:\n if self._valueDescriptions[key][\"unitType\"] == unitTypeTemperature:\n self._values[key] = self._convertF2C(self._values[key])\n elif self._valueDescriptions[key][\"unitType\"] == unitTypeDeltaTemperature:\n self._values[key] = self._convertDeltaF2C(self._values[key])\n elif self._values[\"SCL\"] == 0:\n self._values[key] = self._values[key] / 10.0\n elif self._valueDescriptions[key][\"dataType\"] == dataTypeBool:\n self._values[key] = False if value == 0 else True\n else:\n self._values[key] = self._convertToIntValue(value)\n\n if \"ReadConfig\" in networkTrans.tag:\n self.saveValuesToDatabase()\n elif \"WriteConfig\" in networkTrans.tag:\n self.updateDeviceConfiguration()\n\n def _convertF2C(self, value):\n return (value - 32) / 1.8\n\n def _convertC2F(self, value):\n return (value * 1.8) + 32\n\n def _convertDeltaF2C(self, value):\n return value / 1.8\n\n def _convertDeltaC2F(self, value):\n return value * 1.8\n\n def _convertToFloatValue(self, value):\n if value > 0x7fff:\n return float(value - 0x10000)\n return float(value)\n\n def _convertFromFloatValue(self, value):\n if value < 0:\n return int(round(value + 0x10000))\n return int(round(value))\n\n def _convertToIntValue(self, value):\n if value > 0x7fff:\n return int(value - 0x10000)\n return int(value)\n\n def _convertFromIntValue(self, value):\n if value < 0:\n return int(value + 0x10000)\n return int(value)\n\n\t\n\n #This below code is under execute transactions\n\n # for transaction in networkTrans.transactions:\n # with self.lock:\n # if isinstance(transaction.response, readHoldingRegistersResponse) :\n # value = transaction.response.registers[0]\n # decimalDivide = 0\n # key = transaction.tag\n # if 
self._valueDescriptions[key][\"dataType\"] == dataTypeFloat:\n # if decimalDivide == 0:\n # self._values[key] = self._convertToFloatValue(value)/10\n # else:\n # self._values[key] = value\n # if self._valueDescriptions[key][\"unitType\"] == unitTypeTemperature:\n # self._values[key] = self._convertF2C(self._values[key])\n # elif self._valueDescriptions[key][\"unitType\"] == unitTypeDeltaTemperature:\n # self._values[key] = self._convertDeltaF2C(self._values[key])\n # self._values[key] = self._values[key]\n # elif self._valueDescriptions[key][\"dataType\"] == dataTypeBool:\n # self._values[key] = False if value == 0 else True\n # else:\n # # log.debug(key) ###########################################\n # self._values[key] = self._convertToIntValue(value)\n\n # elif isinstance(transaction.response, readInputRegistersResponse) :\n # value = transaction.response.registers[0]\n # decimalDivide = 0\n # key = transaction.tag\n # if self._valueDescriptions[key][\"dataType\"] == dataTypeFloat:\n # if decimalDivide == 0:\n # self._values[key] = self._convertToFloatValue(value)/10\n # else:\n # self._values[key] = value\n # if self._valueDescriptions[key][\"unitType\"] == unitTypeTemperature:\n # self._values[key] = self._convertF2C(self._values[key])\n # elif self._valueDescriptions[key][\"unitType\"] == unitTypeDeltaTemperature:\n # self._values[key] = self._convertDeltaF2C(self._values[key])\n # self._values[key] = self._values[key]\n # elif self._valueDescriptions[key][\"dataType\"] == dataTypeBool:\n # self._values[key] = False if value == 0 else True\n # else:\n # # log.debug(key) ###########################################\n # self._values[key] = self._convertToIntValue(value)\n\n # elif isinstance(transaction.response, readDiscreteInputRegistersResponse) :\n # value = transaction.response.registers\n # decimalDivide = 0\n # key = transaction.tag\n # if self._valueDescriptions[key][\"dataType\"] == dataTypeFloat:\n # if decimalDivide == 0:\n # self._values[key] = self._convertToFloatValue(value)/10\n # else:\n # self._values[key] = value\n # if self._valueDescriptions[key][\"unitType\"] == unitTypeTemperature:\n # self._values[key] = self._convertF2C(self._values[key])\n # elif self._valueDescriptions[key][\"unitType\"] == unitTypeDeltaTemperature:\n # self._values[key] = self._convertDeltaF2C(self._values[key])\n # self._values[key] = self._values[key]\n # elif self._valueDescriptions[key][\"dataType\"] == dataTypeBool:\n # self._values[key] = False if value == 0 else True\n # else:\n # # log.debug(key) ###########################################\n # self._values[key] = self._convertToIntValue(value)\n\n # elif isinstance(transaction.response, readCoilsRegistersResponse) :\n # value = transaction.response.registers\n # decimalDivide = 0\n # key = transaction.tag\n # if self._valueDescriptions[key][\"dataType\"] == dataTypeFloat:\n # if decimalDivide == 0:\n # self._values[key] = self._convertToFloatValue(value)/10\n # else:\n # self._values[key] = value\n # if self._valueDescriptions[key][\"unitType\"] == unitTypeTemperature:\n # self._values[key] = self._convertF2C(self._values[key])\n # elif self._valueDescriptions[key][\"unitType\"] == unitTypeDeltaTemperature:\n # self._values[key] = self._convertDeltaF2C(self._values[key])\n # self._values[key] = self._values[key]\n # elif self._valueDescriptions[key][\"dataType\"] == dataTypeBool:\n # self._values[key] = False if value == 0 else True\n # else:\n # # log.debug(key) ###########################################\n # self._values[key] = 
self._convertToIntValue(value)\n\n\n","sub_path":"Factory/monitor-2.0-factory/monitor/system/devices/EVCO-EV3224/__EVCO-EV3224__.py","file_name":"__EVCO-EV3224__.py","file_ext":"py","file_size_in_byte":44790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"610969079","text":"import numpy as np\r\nimport pandas as pd \r\nimport streamlit as st\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom scipy.stats import norm\r\n\r\nclass VaR:\r\n \r\n def __init__(self, df):\r\n \r\n self.df = df\r\n self.returns = self.df.pct_change()\r\n self.cov_matrix = self.returns.cov()\r\n \r\n def ef_var(self, min_var_weights, max_sharpe_weights):\r\n \r\n min_var_avg_rets = self.returns.mean()\r\n min_var_portfolio_mean = min_var_avg_rets @ min_var_weights\r\n min_var_portfolio_std = np.sqrt(min_var_weights.T @ self.cov_matrix @ min_var_weights)\r\n min_var_std_investment = 100000 * min_var_portfolio_std\r\n\r\n max_sharpe_avg_rets = self.returns.mean()\r\n max_sharpe_portfolio_mean = max_sharpe_avg_rets @ max_sharpe_weights\r\n max_sharpe_portfolio_std = np.sqrt(max_sharpe_weights.T @ self.cov_matrix @ max_sharpe_weights)\r\n max_sharpe_std_investment = 100000 * max_sharpe_portfolio_std\r\n \r\n x = np.arange(-0.05, 0.055, 0.001)\r\n \r\n min_var_norm_dist = norm.pdf(x, min_var_portfolio_mean, min_var_portfolio_std)\r\n max_sharpe_norm_dist = norm.pdf(x, max_sharpe_portfolio_mean, max_sharpe_portfolio_std)\r\n \r\n fig1 = plt.figure(figsize = (6,6))\r\n plt.plot(x, min_var_norm_dist, color='g', label = \"Mininum Variance\")\r\n plt.plot(x, max_sharpe_norm_dist, color='r', label = \"Maximum Sharpe\")\r\n plt.legend()\r\n plt.xlabel(\"Returns (%)\")\r\n plt.ylabel(\"Frequency\")\r\n plt.title(\"1 Day VaR returns distribution\")\r\n plt.grid(True)\r\n \r\n min_var = norm.ppf(0.05, min_var_portfolio_mean, min_var_portfolio_std)\r\n max_sharpe = norm.ppf(0.05, max_sharpe_portfolio_mean, max_sharpe_portfolio_std)\r\n \r\n min_var_mean_investment = 100000 * (1 + min_var_portfolio_mean)\r\n min_var_std_investment = 100000 * min_var_portfolio_std\r\n \r\n max_sharpe_mean_investment = 100000 * (1 + max_sharpe_portfolio_mean)\r\n max_sharpe_std_investment = 100000 * max_sharpe_portfolio_std\r\n \r\n min_var_cutoff = norm.ppf(0.05, min_var_mean_investment, min_var_std_investment)\r\n max_sharpe_cutoff = norm.ppf(0.05, max_sharpe_mean_investment, max_sharpe_std_investment)\r\n \r\n min_var_historical_var = 100000 - min_var_cutoff\r\n max_sharpe_historical_var = 100000 - max_sharpe_cutoff\r\n \r\n min_var_array = []\r\n max_sharpe_array = []\r\n \r\n num_days = int(15)\r\n for x in range(1, num_days + 1):\r\n \r\n min_var_array.append(np.round(min_var_historical_var * np.sqrt(x),2))\r\n max_sharpe_array.append(np.round(max_sharpe_historical_var * np.sqrt(x),2))\r\n \r\n fig2 = plt.figure(figsize = (6,6))\r\n plt.xlabel(\"Day\")\r\n plt.ylabel(\"Max portfolio loss (USD)\")\r\n plt.title(\"Max portfolio loss (VaR) over 15-day period\")\r\n plt.plot(min_var_array, \"g\", label = \"mininum variance\")\r\n plt.plot(max_sharpe_array, \"r\", label = \"maximum variance\") \r\n plt.legend()\r\n plt.grid(True)\r\n \r\n col1, col2 = st.beta_columns(2)\r\n \r\n with col1:\r\n \r\n st.pyplot(fig1)\r\n st.subheader(\"Mininum Variance Portfolio\")\r\n st.write(\"mininum variance portfolio expected daily return: {}%\".format(round(min_var_portfolio_mean * 100, 4)))\r\n st.write(\"mininum variance portfolio daily volatility: {}\".format(round(min_var_portfolio_std, 6)))\r\n 
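# Hedged sketch (made-up weights and returns, not from the Streamlit app above) of the
# portfolio statistics that ef_var()/standard_var() build on: the mean return w.r and
# the volatility sqrt(w' C w) of a weighted portfolio of daily returns.
import numpy as np
import pandas as pd

rets = pd.DataFrame({"A": [0.010, -0.020, 0.015], "B": [0.005, 0.010, -0.005]})
w = np.array([0.6, 0.4])
mu = rets.mean() @ w                         # expected daily portfolio return
sigma = np.sqrt(w.T @ rets.cov() @ w)        # daily portfolio volatility
print(round(float(mu), 6), round(float(sigma), 6))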
st.write(\"1 day mininum variance VaR with 95% confidence: {}%\".format(round((100 * min_var), 3)))\r\n st.write(\"mininum variance cutoff value: ${:,}\".format(round(min_var_cutoff, 2)))\r\n st.write(\"mininum variance historical VaR: ${:,}\".format(round(min_var_historical_var, 2)))\r\n \r\n with col2:\r\n \r\n st.pyplot(fig2)\r\n st.subheader(\"Maximum Sharpe Portfolio\")\r\n st.write(\"maximum sharpe portfolio expected daily return: {}%\".format(round(max_sharpe_portfolio_mean * 100, 4)))\r\n st.write(\"maximum sharpe portfolio daily volatility: {}\".format(round(max_sharpe_portfolio_std, 6)))\r\n st.write(\"1 day maximum sharpe VaR with 95% confidence: {}%\".format(round((100 * max_sharpe), 3)))\r\n st.write(\"maximum sharpe cutoff value: ${:,}\".format(round(max_sharpe_cutoff,2)))\r\n st.write(\"maximum sharpe historical VaR: ${:,}\".format(round(max_sharpe_historical_var, 2)))\r\n \r\n st.write(\"_____________________________________________\")\r\n \r\n \r\n def standard_var(self, weights):\r\n \r\n st.write(\"Assumming ${:,} portfolio\".format(100000))\r\n \r\n avg_rets = self.returns.mean()\r\n portfolio_mean = avg_rets @ weights\r\n portfolio_std = np.sqrt(weights.T @ self.cov_matrix @ weights)\r\n std_investment = 100000 * portfolio_std\r\n \r\n x = np.arange(-0.05, 0.055, 0.001)\r\n norm_dist = norm.pdf(x, portfolio_mean, portfolio_std)\r\n \r\n fig1 = plt.figure(figsize = (6,6))\r\n plt.xlabel(\"Returns (%)\")\r\n plt.ylabel(\"Frequency\")\r\n plt.plot(x, norm_dist)\r\n plt.title(\"1 Day VaR returns distribution\")\r\n plt.grid(True)\r\n \r\n var = norm.ppf(0.05, portfolio_mean, portfolio_std)\r\n mean_investment = 100000 * (1 + portfolio_mean)\r\n std_investment = 100000 * portfolio_std\r\n cutoff = norm.ppf(0.05, mean_investment, std_investment)\r\n historical_var = 100000 - cutoff\r\n \r\n array = []\r\n num_days = int(15)\r\n \r\n for x in range(1, num_days + 1):\r\n array.append(np.round(historical_var * np.sqrt(x), 2))\r\n \r\n fig2 = plt.figure(figsize = (6,6))\r\n plt.xlabel(\"Day\")\r\n plt.ylabel(\"Max portfolio loss (USD)\")\r\n plt.title(\"Max portfolio loss (VaR) over 15-day period\")\r\n plt.plot(array) \r\n plt.grid(True)\r\n \r\n col1, col2 = st.beta_columns(2)\r\n \r\n with col1: \r\n st.pyplot(fig1)\r\n \r\n with col2:\r\n st.pyplot(fig2)\r\n \r\n st.write(\"expected daily return: {}%\".format(round(portfolio_mean * 100, 4)))\r\n st.write(\"expected daily volatility {}\".format(round(portfolio_std, 6)))\r\n st.write(\"1 day VaR with 95% confidence: {}%\".format(round((100 * var), 3)))\r\n st.write(\"cutoff value: ${:,}\".format(round(cutoff, 2)))\r\n st.write(\"historical VaR: ${:,}\".format(round(historical_var, 2)))\r\n st.write(\"_____________________________________________\")\r\n \r\n \r\n \r\n ","sub_path":"streamlit_var.py","file_name":"streamlit_var.py","file_ext":"py","file_size_in_byte":6730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"311125345","text":" \nfrom __future__ import division\nimport argparse\nimport cv2\n\ndef __pre_process(img):\n #img = Image.open(img).convert('LA')\n img = cv2.imread(img, cv2.IMREAD_GRAYSCALE)\n height, width = img.shape[:2]\n\n if width > 5000:\n new_img = cv2.resize(img, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_AREA)\n h, w = new_img.shape[:2]\n else:\n new_img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)\n h, w = new_img.shape[:2]\n\n if h % 8 != 0 or w % 8 != 0:\n size = (w+(8-w%8)), (h+(8-h%8)) \n new_img = 
cv2.resize(new_img, (size))\n else:\n return new_img \n\n return new_img\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-ori\", type=str, required=True, help=\"Path to the original image \")\n ap.add_argument(\"-res\", type=str, required=True, help=\"Path to the output image\")\n args = ap.parse_args()\n\n if args.ori: \n res = __pre_process(args.ori)\n cv2.imwrite(args.res, res)\n\nif __name__ == \"__main__\":\n main()","sub_path":"program skripsi/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"570243526","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 23 20:03:18 2019\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nfrom flask import Flask\r\nfrom jinja2 import Markup, Environment, FileSystemLoader\r\nfrom pyecharts.globals import CurrentConfig\r\n\r\n# 关于 CurrentConfig,可参考 [基本使用-全局变量]\r\nCurrentConfig.GLOBAL_ENV = Environment(loader=FileSystemLoader(\"./templates\"))\r\n\r\nfrom pyecharts import options as opts\r\nfrom pyecharts.charts import Bar, Line, Grid\r\nimport pandas as pd\r\n\r\n\r\nfor each_table in ['data/C#.xlsx','data/C++.xlsx','data/HTML+CSS.xlsx','data/Java.xlsx','data/JavaScript.xlsx','data/PHP.xlsx','data/Python.xlsx','data/Ruby.xlsx','data/Swift.xlsx','data/TypeScript.xlsx']:\r\n df = pd.read_excel(each_table)\r\n df[\"industry_first\"]= df.industry.str.split(' ').str[0].str.split(\"/\").str[0].str.split(\",\").str[0].str.split(\"(\").str[0].str.split(\"|\").str[0].str.split(\"丨\").str[0]\r\n df[\"wage_avg\"] = (df.wage_min + df.wage_max)/2\r\n df.to_excel(each_table)\r\n#加wage_avg和industry_first两列\r\n\r\n avg_wage = round(df.wage_avg.mean(),2)\r\n count = df.groupby(\"industry_first\").industry_first.count().sort_values(ascending=False)\r\n industries = list(count.keys()[0:20])\r\n count = list(count[0:20])\r\n salaries = []\r\n data = []\r\n\r\n for each in industries:\r\n data.append({\"行业\":each,\"平均工资\":round(df.query(\"industry_first == '\"+ each +\"'\" ).wage_avg.mean(), 2)})\r\n salaries.append(round(df.query(\"industry_first == '\"+ each +\"'\" ).wage_avg.mean(), 2))\r\n\r\n print(each_table)\r\n print(industries)\r\n print(count )\r\n print(salaries)\r\n print(avg_wage)\r\n print('*'*50)\r\n\r\napp = Flask(__name__, static_folder=\"templates\")\r\n\r\n\r\ndef bar_base() -> Bar:\r\n c = (\r\n Bar()\r\n .add_xaxis(industries)\r\n .add_yaxis('工资', salaries)\r\n .set_global_opts(title_opts={\"text\": \"C#\", \"subtext\": \"工资与行业关系图\"})\r\n)\r\n return c\r\n\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n c = bar_base()\r\n return Markup(c.render_embed())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run()","sub_path":"source_code/pyecharts-flask-demo/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"343749187","text":"# -*- coding: utf-8 -*-\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom flask import jsonify\n\nBase = declarative_base()\n\nclass Puppy(Base):\n\n # define tables\n __tablename__ = 'puppy'\n\n # define columns\n name =Column(String(80), nullable = False)\n id = Column(Integer, primary_key = True)\n description = Column(String(250))\n\n # define query response: equivalent to getter setter\n @property\n def serialize(self):\n \"\"\"Return object data 
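# Small standalone sketch of the "pad up to a multiple of 8" arithmetic used by
# __pre_process above; round_up_to is a hypothetical helper and also covers the
# already-aligned case that the original guards with a separate branch.
def round_up_to(n, multiple=8):
    return n if n % multiple == 0 else n + (multiple - n % multiple)

assert round_up_to(1023) == 1024
assert round_up_to(1024) == 1024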
in easily serializeable format\"\"\"\n return {\n \t 'id': self.id,\n 'name': self.name,\n 'description' : self.description\n }\n\nengine = create_engine('sqlite:///puppies.db', encoding='utf8')\nBase.metadata.create_all(engine)\n\n\n\n# ----------------------\n# Puppy methods: get, make, put, delete \n# ----------------------\n\ndef get_all_puppies(session):\n puppies = session.query(Puppy).all()\n return jsonify(puppies=[i.serialize for i in puppies])\n \ndef create_puppy(name, description, session):\n puppy = Puppy(name=name, description=description)\n session.add(puppy)\n session.commit()\n return jsonify(puppy=puppy.serialize) \n\ndef get_puppy(id, session):\n puppy = session.query(Puppy).filter_by(id=id).one()\n return jsonify(puppy=puppy.serialize)\n \ndef update_puppy(id, name, description, session):\n puppy = session.query(Puppy).filter_by(id=id).one()\n\n if name:\n puppy.name = name\n if description:\n puppy.description = description\n\n session.add(puppy)\n session.commit()\n\n return jsonify(message=\"Updating a Puppy with id %s\" % id) \n\ndef delete_puppy(id, session):\n puppy = session.query(Puppy).filter_by(id=id).one()\n\n session.delete(puppy)\n session.commit()\n\n return jsonify(message=\"Removing Puppy with id %s\" % id)\n\n","sub_path":"models/puppy.py","file_name":"puppy.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"260645175","text":"import PyQt5.uic\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QMainWindow\n\nfrom src.ServiceInteraction.data import weekend_info\nfrom src.ServiceInteraction.positionmodel import PositionModel\nfrom src.ServiceInteraction.reader import DataReader\nfrom src.overlaywidget import OverlayWidget\n\nui_class = PyQt5.uic.loadUiType('gui/mainwindow.ui')\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n\n self.ui = ui_class[0]()\n self.ui.setupUi(self)\n\n self.data_reader = DataReader()\n\n self.ui.actionStart.triggered.connect(self.startReader)\n self.ui.actionStop.triggered.connect(self.stopReader)\n\n self.mw = OverlayWidget()\n\n @pyqtSlot()\n def startReader(self):\n self.data_reader.run()\n self.mw.setup(PositionModel(weekend_info), 'qml/EntryList.qml')\n self.mw.show()\n\n @pyqtSlot()\n def stopReader(self):\n pass\n\n\n","sub_path":"src/gui/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"342536943","text":"from collections import Counter\nimport tfidf\nimport math\nfrom nose.tools import eq_\n\n\ndef test_max_word_occurance():\n doc = Counter('')\n eq_(tfidf.max_word_occurance(doc), 0)\n\n doc1 = Counter('abbcccdddd')\n eq_(tfidf.max_word_occurance(doc1), 4)\n\n doc2 = Counter('abbcccddd')\n\n eq_(tfidf.max_word_occurance([doc1, doc2]), 4)\n\n\ndef test_tf():\n doc = Counter('abbcccdddd')\n eq_(tfidf.tf('', doc), 0.0)\n eq_(tfidf.tf('a', doc), 0.25)\n eq_(tfidf.tf('b', doc), 0.5)\n eq_(tfidf.tf('d', doc), 1.0)\n\n\ndef test_idf():\n doc_set = {i: Counter('a') for i in range(4)}\n doc_set.update({i: Counter('b') for i in range(4, 6)})\n\n eq_(tfidf.idf('a', doc_set), math.log(6 / 4.0))\n eq_(tfidf.idf('b', doc_set), math.log(6 / 
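# Worked example of the tf/idf values the test_tfidf tests above expect, assuming
# tfidf.tf is the term count divided by the count of the most frequent term, and
# tfidf.idf is log(N / document frequency).
import math
from collections import Counter

doc = Counter("abbcccdddd")
tf_b = doc["b"] / max(doc.values())                  # 2 / 4 = 0.5
doc_set = {i: Counter("a") for i in range(4)}
doc_set.update({i: Counter("b") for i in range(4, 6)})
df_b = sum(1 for d in doc_set.values() if "b" in d)  # "b" appears in 2 of 6 docs
idf_b = math.log(len(doc_set) / df_b)                # log(6 / 2)
print(tf_b, round(idf_b, 4))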
2.0))\n","sub_path":"2014Fall/Project_1B/tests/test_tfidf.py","file_name":"test_tfidf.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"20100865","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom datetime import datetime\n\n\nn_inputs = 10\nn_hidden1 = 500\nn_hidden2 = 400\nn_hidden3 = 200\nn_outputs = 1\nlearning_rate = 0.01\nn_epoch = 150\nbatch_size = 100\n\nnow = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\nroot_logdir = \"tf_logs\"\nlogdir = \"{}/L_Form-{}\".format(root_logdir, now)\n\n#read data\ndf1 = pd.read_excel('simulationDaten_1552308049255.xlsx')\ndf2 = pd.read_excel('simulationDaten_1552313754763.xlsx')\ndf3 = pd.read_excel('simulationDaten_1552320706273.xlsx')\ndf = pd.concat([df1, df2, df3])\ndf['Elastic Modulus(N/mm^2)'] = 200000\ndf['Poissons Ration'] = 0.29\ndf['Shear Modulus(N/mm^2)'] = 77000\n\ntrain_size = 0.8\nX_train, X_test, y_train, y_test = train_test_split(df.drop(columns=['maxDisp(mm)']),\n df['maxDisp(mm)'],\n test_size=1-train_size,\n random_state=88)\n\nscaler = StandardScaler() # or MaxAbsScaler(), MinMaxScaler()\nX_train_nor = pd.DataFrame(scaler.fit_transform(X_train.values), index=X_train.index, columns=X_train.columns)\nX_test_nor = pd.DataFrame(scaler.transform(X_test.values), index=X_test.index, columns=X_test.columns)\n\nX = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X')\ny = tf.placeholder(tf.float32, shape=(None), name='y')\n\nactivation = tf.nn.relu\nhe_init = tf.contrib.layers.variance_scaling_initializer()\nhidden1 = tf.contrib.layers.fully_connected(X, n_hidden1, activation_fn=activation, weights_initializer=he_init)\nhidden2 = tf.contrib.layers.fully_connected(hidden1, n_hidden2, activation_fn=activation, weights_initializer=he_init)\nhidden3 = tf.contrib.layers.fully_connected(hidden2, n_hidden3, activation_fn=activation, weights_initializer=he_init)\npredict = tf.contrib.layers.fully_connected(hidden3, n_outputs, activation_fn=None, weights_initializer=he_init)\n\n\ndef fetch_batch(epoch, batch_index, batch_size):\n X_batch = X_train_nor[batch_index * batch_size: (batch_index + 1) * batch_size]\n y_batch = y_train[batch_index * batch_size: (batch_index + 1) * batch_size]\n return X_batch, y_batch\n\n\nloss = tf.losses.mean_squared_error(y, predict) ** 0.5 #RMSE\noptimizer = tf.train.AdamOptimizer(learning_rate)\ntrain_op = optimizer.minimize(loss)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n init.run()\n for epoch in range(n_epoch):\n for batch_index in range(len(X_train_nor) // batch_size):\n X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)\n sess.run(train_op, feed_dict={X: X_batch, y: y_batch})\n sess.run(train_op, feed_dict={X: X_batch, y: y_batch})\n RMSE_train = loss.eval(feed_dict={X: X_batch, y: y_batch})\n RMSE_test = loss.eval(feed_dict={X: X_test_nor, y: y_test})\n print(epoch, \"RMSE_train:\", RMSE_train, \" RMSE_test:\", RMSE_test)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"L_Form_analyse_NN.py","file_name":"L_Form_analyse_NN.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"425131119","text":"import csv\nimport re\nimport pymongo\nfrom pymongo import MongoClient\nimport datetime\n\n\nclass Ticket:\n\n def 
__init__(self):\n client = MongoClient()\n self.db = client['db_tickets']\n\n def read_data(self, file):\n # Загрузить данные в бд из CSV-файла\n with open(file, encoding='utf8') as csv_file:\n # прочитать файл с данными и записать в коллекцию\n reader = csv.DictReader(csv_file)\n for item in reader:\n add_item = dict()\n for key in item:\n if key == 'Цена':\n add_item[key] = int(item[key])\n elif key == 'Дата':\n date_split = item[key].replace('0', '').split('.')\n add_item[key] = datetime.datetime(2019, int(date_split[1]), int(date_split[0]))\n else:\n add_item[key] = item[key]\n self.db.tickets.insert_one(add_item)\n\n def find_cheapest(self):\n \"\"\"\n Отсортировать билеты из базы по возрастания цены\n Документация: https://docs.mongodb.com/manual/reference/method/cursor.sort/\n \"\"\"\n for record in self.db.tickets.find().sort('Цена', pymongo.ASCENDING):\n print(record)\n\n def find_by_name(self, name):\n \"\"\"\n Найти билеты по имени исполнителя (в том числе – по подстроке),\n и вернуть их по возрастанию цены\n \"\"\"\n regex = re.compile(re.escape(name), re.I)\n for record in self.db.tickets.find({'Исполнитель': regex}).sort('Цена', pymongo.ASCENDING):\n print(record)\n\n\nif __name__ == '__main__':\n mongodbTickets = Ticket()\n mongodbTickets.read_data('artists.csv')\n print('==================================================')\n mongodbTickets.find_cheapest()\n print('==================================================')\n mongodbTickets.find_by_name('Enter')\n","sub_path":"mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"333455504","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport base\nfrom config.schema import FeedbackTemplateMapping\nimport util.database\n\nclass TemplateMappingDeleteHandler(base.BaseHandler):\n def get(self):\n self.check()\n try:\n groupid = self.get_argument(\"groupid\")\n templateid = self.get_argument(\"templateid\")\n except:\n self.redirect(\"/admin/template\")\n\n session = util.database.Session()\n result = session.query(FeedbackTemplateMapping).filter(FeedbackTemplateMapping.template_id == templateid, FeedbackTemplateMapping.template_group_id == groupid).first()\n if result:\n session.delete(result)\n session.commit()\n\n self.redirect(\"/admin/template?id=\"+templateid)\n \n session.close()\n\n def post(self):\n pass\n","sub_path":"src/handlers/admin/templatemappingdelete.py","file_name":"templatemappingdelete.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"170469581","text":"\"\"\"\nThis is the Core of the entire system. \n\nTree always initialize from a `dict`. Can be merge/find from dict.\n\"\"\"\n\nimport sys\n\nclass Tree(object):\n \"\"\"\n >>> Tree({1:0})\n {1: }\n >>> Tree()\n {}\n\n >>> a = {'cpu': {'10.3.1.12': {'idle': 0.32,\n ... 'sytem': 0.29,\n ... 'user': 1,\n ... 'marker': 'sth. wrong happened'}},\n ... 
'default_action': 'average'}\n >>> Tree(a)\n {'cpu': {'10.3.1.12': {'idle': , 'sytem': , 'user': }}}\n\n >>> t = Tree(a)\n >>> for ks, v in t.find('cpu,,user'): print ks, v\n ['cpu', '10.3.1.12', 'user'] \n \"\"\"\n def __init__(self, d=None):\n if d is None:\n self._dict = {}\n self.action = _average\n else:\n action_name = d.get('default_action', 'average')\n self.action = _action_from_name(action_name)\n self._dict = evolve(d, self.action)\n\n def merge(self, d):\n self._dict = merge(self._dict, d, self.action)\n\n def find(self, name):\n # right, same as right\n name= name.rstrip(',')\n\n for ks, x in find_pattern(self._dict, name):\n yield ks, x\n\n def __repr__(self):\n return self._dict.__repr__()\n\nclass Item(object):\n def __init__(self, value=None, action=None):\n self.value = value\n self.action = action # sum, average, ...\n\n def __repr__(self):\n return \"\" % repr(self.value)\n\n def __add__(self, b):\n # use b's action\n return Item(b.action(self, b), b.action)\n\n#if sys.version_info.major > 2 or (sys.version_info.major == 2 and sys.version_info.minor > 5):\n def __iadd__(self, b):\n #print 'iadd', self.value, b.value, b.action(self, b)\n self.value = b.action(self, b)\n return self\n\ndef _average(a,b):\n try:\n getattr(a, 'sum')\n except AttributeError:\n setattr(a, 'sum', a.value)\n setattr(a, 'count', 1)\n a.sum += b.value\n a.count += 1\n return float(a.sum)/a.count\n\ndef _add(a,b):\n #print 'add', a, b, a.value + b.value\n return a.value + b.value\n\ndef _action_from_name(name):\n m = {'add': _add,\n 'average': _average}\n return m[name]\n\ndef evolve(d, default_action=_add):\n \"\"\"\n Convert a normal dict to Tree like dict with memeber as Item.\n\n >>> evolve({1:2})\n {1: }\n >>> evolve({1: Item(2)})\n {1: }\n\n >>> d = evolve({1: {2: 3}}, _add)\n >>> d[1][2].action == _add\n True\n \"\"\"\n rd = {}\n for k,v in d.iteritems():\n if isinstance(v, Item):\n rd[k] = v\n elif isinstance(v, (int, long, float)):\n #print default_action\n rd[k] = Item(v, default_action)\n elif isinstance(v, dict):\n rd[k] = evolve(v, default_action)\n return rd\n\ndef merge(d1, d2, default_action=None):\n \"\"\"\n >>> merge({1: Item(1,_add)}, {3: Item(1,_add)})\n {1: , 3: }\n >>> merge({1: Item(1,_add)}, {1: 2})\n {1: }\n >>> merge({1: Item(1,_add)}, {1: 2}, _add)\n {1: }\n\n >>> merge({1: {2:Item(3, _add)}}, {1:{2:4}}, _add)\n {1: {2: }}\n\n >>> merge({1: {2:Item(3, _add)}}, {1:4})\n {1: {'count': , 2: }}\n\n >>> merge({1:Item(4, _add)}, {1: {2: {3:5}}})\n {1: {'count': , 2: {3: 5}}}\n\n >>> da = merge({1:{}}, {'default_action':'average',1: {2: {3:5}}})\n >>> da\n {1: {2: {3: 5}}, 'default_action': 'average'}\n\n >>> merge(Tree({1:3}), Tree({1:2}))\n {1: }\n\n >>> merge({'os': {}, 'default_action':'add'}, {'os' : {'Windows': {'NT 5.1': 1, 'NT 6.1':2}}})\n {'default_action': 'add', 'os': {'Windows': {'NT 5.1': 1, 'NT 6.1': 2}}}\n >>> merge({'os': {'Windows': {'NT 5.1': 2, 'NT 6.1':4}}, 'default_action':'add'}, {'os' : {'Windows': {'NT 5.1': 1, 'NT 6.1':2}}})\n {'default_action': 'add', 'os': {'Windows': {'NT 5.1': , 'NT 6.1': }}}\n \n # TDOO: merge result action missing\n \"\"\"\n if default_action is None:\n if isinstance(d2, dict) and 'default_action' in d2:\n default_action = _action_from_name(d2['default_action'])\n elif isinstance(d1, dict) and 'default_action' in d1:\n default_action = _action_from_name(d1['default_action'])\n else:\n default_action = _average\n\n if isinstance(d2, Tree):\n d2 = d2._dict\n\n if isinstance(d1, Tree):\n d1 = d1._dict\n\n for k,v2 in 
d2.iteritems():\n if k not in d1:\n d1[k] = v2\n else:\n v1 = d1[k]\n if isinstance(v1, Item) and isinstance(v2, (Item)):\n #d1[k] = v1 + v2\n v1 += v2\n elif isinstance(v1, Item) and isinstance(v2, (int, long, float)):\n v1 += Item(v2, default_action)\n elif isinstance(v1, dict) and isinstance(v2, dict):\n d1[k] = merge(v1, v2, default_action)\n elif isinstance(v1, dict) and isinstance(v2, Item):\n v1['count'] = v2\n elif isinstance(v1, dict) and isinstance(v2, (int, long, float)):\n v1['count'] = Item(v2, default_action)\n elif isinstance(v1, Item) and isinstance(v2, dict):\n v2.update({'count':v1})\n d1[k] = v2\n elif isinstance(v1, (int, long, float)) and isinstance(v2, (int, long, float)):\n d1[k] = Item(v1, default_action) + Item(v2, default_action)\n elif isinstance(v1, (int, long, float)) and isinstance(v2, Item):\n d1[k] = Item(v1, default_action) + v2\n else:\n assert False, 'merge unespected type k: %r v1: %r[%r] v2: %r[%r]' % (k, v1, type(v1), v2, type(v2))\n return d1\n\ndef keyin(key, d):\n arr = key.split(',')\n for a in arr:\n if a in d:\n d = d[a]\n else:\n return False\n return True\n\ndef match(a, b):\n \"\"\"\n >>> match([], [])\n True\n >>> match([1], [1])\n True\n >>> match([1], [''])\n True\n\n >>> match(['1','2'], ['1'])\n True\n >>> match(['1','2'], [''])\n True\n\n >>> match(['1','2'], ['3'])\n False\n >>> match(['1'], ['3'])\n False\n >>> match(['1','2'], ['','1'])\n False\n\n >>> match(['1','2'], ['','2'])\n True\n >>> match(['1','2','3'], ['','2'])\n True\n\n >>> match(['1'], ['', '', '6'])\n False\n \"\"\"\n\n if len(b) > len(a):\n return False\n\n for i, xa in enumerate(a):\n if i < len(b):\n xb = b[i]\n if xb and xb != xa:\n return False\n else:\n break\n\n return True\n\ndef find_pattern(d, pattern):\n \"\"\"\n Find all matched Items.\n\n >>> d = {'1': Item(1, _add), '2': Item(2, _add), '3': {'4':{'6':Item(6, _add)}, '5':Item(5, _add)}}\n >>> [x for ks,x in find_pattern(d, '')]\n [, , , ]\n >>> [x for ks,x in find_pattern(d, '1,2')]\n []\n >>> [x for ks,x in find_pattern(d, '3,4')]\n []\n \n >>> [x for ks,x in find_pattern(d, '3')]\n [, ]\n\n >>> [x for ks,x in find_pattern(d, ',3')]\n []\n >>> [x for ks,x in find_pattern(d, ',4')]\n []\n\n >>> [x for ks,x in find_pattern(d, ',,6')]\n []\n \"\"\"\n\n arr = pattern.split(',')\n\n # deep first traversal\n def dfs(d, ks):\n for k, v in d.iteritems():\n deep_ks = ks + [k]\n if isinstance(v, dict):\n for x in dfs(v, deep_ks):\n yield x\n elif isinstance(v, (Item, int, long, float)):\n yield (v, deep_ks)\n else:\n assert False, 'dfs unespected type k: %r v: %r' % (k, v)\n\n for v, ks in dfs(d, []):\n if match(ks, arr):\n yield ks, v\n","sub_path":"server/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":7866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"139236243","text":"# coding: utf-8\r\nfrom multiprocessing import Process, Queue\r\nimport threading\r\nimport queue\r\nimport time\r\nimport atexit\r\nimport os\r\nimport RPi.GPIO as GPIO\r\n\r\nfrom pins import *\r\nimport dispmod\r\nimport menumod\r\nimport ledmod\r\nimport audiomod\r\nimport alarmmod\r\n\r\n\r\nclass Main():\r\n def __init__(self):\r\n GPIO.add_event_detect(\r\n UP, GPIO.RISING, callback=self.buttonEvents, bouncetime=200)\r\n GPIO.add_event_detect(\r\n DOWN, GPIO.RISING, callback=self.buttonEvents, bouncetime=200)\r\n GPIO.add_event_detect(\r\n OK, GPIO.RISING, callback=self.buttonEvents, bouncetime=200)\r\n GPIO.add_event_detect(\r\n BACK, GPIO.RISING, 
callback=self.buttonEvents, bouncetime=200)\r\n\r\n atexit.register(exit, self)\r\n print(\"exithandler registered\")\r\n\r\n self.exit_flag = False # initializing exit if True\r\n self.threadCount = 5\r\n self.alarm_time = None\r\n # can be \"Main\", \"temp\", \"set_temp\", \"full_screen\", \"alarm_multiL\", \"alarm_set\", \"night\", \"color\", \"brightness\"\r\n self.menu_mode = \"full_screen\"\r\n self.last_activity = time.time()\r\n self.tasks = Queue() # scheduled tasks\r\n self.ringing = False\r\n self.abort_alarm = False\r\n # delay for alarm sound after light in seconds\r\n self.light_delay = Queue(1)\r\n\r\n self.ledRunning = Queue(1) # will be filled if LED process is running\r\n self.ledOn = Queue(1) # weather led is on\r\n # if filled LED related processes will exit; 1 off, 2 keep on\r\n self.ledExit = Queue(1)\r\n self.ledTemp = Queue() # holds temperature for ledmod.lightTemo\r\n self.ledColor = Queue() # holds color for LED from Menu setting\r\n self.ledRainbow = Queue(1) # whether Rainbow is on\r\n self.ledColor_on = Queue(1) # filled, if solid color is on\r\n self.color_change = Queue(1) # filled if color change is stopped\r\n self.brightness = Queue(1) # containes brightness\r\n self.brightness.put_nowait(255)\r\n\r\n # instanziate Disp & Menu & Speakers\r\n self.Alarm = alarmmod.Alarmmod(self)\r\n self.Disp = dispmod.Dispmod(self)\r\n self.Speakers = audiomod.Speaker(self)\r\n self.Menu = menumod.Menu(self)\r\n\r\n self.tasks.put((\"scheduler\", [-1], \"\"))\r\n\r\n self.Disp.set_menu_text()\r\n ledmod.light_off()\r\n\r\n # initiante threads\r\n for _ in range(self.threadCount):\r\n t = threading.Thread(target=self.taskmanager)\r\n t.daemon = True\r\n t.start()\r\n print(\"threads initialized, active: \", threading.active_count())\r\n\r\n # # ---TEST---\r\n # self.tasks.put(\r\n # [\"sunrise\", -1, 30])\r\n\r\n # -----Main loop-----\r\n print(\"Initialization complete\")\r\n try:\r\n while not self.exit_flag:\r\n self.Disp.update()\r\n time.sleep(0.1)\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n # -----EXIT-----\r\n self.closeLed(1) # closes led related processes\r\n raise SystemExit # exits Main thread\r\n\r\n def taskmanager(self):\r\n '''Is called threaded.\r\n Gets task from tasks queue.\r\n Task has the form: (task, path, value)\r\n '''\r\n print(\"Thread enters taskmanager\")\r\n while not self.exit_flag:\r\n try:\r\n # Tries every 1s to get a task\r\n task = self.tasks.get(True, 1)\r\n except queue.Empty:\r\n continue\r\n except:\r\n print(\"Thread closed during waiting\")\r\n raise SystemExit\r\n\r\n print(\"Task recieved: \", task[0].encode(\r\n 'ascii', 'ignore').decode('ascii'))\r\n path = task[1]\r\n value = task[2]\r\n\r\n # ---Licht---\r\n # ---light on---\r\n if task[0] == \"Licht\":\r\n if value == \"an\": # turn light on\r\n self.closeLed()\r\n\r\n # set Menu\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"aus\")\r\n self.Disp.set_menu_text()\r\n\r\n # get color\r\n if not self.ledTemp.empty():\r\n self.ledTemp.get_nowait()\r\n self.ledTemp.put(self.Menu.getMenuItem(\r\n self.Menu.paths[\"Standard Temp\"])[1])\r\n\r\n # start process\r\n p = Process(target=ledmod.light_on, args=(\r\n self.ledExit, self.ledRunning, self.ledOn,\r\n self.ledTemp, self.brightness))\r\n p.start()\r\n print(\"Task started licht an: \", task)\r\n\r\n elif value == \"aus\": # turn light off\r\n self.closeLed()\r\n\r\n # set Menu\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"an\")\r\n self.Disp.set_menu_text()\r\n\r\n print(\"Task done licht aus: \", 
task)\r\n\r\n # ---Rainbow---\r\n elif task[0] == \"Rainbow\":\r\n self.closeLed()\r\n\r\n # set Menu\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"aus\")\r\n\r\n # start process\r\n p = Process(target=ledmod.rainbow, args=(\r\n self.ledExit, self.ledRunning, self.ledOn, self.ledRainbow, self.brightness))\r\n p.start()\r\n print(\"Task started Rainbow: \", task)\r\n\r\n # ---Sunrise---\r\n elif task[0] == \"sunrise\":\r\n self.closeLed()\r\n\r\n # set Menu\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"aus\")\r\n\r\n self.light_delay.put_nowait(value)\r\n\r\n # start process\r\n p = Process(target=ledmod.sunrise, args=(\r\n self.ledExit, self.ledRunning, self.ledOn, self.light_delay))\r\n p.start()\r\n print(\"Task started sunrise: \", task)\r\n\r\n # ---solid color---\r\n elif task[0] == \"Farbe\":\r\n if self.ledColor_on.empty():\r\n self.ledColor_on.put_nowait(1)\r\n self.closeLed()\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"aus\")\r\n\r\n # start process\r\n p = Process(target=ledmod.solid_color, args=(\r\n self.ledExit, self.ledRunning, self.ledOn,\r\n self.color_change, self.ledColor_on, self.brightness))\r\n p.start()\r\n self.menu_mode = \"color\"\r\n print(\"Task started solid_color: \", task)\r\n else:\r\n self.menu_mode = \"color\"\r\n\r\n # ---brightness---\r\n elif task[0] == \"Helligkeit\":\r\n self.menu_mode = \"brightness\"\r\n if self.ledOn.empty():\r\n # set Menu\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"aus\")\r\n self.Disp.set_menu_text()\r\n\r\n # get color\r\n if not self.ledTemp.empty():\r\n self.ledTemp.get_nowait()\r\n self.ledTemp.put(self.Menu.getMenuItem(\r\n self.Menu.paths[\"Standard Temp\"])[1])\r\n\r\n led_color = self.Menu.getMenuItem(\r\n self.Menu.paths[\"Standard Temp\"])[1]\r\n p = Process(target=ledmod.light_on, args=(\r\n self.ledExit, self.ledRunning, self.ledOn,\r\n self.ledTemp, self.brightness))\r\n p.start()\r\n\r\n # ---Temperature---Standard Temp---\r\n elif task[0] == \"Temperatur\" or task[0] == \"Standard Temp\":\r\n self.closeLed()\r\n\r\n if task[0] == \"Temperatur\": # change Menu mode\r\n self.menu_mode = \"temp\"\r\n else:\r\n self.menu_mode = \"set_temp\"\r\n # grab initial temp:\r\n self.ledTemp.put_nowait(self.Menu.getMenuItem()[1])\r\n\r\n # start process\r\n p = Process(target=ledmod.light_temp, args=(\r\n self.ledExit, self.ledRunning, self.ledOn, self.ledTemp, self.brightness))\r\n p.start()\r\n print(\"Task started temp: \", task)\r\n\r\n # ---Blink---\r\n elif task[0] == \"Blink\":\r\n if self.ledOn.empty() and self.ledRunning.empty():\r\n if value == \"red\":\r\n color = (255, 0, 0)\r\n elif value == \"green\":\r\n color = (0, 255, 0)\r\n elif value == \"yellow\":\r\n color = (200, 200, 0)\r\n else:\r\n color = None\r\n print(\"Blink\")\r\n ledmod.ring_chase(self.ledRunning, color=color)\r\n else:\r\n print(\"No Blink: ledOn.full: \", self.ledOn.full(),\r\n \"\\n ledRunning.full()\", self.ledRunning.full())\r\n\r\n # ---Radio---\r\n elif task[0] == \"Radio\":\r\n if value[1] is not None:\r\n self.Speakers.radio(value)\r\n elif value[0] == \"Radio aus\":\r\n self.Speakers.stop_playing()\r\n self.Menu.make_submenus(\"Radio\")\r\n self.Menu.path = self.Menu.paths[\"Radio\"].copy()\r\n self.Disp.disp_update = True\r\n elif value[0] in [\"Lauter\", \"Leiser\"]:\r\n self.Speakers.vol(value[0])\r\n elif value[0] == \"Sender wechseln\":\r\n self.Menu.crr_path = self.Menu.paths[\"Radio\"] + [0]\r\n self.Menu.make_submenus(\"Radio\")\r\n self.Disp.disp_update = True\r\n else:\r\n 
print(\"Task Radio: unknown value!!! \\n\", value)\r\n\r\n # ---System---\r\n elif task[0] == \"Sicher ausschalten?\":\r\n print(\"shutting down system\")\r\n exit(self)\r\n time.sleep(1)\r\n os.system(\"shutdown -h now\")\r\n elif task[0] == \"Sicher neustarten?\":\r\n print(\"rebooting system\")\r\n exit(self)\r\n time.sleep(1)\r\n os.system(\"shutdown -r now\")\r\n elif task[0] == \"Sicher beenden?\":\r\n print(\"closing application\")\r\n self.exit_flag = True\r\n\r\n # ---Alarm---\r\n elif task[0] == \"Wecker\":\r\n self.menu_mode = \"alarm_multiL\"\r\n self.Alarm.alarms[0].set_selected()\r\n self.Alarm.selected = 0\r\n self.Disp.set_disp_mode(3)\r\n print(\"Task Wecker: mode changed.\")\r\n\r\n elif task[0] == \"Alarm ausschalten\":\r\n self.Alarm.mod_alarm.set_active(False)\r\n self.menu_mode = \"alarm_multiL\"\r\n self.Disp.set_disp_mode(3)\r\n elif task[0] == \"Ändern\":\r\n self.menu_mode = \"alarm_set\"\r\n self.Alarm.mod_alarm.mod_part = \"h\"\r\n self.Disp.set_disp_mode(4)\r\n elif task[0] == \"Löschen\":\r\n self.Alarm.del_mod()\r\n self.menu_mode = \"alarm_multiL\"\r\n self.Disp.set_disp_mode(3)\r\n elif task[0] == \"Zurück\":\r\n self.Menu.set_crr_menu(\"main\")\r\n self.menu_mode = \"alarm_multiL\"\r\n self.Disp.set_disp_mode(3)\r\n\r\n elif task[0] == \"ring\":\r\n if not self.ringing:\r\n self.ringing = True\r\n self.Alarm.ring(value)\r\n else:\r\n print(\"Taskmanager: ALLREADY RINGING!!\")\r\n\r\n elif task[0] == \"scheduler\":\r\n self.Alarm.alarm_scheduler()\r\n\r\n # ---Schlafen---\r\n elif task[0] == \"Schlafen\":\r\n self.menu_mode = \"night\"\r\n self.Disp.set_disp_mode(0)\r\n self.closeLed()\r\n\r\n # End task check\r\n else:\r\n print(\"TASKMANAGER: unknown task!!! \", task)\r\n\r\n # Exit thread:\r\n print(\"Thread closed after while loop\")\r\n raise SystemExit\r\n\r\n# -----BUTTON EVENTS-----\r\n def buttonEvents(self, ch):\r\n '''Callback function for raising edges.\r\n '''\r\n self.last_activity = time.time()\r\n\r\n # ---Main Menu---\r\n if self.menu_mode == \"Main\":\r\n if ch == UP:\r\n self.Menu.moveUp()\r\n print(\"UP\")\r\n elif ch == DOWN:\r\n self.Menu.moveDown()\r\n print(\"DOWN\")\r\n elif ch == OK:\r\n self.Menu.moveDeep()\r\n print(\"OK\")\r\n elif ch == BACK:\r\n self.Menu.moveHigh()\r\n print(\"BACK\")\r\n\r\n # ---temperature---\r\n elif self.menu_mode == \"temp\" or self.menu_mode == \"set_temp\":\r\n temp = self.Menu.getMenuItem()[1]\r\n path = self.Menu.path\r\n self.ledTemp.put_nowait(temp)\r\n\r\n repeats = 1\r\n while True:\r\n\r\n if ch == UP:\r\n temp += 100\r\n self.Menu.setMenuValue(path, temp)\r\n self.Disp.set_menu_text()\r\n self.ledTemp.put_nowait(temp)\r\n print(\"+ temp\")\r\n if GPIO.input(UP):\r\n time.sleep(1 / repeats)\r\n if GPIO.input(UP):\r\n if repeats <= 5:\r\n repeats += 1\r\n continue\r\n\r\n elif ch == DOWN:\r\n temp -= 100\r\n self.Menu.setMenuValue(path, temp)\r\n self.Disp.set_menu_text()\r\n self.ledTemp.put_nowait(temp)\r\n print(\"- temp\")\r\n if GPIO.input(DOWN):\r\n time.sleep(1 / repeats)\r\n if GPIO.input(DOWN):\r\n if repeats <= 5:\r\n repeats += 1\r\n continue\r\n elif ch == OK:\r\n self.ledExit.put_nowait(2) # exit, keep light on\r\n self.menu_mode = \"Main\"\r\n # update Menu \"Licht\"\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"aus\")\r\n self.Disp.set_menu_text()\r\n print(\"Back to Main Menu, Light is ON\")\r\n\r\n while not self.ledTemp.empty():\r\n self.ledTemp.get()\r\n print(\"ledTemp was not empty!!!\")\r\n elif ch == BACK:\r\n self.closeLed(1) # exit, turn lights off\r\n if 
self.menu_mode == \"set_temp\":\r\n self.tasks.put_nowait((\"Blink\", -1, \"green\"))\r\n self.menu_mode = \"Main\"\r\n # update Menu \"Licht\"\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"an\")\r\n self.Disp.set_menu_text()\r\n print(\"Back to Main Menu, Light is OFF\")\r\n\r\n while not self.ledTemp.empty():\r\n self.ledTemp.get()\r\n print(\"ledTemp was not empty!!!\")\r\n break\r\n\r\n # ---alarm_multiline---\r\n elif self.menu_mode == \"alarm_multiL\":\r\n if ch == UP:\r\n self.Alarm.sel_up()\r\n elif ch == DOWN:\r\n self.Alarm.sel_down()\r\n elif ch == OK:\r\n self.Alarm.sel_ok()\r\n elif ch == BACK:\r\n self.Alarm.sel_back()\r\n\r\n # ---setting alarm---\r\n elif self.menu_mode == \"alarm_set\":\r\n self.Alarm.set_clock(ch)\r\n\r\n # ---full screen---\r\n elif self.menu_mode == \"full_screen\":\r\n\r\n # Abord ringing after ringing\r\n if self.ringing and ch == BACK:\r\n print(\"Abort alarm!\")\r\n self.abort_alarm = True # --> exit: alarmmod.ring()\r\n self.Disp.set_disp_mode(0)\r\n self.menu_mode = \"night\"\r\n self.closeLed()\r\n self.Speakers.stop_playing()\r\n\r\n elif ch in [UP, DOWN, OK, BACK]:\r\n print(\"Minimizing Clock\")\r\n self.Disp.set_disp_mode(1)\r\n self.menu_mode = \"Main\"\r\n\r\n # ---night time---\r\n elif self.menu_mode == \"night\":\r\n if ch in [UP, DOWN, OK, BACK]:\r\n print(\"Disp_mode: \", self.Disp.disp_mode)\r\n print(\"menu_mode: \", self.menu_mode)\r\n\r\n if self.Disp.disp_mode == 0:\r\n print(\"Awaking Clock\")\r\n self.Disp.set_disp_mode(2)\r\n \r\n # Abord ringing before ringing\r\n elif self.ringing and self.Disp.disp_mode == 2 and ch == BACK:\r\n print(\"Abort alarm!\")\r\n self.abort_alarm = True # --> exit: alarmmod.ring()\r\n self.Disp.set_disp_mode(0)\r\n self.closeLed()\r\n self.Speakers.stop_playing()\r\n else:\r\n print(\"Waking up\")\r\n self.Disp.set_disp_mode(1)\r\n self.menu_mode = \"Main\"\r\n\r\n # ---solid color---\r\n elif self.menu_mode == \"color\":\r\n if ch == UP:\r\n ledmod.brighter(self.brightness)\r\n elif ch == DOWN:\r\n ledmod.darker(self.brightness)\r\n elif ch == OK:\r\n # stop color change if changing, start changing if stopped\r\n if self.color_change.empty():\r\n self.color_change.put_nowait(1)\r\n print(\"Stopping color change..\")\r\n else:\r\n self.color_change.get(False)\r\n print(\"Resuming color change..\")\r\n elif ch == BACK:\r\n held = True\r\n for i in range(4):\r\n time.sleep(0.5)\r\n if not GPIO.input(BACK):\r\n self.menu_mode = \"Main\"\r\n held = False\r\n break\r\n if held:\r\n self.ledExit.put_nowait(1)\r\n self.menu_mode = \"Main\"\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"an\")\r\n\r\n # ---brightness---\r\n elif self.menu_mode == \"brightness\": # TODO: Update only self.brightness\r\n if ch == UP:\r\n ledmod.brighter(self.brightness)\r\n elif ch == DOWN:\r\n ledmod.darker(self.brightness)\r\n elif ch == OK:\r\n self.menu_mode = \"Main\"\r\n elif ch == BACK:\r\n self.closeLed()\r\n self.menu_mode = \"Main\"\r\n\r\n # End button check\r\n else:\r\n print(\"buttonEvents: UNKOWN MENU MODE!!!\", self.menu_mode)\r\n\r\n def closeLed(self, exit_flag=1):\r\n '''tries to close LED related processes and turn off LED\r\n '''\r\n\r\n print(\"START: closeLED\")\r\n i = 0\r\n while self.ledRunning.full() and i < 12: # while LED is in use\r\n print(\"try closing led process, try No. 
\", i + 1)\r\n if not self.ledExit.full() and i == 0: # and LED process is not closing\r\n self.ledExit.put_nowait(exit_flag) # close LED process\r\n time.sleep(0.1)\r\n i += 1\r\n if i == 12: # timeout: process could not be closed\r\n try:\r\n self.ledRunning.get(False)\r\n # clear ledExit IF process could not be closed\r\n self.ledExit.get(False)\r\n print(\"LED PROCESS COULD NOT BE CLOSED!!!\")\r\n except queue.Empty:\r\n print(\"LED process closed in last second\")\r\n if self.ledOn.full():\r\n print(\"closingLed: ledOn was filled, turning light off\")\r\n ledmod.light_off()\r\n self.ledOn.get()\r\n while self.ledExit.full():\r\n print(\"closingLed: ledExit full on exit!\")\r\n self.ledExit.get(False)\r\n self.Menu.setMenuValue(self.Menu.paths[\"Licht\"], \"an\")\r\n\r\n\r\ndef exit(Main):\r\n ''' Registered exit handler\r\n '''\r\n print(\"begin exit\")\r\n Main.closeLed()\r\n Main.Speakers.stop_playing()\r\n Main.Disp.disp_mode = 0\r\n dispmod.disp_off()\r\n # Main.LED.light_off()\r\n GPIO.cleanup([LED, SOUND, UP, DOWN, OK, BACK])\r\n print(\"EXIT\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Main = Main()\r\n","sub_path":"mainmod.py","file_name":"mainmod.py","file_ext":"py","file_size_in_byte":20814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"146802306","text":"import rsf.api as rsf\nimport numpy as np\nimport sys\n\n\ndef corr(f,g,c,lags,adj=False):\n nlags = len(lags)\n for ci in range(nlags):\n l = lags[ci] \n for i in range(len(f)):\n if i+l < len(f) and i+l >= 0 and i-l >=0 and i-l Thanks for sign Up \" \n \n \"

Welcome to Repair Me\"\n\n\n \n \"thduhfhf
\"\n \"\"\n )\n \n\n \n mail.send_message(\"New Sign Up \" + form.name.data,\n sender=form.email.data,\n recipients=[\"repairmemain@gmail.com\"],\n body=form.email.data+\"\\n\"+\"\\nPhone number :\" + form.phone.data\n ) \n \n \n \n \n \n \n return redirect(url_for('login'))\n \n \n \n \n return render_template('signup.html',form=form)\n\n@app.route('/login', methods=['GET','POST']) ################################################ USER LOG IN \ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data , password=form.password.data).first()\n if user :\n login_user(user , remember=form.remember.data)\n next_page = request.args.get('next')\n flash('You are successfully Logged In ' , 'primary')\n return redirect(next_page) if next_page else redirect(url_for('account'))\n\n else :\n flash('login unsuccessful. Please check email and password' , 'danger')\n\n return render_template('user_login.html',form=form)\n\n@app.route('/userlogout' ) ################################################################## Logout \ndef userlogout():\n logout_user()\n return redirect(url_for('home'))\n\n@app.route('/account') ###################################################################### ACCOUNT \ndef account():\n\n \"\"\"\n form = UpdateProfileForm()\n if form.validate_on_submit():\n if form.picture.data:\n picture_file = save_picture(form.picture.data)\n current_user.img = picture_file\n\n current_user.name = form.name.data\n current_user.email = form.email.data \n db.session.commit()\n flash('Your account has been updated!', 'success')\n return redirect(url_for('account'))\n \n elif request.method == 'GET':\n form.name.data = current_user.name\n form.email.data = current_user.email\n \"\"\"\n bookservices = Bookservices.query.order_by(Bookservices.date.desc()).filter_by(email=current_user.email)\n \n book_1 = Bookservices.query.filter_by(email=current_user.email).all()\n len_book = len(book_1)\n \n \n\n \n\n\n image_file = url_for('static',filename='assets/img/profile_pics/'+ current_user.img)\n return render_template(\"account.html\" , image_file = image_file ,bookservices=bookservices,len_book =len_book )\n \n@app.route('/service/' , methods=['GET' , 'POST']) ##################### BOOK SERVICES \n@login_required\ndef bookservice(service_slug):\n if current_user.is_authenticated:\n service = Services.query.filter_by(slug=service_slug).first()\n homeapps = Category_homeapp.query.all()\n \n if (request.method == 'POST'):\n name = request.form.get('name')\n #email = request.form.get('email')\n email = current_user.email\n phone = request.form.get('phone')\n service = service.title \n category = request.form.get('category',default=(\"None\")) \n status = request.form.get('status',default=(\"Pending\")) \n serviceman = request.form.get('serviceman',default=(\"Not yet alloted\")) \n area = request.form.get('area')\n location = request.form.get('location')\n service_date = request.form.get('service_date')\n service_time = request.form.get('service_time')\n \n\n entry = Bookservices(name=name, email=email , phone=phone, area=area , service=service ,category=category, \n status=status,serviceman=serviceman,location=location ,service_date=service_date , service_time=service_time ,\n date =datetime.now())\n db.session.add(entry)\n db.session.commit()\n\n\n mail.send_message(\"Thanks for booking \" + current_user.name,\n sender=email,\n recipients=[current_user.email],\n 
body=email+\"\\n\"+\"\\nPhone number :\" + phone \n )\n\n \n\n \n mail.send_message(\"New Service request from \" + name,\n sender=email,\n recipients=[\"contact@repairmeshop.in\"],\n body=email + \"\\nPhone Number : \" + phone + \"\\nService : \"+ service +\n \"\\nCategory : \"+ category + \"\\nArea : \" + area + \"\\nLocation : \" + location + \n \"\\nService date : \" + service_date +\"\\nService Time : \" + service_time \n )\n return redirect(url_for('account')) \n \n\n return render_template('bookservices.html', service=service ,homeapps=homeapps)\n\n@app.route('/editbookservice/' , methods=['GET' , 'POST']) ###################### EDIT BOOK SERVICES \ndef editbookservice(sno):\n if ('user' in session and session['user'] == \"rex\"):\n\n if request.method == 'POST':\n status = request.form.get('status') \n serviceman = request.form.get('serviceman') \n bookservice = Bookservices.query.filter_by(sno=sno).first()\n bookservice.status = status\n bookservice.serviceman = serviceman\n db.session.commit()\n return redirect(url_for('dashboard'))\n \n bookservice = Bookservices.query.filter_by(sno=sno).first()\n return render_template('editbookservice.html', bookservice= bookservice )\n \n@app.route('/editservice/' , methods=['GET' , 'POST']) ########################## EDIT SERVICES \ndef editservice(sno):\n if ('user' in session and session['user'] == \"rex\"):\n\n if request.method == 'POST':\n\n title = request.form.get('title')\n slug = request.form.get('slug')\n icon = request.form.get('icon')\n color = request.form.get('color')\n description = request.form.get('description')\n date = datetime.now()\n \n\n\n if sno=='0':\n service = Services( sno=sno , title=title , description=description , slug=slug , icon=icon , \n color = color , date=date )\n db.session.add(service)\n db.session.commit()\n return redirect(url_for('dashboard'))\n\n\n else:\n service = Services.query.filter_by(sno=sno).first()\n service.title = title\n service.description = description\n service.slug = slug\n service.icon = icon\n service.color = color\n service.date = date\n \n\n\n\n db.session.commit()\n return redirect(url_for('dashboard'))\n\n service = Services.query.filter_by(sno=sno).first()\n return render_template('editservice.html', service=service, sno=sno)\n\n@app.route('/editserviceman/' , methods=['GET' , 'POST']) ######################## EDIT SERVICES MAN \ndef editserviceman(id):\n if ('user' in session and session['user'] == \"rex\"):\n\n if request.method == 'POST':\n\n name = request.form.get('name')\n description = request.form.get('description')\n status = request.form.get('status')\n phone = request.form.get('phone')\n address = request.form.get('address')\n adhar_no = request.form.get('adhar_no')\n\n if id=='0':\n serviceman = Servicemen( id=id, name=name , description=description , status=status , phone=phone ,\n address=address , adhar_no=adhar_no )\n db.session.add(serviceman)\n db.session.commit()\n return redirect(url_for('dashboard'))\n\n else:\n serviceman = Servicemen.query.filter_by(id=id).first()\n serviceman.name = name\n serviceman.description = description\n serviceman.status = status\n serviceman.phone = phone\n serviceman.address = address\n serviceman.adhar_no = adhar_no\n\n db.session.commit()\n return redirect(url_for('dashboard'))\n \n serviceman = Servicemen.query.filter_by(id=id).first()\n return render_template('editservicemen.html', serviceman=serviceman,id=id)\n\n@app.route('/about') ######################################################################## ABOUT \ndef 
about():\n services = Services.query.filter_by().all()\n return render_template('about.html', services=services )\n\n@app.route('/team') ######################################################################### TEAM \ndef team():\n return render_template('team.html')\n\n@app.route('/dashboard' , methods=['GET' , 'POST'] ) ####################################### DASHBOARD \ndef dashboard():\n if ('user' in session and session['user'] == \"rex\"):\n services = Services.query.all()\n contacts = Contacts.query.order_by(Contacts.date.desc()).all()\n bookservices = Bookservices.query.order_by(Bookservices.date.desc()).all()\n homeapps = Category_homeapp.query.all()\n servicemen = Servicemen.query.all()\n\n \n\n return render_template('dashboard.html', services=services , contacts=contacts , \n bookservices=bookservices , homeapps=homeapps ,servicemen=servicemen)\n\n if request.method == 'POST':\n username = request.form.get('uname')\n userpass = request.form.get('pass')\n if (username == \"rex\" and userpass == \"rex5467\"):\n session['user'] = username\n services = Services.query.all()\n contacts = Contacts.query.all()\n \n return render_template('dashboard.html', services=services , contacts=contacts)\n\n return render_template('login.html')\n\n@app.route('/logout') ####################################################################### LOGOUT\ndef logout():\n session.pop('user')\n return redirect('/dashboard')\n\n@app.route('/delete/' , methods = ['GET' , 'POST']) ############################# DELETE SERVICE \ndef delete(sno):\n if ('user' in session and session['user'] == \"rex\"):\n service = Services.query.filter_by(sno=sno).first()\n db.session.delete(service)\n db.session.commit()\n return redirect('/dashboard')\n\n@app.route('/portfolio') #################################################################### PORTFOLIO\ndef portfolio():\n return render_template('portfolio.html')\n\n@app.route('/contact', methods = ['GET','POST']) ############################################ CONTACT \ndef contact():\n if (request.method == 'POST'):\n name = request.form.get('name')\n email = request.form.get('email')\n phone = request.form.get('phone')\n subject = request.form.get('subject')\n message = request.form.get('message')\n entry = Contacts(name=name, phone_num=phone, msg=message, date=datetime.now(), email=email , subject=subject)\n db.session.add(entry)\n db.session.commit()\n\n \n mail.send_message(\"Thanks \" + name,\n sender=email,\n recipients=[email],\n body=email+\"\\n\"+\"\\nPhone number :\" + phone +\"\\n\\nSubject: \"+ subject + \"\\n\\n\" +message \n )\n \n\n\n mail.send_message(\"New Contact Request from \" + name,\n sender=email,\n recipients=[\"repairmemain@gmail.com\"],\n body=email+\"\\n\"+\"\\nPhone number :\" + phone +\"\\n\\nSubject: \"+ subject + \"\\n\\n\" +message \n )\n return render_template('index.html')\n \n \n \n \n \n \n \n \n return render_template('contact.html')\n\n\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":22166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"72947664","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom mingluji.items import MinglujiItem\n\nclass LcelandSpiderSpider(scrapy.Spider):\n name = 'lceland_spider'\n allowed_domains = ['isl.bizdirlib.com']\n start_urls = ['http://isl.bizdirlib.com/company']\n\n def parse(self, response):#获取url\n url_list = response.xpath('//*[@id=\"block-system-main\"]/div/div/div[2]/div/ul/li')\n for i in 
range(len(url_list)):\n link = \"https://isl.bizdirlib.com\"+str(response.xpath('//*[@id=\"block-system-main\"]/div/div/div[2]/div/ul/li['+str(i)+']/div/span/a/@href').extract_first())\n yield scrapy.Request(link,callback=self.prase1)\n next_link = response.xpath('//*[@id=\"block-system-main\"]/div/div/div[3]/ul/li[3]/a/@href').extract_first()#获取下一页\n if next_link:\n next_link = next_link\n yield scrapy.Request(\"https://isl.bizdirlib.com\"+next_link,callback=self.parse)\n def prase1(self,response):#解析内容\n if response.css('[itemprop=\"email\"]::text').extract_first() !=None:\n Category_html = response.css(\"fieldset ul li\").extract()\n try:\n Category_text = re.findall(r\"Category Activities(.*?)\", str(Category_html))[0].split(\":\")[1].replace(\" \",\"\")\n except:\n Category_text = None\n minglujiurl = MinglujiItem()\n minglujiurl['Company_Name'] = response.css('[itemprop=\"name\"]::text').extract_first()\n minglujiurl['Country'] = response.css('[itemprop=\"location\"]::text').extract_first()\n minglujiurl['Address'] = response.css('[itemprop=\"address\"]::text').extract_first()\n minglujiurl['Phone'] = response.css('[itemprop=\"telephone\"]::text').extract_first()\n minglujiurl['Email'] = response.css('[itemprop=\"email\"]::text').extract_first()\n minglujiurl['web_url'] = response.css('[itemprop=\"url\"]::text').extract_first()\n minglujiurl['Category'] = Category_text\n yield minglujiurl\n else:\n pass\n","sub_path":"mingluji/mingluji/spiders/lceland_spider.py","file_name":"lceland_spider.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"137273106","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport cv2 as cv\nimport numpy as np\nfrom settings import detectionCon\nfrom cvzone.HandTrackingModule import HandDetector\nfrom DragRect import DragFigure\n\n\ndef overlay_transparent(background, overlay, x, y):\n\n background_width = background.shape[1]\n background_height = background.shape[0]\n\n if x >= background_width or y >= background_height:\n return background\n\n h, w = overlay.shape[0], overlay.shape[1]\n\n if x + w > background_width:\n w = background_width - x\n overlay = overlay[:, :w]\n\n if y + h > background_height:\n h = background_height - y\n overlay = overlay[:h]\n\n if overlay.shape[2] < 4:\n overlay = np.concatenate(\n [\n overlay,\n np.ones((overlay.shape[0], overlay.shape[1], 1), dtype = overlay.dtype) * 255\n ],\n axis = 2,\n )\n\n overlay_image = overlay[..., :3]\n mask = overlay[..., 3:] / 255.0\n\n background[y:y+h, x:x+w] = (1.0 - mask) * background[y:y+h, x:x+w] + mask * overlay_image\n\n return background\n\n\nDetector = HandDetector(detectionCon=detectionCon, maxHands=2)\n\n# Rect_list = list()\nCircle_list = list()\n#\n# for i in range(5):\n# Rect_list.append(DragFigure())\n\nRect_list = [DragFigure([50, 130], [100, 200])]\nCircle_list.append(DragFigure([250, 250], [80, 80]))\nCircle_list.append(DragFigure([500, 250], [80, 80]))\nBallImage = cv.imread('images/ball2.png', cv.IMREAD_UNCHANGED)\n# BallImage = cv.imread('images/ball.png')\nprint(BallImage.shape)\nElipse = DragFigure([450, 500], [75, 75])\nBallRect = DragFigure([320, 320], [320, 320])","sub_path":"sources.py","file_name":"sources.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"219550219","text":"from portfolio.models import Project, Technology\nfrom helpers.decorators import 
render_to\nfrom django.shortcuts import get_object_or_404\n\n@render_to('portfolio/index.html')\ndef index(request):\n \"\"\"\n Shows a list of all projects on a single page.\n \"\"\"\n projects = Project.objects.all()\n return {\n 'project_list': projects,\n }\n\n@render_to('portfolio/by_technology.html')\ndef by_technology(request, technology_slug):\n tech = get_object_or_404(Technology, slug=technology_slug)\n projects = Project.objects.filter(technologies=tech)\n return {\n 'project_list': projects,\n 'technology': tech,\n }\n\n\n@render_to('portfolio/project.html')\ndef project(request, project_slug):\n project = get_object_or_404(Project, slug=project_slug)\n return {\n 'project': project,\n }\n","sub_path":"mightylemon/apps/portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"413348326","text":"from keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import load_model\nfrom keras.backend import clear_session\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nimport nltk\nfrom nltk.stem import SnowballStemmer\nfrom nltk.tokenize import word_tokenize\n\nnltk.download('punkt')\nnltk.download('averaged_perceptron_tagger')\n\nimport re\nfrom collections import defaultdict\nimport itertools\nimport pickle\n\nMAX_NUM_WORDS = 20000\nMAX_SEQUENCE_LENGTH = 1500\n\nsample_text = \"hi all. i'm an extrovert and I don't like to be pushed around\"\n\n\nclass mtbi_inference:\n\tdef __init__(self, text, types):\n\t\tclear_session()\n\t\ttry:\n\t\t\twith open(\"tfid_vectorizer.pkl\", \"rb\") as tf:\n\t\t\t\tself.tfid_vectorizer = pickle.load(tf)\n\t\t\tprint(\"Tfid vectorizer restored...\")\n\t\texcept:\n\t\t\tprint(\"Tfid vectorizer does not exist...\")\n\n\t\ttry:\n\t\t\twith open(\"tsvd_red.pkl\", \"rb\") as f:\n\t\t\t\tself.tsvd_red = pickle.load(f)\n\t\t\tprint(\"TSVD object restored...\")\n\t\texcept:\n\t\t\tprint(\"TSVD object does not exist...\")\n\n\t\ttry:\n\t\t\twith open(\"posts_tokenizer.pkl\", \"rb\") as f:\n\t\t\t\tself.posts_tokenizer = pickle.load(f)\n\t\t\tprint(\"Tokenizer restored...\")\n\t\texcept:\n\t\t\tprint(\"Tokenizer does not exist...\")\n\t\t\t\n\t\ttry:\n\t\t\twith open(\"scaler.pkl\", \"rb\") as sf:\n\t\t\t\tself.scaler = pickle.load(sf)\n\t\texcept FileNotFoundError:\n\t\t\tprint(\"Scaler object does not exist...\")\n\n\t\tself.text = text\n\t\tself.types = types\n\t\tself.model = load_model(\"dense_model.h5\")\n\n\t\tself.stemmer = SnowballStemmer(\"english\")\n\n\tdef preprocess_pipeline(self):\n\n\t\tdef count_words(x):\n\t\t\treturn len(x.split())\n\n\t\tdef element_ratio(x, count):\n\t\t\tlength = len(x.split())\n\t\t\tif length > 0:\n\t\t\t\treturn float(count/length)\n\t\t\treturn 0.\n\n\t\tdef unique_words_ratio(x):\n\t\t\twords = x.split()\n\n\t\t\tif len(words) > 0:\n\t\t\t\treturn float(len(set(words))/len(words))\n\t\t\treturn 0.\n\n\t\tdef process_emoji(x):\n\t\t regex = r\":[^\\s]+:\"\n\t\t count = len(re.findall(regex, x))\n\t\t return element_ratio(x, count)\n\t\t #return len(re.findall(regex, x))\n\n\t\tdef exclamation_mark_count(x):\n\t\t regex = r\"!\"\n\t\t count = len(re.findall(regex, x))\n\t\t return element_ratio(x, count)\n\n\t\tdef question_mark_count(x):\n\t\t regex = r\"\\?\"\n\t\t count = 
len(re.findall(regex, x))\n\t\t return element_ratio(x, count)\n\n\t\tdef capital_letters_count(x):\n\t\t regex = r\"[^A-Z]\"\n\t\t count = len(re.sub(regex, \"\", x))\n\t\t return element_ratio(x, count)\n\n\t\tdef capital_letters_ratio(x):\n\t\t regex = r\"[^A-Z]\"\n\t\t cap = re.sub(regex, \"\", x)\n\t\t if len(x) > 0:\n\t\t return float(len(cap)/len(x))\n\t\t return 0.0\n\n\t\tdef ellypsis_count(x):\n\t\t regex = r\"\\.\\.\\.\"\n\t\t count = len(re.findall(regex, x))\n\t\t return element_ratio(x, count)\n\n\t\tdef emoji_faces_count(x):\n\t\t regex = r\"[;:]+[_-]?[\\)\\(]\"\n\t\t count = len(re.findall(regex, x))\n\t\t return element_ratio(x, count)\n\n\t\tdef capitalized_words_ratio(x):\n\t\t regex = r\"[A-Z]{2,}\\s\"\n\t\t count = len(re.findall(regex, x))\n\t\t return element_ratio(x, count)\n\n\n\t\tdef pof_list(x):\n\t\t pof = nltk.pos_tag(word_tokenize(x))\n\t\t result = [p for _, p in pof]\n\t\t return result\n\n\t\tf_vec = []\n\n\t\t#f_vec.append(count_words(text))\n\t\tf_vec.append(process_emoji(self.text))\n\t\tf_vec.append(exclamation_mark_count(self.text))\n\t\tf_vec.append(question_mark_count(self.text))\n\t\tf_vec.append(capital_letters_count(self.text))\n\t\tf_vec.append(ellypsis_count(self.text))\n\t\tf_vec.append(emoji_faces_count(self.text))\n\t\tf_vec.append(capitalized_words_ratio(self.text))\n\t\tf_vec.append(unique_words_ratio(self.text))\n\n\t\tf_vec_poflist = pof_list(self.text)\n\n\t\tconvtag_dict={'ADJ':['JJ','JJR','JJS'], 'ADP':['EX','TO'], 'ADV':['RB','RBR','RBS','WRB'], 'CONJ':['CC','IN'],'DET':['DT','PDT','WDT'],\n\t\t\t\t 'NOUN':['NN','NNS','NNP','NNPS'], 'NUM':['CD'],'PRT':['RP'],'PRON':['PRP','PRP$','WP','WP$'],\n\t\t\t\t 'VERB':['MD','VB','VBD','VBG','VBN','VBP','VBZ'],'.':['#','$',\"''\",'(',')',',','.',':'],'X':['FW','LS','UH']}\n\n\t\texpanded_convtag = {}\n\t\tfor pos_key, pos_list in convtag_dict.items():\n\t\t for el in pos_list:\n\t\t expanded_convtag[el] = pos_key\n\n\t\tdef count_pos(x):\n\t\t counter = defaultdict(float)\n\t\t x_length = len(x)\n\n\t\t for w in x:\n\t\t if w in expanded_convtag.keys():\n\t\t counter[expanded_convtag[w]] += 1\n\t\t else:\n\t\t counter[w] += 1\n\n\t\t for k, v in counter.items():\n\t\t counter[k] = v/x_length\n\n\t\t return counter\n\n\t\tpos_count = count_pos(f_vec_poflist)\n\t\tfor k in sorted(convtag_dict.keys()):\n\t\t\tf_vec.append(pos_count.get(k,0.0))\n\n\t\ttfid_features = self.tfid_vectorizer.transform([self.text])\n\t\ttfid_features_dimred = self.tsvd_red.transform(tfid_features)\n\t\tdense_features = np.hstack([f_vec, tfid_features_dimred.flatten()])\n\n\t\tscaled_dense_features = self.scaler.transform(dense_features[None, :])\n\n\t\treturn scaled_dense_features\n\n\tdef predict(self):\n\t\tprint(\"Prediction step: \")\n\t\tfeature = self.preprocess_pipeline()\n\t\tprint(feature.shape)\n\t\tprint(feature)\n\t\tpred = float(self.model.predict(feature))\n\t\tprint(f\"You are {max(pred, 1-pred)*100:.2f} {self.types[int(pred<=0.5)]}...\")\n\t\treturn pred\n\t\t\n#mtbi_obj = mtbi_inference(sample_text, types=[\"Introvert\", \"Extrovert\"])\n#mtbi_obj.predict()\n","sub_path":"pipe2.py","file_name":"pipe2.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"545864737","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport json\nfrom glob import glob\nimport shutil\n\n\ndef main(geojson_file):\n \"\"\"Main.\"\"\"\n\n # read geojson export from QGIS\n with open(geojson_file) as f:\n j = json.load(f)\n\n # 
create site datasets\n for feat in j['features']:\n props = feat['properties']\n if feat.get('geometry', None) is None: continue\n id = \"site-{}-keelvol_{}-keelpage_{}\".format(\"_\".join(props['Site'].lower().split()),\n props['KeelVol'], props['KeelPage'])\n dataset_dir = os.path.join('test', 'datasets-kibana', id)\n if not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir, 0o755)\n ds_file = os.path.join(dataset_dir, \"{}.dataset.json\".format(id))\n with open(ds_file, 'w') as f:\n json.dump({\n 'version': '0.1',\n 'label': props['Site'],\n 'location': feat['geometry']['coordinates'],\n 'metadata': props,\n }, f, indent=2, sort_keys=True)\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n","sub_path":"data/create_site_datasets-kibana.py","file_name":"create_site_datasets-kibana.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"296952389","text":"# Copyright 2021 The XLS Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains build macros for XLS.\n\"\"\"\n\nload(\n \"//xls/build_rules:xls_config_rules.bzl\",\n \"enable_generated_file_wrapper\",\n)\nload(\n \"//xls/build_rules:xls_rules.bzl\",\n \"get_xls_dslx_verilog_generated_files\",\n \"xls_dslx_verilog\",\n)\n\ndef xls_dslx_verilog_macro(\n name,\n dep,\n ir_conv_args = {},\n opt_ir_args = {},\n codegen_args = {},\n enable_generated_file = True,\n enable_presubmit_generated_file = False,\n **kwargs):\n \"\"\"A macro wrapper for the 'xls_dslx_verilog' rule.\n\n The macro instantiates the 'xls_dslx_verilog' rule and\n 'enable_generated_file_wrapper' function. The generated files of the rule\n are listed in the outs attribute of the rule.\n\n Args:\n name: The name of the rule.\n dep: The 'xls_dslx_module_library' target used for dependency. See 'dep'\n attribute from the 'xls_dslx_verilog' rule.\n ir_conv_args: IR conversion Arguments. See 'ir_conv_args' attribute from\n the 'xls_dslx_ir' rule.\n opt_ir_args: IR optimization Arguments. See 'opt_ir_args' attribute from\n the 'xls_ir_opt_ir' rule.\n codegen_args: Codegen Arguments. See 'codegen_args' attribute from the\n 'xls_ir_verilog' rule.\n enable_generated_file: See 'enable_generated_file' from\n 'enable_generated_file_wrapper' function.\n enable_presubmit_generated_file: See 'enable_presubmit_generated_file'\n from 'enable_generated_file_wrapper' function.\n **kwargs: Positional arguments. 
Named arguments.\n \"\"\"\n\n # Type check input\n if type(name) != type(\"\"):\n fail(\"Argument 'name' must be of string type.\")\n if type(dep) != type(\"\"):\n fail(\"Argument 'dep' must be of string type.\")\n if type(ir_conv_args) != type({}):\n fail(\"Argument 'ir_conv_args' must be of dictionary type.\")\n if type(opt_ir_args) != type({}):\n fail(\"Argument 'opt_ir_args' must be of dictionary type.\")\n if type(codegen_args) != type({}):\n fail(\"Argument 'codegen_args' must be of dictionary type.\")\n if type(enable_generated_file) != type(True):\n fail(\"Argument 'enable_generated_file' must be of boolean type.\")\n if type(enable_presubmit_generated_file) != type(True):\n fail(\"Argument 'enable_presubmit_generated_file' must be \" +\n \"of boolean type.\")\n\n xls_dslx_verilog(\n name = name,\n dep = dep,\n ir_conv_args = ir_conv_args,\n opt_ir_args = opt_ir_args,\n codegen_args = codegen_args,\n outs = get_xls_dslx_verilog_generated_files(name, codegen_args),\n **kwargs\n )\n enable_generated_file_wrapper(\n wrapped_target = name,\n tags = kwargs.get(\"tags\", None),\n testonly = kwargs.get(\"testonly\", None),\n enable_generated_file = enable_generated_file,\n enable_presubmit_generated_file = enable_presubmit_generated_file,\n )\n\ndef xls_dslx_cpp_type_library(\n name,\n src):\n \"\"\"Creates a cc_library target for transpiled DSLX types.\n\n This macros invokes the DSLX-to-C++ transpiler and compiles the result as\n a cc_library.\n\n Args:\n name: The name of the eventual cc_library.\n src: The DSLX file whose types to compile as C++.\n \"\"\"\n native.genrule(\n name = name + \"_generate_sources\",\n srcs = [src],\n outs = [\n name + \".h\",\n name + \".cc\",\n ],\n tools = [\n \"//xls/dslx:cpp_transpiler_main\",\n ],\n cmd = \"$(location //xls/dslx:cpp_transpiler_main) \" +\n \"--output_header_path=$(@D)/{}.h \".format(name) +\n \"--output_source_path=$(@D)/{}.cc \".format(name) +\n \"$(location {})\".format(src),\n )\n\n native.cc_library(\n name = name,\n srcs = [\":\" + name + \".cc\"],\n hdrs = [\":\" + name + \".h\"],\n deps = [\n \"@com_google_absl//absl/base:core_headers\",\n \"@com_google_absl//absl/status:status\",\n \"@com_google_absl//absl/status:statusor\",\n \"@com_google_absl//absl/types:span\",\n \"//xls/public:value\",\n ],\n )\n","sub_path":"xls/build_rules/xls_macros.bzl","file_name":"xls_macros.bzl","file_ext":"bzl","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"547357097","text":"import argparse\nfrom google.cloud import vision\nfrom google.cloud.vision import types\nimport io\nfrom gcloud import storage\n\ndef create_uri(bucket_name, blob_name):\n return \"gs://\" + bucket_name + \"/\" + blob_name\n\ndef get_text(image_uri):\n client = vision.ImageAnnotatorClient()\n image = vision.types.Image()\n image.source.image_uri = image_uri\n response = client.document_text_detection(image=image)\n document = response\n return response\n\ndef get_all_text(bucket_name, directory):\n client = storage.Client(project='medical-extraction')\n bucket = client.get_bucket(bucket_name)\n full_text = []\n\n for blob in bucket.list_blobs(prefix=directory):\n print(\"----------------------\" + blob.name + \"---------------------------\")\n next_doc = get_text(create_uri(bucket_name,blob.name)).full_text_annotation.text.splitlines()\n full_text += next_doc\n # print(full_text)\n for line in next_doc:\n print (line)\n\n return 
full_text\n","sub_path":"request_handling.py","file_name":"request_handling.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"515858693","text":"\n\n\n\ndef find_matching_files(file_path, dictionary):\n \"\"\"\n creates a list of match\n :param file_path:\n :param dictionary:\n :return:\n \"\"\"\n from cgl.core.path import remove_root\n ignore = ['.json', '.msd']\n if not self.single_asset_name:\n no_ext_path, ext = os.path.splitext(file_path)\n glob_pattern = '{}'.format(no_ext_path)\n else:\n directory = os.path.dirname(self.scene_object.copy(context='render').path_root)\n glob_pattern = '{}/{}.*'.format(directory, self.single_asset_name)\n print('\\tlooking for {}'.format(glob_pattern))\n files = glob.glob(glob_pattern)\n for f in files:\n f = f.replace('\\\\', '/')\n _, ext = os.path.splitext(f)\n ext = ext.replace('.', '')\n dictionary[str(ext)] = remove_root(f)\n for key in dictionary:\n print('\\t\\t{}: {}'.format(key, dictionary[key]))\n return dictionary","sub_path":"cgl/plugins/MagicSceneDescription.py","file_name":"MagicSceneDescription.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"78946331","text":"import pandas as pd\n\n# part 1: key(hashtag) search\ntry:\n df_key_list = pd.read_csv('../search_settings/key_list.csv')\nexcept FileNotFoundError:\n print('can not locate \"../search_settings/key_list.csv\", please check if \"key_list.csv\" is in the correct directory')\n exit(1)\n\n# VARS TO IMPORT\nkeys = df_key_list[df_key_list.columns[0]].tolist()\nsince = '2020-06-01' # starting date of search result to return\nfwr_count = 1000 # follower_count >=, any int\nfav_count = 0 # favorite_count >=, any int\nrt_count = 0 # retweet_count >=, any int\n\n# part 2: account-keyword search\ntry:\n df_account_key_list = pd.read_csv('../search_settings/account_key_list.csv')\nexcept:\n print(\n 'can not locate \"../search_settings/account_key_list.csv\", please check if \"account_list_key.csv\" is in the correct directory')\n exit(1)\n\ntry:\n # VARS TO IMORT\n df_account_list = pd.read_csv('../search_settings/account_list.csv')\nexcept:\n print(\n 'can not locate \"./search_settings/account_list.csv\", please check if \"account_list_csv\" is in the correct directory')\n exit(1)\n\n# create account-key pair into a dictionary\naccount_list = df_account_list[df_account_list.columns[1]].tolist()\ntemp_dict = dict()\nfor acc in account_list:\n temp_dict[acc] = df_account_key_list[df_account_key_list.columns[0]].tolist()\n\n# VARS TO IMPORT\naccount_key_dict = temp_dict # account-ket pairs\n\n# VARS TO IMPORT, aws S3 details\nmaster_bucket = '' # name of output bucket, avoid global bucket name duplication in aws S3\nregion = 'us-east-1' # region of output bucket\n","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"30667594","text":"# DB API for JEDI\n\nimport datetime\n\nfrom pandajedi.jediconfig import jedi_config\n\nfrom pandaserver.taskbuffer import TaskBuffer\nfrom pandaserver.brokerage.SiteMapper import SiteMapper\nimport JediDBProxyPool\nfrom Interaction import CommandReceiveInterface\n\n# logger\nfrom pandacommon.pandalogger.PandaLogger import PandaLogger\nlogger = PandaLogger().getLogger(__name__.split('.')[-1])\n\n# use customized proxy 
pool\nTaskBuffer.DBProxyPool = JediDBProxyPool.DBProxyPool\n\n\nclass JediTaskBuffer(TaskBuffer.TaskBuffer,CommandReceiveInterface):\n\n # constructor\n def __init__(self,conn):\n CommandReceiveInterface.__init__(self,conn)\n TaskBuffer.TaskBuffer.__init__(self)\n TaskBuffer.TaskBuffer.init(self,jedi_config.db.dbhost,\n jedi_config.db.dbpasswd,\n nDBConnection=1)\n # site mapper\n self.siteMapper = SiteMapper(self)\n # update time for site mapper\n self.dateTimeForSM = datetime.datetime.utcnow()\n logger.debug('__init__')\n\n\n\n # get SiteMapper\n def getSiteMapper(self):\n timeNow = datetime.datetime.utcnow()\n if datetime.datetime.utcnow()-self.dateTimeForSM > datetime.timedelta(minutes=10):\n self.siteMapper = SiteMapper(self)\n self.dateTimeForSM = timeNow\n return self.siteMapper\n\n \n\n # get work queue map\n def getWorkQueueMap(self):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # get\n retVal = proxy.getWorkQueueMap()\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get the list of datasets to feed contents to DB\n def getDatasetsToFeedContents_JEDI(self,vo=None,prodSourceLabel=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # get\n retVal = proxy.getDatasetsToFeedContents_JEDI(vo,prodSourceLabel)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # feed files to the JEDI contents table\n def insertFilesForDataset_JEDI(self,datasetSpec,fileMap,datasetState,stateUpdateTime,\n nEventsPerFile,nEventsPerJob,maxAttempt,firstEventNumber,\n nMaxFiles,nMaxEvents,useScout,fileList,useFilesWithNewAttemptNr,\n nFilesPerJob,nEventsPerRange,nChunksForScout,includePatt,\n excludePatt,xmlConfig,noWaitParent,parent_tid,pid,maxFailure,\n useRealNumEvents,respectLB,tgtNumEventsPerJob,skipFilesUsedBy,ramCount):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.insertFilesForDataset_JEDI(datasetSpec,fileMap,datasetState,stateUpdateTime,\n nEventsPerFile,nEventsPerJob,maxAttempt,\n firstEventNumber,nMaxFiles,nMaxEvents,\n useScout,fileList,useFilesWithNewAttemptNr,\n nFilesPerJob,nEventsPerRange,nChunksForScout,\n includePatt,excludePatt,xmlConfig,\n noWaitParent,parent_tid,pid,maxFailure,\n useRealNumEvents,respectLB,\n tgtNumEventsPerJob,skipFilesUsedBy,ramCount)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get files from the JEDI contents table with jediTaskID and/or datasetID\n def getFilesInDatasetWithID_JEDI(self,jediTaskID=None,datasetID=None,nFiles=None,status=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getFilesInDatasetWithID_JEDI(jediTaskID,datasetID,nFiles,status)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # insert dataset to the JEDI datasets table\n def insertDataset_JEDI(self,datasetSpec):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.insertDataset_JEDI(datasetSpec)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # update JEDI dataset\n def updateDataset_JEDI(self,datasetSpec,criteria,lockTask=False):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.updateDataset_JEDI(datasetSpec,criteria,lockTask)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # update JEDI dataset attributes\n def updateDatasetAttributes_JEDI(self,jediTaskID,datasetID,attributes):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # 
exec\n retVal = proxy.updateDatasetAttributes_JEDI(jediTaskID,datasetID,attributes)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI dataset attributes\n def getDatasetAttributes_JEDI(self,jediTaskID,datasetID,attributes):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getDatasetAttributes_JEDI(jediTaskID,datasetID,attributes)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI dataset attributes with map\n def getDatasetAttributesWithMap_JEDI(self,jediTaskID,criteria,attributes):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getDatasetAttributesWithMap_JEDI(jediTaskID,criteria,attributes)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI dataset with jediTaskID and datasetID\n def getDatasetWithID_JEDI(self,jediTaskID,datasetID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getDatasetWithID_JEDI(jediTaskID,datasetID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI datasets with jediTaskID\n def getDatasetsWithJediTaskID_JEDI(self,jediTaskID,datasetTypes=None,getFiles=False):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retStat,datasetSpecList = proxy.getDatasetsWithJediTaskID_JEDI(jediTaskID,datasetTypes=datasetTypes)\n if retStat == True and getFiles == True:\n for datasetSpec in datasetSpecList:\n # read files\n retStat,fileSpecList = proxy.getFilesInDatasetWithID_JEDI(jediTaskID,datasetSpec.datasetID,None,None)\n if retStat == False:\n break\n for fileSpec in fileSpecList:\n datasetSpec.addFile(fileSpec)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retStat,datasetSpecList\n\n\n\n # insert task to the JEDI tasks table\n def insertTask_JEDI(self,taskSpec):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.insertTask_JEDI(taskSpec)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # update JEDI task\n def updateTask_JEDI(self,taskSpec,criteria,oldStatus=None,updateDEFT=False,insertUnknown=None,\n setFrozenTime=True,setOldModTime=False):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.updateTask_JEDI(taskSpec,criteria,oldStatus,updateDEFT,insertUnknown,\n setFrozenTime,setOldModTime)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # update JEDI task lock\n def updateTaskLock_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.updateTaskLock_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # update JEDI task status by ContentsFeeder\n def updateTaskStatusByContFeeder_JEDI(self,jediTaskID,taskSpec=None,getTaskStatus=False,pid=None,\n setFrozenTime=True,useWorldCloud=False):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.updateTaskStatusByContFeeder_JEDI(jediTaskID,taskSpec,getTaskStatus,\n pid,setFrozenTime,useWorldCloud)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI task with jediTaskID\n def getTaskWithID_JEDI(self,jediTaskID,fullFlag=False,lockTask=False,pid=None,lockInterval=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTaskWithID_JEDI(jediTaskID,fullFlag,lockTask,pid,lockInterval)\n # release proxy\n 
self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI task and tasks with ID and lock it\n def getTaskDatasetsWithID_JEDI(self,jediTaskID,pid,lockTask=True):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTaskDatasetsWithID_JEDI(jediTaskID,pid,lockTask)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI tasks with selection criteria\n def getTaskIDsWithCriteria_JEDI(self,criteria,nTasks=50):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTaskIDsWithCriteria_JEDI(criteria,nTasks)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI tasks to be finished\n def getTasksToBeFinished_JEDI(self,vo,prodSourceLabel,pid,nTasks=50):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTasksToBeFinished_JEDI(vo,prodSourceLabel,pid,nTasks)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get tasks to be processed\n def getTasksToBeProcessed_JEDI(self,pid,vo,workQueue,prodSourceLabel,cloudName,\n nTasks=50,nFiles=100,simTasks=None,minPriority=None,\n maxNumJobs=None,typicalNumFilesMap=None,\n fullSimulation=False,simDatasets=None,\n mergeUnThrottled=None,readMinFiles=False):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTasksToBeProcessed_JEDI(pid,vo,workQueue,prodSourceLabel,cloudName,nTasks,nFiles,\n simTasks=simTasks,\n minPriority=minPriority,\n maxNumJobs=maxNumJobs,\n typicalNumFilesMap=typicalNumFilesMap,\n fullSimulation=fullSimulation,\n simDatasets=simDatasets,\n mergeUnThrottled=mergeUnThrottled,\n readMinFiles=readMinFiles)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get tasks to be processed\n def checkWaitingTaskPrio_JEDI(self,vo,workQueue,prodSourceLabel,cloudName):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTasksToBeProcessed_JEDI(None,vo,workQueue,prodSourceLabel,\n cloudName,isPeeking=True)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n \n # get job statistics with work queue\n def getJobStatisticsWithWorkQueue_JEDI(self,vo,prodSourceLabel,minPriority=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getJobStatisticsWithWorkQueue_JEDI(vo,prodSourceLabel,minPriority)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get job statistics with work queue per cloud\n def getJobStatWithWorkQueuePerCloud_JEDI(self,vo,prodSourceLabel,cloud=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getJobStatisticsWithWorkQueue_JEDI(vo,prodSourceLabel,cloud=cloud)\n # release proxy\n self.proxyPool.putProxy(proxy)\n if retVal[0] == False:\n return retVal\n # make per-cloud map\n retMap = {}\n for computingSite,siteMap in retVal[1].iteritems():\n for cloud,cloudMap in siteMap.iteritems():\n # add cloud\n if not retMap.has_key(cloud):\n retMap[cloud] = {}\n for workQueue_ID,workQueueMap in cloudMap.iteritems():\n # add work queue\n if not retMap[cloud].has_key(workQueue_ID):\n retMap[cloud][workQueue_ID] = {}\n for jobStatus,nCount in workQueueMap.iteritems():\n # add job status\n if not retMap[cloud][workQueue_ID].has_key(jobStatus):\n retMap[cloud][workQueue_ID][jobStatus] = 0\n # add\n retMap[cloud][workQueue_ID][jobStatus] += nCount\n # return\n return retVal[0],retMap\n\n\n\n # generate output files 
for task\n def getOutputFiles_JEDI(self,jediTaskID,provenanceID,simul,instantiateTmpl=False,instantiatedSite=None,\n isUnMerging=False,isPrePro=False,xmlConfigJob=None,siteDsMap=None,middleName='',\n registerDatasets=False,parallelOutMap=None,fileIDPool=[]):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getOutputFiles_JEDI(jediTaskID,provenanceID,simul,instantiateTmpl,instantiatedSite,\n isUnMerging,isPrePro,xmlConfigJob,siteDsMap,middleName,\n registerDatasets,parallelOutMap,fileIDPool)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # insert output file templates\n def insertOutputTemplate_JEDI(self,templates):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.insertOutputTemplate_JEDI(templates)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # insert JobParamsTemplate\n def insertJobParamsTemplate_JEDI(self,jediTaskID,templ):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.insertJobParamsTemplate_JEDI(jediTaskID,templ)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # insert TaskParams\n def insertTaskParams_JEDI(self,vo,prodSourceLabel,userName,taskName,taskParams,parent_tid=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.insertTaskParams_JEDI(vo,prodSourceLabel,userName,taskName,taskParams,parent_tid)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # reset unused files\n def resetUnusedFiles_JEDI(self,jediTaskID,inputChunk):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.resetUnusedFiles_JEDI(jediTaskID,inputChunk)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # insert TaskParams\n def insertUpdateTaskParams_JEDI(self,jediTaskID,vo,prodSourceLabel,updateTaskParams,insertTaskParamsList):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.insertUpdateTaskParams_JEDI(jediTaskID,vo,prodSourceLabel,updateTaskParams,insertTaskParamsList)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # set missing files\n def setMissingFiles_JEDI(self,jediTaskID,datasetID,fileIDs):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.setMissingFiles_JEDI(jediTaskID,datasetID,fileIDs)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # rescue picked files\n def rescuePickedFiles_JEDI(self,vo,prodSourceLabel,waitTime):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.rescuePickedFiles_JEDI(vo,prodSourceLabel,waitTime)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # rescue unlocked tasks with picked files\n def rescueUnLockedTasksWithPicked_JEDI(self,vo,prodSourceLabel,waitTime,pid):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.rescueUnLockedTasksWithPicked_JEDI(vo,prodSourceLabel,waitTime,pid)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # unlock tasks\n def unlockTasks_JEDI(self,vo,prodSourceLabel,waitTime,hostName=None,pgid=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.unlockTasks_JEDI(vo,prodSourceLabel,waitTime,hostName,pgid)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get the size of input files which will be 
copied to the site\n def getMovingInputSize_JEDI(self,siteName):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getMovingInputSize_JEDI(siteName)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get typical number of input files for each workQueue+processingType\n def getTypicalNumInput_JEDI(self,vo,prodSourceLabel,workQueue):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTypicalNumInput_JEDI(vo,prodSourceLabel,workQueue)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get highest prio jobs with workQueueID\n def getHighestPrioJobStat_JEDI(self,prodSourceLabel,cloudName,workQueueID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getHighestPrioJobStat_JEDI(prodSourceLabel,cloudName,workQueueID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get the list of tasks to refine\n def getTasksToRefine_JEDI(self,vo=None,prodSourceLabel=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTasksToRefine_JEDI(vo,prodSourceLabel)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get task parameters with jediTaskID\n def getTaskParamsWithID_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTaskParamsWithID_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # register task/dataset/templ/param in a single transaction\n def registerTaskInOneShot_JEDI(self,jediTaskID,taskSpec,inMasterDatasetSpec,\n inSecDatasetSpecList,outDatasetSpecList,\n outputTemplateMap,jobParamsTemplate,taskParams,\n unmergeMasterDatasetSpec,unmergeDatasetSpecMap,\n uniqueTaskName,oldTaskStatus):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.registerTaskInOneShot_JEDI(jediTaskID,taskSpec,inMasterDatasetSpec,\n inSecDatasetSpecList,outDatasetSpecList,\n outputTemplateMap,jobParamsTemplate,taskParams,\n unmergeMasterDatasetSpec,unmergeDatasetSpecMap,\n uniqueTaskName,oldTaskStatus)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # set tasks to be assigned\n def setScoutJobDataToTasks_JEDI(self,vo,prodSourceLabel):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.setScoutJobDataToTasks_JEDI(vo,prodSourceLabel)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # prepare tasks to be finished\n def prepareTasksToBeFinished_JEDI(self,vo,prodSourceLabel,nTasks=50,simTasks=None,pid='lock',noBroken=False):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.prepareTasksToBeFinished_JEDI(vo,prodSourceLabel,nTasks,simTasks,pid,noBroken)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get tasks to be assigned\n def getTasksToAssign_JEDI(self,vo,prodSourceLabel,workQueue):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTasksToAssign_JEDI(vo,prodSourceLabel,workQueue)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get tasks to check task assignment\n def getTasksToCheckAssignment_JEDI(self,vo,prodSourceLabel,workQueue):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTasksToCheckAssignment_JEDI(vo,prodSourceLabel,workQueue)\n # release proxy\n 
self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # calculate RW with a priority\n def calculateRWwithPrio_JEDI(self,vo,prodSourceLabel,workQueue,priority):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.calculateRWwithPrio_JEDI(vo,prodSourceLabel,workQueue,priority)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # calculate RW for tasks\n def calculateTaskRW_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.calculateTaskRW_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # calculate WORLD RW with a priority\n def calculateWorldRWwithPrio_JEDI(self,vo,prodSourceLabel,workQueue,priority):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.calculateWorldRWwithPrio_JEDI(vo,prodSourceLabel,workQueue,priority)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # calculate WORLD RW for tasks\n def calculateTaskWorldRW_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.calculateTaskWorldRW_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # set cloud to tasks\n def setCloudToTasks_JEDI(self,taskCloudMap):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.setCloudToTasks_JEDI(taskCloudMap)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get the list of tasks to exec command\n def getTasksToExecCommand_JEDI(self,vo,prodSourceLabel):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTasksToExecCommand_JEDI(vo,prodSourceLabel)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get the list of PandaIDs for a task\n def getPandaIDsWithTask_JEDI(self,jediTaskID,onlyActive):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getPandaIDsWithTask_JEDI(jediTaskID,onlyActive)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get the list of queued PandaIDs for a task\n def getQueuedPandaIDsWithTask_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getQueuedPandaIDsWithTask_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get jediTaskID/datasetID/FileID with dataset and file names\n def getIDsWithFileDataset_JEDI(self,datasetName,fileName,fileType):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getIDsWithFileDataset_JEDI(datasetName,fileName,fileType)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get PandaID for a file\n def getPandaIDWithFileID_JEDI(self,jediTaskID,datasetID,fileID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getPandaIDWithFileID_JEDI(jediTaskID,datasetID,fileID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI files for a job\n def getFilesWithPandaID_JEDI(self,pandaID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getFilesWithPandaID_JEDI(pandaID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # update task parameters\n def updateTaskParams_JEDI(self,jediTaskID,taskParams):\n # get DBproxy\n proxy = 
self.proxyPool.getProxy()\n # exec\n retVal = proxy.updateTaskParams_JEDI(jediTaskID,taskParams)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # reactivate pending tasks\n def reactivatePendingTasks_JEDI(self,vo,prodSourceLabel,timeLimit,timeoutLimit=None,minPriority=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.reactivatePendingTasks_JEDI(vo,prodSourceLabel,timeLimit,timeoutLimit,minPriority)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # restart contents update\n def restartTasksForContentsUpdate_JEDI(self,vo,prodSourceLabel,timeLimit=30):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.restartTasksForContentsUpdate_JEDI(vo,prodSourceLabel,timeLimit=timeLimit)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # kick exhausted tasks\n def kickExhaustedTasks_JEDI(self,vo,prodSourceLabel,timeLimit):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.kickExhaustedTasks_JEDI(vo,prodSourceLabel,timeLimit)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get file spec of lib.tgz\n def getBuildFileSpec_JEDI(self,jediTaskID,siteName,associatedSites):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getBuildFileSpec_JEDI(jediTaskID,siteName,associatedSites)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get file spec of old lib.tgz\n def getOldBuildFileSpec_JEDI(self,jediTaskID,datasetID,fileID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getOldBuildFileSpec_JEDI(jediTaskID,datasetID,fileID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # insert lib dataset and files\n def insertBuildFileSpec_JEDI(self,jobSpec,reusedDatasetID,simul):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.insertBuildFileSpec_JEDI(jobSpec,reusedDatasetID,simul)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get sites used by a task\n def getSitesUsedByTask_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getSitesUsedByTask_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get random seed\n def getRandomSeed_JEDI(self,jediTaskID,simul):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getRandomSeed_JEDI(jediTaskID,simul)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get preprocess metadata\n def getPreprocessMetadata_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getPreprocessMetadata_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get log dataset for preprocessing\n def getPreproLog_JEDI(self,jediTaskID,simul):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getPreproLog_JEDI(jediTaskID,simul)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get sites with best connections to source\n def getBestNNetworkSites_JEDI(self,source,protocol,nSites,threshold,cutoff,maxWeight):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getBestNNetworkSites_JEDI(source,protocol,nSites,threshold,\n 
cutoff,maxWeight)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n\n # get jobsetID\n def getUserJobsetID_JEDI(self,userName):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n tmpJobID,tmpDummy,tmpStat = proxy.getUserParameter(userName,1,None)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return tmpStat,tmpJobID\n\n\n\n # retry or incrementally execute a task\n def retryTask_JEDI(self,jediTaskID,commStr,maxAttempt=5):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.retryTask_JEDI(jediTaskID,commStr,maxAttempt)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # append input datasets for incremental execution\n def appendDatasets_JEDI(self,jediTaskID,inMasterDatasetSpecList,inSecDatasetSpecList):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.appendDatasets_JEDI(jediTaskID,inMasterDatasetSpecList,inSecDatasetSpecList)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # record retry history\n def recordRetryHistory_JEDI(self,jediTaskID,oldNewPandaIDs,relationType):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.recordRetryHistory_JEDI(jediTaskID,oldNewPandaIDs,relationType)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI tasks with a selection criteria\n def getTasksWithCriteria_JEDI(self,vo,prodSourceLabel,taskStatusList,taskCriteria={},datasetCriteria={},\n taskParamList=[],datasetParamList=[]):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTasksWithCriteria_JEDI(vo,prodSourceLabel,taskStatusList,taskCriteria,datasetCriteria,\n taskParamList,datasetParamList)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # check parent task status \n def checkParentTask_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.checkParentTask_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get task status \n def getTaskStatus_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTaskStatus_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get lib.tgz for waiting jobs\n def getLibForWaitingRunJob_JEDI(self,vo,prodSourceLabel,checkInterval):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getLibForWaitingRunJob_JEDI(vo,prodSourceLabel,checkInterval)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get tasks to get reassigned\n def getTasksToReassign_JEDI(self,vo=None,prodSourceLabel=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTasksToReassign_JEDI(vo,prodSourceLabel)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # kill child tasks\n def killChildTasks_JEDI(self,jediTaskID,taskStatus):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.killChildTasks_JEDI(jediTaskID,taskStatus)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # kick child tasks\n def kickChildTasks_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.kickChildTasks_JEDI(jediTaskID)\n # release proxy\n 
self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n\n # lock task\n def lockTask_JEDI(self,jediTaskID,pid):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.lockTask_JEDI(jediTaskID,pid)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get successful files\n def getSuccessfulFiles_JEDI(self,jediTaskID,datasetID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getSuccessfulFiles_JEDI(jediTaskID,datasetID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # unlock a single task\n def unlockSingleTask_JEDI(self,jediTaskID,pid):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.unlockSingleTask_JEDI(jediTaskID,pid)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # throttle JEDI tasks\n def throttleTasks_JEDI(self,vo,prodSourceLabel,waitTime):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.throttleTasks_JEDI(vo,prodSourceLabel,waitTime)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # throttle a JEDI task\n def throttleTask_JEDI(self,jediTaskID,waitTime,errorDialog):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.throttleTask_JEDI(jediTaskID,waitTime,errorDialog)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # release throttled tasks\n def releaseThrottledTasks_JEDI(self,vo,prodSourceLabel):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.releaseThrottledTasks_JEDI(vo,prodSourceLabel)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # release throttled task\n def releaseThrottledTask_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.releaseThrottledTask_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get throttled users\n def getThrottledUsersTasks_JEDI(self,vo,prodSourceLabel):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getThrottledUsersTasks_JEDI(vo,prodSourceLabel)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # lock process\n def lockProcess_JEDI(self,vo,prodSourceLabel,cloud,workqueue_id,pid,forceOption=False,timeLimit=5):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.lockProcess_JEDI(vo,prodSourceLabel,cloud,workqueue_id,pid,forceOption,timeLimit)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # unlock process\n def unlockProcess_JEDI(self,vo,prodSourceLabel,cloud,workqueue_id,pid):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.unlockProcess_JEDI(vo,prodSourceLabel,cloud,workqueue_id,pid)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # unlock process with PID\n def unlockProcessWithPID_JEDI(self,vo,prodSourceLabel,workqueue_id,pid,useBase):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.unlockProcessWithPID_JEDI(vo,prodSourceLabel,workqueue_id,pid,useBase)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # check process lock\n def checkProcessLock_JEDI(self,vo,prodSourceLabel,cloud,workqueue_id,pid,checkBase):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = 
proxy.checkProcessLock_JEDI(vo,prodSourceLabel,cloud,workqueue_id,pid,checkBase)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get JEDI tasks to be assessed\n def getAchievedTasks_JEDI(self,vo,prodSourceLabel,timeLimit=60,nTasks=50):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getAchievedTasks_JEDI(vo,prodSourceLabel,timeLimit,nTasks)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get inactive sites\n def getInactiveSites_JEDI(self,flag,timeLimit):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getInactiveSites_JEDI(flag,timeLimit)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get total walltime\n def getTotalWallTime_JEDI(self,vo,prodSourceLabel,workQueue,cloud=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTotalWallTime_JEDI(vo,prodSourceLabel,workQueue,cloud)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get total walltime\n def getTotalWallTime_JEDI(self,vo,prodSourceLabel,workQueue,cloud=None):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getTotalWallTime_JEDI(vo,prodSourceLabel,workQueue,cloud)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # check duplication with internal merge\n def checkDuplication_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.checkDuplication_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n # get network metrics for brokerage\n def getNetworkMetrics(self, dst, keyList):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getNetworkMetrics(dst, keyList)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n # get nuclei that have built up a long backlog\n def getBackloggedNuclei(self):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getBackloggedNuclei()\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n # get network metrics for brokerage\n def getPandaSiteToAtlasSiteMapping(self):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getPandaSiteToAtlasSiteMapping()\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n # get failure counts for a task\n def getFailureCountsForTask_JEDI(self,jediTaskID,timeWindow):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getFailureCountsForTask_JEDI(jediTaskID,timeWindow)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get old merge job PandaIDs\n def getOldMergeJobPandaIDs_JEDI(self,jediTaskID,pandaID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getOldMergeJobPandaIDs_JEDI(jediTaskID,pandaID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get active jumbo jobs for a task\n def getActiveJumboJobs_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.getActiveJumboJobs_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # get jobParms of the first job\n def getJobParamsOfFirstJob_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = 
proxy.getJobParamsOfFirstJob_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # bulk fetch fileIDs\n def bulkFetchFileIDs_JEDI(self,jediTaskID,nIDs):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.bulkFetchFileIDs_JEDI(jediTaskID,nIDs)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n\n\n # set del flag to events\n def setDelFlagToEvents_JEDI(self,jediTaskID):\n # get DBproxy\n proxy = self.proxyPool.getProxy()\n # exec\n retVal = proxy.setDelFlagToEvents_JEDI(jediTaskID)\n # release proxy\n self.proxyPool.putProxy(proxy)\n # return\n return retVal\n\n","sub_path":"pandajedi/jedicore/JediTaskBuffer.py","file_name":"JediTaskBuffer.py","file_ext":"py","file_size_in_byte":43998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"47132727","text":"# rdumtool\n# Copyright (C) 2019 Ryan Finnie\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301, USA.\n\nimport argparse\nimport sys\nimport os\nimport json\nimport time\nimport datetime\nimport logging\nimport threading\nimport datetime\nfrom . import __version__\nimport rdum\n\n\nclass MyThread(threading.Thread):\n def __init__(self, cmd, app):\n self.cmd = cmd\n self.app = app\n threading.Thread.__init__(self)\n\n def run(self):\n os.system('../cpu {} {}'.format(self.cmd, self.app))\n\n\ndef parse_args(argv=None):\n \"\"\"Parse user arguments.\"\"\"\n if argv is None:\n argv = sys.argv\n\n parser = argparse.ArgumentParser(\n description='rdumtool ({})'.format(__version__),\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=os.path.basename(argv[0]),\n )\n\n parser.add_argument(\n '--version', '-V', action='version',\n version=__version__,\n help='Report the program version',\n )\n\n parser.add_argument(\n '--quiet', '-q', action='store_true',\n help='Whether to display human-readable information to stderr',\n )\n parser.add_argument(\n '--debug', action='store_true',\n help='Print extra debugging information.',\n )\n parser.add_argument(\n '--device-type', '-t', choices=['UM24C', 'UM25C', 'UM34C'], default='UM24C',\n help='Device type',\n )\n device_group = parser.add_mutually_exclusive_group(required=False)\n device_group.add_argument(\n '--bluetooth-device', '-d',\n help='Bluetooth MAC address of the device',\n )\n device_group.add_argument(\n '--serial-device', '-s',\n help='Serial filename (e.g. 
/dev/rfcomm0) of the device',\n )\n parser.add_argument(\n '--json', action='store_true',\n help='Output JSON data',\n )\n parser.add_argument(\n '--watch', type=float, const=2.0, nargs='?', default=None,\n help='Repeat every WATCH seconds',\n )\n parser.add_argument(\n '--next-screen', action='store_true',\n help='Go to the next screen on the display',\n )\n parser.add_argument(\n '--previous-screen', action='store_true',\n help='[UM25C, UM34C] Go to the previous screen on the display',\n )\n parser.add_argument(\n '--rotate-screen', action='store_true',\n help='Rotate the screen 90 degrees clockwise',\n )\n parser.add_argument(\n '--set-data-group', type=int, choices=range(10), default=None,\n help='[UM25C, UM34C] Set the selected data group',\n )\n parser.add_argument(\n '--next-data-group', action='store_true',\n help='[UM24C] Change to the next data group',\n )\n parser.add_argument(\n '--clear-data-group', action='store_true',\n help='Clear the current data group',\n )\n parser.add_argument(\n '--set-record-threshold', type=float, choices=[x / 100 for x in range(31)], default=None,\n help='Set the recording threshold',\n )\n parser.add_argument(\n '--set-screen-brightness', type=int, choices=range(6), default=None,\n help='Set the screen brightness',\n )\n parser.add_argument(\n '--set-screen-timeout', type=int, choices=range(10), default=None,\n help='Set the screen timeout',\n )\n\n args = parser.parse_args(args=argv[1:])\n return args\n\n\nclass RDUMTool:\n #prints infor\n def print_json(self, response):\n # Get what we read from volts, amps and append timestamp\n return str(getattr(response, 'volts')) + ',' + str(getattr(response, 'amps')) + \",\" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S.%f') + \"\\n\"\n # out = {'amps': getattr(response, 'amps'), 'timestamp': time.time()}\n # out = {x: getattr(response, x) for x in response.labels}\n # out['data_groups'] = [{'amp_hours': x.amp_hours, 'watt_hours': x.watt_hours} for x in out['data_groups']]\n # out['collection_time'] = (out['collection_time'] - datetime.datetime.fromtimestamp(0)).total_seconds()\n\n # print(json.dumps(out))\n\n def print_human(self, response):\n logging.debug('DUMP: {}'.format(repr(response.dump())))\n charging_map = {\n rdum.CHARGING_UNKNOWN: 'Unknown / Normal',\n rdum.CHARGING_QC2: 'Quick Charge 2.0',\n rdum.CHARGING_QC3: 'Quick Charge 3.0',\n rdum.CHARGING_APP2_4A: 'Apple 2.4A',\n rdum.CHARGING_APP2_1A: 'Apple 2.1A',\n rdum.CHARGING_APP1_0A: 'Apple 1.0A',\n rdum.CHARGING_APP0_5A: 'Apple 0.5A',\n rdum.CHARGING_DCP1_5A: 'DCP 1.5A',\n rdum.CHARGING_SAMSUNG: 'Samsung',\n }\n print('USB: {:5.02f}V, {:6.03f}A, {:6.03f}W, {:6.01f}Ω'.format(\n response.volts,\n response.amps,\n response.watts,\n response.resistance,\n ))\n print('Data: {:5.02f}V(+), {:5.02f}V(-), charging mode: {}'.format(\n response.data_line_positive_volts,\n response.data_line_negative_volts,\n charging_map[response.charging_mode],\n ))\n print('Recording {:5}: {:8.03f}Ah, {:8.03f}Wh, {:6d} sec at >= {:4.02f}A'.format(\n '(on)' if response.recording else '(off)',\n response.record_amphours,\n response.record_watthours,\n response.record_seconds,\n response.record_threshold,\n ))\n\n def make_dgpart(response, idx):\n data_group = response.data_groups[idx]\n return '{}{:d}: {:8.03f}Ah, {:8.03f}Wh'.format(\n '*' if data_group.group == response.data_group_selected else ' ',\n data_group.group,\n data_group.amp_hours,\n data_group.watt_hours,\n )\n print('Data groups:')\n print(' {:32}{}'.format(\n make_dgpart(response, 
0),\n make_dgpart(response, 5),\n ))\n print(' {:32}{}'.format(\n make_dgpart(response, 1),\n make_dgpart(response, 6),\n ))\n print(' {:32}{}'.format(\n make_dgpart(response, 2),\n make_dgpart(response, 7),\n ))\n print(' {:32}{}'.format(\n make_dgpart(response, 3),\n make_dgpart(response, 8),\n ))\n print(' {:32}{}'.format(\n make_dgpart(response, 4),\n make_dgpart(response, 9),\n ))\n\n print('{:>5s}, temperature: {:3d}C ({:3d}F)'.format(\n self.args.device_type, response.temp_c, response.temp_f))\n print('Screen: {:d}/6, brightness: {:d}/5, timeout: {}'.format(\n response.screen_selected,\n response.screen_brightness,\n '{:d} min'.format(\n response.screen_timeout) if response.screen_timeout else 'off',\n ))\n if response.collection_time:\n print('Collection time: {}'.format(response.collection_time))\n\n def send_commands(self):\n # TODO: Verify UM25C/UM34C\n for arg, command_val, compat in [\n ('next_screen', b'\\xf1', ['UM24C', 'UM25C', 'UM34C']),\n ('rotate_screen', b'\\xf2', ['UM24C', 'UM25C', 'UM34C']),\n ('next_data_group', b'\\xf3', ['UM24C']),\n ('previous_screen', b'\\xf3', ['UM25C', 'UM34C']),\n ('clear_data_group', b'\\xf4', ['UM24C', 'UM25C', 'UM34C']),\n ('set_data_group', lambda x: bytes(\n [0xa0 + x]), ['UM25C', 'UM34C']),\n ('set_record_threshold', lambda x: bytes(\n [0xb0 + int(x * 100)]), ['UM24C', 'UM25C', 'UM34C']),\n ('set_screen_brightness', lambda x: bytes(\n [0xd0 + x]), ['UM24C', 'UM25C', 'UM34C']),\n ('set_screen_timeout', lambda x: bytes(\n [0xe0 + x]), ['UM24C', 'UM25C', 'UM34C']),\n ]:\n arg_val = getattr(self.args, arg)\n if (arg_val is None) or (arg_val is False):\n continue\n if self.args.device_type not in compat:\n logging.warning(\n '{} not supported on this device, ignoring'.format(arg))\n continue\n if type(command_val) != bytes:\n command_val = command_val(getattr(self.args, arg))\n logging.info('Setting {} to {}'.format(\n arg, getattr(self.args, arg)))\n self.dev.send(command_val)\n # Sometimes you can send multiple commands quickly, but sometimes\n # it'll eat commands. 
Sleeping 0.5s between commands is safe.\n time.sleep(0.5)\n\n def setup_logging(self):\n logging_format = '%(message)s'\n if self.args.debug:\n logging_level = logging.DEBUG\n logging_format = '%(asctime)s %(levelname)s: %(message)s'\n elif self.args.quiet:\n logging_level = logging.ERROR\n else:\n logging_level = logging.INFO\n logging.basicConfig(\n format=logging_format,\n level=logging_level,\n )\n\n def setup_device(self):\n if (not self.args.bluetooth_device) and (not self.args.serial_device):\n logging.info('Searching for Bluetooth devices, please wait')\n self.dev = rdum.DeviceBluetooth()\n for mac, name, bt_class in self.dev.scan():\n logging.info(' {} - {}'.format(mac, name))\n if name in ('UM24C', 'UM25C', 'UM34C'):\n self.args.bluetooth_device = mac\n self.args.device_type = name\n if not self.args.bluetooth_device:\n logging.error('No suitable Bluetooth device found')\n return\n if self.args.bluetooth_device:\n logging.info('Connecting to {} {}'.format(\n self.args.device_type, self.args.bluetooth_device))\n self.dev = rdum.DeviceBluetooth(self.args.bluetooth_device)\n elif self.args.serial_device:\n logging.info('Connecting to {} {}'.format(\n self.args.device_type, self.args.serial_device))\n self.dev = rdum.DeviceSerial(self.args.serial_device)\n logging.info('Connection established')\n logging.info('')\n\n def loop(self):\n f = open(\"GPS_ON_SCREEN_OFF.txt\", \"a+\")\n count = 0\n while count < 5:\n count += 1\n start_time = time.time()\n cur_time = 0\n print(\"Running test #\" + str(count))\n f.write(\"Test #\" + str(count) + \" | Time: 600 seconds | GPS: on | Screen: off\\n\")\n while cur_time < 600:\n try:\n self.dev.send(b'\\xf0')\n if self.args.json:\n # Write to file what we read from UM24C\n f.write(self.print_json(rdum.Response(self.dev.recv(), collection_time=datetime.datetime.now(), device_type=self.args.device_type,)))\n cur_time = time.time() - start_time\n else:\n f.write(self.print_human(rdum.Response(self.dev.recv(), collection_time=datetime.datetime.now(), device_type=self.args.device_type,)))\n except KeyboardInterrupt:\n raise\n except Exception:\n if self.args.watch is None:\n raise\n else:\n logging.exception('An exception has occurred')\n if self.args.watch is not None:\n if not self.args.json:\n print()\n if self.args.watch > 0:\n time.sleep(self.args.watch)\n else:\n return\n\n def main(self):\n self.args = parse_args()\n self.setup_logging()\n\n # logging.info('rdumtool {}'.format(__version__))\n # logging.info('Copyright (C) 2019 Ryan Finnie')\n # logging.info('')\n\n self.setup_device()\n try:\n # self.send_commands()\n self.loop()\n\n except KeyboardInterrupt:\n pass\n self.dev.close()\n\n\ndef main():\n return(RDUMTool().main())\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"rdumtool/rdum/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":12782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"143634596","text":"import unittest\nfrom math import * \nfrom cities import * \n\nclass TestTravelingSalesman(unittest.TestCase):\n \n # compute_total_distance also inherently tests compute_distance\n def test_compute_distance(self):\n lat1 = 32.361538\n lat2 = 58.301935\n lon1 = -86.279118\n lon2 = -134.41974 \n \n test_set = [('Alabama', 'Montgomery', lat1, lon1),\n ('Alaska', 'Juneau', lat2, lon2)]\n self.assertAlmostEqual(sqrt((lat1 - lat2)**2 + (lon1-lon2)**2)*2, compute_total_distance(test_set), 5) # Should equal the single distance multipled by two\n \n def 
test_compute_total_distance(self):\n lat1 = 32.361538\n lat2 = 58.301935\n lat3 = 33.448457\n lon1 = -86.279118\n lon2 = -134.41974 \n lon3 = -112.073844\n \n test_set = [('Alabama', 'Montgomery', lat1, lon1),\n ('Alaska', 'Juneau', lat2, lon2),\n ('Arizona', 'Phoenix', lat3, lon3)]\n \n answer = compute_distance(lat1, lon1, lat2, lon2) + compute_distance(lat2, lon2, lat3, lon3) + compute_distance(lat3, lon3, lat1, lon1)\n self.assertAlmostEqual(answer, compute_total_distance(test_set), 5) # Should equal the single distance multipled by two\n \n \n # swap_cities also inherently tests shuffle_cities, which is difficult to test due to its random output\n def test_swap_cities(self):\n \n # First test: switch 1 and 3\n test_set = [('Alabama', 'Montgomery', 32.361538, -86.279118),\n ('Alaska', 'Juneau', 58.301935, -134.41974),\n ('Arizona', 'Phoenix', 33.448457, -112.073844),\n ('Arkansas', 'Little Rock', 34.736009, -92.331122)]\n answer = [('Alabama', 'Montgomery', 32.361538, -86.279118),\n ('Arkansas', 'Little Rock', 34.736009, -92.331122),\n ('Arizona', 'Phoenix', 33.448457, -112.073844),\n ('Alaska', 'Juneau', 58.301935, -134.41974)]\n output = swap_cities(test_set, 1, 3)\n self.assertEqual(answer, output[0]) # check that we match the answer\n self.assertAlmostEqual(compute_total_distance(answer), output[1], 5) # check that our mileage output makes sense\n \n # Second test: switch 0 and 3\n test_set = [('Alabama', 'Montgomery', 32.361538, -86.279118),\n ('Alaska', 'Juneau', 58.301935, -134.41974),\n ('Arizona', 'Phoenix', 33.448457, -112.073844),\n ('Arkansas', 'Little Rock', 34.736009, -92.331122)]\n answer = [('Arkansas', 'Little Rock', 34.736009, -92.331122),\n ('Alaska', 'Juneau', 58.301935, -134.41974),\n ('Arizona', 'Phoenix', 33.448457, -112.073844),\n ('Alabama', 'Montgomery', 32.361538, -86.279118)]\n output = swap_cities(test_set, 0, 3)\n self.assertEqual(answer, output[0]) # check that we match the answer\n self.assertAlmostEqual(compute_total_distance(answer), output[1], 5) # check that our mileage output makes sense\n\n def test_find_best_cycle(self):\n \n # Clearly the set below is unoptimized; we'd expect answer to be the optimized set\n test_set = [('Alabama', 'Montgomery', 32.361538, -86.279118),\n ('Alaska', 'Juneau', 58.301935, -134.41974),\n ('Arizona', 'Phoenix', 33.448457, -112.073844),\n ('Arkansas', 'Little Rock', 34.736009, -92.331122)]\n \n # Sorts the answers so, intuitively, the salesman's path will be one continuous circle\n answer = [('Alaska', 'Juneau', 58.301935, -134.41974),\n ('Arizona', 'Phoenix', 33.448457, -112.073844),\n ('Alabama', 'Montgomery', 32.361538, -86.279118),\n ('Arkansas', 'Little Rock', 34.736009, -92.331122)]\n\n best_map = find_best_cycle(test_set)\n \n # First confirm, that the output road_map is what we'd expect\n self.assertAlmostEqual(compute_total_distance(best_map[0]), compute_total_distance(answer), 10)\n \n # Confirm that all entries exist\n self.assertEqual(set(test_set), set(best_map[0]))\n\nunittest.main()\n","sub_path":"traveling_salesman/cities_test.py","file_name":"cities_test.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"102114626","text":"# coding=utf-8\n# Copyright (c) 2016 - Dmall Shanghai-Tech \n\nimport logging\nimport random\nfrom dubbo import Dubbo\nfrom dubbo.enhancetypes import Long\nfrom dubbo.registry import ZookeeperRegistry\nfrom django.conf import settings\n\nlogger_api = 
logging.getLogger(settings.PUSH_LOG)\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\nclass DubboClient():\n \"\"\"\n Python Library for interacting with dubbo center API\n \"\"\"\n\n def __init__(self):\n registry = ZookeeperRegistry(settings.ZOOKEEPER_HOST)\n self.providers = registry.get_providers(settings.SERVICE_PATH)\n\n if len(self.providers) > 0:\n dubbo_ip, dubbo_port = random.choice(self.providers).split(':')\n dubbo_port = int(dubbo_port)\n config = {'classpath': settings.DUBBO_JAR_PATH}\n client = Dubbo(((dubbo_ip, dubbo_port),), config)\n self.dubbo_service = client.getProxy(settings.SERVICE_PATH)\n\n def get_spec_rule_by_skuid(self, sku_id):\n \"\"\"\n\n return:\n List\n \"\"\"\n\n result = None\n\n try:\n result = self.dubbo_service.getSpecRuleBySkuId(Long(sku_id))\n except Exception as e:\n logger_api.error(str(e))\n logger_api.info('dubbo, .')\n return None\n\n if result is None or result.model is None:\n logger_api.error('dubbo, False.')\n return None\n else:\n logger_api.info(result)\n result_data = {\n 'skuId': result.model.skuId,\n 'refSkuId': result.model.refSkuId,\n 'specNum': result.model.specNum,\n }\n return result_data\n\nif __name__ == \"__main__\":\n dubbo_client = DubboClient()\n skuid_spec_data = \\\n dubbo_client.get_spec_rule_by_skuids([11111111111, 22222222222])\n","sub_path":"erp_order.git/clients/python_dubbo_client/dubbo_client.py","file_name":"dubbo_client.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"285767998","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport mut.thermo\nimport mut.bayes\nimport mut.stats\nimport tqdm\nconstants = mut.thermo.load_constants()\n\n# Load the prior predictive check data. \nprior_data = pd.read_csv('../../data/Chure2019_DNA_prior_predictive_checks.csv')\n\n# Load the stan model. \nmodel = mut.bayes.StanModel('../stan/Chure2019_DNA_binding_energy.stan')\n\n# Set up a dataframe to store the properties.\nsamples_dfs = []\nsbc_dfs = []\n\n# Definie the thinning constant for computing the rank statistic. \nthin = 5\n\n# Iterate through each simulation\nfor g, d in tqdm.tqdm(prior_data.groupby('sim_idx')):\n \n # Determine the ground truth for each parameter.\n gt = {'ep_RA': d['ep_RA'].unique(),\n 'sigma': d['sigma'].unique()}\n\n # Generate the data dictionary. \n data_dict = {'J':1,\n 'N': len(d),\n 'idx': np.ones(len(d)).astype(int),\n 'R': np.ones(len(d)) * constants['RBS1027'],\n 'Nns': 4.6E6,\n 'ep_ai': constants['ep_AI'],\n 'n_sites': constants['n_sites'],\n 'Ka': constants['Ka'],\n 'Ki': constants['Ki'],\n 'c': d['IPTGuM'],\n 'fc': d['fc_draw']}\n \n # Sample the model\n _, samples = model.sample(data_dict=data_dict) \n samples.rename(columns={'ep_RA[1]': 'ep_RA', 'sigma[1]':'sigma'},\n inplace=True)\n samples['sim_idx'] = g\n samples_dfs.append(samples)\n \n # Compute the properties for each parameter. 
\n _sbc_dfs = []\n for p in ['ep_RA', 'sigma']:\n _df = pd.DataFrame([])\n z_score = (np.mean(samples[p]) - gt[p]) / np.std(samples[p])\n shrinkage = 1 - (np.var(samples[p]) / np.var(prior_data[p].unique()))\n _df['z_score'] = z_score\n _df['shrinkage'] = shrinkage\n _df['param'] = p \n _df['rank'] = np.sum(samples[p].values[::thin] < gt[p])\n _df['rank_ndraws'] = len(samples[p].values[::thin])\n _df['post_median'] = np.median(samples[p])\n _df['post_mean'] = np.mean(samples[p])\n _df['post_mode'] = samples.iloc[np.argmax(samples['lp__'].values)][p]\n _df['ground_truth'] = gt[p]\n _sbc_dfs.append(_df)\n \n _sbc_dfs = pd.concat(_sbc_dfs)\n _sbc_dfs['sim_idx'] = g\n sbc_dfs.append(_sbc_dfs) \nsbc_df = pd.concat(sbc_dfs) \n \nsbc_df.to_csv('../../data/Chure2019_DNA_sbc_statistics.csv', index=False)\n","sub_path":"code/analysis/Chure2019_DNA_sbc_samples.py","file_name":"Chure2019_DNA_sbc_samples.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"451510830","text":"from django.db import models\nfrom modelcluster.contrib.taggit import ClusterTaggableManager\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom taggit.models import TaggedItemBase\nfrom wagtail.admin.edit_handlers import (FieldPanel, InlinePanel,\n MultiFieldPanel, PageChooserPanel)\nfrom wagtail.core.models import Orderable, Page\nfrom wagtail.search import index\nfrom wagtail.snippets.models import register_snippet\n\n\nclass PersonPage(Page):\n first_name = models.CharField(\n max_length=255,\n verbose_name='First Name',\n )\n last_name = models.CharField(\n max_length=255,\n verbose_name='Last Name',\n )\n\n content_panels = Page.content_panels + [\n MultiFieldPanel([\n FieldPanel('first_name'),\n FieldPanel('last_name'),\n ], 'Person'),\n InlinePanel('addresses', label='Address'),\n ]\n\n class Meta:\n verbose_name = 'Person'\n verbose_name_plural = 'Persons'\n\n\n@register_snippet\nclass Address(index.Indexed, ClusterableModel, Orderable):\n address = models.CharField(\n max_length=255,\n verbose_name='Address',\n )\n tags = ClusterTaggableManager(\n through='home.AddressTag',\n blank=True,\n )\n person = ParentalKey(\n to='home.PersonPage',\n related_name='addresses',\n verbose_name='Person'\n )\n\n panels = [\n PageChooserPanel('person'),\n FieldPanel('address'),\n FieldPanel('tags'),\n ]\n\n class Meta:\n verbose_name = 'Address'\n verbose_name_plural = 'Address'\n\n\nclass AddressTag(TaggedItemBase):\n content_object = ParentalKey(\n to='home.Address',\n on_delete=models.CASCADE,\n related_name='tagged_items'\n )\n","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"469025000","text":"# 총 n명이 있고 내 차례가 p번째일 때 매번 내 순서 구하기\n\ncnt = 1 # 순서를 나타냄 계속 1씩 증가\nround = 1 # 모든 인원이 돌때마다 1씩 증가\ndest = 10 # 10번째 까지 돈다고 가정\nwhile cnt <= dest:\n\tif (round-1)*n+p == cnt:\n\t\tprint('my turn')\n\n\tif cnt%n == 0:\n\t\tround += 1\n\tcnt += 1 # round를 증가시키고 cnt를 증가시켜야 오류X\n","sub_path":"순서체크/check_turn.py","file_name":"check_turn.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"104824249","text":"import simpy\nimport numpy as np\n\n\n## one to one communication\ndef processA(env, pipe_out):\n ## processA sending msg to processB\n for i in range(0, 10):\n 
p_name = \"PA{:2d}\".format(i)\n yield env.timeout(3)\n print(\"{:8.2f}: {} executed a1\".format(env.now, p_name))\n yield env.timeout(5)\n print(\"{:8.2f}: {} executed a2\".format(env.now, p_name))\n ## 상황에 따라서 메세지를 보냄\n if np.random.normal(0, 1) < 0.5:\n pipe_out.put(\"msg1\")\n else:\n pipe_out.put(\"msg2\")\n yield env.timeout(5)\n print(\"{:8.2f}: {} executed a3\".format(env.now, p_name))\n print(\"{:8.2f}: {} completed\".format(env.now, p_name))\n\n\ndef processB(env, pipe_in):\n for i in range(0, 10):\n p_name = \"PB{:2d}\".format(i)\n yield env.timeout(3)\n print(\"{:8.2f}: {} executed a1\".format(env.now, p_name))\n print(\"{:8.2f}: {} is waiting for msg\".format(env.now, p_name))\n ## 메세지를 받음.\n msg = yield pipe_in.get()\n ## 받은 메세지에 따라서 다른 행동을 취함\n if msg == 'msg1':\n print(\"{:8.2f}: {} get {}\".format(env.now, p_name, msg))\n yield env.timeout(5)\n print(\"{:8.2f}: {} executed exc_a1\".format(env.now, p_name))\n elif msg == 'msg2':\n print(\"{:8.2f}: {} get {}\".format(env.now, p_name, msg))\n yield env.timeout(5)\n print(\"{:8.2f}: {} executed exc_a2\".format(env.now, p_name))\n print(\"{:8.2f}: {} completed\".format(env.now, p_name))\n\n\nnp.random.seed(42)\nenv = simpy.Environment()\npipe = simpy.Store(env)\nenv.process(processA(env, pipe_out=pipe))\nenv.process(processB(env, pipe_in=pipe))\n\nenv.run(until=50)","sub_path":"example/process_communication.py","file_name":"process_communication.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"487724970","text":"from globals import Globals\nimport time\nfrom forex_moving_average_functions import get_instrument_list\nfrom forex_moving_average_functions import create_moving_average_tick\nfrom forex_moving_average_functions import save_moving_average_tick\nfrom forex_moving_average_functions import send_email\n\n# Initialize variables.\naccount_id = Globals.account\ntoken = Globals.token\nsubject = 'Forex Moving Average Tick Worker'\nbody = 'All ticks have been saved.\\n'\nbear = 0\nbull = 0\ntotal = 0\nmax_atr = -1\natr_mult_sum = 0\n\n# Save the list of instruments.\ninstruments = get_instrument_list( account_id,token)\n\n\n# Loop through instruments.\nfor i in range(0,len(instruments)):\n\t# Create the Moving_Average_Tick object.\n\tma_tick = create_moving_average_tick(instruments[i],100,50,'D',token)\n\tatr = ma_tick.atr\n\n\t# Refresh the max_atr\n\tif atr > max_atr:\n\t\tmax_atr = atr\n\n\t# Compate the atr to the max_atr to get the atr multiplier.\n\tatr_mult = max_atr/ ma_tick.atr\n\n\t# Only add atr_multiplier to the total if < 5.\n\t#if atr_mult < :\n\tatr_mult_sum += atr_mult\n\ttime.sleep(0.5)\n\n# Loop through instruments.\nfor i in range(0,len(instruments)):\n\n\t# Create the Moving_Average_Tick object.\n\tma_tick = create_moving_average_tick(instruments[i],100,50,'D',token)\n\n\t# Write the header on first iteration.\n\tif total == 0:\n\t\tbody += ma_tick.timestamp + '\\n\\n'\n\n\t# Calculate the ATR multiplier %.\n\tatr_mult = max_atr/ ma_tick.atr\n\t\n\t# Calculate the normalized ATR multiplier %.\n\t#if atr_mult < 5:\n\torder_percent = round(atr_mult/atr_mult_sum,4)\n\t#else:\n\t#\torder_percent = 0\n\n\t# Save the Moving_Average_Tick to DynamoDB.\n\tsave_moving_average_tick(ma_tick.pair, ma_tick.timestamp, ma_tick.moving_average_1, ma_tick.moving_average_2, ma_tick.close, ma_tick.sentiment, ma_tick.high_low,ma_tick.atr_dollar, ma_tick.atr, order_percent)\n\n\t# Count bearish or bullish.\n\tif ma_tick.sentiment == 
'BEAR':\n\t\tbear += 1\n\t\ttotal += 1\n\n\telse:\n\t\tbull += 1\n\t\ttotal += 1\n\n\t# Append data to email body.\n\tbody += ma_tick.pair + '\\n----------------\\nSentiment: ' + ma_tick.sentiment + '\\nClose: ' + str(ma_tick.close) + '\\nMA100: ' + str(ma_tick.moving_average_1) + '\\nMA50: ' + str(ma_tick.moving_average_2) + '\\nHigh/Low: ' + ma_tick.high_low + '\\n\\n'\n\n\t# Sleep the loop for 1 second.\n\ttime.sleep(0.5)\n\n# Append totals to email body.\nbody += 'TOTALS\\n----------------\\n\\n' + 'BEAR: ' + str(bear) + '\\nBULL: ' + str(bull) + '\\nTOTAL: ' + str(total)\n\n# Send email.\nsend_email('bennett.e.siegel@gmail.com', subject, body)\n\n","sub_path":"forex_moving_average_tick_worker.py","file_name":"forex_moving_average_tick_worker.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"417729733","text":"import torch, torchvision\nimport numpy as np\nimport argparse\nfrom models.data_utils import TestRawDatasetFromFolder, display_transform, create_new_lr_image\nfrom torch.utils.data import DataLoader\nfrom models.model_gen import SRGANGenerator\nfrom models.model_discriminator import Discriminator\nimport os\nfrom tqdm import tqdm\nimport pytorch_ssim\nimport math\n\nclass Convert:\n\tdef __init__(self):\n\t\tself.model_path = None\n\t\t# list\n\t\tself.test_images = None\n\t\tself.in_path = None\n\t\tself.out_path = None\n\t\tself.model_path = None\n\n\t\tself.out_file = None\n\t\tself.out = None\n\n\t@staticmethod\n\tdef parse_args(args):\n\t\tparser = argparse.ArgumentParser()\n\n\t\tdata_args = parser.add_argument_group('Dataset options')\n\t\tdata_args.add_argument('--in_path', default='./test_raw_in_images')\n\t\tdata_args.add_argument('--out_path', default='./test_raw_out_images')\n\n\t\t# neural network options\n\t\tnn_args = parser.add_argument_group('Network options')\n\t\tnn_args.add_argument('--upscale_factor', default=2, type=int, choices=[2, 4, 8],\n\t\t help='super resolution upscale factor')\n\t\tnn_args.add_argument('--model_dir', type=str, default='saved_models', help='saved model dir')\n\t\tnn_args.add_argument('--in_channels', type=int, default=3)\n\n\t\t# training options\n\t\ttraining_args = parser.add_argument_group('Training options')\n\t\ttraining_args.add_argument('--n_save', type=int, default=10, help='number of batches to save')\n\n\t\treturn parser.parse_args(args)\n\n\tdef construct_dir(self):\n\t\t# self.in_path = os.path.join(self.args.in_path, 'up_%d' % self.args.upscale_factor)\n\t\tself.in_path = self.args.in_path\n\t\tself.out_path = os.path.join(self.args.out_path, 'up_%d' % self.args.upscale_factor)\n\t\tself.model_path = os.path.join(self.args.model_dir, '%d.pth' % self.args.upscale_factor)\n\t\tself.out_file = os.path.join(self.out_path, 'result.txt')\n\n\t\tif not os.path.exists(self.out_path):\n\t\t\tos.makedirs(self.out_path)\n\n\tdef construct_data(self):\n\t\tself.test_set = TestRawDatasetFromFolder(self.in_path,\n\t\t upscale_factor=self.args.upscale_factor)\n\n\t\tself.test_loader = DataLoader(dataset=self.test_set, num_workers=1, batch_size=1, shuffle=False)\n\n\tdef main(self, args=None):\n\t\tself.args = self.parse_args(args=args)\n\t\tself.generator = SRGANGenerator(args=self.args)\n\t\tself.discriminator = Discriminator(args=self.args)\n\t\tself.construct_dir()\n\t\tif torch.cuda.is_available():\n\t\t\tself.generator.cuda()\n\t\t\tself.discriminator.cuda()\n\n\t\tif torch.cuda.is_available():\n\t\t\t(generate_state_dict, 
discriminator_state_dict) = torch.load(self.model_path, map_location='gpu')\n\t\telse:\n\t\t\t(generate_state_dict, discriminator_state_dict) = torch.load(self.model_path, map_location='cpu')\n\n\t\tself.generator.load_state_dict(generate_state_dict)\n\t\tself.discriminator.load_state_dict(discriminator_state_dict)\n\n\t\tself.construct_data()\n\n\t\twith open(self.out_file, 'w') as self.out:\n\t\t\tself.test_loop()\n\n\tdef test_loop(self):\n\t\t# put to GPU\n\n\t\t# mse, ssim, and psnr are not available at current settings\n\t\ttest_results = {'D_G_z':0, 'n_samples': 0}\n\n\t\tnaive_results = {'D_G_z': 0, 'n_samples': 0}\n\n\t\twith torch.no_grad():\n\t\t\tself.generator.eval()\n\t\t\tself.discriminator.eval()\n\t\t\ttest_images = []\n\t\t\tfor idx, (lr_image, naive_hr_image) in enumerate(tqdm(self.test_loader)):\n\t\t\t\tif idx >= self.args.n_save:\n\t\t\t\t\tbreak\n\t\t\t\tcur_batch_size = lr_image.size(0)\n\t\t\t\ttest_results['n_samples'] += cur_batch_size\n\n\t\t\t\tif torch.cuda.is_available():\n\t\t\t\t\tlr_image = lr_image.cuda()\n\t\t\t\t\tnaive_hr_image = naive_hr_image.cuda()\n\n\t\t\t\tsr_image = self.generator(lr_image)\n\t\t\t\tsr_probs, log_sr_probs = self.discriminator(sr_image)\n\t\t\t\ttest_results['D_G_z'] += sr_probs.data.cpu().sum()\n\n\t\t\t\tnaive_sr_probs, naive_log_sr_probs = self.discriminator(naive_hr_image)\n\t\t\t\tnaive_results['D_G_z'] += naive_sr_probs.data.cpu().sum()\n\n\t\t\t\tlr_image = create_new_lr_image(lr_image, sr_image)\n\t\t\t\tfor image_idx in range(cur_batch_size):\n\t\t\t\t\ttest_images.extend(\n\t\t\t\t\t\t[display_transform()(lr_image[image_idx].data.cpu()),\n\t\t\t\t\t\t display_transform()(naive_hr_image[image_idx].data.cpu()),\n\t\t\t\t\t\t display_transform()(sr_image[image_idx].data.cpu())])\n\n\t\t\t\tif idx == 10:\n\t\t\t\t\tbreak\n\n\t\t\ttest_results['D_G_z'] /= test_results['n_samples']\n\t\t\tnaive_results['D_G_z'] /= test_results['n_samples']\n\n\t\t\t# write to out file\n\t\t\tresult_line = '\\tTest\\n'\n\t\t\tfor k, v in test_results.items():\n\t\t\t\tresult_line += '{} = {}, '.format(k, v)\n\n\t\t\tresult_line += '\\n'\n\t\t\tfor k, v in naive_results.items():\n\t\t\t\tresult_line += 'naive_{} = {} '.format(k, v)\n\t\t\tprint(result_line)\n\t\t\tself.out.write(result_line+'\\n')\n\t\t\tself.save_image(test_images)\n\n\tdef save_image(self, images, epoch):\n\t\t# number of out images, 4*5 is number of sub-images in an output image\n\t\tn_out_images = int(len(images) / 4)\n\n\t\tcur_out_image_dir = os.path.join(self.out_path, 'epoch_%d' % epoch)\n\n\t\tif not os.path.exists(cur_out_image_dir):\n\t\t\tos.makedirs(cur_out_image_dir)\n\n\t\tfor idx in tqdm(range(n_out_images), desc='saving validating image'):\n\t\t\timage = torch.stack(images[idx*(4):(idx+1)*(4)])\n\t\t\tif image.size()[0] < 1:\n\t\t\t\tbreak\n\t\t\timage = torchvision.utils.make_grid(image, nrow=4, padding=10)\n\t\t\tsave_path = os.path.join(cur_out_image_dir, 'index_%d.jpg' % idx)\n\t\t\ttorchvision.utils.save_image(image, save_path, padding=5)\n\n\nif __name__ == '__main__':\n\ttest = Convert()\n\ttest.main()","sub_path":"convert_raw_images.py","file_name":"convert_raw_images.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"503178084","text":"import cv2\nfrom keras.engine.saving import load_model\nimport sudoku_finder as s\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nIMAGE_SIZE = 256\nmodel = load_model(\"sudoku_classification_model.h5\")\ncap = 
cv2.VideoCapture(0)\n\nif not (cap.isOpened()):\n print(\"Camera could not be open !!!\")\n\nwhile True:\n ret, frame = cap.read()\n\n cv2.imshow(\"frame\", frame)\n img = frame\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))\n img = np.array(img).reshape(-1,IMAGE_SIZE, IMAGE_SIZE, 1)\n p = model.predict([[img]])\n\n if s.detected(frame) and p[0][0] < p[0][1]:\n print(\"detected\")\n img = s.sudoku_finder(frame)\n plt.imshow(img)\n plt.show()\n # cap.release()\n continue\n\n if cv2.waitKey(1) and 0xFF == ord('q'):\n cap.release()\n quit()\n cv2.destroyAllWindows()\n break\n","sub_path":"Server/VideoCapturing.py","file_name":"VideoCapturing.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"303443530","text":"from flask import Blueprint, current_app, abort, request, jsonify\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom .. import db\nfrom ..models import User, Resume\nfrom ..providers import ProviderError\nfrom ..utils import validation_required\n\n\nmodule = Blueprint('resume', __name__)\n\n\n@module.route('/resume', methods=['GET'])\n@jwt_required\ndef resume():\n \"\"\"\n User's resume list\n\n .. :quickref: protected; Retrieve user's resume list directly from provider\n\n **Request**:\n\n .. sourcecode:: http\n\n GET /refresh HTTP/1.1\n Authorization: JWT q1w2.e3r4.t5y\n\n **Response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n [\n {\n \"enabled\": true,\n \"link\": \"https://\",\n \"name\": \"John Doe\",\n \"published\": \"2018-08-19T17:41:52+0300\",\n \"title\": \"Proctologist-jeweler\",\n \"uniq\": \"erwhy2333r23rd2r32er23\"\n }\n ]\n\n :reqheader Authorization: valid JWT token\n\n :statuscode 200: OK\n :statuscode 401: auth errors\n :statuscode 500: unexpected errors\n :statuscode 503: provider errors\n \"\"\"\n try:\n user_id = get_jwt_identity()\n user = User.query.get(user_id)\n\n provider = current_app.providers[user.provider]\n resumes = provider.fetch(user.access)\n\n for i in resumes:\n resume = Resume.query.filter_by(uniq=i['uniq'], owner=user).first()\n if not resume:\n resume = Resume(uniq=i['uniq'], enabled=False, owner=user)\n current_app.logger.info(f'Resume created: {resume}')\n db.session.add(resume)\n\n i['enabled'] = resume.enabled\n\n db.session.commit()\n\n except ProviderError as e:\n current_app.logger.error(f'Resume error: {e}')\n return abort(503, 'Provider error')\n\n except SQLAlchemyError as e:\n current_app.logger.error(f'{type(e).__name__}: {e}', exc_info=1)\n return abort(500, 'Database error')\n\n else:\n return jsonify(resumes)\n\n\n@module.route('/resume', methods=['POST'])\n@jwt_required\n@validation_required({'uniq': {'type': 'string', 'required': True}})\ndef resume_toggle():\n \"\"\"\n Enable/disable automatically publish user's resume\n\n .. :quickref: protected; Toggle automatically publish user's resume\n\n **Request**:\n\n .. sourcecode:: http\n\n POST /resume HTTP/1.1\n Content-Type: application/json\n\n {\n \"uniq\": \"q1w2e3r4t5y6\"\n }\n\n **Response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n {\n \"enabled\": true\n }\n\n :reqjson string uniq: provider's resume id\n\n :statuscode 200: OK\n :statuscode 400: invalid JSON in request's body\n :statuscode 401: auth errors\n :statuscode 500: unexpected errors\n \"\"\"\n try:\n user_id = get_jwt_identity()\n user = User.query.get(user_id)\n uniq = request.get_json()['uniq']\n resume = Resume.query.filter_by(uniq=uniq, owner=user).first()\n\n if not resume:\n return abort(404, 'Resume not found')\n\n resume.enabled = not resume.enabled\n\n db.session.add(resume)\n db.session.commit()\n\n except SQLAlchemyError as e:\n current_app.logger.error(f'{type(e).__name__}: {e}', exc_info=1)\n return abort(500, 'Database error')\n\n else:\n current_app.logger.info(f'Resume toggled: {resume}')\n return jsonify(enabled=resume.enabled)\n","sub_path":"app/controllers/resume.py","file_name":"resume.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"558233387","text":"# Template of using quick sort to select the kth largest/smallest element in a unsorted list\n\n# Example - https://leetcode.com/problems/kth-largest-element-in-an-array/\n\n# Basically finding the kth smallest number\n# Numbers are ordered from small to large\n#\n#\ndef quick_select(nums, k, start, end):\n if start >= end:\n return nums[start]\n\n left, right = start, end\n mid = (start + end) // 2\n pivot = nums[mid]\n\n while left <= right:\n while left <= right and nums[left] < pivot:\n left += 1\n while left <= right and nums[right] > pivot:\n right -= 1\n\n if left <= right:\n nums[left], nums[right] = nums[right], nums[left]\n left += 1\n right -= 1\n\n if k <= right:\n return quick_select(nums, k, start, right)\n if k >= left:\n return quick_select(nums, k, left, end)\n\n return nums[k]\n\n\n#test = [7, 6, 5, 4, 3, 2, 1, 0]\ntest = [9, 1, 7, 3, 5, 4, 6, 2, 8, 0]\nprint(quick_select(test, 5 - 1, 0, len(test) - 1))\n","sub_path":"algorithm/quick_select.py","file_name":"quick_select.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"327226020","text":"# 執行緒實驗\n#\n# Purpose: 利用threading.Condition實作producer and consumer\n#\n# Result:\n#\n\nimport threading\n\n\ndef producer(clerk):\n for product in range(10):\n clerk.purchase(product)\n print('店員進貨({})'.format(product))\n\n\ndef consumer(clerk):\n for product in range(10):\n print('店員賣出{}'.format(clerk.sellout()))\n\n\n# 店員類別\nclass Clerk:\n def __init__(self):\n # -1 表示沒有持有產品\n self.product = -1\n # 每一個資源(店員)有一個conditional lock\n self.cond = threading.Condition()\n\n def purchase(self, product):\n with self.cond:\n # 若條件不成立, 在conditional lock 上 wait, 直到條件成立跳出迴圈\n while self.product != -1:\n self.cond.wait()\n self.product = product\n self.cond.notify()\n\n def sellout(self):\n with self.cond:\n # 若條件不成立, 在conditional lock 上 wait, 直到條件成立跳出迴圈\n while self.product == -1:\n self.cond.wait()\n p = self.product\n self.product = -1\n self.cond.notify()\n return p\n\n\nclerk = Clerk()\nt1 = threading.Thread(target=producer, args=(clerk,))\nt2 = threading.Thread(target=consumer, args=(clerk,))\nt1.start()\nt2.start()\nt1.join()\nt2.join()\n","sub_path":"ch13_concurrency_and_multiprocessing/condition_demo.py","file_name":"condition_demo.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} 
+{"seq_id":"371783390","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom .models import Student\nfrom .forms import StudentRegistrationForm\n\n\ndef student_list(request):\n context = {\n \"students\": Student.objects.all()\n }\n return render(request, \"students/student_list.html\", context)\n\n\ndef createNewStudent(request):\n if request.method == 'GET':\n form = StudentRegistrationForm()\n return render(request, 'students/student_form.html', {\"form\": form})\n else:\n form = StudentRegistrationForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n else:\n messages.error(\n request, 'عملیات ناموفقانه بود لطفا دوباره کوشش نمایید ')\n return HttpResponseRedirect(reverse('student-list'))\n messages.success(request, \" محصیل جدید موفقانه افزوده شد!\")\n return HttpResponseRedirect(reverse('student-create'))\n\n\ndef update_student(request, pk=0):\n if request.method == \"GET\":\n if pk == 0:\n form = StudentRegistrationForm()\n else:\n student = Student.objects.get(pk=pk)\n form = StudentRegistrationForm(instance=student)\n return render(request, \"students/student_detail.html\", {\"form\": form})\n else:\n if pk == 0:\n form = StudentRegistrationForm(request.POST, request.FIELS)\n else:\n student = Student.objects.get(pk=pk)\n form = StudentRegistrationForm(\n request.POST, request.FILES, instance=student)\n if form.is_valid():\n form.save()\n messages.success(request, \"تغییرات موفقانه بود\")\n return HttpResponseRedirect(reverse(f'student-detail/{pk}'))\n","sub_path":"students/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"123037219","text":"\"\"\"\nMIT License\n\nCopyright (c) 2020 Airbyte\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nfrom functools import partial\nfrom json import JSONDecodeError\nfrom typing import Mapping, Tuple\n\nimport requests\nfrom base_python import BaseClient\nfrom requests.exceptions import ConnectionError\n\n\nclass Client(BaseClient):\n \"\"\"\n Tempo API Reference: https://tempo-io.github.io/tempo-api-docs/\n \"\"\"\n\n API_VERSION = 3\n DEFAULT_ITEMS_PER_PAGE = 100\n\n PARAMS = {\"limit\": DEFAULT_ITEMS_PER_PAGE, \"offset\": 0}\n ENTITIES_MAP = {\n \"accounts\": {\"url\": \"/accounts\", \"func\": lambda v: v[\"results\"], \"params\": PARAMS},\n \"customers\": {\"url\": \"/customers\", \"func\": lambda v: v[\"results\"], \"params\": PARAMS},\n \"worklogs\": {\"url\": \"/worklogs\", \"func\": lambda v: v[\"results\"], \"params\": PARAMS},\n \"workload-schemes\": {\"url\": \"/workload-schemes\", \"func\": lambda v: v[\"results\"], \"params\": PARAMS},\n }\n\n def __init__(self, api_token):\n self.headers = {\"Authorization\": \"Bearer \" + api_token}\n self.base_api_url = f\"https://api.tempo.io/core/{self.API_VERSION}\"\n super().__init__()\n\n def lists(self, name, url, params, func, **kwargs):\n while True:\n response = requests.get(f\"{self.base_api_url}{url}?limit={params['limit']}&offset={params['offset']}\", headers=self.headers)\n data = func(response.json())\n yield from data\n if len(data) < self.DEFAULT_ITEMS_PER_PAGE:\n break\n params[\"offset\"] += self.DEFAULT_ITEMS_PER_PAGE\n\n def _enumerate_methods(self) -> Mapping[str, callable]:\n return {entity: partial(self.lists, name=entity, **value) for entity, value in self.ENTITIES_MAP.items()}\n\n def health_check(self) -> Tuple[bool, str]:\n alive = True\n error_msg = None\n # must be implemented later\n\n try:\n next(self.lists(name=\"workload-schemes\", **self.ENTITIES_MAP[\"workload-schemes\"]))\n\n except ConnectionError as error:\n alive, error_msg = False, str(error)\n # If the input domain is incorrect or doesn't exist, then the response would be empty, resulting in a JSONDecodeError\n except JSONDecodeError:\n alive, error_msg = (\n False,\n \"Unable to connect to the Tempo API with the provided credentials. 
Please make sure the input credentials and environment are correct.\",\n )\n\n return alive, error_msg\n","sub_path":"airbyte-integrations/connectors/source-tempo/source_tempo/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"242298835","text":"\"\"\"\n Zoom, shift, mirror and adjust brightness of images.\n Used for increasing the size of our data set\n\"\"\"\nimport os\nimport random\nimport cv2\nimport numpy as np\n\n\nIMG_IDX = 1\nSRC_DIR = \"../dataset/dataset_images/\"\nDST_DIR = \"../dataset/dataset_images/agmntd_dataset/\"\nsrc_files = os.listdir(SRC_DIR)\n\nif not os.path.exists(DST_DIR):\n os.makedirs(DST_DIR)\n\n\ndef fill(f_img, img_h, img_w):\n f_img = cv2.resize(\n f_img,\n (img_h, img_w),\n cv2.INTER_CUBIC)\n return f_img\n\n\ndef horizontal_shift(h_img, ratio=0.0):\n # zoom and shift image along x-axis\n if ratio > 1 or ratio < 0:\n print('Value should be less than 1 and greater than 0')\n return h_img\n\n ratio = random.uniform(-ratio, ratio)\n img_h, img_w = h_img.shape[:2]\n to_shift = img_w*ratio\n if ratio > 0:\n h_img = h_img[:, :int(img_w-to_shift), :]\n if ratio < 0:\n h_img = h_img[:, int(-1*to_shift):, :]\n h_img = fill(h_img, img_h, img_w)\n return h_img\n\n\ndef zoom(z_img, value):\n # zoom in on area of image\n if value > 1 or value < 0:\n return z_img\n value = random.uniform(value, 1)\n img_h, img_w = z_img.shape[:2]\n h_taken = int(value*img_h)\n w_taken = int(value*img_w)\n h_start = random.randint(0, img_h - h_taken)\n w_start = random.randint(0, img_w - w_taken)\n z_img = z_img[h_start:h_start + h_taken, w_start:w_start + w_taken, :]\n z_img = fill(z_img, img_h, img_w)\n return z_img\n\n\ndef brightness(b_img, low, high):\n # adjust image brightness\n value = random.uniform(low, high)\n hsv = cv2.cvtColor(b_img, cv2.COLOR_BGR2HSV)\n hsv = np.array(hsv, dtype=np.float64)\n\n hsv[:, :, 1] = hsv[:, :, 1]*value\n hsv[:, :, 1][hsv[:, :, 1] > 255] = 255\n hsv[:, :, 2] = hsv[:, :, 2]*value\n hsv[:, :, 2][hsv[:, :, 2] > 255] = 255\n\n hsv = np.array(hsv, dtype=np.uint8)\n b_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n return b_img\n\n\ndef mirroring(m_img, flip_dir):\n \"\"\"\n mirror image around x-axis\n FLIP_HORZ = 1\n FLIP_VERT = 0\n \"\"\"\n m_img = cv2.flip(m_img, flip_dir)\n return m_img\n\n\ndef rotation(r_img):\n # rotate image\n r_img = cv2.rotate(r_img, cv2.cv2.ROTATE_90_CLOCKWISE)\n return r_img\n\n\nfor file in src_files:\n if os.path.isfile(SRC_DIR+file):\n a, b = os.path.splitext(SRC_DIR + file)\n img = cv2.imread(str(a+b), 1)\n img = cv2.imread(str(a+b), 1)\n img = brightness(img, 0.4, 1)\n img = horizontal_shift(img, 0.4)\n img = zoom(img, 0.2)\n img = mirroring(img, 0)\n img = rotation(img)\n cv2.imwrite(DST_DIR + 'aug_misc_'+str(IMG_IDX) + \".png\", img)\n IMG_IDX += 1\nprint(\"Done..\")\n","sub_path":"dataset_utils/augment_dataset.py","file_name":"augment_dataset.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"420132228","text":"import glob\nimport os\nimport numpy as np\nimport scipy.misc\nimport librosa\nfrom PIL import Image\n\nname = ['01_speech', '02_song']\n\nWidth = 650\nHeight = 180\n\ncount = 0.\ngaros = 0.\npasscounter = 0\n\n\nfor fname in name :\n if not os.path.exists('RAVDESS/Mels') :\n os.mkdir('RAVDESS/Mels')\n\n if not os.path.exists('RAVDESS/Mels/%s'%(fname)) :\n 
os.mkdir('RAVDESS/Mels/%s'%(fname))\n\n folders = np.array([])\n index = 0\n for name in glob.glob('RAVDESS/%s/*'%(fname)) :\n folders = np.append(folders,name)\n a,b = folders[index].split('RAVDESS/%s'%(fname))\n if not os.path.exists(('RAVDESS/Mels/%s'%(fname)+b)) :\n os.mkdir(('RAVDESS/Mels/%s'%(fname)+b))\n index += 1\n\n index = 1\n\n for label in folders :\n for files in glob.glob(str(label + '/*.wav')) :\n a,b = label.split('RAVDESS/%s/'%(fname))\n\n audio_path_num = files\n # audio_path = '/path/to/your/favorite/song.mp3'\n\n c, d = files.split(label)\n audioname,e = d.split('.wav')\n\n y, sr = librosa.load(audio_path_num, sr=48000)\n\n # cutter = len(y) / sr\n #\n # plz = cutter * sr\n # whatthe = y.shape\n #\n # if y.size > cutter * sr:\n # y = y[0:sr]\n\n # Normalize\n librosa.util.normalize(y, norm=1)\n\n # Let's make a spectrogram (freq, power)\n Spec = librosa.amplitude_to_db(abs(librosa.stft(y, n_fft=2048)), ref=np.max)\n\n # Let's make a CQT\n C = librosa.amplitude_to_db(abs(librosa.cqt(y, sr=sr)))\n\n # Let's make and display a mel-scaled power (energy-squared) spectrogram\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=128)\n\n # Convert to log scale (dB). We'll use the peak power (max) as reference.\n log_S = librosa.power_to_db(S, ref=np.max)\n\n # Next, we'll extract the top 13 Mel-frequency cepstral coefficients (MFCCs)\n mfcc = librosa.feature.mfcc(S=log_S, n_mfcc=13)\n\n # Let's pad on the first and second deltas while we're at it\n delta_mfcc = librosa.feature.delta(mfcc)\n delta2_mfcc = librosa.feature.delta(mfcc, order=2)\n\n if len(Spec[0]) == len(C[0]) == len(S[0]) == len(log_S[0]) == len(mfcc[0]) :\n rawdata_length = len(Spec[0])\n else :\n print('Input Size Different..?')\n quit()\n\n rawdata_height = y.size/rawdata_length\n\n # Raw Data\n rawdata_array = y[0:(rawdata_height) * rawdata_length]\n rawdata_array = np.reshape(rawdata_array, (rawdata_height, rawdata_length))\n\n scipy.misc.imsave('1.raw.png', np.flipud(rawdata_array))\n scipy.misc.imsave('2.spec.png', np.flipud(Spec))\n scipy.misc.imsave('3.cqt.png', np.flipud(C))\n scipy.misc.imsave('4.mel_power_spectrogram.png', np.flipud(log_S))\n scipy.misc.imsave('5.mf.png', np.flipud(mfcc))\n scipy.misc.imsave('6.mf_d.png', np.flipud(delta_mfcc))\n scipy.misc.imsave('7.mf_dd.png', np.flipud(delta2_mfcc))\n\n # Let's Concat\n # raw_img = Image.open('1.raw.png')\n # spec_img = Image.open('2.spec.png')\n # cqt_img = Image.open('3.cqt.png')\n mel_spec_img = Image.open('4.mel_power_spectrogram.png')\n mel_img = Image.open('5.mf.png')\n mel_d_img = Image.open('6.mf_d.png')\n mel_dd_img = Image.open('7.mf_dd.png')\n\n Input_stack = np.vstack((mel_spec_img, mel_img, mel_d_img, mel_d_img, mel_dd_img))\n # zero padd image, PADDING SIZE : 600, 1800\n Input_PAD = np.zeros((Height, Width), dtype=np.float64)\n h = Input_stack.shape[0]\n w = Input_stack.shape[1]\n\n if w > Width :\n print ('file : %s passed -> legnth : %d'%(audioname,w))\n passcounter += 1\n continue\n\n x_put = (Width - w) / 2\n y_put = (Height - h) / 2\n\n if h % 2 == 0 and y_put != 0:\n y_put - 1\n if w % 2 == 0 and x_put != 0:\n x_put - 1\n\n Input_PAD[y_put:y_put + h][:, x_put:x_put + w] = Input_stack\n\n Input = Input_PAD\n\n garos += w\n count += 1\n\n\n # str('{:03}'.format(index))\n filename = 'RAVDESS/Mels/%s/'%(fname)+b+ '/' + audioname + '.png'\n scipy.misc.imsave(filename, Input)\n index += 1\n print ('%s is saved' % (filename))\n\n\nmean = 0.\nmean = garos/count\n\nprint('length mean : %f'%(mean))\nprint('Passnum : 
%d'%(passcounter))\n\n\n\n","sub_path":"01_dataset_inputter.py","file_name":"01_dataset_inputter.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"20736575","text":"import re\nfrom suds.client import Client\n\n\nclass UnknownCurrencyException(Exception):\n pass\n\n\ndef convert_to_celsius(fahrenheit):\n url = 'http://www.webservicex.net/ConvertTemperature.asmx?WSDL'\n client = Client(url)\n celsius = client.service.ConvertTemp(\n Temperature=fahrenheit,\n FromUnit='degreeFahrenheit', ToUnit='degreeCelsius'\n )\n return celsius\n\n\ndef mean_temperature(filename):\n with open(filename) as f:\n temperatures = [\n int(line.split()[0]) for line in f.readlines()\n ]\n return sum(temperatures) / len(temperatures)\n\n\ndef flight_cost(filename):\n with open(filename) as f:\n for line in f.readlines():\n departure, arrival, cost, currency = \\\n (re.split('[ :-]+', line.strip()))\n yield cost, currency\n\n\ndef convert_to_rubles(cost, currency):\n if currency == 'RUB':\n return float(cost)\n url = 'http://fx.currencysystem.com/webservices/CurrencyServer4.asmx?WSDL'\n client = Client(url)\n available_currencies = client.service.Currencies().split(';')\n if currency not in available_currencies:\n raise UnknownCurrencyException(\n 'название валюты {} указано неверно'.format(currency)\n )\n cost_in_rubles = client.service.ConvertToNum(\n fromCurrency=currency,\n toCurrency='RUB',\n amount=cost,\n rounding=True\n )\n return float(cost_in_rubles)\n\n\ndef collect_distances(filename):\n with open(filename) as f:\n for line in f.readlines():\n departure, arrival, distance, unit = \\\n (re.split('[ :-]+', line.strip()))\n yield float(distance.replace(',', ''))\n\n\ndef convert_to_kilometers(miles):\n url = 'http://www.webservicex.net/length.asmx?WSDL'\n client = Client(url)\n kilometers = client.service.ChangeLengthUnit(\n LengthValue=miles,\n fromLengthUnit='Miles',\n toLengthUnit='Kilometers'\n )\n return kilometers\n\n\n# задача 1\nmean = mean_temperature('temps.txt')\nprint('{:.0f}'.format(convert_to_celsius(mean)))\n\n\n# Задача 2\nflight_cost_in_rubles = sum(\n convert_to_rubles(cost, currency)\n for cost, currency in flight_cost('currencies.txt')\n)\nprint('{:.0f}'.format(flight_cost_in_rubles))\n\n\n# Задача 3\nprint('{:.2f}'.format(\n convert_to_kilometers(\n sum(collect_distances('travel.txt'))\n )\n))\n","sub_path":"dz14.py","file_name":"dz14.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"85140192","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n======================================================================================================\n 杭州HUB仿真项目\n\n 项目启动日期:2017年7月6日\n 项目启动标识:AIRPORT OF EZHOU'S PROJECT -- HZ\n ===========================================\n 代码创建日期:2017年7月6日\n 代码创���工程师:卢健\n 代码版本:1.0\n 版本更新日期:2017年7月20日\n 版本更新工程师:卢健,赵鹏\n\n 代码整体功能描述:汇流点机器类模块\n 1、二入一出模型, 本次只需要一个入口一个出口;\n 2、无延时处理过程;\n 3、每个入口\\出口无服务受限;\n=====================================================================================================\n\"\"\"\nfrom src.utils import PackageRecord\n\n\nclass Cross(object):\n \"\"\"\n Cross obj:\n sim one machine that have more than one input ports and one out put port.\n input_i wrapped in a dict: input_dic =\n {\n 'x1_in1': queue, ...,\n 'x1_ini': queue}.\n _ _ _ _ _ _ _\n | |\n input_1 - ->| |\n . 
| Cross |- ->output\n input_i - ->| |\n |_ _ _ _ _ _ _|\n \"\"\"\n def __init__(self,\n env,\n machine_id,\n pipelines_dict=None,\n resource_dict=None,\n equipment_resource_dict=None):\n \"\"\"\n init class self args:\n Args:\n env: A simpy.Environment instance.\n machine_id: Cross machine id.\n pipelines_dict: pip line 字典\n resource_dict: 资源查询字典\n equipment_resource_dict: 机器资源id映射字典\n Raises:\n RuntimeError: An error occurred when input_dic\n not initialized before.\n \"\"\"\n self.env = env\n self.machine_id = machine_id\n self.pipelines_dict = pipelines_dict\n self.equipment_resource_dict = equipment_resource_dict\n self.resource_dict = resource_dict\n self.resource_set = self._set_machine_resource()\n\n def _set_machine_resource(self):\n \"\"\"\"\"\"\n if self.equipment_resource_dict:\n self.equipment_id = self.machine_id[1]\n self.input_pip_line = self.pipelines_dict[self.machine_id]\n else:\n raise RuntimeError('cross machine',\n self.machine_id,\n 'not initial equipment_resource_dict!')\n\n def run(self):\n\n while True:\n package = yield self.input_pip_line.get()\n # 获取出口队列id\n id_output_pip_line = package.next_pipeline\n # 记录机器开始处理货物信息\n package.insert_data(\n PackageRecord(\n equipment_id=self.equipment_id,\n package_id=package.item_id,\n time_stamp=self.env.now,\n action=\"start\", ))\n # 记录机器结束处理货物信息\n package.insert_data(\n PackageRecord(\n equipment_id=self.equipment_id,\n package_id=package.item_id,\n time_stamp=self.env.now,\n action=\"end\", ))\n # 放入下一步的传送带\n self.pipelines_dict[id_output_pip_line].put(package)\n","sub_path":"src/machine/cross/cross.py","file_name":"cross.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"430819730","text":"from connection import Connection\nfrom dbadmin import DbAdmin\nfrom categoryFromApi import CategoryFromApi\nfrom productFromApi import ProductFromApi\n\n\nclass DbInstall:\n\n def run(self):\n auth = Connection()\n auth.connect()\n mysql = DbAdmin(auth)\n mysql.create_db()\n mysql.create_table_category()\n mysql.create_table_products()\n mysql.create_categories_products()\n mysql.create_favorite_table()\n categories = CategoryFromApi()\n categories_bd = mysql.add_categories(categories.get_categories())\n products = ProductFromApi()\n for cat in categories_bd:\n mysql.add_products(products.get_products(cat[1]), cat[0])\n\n\ndef main():\n db = DbInstall()\n db.run()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dbinstall.py","file_name":"dbinstall.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"558394150","text":"#Jonathan Barnes\nfrom Crypto.Hash import SHA256\nfrom Crypto.Cipher import AES\nfrom speck import SpeckCipher\nimport string\nimport random\nimport os\nimport math\nimport sys\n\n\niv = os.urandom(16)\n\ndef Hprime(ptxt):\n temp = SHA256.new()\n c = ptxt.encode()\n temp.update(c)\n temp = temp.hexdigest()[:8] #get first 32 bits or 8 4-bit charactors\n return temp\n\ndef dm(key, h):\n #add padding for (128-bit, 256-bit, 512-bit)\n if(len(key) > 64):\n key = key.ljust(128,hex((128-len(key))%0xF)[2:])\n elif(len(key) > 32):\n key = key.ljust(64,hex((64-len(key))%0xF)[2:])\n else:\n key = key.ljust(32,hex((32-len(key))%0xF)[2:])\n \n E = AES.new(key.encode(), AES.MODE_CBC, iv=iv)\n if(type(h) != bytearray):\n h = bytearray(ord(_a) ^ _b for _a, _b in zip(h, E.encrypt(h.encode())))#xor the input h with output of Encryption 
block\n else:\n h = bytearray(_a ^ _b for _a, _b in zip(h, E.encrypt(h)))#xor the input h with output of Encryption block\n return h\n\ndef md(ptxt):\n h = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])\n for i in range(int(math.ceil(len(ptxt)/32))):\n h = dm(ptxt[i*32:(i + 1) * 32], h)\n #print(h)\n return h\n\ndef mac(k, m):\n inp = k + m #concatinate the two strings\n out = md(inp)\n return out\n\ndef hack(m1):\n m2 = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)]).encode()\n h = dm(m2, m1) #send in output m1 as input h\n return h\n\ndef colisions(ctxt):\n cprime = Hprime(ctxt)\n count = 0\n while(ctxt != cprime):\n ctxt = Hprime(ctxt)\n cprime = Hprime(Hprime(cprime))\n count = count +1\n print(count)\n\ndef sponge(m):\n key = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])#128 bit static key\n P = AES.new(key.encode(), AES.MODE_CBC, iv=iv)\n H = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n H = list(hex(H)[2:])\n #absorbing phase takes in blocks of size 64bits of message\n for i in range(math.ceil(len(m)/16)):\n #if its on the first go it's a string otherwise its an int\n if(type(H[1]) != int):\n H[:16] = bytearray(ord(_a) ^ ord(_b) for _a, _b in zip(H[:16], m[:(16*(i+1))]))\n temp = list(a for a in H[16:])\n H = list(chr(a) for a in H[:16]) #turn it into a list of chars so I can join them then encode to encrypt\n #print(H)\n H = ''.join(H) + ''.join(temp)\n H = P.encrypt(H.encode())\n else:\n H = list(a for a in H)\n H[:16] = bytearray(_a ^ ord(_b) for _a, _b in zip(H[:16], m[:(16*(i+1))]))\n H = bytes(a for a in H) #turn it into a list of chars so I can join them then encode to encrypt\n H = P.encrypt(H)\n \n #squeze phase\n H2 = P.encrypt(H)\n H = list(chr(a) for a in H) #get into a list of chars\n H2 = list(chr(a) for a in H2)\n Hash = ''.join(H) + ''.join(H2) #concat the two in one string\n return Hash\n\ndef cbcmac(m, key):\n iv = \"0000000000000000\"\n iv = iv.encode()\n E = AES.new(key.encode(),AES.MODE_CBC, iv=iv)\n if(len(m)%16 != 0):\n m.ljust(math.ceil(len(m)/16)*16, '0') #do padding\n Tag = E.encrypt(m.encode())\n return Tag\n\ndef cbcmacforge(m1, m2, T1, T2):\n temp = list(ord(_a) ^ _b for _a, _b in zip(m2, T1)) #xor\n temp = (chr(a) for a in temp)\n temp = ''.join(temp) #turn backt to a string \n m3 = m1 + temp #concatinate\n print(\"message 3\")\n print(m3)\n print(\"Tag 3\")\n print(T2) #will have the same tag as m2\n\ndef UH(k, m):\n p = 2**66 - 5\n temp = 0\n print(len(m))\n for n in range(len(m)):\n temp = temp + ord(m[n]) * (k**n)\n \n return (temp % p)\n\ndef CWMAC(k1, k2, N, M):\n B = SpeckCipher(k1, key_size=128, block_size=64)\n P = UH(k2, M)\n ciphertext = B.encrypt(N)\n ciphertext = ciphertext ^ P #xor\n return ciphertext\n\n\nprint(\"This is The number of itterations till a colision:\")\nc = Hprime(\"Hello\")\ncolisions(c)\n\nprint(\"\\nthis is the output of M-D hash\")\nout = md(c)\nprint (out)\n\nm = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])\nk = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])\n\nprint(\"\\nthis is the output of The MAC\")\nout = mac(k, m)\nprint(out)\n\nm = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(64)])\nprint(\"\\nThis is the output of the sponge\")\nout = sponge(m)\nprint(out)\n\nm = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(16)])\nkey = ''.join([random.choice(string.ascii_letters + string.digits) for n 
in range(16)])\nprint(\"\\nthis is the output of the cbc mac:\")\nprint(\"message 1:\")\nprint(m)\nprint(\"tag 1\")\nout = cbcmac(m, key)\nprint(out)\nprint(\"message 2:\")\nm2 = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(16)])\nprint(m2)\nprint(\"tag 2\")\nout2 = cbcmac(m2, key)\nprint(out2)\ncbcmacforge(m, m2, out, out2)\n\nkey1 = random.getrandbits(128)\nkey2 = random.getrandbits(64)\nm = random.getrandbits(128)\nout = CWMAC(key1, key2, m, m2)\nprint(\"\\nthis is the output of the W-C MAC:\")\nprint(out)","sub_path":"Assignment5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"246438981","text":"from helper import *\nfrom numpy import *\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\n\n\nfor resolution in resolutions:\n path = \"../results/\" + resolution + \"/average_coefficient_of_variation.csv\"\n if Path(path).is_file():\n with open(path) as file:\n coefficients = [float(i) for i in file.readline().split(\",\")]\n plt.plot(range(1, len(coefficients)+1), coefficients, label=resolution)\nplt.xlabel(\"Timestep after last observed value\")\nplt.ylabel(\"Average coefficient of variation\")\nplt.legend(loc='best')\nplt.savefig(\"../results/coefficient_of_variation.png\")\nplt.show()\n","sub_path":"analysis/code/coefficient-of-variation-graph.py","file_name":"coefficient-of-variation-graph.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"414560265","text":"##\r\n## Imprima la suma de la columna 2 por cada letra \r\n## de la columna 4, ordnados alfabeticamente.\r\n##\r\n## a,114\r\n## b,40\r\n## c,91\r\n## d,65\r\n## e,79\r\n## f,110\r\n## g,35\r\n##\r\ndatos = open('data.csv', 'r').readlines()\r\ndatos = [fila[:-1].split(\"\\t\") for fila in datos]\r\n\r\nletras = sorted(set([letra for fila in datos for letra in fila[3].split(\",\")]))\r\n\r\ndicc = { letra : 0 for letra in letras}\r\n\r\nfor fila in datos:\r\n for letra in fila[3].split(\",\"): \r\n dicc[letra] += int(fila[1])\r\n\r\nfor letra in letras:\r\n print (\"{},{}\".format(letra,dicc[letra]))\r\n\r\n\r\n","sub_path":"q12.py","file_name":"q12.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"222409875","text":"import os.path, sys\nfrom ConfigParser import ConfigParser\nfrom sqlite3 import connect as sqlconn\nfrom nsdevman.settings import workdir\nfrom nsdevman.nsroot import _\n\ntracroot = os.path.join(workdir, 'trac')\n\ntrac_perm = { 'member': [u\"FILE_VIEW\", u\"LOG_VIEW\", u\"MILESTONE_VIEW\",\n u\"REPORT_VIEW\", u\"ROADMAP_VIEW\", u\"SEARCH_VIEW\",\n u\"TICKET_VIEW\", u\"TIMELINE_VIEW\", u\"WIKI_VIEW\",\n u\"TICKET_CREATE\", u\"TICKET_MODIFY\", u\"WIKI_CREATE\",\n u\"WIKI_MODIFY\"],\n 'codereader': [u\"BROWSER_VIEW\", u\"CHANGESET_VIEW\"],\n 'coder': [u\"BROWSER_VIEW\", u\"CHANGESET_VIEW\"],\n 'tracman': [u\"MILESTONE_ADMIN\", u\"MILESTONE_CREATE\",\n u\"MILESTONE_DELETE\", u\"MILESTONE_MODIFY\",\n u\"REPORT_ADMIN\", u\"REPORT_CREATE\", u\"REPORT_DELETE\",\n u\"REPORT_MODIFY\", u\"REPORT_SQL_VIEW\",\n u\"PERMISSION_ADMIN\", u\"PERMISSION_GRANT\",\n u\"PERMISSION_REVOKE\", u\"TRAC_ADMIN\", u\"WIKI_DELETE\",\n u\"BUILD_VIEW\", u\"VERSIONCONTROL_ADMIN\",\n u\"BUILD_EXEC\", u\"BUILD_ADMIN\"] }\n\ndef Perm2TracPermSet(perms):\n tracperms = []\n for perm in perms:\n 
perm = perm.split('-')[-1]\n for tperm in trac_perm[perm]:\n if tperm not in tracperms: tracperms.append(tperm)\n return tracperms\n\ndef UpdateTracPerm(projname, member, email, perms):\n if not os.path.isdir(os.path.join(tracroot, projname)): return\n conn = sqlconn(os.path.join(tracroot, projname, 'db', 'trac.db'))\n cursor = conn.cursor()\n # Get CurSet.\n curset = []\n rows = cursor.execute('SELECT action FROM permission WHERE username=?',\n (member, ))\n for row in rows: curset.extend(row)\n #sys.stderr.write('curset = %s\\n' % repr(curset))\n # Get NewSet.\n cursor.execute('DELETE FROM session_attribute WHERE sid=? AND name=?',\n (member, u'email'))\n newset = Perm2TracPermSet(perms)\n if newset:\n cursor.execute('INSERT INTO session_attribute VALUES (?, ?, ?, ?)',\n (member, 1, u'email', email))\n #sys.stderr.write('newset = %s\\n' % repr(newset))\n # Get RemoveSet.\n removeset = []\n for ele in curset:\n if ele not in newset and ele not in removeset:\n removeset.append(ele)\n #sys.stderr.write('removeset = %s\\n' % repr(removeset))\n if removeset:\n cursor.executemany('DELETE FROM permission WHERE username=? AND action=?',\n map(lambda rm: (member, rm), removeset))\n # Get AddSet.\n addset = []\n for ele in newset:\n if ele not in curset and ele not in addset:\n addset.append(ele)\n #sys.stderr.write('addset = %s\\n' % repr(addset))\n if addset:\n cursor.executemany('INSERT INTO permission VALUES (?, ?)',\n map(lambda add: (member, add), addset))\n conn.commit()\n\ndef UpdateTracConfig(projname):\n if not os.path.isdir(os.path.join(tracroot, projname)): return\n cfgfn = os.path.join(tracroot, projname, 'conf', 'trac.ini')\n traccfg = ConfigParser()\n traccfg.readfp(open(cfgfn, 'rt'))\n dirty = False\n if not traccfg.has_section('components'):\n dirty = True; traccfg.add_section('components')\n for opt in ('tracext.git.*',\n 'tracopt.ticket.commit_updater.committicketreferencemacro',\n 'tracopt.ticket.commit_updater.committicketupdater'):\n if not traccfg.has_option('components', opt):\n dirty = True; traccfg.set('components', opt, 'enabled')\n elif traccfg.get('components', opt) != 'enabled':\n dirty = True; traccfg.set('components', opt, 'enabled')\n for opt in ('repository_url', 'repository_dir', 'repository_type'):\n if traccfg.has_option('trac', opt):\n dirty = True; traccfg.remove_option('trac', opt)\n if not traccfg.has_section('repositories'):\n dirty = True; traccfg.add_section('repositories')\n for rtype in ('svn', 'git'):\n rdir = os.path.join(workdir, rtype, projname)\n if not os.path.exists(rdir):\n for prop in ('dir', 'type', 'hidden'):\n opt = '%s.%s' % (rtype, prop)\n if traccfg.has_option('repositories', opt):\n dirty = True; traccfg.remove_option('repositories', opt)\n else:\n for prop in (('dir', rdir),\n ('type', rtype),\n ('hidden', 'false')):\n opt = '%s.%s' % (rtype, prop[0])\n if not traccfg.has_option('repositories', opt):\n dirty = True; traccfg.set('repositories', opt, prop[1])\n elif traccfg.get('repositories', opt) != prop[1]:\n dirty = True; traccfg.set('repositories', opt, prop[1])\n if dirty:\n traccfg.write(open(cfgfn, 'wt'))\n","sub_path":"nsdevman/nsdevman/backup/nsproj/trac.py","file_name":"trac.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"42449598","text":"from collections import defaultdict #defaultdict是经过封装的dict,它能够让我们设定默认值\nfrom tqdm import tqdm #tqdm是一个非常易用的用来显示进度的库\nfrom math import log\nimport re\n\nclass Find_Words:\n def __init__(self, 
min_count=10, min_pmi=0):\n self.min_count = min_count\n self.min_pmi = min_pmi\n self.chars, self.pairs = defaultdict(int), defaultdict(int) #如果键不存在,那么就用int函数\n #初始化一个值,int()的默认结果为0\n self.total = 0.\n def text_filter(self, texts): #预切断句子,以免得到太多无意义(不是中文、英文、数字)的字符串\n for a in tqdm(texts):\n for t in re.split(u'[^\\u4e00-\\u9fa50-9a-zA-Z]+', a): #这个正则表达式匹配的是任意非中文、\n #非英文、非数字,因此它的意思就是用任\n #意非中文、非英文、非数字的字符断开句子\n if t:\n yield t\n def count(self, texts): #计数函数,计算单字出现频数、相邻两字出现的频数\n for text in self.text_filter(texts):\n self.chars[text[0]] += 1\n for i in range(len(text)-1):\n self.chars[text[i+1]] += 1\n self.pairs[text[i:i+2]] += 1\n self.total += 1\n self.chars = {i:j for i,j in self.chars.items() if j >= self.min_count} #最少频数过滤\n self.pairs = {i:j for i,j in self.pairs.items() if j >= self.min_count} #最少频数过滤\n self.strong_segments = set()\n for i,j in self.pairs.items(): #根据互信息找出比较“密切”的邻字\n _ = log(self.total*j/(self.chars[i[0]]*self.chars[i[1]]))\n if _ >= self.min_pmi:\n self.strong_segments.add(i)\n def find_words(self, texts): #根据前述结果来找词语\n self.words = defaultdict(int)\n for text in self.text_filter(texts):\n s = text[0]\n for i in range(len(text)-1):\n if text[i:i+2] in self.strong_segments: #如果比较“密切”则不断开\n s += text[i+1]\n else:\n self.words[s] += 1 #否则断开,前述片段作为一个词来统计\n s = text[i+1]\n self.words = {i:j for i,j in self.words.items() if j >= self.min_count} #最后再次根据频数过滤\n\ndef get_data(file='data/corpus_medical.txt'):\n f = open(file, 'r', encoding=\"utf8\")\n s = f.read()\n return re.split(u'[^\\u4e00-\\u9fa50-9a-zA-Z]+', s)\n\nfw = Find_Words(16, 1)\nfw.count(get_data())\nfw.find_words(get_data())\n\nimport pandas as pd\ndf_out = pd.DataFrame()\ndata_out = pd.Series(fw.words).sort_values(ascending=False)\ndf_out[\"word\"] = data_out.index\ndf_out[\"cnt\"] = data_out.values\n\ndf_out.to_excel('data/result_medical2.xlsx', index=False)\npd.DataFrame(data_out).to_csv('data/result_medical2.txt', sep=\"\\t\", header = False)\n","sub_path":"demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"75113188","text":"'''\n file system table\n'''\nimport threading\n\nfrom util.str import *\nfrom util.path import *\nfrom util.log import Logger\nfrom util.lock import Lock\nfrom util.util import is_subset\n\nfrom store.table import *\nfrom store.itable import *\nfrom store.verifier import *\n\nFSTableOperationError = Exception\n\n\nclass FSTable(ITable):\n '''\n table base on file\n '''\n def __init__(self):\n self.path = None\n self.table_file = None\n self.data_file = None\n\n self.lock = threading.Lock() #lock for operate table data\n\n def create(self, dbpath, table):\n '''\n create table\n :return self\n '''\n try:\n #initialize table parameters\n self.table = table\n self.name = table.name\n\n self.path = join_paths(dbpath, table.name)\n self.table_file = join_paths(self.path, \"table\")\n self.data_file = join_paths(self.path, \"data\")\n\n #create table directory if it is not exists\n make_dirs(self.path)\n\n #create or replace table file\n if is_file(self.table_file):\n\n #replace old table file if needed\n old_table = self.desc()\n if self.table != old_table:\n #replace table file\n self._replace_table_file()\n else:\n #new table is same as exists table\n pass\n else:\n #create new table file\n self._create_table_file()\n\n #create or upgrade or replace data file\n if is_file(self.data_file):\n #replace old data file if needed\n with open(self.data_file) as fdata:\n 
nfields = strips(fdata.readline().split(\",\"))\n if self.table.nfields() != nfields:\n if is_subset(nfields, self.table.nfields()):\n self._upgrade_data_file()\n else:\n self._replace_data_file()\n else:\n #create new data file\n self._create_data_file()\n\n Logger.info(\"create table %s...success.\", self.name)\n return self\n except Exception as e:\n Logger.error(\"create table %s...failed. error: %s\", self.name, str(e))\n raise e\n\n def load(self, dbpath, name):\n '''\n load table\n :return: self\n '''\n try:\n #initialize table parameters\n self.name = name\n\n self.path = join_paths(dbpath, name)\n self.table_file = join_paths(self.path, \"table\")\n self.data_file = join_paths(self.path, \"data\")\n\n self.table = self.desc()\n\n #load data file\n if not is_file(self.data_file):\n #create data file if not exists\n self._create_data_file()\n else:\n #replace old data file if needed\n with open(self.data_file) as fdata:\n nfields = strips(fdata.readline().split(\",\"))\n if self.table.nfields() != nfields:\n if is_subset(nfields, self.table.nfields()):\n self._upgrade_data_file()\n else:\n self._replace_data_file()\n\n Logger.info(\"loading table %s...success.\", self.name)\n return self\n except Exception as e:\n Logger.info(\"loading table %s...failed. error: %s\", self.name, str(e))\n raise e\n\n def desc(self):\n '''\n descrite table from store\n :return: Table\n '''\n try:\n with open(self.table_file) as ftable:\n table = Table().fromstr(ftable.read())\n return table\n except Exception as e:\n Logger.info(\"describe table %s...failed. error: %s\", self.name, str(e))\n raise e\n\n def drop(self):\n '''\n drop table\n :return:\n '''\n try:\n remove_dir(self.path)\n except Exception as e:\n Logger.error(\"drop table %s...failed. error %s\", self.name, str(e))\n raise e\n\n\n def truncate(self):\n '''\n truncate table\n :return:\n '''\n try:\n with Lock(self.lock):\n remove_files(self.data_file)\n self._create_data_file()\n except Exception as e:\n Logger.error(\"truncate table %s...failed. error %s\", self.name, str(e))\n raise e\n\n\n def select(self):\n '''\n select all data from table\n :return:\n '''\n try:\n with Lock(self.lock):\n with open(self.data_file, \"r\") as fdata:\n models = []\n\n #read field names\n nfields = strips(fdata.readline().strip().split(\",\"))\n #read data records\n data = fdata.readline()\n while data:\n data = data.strip()\n vfields = strips(data.split(\",\"))\n model = {}\n for idx in range(0, len(nfields)):\n model[nfields[idx]] = str2obj(vfields[idx], ',')\n models.append(model)\n data = fdata.readline()\n\n return models\n except Exception as e:\n Logger.info(\"select data from table %s...failed. error: %s\", self.name, str(e))\n raise e\n\n\n def insert(self, models):\n '''\n insert data to table\n :param models:\n :return:\n '''\n try:\n with Lock(self.lock):\n with open(self.data_file, \"a\") as fdata:\n lines = []\n for model in models:\n vfields = []\n for nfield in self.table.nfields():\n vfields.append(objtostr(model.get(nfield), ','))\n lines.append(\"%s\\n\" % \",\".join(vfields))\n fdata.writelines(lines)\n except Exception as e:\n Logger.info(\"insert data to table %s...failed. 
error: %s\", self.name, str(e))\n raise e\n\n def _create_table_file(self):\n '''\n create table file\n :return:\n '''\n with open(self.table_file, 'w') as ftable:\n ftable.write(self.table.tostr())\n\n def _replace_table_file(self):\n '''\n replace table file\n :return:\n '''\n if is_file(self.table_file):\n from time import strftime\n old_table_file = \"%s.old.%s\" % (self.table_file, strftime(\"%Y%m%d%H%M%S\"))\n move(self.table_file, old_table_file)\n\n self._create_table_file()\n\n def _create_data_file(self):\n '''\n create data file\n :return:\n '''\n with open(self.data_file, 'w') as fdata:\n fdata.write(\",\".join(self.table.nfields()) + \"\\n\")\n\n def _replace_data_file(self):\n '''\n replace data file\n :return:\n '''\n if is_file(self.data_file):\n from time import strftime\n old_data_file = \"%s.old.%s\" % (self.data_file, strftime(\"%Y%m%d%H%M%S\"))\n move(self.data_file, old_data_file)\n\n self._create_data_file()\n\n def _upgrade_data_file(self):\n '''\n upgrade data file to new table structure\n :return:\n '''\n #new fields for table\n newfields = self.table.fields\n\n #create new data file\n from time import strftime\n new_data_file = \"%s.new.%s\" % (self.data_file, strftime(\"%Y%m%d%H%M%S\"))\n with open(new_data_file, 'w') as fnewdata:\n #write table header first\n fnewdata.write(\",\".join(self.table.nfields()) + \"\\n\")\n\n #move old data to new data file\n with open(self.data_file) as folddata:\n #read old data file headers\n oldfields = {}\n noldfields = strips(folddata.readline().split(\",\"))\n for i in range(0, len(noldfields)):\n oldfields[noldfields[i]] = i\n\n #move old data to new data file\n old_data = folddata.readline()\n while old_data:\n old_columns = strips(old_data.split(\",\"))\n\n new_columns = []\n for i in range(0, len(newfields)):\n idx = oldfields.get(newfields[i].name, None)\n if idx is not None:\n #new column exists in old column\n new_columns.append(objtostr(old_columns[idx], \",\"))\n else:\n #new column not exists in old column\n new_columns.append(objtostr(newfields[i].default.default(), \",\"))\n\n fnewdata.write(\"%s\\n\" % \",\".join(new_columns))\n\n old_data = folddata.readline()\n\n #replace old data file with new data file\n old_data_file = \"%s.old.%s\" % (self.data_file, strftime(\"%Y%m%d%H%M%S\"))\n move(self.data_file, old_data_file)\n move(new_data_file, self.data_file)\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"python/hydra/hydra/dba/fstable.py","file_name":"fstable.py","file_ext":"py","file_size_in_byte":9443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"102952157","text":"# !/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\n# JSON类型 -- Python类型\n# {} -- dict\n# [] -- list\n#\"string\" -- str\n# 123.45 -- int/float\n# true/false -- True/False\n# null -- None\n\n# json\n# 1. json.dumps() 返回一个str,内容为标准的JSON, ensure_ascii参数\n# 2. json.dump() 把json写入一个file-like Object\n# 3. 
json.load()/json.loads()将json反序列化\n\n# JSON标准规定JSON的编码为utf-8\n\n\nimport json\n\nDUMP_PATH = r'/Users/yuri/Documents/workspace/tutorial/hello_python/IO编程/序列化/dump.json'\n\nd = dict(name='Bob', age=20, score=90)\nr = json.dumps(d)\n# print(r)\n\nwith open(DUMP_PATH,'wb') as f:\n json.dump(d,f)\n\n\nwith open(DUMP_PATH,'r') as f:\n print(json.load(f))\n\nclass Student(object):\n \n def __init__(self, name, age, score):\n self.name = name\n self.age = age \n self.score = score\n\ndef student2dict(stu):\n return {\n 'name' : stu.name,\n 'age' : stu.age,\n 'score' : stu.score\n }\n\ns = Student('Bob', 20, 88)\nprint(json.dumps(s, default=student2dict))\nprint(json.dumps(s, default=lambda obj: obj.__dict__))\n\ndef dict2student(dict):\n return Student(dict['name'],dict['age'],dict['score'])\n\nwith open(DUMP_PATH,'rb') as f:\n s = f.read()\n print(json.loads(s, object_hook=dict2student))\n\n\n","sub_path":"9.IO编程/序列化/use_json.py","file_name":"use_json.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"540697105","text":"class Solution(object):\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n # second round\n # 2016-07-22\n # ask more about constant space (or other restraint)\n \n res = [[1,1] for n in nums]\n for i in range(1,len(nums)):\n res[i][0] = res[i-1][0] * nums[i-1]\n for i in range(len(nums)-2,-1,-1):\n res[i][1] = res[i+1][1] * nums[i+1]\n for i in range(0,len(nums)):\n res[i] = res[i][0] * res[i][1]\n return res\n \n \n","sub_path":"238-product_of_array_except_self/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"333557516","text":"# coding=utf-8\nfrom __future__ import print_function, absolute_import, unicode_literals\n\nimport sys\nimport json\nimport numpy as np\nimport pandas as pd\nimport talib as ta\nfrom gm.api import *\n\ndef init(context):\n #MA快线与慢线\n context.fast = 2\n context.slow = 16\n context.frequency = \"900s\"\n # context.goods交易的品种\n context.symbol = 'SHFE.AU'\n context.fields = \"high,low,close\"\n context.period = context.slow + 1 # 订阅数据滑窗长度\n #TimeFilter优化 \n context.window_now = ['14:00:00', '09:45:00']\n\n context.maxdd = 0.0\n context.high_level = 1000000.\n \n context.delta = 100000.0\n context.add_position = 1\n \n # 订阅context.goods里面的品种, bar频率为frequency\n subscribe(symbols=context.symbol,frequency=context.frequency,count=context.period,wait_group=True)\n\ndef on_bar(context, bars):\n \n date_now = str(context.now)\n #若当前时间在交易窗口内,则交易\n if str(date_now[-8:]) >= str(context.window_now[0]) or str(date_now[-8:]) <= str(context.window_now[1]):\n #获取账户现金字典\n Account_cash = context.account().cash\n #获取账户持仓字典\n Account_positions = context.account().positions()\n \n #获取当前持仓方向\n if len(Account_positions)>0:\n position_side = Account_positions[0]['side']\n else:\n position_side = 0\n #获取当前持仓浮动盈亏\n if len(Account_positions)>0:\n position_fpnl = Account_positions[0]['fpnl']\n \n \"\"\"\n #更新最高水准线\n context.high_level = max(context.high_level, Account_cash['nav'])\n #更新最大回撤\n context.maxdd = max(context.high_level - Account_cash['nav'], context.maxdd)\n \n if len(Account_positions)>0:\n Target_fixrisk = ((Account_cash['available']+Account_positions[0]['cost']*0.9)/Account_cash['nav'])*(0.02/0.2)\n else:\n Target_fixrisk = 0.1\n\n #当前调仓比例\n target_MaxDD = ((Account_cash['nav'] - 
Account_cash['nav']*0.1 - context.maxdd*1.5)/Account_cash['nav'])*0.1\n print(target_MaxDD, Target_fixrisk)\n \"\"\"\n\n # 获取数据\n close_prices = context.data(symbol=context.symbol,frequency=context.frequency,\n count=context.period,fields='close')\n trade_prices = context.data(symbol=context.symbol,frequency=context.frequency,\n count=context.period,fields='high,low')\n last_price = close_prices['close'][context.slow-1]\n # 计算长短���期均线\n fast_avg = ta.SMA(close_prices.values.reshape(context.period), context.fast)\n slow_avg = ta.SMA(close_prices.values.reshape(context.period), context.slow)\n \n SellStop = trade_prices['low'][context.slow-1] - 0.1\n SellLimit = trade_prices['low'][context.slow-1] - 0.5\n BuyStop = trade_prices['high'][context.slow-1] + 0.1\n BuyLimit = trade_prices['high'][context.slow-1] + 0.5\n # 均线下穿,做空\n if slow_avg[-2] < fast_avg[-2] and slow_avg[-1] >= fast_avg[-1]:\n # 平多仓\n order_target_percent(symbol=context.symbol, percent=0, position_side=1, order_type=2)\n # 开空仓\n #order_target_percent(symbol=context.symbol, percent=Target_fixrisk, position_side=2,\n # order_type=OrderType_Limit, price=SellStop)\n order_target_percent(symbol=context.symbol, percent=0.1, position_side=2,\n order_type=OrderType_Limit, price=SellStop) \n # 均线上穿,做多\n if fast_avg[-2] < slow_avg[-2] and fast_avg[-1] >= slow_avg[-1]:\n # 平空仓\n order_target_percent(symbol=context.symbol, percent=0, position_side=2, order_type=2) \n # 开多仓\n #order_target_percent(symbol=context.symbol, percent=Target_fixrisk, position_side=1,\n # order_type=OrderType_Limit, price=BuyStop)\n order_target_percent(symbol=context.symbol, percent=0.1, position_side=1,\n order_type=OrderType_Limit, price=BuyStop)\n #无均线上穿或下穿,衡量是否加仓\n if not (slow_avg[-2] < fast_avg[-2] and slow_avg[-1] >= fast_avg[-1]) \\\n and not (fast_avg[-2] < slow_avg[-2] and fast_avg[-1] >= slow_avg[-1]):\n if position_side!=0 and position_fpnl > context.delta*context.add_position:\n order_volume(symbol=context.symbol, volume=100, side=1, order_type=2 ,position_effect=1)\n #记录加仓,下次加仓时浮动盈亏要大一倍\n context.add_position = context.add_position + 1\n print(\"加仓: 浮动盈亏 %f\"%position_fpnl)\n \n\"\"\"\ndef on_backtest_finished(context, indicator):\n \n #以下用于在回测结束后保存回测指标\n indicator_data = {}\n indicator_data = indicator\n file_name = 'test_1'\n \n with open('E:/Program Files/other/交易系统作业/代码/作业四/'+file_name+'.json','w',encoding=\"utf-8\") as json_file:\n json.dump(indicator_data, json_file)\n print(\"WINDOW %s done!\"%(file_name))\n\"\"\"\n\nif __name__ == '__main__':\n run(strategy_id='c7645ff3-f516-11e8-beec-3c970e853b38',\n filename='project4_4.py',\n mode=MODE_BACKTEST,\n token='8401315ba754693611d3bb99131e9cbc527c605f',\n backtest_start_time='2016-10-20 09:15:00',\n backtest_end_time='2018-11-24 15:00:00',\n backtest_adjust=ADJUST_PREV,\n backtest_initial_cash=1000000,\n backtest_commission_ratio=0.0001,\n backtest_slippage_ratio=0)#.0001)\n","sub_path":"trading_system/project3/project4_4.py","file_name":"project4_4.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"16563995","text":"import warnings\r\nwarnings.filterwarnings('ignore')\r\nimport pandas as pd\r\nimport numpy as np\r\nimport joblib\r\nimport itertools\r\nimport cv2\r\nfrom datasist.structdata import detect_outliers\r\nfrom imblearn.over_sampling import SMOTE\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\nfrom 
sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import SVC, LinearSVC\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.linear_model import Perceptron\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom xgboost import XGBClassifier\r\nfrom sklearn.impute import KNNImputer\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport plotly.graph_objs as go\r\nimport plotly.express as px\r\nfrom plotly.offline import init_notebook_mode, iplot\r\ninit_notebook_mode(connected=True)\r\ndef plot_confusion_matrix(cm, classes,normalize=False,title='Confusion matrix',cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n plt.figure(figsize=(6, 6))\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=90)\r\n plt.yticks(tick_marks, classes)\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n\r\n thresh = cm.max() / 2.\r\n cm = np.round(cm, 2)\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, cm[i, j],\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n plt.show()\r\ndef MLPredictAcc(X, y, classes , scale = False , smote = False):\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\n if (smote == True):\r\n sampler = SMOTE()\r\n X_train, y_train = sampler.fit_resample(X_train, y_train)\r\n if (scale == True) :\r\n scaler = StandardScaler()\r\n scaler.fit(X_train)\r\n X_train = scaler.transform(X_train)\r\n X_test = scaler.transform(X_test)\r\n models = {\r\n \"XGB\": XGBClassifier(),\r\n \"KNN\": KNeighborsClassifier(),\r\n \"SVC\": SVC(),\r\n \"DT\": DecisionTreeClassifier(),\r\n \"RF\": RandomForestClassifier(),\r\n \"GaussianNB\" : GaussianNB(),\r\n \"Perceptron\" : Perceptron(),\r\n \"LinearSVC\" : LinearSVC(),\r\n \"SGDClassifier\" : SGDClassifier(),\r\n \"LogisticRegression\" : LogisticRegression()\r\n }\r\n modell = []\r\n modell_acc = []\r\n model_built = {}\r\n for name, model in models.items():\r\n print(f'Training Model {name} \\n--------------')\r\n model.fit(X_train, y_train)\r\n y_pred = model.predict(X_test)\r\n cf = confusion_matrix(y_test, y_pred)\r\n acc_svc = round(accuracy_score(y_test, y_pred) * 100,2)\r\n modell.append(name)\r\n modell_acc.append(acc_svc)\r\n model_built[name]=model\r\n plot_confusion_matrix(cf, classes, title='{} cf with acc = {} %'.format(name,acc_svc))\r\n print('-' * 30)\r\n models = pd.DataFrame(\r\n {\r\n 'Model': modell,\r\n 'Score': modell_acc ,\r\n\r\n })\r\n models = models.sort_values(by='Score', ascending=False)\r\n models['Score'] = models['Score'].apply(lambda x : str(x) + \" %\")\r\n modelss = pd.DataFrame({\r\n \"index \": [p for p in range(1,len(modell_acc)+1)],\r\n \"model\" : models['Model'],\r\n 'Score': models['Score'],\r\n })\r\n\r\n if (scale == True):\r\n return modelss, model_built , scaler\r\n else:\r\n return modelss, model_built\r\ndef 
check_category_classes(df):\r\n return df.select_dtypes(include='O').columns.to_list()\r\ndef check_non_category_classes(df):\r\n return df.select_dtypes(exclude='O').columns.to_list()\r\ndef define_column_type(df):\r\n numerical_column =check_non_category_classes(df)\r\n categorical_column = check_category_classes(df)\r\n print(\"numerical_column\", numerical_column)\r\n print(\"categorical_column\", categorical_column)\r\n return numerical_column , categorical_column\r\ndef show_value_count_category_column(df , categorical_column):\r\n for name in categorical_column:\r\n df_count = pd.DataFrame(df[name].value_counts())\r\n print(df_count)\r\n print(\"*\" * 50)\r\ndef allam_visualize_null_count(df):\r\n plt.figure(figsize=(12,8))\r\n print(df.isnull().sum())\r\n sns.heatmap(df.isnull())\r\ndef allam_plot_graph(x,y,xlabel=\"xlabel\",ylabel=\"ylabel\" , title = \"plot grapg\"):\r\n plt.figure(figsize=(12, 8))\r\n plt.style.use('ggplot')\r\n ax = plt.axes()\r\n ax.set(xlabel=xlabel, ylabel=ylabel)\r\n ax.set_title(title)\r\n plt.plot(x,y, color='blue', marker='o', markersize=11)\r\ndef allam_pie_graph(labels , values , title = None):\r\n fig = go.Figure(data=[go.Pie(labels=labels, values=values, textinfo='label+percent',\r\n insidetextorientation='radial' , title=title\r\n )] )\r\n\r\n fig.show()\r\ndef categoryColumnCountAndPercentage(df , categorical_column):\r\n for i in categorical_column:\r\n print(\"count and percentage of column {}\".format(i))\r\n f = df[i].value_counts()\r\n allam_pie_graph(f.index.tolist(), f.tolist())\r\n print(\"*\" * 100)\r\ndef categoryColumnMostAccurance(df,categorical_column):\r\n for i in categorical_column:\r\n print(\"most accurance of column {}\".format(i))\r\n f = df[i].value_counts()\r\n allam_display_most_accurance(f.index.tolist(), f.tolist())\r\n print(\"*\" * 100)\r\ndef found_text(m_purpose):\r\n bb=''\r\n for i in range(len(m_purpose)):\r\n if i <=len(m_purpose) -1 :\r\n bb += str(m_purpose[i]) + '% ,'\r\n bb = bb[0:-1]\r\n return bb.split(',')\r\ndef allam_compression_2_class(purp ,c1_purpose ,c2_purpose , c1_name= \"type 1 \" , c2_name = \"type2\" , title =\"compression\" , X_axis_name =\"X\" , Y_axis_name = \"Y\"):\r\n c1_txt = found_text(c1_purpose)\r\n c2_txt = found_text(c2_purpose)\r\n c1_pur = go.Bar(\r\n x=purp,\r\n y=c1_purpose,\r\n name=c1_name,\r\n text=c1_txt,\r\n textposition='auto',\r\n )\r\n c2_pur = go.Bar(\r\n x=purp,\r\n y=c2_purpose,\r\n name=c2_name,\r\n text=c2_txt,\r\n textposition='auto',\r\n )\r\n data = [c1_pur, c2_pur]\r\n layout = dict(\r\n title=title,\r\n xaxis=dict(title=X_axis_name),\r\n yaxis=dict(title=Y_axis_name)\r\n )\r\n fig = dict(data=data, layout=layout)\r\n iplot(fig, filename='grouped-bar-direct-labels')\r\ndef allam_display_most_accurance(things , values , title = \"allam display most accurance\"):\r\n data = [\r\n go.Scatterpolar(\r\n r=values,\r\n theta=things,\r\n fill='toself',\r\n )\r\n ]\r\n layout = go.Layout(\r\n title=title\r\n )\r\n fig = dict(data=data, layout=layout)\r\n iplot(fig)\r\ndef allam_bar_graph(purp ,c1_purpose , c1_name= \"type 1 \", title =\"compression\" , X_axis_name =\"X\" , Y_axis_name = \"Y\"):\r\n c1_txt = found_text(c1_purpose)\r\n c1_pur = go.Bar(\r\n x=purp,\r\n y=c1_purpose,\r\n name=c1_name,\r\n text=c1_txt,\r\n textposition='auto',\r\n )\r\n data = [c1_pur]\r\n layout = dict(\r\n title=title,\r\n xaxis=dict(title=X_axis_name),\r\n yaxis=dict(title=Y_axis_name)\r\n )\r\n fig = dict(data=data, layout=layout)\r\n iplot(fig, filename='grouped-bar-direct-labels')\r\ndef 
allam_create_table(df ):\r\n colm = list(df.columns)\r\n ListOfColumnName = []\r\n for i in colm : ListOfColumnName.append(df[i])\r\n fig = go.Figure(data=[go.Table(\r\n header=dict(values= colm ,\r\n fill_color='paleturquoise',\r\n align='left'),\r\n cells=dict(values=ListOfColumnName,\r\n fill_color='lavender',\r\n align='left'))\r\n ])\r\n fig.show()\r\ndef allam_sunburst_charts(df,path,values,color):\r\n fig = px.sunburst(df, path=path, values=values, color=color)\r\n fig.show()\r\ndef allam_histogram(df,x):\r\n fig = px.histogram(df, x=x)\r\n fig.show()\r\ndef allam_histogram_with_rug(df,x,color=None,):\r\n fig = px.histogram(df, x=x, color=color, marginal=\"rug\", hover_data=df.columns)\r\n fig.show()\r\ndef allam_histogram_with_violin(df,x,color=None,):\r\n fig = px.histogram(df, x=x, color=color, marginal=\"violin\", hover_data=df.columns)\r\n fig.show()\r\ndef allam_histogram_with_box(df,x,color=None,):\r\n fig = px.histogram(df, x=x, color=color, marginal=\"box\", hover_data=df.columns)\r\n fig.show()\r\ndef allam_box_plot(df,x,y,color=None):\r\n fig = px.box(df, x=x, y=y, color=color)\r\n fig.update_traces(quartilemethod=\"exclusive\")\r\n fig.show()\r\ndef allam_violin_plot(df,x,y,color=None):\r\n fig = px.violin(df, x=x, y=y, color=color, box=True, points=\"all\", hover_data=df.columns)\r\n fig.show()\r\ndef allam_scatter_plot(df , x,y,color=None,size=None):\r\n fig = px.scatter(df, x=x, y=y, color=color, size=size)\r\n fig.show()\r\ndef allam_commpression_pointplot(df,x,y,clas,row=None,col=None):\r\n grid = sns.FacetGrid(df, row=row ,col=col , size=2.2, aspect=1.6)\r\n grid.map(sns.pointplot, x, y, clas, palette='deep')\r\n grid.add_legend()\r\ndef allam_commpression_barplot(df,x,y,row=None,col=None):\r\n grid = sns.FacetGrid(df, row=row, col=col)\r\n grid.map(sns.barplot, x, y)\r\n grid.add_legend()\r\ndef allam_commpression_histplot(df,x,row=None,col=None):\r\n grid = sns.FacetGrid(df, row=row, col=col, size=2.2, aspect=1.6)\r\n grid.map(plt.hist, x, alpha=.5, bins=20)\r\n grid.add_legend()\r\ndef allam_visualize_date(df,dateColumn , NumericalColumn):\r\n fig = px.line(df, x=dateColumn, y=NumericalColumn)\r\n fig.show()\r\ndef allam_visualize_corr(corr):\r\n plt.figure(figsize=(16, 12))\r\n sns.heatmap(corr, annot=True, fmt='0.2f')\r\ndef convert_pilimg_to_cv2img(img):\r\n return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\r\ndef detect_outlier(df,numerical):\r\n for col in numerical:\r\n outliers = detect_outliers(df, 0, [col])\r\n df.drop(outliers, inplace=True)\r\n print(\"len outliner in {} = {}\".format(col,len(outliers)) )\r\ndef make_encoding_dict(df):\r\n return dict(tuple(zip(df.value_counts().index.tolist(), [i for i in range (100)])))\r\ndef load_modelWithScaler(model_path,scaler_path ,data ,returnName=False,dictionary = None):\r\n model = joblib.load(model_path)\r\n scaler = joblib.load(scaler_path)\r\n dictionary = dictionary\r\n data = data\r\n prediction = model.predict(scaler.transform([data]))\r\n if (returnName == True):\r\n for name, age in dictionary.items():\r\n if age == prediction:\r\n return name\r\n else:\r\n return prediction\r\ndef imuter_nan(df,coll):\r\n imputer = KNNImputer()\r\n for col in coll:\r\n df[col] = imputer.fit_transform(df[[col]])","sub_path":"Data analysis/diabetics_by_allam/Project-main/AllamDataScience.py","file_name":"AllamDataScience.py","file_ext":"py","file_size_in_byte":11249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"457269149","text":"# -*- coding: utf-8 
-*-\nfrom flask import Flask, jsonify, request, Markup, abort, make_response\n# import peewee\n# import json\n\napi = Flask(__name__)\n\n@api.route('/')\ndef index():\n html = '''\n
<html>\n        <body>\n            <!-- iperf3 test parameters; only TestName is currently read by the /iperf3test handler below -->\n            <form action=\"/iperf3test\" method=\"post\">\n            Test Name: <input type=\"text\" name=\"TestName\"><br>\n            Config File: <input type=\"text\" name=\"ConfigFile\"><br>\n            Interval: <input type=\"text\" name=\"Interval\"><br>\n            Bandwidth: <input type=\"text\" name=\"Bandwidth\"><br>\n            MSS: <input type=\"text\" name=\"MSS\"><br>\n            Parallel: <input type=\"text\" name=\"Parallel\"><br>\n            Time: <input type=\"text\" name=\"Time\"><br>\n            Protocol is UDP? : <input type=\"checkbox\" name=\"UDP\"><br>\n            Use Server Output? : <input type=\"checkbox\" name=\"ServerOutput\"><br>\n            Use ESXTOP Output? : <input type=\"checkbox\" name=\"EsxtopOutput\"><br>\n            <input type=\"submit\" value=\"Submit\">\n            </form>\n        </body>\n    </html>
\n '''\n return Markup(html)\n\n@api.route('/iperf3test', methods=['GET', 'POST'])\ndef iperf3test():\n try:\n if request.method == 'POST':\n return request.form['TestName']\n else:\n return request.args.get('TestName', '')\n except Exception as e:\n return str(e)\n\n@api.route('/sayHello', methods=['GET'])\ndef say_hello():\n\n result = {\n \"result\":True,\n \"data\": \"Hello, world!\"\n }\n\n return make_response(jsonify(result))\n # if you do not want to use Unicode: \n # return make_response(json.dumps(result, ensure_ascii=False))\n\n@api.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\nif __name__ == '__main__':\n api.run(host='0.0.0.0', port=8000)\n","sub_path":"altperf-server/apps/start-api.py","file_name":"start-api.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"640596205","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\n# -*- coding: utf-8 -*-\nfrom os import path as os_path\nimport os\nimport sys\n\n#################################\n# General #\n#################################\n\n\nSECRET_KEY = 'SECRET_KEY'\nDB = 'sqlite'\nDEBUG = True\nALLOWED_HOSTS = ['*']\nDOOR_KEY = 'DOOR_KEY'\nRPI_SECRET_KEY = 'RPI_SECRET_KEY'\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nROOT_URLCONF = 'website.urls'\nWSGI_APPLICATION = 'website.wsgi.application'\nSITE_ID = 1\nAPPEND_SLASH = True\nLOGIN_REDIRECT_URL = '/authentication/login/'\nLOGIN_URL = '/authentication/login/'\n\nDATAPORTEN_OAUTH_AUTH_URL = \"https://auth.dataporten.no/oauth/authorization\"\nDATAPORTEN_OAUTH_TOKEN_URL = \"https://auth.dataporten.no/oauth/token\"\nDATAPORTEN_OAUTH_CLIENT_ID = \"SetThis\"\nDATAPORTEN_OAUTH_CLIENT_SECRET = \"MagicSealsAndNarwalsDancingTogetherInRainbows\"\n\nADMINS = (\n ('devops', 'hackerspace-dev@idi.ntnu.no'),\n)\n\ntry:\n from .local_settings import *\nexcept ImportError:\n pass\n\n#################################\n# Installed apps #\n#################################\n\nINSTALLED_APPS = [\n 'django.contrib.humanize',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n 'django.contrib.flatpages',\n 'website',\n 'applications',\n 'news',\n 'door',\n 'files',\n 'ckeditor',\n 'ckeditor_uploader',\n 'sekizai',\n 'sorl.thumbnail',\n 'django_nyt',\n 'wiki',\n 'wiki.plugins.macros',\n 'wiki.plugins.help',\n 'wiki.plugins.links',\n 'wiki.plugins.images',\n 'wiki.plugins.attachments',\n 'wiki.plugins.notifications',\n 'mptt',\n 'authentication',\n 'authentication_feide',\n 'smart_selects',\n 'committees',\n 'dal',\n 'dal_select2',\n 'material',\n 'rpi',\n 'inventory',\n 'userprofile',\n 'vaktliste'\n]\n\n\n#################################\n# App config #\n#################################\n\nTHUMBNAIL_PRESERVE_FORMAT = True\n\n\n#################################\n# Database #\n#################################\n\nif DB == 'postgres':\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': DATABASE_NAME,\n 'USER': DATABASE_USER,\n 'PASSWORD': DATABASE_PASSWORD,\n 'HOST': 'localhost',\n 'PORT': '',\n }\n }\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n 
}\n\n#################################\n# Templates #\n#################################\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'DIRS': (os.path.join(BASE_DIR, 'templates/'),),\n 'OPTIONS': {\n 'context_processors': [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.request\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"sekizai.context_processors.sekizai\",\n ],\n 'debug': DEBUG,\n }\n },\n]\n\n#################################\n# Security #\n#################################\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n\n]\n\n#################################\n# Static #\n#################################\n\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\nif not DEBUG:\n STATIC_ROOT = '../static'\n MEDIA_ROOT = '../media'\n\nCKEDITOR_UPLOAD_PATH = os_path.join(BASE_DIR, 'media/uploads')\nCKEDITOR_BROWSE_SHOW_DIRS = True\nCKEDITOR_UPLOAD_SLUGIFY_FILENAME = False\nCKEDITOR_RESTRICT_BY_USER = False\nCKEDITOR_BROWSE_SHOW_DIRS = False\nCKEDITOR_CONFIGS = {\n 'default': {\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Undo', 'Redo'],\n ['Format', 'Styles', 'Font', 'FontSize'],\n ['Bold', 'Italic', 'Underline'],\n ['HorizontalRule', 'NumberedList', 'BulletedList', '-', 'Outdent', 'Indent'],\n ['JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],\n ['Link', 'Unlink'],\n ['TextColor', 'BGColor', 'Smiley'],\n ['RemoveFormat', 'ShowBlocks', 'Maximize'],\n ['Source']\n ],\n 'width': 840,\n 'height': 300,\n 'toolbarCanCollapse': False,\n }\n}\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n#################################\n# Email #\n#################################\n\nif DEBUG:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n# DEFAULT_FROM_MAIL = 'hackerspace-dev@idi.ntnu.no'\nDEFAULT_FROM_MAIL = 'web.hackerspace.ntnu@gmail.com'\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\n#################################\n# Internalization #\n#################################\n\nTIME_ZONE = 'Europe/Oslo'\n\nLANGUAGE_CODE = 'nb'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = False\n\n#################################\n# Logging #\n#################################\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n }\n}\n\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'skin': 'bootstrapck',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', '-', 'Undo', 'Redo', '-', 'PasteText'],\n ['NumberedList', 'BulletedList', '-', 'Link'],\n ['Maximize', 'Find', 'Replace']\n ],\n 'customConfig': 
'/static/js/ckeditor_config.js',\n },\n\n 'committees': {\n 'skin': 'bootstrapck',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', '-', 'Undo', 'Redo', '-', 'PasteText'],\n ['NumberedList', 'BulletedList', '-', 'Link'],\n ['Maximize', 'Find', 'Replace']\n ],\n 'customConfig': '/static/js/ckeditor_config.js',\n },\n}\n\nDEFAULT_CONFIG = CKEDITOR_CONFIGS\n","sub_path":"website/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":7228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"78380124","text":"from sklearn.neural_network import MLPRegressor\r\nfrom sklearn.model_selection import train_test_split\r\nimport numpy\r\nimport pickle\r\n\r\nx = numpy.genfromtxt(\"x.csv\", delimiter=',')\r\ny = numpy.genfromtxt(\"y.csv\", delimiter=',')\r\n\r\ndata = numpy.zeros((30,30))\r\nz = numpy.loadtxt(open(\"z.csv\",\"rb\"), delimiter=\",\")\r\n\r\nfor i in range(len(data)):\r\n for j in range(len(data[i])):\r\n data[i][j] = x[i] * y[j]\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(data, z, random_state=1)\r\nmodel = MLPRegressor(hidden_layer_sizes=(3,3,3), solver=\"lbfgs\", random_state=1, max_iter=500).fit(X_train, y_train)\r\nfilename = 'learner-model.sav'\r\npickle.dump(model, open(filename,'wb'))\r\nmodel.predict(X_test)\r\nprint(model.score(X_test, y_test))","sub_path":"cs465homework2/model-trainer.py","file_name":"model-trainer.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"35126261","text":"#!/usr/bin/python\n\n\n\nimport cleanup\nfrom topo5 import topo5\nfrom mininet.net import Mininet\nfrom mininet.node import Controller\nfrom mininet.cli import CLI\nfrom mininet.log import setLogLevel, info\nfrom packet import *\n\nfrom printer import *\n\ndef lab5():\n\n\n '''\n packet=create_packet(3,1,101,102,103,104,105,'data')\n print(packet)\n #pktype, leng, src, dest, seq = read_header(packet)\n print(read_data(packet))\n print(read_header(packet))\n\n packet2=create_lsp(1,1,108,'data')\n print(packet2)\n #pktype, leng, src, dest, seq = read_header(packet)\n print(read_lspdata(packet2))\n print(read_lspheader(packet2))\n\n if read_header(packet) > read_lspheader(packet2):\n print('yes')\n\n packet3=create_ack(1,1,108,103)\n print(packet3)\n #pktype, leng, src, dest, seq = read_header(packet)\n print(read_ack(packet3))\n '''\n\n\n net=topo5()\n\n def dijkstra(graph, src, dest, visited=[], distances={}, predecessors={}):\n \"\"\" calculates a shortest path tree routed in src\n \"\"\"\n # a few sanity checks\n if src not in graph:\n raise TypeError('The root of the shortest path tree cannot be found')\n if dest not in graph:\n raise TypeError('The target of the shortest path cannot be found')\n # ending condition\n if src == dest:\n # We build the shortest path and display it\n path = []\n pred = dest\n while pred != None:\n path.append(pred)\n pred = predecessors.get(pred, None)\n print('shortest path: ' + str(path) + \" cost= \" + str(distances[dest]))\n global path2\n path2 = path\n\n else:\n # if it is the initial run, initializes the cost\n if not visited:\n distances[src] = 0\n # visit the neighbors\n for neighbor in graph[src]:\n if neighbor not in visited:\n new_distance = distances[src] + graph[src][neighbor]\n print(new_distance)\n if new_distance <= distances.get(neighbor, float('inf')):\n distances[neighbor] = new_distance\n predecessors[neighbor] = src\n # mark as visited\n 
visited.append(src)\n # now that all neighbors have been visited: recurse\n # select the non visited node with lowest distance 'x'\n # run Dijskstra with src='x'\n unvisited = {}\n for k in graph:\n if k not in visited:\n unvisited[k] = distances.get(k, float('inf'))\n x = min(unvisited, key=unvisited.get)\n dijkstra(graph, x, dest, visited, distances, predecessors)\n\n def dijkstraTable(topo):\n # Routing table creater for each node in network\n\n # open file for saving paths\n\n # filename = \"src.txt\"\n # f = open(filename, 'w+')\n\n topoG = topo.g\n\n graphDic = {} # empty dictionary\n for node in topoG.nodes(): # make switch dictionary without links\n graphDic[node] = {}\n for edge in topoG.edges(): # adds each link to each switch\n graphDic[edge[0]][edge[1]] = 1\n graphDic[edge[1]][edge[0]] = 1\n\n # get paths to all other node from src\n\n for node in graphDic:\n\n path = dijkstra(graphDic, src, node, visited=[], distances={}, predecessors={})\n\n dpidPath = []\n\n # add switch names\n\n for switch in path:\n dpidPath.append(topo.id_gen(name=switch).dpid)\n\n # write paths to file for the src\n\n route = \"%s %s \\n\" % node % path\n print(route)\n\n dijkstraTable(net)\n\n info( '*** Starting network\\n')\n net.start()\n info( '*** Running CLI\\n' )\n CLI( net )\n\n\n\n\n info( '*** Stopping network' )\n net.stop()\n\nif __name__ == '__main__':\n setLogLevel( 'info' )\n lab5()\n","sub_path":"socket/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"30339892","text":"# -*- coding: utf8 -*-\n# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom tencentcloud.common.abstract_model import AbstractModel\n\n\nclass Aspect(AbstractModel):\n \"\"\"作文批改每个维度名字与得分\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Name: 项目 名字\n :type Name: str\n :param Score: 该项得分\n :type Score: float\n \"\"\"\n self.Name = None\n self.Score = None\n\n\n def _deserialize(self, params):\n self.Name = params.get(\"Name\")\n self.Score = params.get(\"Score\")\n\n\nclass CompostionContext(AbstractModel):\n \"\"\"ocr返回的作文文本信息\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Content: 作文内容\n :type Content: str\n \"\"\"\n self.Content = None\n\n\n def _deserialize(self, params):\n self.Content = params.get(\"Content\")\n\n\nclass CorrectData(AbstractModel):\n \"\"\"批改的结果\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Score: 总得分\n :type Score: float\n :param ScoreCat: 各项得分详情\n :type ScoreCat: :class:`tencentcloud.ecc.v20181213.models.ScoreCategory`\n :param Comment: 综合评价\n :type Comment: str\n :param SentenceComments: 句子点评\n :type SentenceComments: list of SentenceCom\n \"\"\"\n self.Score = None\n self.ScoreCat = None\n self.Comment = None\n self.SentenceComments = None\n\n\n def _deserialize(self, params):\n self.Score = params.get(\"Score\")\n if params.get(\"ScoreCat\") is not None:\n 
self.ScoreCat = ScoreCategory()\n self.ScoreCat._deserialize(params.get(\"ScoreCat\"))\n self.Comment = params.get(\"Comment\")\n if params.get(\"SentenceComments\") is not None:\n self.SentenceComments = []\n for item in params.get(\"SentenceComments\"):\n obj = SentenceCom()\n obj._deserialize(item)\n self.SentenceComments.append(obj)\n\n\nclass ECCRequest(AbstractModel):\n \"\"\"ECC请求参数结构体\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Content: 作文文本,必填\n :type Content: str\n :param Title: 作文题目,可选参数\n :type Title: str\n :param Grade: 年级标准, 默认以cet4为标准,取值与意义如下:elementary 小学,grade7 grade8 grade9分别对应初一,初二,初三。 grade10 grade11 grade12 分别对应高一,高二,高三,以及cet4和cet6 分别表示 英语4级和6级。\n :type Grade: str\n :param Outline: 作文提纲,可选参数,作文的写作要求。\n :type Outline: str\n :param ModelSubject: 范文标题,可选参数,本接口可以依据提供的范文对作文进行评分。\n :type ModelSubject: str\n :param ModelContent: 范文内容,可选参数,同上,范文的正文部分。\n :type ModelContent: str\n \"\"\"\n self.Content = None\n self.Title = None\n self.Grade = None\n self.Outline = None\n self.ModelSubject = None\n self.ModelContent = None\n\n\n def _deserialize(self, params):\n self.Content = params.get(\"Content\")\n self.Title = params.get(\"Title\")\n self.Grade = params.get(\"Grade\")\n self.Outline = params.get(\"Outline\")\n self.ModelSubject = params.get(\"ModelSubject\")\n self.ModelContent = params.get(\"ModelContent\")\n\n\nclass ECCResponse(AbstractModel):\n \"\"\"ECC返回参数结构体\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Data: 整体的批改结果\n :type Data: :class:`tencentcloud.ecc.v20181213.models.CorrectData`\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n \"\"\"\n self.Data = None\n self.RequestId = None\n\n\n def _deserialize(self, params):\n if params.get(\"Data\") is not None:\n self.Data = CorrectData()\n self.Data._deserialize(params.get(\"Data\"))\n self.RequestId = params.get(\"RequestId\")\n\n\nclass EHOCRRequest(AbstractModel):\n \"\"\"EHOCR请求参数结构体\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Image: 图片所在的url或base64编码后的图像数据,依据InputType而定\n :type Image: str\n :param InputType: 输出图片类型,0表示Image字段是图片所在的url,1表示Image字段是base64编码后的图像数据\n :type InputType: int\n \"\"\"\n self.Image = None\n self.InputType = None\n\n\n def _deserialize(self, params):\n self.Image = params.get(\"Image\")\n self.InputType = params.get(\"InputType\")\n\n\nclass EHOCRResponse(AbstractModel):\n \"\"\"EHOCR返回参数结构体\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Data: 识别后的作文内容\n :type Data: :class:`tencentcloud.ecc.v20181213.models.CompostionContext`\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n \"\"\"\n self.Data = None\n self.RequestId = None\n\n\n def _deserialize(self, params):\n if params.get(\"Data\") is not None:\n self.Data = CompostionContext()\n self.Data._deserialize(params.get(\"Data\"))\n self.RequestId = params.get(\"RequestId\")\n\n\nclass ScoreCategory(AbstractModel):\n \"\"\"四个维度的得分\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Words: 词汇项\n :type Words: :class:`tencentcloud.ecc.v20181213.models.Aspect`\n :param Sentences: 句子项\n :type Sentences: :class:`tencentcloud.ecc.v20181213.models.Aspect`\n :param Structure: 篇章结构\n :type Structure: :class:`tencentcloud.ecc.v20181213.models.Aspect`\n :param Content: 内容\n :type Content: :class:`tencentcloud.ecc.v20181213.models.Aspect`\n \"\"\"\n self.Words = None\n self.Sentences = None\n self.Structure = None\n self.Content = None\n\n\n def _deserialize(self, params):\n if params.get(\"Words\") is not None:\n self.Words = Aspect()\n 
self.Words._deserialize(params.get(\"Words\"))\n if params.get(\"Sentences\") is not None:\n self.Sentences = Aspect()\n self.Sentences._deserialize(params.get(\"Sentences\"))\n if params.get(\"Structure\") is not None:\n self.Structure = Aspect()\n self.Structure._deserialize(params.get(\"Structure\"))\n if params.get(\"Content\") is not None:\n self.Content = Aspect()\n self.Content._deserialize(params.get(\"Content\"))\n\n\nclass SentenceCom(AbstractModel):\n \"\"\"句子点评\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Suggestions: 点评内容\n :type Suggestions: list of SentenceSuggest\n :param Sentence: 点评的句子信息\n :type Sentence: :class:`tencentcloud.ecc.v20181213.models.SentenceItem`\n \"\"\"\n self.Suggestions = None\n self.Sentence = None\n\n\n def _deserialize(self, params):\n if params.get(\"Suggestions\") is not None:\n self.Suggestions = []\n for item in params.get(\"Suggestions\"):\n obj = SentenceSuggest()\n obj._deserialize(item)\n self.Suggestions.append(obj)\n if params.get(\"Sentence\") is not None:\n self.Sentence = SentenceItem()\n self.Sentence._deserialize(params.get(\"Sentence\"))\n\n\nclass SentenceItem(AbstractModel):\n \"\"\"句子的相关信息\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Sentence: 英语句子\n :type Sentence: str\n :param ParaID: 段落id\n :type ParaID: int\n :param SentenceID: 句子id\n :type SentenceID: int\n \"\"\"\n self.Sentence = None\n self.ParaID = None\n self.SentenceID = None\n\n\n def _deserialize(self, params):\n self.Sentence = params.get(\"Sentence\")\n self.ParaID = params.get(\"ParaID\")\n self.SentenceID = params.get(\"SentenceID\")\n\n\nclass SentenceSuggest(AbstractModel):\n \"\"\"句子批阅建议\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Type: 类型\n :type Type: str\n :param ErrorType: 错误类型\n :type ErrorType: str\n :param Origin: 原始单词\n :type Origin: str\n :param Replace: 替换成 的单词\n :type Replace: str\n :param Message: 提示信息\n :type Message: str\n \"\"\"\n self.Type = None\n self.ErrorType = None\n self.Origin = None\n self.Replace = None\n self.Message = None\n\n\n def _deserialize(self, params):\n self.Type = params.get(\"Type\")\n self.ErrorType = params.get(\"ErrorType\")\n self.Origin = params.get(\"Origin\")\n self.Replace = params.get(\"Replace\")\n self.Message = params.get(\"Message\")","sub_path":"tencentcloud/ecc/v20181213/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"272190901","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 18 10:33:28 2018\n@author: zhengji\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom .FEHelper import *\nimport time\nimport sys\n\nclass BaseFeatureBuilding(object):\n \n def __init__(self, df, ephemeride, start, end, freq):\n self.df = df\n self.ephemeride = ephemeride\n self.start = start\n self.end = end \n self.freq = freq\n \n # convert df.TIME from string to datetime\n self.df.TIME = pd.to_datetime(self.df.TIME)\n \n def __getitem__(self, key):\n return self.df[key]\n \n \n def fillna(self):\n \"\"\"Make date index continuous in self.df\"\"\"\n if (not(FEHelper.isContinuous(self.df.TIME, self.start, self.end, self.freq))):\n df_missingdate = pd.DataFrame(index=pd.date_range(start=self.start, end=self.end, freq=self.freq))\n df_missingdate = df_missingdate.reset_index()\n df_missingdate = df_missingdate.rename(columns = {\"index\":\"TIME\"})\n\n self.df = df_missingdate.merge(self.df, 
on='TIME', how='left')\n self.df = self.df.fillna(0)\n \n \n def merge(self):\n \"\"\"Merge self.df with self.ephemeride\"\"\"\n self.ephemeride[\"HOURL\"] = self.ephemeride[\"LEVER\"].apply(lambda x: dt.datetime.strptime(str(x),'%Hh%M').time().hour) \n self.ephemeride[\"MINL\"] = self.ephemeride[\"LEVER\"].apply(lambda x: dt.datetime.strptime(str(x),'%Hh%M').time().minute/ 60.0) \n self.ephemeride[\"HOURC\"] = self.ephemeride[\"COUCHER\"].apply(lambda x: dt.datetime.strptime(str(x),'%Hh%M').time().hour) \n self.ephemeride[\"MINC\"] = self.ephemeride[\"COUCHER\"].apply(lambda x: dt.datetime.strptime(str(x),'%Hh%M').time().minute/ 60.0) \n self.ephemeride[\"HALF_HOURL\"] = self.ephemeride[\"HOURL\"] + self.ephemeride[\"MINL\"]\n self.ephemeride[\"HALF_HOURC\"] = self.ephemeride[\"HOURC\"] + self.ephemeride[\"MINC\"]\n \n self.df[\"DATE\"] = pd.to_datetime(self.df[\"TIME\"].apply(lambda x : x.date())) \n self.df[\"MM_DD\"] = self.df[\"DATE\"].map(lambda x: str(x.month).zfill(2) + '-' + str(x.day).zfill(2)) \n self.df = self.df.merge(self.ephemeride[['HALF_HOURL','HALF_HOURC','MM_DD']], on='MM_DD', how='left').copy()\n \n def addBasicTimeFeatures(self):\n \"\"\"add basic time features\"\"\"\n self.df[\"YEAR\"] = self.df['TIME'].apply(lambda x : x.year)\n self.df[\"MONTH\"] = self.df['TIME'].apply(lambda x : x.month)\n self.df[\"DAY\"] = self.df['TIME'].apply(lambda x : x.day)\n self.df['WEEK_DAY'] = self.df['TIME'].apply(lambda x: x.weekday())\n \n def addPeriodicTimeFeatures(self):\n \"\"\"add sin/cos time features\"\"\"\n self.df['xMONTH'] = self.df['MONTH'].map(lambda x : np.sin(2*np.pi*x/12))\n self.df['yMONTH'] = self.df['MONTH'].map(lambda x : np.cos(2*np.pi*x/12))\n self.df['xDAY'] = self.df['DAY'].map(lambda x : np.sin(2*np.pi*x/31))\n self.df['yDAY'] = self.df['DAY'].map(lambda x : np.cos(2*np.pi*x/31))\n self.df['xWD'] = self.df['WEEK_DAY'].map(lambda x : np.sin(2*np.pi*x/7))\n self.df['yWD'] = self.df['WEEK_DAY'].map(lambda x : np.cos(2*np.pi*x/7))\n \n def addEphemerideFeatures(self): \n \"\"\"add ephemeride related features\"\"\"\n self.df['DAY_LENGTH'] = self.df['HALF_HOURC'] - self.df['HALF_HOURL']\n \n def addTimeGroupFeatures(self):\n \"\"\"create time groups and dummy them\"\"\"\n self.df['MONTH_GROUP'] = self.df['MONTH'].map(FEHelper.Monthgroup)\n self.df['WD_GROUP'] = self.df['WEEK_DAY'].map(FEHelper.Daygroup)\n self.df = self.df.join(pd.get_dummies(self.df['MONTH_GROUP'], prefix='MG'))\n self.df = self.df.join(pd.get_dummies(self.df['WD_GROUP'], prefix='WDG'))\n \n def addLags(self):\n \"\"\"add lags\"\"\"\n pass\n\n def addTSE(self):\n \"\"\"add Time Since Epoch features\"\"\" \n epoch = min(self.df.DATE)\n self.df['TSE'] = self.df.DATE.map(lambda x: x-epoch)\n self.df['TSE'] = self.df['TSE']/ np.timedelta64(1, 'D')\n \n \n def addHolidays(self):\n holidays = FEHelper.lholidays() \n self.df = self.df.merge(holidays, left_on='DATE', right_on='ds', how='left')\n self.df.drop(['ds'], axis=1,inplace=True)\n self.df['holiday'].fillna(value=0,inplace=True)\n\n aftholidays = FEHelper.aftholidays() \n self.df = self.df.merge(aftholidays, left_on='DATE', right_on='ds', how='left')\n self.df.drop(['ds'], axis=1, inplace=True)\n self.df['aftholiday'].fillna(value=0, inplace=True)\n \n list_public_holidays = [x.strftime('%Y-%m-%d') for x in holidays['ds']]\n self.df['DATE'] = self.df['DATE'].apply(lambda x: x.strftime('%Y-%m-%d'))\n self.df['JF'] = self.df['DATE'].apply(lambda x: int(x in list_public_holidays))\n\n\n \"\"\" 2. 
Add school holidays\"\"\"\n vac_toussaint = ((self.df.DATE >= '2012-10-27') & (self.df.DATE < '2012-11-08')) + \\\n ((self.df.DATE >= '2013-10-19') & (self.df.DATE < '2013-11-04')) + \\\n ((self.df.DATE >= '2014-10-18') & (self.df.DATE < '2014-11-03')) + \\\n ((self.df.DATE >= '2015-10-17') & (self.df.DATE < '2015-11-02')) + \\\n ((self.df.DATE >= '2016-10-19') & (self.df.DATE < '2016-11-03')) + \\\n ((self.df.DATE >= '2017-10-21') & (self.df.DATE < '2017-11-06')) + \\\n ((self.df.DATE >= '2018-10-20') & (self.df.DATE < '2018-11-05')) + \\\n ((self.df.DATE >= '2019-10-19') & (self.df.DATE < '2019-11-04')) + \\\n ((self.df.DATE >= '2020-10-17') & (self.df.DATE < '2020-11-02')) + \\\n ((self.df.DATE >= '2021-10-16') & (self.df.DATE < '2021-10-31')) \n vac_noel = ((self.df.DATE >= '2012-01-01') & (self.df.DATE < '2012-01-03')) + \\\n ((self.df.DATE >= '2012-12-22') & (self.df.DATE < '2013-01-07')) + \\\n ((self.df.DATE >= '2013-12-21') & (self.df.DATE < '2014-01-06')) + \\\n ((self.df.DATE >= '2014-12-20') & (self.df.DATE < '2015-01-05')) + \\\n ((self.df.DATE >= '2015-12-19') & (self.df.DATE < '2016-01-04')) + \\\n ((self.df.DATE >= '2016-12-17') & (self.df.DATE < '2017-01-03')) + \\\n ((self.df.DATE >= '2017-12-23') & (self.df.DATE < '2018-01-08')) + \\\n ((self.df.DATE >= '2018-12-22') & (self.df.DATE < '2019-01-07')) + \\\n ((self.df.DATE >= '2019-12-21') & (self.df.DATE < '2020-01-06')) + \\\n ((self.df.DATE >= '2020-12-19') & (self.df.DATE < '2021-01-04')) + \\\n ((self.df.DATE >= '2021-12-18') & (self.df.DATE < '2022-01-02')) \n vac_hiver = ((self.df.DATE >= '2012-02-11') & (self.df.DATE < '2012-03-12')) + \\\n ((self.df.DATE >= '2013-02-16') & (self.df.DATE < '2013-03-18')) + \\\n ((self.df.DATE >= '2014-02-15') & (self.df.DATE < '2014-03-17')) + \\\n ((self.df.DATE >= '2015-02-07') & (self.df.DATE < '2015-03-09')) + \\\n ((self.df.DATE >= '2016-02-07') & (self.df.DATE < '2016-03-07')) + \\\n ((self.df.DATE >= '2017-02-04') & (self.df.DATE < '2017-03-06')) + \\\n ((self.df.DATE >= '2018-02-10') & (self.df.DATE < '2018-03-12')) + \\\n ((self.df.DATE >= '2019-02-08') & (self.df.DATE < '2019-03-09')) + \\\n ((self.df.DATE >= '2020-02-08') & (self.df.DATE < '2020-03-09')) + \\\n ((self.df.DATE >= '2021-02-06') & (self.df.DATE < '2021-03-08')) + \\\n ((self.df.DATE >= '2022-02-05') & (self.df.DATE < '2022-03-06')) \n vac_printemps = ((self.df.DATE >= '2012-04-07') & (self.df.DATE < '2012-05-07')) + \\\n ((self.df.DATE >= '2013-04-13') & (self.df.DATE < '2013-05-13')) + \\\n ((self.df.DATE >= '2014-04-12') & (self.df.DATE < '2014-05-28')) + \\\n ((self.df.DATE >= '2015-04-11') & (self.df.DATE < '2015-05-11')) + \\\n ((self.df.DATE >= '2016-04-02') & (self.df.DATE < '2016-05-02')) + \\\n ((self.df.DATE >= '2017-04-01') & (self.df.DATE < '2017-05-02')) + \\\n ((self.df.DATE >= '2018-04-07') & (self.df.DATE < '2018-05-07')) + \\\n ((self.df.DATE >= '2019-04-04') & (self.df.DATE < '2019-05-04')) + \\\n ((self.df.DATE >= '2020-04-04') & (self.df.DATE < '2020-05-04')) + \\\n ((self.df.DATE >= '2021-04-10') & (self.df.DATE < '2021-05-10')) + \\\n ((self.df.DATE >= '2022-04-02') & (self.df.DATE < '2022-05-01')) \n vac_ete = ((self.df.DATE >= '2012-07-05') & (self.df.DATE < '2012-09-03')) + \\\n ((self.df.DATE >= '2013-07-06') & (self.df.DATE < '2013-09-03')) + \\\n ((self.df.DATE >= '2014-07-05') & (self.df.DATE < '2014-09-01')) + \\\n ((self.df.DATE >= '2015-07-04') & (self.df.DATE < '2015-09-01')) + \\\n ((self.df.DATE >= '2016-07-06') & (self.df.DATE < '2016-09-01')) + \\\n ((self.df.DATE >= 
'2017-07-08') & (self.df.DATE < '2017-09-04')) + \\\n ((self.df.DATE >= '2018-07-07') & (self.df.DATE < '2018-09-03')) + \\\n ((self.df.DATE >= '2019-07-04') & (self.df.DATE < '2019-09-02')) + \\\n ((self.df.DATE >= '2020-07-04') & (self.df.DATE < '2020-09-01')) + \\\n ((self.df.DATE >= '2021-07-06') & (self.df.DATE < '2021-09-01')) + \\\n ((self.df.DATE >= '2022-07-02') & (self.df.DATE < '2022-09-01')) \n\n self.df['HolidaysFR'] = (vac_toussaint + vac_noel + vac_hiver + vac_printemps + vac_ete).astype('int')\n\n \n def transform(self):\n self.fillna()\n self.merge()\n self.addBasicTimeFeatures()\n self.addPeriodicTimeFeatures()\n self.addTSE()\n self.addHolidays()\n self.addEphemerideFeatures() # // proper for J+7 model\n self.addTimeGroupFeatures() # // proper for J+7 / M+3 model\n self.addLags() # // proper for J+7 / M+3 model\n\n return self.df\n\n\nclass DayFeatureBuilding(BaseFeatureBuilding):\n \n def __init__(self, df, ephemeride, start, end): \n super(DayFeatureBuilding,self).__init__(df, ephemeride, start, end, 'D')\n \n def __getitem__(self, key): \n return super(DayFeatureBuilding, self).__getitem__(key) \n \n def fillna(self):\n \"\"\"Make date index continuous in self.df\"\"\"\n super(DayFeatureBuilding, self).fillna()\n \n \n def merge(self):\n \"\"\"Merge self.df with self.ephemeride\"\"\"\n super(DayFeatureBuilding, self).merge()\n \n def addBasicTimeFeatures(self):\n \"\"\"add basic time features\"\"\"\n super(DayFeatureBuilding, self).addBasicTimeFeatures()\n \n def addPeriodicTimeFeatures(self):\n \"\"\"add sin/cos time features\"\"\"\n super(DayFeatureBuilding, self).addPeriodicTimeFeatures()\n\n def addEphemerideFeatures(self): \n \"\"\"add ephemeride related features\"\"\"\n super(DayFeatureBuilding, self).addEphemerideFeatures()\n \n def addTimeGroupFeatures(self):\n \"\"\"create time groups and dummy them\"\"\"\n super(DayFeatureBuilding, self).addTimeGroupFeatures()\n \n def addLags(self):\n \"\"\"add lags\"\"\"\n def shifted_mois(x):\n if self.df['MONTH'].iloc[x] > 3:\n shifted = self.df[(self.df['MONTH'] == self.df['MONTH'].iloc[x]-3) & \\\n (self.df['YEAR'] == self.df['YEAR'].iloc[x]) & \\\n (self.df['WEEK_DAY'] == self.df['WEEK_DAY'].iloc[x]) & \\\n (self.df['JF'] == 0) & (self.df['AftJF'] == 0) ].index.values\n elif self.df['MONTH'].iloc[x] <= 3:\n shifted = self.df[(self.df['MONTH'] == (self.df['MONTH'].iloc[x]-4)%13) & \\\n (self.df['YEAR'] == self.df['YEAR'].iloc[x]-1) & \\\n (self.df['WEEK_DAY'] == self.df['WEEK_DAY'].iloc[x]) & \\\n (self.df['JF'] == 0) & (self.df['AftJF'] == 0)].index.values\n return (self.df['y'].iloc[shifted].values) \n \n self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'] = self.df.index.map(lambda x : shifted_mois(x))\n self.df['MEAN_LAGGED'] = self.df.index.map(lambda x : np.nanmean(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x] ) if (len(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x]) > 0) else (self.df['y'].iloc[x] )) \n \n self.df['LAGGED_1'] = self.df.index.map(lambda x : self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x][len(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x])-1] if len(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x]) > 0 else self.df['MEAN_LAGGED'].iloc[x])\n self.df['LAGGED_2'] = self.df.index.map(lambda x : self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x][len(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x])-2] if len(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x]) > 1 else self.df['MEAN_LAGGED'].iloc[x])\n self.df['LAGGED_3'] = self.df.index.map(lambda x : 
self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x][len(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x])-3] if len(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x]) > 2 else self.df['MEAN_LAGGED'].iloc[x])\n self.df['LAGGED_4'] = self.df.index.map(lambda x : self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x][len(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x])-4] if len(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x]) > 3 else self.df['MEAN_LAGGED'].iloc[x])\n self.df['STD_LAGGED'] = self.df.index.map(lambda x : np.nanstd(self.df['LAGGED_3_MONTH_VALUES_WEEKDAY'].iloc[x] )) \n self.df['LAGGED_LY_DW_avg'] = self.df.apply(lambda x: np.nanmean(self.df[(self.df['YEAR'] == x['YEAR']-1) & (self.df['MONTH'] == x['MONTH']) & (self.df['WEEK_DAY'] == x['WEEK_DAY']) & (self.df['JF'] == 0) & (self.df['AftJF'] == 0)]['y']) if (x['YEAR']>min(self.df.YEAR)) else x['y'] , axis=1)\n\n # Same holiday of last years \n holidays = FEHelper.lholidays()\n for h in set(holidays.holiday):\n self.df.loc[self.df['holiday']==h, 'LAGGED_LY_DW_avg'] = self.df.loc[self.df['holiday']==h].apply(lambda x : np.nanmean(self.df[(self.df['holiday']==h) & (self.df['YEAR'] < x['YEAR'])]['y'] if (x['YEAR']>min(self.df.YEAR)) else x['y']), axis=1)\n\n # Same after holiday of last years\n aftholidays = FEHelper.aftholidays()\n for h in set(aftholidays.aftholiday):\n self.df.loc[self.df['aftholiday']==h, 'LAGGED_LY_DW_avg'] = self.df.loc[self.df['aftholiday']==h].apply(lambda x : np.mean(self.df[(self.df['aftholiday']==h) & (self.df['YEAR'] < x['YEAR'])]['y'] if (x['YEAR']>min(self.df.YEAR)) else x['y']), axis=1)\n \n self.df = self.df.fillna(0)\n \n def addTSE(self):\n \"\"\"add Time Since Epoch features\"\"\" \n super(DayFeatureBuilding, self).addTSE()\n \n \n def addHolidays(self):\n super(DayFeatureBuilding, self).addHolidays()\n \n \"\"\" 3. 
Add bef/aft public holidays\"\"\"\n self.df[\"AftJF\"] = self.df[\"JF\"].shift(1)\n self.df[\"BefJF\"] = self.df[\"JF\"].shift(-1)\n self.df.ix[self.df['JF']==1,'AftJF']=0\n self.df.ix[self.df['JF']==1,'BefJF']=0\n # jour ouvre\n idx_JF_special = self.df[(self.df.JF==1) &(self.df.WEEK_DAY==4) ].index.values\n idx_max = max(self.df.index.values)\n for tmp in idx_JF_special:\n if tmp+1<=idx_max:\n self.df['AftJF'].iloc[tmp+1]=0\n if tmp+3<=idx_max:\n self.df['AftJF'].iloc[tmp+3]=1\n \n def transform(self):\n return super(DayFeatureBuilding, self).transform()\n\n\nclass HourFeatureBuilding(BaseFeatureBuilding):\n \n def __init__(self, df, ephemeride, start, end):\n super(HourFeatureBuilding,self).__init__(df, ephemeride, start, end, 'H')\n \n def __getitem__(self, key):\n return super(HourFeatureBuilding, self).__getitem__(key) \n \n def fillna(self):\n \"\"\"Make date index continuous in self.df\"\"\"\n super(HourFeatureBuilding, self).fillna()\n \n \n def merge(self):\n \"\"\"Merge self.df with self.ephemeride\"\"\"\n super(HourFeatureBuilding, self).merge()\n \n def addBasicTimeFeatures(self):\n \"\"\"add basic time features\"\"\"\n super(HourFeatureBuilding, self).addBasicTimeFeatures()\n self.df[\"HOUR\"] = self.df['TIME'].apply(lambda x : x.hour) \n \n def addPeriodicTimeFeatures(self):\n \"\"\"add sin/cos time features\"\"\"\n super(HourFeatureBuilding, self).addPeriodicTimeFeatures()\n self.df['xHOUR'] = self.df['HOUR'].map(lambda x : np.sin(2*np.pi*x/24))\n self.df['yHOUR'] = self.df['HOUR'].map(lambda x : np.cos(2*np.pi*x/24))\n\n \n def addEphemerideFeatures(self): \n \"\"\"add ephemeride related features\"\"\"\n super(HourFeatureBuilding, self).addEphemerideFeatures()\n self.df['LIGHT'] = self.df.index.map(lambda x : int(self.df['HALF_HOURL'].iloc[x]<=self.df['HOUR'].iloc[x]<=self.df['HALF_HOURC'].iloc[x]))\n \n def addTimeGroupFeatures(self):\n \"\"\"create time groups and dummy them\"\"\"\n super(HourFeatureBuilding, self).addTimeGroupFeatures()\n self.df['HOUR_GROUP'] = self.df['HOUR'].map(FEHelper.Hourgroup)\n self.df = self.df.join(pd.get_dummies(self.df['HOUR_GROUP'], prefix='HG'))\n \n def addLags(self):\n \"\"\"add lags\"\"\"\n if FEHelper.isContinuous(self.df.TIME, self.start, self.end, self.freq):\n \n self.df[\"LAGGED_1\"]=self.df[\"y\"].shift(168) #lag J-7 à la même heure\n self.df[\"LAGGED_2\"]=self.df[\"y\"].shift(336) #lag J-14 à la même heure\n self.df[\"LAGGED_3\"]=self.df[\"y\"].shift(504) #lag J-21 à la même heure\n self.df[\"LAGGED_4\"]=self.df[\"y\"].shift(672) #lag J-28 à la même heure\n \n # Question: whether to handle nan values in lags\n self.df[\"LAGGED_1\"].fillna(self.df['y'], inplace=True)\n self.df[\"LAGGED_2\"].fillna(self.df['y'], inplace=True)\n self.df[\"LAGGED_3\"].fillna(self.df['y'], inplace=True)\n self.df[\"LAGGED_4\"].fillna(self.df['y'], inplace=True)\n \n self.df[\"MEAN_LAGGED\"]=self.df.index.map(lambda x : np.mean([self.df[\"LAGGED_1\"].iloc[x],self.df[\"LAGGED_2\"].iloc[x],self.df[\"LAGGED_3\"].iloc[x],self.df[\"LAGGED_4\"].iloc[x]]))\n self.df[\"STD_LAGGED\"]=self.df.index.map(lambda x : np.std([self.df[\"LAGGED_1\"].iloc[x],self.df[\"LAGGED_2\"].iloc[x],self.df[\"LAGGED_3\"].iloc[x],self.df[\"LAGGED_4\"].iloc[x]]))\n \n # Same hour, weekday, month of last year\n self.df['LAGGED_LY_DW_avg'] = self.df.apply(lambda x: np.mean(self.df[(self.df['YEAR'] == x['YEAR']-1) & (self.df['MONTH'] == x['MONTH']) & (self.df['WEEK_DAY'] == x['WEEK_DAY']) & (self.df['HOUR'] == x['HOUR']) & (self.df['JF'] == 0) & (self.df['AftJF'] == 0)]['y']) if 
(x['YEAR']>min(self.df.YEAR)) else x['y'] , axis=1)\n # Same hour, holiday of last years\n holidays = FEHelper.lholidays()\n for h in set(holidays.holiday):\n self.df.loc[self.df['holiday']==h, 'LAGGED_LY_DW_avg'] = self.df.loc[self.df['holiday']==h].apply(lambda x : np.mean(self.df[(self.df['holiday']==h) & (self.df['YEAR'] < x['YEAR']) & (self.df['HOUR'] == x['HOUR'])]['y']\n if (x['YEAR']>min(self.df.YEAR)) else x['y']), axis=1)\n def addTSE(self):\n \"\"\"add Time Since Epoch features\"\"\" \n super(HourFeatureBuilding, self).addTSE()\n \n \n def addHolidays(self):\n super(HourFeatureBuilding, self).addHolidays()\n \n \"\"\" 3. Add bef/aft public holidays\"\"\"\n self.df[\"AftJF\"] = self.df[\"JF\"].shift(24)\n self.df[\"BefJF\"] = self.df[\"JF\"].shift(-24)\n self.df.loc[self.df['JF']==1,'AftJF']=0\n self.df.loc[self.df['JF']==1,'BefJF']=0\n # business day (public holiday falling on a Friday)\n idx_JF_special = self.df[(self.df.JF==1) &(self.df.WEEK_DAY==4) ].index.values\n idx_max = max(self.df.index.values)\n for tmp in idx_JF_special:\n if tmp+24<=idx_max:\n self.df['AftJF'].iloc[tmp+24]=0\n if tmp+72<=idx_max:\n self.df['AftJF'].iloc[tmp+72]=1\n \n def transform(self):\n return super(HourFeatureBuilding, self).transform()","sub_path":"ccf/features/TimeFeatureBuilder.py","file_name":"TimeFeatureBuilder.py","file_ext":"py","file_size_in_byte":20243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"5710682","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\nimport os\n\nAUTHOR = 'Andrew Barbarello'\nSITENAME = \"Drew Barbarello's Blog\"\nSITEURL = ''\n\nGITHUB_USER = 'drewbarbs'\nGITHUB_SKIP_FORK = True\n\nTIMEZONE = 'America/New_York'\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\n\n# Blogroll\n# LINKS = (('Pelican', 'http://getpelican.com/'),\n# ('Python.org', 'http://python.org/'),\n# ('Jinja2', 'http://jinja.pocoo.org/'),\n# ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('facebook', 'https://www.facebook.com/andrew.barbarello'),\n ('twitter', 'https://twitter.com/_dbarbs'),)\n\nDEFAULT_PAGINATION = 5\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\n# Formatting for dates\nDEFAULT_DATE_FORMAT = ('%a, %d %B %Y')\n\n# Formatting for urls\n\nARTICLE_URL = \"posts/{date:%Y}/{date:%m}/{slug}/\"\nARTICLE_SAVE_AS = \"posts/{date:%Y}/{date:%m}/{slug}/index.html\"\n\nCATEGORY_URL = \"category/{slug}\"\nCATEGORY_SAVE_AS = \"category/{slug}/index.html\"\n\nTAG_URL = \"tag/{slug}/\"\nTAG_SAVE_AS = \"tag/{slug}/index.html\"\n\n# Generate yearly archive\nYEAR_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/index.html'\n\nPLUGIN_PATHS = ['./pelican-plugins', './my-plugins']\n\nTHEME = \"./themes/simple\"\n\nPLUGINS = ['headextra', 'pandoc_reader']\n\nSTATIC_PATHS = ['pdf', 'img', 'favicon.gif']\n\nPANDOC_ARGS = [\n '--mathjax',\n '--bibliography', os.path.join(os.path.dirname(__file__), 'refs.bib'),\n '--filter', 'pandoc-citeproc',\n '--csl', 'acm-sig-proceedings.csl'\n ]\n\n#MENUITEMS = [('Resume', '/pdf/Barbarello_Resume.pdf')]\n\n# Sidebar configuration\n\nDISPLAY_RECENT_POSTS_ON_SIDEBAR = True\n# DISPLAY_TAGS_ON_SIDEBAR = False\n\nPANDOC_EXTENSIONS = [\n '+fenced_code_blocks',\n '+fenced_code_attributes',\n '+tex_math_single_backslash'\n 
]\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"383323927","text":"import cv2\nimport glob\nimport pandas as pd\nfrom imageio import imread,imsave,imwrite\nfrom skimage.transform import resize\nfrom tqdm import tqdm\nimport dlib\n\nimg_path = glob.glob(\"image/*\")\nprint(img_path)\nimg_data = pd.DataFrame(columns=['image','label','name'])\n\nfor i,train_path in tqdm(enumerate(img_path)):\n name = train_path.split(\"\\\\\")[-1]\n images = glob.glob(train_path + \"/*\")\n for image in images:\n img_data.loc[len(img_data)]=[image,i,name]\n \nprint(img_data)\ncnn_face_detector = dlib.cnn_face_detection_model_v1('C:/Users/Admin/Desktop/mmod_human_face_detector.dat')\nfor img_path in img_data.image:\n image = imread(img_path)\n\n print(\"Processing : \" + img_path)\n\n faces_cnn = cnn_face_detector(image,1)\n faceRect = faces_cnn[0]\n \n x1 = faceRect.rect.left()\n y1 = faceRect.rect.top()\n x2 = faceRect.rect.right()\n y2 = faceRect.rect.bottom()\n\n face = image[y1:y2,x1:x2]\n imsave(img_path,face)\n print (\"Done : \" + img_path)\n ","sub_path":"face_detect_save.py","file_name":"face_detect_save.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"330772971","text":"from api.core.base import DanmakuEngine\nfrom api.core.models import Danmaku, DanmakuMetaInfo, DanmakuCollection\nfrom api.utils.logger import logger\n\n\nclass DanmukaBahamt(DanmakuEngine):\n \"\"\"这个网站是台湾的, 响应有点慢, 返回结果会转换为简体中文\"\"\"\n\n def __init__(self):\n self._host = \"https://ani.gamer.com.tw\"\n self._search_api = self._host + \"/search.php\"\n self._detail_api = self._host + \"/animeRef.php\"\n self._dm_api = self._host + \"/ajax/danmuGet.php\"\n\n def search(self, keyword: str):\n keyword = self.convert_to_tw(keyword) # 使用繁体搜索, 否则没什么结果\n logger.info(f\"Searching for danmaku: {keyword}\")\n resp = self.get(self._search_api, params={\"kw\": keyword})\n if resp.status_code != 200:\n return\n anime_list = self.xpath(resp.text, '//a[contains(@href, \"animeRef\")]')\n for anime in anime_list:\n meta = DanmakuMetaInfo()\n meta.title = self.convert_to_zh(anime.xpath('div[@class=\"theme-info-block\"]/p/text()')[0]) # 转简体\n meta.play_page_url = anime.xpath('@href')[0] # /animeRef.php?sn=111487\n num_str = anime.xpath('.//span[@class=\"theme-number\"]/text()')[0] # 第14集\n meta.num = int(num_str.strip().replace(\"第\", \"\").replace(\"集\", \"\")) # 14\n yield meta\n\n def get_detail(self, play_page_url: str):\n sn = play_page_url.split(\"=\")[-1] # 111487\n collection = DanmakuCollection()\n resp = self.get(self._detail_api, params={\"sn\": sn}, allow_redirects=True) # 这里有一次重定向\n if resp.status_code != 200:\n return collection\n\n season = self.xpath(resp.text, '//section[@class=\"season\"]//li')\n if season: # 番剧播放列表存在的话\n for ep in season:\n dmk = Danmaku()\n dmk.name = self.convert_to_zh(ep.xpath(\"./a/text()\")[0])\n sn_str = ep.xpath(\"./a/@href\")[0] # ?sn=16240\n dmk.cid = sn_str.split(\"=\")[-1]\n collection.append(dmk)\n return collection\n\n # 电影等情况, 只有1集视频\n dmk = Danmaku()\n name = self.xpath(resp.text, '//div[@class=\"anime_name\"]/h1/text()')[0]\n dmk.name = self.convert_to_zh(name)\n this_url = self.xpath(resp.text, '//meta[@property=\"og:url\"]/@content')[0]\n dmk.cid = this_url.split(\"=\")[-1]\n collection.append(dmk)\n return collection\n\n def get_danmaku(self, 
cid: str):\n payload = {\"sn\": cid}\n ret = []\n resp = self.post(self._dm_api, data=payload, timeout=10)\n if resp.status_code != 200:\n return ret\n data = resp.json()\n for item in data:\n ret.append([\n item[\"time\"], # danmaku timestamp\n item[\"position\"], # danmaku position\n int(item[\"color\"][1:], 16), # danmaku color as a decimal integer\n \"\",\n self.convert_to_zh(item[\"text\"]), # danmaku text converted from Traditional to Simplified Chinese\n ])\n return ret\n","sub_path":"api/danmaku/bahamut.py","file_name":"bahamut.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"181330233","text":"# -*- coding: utf-8 -*-\n\nfrom operator import add, sub, floordiv, mul, gt, mod\n\nfrom .types import Environment, LispError, Closure, String\nfrom .ast import is_boolean, is_atom, is_symbol, is_list, is_closure, is_integer, is_string\nfrom .parser import unparse\n\n\"\"\"\nThis is the Evaluator module. The `evaluate` function below is the heart\nof your language, and the focus for most of parts 2 through 6.\n\nA score of useful functions is provided for you, as per the above imports, \nmaking your work a bit easier. (We're supposed to get through this thing \nin a day, after all.)\n\"\"\"\n\ndef evaluate_quote(ast, env):\n return ast[1]\n\ndef evaluate_atom(ast, env):\n return is_atom(evaluate(ast[1], env))\n\ndef evaluate_eq(ast, env):\n arg1 = evaluate(ast[1], env)\n arg2 = evaluate(ast[2], env)\n return is_atom(arg1) and is_atom(arg2) and arg1 == arg2\n\ndef evaluate_if(ast, env):\n condition = evaluate(ast[1], env)\n if condition:\n return evaluate(ast[2], env)\n else:\n return evaluate(ast[3], env)\n\ndef evaluate_let(ast, env):\n bindings = ast[1]\n body = ast[2]\n for symbol, exp in bindings:\n env = env.extend({symbol: evaluate(exp, env)})\n return evaluate(body, env)\n\ndef evaluate_define(ast, env):\n if len(ast) != 3:\n raise LispError('Wrong number of arguments in define')\n symbol = ast[1]\n if not is_symbol(symbol):\n raise LispError(str(symbol) + ' is a non-symbol')\n value = evaluate(ast[2], env)\n env.set(symbol, value)\n\ndef evaluate_lambda(ast, env):\n if len(ast) != 3:\n raise LispError('Wrong number of arguments in lambda')\n params = ast[1]\n if not is_list(params):\n raise LispError('Lambda params must be a list')\n body = ast[2]\n return Closure(env, params, body)\n\ndef evaluate_defn(ast, env):\n name = ast[1]\n params = ast[2]\n body = ast[3]\n env.set(name, Closure(env, params, body))\n\ndef evaluate_cons(ast, env):\n head = evaluate(ast[1], env)\n tail = evaluate(ast[2], env)\n if is_string(head) and is_string(tail):\n return String(head.val + tail.val)\n return [head] + tail\n\ndef evaluate_head(ast, env):\n list_ = evaluate(ast[1], env)\n if is_string(list_):\n return String(list_.val[0])\n if not is_list(list_):\n raise LispError('Can not call head on a non-list')\n if len(list_) == 0:\n raise LispError('Can not call head on an empty list')\n return list_[0]\n\ndef evaluate_tail(ast, env):\n list_ = evaluate(ast[1], env)\n if is_string(list_):\n return String(list_.val[1:])\n if not is_list(list_):\n raise LispError('Can not call tail on a non-list')\n if len(list_) == 0:\n raise LispError('Can not call tail on an empty list')\n return list_[1:]\n\ndef evaluate_empty(ast, env):\n list_ = evaluate(ast[1], env)\n if is_string(list_):\n return list_.val == ''\n if not is_list(list_):\n raise LispError('Can not call empty on a non-list')\n return len(list_) == 0\n\ndef evaluate_cond(ast, env):\n for cond, value in ast[1]:\n if evaluate(cond, env):\n return 
evaluate(value, env)\n return False\n\nSPECIAL_FORMS = {\n 'quote': evaluate_quote,\n 'atom': evaluate_atom,\n 'eq': evaluate_eq,\n 'if': evaluate_if,\n 'let': evaluate_let,\n 'define': evaluate_define,\n 'lambda': evaluate_lambda,\n 'defn': evaluate_defn,\n 'cons': evaluate_cons,\n 'head': evaluate_head,\n 'tail': evaluate_tail,\n 'empty': evaluate_empty,\n 'cond': evaluate_cond,\n}\n\ndef evaluate_special_forms(ast, env):\n return SPECIAL_FORMS[ast[0]](ast, env)\n\nMATHS_OPS = {\n '+': add,\n '-': sub,\n '/': floordiv,\n '*': mul,\n '>': gt,\n 'mod': mod\n}\n\ndef evaluate_maths(ast, env):\n op = ast[0]\n arg1 = evaluate(ast[1], env)\n arg2 = evaluate(ast[2], env)\n if not is_integer(arg1) or not is_integer(arg2):\n raise LispError('Math operands must be numeric')\n return MATHS_OPS[op](arg1, arg2)\n\ndef evaluate_closure(ast, env):\n closure = ast[0]\n args = [evaluate(arg, env) for arg in ast[1:]]\n params = closure.params\n num_args = len(args)\n num_params = len(params)\n if num_args != num_params:\n msg = 'wrong number of arguments, expected {} got {}'\n raise LispError(msg.format(num_params, num_args))\n inside_env = closure.env.extend(dict(zip(params, args)))\n return evaluate(closure.body, inside_env)\n\ndef evaluate_function_call(ast, env):\n form = ast[0]\n if is_symbol(form) or is_list(form):\n return evaluate([evaluate(form, env)] + ast[1:], env)\n else:\n raise LispError(unparse(form) + ' is not a function')\n\ndef evaluate(ast, env):\n \"\"\"Evaluate an Abstract Syntax Tree in the specified environment.\"\"\"\n if is_boolean(ast) or is_integer(ast) or is_string(ast):\n return ast\n if is_symbol(ast):\n return env.lookup(ast)\n if is_list(ast):\n if len(ast) == 0:\n raise LispError('Call to an empty list')\n exp = ast[0]\n if exp in list(SPECIAL_FORMS.keys()):\n return evaluate_special_forms(ast, env)\n if exp in list(MATHS_OPS.keys()):\n return evaluate_maths(ast, env)\n if is_closure(exp):\n return evaluate_closure(ast, env)\n return evaluate_function_call(ast, env)\n","sub_path":"diylisp/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}