diff --git "a/733.jsonl" "b/733.jsonl" new file mode 100644--- /dev/null +++ "b/733.jsonl" @@ -0,0 +1,689 @@ +{"seq_id":"208012598","text":"import numpy as np\n\nwith open('input') as f:\n terrain = [list(line.strip()) for line in f]\nterrain = np.array(terrain)\nprint(terrain, terrain.shape)\n\n#terrain = np.tile(terrain, (1, int(np.ceil(terrain.shape[0] / terrain.shape[1]))))\n\n\ndef traverse(terrain, slope):\n i, j = (slope[0], slope[1])\n trees = 0\n while i < terrain.shape[0]:\n wrapped_j = j % terrain.shape[1]\n print(i, j, ' |', wrapped_j)\n if terrain[i, wrapped_j] == '#':\n trees += 1\n i += slope[0]\n j += slope[1]\n\n return trees\n\nslopes = [\n (1, 1),\n (1, 3),\n (1, 5),\n (1, 7),\n (2, 1)\n]\nanswer = 1\nfor slope in slopes:\n answer *= traverse(terrain, slope)\nprint(answer)\n","sub_path":"2020/3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"281802108","text":"from matplotlib import pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\nfrom collections import OrderedDict\n\nclass KRbCustomColors:\n\tdef __init__(self):\n\t\tself.whitePlasma = self.makeWhitePlasma()\n\t\tself.whiteJet = self.makeWhiteJet()\n\t\tself.whiteMagma = self.makeWhiteMagma()\n\n\tdef makeWhitePlasma(self):\n\t\t# Default colorbar\n\t\tplasma = cm.get_cmap('plasma', 256)\n\n\t\tN = 256\n\t\tvals = np.ones((N, 4))\n\t\tvals[:,0] *= plasma.colors[-1][0]\n\t\tvals[:,1] *= plasma.colors[-1][1]\n\t\tvals[:,2] *= plasma.colors[-1][2]\n\t\ta = np.linspace(0, 1, N)\n\t\tvals[:, 3] = a**2\n\t\twhitePlasmaStart = ListedColormap(vals)\n\n\t\twhitePlasmaColors = np.vstack((whitePlasmaStart(np.linspace(0, 1, 128)),\n\t\t plasma(np.linspace(1, 0, 384))))\n\t\treturn ListedColormap(whitePlasmaColors, name='WhitePlasma')\n\n\tdef makeWhiteMagma(self):\n\t\t# Default colorbar\n\t\tmagma = cm.get_cmap('magma', 256)\n\n\t\tN = 256\n\t\tvals = np.ones((N, 4))\n\t\tvals[:,0] *= magma.colors[-1][0]\n\t\tvals[:,1] *= magma.colors[-1][1]\n\t\tvals[:,2] *= magma.colors[-1][2]\n\t\ta = np.linspace(0, 1, N)\n\t\tvals[:, 3] = a**2\n\t\twhiteMagmaStart = ListedColormap(vals)\n\n\t\twhiteMagmaColors = np.vstack((whiteMagmaStart(np.linspace(0, 1, 128)),\n\t\t magma(np.linspace(1, 0, 384))))\n\t\treturn ListedColormap(whiteMagmaColors, name='WhiteMagma')\n\n\tdef makeWhiteJet(self):\n\t\tcolors = [\n\t\t\t(1,1,1), # White\n\t\t\t(51.0/256, 11.0/256, 130.0/256), # Purple\n\t\t\t(39.0/256, 205.0/256, 247.0/256), # Cyan\n\t\t\t(39.0/256, 247.0/256, 122.0/256), # Green\n\t\t\t(247.0/256, 240.0/256, 39.0/256), # Yellow\n\t\t\t(247.0/256, 174.0/256, 39.0/256), # Orange\n\t\t\t(255.0/256, 10.0/256, 10/256), # Red\n\t\t] # R -> G -> B\n\t\tinterp = 100\n\n\t\t# Create the colormap\n\t\treturn LinearSegmentedColormap.from_list('WhiteJet', colors, N=interp)\n\n\ndef plot_examples(cms):\n \"\"\"\n helper function to plot two colormaps\n \"\"\"\n np.random.seed(19680801)\n poo = np.random.randn(30, 30)\n\n data = np.ones((30, 30)).astype(float)\n for i in range(30):\n \tfor j in range(30):\n \t\tdata[i,j] = np.exp(-((i-15)**2 + (j-15)**2) / 5.0**2)\n\n data = 15*data + poo\n\n fig, axs = plt.subplots(1, 2, figsize=(6, 3), constrained_layout=True)\n for [ax, cmap] in zip(axs, cms):\n psm = ax.pcolormesh(data, cmap=cmap, vmin=0, vmax=15)\n fig.colorbar(psm, ax=ax)\n plt.show()\n\nif __name__ == \"__main__\":\n\tcm = 
KRbCustomColors()\n\tplot_examples([cm.whiteJet, cm.whitePlasma])","sub_path":"lib/krb_custom_colors.py","file_name":"krb_custom_colors.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"104147995","text":"from Street import Street\r\nfrom City import City\r\nfrom Building import Building\r\nfrom Apartment import Apartment\r\n\r\ndef search(list, x, what):\r\n res = []\r\n if int(x) == 1:\r\n for m in list:\r\n print(m.__class__.__subclasses__())\r\n if m.city_name == what:\r\n res.append(m)\r\n elif int(x) == 2:\r\n for m in list:\r\n if m.population == what:\r\n res.append(m)\r\n elif int(x) == 3:\r\n for m in list:\r\n if m.street == what:\r\n res.append(m)\r\n elif int(x) == 4:\r\n for m in list:\r\n if m.building_number == what:\r\n res.append(m)\r\n else:\r\n for m in list:\r\n if m.apartment_number == what:\r\n res.append(m)\r\n return res\r\n\r\ndef delete(list, x,what):\r\n res = search(list,x,what)\r\n for m in res:\r\n list.remove(m)\r\n return list\r\ndef main():\r\n list = []\r\n while(True):\r\n print(\"1 - add new\")\r\n print(\"2 - delete\")\r\n print(\"3 - search\")\r\n print(\"4 - show all\")\r\n x = input()\r\n if int(x)==1 :\r\n print(\"City name:\")\r\n city_name = input()\r\n print(\"population:\")\r\n population = input()\r\n print(\"Street name\")\r\n street_name = input()\r\n print(\"Building number:\")\r\n building_number = input()\r\n print(\"Apartment number:\")\r\n apartment_number = input()\r\n if street_name==\"\" and building_number==\"\" and apartment_number==\"\":\r\n item = City(city_name,population)\r\n elif building_number==\"\" and apartment_number==\"\":\r\n item = Street(city_name, population,street_name)\r\n elif apartment_number==\"\":\r\n item = Building(city_name,population,street_name,building_number)\r\n else:\r\n item = Apartment(city_name,population,street_name,building_number,apartment_number)\r\n\r\n list.append(item)\r\n elif int(x)==2:\r\n print(\"1 - City name\")\r\n print(\"2 - Population\")\r\n print(\"3 - Street name\")\r\n print(\"4 - Building number\")\r\n print(\"5 - Apartment\")\r\n field = input()\r\n print(\"What?\")\r\n what = input()\r\n print(\"DELETED\")\r\n for m in delete(list, field, what):\r\n print(m)\r\n elif int(x)==3:\r\n print(\"1 - City name\")\r\n print(\"2 - Population\")\r\n print(\"3 - Street name\")\r\n print(\"4 - Building number\")\r\n print(\"5 - Apartment\")\r\n field = input()\r\n print(\"What?\")\r\n what = input()\r\n\r\n for m in search(list, field, what):\r\n print(m)\r\n else :\r\n for m in list:\r\n print(m)\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"265580696","text":"# Write a function that takes a string of braces, and determines if the order of the braces is valid. It should return true if the string is valid, and false if it's invalid.\r\n\r\n# This Kata is similar to the Valid Parentheses Kata, but introduces new characters: brackets [], and curly braces {}. 
Thanks to @arnedag for the idea!\r\n\r\n# All input strings will be nonempty, and will only consist of parentheses, brackets and curly braces: ()[]{}.\r\n# What is considered Valid?\r\n\r\n# A string of braces is considered valid if all braces are matched with the correct brace.\r\n# Examples\r\n\r\n# \"(){}[]\" => True\r\n# \"([{}])\" => True\r\n# \"(}\" => False\r\n# \"[(])\" => False\r\n# \"[({})](]\" => False\r\n\r\n\r\ndef validBraces(string):\r\n pairs = {'(' : ')', '[' : ']', '{' : '}'}\r\n brace_left = pairs.keys()\r\n brace_right = pairs.values()\r\n valid_pairs = []\r\n for n in string:\r\n if n in brace_left:\r\n valid_pairs.append(n)\r\n elif n in brace_right and valid_pairs and pairs[valid_pairs[-1]] == n:\r\n valid_pairs.pop()\r\n if valid_pairs:\r\n return False\r\n else:\r\n return True\r\n","sub_path":"Valid Braces.py","file_name":"Valid Braces.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"381944177","text":"## -*- coding: utf-8 -*-\nimport telebot\nimport sys\nimport pickle\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass user():\n def __init__(self, id, money):\n self.id = id\n self.money = money \n\ndef norm(text):\n textnorm = '' \n for i in text.lower():\n if i == ' ' or i == '\\n':\n continue \n textnorm += i\n return textnorm \n\ndef biggest(list):\n global userlist \n values = []\n owners = list \n goodowners = []\n\n for i in list: \n values.append(i.money)\n\n while len(owners) > 0:\n value_index = values.index( max(values) )\n goodowners.append(owners[value_index])\n values.pop(value_index)\n owners.pop(value_index)\n \n userlist = goodowners\n pickle.dump(userlist, open('database.pickle', 'wb'))\n\nif int(input('Create user database?')) == 1:\n userlist = []\n pickle.dump(userlist, open('database.pickle', 'wb')) \n\ntry:\n userlist = pickle.load(open('database.pickle', 'rb'))\n file = open('database.txt', 'r',encoding=\"utf-8\").read()\n token = file[ file.index('[token]:[') + 9: file.index(']1') ]\n group_id = file[ file.index('[groupid]:[') + 11: file.index(']2')]\n text_welcome = file[ file.index('[text_welcome]:[') + 16: file.index(']3')]\n text_goodbye = file[ file.index('[text_goodbye]:[') + 16: file.index(']4')]\n channel_id = file[ file.index('[chanid]:[') + 10: file.index(']5')] \n if int(input('Test')) == 1:\n print(token)\n print(group_id)\n print(text_welcome)\n print(text_goodbye)\n print(channel_id)\nexcept Exception as e:\n print(e)\n file = open('database.txt', 'w')\n print('Пожалуйста, заполните \\'базу данных\\'')\n a = input()\n sys.exit()\n\n\nbot = telebot.TeleBot(token)\nprint(u'Работа Начата')\n@bot.message_handler(content_types = ['new_chat_members'])\ndef hello(message):\n try:\n if str(message.chat.id) == group_id:\n bot.send_message(group_id, text_welcome)\n except Exception as e:\n print(f\"Произошла серьезная ошибка - {e}\")\n\n@bot.message_handler(content_types = ['left_chat_member'])\ndef bye(message):\n try:\n if str(message.chat.id) == group_id:\n bot.send_message(group_id, text_goodbye)\n except Exception as e:\n print(f\"Произошла серьезная ошибка - {e}\")\n\n@bot.channel_post_handler()\ndef update(message):\n try:\n if str(message.chat.id) == channel_id:\n bot.forward_message(chat_id= group_id, from_chat_id = message.chat.id, message_id = message.message_id)\n #работа с аккаунтами и их заработком \n if message.entities == None:\n return\n if message.entities[0].type != 'text_mention':\n return\n text = 
norm(message.text)\n if '💰суммапополнения:' not in text:\n return \n\n money = int(text[text.index('💰суммапополнения:') + 17: text.index('rub💵')])\n\n o = True\n for i in userlist:\n if i.id == message.entities[0].user.id: \n i.money += money \n o = False\n print(i.id)\n break\n if o == True:\n userlist.append(user(message.entities[0].user.id, money)) \n\n biggest(userlist)\n return\n except Exception as e:\n print(f\"Произошла серьезная ошибка - {e}\")\n \n@bot.message_handler( commands = ['top'])\ndef top(message):\n try:\n if str(message.chat.id) != group_id:\n return\n text1 = ''\n lenage = 10\n if len(userlist) < 10:\n lenage = len(userlist)\n for i in range(lenage):\n text1 += f\"{i + 1}\\.[ { bot.get_chat_member(user_id = userlist[i].id, chat_id = group_id).user.first_name } ](tg://user?id={userlist[i].id}) Заработал {userlist[i].money} RUB \\n\"\n if text1 != '':\n bot.send_message(group_id, text1, parse_mode='MarkdownV2')\n else:\n bot.send_message(group_id, \"К сожалению, список пуст\")\n except Exception as e:\n print(f\"Произошла серьезная ошибка - {e}\")\n\n@bot.message_handler( commands = ['btc'])\ndef btc(message):\n try:\n if str(message.chat.id) != group_id:\n return \n soup = BeautifulSoup(requests.get('https://www.rbc.ru/crypto/currency/btcusd').text, 'lxml')\n need = soup.find_all('div', {'class' : \"chart__subtitle js-chart-value\"})\n dell = norm(need[0].find_all('span')[0].text)\n texxt = norm(need[0].text)\n texxt = texxt.replace(dell, '') \n bot.send_message(group_id, f\"Актуальная цена биткоина ${texxt}\")\n except Exception as e:\n print(f\"Произошла серьезная ошибка - {e}\")\n\nbot.polling(none_stop=True, interval=0)\n\n\n","sub_path":"on.py","file_name":"on.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587931151","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\nimport math\n\nfrom knack.log import get_logger\n\nfrom azure.cli.core.profiles import ResourceType\n\nlogger = get_logger(__name__)\n\n\ndef list_shares(client, prefix=None, marker=None, num_results=None,\n include_metadata=False, timeout=None, include_snapshots=False, **kwargs):\n from ..track2_util import list_generator\n generator = client.list_shares(name_starts_with=prefix, include_metadata=include_metadata, timeout=timeout,\n include_snapshots=include_snapshots, results_per_page=num_results, **kwargs)\n\n pages = generator.by_page(continuation_token=marker) # SharePropertiesPaged\n result = list_generator(pages=pages, num_results=num_results)\n\n if pages.continuation_token:\n next_marker = {\"nextMarker\": pages.continuation_token}\n result.append(next_marker)\n\n return result\n\n\ndef create_share(cmd, client, metadata=None, quota=None, fail_on_exist=False, timeout=None, **kwargs):\n from azure.core.exceptions import HttpResponseError\n try:\n client.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)\n return True\n except HttpResponseError as ex:\n from azure.cli.command_modules.storage.track2_util import _dont_fail_on_exist\n StorageErrorCode = cmd.get_models(\"_shared.models#StorageErrorCode\",\n resource_type=ResourceType.DATA_STORAGE_FILESHARE)\n if not fail_on_exist:\n return _dont_fail_on_exist(ex, StorageErrorCode.share_already_exists)\n raise ex\n\n\ndef share_exists(cmd, client, **kwargs):\n from azure.core.exceptions import HttpResponseError\n try:\n client.get_share_properties(**kwargs)\n return True\n except HttpResponseError as ex:\n from azure.cli.command_modules.storage.track2_util import _dont_fail_on_exist\n StorageErrorCode = cmd.get_models(\"_shared.models#StorageErrorCode\",\n resource_type=ResourceType.DATA_STORAGE_FILESHARE)\n return _dont_fail_on_exist(ex, StorageErrorCode.share_not_found)\n\n\ndef generate_share_sas(cmd, client, permission=None, expiry=None, start=None, policy_id=None, ip=None, protocol=None,\n cache_control=None, content_disposition=None, content_encoding=None,\n content_language=None, content_type=None):\n generate_share_sas_fn = cmd.get_models('_shared_access_signature#generate_share_sas')\n\n sas_kwargs = {'protocol': protocol}\n sas_token = generate_share_sas_fn(account_name=client.account_name, share_name=client.share_name,\n account_key=client.credential.account_key, permission=permission,\n expiry=expiry, start=start, ip=ip, cache_control=cache_control,\n policy_id=policy_id, content_disposition=content_disposition,\n content_type=content_type, content_encoding=content_encoding,\n content_language=content_language, **sas_kwargs)\n return sas_token\n\n\ndef delete_share(cmd, client, fail_not_exist=False, timeout=None, delete_snapshots=None, **kwargs):\n from azure.core.exceptions import HttpResponseError\n try:\n client.delete_share(timeout=timeout, delete_snapshots=delete_snapshots, **kwargs)\n return True\n except HttpResponseError as ex:\n from azure.cli.command_modules.storage.track2_util import _dont_fail_on_exist\n StorageErrorCode = cmd.get_models(\"_shared.models#StorageErrorCode\",\n resource_type=ResourceType.DATA_STORAGE_FILESHARE)\n if not fail_not_exist:\n return _dont_fail_on_exist(ex, StorageErrorCode.share_not_found)\n raise ex\n\n\ndef get_share_stats(client, timeout=None, **kwargs):\n result = client.get_share_stats(timeout=timeout, 
**kwargs)\n datasize = round(int(result) / math.pow(1024, 3))\n if datasize == 0:\n return str(datasize + 1)\n return str(datasize)\n\n\ndef set_share_metadata(client, metadata=None, timeout=None, **kwargs):\n client.set_share_metadata(metadata=metadata, timeout=timeout, **kwargs)\n return True\n","sub_path":"src/azure-cli/azure/cli/command_modules/storage/operations/fileshare.py","file_name":"fileshare.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"389514519","text":"# this file is used to evaluate all the thigh marker position and find the best location\nimport multiprocessing\n\nfrom SubThread import *\n\nif __name__ == '__main__':\n start_time = datetime.now()\n output_names = [\n 'FP1.ForX',\n 'FP2.ForX',\n 'FP1.ForY',\n 'FP2.ForY',\n 'FP1.ForZ',\n 'FP2.ForZ',\n # 'FP1.CopX', 'FP1.CopY',\n # 'FP2.CopX', 'FP2.CopY'\n ]\n\n R2_column = [output + '_R2' for output in output_names]\n RMSE_column = [output + '_RMSE' for output in output_names]\n NRMSE_column = [output + '_NRMSE' for output in output_names]\n result_column = R2_column + RMSE_column + NRMSE_column\n\n input_names = [\n 'trunk_acc_x', 'trunk_acc_y', 'trunk_acc_z',\n 'pelvis_acc_x', 'pelvis_acc_y', 'pelvis_acc_z',\n 'l_thigh_acc_x', 'l_thigh_acc_y', 'l_thigh_acc_z',\n 'r_thigh_acc_x', 'r_thigh_acc_y', 'r_thigh_acc_z',\n 'l_shank_acc_x', 'l_shank_acc_y', 'l_shank_acc_z',\n 'r_shank_acc_x', 'r_shank_acc_y', 'r_shank_acc_z',\n 'l_foot_acc_x', 'l_foot_acc_y', 'l_foot_acc_z',\n 'r_foot_acc_x', 'r_foot_acc_y', 'r_foot_acc_z',\n 'trunk_gyr_x', 'trunk_gyr_y', 'trunk_gyr_z',\n 'pelvis_gyr_x', 'pelvis_gyr_y', 'pelvis_gyr_z',\n 'l_thigh_gyr_x', 'l_thigh_gyr_y', 'l_thigh_gyr_z',\n 'r_thigh_gyr_x', 'r_thigh_gyr_y', 'r_thigh_gyr_z',\n 'l_shank_gyr_x', 'l_shank_gyr_y', 'l_shank_gyr_z',\n 'r_shank_gyr_x', 'r_shank_gyr_y', 'r_shank_gyr_z',\n 'l_foot_gyr_x', 'l_foot_gyr_y', 'l_foot_gyr_z',\n 'r_foot_gyr_x', 'r_foot_gyr_y', 'r_foot_gyr_z',\n ]\n\n thread_number = multiprocessing.cpu_count() - 2 # allowed thread number\n pool = multiprocessing.Pool(processes=thread_number)\n\n sub_df_list = []\n for i_sub in range(SUB_NUM):\n pool.apply_async(get_segment_translation_result,\n args=(input_names, output_names, result_column, i_sub, 'GradientBoostingRegressor'),\n callback=sub_df_list.append)\n pool.close()\n pool.join()\n total_result_df = pd.DataFrame()\n for sub_df in sub_df_list:\n total_result_df = pd.concat([total_result_df, sub_df], axis=0)\n\n Evaluation.save_result(total_result_df, 'result_segment_translation', 'GradientBoostingRegressor')\n end_time = datetime.now()\n print('Duration: ' + str(end_time - start_time))\n","sub_path":"2.1_simulate_IMU_segment_translation/simulate_segment_translation.py","file_name":"simulate_segment_translation.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"423233302","text":"\nimport time\nimport os\nimport sys\nimport asyncio\nfrom six.moves import input\nimport datetime\nimport json\nimport logging\nfrom abc import ABC, abstractmethod\nfrom library.plc_modbus import PlcModbus\nfrom library.measurement import Measurement, ModbusMeasurement\nimport library.utils as utils\nfrom influxdb_module_class import InfluxDBModuleClass\nfrom multiprocessing import Process\n\n\nclass PlcModuleClass():\n\n MACHINE_TYPE = \"INGRID\"\n async_cmd_list=[]\n\n def __init__(self, logger, plc_config_dict):\n self.ip = 
utils.get(plc_config_dict,'MODBUS_IP')\n self.port = utils.get(plc_config_dict,'MODBUS_PORT')\n self.r_sampling_time = utils.get(plc_config_dict, 'R_SAMPLING_TIME_MS')/1000\n self.w_sampling_time = utils.get(plc_config_dict, 'W_SAMPLING_TIME_MS')/1000\n self.max_attempts=utils.get(plc_config_dict,'MAX_ATTEMPTS')\n self.measurement_list_dict = utils.get(plc_config_dict,'MODBUS_MEASUREMENTS')\n self.inputs_list_dict = utils.get(plc_config_dict, 'MODBUS_INPUTS')\n self.logger = logger\n self.device_instance = PlcModbus(self.logger, variables_dict=plc_config_dict, ip=self.ip, port = self.port)\n\n\n def get_meas_info_from_name(self, meas_name)->dict:\n for m in self.measurement_list_dict:\n if list(m.keys())[0] == meas_name:\n return list(m.values())[0]\n return None\n\n#############################################################################################\n### INIZIALIZZAZIONE e Shut down\n\n def connect_device(self):\n return self.device_instance.connect() \n\n def disconnect_device(self):\n return self.device_instance.disconnect()\n\n#############################################################################################\n### LETTURA variabili\n\n async def read_var_async(self, meas_dict):\n result = None\n try:\n key=list(meas_dict.keys())[0]\n vals=list(meas_dict.values())[0]\n register_number=utils.get(vals, 'REGISTER_NUMBER')\n value_type=utils.get(vals,'VALUE_TYPE')\n uom=utils.get(vals,'UOM')\n \n self.logger.debug(f'reading::{key}::{register_number}::value type::{value_type}::uom::{uom}')\n result = await self.device_instance.read_value(register_number, value_type, register_type=None, count=None, array_count=1)\n #aggiunta del valore\n vals['VALUE']=result\n except Exception as e:\n self.logger.critical(f'error::{e}')\n return result\n\n\n#############################################################################################\n### SCRITTURA variabili\n\n def start_manual_ctrl(self):\n data = None\n while data!= -1:\n data, val = self.ask_user()\n if data is not None and isinstance(data, dict):\n register_num=utils.get(data,'REGISTER_NUMBER')\n value_type=utils.get(data,'VALUE_TYPE')\n #chiamta in asincrono\n asyncio.run(self.device_instance.write_value(register_num, value_type, val))\n\n\n def ask_user(self):\n data=None\n val=None\n print(\"Write value or check modbus parameters list\")\n print(\" - to set a value just write '=\")\n print(\" - to check modbus parameters list just type '--show-list'\")\n print(\" - to exit just type '--exit'\")\n res=input('write here:')\n res=res.strip()\n if(res.lower()=='--show-list'):\n print(json.dumps(self.measurement_list_dict, indent=4, sort_keys=True))\n elif res.lower()=='--exit':\n print('ciao.')\n data=-1\n elif('=' in res):\n input_splitted=res.split('=')\n if(len(input_splitted)>1):\n key=input_splitted[0].strip()\n val=input_splitted[1].strip()\n for m in self.measurement_list_dict:\n self.get_meas_info_from_name(key)\n else:\n print('error, retry')\n else:\n print('nada, retry')\n\n return data,val\n\n async def sample_inputs(self, data_dict):\n '''\n Inserimento dati multipli in modalità asincrona.\n '''\n if(len(data_dict)>0):\n for key, val in data_dict:\n #chiamta in sincrono\n vals = self.get_meas_info_from_name(key)\n if(vals is not None):\n register_num=utils.get(vals,'REGISTER_NUMBER')\n value_type=utils.get(vals,'VALUE_TYPE')\n await self.device_instance.write_value(register_num, value_type, val)\n\n async def set_periodic_inputs(self, data_dict):\n '''\n dict defined as:\n REGISTER_NUMBER\n VALUE_TYPE\n 
SAMPLING_TIME [ms]\n '''\n while(len(data_dict)>0):\n for key, val in data_dict:\n #chiamta in sincrono\n vals = self.get_meas_info_from_name(key)\n if(vals is not None):\n register_num=utils.get(vals,'REGISTER_NUMBER')\n value_type=utils.get(vals,'VALUE_TYPE')\n sampling_time=utils.get(vals,'SAMPLING_TIME')/1000\n asyncio.run(self.write_imput_with_delay(register_num, value_type, sampling_time, val))\n\n\n async def write_imput_with_delay(self, register_num, value_type, delay, val):\n await asyncio.sleep(delay)\n return await self.device_instance.write_value(register_num, value_type, val)\n\n\n","sub_path":"Modbus2DBs/PlcSimulationEnv_DB/plc_writer/plc_module_class.py","file_name":"plc_module_class.py","file_ext":"py","file_size_in_byte":5556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"267289262","text":"\"\"\"\r\nGAME OF LIFE\r\n\r\nSyntax inspired and started from \r\nhttp://programarcadegames.com/index.php?chapter=array_backed_grids\r\n\r\n\r\n\"\"\"\r\nimport pygame\r\n \r\n# Define Dead and Alive cells\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255,0,0)\r\n \r\n# This sets the WIDTH, HEIGHT and MARGIN (thickness of border) of each grid\r\nWIDTH = 10\r\nHEIGHT = 10\r\nMARGIN = 1\r\n\r\n# How many ROWs and COLumns and in the world\r\nROW = 100\r\nCOL = 100\r\n\r\n\r\n\r\n \r\n# Create a 2 dimensional array. A two dimensional\r\n# array is a list of lists.\r\ndef generateEmptyGrid(ROW, COL):\r\n \"\"\"\r\n Create a 2 dimensional array. A two dimensional\r\n array is a list of lists.\r\n Input:\r\n - ROWs\r\n - COLumns\r\n Returns:\r\n - Empty grid (only dead cells) of size ROW x COL\r\n \"\"\"\r\n grid = []\r\n for row in range(ROW):\r\n # Add an empty array that will hold each cell\r\n # in this row\r\n grid.append([])\r\n for column in range(COL):\r\n grid[row].append(0) # Append a cell\r\n return grid\r\n \r\n# Generate Starting grid\r\ngrid = generateEmptyGrid(ROW,COL)\r\n \r\n# Insert some patterns\r\n\r\n# Glider\r\ngrid[54][60] = 1\r\ngrid[55][61] = 1\r\ngrid[56][61] = 1\r\ngrid[56][60] = 1\r\ngrid[56][59] = 1\r\n\r\n# Tub\r\ngrid[9][4] = 1\r\ngrid[10][5] = 1\r\ngrid[10][3] = 1\r\ngrid[11][4] = 1\r\n\r\n# Blinker\r\ngrid[24][25] = 1\r\ngrid[25][25] = 1\r\ngrid[26][25] = 1\r\n\r\ndef torusConversion(i,j,ROW,COL):\r\n if i < 1 and j < 1:\r\n return ROW, COL\r\n elif i >= 1 and j < 1:\r\n return i, COL\r\n elif i < 1 and j >= 1:\r\n return ROW, j\r\n elif i > ROW - 1 and j > COL - 1:\r\n return 0, 0\r\n elif i > ROW - 1 and j <= COL - 1:\r\n return 0, j\r\n elif i <= ROW - 1 and j > COL - 1:\r\n return i, 0\r\n else:\r\n return i, j\r\n\r\n\r\ndef gridUpdate(grid):\r\n \"\"\"\r\n Updates grid, by iterating over all cells in world.\r\n Uses Conway's rules from the Game Of Life.\r\n Input:\r\n - Old Grid\r\n Returns:\r\n - Updated Grid\r\n \"\"\"\r\n nROW = len(grid)\r\n nCOL = len(grid[0])\r\n nextGrid = generateEmptyGrid(nROW,nCOL) \r\n for i in range(1,nROW-1):\r\n for j in range(1,nCOL-1):\r\n N = grid[i-1][j]\r\n NE = grid[i-1][j+1]\r\n E = grid[i][j+1]\r\n SE = grid[i+1][j+1]\r\n S = grid[i+1][j]\r\n SW = grid[i+1][j-1]\r\n W = grid[i][j-1]\r\n NW = grid[i-1][j-1]\r\n # Sums the number of neighbours\r\n neighbours = N+NE+E+SE+S+SW+W+NW\r\n if grid[i][j] == 1 and neighbours < 2:\r\n nextGrid[i][j] = 0\r\n elif grid[i][j] == 1 and (neighbours == 2 or neighbours == 3):\r\n nextGrid[i][j] = 1\r\n elif grid[i][j] == 1 and neighbours > 3:\r\n nextGrid[i][j] = 0\r\n elif grid[i][j] == 0 and neighbours == 3:\r\n 
nextGrid[i][j] = 1\r\n return nextGrid\r\n\r\n \r\n# Initialize pygame\r\npygame.init()\r\n \r\n# Set the HEIGHT and WIDTH of the screen\r\nWINDOW_SIZE = [ROW*(WIDTH + MARGIN) + MARGIN, COL*(HEIGHT + MARGIN) + MARGIN]\r\nscreen = pygame.display.set_mode(WINDOW_SIZE)\r\n \r\n# Set title of screen\r\npygame.display.set_caption(\"Conway's GAME OF LIFE\")\r\n \r\n# Loop until the user clicks the close button.\r\ndone = False\r\n \r\n# Used to manage how fast the screen updates\r\nclock = pygame.time.Clock()\r\n \r\n# -------- Main Program Loop -----------\r\nwhile not done:\r\n grid = gridUpdate(grid)\r\n for event in pygame.event.get(): # User Interaction\r\n if event.type == pygame.QUIT: # User termination of program\r\n done = True # Exit loop\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n # User clicks the mouse. Get the position\r\n pos = pygame.mouse.get_pos()\r\n # Change the x/y screen coordinates to grid coordinates\r\n column = pos[0] // (WIDTH + MARGIN)\r\n row = pos[1] // (HEIGHT + MARGIN)\r\n # Toggle location to one or zero depending on previous status\r\n if grid[row][column] == 0:\r\n grid[row][column] = 1\r\n elif grid[row][column] == 1:\r\n grid[row][column] = 0\r\n print(\"Click \", pos, \"Grid coordinates: \", row, column)\r\n\r\n # Set the screen background\r\n screen.fill(BLACK)\r\n \r\n # Draw the grid\r\n for row in range(ROW):\r\n for column in range(COL):\r\n color = WHITE\r\n if grid[row][column] == 1:\r\n color = BLACK\r\n pygame.draw.rect(screen,\r\n color,\r\n [(MARGIN + WIDTH) * column + MARGIN,\r\n (MARGIN + HEIGHT) * row + MARGIN,\r\n WIDTH,\r\n HEIGHT])\r\n \r\n # Limit to 60 frames per second\r\n clock.tick(30)\r\n \r\n # Update screen\r\n pygame.display.flip()\r\n \r\n\t\t\r\n \r\n \r\n# Quit program\r\npygame.quit()","sub_path":"game_of_life.py","file_name":"game_of_life.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"479893914","text":"from meta import MetaGoblin\n\n# NOTE: works for la models too\n\nclass NewYorkModelsGoblin(MetaGoblin):\n '''accepts:\n - image\n - webpage\n '''\n\n NAME = 'new york models goblin'\n ID = 'newyorkmodels'\n IMAGE_URL_BASE = 'https://s3.amazonaws.com/media-ima002.globaltalentsystems.com/{}/1200'\n VIDEO_URL_BASE = 'https://s3.amazonaws.com/media-vid000.globaltalentsystems.com/{}'\n API_URL = 'http://www.newyorkmodels.com/control/portfolio_get.php?initi=1&model_id=&the_type=&port_id=&ref={}'\n\n def __init__(self, args):\n super().__init__(args)\n\n def main(self):\n self.logger.log(1, self.NAME, 'collecting urls')\n urls = []\n\n for target in self.args['targets'][self.ID]:\n if 'globaltalentsystems' in target:\n urls.append(self.parser.regex_sub(r'\\d+(?=/\\d+_)', '1200', target))\n else:\n self.logger.log(2, self.NAME, 'looting', target)\n self.logger.spin()\n\n init_response = self.get(target).content\n portfolio_id = self.parser.regex_search(r\"(?<=var\\sref\\s=\\s')[^']+\", init_response)\n\n response = self.get(self.API_URL.format(portfolio_id))\n\n model_id = self.parser.regex_search(r'(?<=|)\\d+(?=|)', response.content)\n relatives = self.parser.extract_by_regex(response.content, r'[\\w\\.\\-]+\\.\\w+(?=
)')\n video_relatives = self.parser.extract_by_regex(response.content, r'\\d+(?=\\.flv)')\n\n for rel in relatives:\n urls.append(f'{self.IMAGE_URL_BASE.format(model_id)}/{rel}')\n\n if video_relatives:\n for rel in video_relatives:\n urls.append(f'{self.VIDEO_URL_BASE.format(model_id)}/{rel}.mp4')\n\n\n self.delay()\n\n for url in urls:\n self.collect(url)\n\n self.loot()\n","sub_path":"image_goblin/goblins/new_york_models.py","file_name":"new_york_models.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"618663308","text":"import numpy as np\n\n\ndef clean_labels(x):\n try:\n return x.lower()\\\n .strip()\\\n .strip('()-_')\\\n .replace('/', ' or ')\\\n .replace(' ', '_')\\\n .replace('.', '_')\\\n .replace('__', '_')\n except AttributeError:\n return x\n\n\ndef _fix_nk_cells(df):\n \"\"\"Annotate natural killer (NK) cells as a subtype of T cells\"\"\"\n rows = df['annotation'] == 'natural_killer_cells'\n df.loc[rows, 'annotation'] = 't_cells'\n df.loc[rows, 'subannotation'] = 'natural_killer_cells'\n return df\n\n\ndef clean_annotation(df, tissue):\n # --- Bladder ---\n if tissue == 'Bladder':\n df['subannotation'] = df.annotation.str.extract(\n '(?P[AB]\\d?$)')\n # print(df.query('annotation == \"Basal\"').head())\n df['annotation'] = df.annotation.str.rstrip('AB12')\n # print(df.query('annotation == \"Basal\"').head())\n df['annotation'] = df['annotation'] + ' cells'\n\n # --- Colon ---\n elif tissue == 'Colon':\n pattern = '(?P[a-zA-Z -]+)(?P\\d?)'\n df['annotation'] = df['annotation'].str.replace('Undiff.',\n 'undifferentiated')\n df = df.annotation.str.extract(pattern)\n df['subannotation'] = df.subannotation.replace('', np.nan)\n\n # --- Diaphragm ---\n elif tissue == 'Diaphragm':\n pattern = '(?P[a-zA-Z /&]+)(?P\\d?)'\n df['annotation'] = df['annotation'].replace('B cells & T-cells',\n 'immune cells')\n df = df.annotation.str.extract(pattern)\n df['subannotation'] = df.subannotation.replace('', np.nan)\n\n # --- Fat ---\n elif tissue == 'Fat':\n df['annotation'] = df['annotation'].str.replace('-', ' ')\n df['annotation'] = df['annotation'].str.replace('Mono/macro/DCs',\n 'Myeloid cells')\n df['annotation'] = df['annotation'].str.replace('NK cells',\n 'natural killer cells')\n\n # --- Heart ---\n elif tissue == 'Heart':\n df['annotation'] = df.annotation.str.replace('Fb', 'fibroblasts')\n df['annotation'] = df.annotation.str.replace('Edc',\n 'endothelial_cells')\n df['annotation'] = df.annotation.str.replace('CMs', 'cardiomyocytes')\n df['annotation'] = df.annotation.str.replace('SMCs',\n 'smooth_muscle_cells')\n\n # Deal with Fb_1 and Immune_Cells_2\n rows = df.annotation.str.contains(r'\\d$')\n pattern = '(?P[a-zA-Z_]+)_(?P\\d?)'\n df.loc[rows] = df.loc[rows, 'annotation'].str.extract(pattern)\n\n # Deal with edc_3_endocardial and edc_2_coronary_vascular\n rows = df.annotation.str.contains(r'_\\d_')\n pattern = '(?P[a-zA-Z_]+)_\\d_(?P[a-zA-Z_]+)'\n df.loc[rows] = df.loc[rows, 'annotation'].str.extract(pattern)\n\n # --- Kidney ---\n elif tissue == \"Kidney\":\n# df['annotation'] = df['annotation'].str.replace('tubules', 'tubule')\n# rows = df.annotation.str.contains('(', regex=False)\n# pattern = '(?P[a-zA-Z ]+)(?P \\([a-zA-Z ]+\\)?)'\n# df.loc[rows] = df.loc[rows].annotation.str.extract(pattern)\n df['subannotation'] = df['annotation'].str.extract(r'(\\d)')\n rows = df.annotation == 'Proximal tubule cells'\n df.loc[rows, 'subannotation'] = '1'\n df['annotation'] = 
df.annotation.str.rstrip(' 1234')\n\n # --- Liver ---\n elif tissue == \"Liver\":\n # Remove newlines\n df['subannotation'] = df['subannotation'].str.replace('Female',\n '').str.replace(\n 'Male', '')\n\n # --- Lung ---\n elif tissue == \"Lung\":\n # Remove newlines\n df['annotation'] = df['annotation'].str.replace('\\n', '')\n df['subannotation'] = df.annotation.str.extract(\n r'(Type [IV]+)').str.strip()\n df['annotation'] = df.annotation.str.replace('( Type [IV]+)',\n '').str.strip().map(\n lambda x: x if x.endswith('s') else x + 's')\n\n\n # --- Marrow ---\n elif tissue == \"Marrow\":\n df = df.drop('plate.barcode', axis=1)\n\n # Fix all B cell annotations (contain capital B)\n rows = df['annotation'].str.contains('B')\n subset = df.loc[rows]\n pattern = '(.+)-B'\n df.loc[rows, 'subannotation'] = subset['annotation'].str.extract(pattern)\n df.loc[rows, 'annotation'] = 'b_cells'\n df['annotation'] = df['annotation'].str.replace(\n 'Monocytes_Monocyte-Progenitors', 'monocytes')\n df['annotation'] = df['annotation'].str.replace(\n 'Stem_Progenitors', 'hematopoietic stem cell')\n df['annotation'] = df['annotation'].str.replace('T_NK', 't_cells')\n\n # 'Immmature_Mature' --> \"maturing\"\n rows = df['subannotation'] == 'Immature_Mature'\n df.loc[rows, 'subannotation'] = 'maturing'\n\n # subannotation: MonoProgenitor --> progenitor\n # (since annotation says \"monocyte\" already)\n df['subannotation'] = df['subannotation'].str.replace(\n 'MonoProgenitor', 'progenitor')\n\n # Split on dash for granulocytes and neutrophils\n rows = df['annotation'].str.contains('-')\n pattern = '(?P.+)-(?P.+)'\n df.loc[rows] = df.loc[rows, 'annotation'].str.extract(pattern)\n\n # Remove all numbers\n df['subannotation'] = df['subannotation'].str.rstrip('0123456789')\n\n # --- Pancreas ---\n elif tissue == \"Pancreas\":\n df['subannotation'] = df['subannotation'].str.replace('Alpha - ', '')\n\n # --- Skin ---\n elif tissue == 'Skin':\n rows = df['annotation'].str.contains('IFE')\n df.loc[rows, 'annotation'] = df.loc[rows, 'annotation']\\\n .str.split().str[0] + '_cells'\n df.loc[rows, 'subannotation'] = 'interfollicular epidermis'\n\n # --- Spleen ---\n elif tissue == 'Spleen':\n df['annotation'] = df['annotation'].map(\n lambda x: x if x.endswith('s') else x + 's')\n df.annotation = df.annotation.str.replace('Follilular',\n 'Follicular').str.replace(\n 'T1/T2/Follicular', 'follicular')\n rows = df.annotation.str.contains('[BT] cells')\n pattern = '(?P[a-zA-Z 48+]+) (?P[BT] cells)'\n df.loc[rows] = df.annotation.str.extract(pattern)\n\n rows = df['annotation'].str.contains('Macrophages')\n df.loc[rows, 'annotation'] = 'myeloid_cells'\n\n # --- Tongue ---\n elif tissue == \"Tongue\":\n df['annotation'] = df['annotation'].str.replace('Basal layer',\n 'basal_cells')\n # --- Trachea ---\n elif tissue == \"Trachea\":\n df['annotation'] = df['annotation'].str.replace('Immunue', 'Immune')\n\n # --- Thymus ---\n elif tissue == 'Thymus':\n # Spellcheck\n df['annotation'] = df['annotation'].str.replace('differentation', 'differentation')\n\n rows = df['annotation'].str.startswith('thymocyte')\n df.loc[rows, 'subannotation'] = df.loc[rows, 'annotation'].str.extract(\n 'thymocyte_\\d_(.+)')\n\n # SP: single positive CD4 or CD8\n # DP: double positive CD4 and CD8\n df['subannotation'] = df['subannotation'].str.replace(\n 'DN', 'double_negative')\n df['subannotation'] = df['subannotation'].str.replace(\n 'DP', 'double_positive')\n df['subannotation'] = df['subannotation'].str.replace(\n 'SP', 'single_positive')\n 
df['subannotation'] = df['subannotation'].str.replace(\n 'SN', 'single_negative')\n df.loc[rows, 'annotation'] = 't_cell'\n\n df['annotation'] = df['annotation'].str.replace('&', 'and')\n df = _fix_nk_cells(df)\n df = df.applymap(clean_labels)\n return df","sub_path":"maca/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":8237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"82165024","text":"import tkinter\nfrom tkinter import *\nimport pygame\nfrom pygame import *\nfrom pygame.key import name\nfrom tkinter import filedialog\nfrom pypresence import Presence\nimport time\nfrom mutagen.mp3 import MP3\nimport tkinter.ttk as ttk\n\nsplash = Tk()\nsplash.iconbitmap(\"D:\\\\FruityProjects\\\\FruityPlayer\\\\fruity.ico\") # No need to change this\nsplash.title('FruityPlayer ~ Loading+')\n\nappW = 727\nappH = 200\n\nscreenW = splash.winfo_screenwidth()\nscreenH = splash.winfo_screenheight()\n\nx = (screenW / 2) - (appW / 2)\ny = (screenH / 2) - (appH / 2)\n\nsplash.geometry(f\"{appW}x{appH}+{int(x)}+{int(y)}\")\nsplash.configure(background='darkred')\nsplash.resizable(False, False)\n\nsplashLabel = Label(splash, text=\"Loading..\", font=(\"Bold\", 60))\nsplashLabel.pack(pady=20)\n\npygame.mixer.init()\n\ndef playerWindow():\n splash.destroy()\n\n root = Tk()\n root.title('FruityPlayer ~ 2021 Build+')\n root.iconbitmap(\"D:\\\\FruityProjects\\\\FruityPlayer\\\\fruity.ico\") # No need to change this\n \n appW = 500\n appH = 300\n\n screenW = root.winfo_screenwidth()\n screenH = root.winfo_screenheight()\n \n x = (screenW / 2) - (appW / 2)\n y = (screenH / 2) - (appH / 2)\n\n root.geometry(f\"{appW}x{appH}+{int(x)}+{int(y)}\")\n root.configure(background='darkred')\n root.resizable(False, False)\n\n background_image = PhotoImage(\"D:\\FruityProjects\\FruityPlayer\\images\\\\fruity2.png\")\n background_label = Label(root, image=background_image)\n background_label.place(x=0, y=0, relwidth=500, relheight=300)\n\n # back = PhotoImage(file=\"D:\\FruityProjects\\FruityPlayer\\images\\\\fruity2.png\")\n\n # backDisplay = Canvas(root, width=500, height=300)\n # backDisplay.pack(fill=\"both\", expand=True, anchor='nw')\n\n # backDisplay.create_image(0, 0, image=back)\n\n # Discordian Shitz Lolz (Only uncomment if you have discord open, will not work if discord is not open.)\n\n # client_id = '868092183427837973'\n # RPC = Presence(client_id)\n # RPC.connect()\n # RPC.update(details=\"FruityProjects\", state=\"Maybe listening to some jazz..\", large_image=\"fruit\")\n\n def statusTime():\n cTime = pygame.mixer.music.get_pos() / 1000\n converted = time.strftime('%H:%M:%S', time.gmtime(cTime))\n cSong = box.curselection()\n song = box.get(ACTIVE)\n song = f'D:\\FruityProjects\\FruityPlayer\\music\\{song}.mp3' # (PERSONAL GUIDE) Replace with the url to your music directory, along with the song.mp3 thing.\n mut = MP3(song)\n status.after(1000, statusTime)\n songL = mut.info.length\n convertedL = time.strftime('%H:%M:%S', time.gmtime(songL))\n status.config(text=f\"Elapsed: {converted} ~ Duration: {convertedL}\")\n\n # Variables for later lol\n\n box = Listbox(root, bg=\"grey\", fg=\"white\", width=60, selectbackground='red', selectforeground='yellow')\n box.pack(pady=20)\n\n # backButton = Button(name='Back')\n # forwardButton = Button(name='Forward')\n # playButton = Button(name='Play')\n # pauseButton = Button(name='Pause')\n # stopButton = Button(name='Stop')\n\n # Function Shitz Lol\n def playFunction():\n song = 
box.get(ACTIVE)\n song = f'D:/FruityProjects/FruityPlayer/music/{song}.mp3' # (PERSONAL GUIDE) Replace with the url to your music directory, along with the song.mp3 thing.\n\n pygame.mixer.music.load(song)\n pygame.mixer.music.play(loops=0)\n\n statusTime()\n\n def stopFunction():\n pygame.mixer.music.stop()\n box.selection_clear(ACTIVE)\n status.config(text=\"\")\n\n global paused\n paused = False\n\n def pauseFunction(pauseds):\n global paused\n paused = pauseds\n\n if paused:\n pygame.mixer.music.unpause()\n paused = False\n else: \n pygame.mixer.music.pause()\n paused = True\n\n def forwardFunction():\n nextS = box.curselection()\n # print(nextS)\n # print(nextS[0])\n nextS = nextS[0]+1\n song = box.get(nextS)\n\n song = f'D:/FruityProjects/FruityPlayer/music/{song}.mp3' # (PERSONAL GUIDE) Replace with the url to your music directory, along with the song.mp3 thing.\n\n pygame.mixer.music.load(song)\n pygame.mixer.music.play(loops=0)\n\n box.selection_clear(0, END)\n box.activate(nextS)\n box.selection_set(nextS, last=None)\n\n def backFunction():\n nextS = box.curselection()\n # print(nextS)\n # print(nextS[0])\n nextS = nextS[0]-1\n song = box.get(nextS)\n\n song = f'D:/FruityProjects/FruityPlayer/music/{song}.mp3' # (PERSONAL GUIDE) Replace with the url to your music directory, along with the song.mp3 thing.\n\n pygame.mixer.music.load(song)\n pygame.mixer.music.play(loops=0)\n\n box.selection_clear(0, END)\n box.activate(nextS)\n box.selection_set(nextS, last=None)\n\n def loopFunction():\n pygame.mixer.music.play(loops=-1)\n\n def replayFunction():\n pygame.mixer.music.rewind()\n\n controls = Frame(root)\n\n controls.pack()\n\n backButton = Button(controls, text='previous', background='purple', command=backFunction)\n forwardButton = Button(controls, text='next', background='darkgreen', command=forwardFunction)\n playButton = Button(controls, text='play', background='darkblue', command=playFunction)\n pauseButton = Button(controls, text='pause', background='yellow', command=lambda: pauseFunction(paused))\n stopButton = Button(controls, text='stop', background='pink', command=stopFunction)\n loopButton = Button(controls, text='loop', background='red', command=loopFunction)\n replayButton = Button(controls, text='replay', background='cyan', command=replayFunction)\n\n backButton.grid(row=0, column=0, padx=15)\n forwardButton.grid(row=0, column=1, padx=15)\n playButton.grid(row=0, column=2, padx=15)\n pauseButton.grid(row=0, column=3, padx=15)\n stopButton.grid(row=0, column=4, padx=15)\n loopButton.grid(row=0, column=5, padx=15)\n replayButton.grid(row=0, column=6, padx=15)\n\n # Menu shitz lol\n\n menu = Menu(root, tearoff=0)\n root.config(menu=menu)\n\n def addNewSong(): # (PERSONAL GUIDE) Change initialdir to your music directory, or just remove it..doesn't matter.\n newSong = filedialog.askopenfilename(initialdir=\"D:\\FruityProjects\\FruityPlayer\\music\", title='New Song Selector', filetypes=((\"MP3 Files\", \"*.mp3\"), ))\n # print(newSong)\n # newSong = newSong.replace(\"C:\", \"\")\n # newSong = newSong.replace(\"D:\", \"\")\n # newSong = newSong.replace(\"B:\", \"\")\n # newSong = newSong.replace(\"F:\", \"\")\n # newSong = newSong.replace(\"Y:\", \"\")\n newSong = newSong.replace(\"D:/FruityProjects/FruityPlayer/music/\", \"\") # (PERSONAL GUIDE) Change this to your music directory.\n newSong = newSong.replace(\".mp3\", \"\")\n # newSong = newSong.replace(\".opus\", \"\")\n # newSong = newSong.replace(\".wav\", \"\")\n # newSong = newSong.replace(\".ogg\", \"\")\n # # 
newSong = newSong.replace(\"/\", \"\")\n # # newSong = newSong.replace(\"\\\\\", \"\")\n # # newSong = newSong.replace(\"\", f\"{newSong}\")\n # newSong = newSong.replace(\"Downloads\", \"\")\n box.insert(END, newSong)\n\n def addNewSongs(): # (PERSONAL GUIDE) Change initialdir to your music directory, or just remove it..doesn't matter.\n newSongs = filedialog.askopenfilenames(initialdir=\"D:\\FruityProjects\\FruityPlayer\\music\", title='New Song Selector', filetypes=((\"MP3 Files\", \"*.mp3\"), ))\n\n for newSongw in newSongs:\n newSongw = newSongw.replace(\"D:/FruityProjects/FruityPlayer/music/\", \"\") # (PERSONAL GUIDE) Change this to your music directory.\n newSongw = newSongw.replace(\".mp3\", \"\")\n box.insert(END, newSongw)\n\n addSong = Menu(menu, tearoff=0)\n menu.add_cascade(label='New Song', menu=addSong)\n addSong.add_command(label='Add Song', command=addNewSong)\n addSong.add_command(label='Add Songs', command=addNewSongs)\n addSong.add_separator()\n addSong.add_command(label='Exit', command=root.destroy)\n\n def deleteSong():\n box.delete(ANCHOR)\n pygame.mixer.music.stop()\n\n def deleteSongs():\n box.delete(0, END)\n pygame.mixer.music.stop()\n\n removeSong = Menu(menu, tearoff=0)\n menu.add_cascade(label='Delete Song', menu=removeSong)\n removeSong.add_command(label=\"Remove Song\", command=deleteSong)\n removeSong.add_command(label=\"Remove Songs\", command=deleteSongs)\n\n status = Label(root, text='', bd=1, relief=GROOVE, anchor=E)\n status.pack(fill=X, side=BOTTOM, ipady=2)\n\n# Other Function Stuff lool\n\ndef confirm():\n confirmation = \"D:\\FruityProjects\\FruityPlayer\\musicAssets\\confirm.wav\" # (PERSONAL GUIDE) Make sure this shit exist somewhere in your files and link the dir right here, idk if it works without it or not so do it anyway lol\n pygame.mixer.music.load(confirmation)\n pygame.mixer.music.play()\n\nsplash.after(3000, confirm)\nsplash.after(3000, playerWindow)\nmainloop()","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"546378153","text":"from django.db import models\n\nclass Team(models.Model):\n team_name = models.CharField(max_length=50)\n abbreviation = models.CharField(max_length=3)\n team_logo = models.CharField(max_length=500)\n created_at = models.DateTimeField(auto_now_add=True)\n formation = models.ForeignKey('formations.Formation', related_name='teams', on_delete = models.CASCADE)\n players = models.ManyToManyField('players.Player', related_name='teams', blank=True)\n owner = models.ForeignKey(\n\t\t\"jwt_auth.User\",\n\t\trelated_name = \"teams\",\n\t\ton_delete = models.CASCADE\n)\n\n def __str__(self):\n return f\"{self.team_name}, {self.abbreviation}\"","sub_path":"teams/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"595406703","text":"import os\nfrom misaka import module02\nfrom misaka import module05\nfrom misaka import module06\nfrom misaka import module11\nfrom misaka import module16\nfrom misaka import module20\n\n\nclass Terminal:\n def __init__(self):\n self.lang = {\n \"en\": {\n \"init\": \"\"\"\nCommands:\n 02 launch 1-ary polynomial calc\n 05 launch document stat generator\n 06 launch joseph circle\n 11 launch net maker\n 16 launch hanoi\n 20 launch queueify\n help/? 
show this document\n exit quit\n\"\"\"\n }\n }\n self.clear()\n print(self.lang[\"en\"][\"init\"])\n print(\"misaka > \", end=\"\")\n self.li = [\"02\", \"05\", \"06\", \"11\", \"16\", \"20\"]\n\n def requests(self):\n # li: a list of sub-functions\n requests = input()\n self.clear()\n if requests == \"exit\":\n return False\n elif requests == \"help\" or requests == \"?\" or requests == \"-?\":\n print(self.lang[\"en\"][\"init\"])\n elif requests in self.li:\n eval(\"module\" + requests + \".Terminal()\")\n print(\"misaka > \", end=\"\")\n\n return True\n\n @staticmethod\n def clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\nengine = Terminal()\nwhile True:\n if not engine.requests():\n break\n","sub_path":"training/data structure/end-term/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"197652790","text":"import json\nimport time\nimport datetime\nfrom datetime import datetime as dt\nimport requests\nimport logging\n\nfrom rest_framework import status\n\nfrom collections import OrderedDict\nfrom django.utils import timezone\nfrom django.core.cache import cache\nfrom apps.base_app.services.exceptions import QException, AlpsException\nfrom apps.base_app.services.command import QQueueCommand\nfrom django.conf import settings\n\nfrom iqstorage.dc.keyword import KeywordSERPDataForSEOData\nimport iqstorage.common.storage_exceptions as s3e\nfrom iqdcutils.serp.dataforseo.rank_utils import RankUtils\n\n\nfrom apps.base_app.services.db_factory import GenericStorage\nfrom apps.base_app.services.helper import TransformHelper as Helper\nimport collections\n\n\nDEFAULT_RANK = 121\nDATA_NOT_AVAILABLE = None\nDATA_NA = 'NA'\nSEARCH_COMPETITOR_DOMAIN = '-1'\nactivity_logger = logging.getLogger('activity.transform.keyword')\nerror_logger = logging.getLogger('error.transform.keyword')\n\nDEFAULT_S3_OBJECT_NAME = 'latest'\n\ndef log_error(msg, error=None):\n if error is not None:\n error_logger.error('Project Metadata Transform|MSG:%s|ERROR:%s' % (msg, str(error)))\n else:\n error_logger.error('Project Metadata Transform|MSG:%s|ERROR:%s' % msg)\n\n\nclass ProjectMetadataTransform(object):\n\n # The initial constructor for the overall project transform. Contains Collection objects for all\n def __init__(self):\n self.storage_object = GenericStorage().get_object('mongodb')\n self.variables_to_ignore = [\n \"_id\",\n \"_tag\",\n \"created_date\",\n \"updated_date\",\n \"current_date\",\n \"page\",\n \"tenant_code\",\n \"brand_pack_link_count\",\n \"result_type\",\n \"base_domain\"\n ]\n # Keyword level common default values\n self.default_common_values = {\n 'rank': DEFAULT_RANK,\n 'blended_rank': DEFAULT_RANK,\n 'organic_rank': DEFAULT_RANK,\n }\n\n # Checks if the URL belongs to our domains or any of our competitors\n # Tells us if the url is relevant for our project. 
Returns 1/0\n def is_url_relevant(self, url):\n if url is not None:\n for alias_id in self.relevant_domain_list:\n for domain in self.relevant_domain_list[alias_id]['urls']:\n if Helper.domain_classify(domain, url):\n return 1\n return 0\n\n def get_alias_id(self, url):\n if url is not None:\n for alias_id in self.relevant_domain_list:\n for domain in self.relevant_domain_list[alias_id]['urls']:\n if Helper.domain_classify(domain, url):\n return str(alias_id)\n return SEARCH_COMPETITOR_DOMAIN\n\n def get_non_ranking_target_urls(self, target_urls, sorted_serp):\n ranking_urls = [rank['url'] for rank in sorted_serp]\n non_ranking_target_urls = list(set(target_urls).difference(ranking_urls))\n sample_kwd_doc = sorted_serp[-1].copy()\n for value in self.variables_to_ignore:\n if value in sample_kwd_doc:\n sample_kwd_doc.pop(value)\n\n for target_url_data in non_ranking_target_urls:\n target_url_doc = sample_kwd_doc.copy()\n target_url_doc['url'] = target_url_data\n target_url_doc.update(self.default_common_values)\n sorted_serp.append(target_url_doc)\n\n def is_project_relevant_url(self, each_rank, last_organic_rank):\n if (last_organic_rank <= 10) or \\\n (self.is_url_relevant(each_rank['url'])) or \\\n (each_rank.get('page', None) == 1):\n return True\n return False\n\n def get_my_domains(self):\n my_domains = self.project_details['domains'] + self.project_details['subdomains']\n target_urls = [theme['target_url']\n for theme in self.project_details['data']['themes']\n if self.keyword in theme['keywords'] and \\\n theme['target_url'] and \\\n theme['target_url'].lower() != 'not set'\n ]\n my_domains += target_urls\n return my_domains, target_urls\n\n def get_relevant_domain_list(self, my_domains):\n relevant_domain_list = dict()\n relevant_domain_list[\"0\"] = {\n 'alias_name': '__me__',\n 'urls': list(set(my_domains))\n }\n relevant_domain_list.update(self.project_details['business_competitor'])\n return collections.OrderedDict(sorted(relevant_domain_list.items()))\n\n def insert_for_date(self, keyword, req_date, locale, search_engine, device_type, transform_date, serp_data):\n try:\n # Get Docs from KWD-URL Transform\n sorted_serp = sorted(serp_data, key = lambda k: k['blended_rank'])\n\n # This is for aggregating some additional project_keyword_url_metrics\n project_keyword_url_list = []\n base_project_doc = {}\n self.keyword = keyword\n\n # Get all relevent my domain, competitor domains and URLs\n my_domains, target_urls = self.get_my_domains()\n self.relevant_domain_list = self.get_relevant_domain_list(my_domains)\n sample_kwd_doc = sorted_serp[-1].copy()\n\n # Append non-ranking target urls coming from themes\n self.get_non_ranking_target_urls(target_urls, sorted_serp)\n\n current_date = req_date\n tenant_code = self.project_message['tenant_code']\n project_id = self.project_message['project_id']\n\n # Adding project specific details common accross all URLs of the keyword\n base_project_doc['tenant_code'] = tenant_code\n base_project_doc['project_id'] = project_id\n base_project_doc['keyword'] = keyword\n base_project_doc['keyword_id'] = self.project_details['data']['keywords'][keyword]['id']\n base_project_doc['keyword_type'] = self.project_details['data']['keywords'][keyword]['type']\n base_project_doc['created_date'] = datetime.datetime.now()\n base_project_doc['search_engine'] = search_engine\n base_project_doc['locale'] = locale\n base_project_doc['device_type'] = device_type\n\n # TILL Top 10 organic ranks or my domain/competitors domains or Page 1 results\n last_organic_rank = 
None\n for each_rank in sorted_serp:\n organic_rank = each_rank['organic_rank']\n last_organic_rank = organic_rank if organic_rank is not None else last_organic_rank\n if self.is_project_relevant_url(each_rank, last_organic_rank):\n # To not go beyond organic rank 10 for search competitors or all page 1 results\n last_organic_rank = 11 if organic_rank == 10 else last_organic_rank\n\n project_document = base_project_doc.copy()\n\n # Retain fields from KWD doc\n for each_raw_field in each_rank:\n if each_raw_field not in self.variables_to_ignore:\n project_document[each_raw_field] = each_rank[each_raw_field]\n\n\n # Adding domain specific details\n alias_id = self.get_alias_id(each_rank['url'])\n project_document['alias_id'] = alias_id\n if alias_id == SEARCH_COMPETITOR_DOMAIN:\n project_document['alias_name'] = DATA_NA\n else:\n project_document['alias_name'] = self.relevant_domain_list[alias_id]['alias_name']\n project_keyword_url_list.append(project_document)\n\n\n # Create Dummy entries for non organic ranking my domain and competitor domains\n ranking_domains = [\n each['alias_id'] for each in project_keyword_url_list \\\n if each['organic_rank'] is not DATA_NOT_AVAILABLE\n ]\n\n non_ranking_domains = list(\n set(self.relevant_domain_list.keys()).difference(ranking_domains)\n )\n\n sample_project_doc = dict(\n (k, DATA_NOT_AVAILABLE) for k, v in \\\n sample_kwd_doc.copy().items()\n )\n for value in self.variables_to_ignore:\n if value in sample_kwd_doc:\n sample_project_doc.pop(value)\n\n sample_project_doc.update(base_project_doc)\n\n for alias_id in non_ranking_domains:\n alias_name = self.relevant_domain_list[alias_id]['alias_name']\n project_document = sample_project_doc.copy()\n project_document.update(self.default_common_values)\n\n # Domain specific dummy details\n project_document.update({\n 'alias_id': alias_id,\n 'alias_name': alias_name,\n })\n project_keyword_url_list.append(project_document)\n self.storage_object.insert_metadata_transform(\n project_keyword_url_list,\n settings.PROJECT_KW_URL_METADATA_COLLECTION,\n tenant_code,\n project_id,\n keyword,\n locale,\n search_engine,\n device_type\n )\n except Exception as e:\n log_msg = 'Error in inserting data for date for keyword: %s' % keyword\n log_error(log_msg, e)\n raise AlpsException(e)\n\n def transform(self, project_details, project_message):\n try:\n todays_date = timezone.datetime.now()\n self.project_message = project_message\n keyword_dict, serp_data = self.get_keyword_metrics(project_message)\n\n keyword = project_message['keyword']\n device_type = project_message['device_type']\n\n self.project_details = project_details\n if 'transform_date' not in project_message:\n project_message['transform_date'] = str(timezone.datetime.now())\n\n date_object = timezone.datetime.strptime(\n str(project_message['transform_date']),\n \"%Y-%m-%d %H:%M:%S.%f\"\n )\n current_date = timezone.datetime.strftime(\n date_object,\n \"%Y%m%d\"\n )\n if keyword in project_details['data']['keywords']:\n for locale in project_details['locales']:\n for search_engine in project_details['search_engines']:\n if serp_data:\n self.insert_for_date(\n keyword=keyword,\n req_date=current_date,\n locale=locale,\n search_engine=search_engine,\n device_type=device_type,\n transform_date=project_message['transform_date'],\n serp_data=serp_data\n )\n else:\n log_msg = 'Keyword:|%s| not in KWD Transform' % keyword\n log_error(log_msg)\n raise AlpsException(log_msg)\n else:\n log_msg = 'Keyword:|%s| not in Project:|%s|' \\\n % (keyword, project_details['id'])\n log_error(log_msg)\n raise AlpsException('Keyword:|%s| not in Project:|%s|' % (keyword, project_details['id']))\n except Exception as e:\n log_msg = 'Error in running project transform'\n log_error(log_msg)\n raise e\n\n def get_s3_object_name(self, date):\n \"\"\"\n :param date: should be datetime_object or str(datetime_object)\n :return: corresponding date\n \"\"\"\n try:\n date_str = str(date)\n date_object = dt.strptime(date_str, \"%Y-%m-%d %H:%M:%S.%f\")\n s3_object_name = date_object.strftime('%Y%m%d')\n return s3_object_name\n except Exception as e:\n log_msg = 'Error in getting s3 objects. Message: %s' % \\\n (str(e))\n log_error(log_msg)\n raise AlpsException(e)\n\n def get_keyword_metrics(self, project_message):\n keyword_dict = {}\n\n serp_data_dict = self.get_serp_data(project_message)\n\n if not serp_data_dict or len(serp_data_dict.get('serp_list', [])) == 0:\n # to handle old scenarios where dummy entries in DC are created\n serp_data_dict = None\n\n if serp_data_dict is not None:\n keyword_dict['serp_date'] = serp_data_dict['rank_date']\n url_serp_list = serp_data_dict['serp_list']\n else:\n # Rank data not available. LOG.\n log_msg = 'No Rank data for keyword: %s' % project_message['keyword']\n log_error(log_msg)\n keyword_dict['serp_date'] = DATA_NOT_AVAILABLE\n url_serp_list = list()\n return keyword_dict, url_serp_list\n\n def get_serp_data(self, project_message):\n serp_data_dict = {}\n serp_kwargs = dict(\n keyword=project_message['keyword'],\n locale=project_message['locale'],\n device_type=project_message['device_type'],\n search_engine=project_message['search_engine']\n )\n try:\n serp_response = KeywordSERPDataForSEOData(**serp_kwargs).get()\n r = RankUtils()\n serp_data_dict = r.get_all_results(serp_response, False)\n except s3e.FileNotFound as e:\n # some other serious issue. 
Log and raise\n pass\n return serp_data_dict\n\n\nclass Command(QQueueCommand):\n def get_project_details(self, tenant, project_id):\n try:\n project_cache_key = settings.ALPS_PROJECT_CACHE_KEY % (tenant, str(project_id))\n # project_details = cache.get(project_cache_key, None)\n project_details = None\n if project_details is None or 'data' not in project_details:\n url_scheme = 'https' if settings.SESSION_COOKIE_SECURE is True else 'http'\n url_str = url_scheme + '://%(ip)s/alps/manage/%(' \\\n 'tenant)s/projects/%(' \\\n 'pid)s/details?session_token=%(token)s'\n api_call_url = url_str % dict(\n ip=settings.API_IP_ADDRESS, tenant=tenant, pid=project_id,\n token=settings.ALPS_APPLICATION_SESSION_TOKEN\n )\n response = requests.get(url=api_call_url)\n if response.status_code == status.HTTP_200_OK:\n project_details = response.json()\n else:\n raise Exception('Server Error')\n project_details['business_competitor'] = json.loads(json.dumps(project_details.get('business_competitor')))\n return project_details\n except Exception as e:\n log_msg = 'Project details not fetched for tenant_code: %s project: %s' % (tenant, project_id)\n log_error(log_msg, e)\n raise e\n\n def process_message(self, *args, **kwargs):\n project_message = json.loads(args[0].get_body())\n tenant = project_message['tenant_code']\n locale = project_message['locale']\n keyword = project_message['keyword']\n device_type = project_message['device_type']\n project_id = int(project_message['project_id'])\n try:\n project_details = self.get_project_details(\n tenant=tenant, project_id=project_id\n )\n transform_object = ProjectMetadataTransform()\n transform_object.transform(project_details, project_message)\n except Exception as e:\n log_msg = 'Transform failed for tenant_code: %s project: %s keyword: %s' % (tenant, project_id, keyword)\n log_error(log_msg, e)\n # retry if needed\n finally:\n self.queue.delete_message(args[0])\n","sub_path":"apps/base_app/management/commands/project_meta_transform.py","file_name":"project_meta_transform.py","file_ext":"py","file_size_in_byte":16313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"294051310","text":"import sys,os\nimport sqlite3\nfrom PyQt5.QtWidgets import QApplication,QMessageBox,QHBoxLayout,QMainWindow,QDialog,QComboBox,QWidget,QGroupBox,QPushButton,QLabel,QButtonGroup,QLineEdit,QRadioButton,QFormLayout,QScrollArea,QVBoxLayout\nfrom PyQt5.QtCore import pyqtSlot,Qt\nfrom PyQt5.QtGui import QPixmap\nimport datetime\n\nimport custom_blockchain\n\nos.environ[\"QT_SCALE_FACTOR\"] = \"1.30\"\nQApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)\nQApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)\n\ndb = sqlite3.connect('VoterInfo.db')\n\n\nclass LoginWindow(QDialog):\n\n def __init__(self):\n super().__init__()\n self.title = 'Login'\n self.left = 10\n self.top = 10\n self.width = 900\n self.height = 600\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n \n self.lbl1=QLabel(\"Enter your Voter ID\",self)\n self.lbl1.setStyleSheet(\"font: 16px\")\n self.lbl1.move(100,100)\n self.lbl1.resize(200,70)\n \n self.lbl2=QLabel(\"Enter Your Voting Token\",self)\n self.lbl2.setStyleSheet(\"font: 16px\")\n self.lbl2.move(100,250)\n self.lbl2.resize(200,70)\n \n self.text_box1=QLineEdit(self)\n self.text_box1.move(400,100)\n self.text_box1.resize(400,70)\n \n self.text_box2=QLineEdit(self)\n self.text_box2.setEchoMode(QLineEdit.Password)\n self.text_box2.move(400,250)\n 
self.text_box2.resize(400,70)\n \n self.button1=QPushButton(\"Login\",self)\n self.button1.clicked.connect(self.loginUser)\n self.button1.move(320,400)\n self.button1.resize(250,70)\n \n self.show()\n \n @pyqtSlot()\n def loginUser(self):\n voterId=self.text_box1.text()\n token=self.text_box2.text()\n db = sqlite3.connect('VoterInfo.db') \n cursor = db.cursor()\n\n cursor.execute(f\"SELECT Voted FROM tokentable where voter='{voterId}' and token='{token}'\") \n all_rows1 = cursor.fetchall()\n print(all_rows1) \n if len(all_rows1)>0: \n if all_rows1[0][0]=='1':\n QMessageBox.about(self, \"Alert\",\"Already Voted\") \n else:\n self.hide()\n cursor.execute(f\"SELECT name FROM candidate\") \n all_rows = cursor.fetchall() \n print(all_rows)\n db.close()\n voting_win=VotingWindow(voterId,all_rows,self) \n else:\n QMessageBox.about(self, \"Alert\",\"Invalid Credentials\") \n db.close()\n \nclass VotingWindow(QDialog):\n def __init__(self,voterId,candidate_list,parent=None):\n super().__init__(parent)\n self.title = 'Cast Voting'\n self.left = 10\n self.top = 10\n self.width = 900\n self.height = 600\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.voterId=voterId\n self.lbl1=QLabel(\"Cast your Vote\",self)\n self.lbl1.setStyleSheet(\"font: 16px\")\n \n self.candidate_list=candidate_list\n \n self.button1=QPushButton(\"Cast your Vote\",self)\n self.button1.clicked.connect(self.castVote)\n \n self.scrollLayout = QVBoxLayout()\n self.scrollWidget = QWidget()\n self.scrollWidget.setLayout(self.scrollLayout)\n \n self.scrollArea = QScrollArea()\n self.scrollArea.setWidgetResizable(True)\n self.scrollArea.setWidget(self.scrollWidget)\n \n self.layout1 = QVBoxLayout()\n self.layout1.addWidget(self.lbl1)\n self.layout1.addSpacing(10) \n self.layout1.addWidget(self.scrollArea) \n self.layout1.addWidget(self.button1)\n self.layout1.addSpacing(20)\n\n self.init_candidates()\n \n def setCandidateList(self,candidate_list):\n self.candidate_list=candidate_list\n \n def init_candidates(self):\n self.button_group = QButtonGroup()\n for i in range(len(self.candidate_list)):\n \n layout = QHBoxLayout()\n \n rb=QRadioButton(\"\")\n self.button_group.addButton(rb,i)\n \n lbl_box=QLabel(self.candidate_list[i][0],self)\n lbl_box.setStyleSheet(\"font:16px\")\n \n layout.addWidget(rb)\n layout.addWidget(lbl_box)\n layout.addStretch(1) \n \n self.scrollLayout.addLayout(layout)\n \n self.setLayout(self.layout1)\n self.show()\n \n @pyqtSlot()\n def castVote(self):\n if self.button_group.checkedId()>=0:\n print(\"voted \",self.candidate_list[self.button_group.checkedId()][0])\n \n blockchain = custom_blockchain.Blockchain()\n blockchain.load_stored_blockchain()\n \n verify_data=blockchain.is_chain_valid()\n if verify_data:\n \n blockchain.add_and_mine_block(self.voterId,self.candidate_list[self.button_group.checkedId()][0])\n blockchain.update_stored_blockchain()\n \n db = sqlite3.connect('VoterInfo.db') \n cursor = db.cursor()\n cursor.execute(f\"Update tokentable set voted='1' where voter='{self.voterId}'\") \n db.commit()\n \n self.hide()\n voting_win=ThankYouWindow(self.candidate_list[self.button_group.checkedId()][0],self)\n else:\n QMessageBox.about(self, \"Alert\",\"Blockchain Voting data corrupted. Voting is suspended currently. 
Please wait until further Notice.\") \n self.close()\n else: \n QMessageBox.about(self, \"Alert\",\"Please select one candidate.\") \n \nclass ThankYouWindow(QDialog):\n\n def __init__(self,candidate,parent=None):\n super().__init__(parent)\n self.title = 'ThankYou'\n self.left = 10\n self.top = 10\n self.width = 900\n self.height = 600\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n \n self.lbl1=QLabel(f\"Thank You For Voting. \\nYou Voted for {candidate}.\\n\\nPlease Press Exit.\",self)\n self.lbl1.setStyleSheet(\"font: 20px\")\n self.lbl1.move(200,200)\n self.lbl1.resize(500,140)\n \n self.button1=QPushButton(\"Exit\",self)\n self.button1.clicked.connect(self.exit_now)\n self.button1.move(320,400)\n self.button1.resize(250,70)\n self.show()\n\n @pyqtSlot()\n def exit_now(self):\n self.hide()\n self.close()\n \nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = LoginWindow()\n sys.exit(app.exec_())","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":6981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"159042616","text":"import torch\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom .config import PAD\n\ndef batch_sequences(seqs, max_length=None, batch_first=False, sort=False, pack=False):\n max_length = max_length or float('inf')\n batch_dim, time_dim = (0, 1) if batch_first else (1, 0)\n if len(seqs) == 1:\n lengths = [min(len(seqs[0]), max_length)]\n seq_tensor = seqs[0][:lengths[0]]\n seq_tensor = seq_tensor.unsqueeze(batch_dim)\n else:\n if sort:\n seqs.sort(key=len, reverse=True)\n lengths = [min(len(s), max_length) for s in seqs]\n batch_length = max(lengths)\n tensor_size = (len(seqs), batch_length) if batch_first \\\n else (batch_length, len(seqs))\n seq_tensor = torch.LongTensor(*tensor_size).fill_(PAD)\n for i, seq in enumerate(seqs):\n end_seq = lengths[i]\n seq_tensor.narrow(time_dim, 0, end_seq).select(batch_dim, i)\\\n .copy_(seq[:end_seq])\n if pack:\n seq_tensor = pack_padded_sequence(\n seq_tensor, lengths, batch_first=batch_first)\n return (seq_tensor, lengths)\n","sub_path":"seq2seq/tools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"609838261","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import datasets\nimport os\n\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n# 自动查看是否有mnist缓存数据集,如果没有会自动从google cloud 下载\n# x: [60k, 28, 28]\n# y: [60k]\n(x, y), (x_test,y_test) = datasets.mnist.load_data()\n# x归一化\nx = tf.convert_to_tensor(x, dtype=tf.float32) / 255\ny = tf.convert_to_tensor(y, dtype=tf.int32)\nx_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255\ny_test = tf.convert_to_tensor(y_test, dtype=tf.int32)\nprint(x.shape, y.shape, x.dtype, y.dtype)\nprint(tf.reduce_min(x), tf.reduce_max(x))\nprint(tf.reduce_min(y), tf.reduce_max(y))\n\n# 迭代取值 每次128条\ntrain_db = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)\ntest_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(128)\ntrain_iter = iter(train_db)\nsample = next(train_iter)\nprint('batch:', sample[0].shape, sample[1].shape)\n\n# [b, 784] => [b, 256] => [b, 128] => [b, 10]\n# [dim_in, dim_out], [dim_out]\nw1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))\nb1 = tf.Variable(tf.zeros([256]))\nw2 = tf.Variable(tf.random.truncated_normal([256, 128], 
stddev=0.1))\nb2 = tf.Variable(tf.zeros([128]))\nw3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))\nb3 = tf.Variable(tf.zeros([10]))\n\nlr = 1e-3\nfor epoch in range(100): # iterate over the db for 100 epochs\n for step, (x, y) in enumerate(train_db): # for every batch\n # x:[128, 28, 28]\n # y:[128]\n # [b,28,28] => [b,28*28]\n x = tf.reshape(x, [-1, 28 * 28])\n with tf.GradientTape() as tape:\n # h1 = x@w1 + b1\n # [b,784]@[784,256] + [256] (broadcast_to can be omitted here; it broadcasts automatically)\n h1 = x @ w1 + tf.broadcast_to(b1, [x.shape[0], 256])\n h1 = tf.nn.relu(h1)\n h2 = h1 @ w2 + b2\n h2 = tf.nn.relu(h2)\n out = h2 @ w3 + b3\n\n # compute loss\n # out: [b,10]\n # y:[b]\n y_onehot = tf.one_hot(y, depth=10)\n\n # mse = mean(sum((y-out)^2))\n # [b,10]\n loss = tf.square(y_onehot - out)\n # mean\n loss = tf.reduce_mean(loss)\n\n # compute the gradients\n grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])\n # w1.assign_sub() updates in place (keeps w1 a tf.Variable)\n # w1 = w1 - lr * grads[0]\n w1.assign_sub(lr * grads[0])\n # b1 = b1 - lr * grads[1]\n b1.assign_sub(lr * grads[1])\n # w2 = w2 - lr * grads[2]\n w2.assign_sub(lr * grads[2])\n # b2 = b2 - lr * grads[3]\n b2.assign_sub(lr * grads[3])\n # w3 = w3 - lr * grads[4]\n w3.assign_sub(lr * grads[4])\n # b3 = b3 - lr * grads[5]\n b3.assign_sub(lr * grads[5])\n\n if step % 100 == 0:\n print(epoch, step, 'loss:', float(loss))\n\n # test/evaluation\n # use the current w1 b1 w2 b2 w3 b3\n total_correct,total_num = 0,0\n for step, (x, y) in enumerate(test_db):\n # [b,28,28] => [b,28*28]\n x = tf.reshape(x,[-1,28*28])\n # [b,784] => [b,256] => [b,128] => [b,10]\n h1 = tf.nn.relu(x@w1 + b1)\n h2 = tf.nn.relu(h1@w2 + b2)\n out = h2@w3 + b3\n\n # out [b,10]\n prob = tf.nn.softmax(out,axis=1)\n # [b,10] => [b]\n pred = tf.argmax(prob,axis=1)\n pred = tf.cast(pred,dtype=tf.int32)\n # y:[b]\n correct = tf.reduce_sum(tf.cast(tf.equal(pred,y),dtype=tf.int32))\n total_correct += int(correct)\n total_num += x.shape[0]\n acc = total_correct/total_num\n print('acc test: ',acc)\n","sub_path":"test/stu04/stu10forward.py","file_name":"stu10forward.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"457139535","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport unittest\n\nimport mock\n\nfrom gntplib import tests\nfrom gntplib.client import GNTPConnection, GNTPConnectionHandler\nfrom gntplib.exceptions import GNTPError, ResponseError\nfrom gntplib.models import Response\n\n\nclass MockSocket(object):\n\n def __init__(self, buf):\n self.buf = buf\n\n def recv(self, size):\n result = self.buf[:size]\n self.buf = self.buf[len(result):]\n return result\n\n\ndef create_connection(buf, max_buffer_size=100, read_chunk_size=10):\n return GNTPConnection(MockSocket(buf),\n max_buffer_size=max_buffer_size,\n read_chunk_size=read_chunk_size)\n\n\nclass ReadUntilTestCase(tests.CallbackTestCase):\n\n def test_empty(self):\n conn = create_connection(b'')\n conn.read_until(b'\\r\\n', self.fail)\n\n def test_not_found(self):\n conn = create_connection(b'a')\n conn.read_until(b'\\r\\n', self.fail)\n\n def test_last_line(self):\n conn = create_connection(b'a\\r\\n')\n conn.read_until(b'\\r\\n', self.stop)\n self.assertEqual(self.wait(), b'a\\r\\n')\n\n def test_first_line(self):\n conn = create_connection(b'a\\r\\n\\r\\n')\n conn.read_until(b'\\r\\n', self.stop)\n self.assertEqual(self.wait(), b'a\\r\\n')\n\n def test_partial_read(self):\n conn = create_connection(b'a\\r\\n\\r\\n', read_chunk_size=1)\n 
conn.read_until(b'\\r\\n', self.stop)\n self.assertEqual(self.wait(), b'a\\r\\n')\n conn.read_until(b'\\r\\n', self.stop)\n self.assertEqual(self.wait(), b'\\r\\n')\n\n def test_max_buffer_size(self):\n conn = create_connection(b'abc\\r\\n', max_buffer_size=5)\n conn.read_until(b'\\r\\n', self.stop)\n self.assertEqual(self.wait(), b'abc\\r\\n')\n\n def test_too_large_message_size(self):\n conn = create_connection(b'abcd\\r\\n', max_buffer_size=5)\n with self.assertRaisesRegexp(IOError, 'reached max buffer size'):\n conn.read_until(b'\\r\\n', self.fail)\n\n\nclass ReadBytesTestCase(tests.CallbackTestCase):\n\n def test_empty(self):\n conn = create_connection(b'')\n conn.read_bytes(1, self.fail)\n\n def test_short(self):\n conn = create_connection(b'abc')\n conn.read_bytes(5, self.fail)\n\n def test_just_size(self):\n conn = create_connection(b'abc')\n conn.read_bytes(3, self.stop)\n self.assertEqual(self.wait(), b'abc')\n\n def test_enough_size(self):\n conn = create_connection(b'abcde')\n conn.read_bytes(3, self.stop)\n self.assertEqual(self.wait(), b'abc')\n\n def test_partial_read(self):\n conn = create_connection(b'abcdefg')\n conn.read_bytes(3, self.stop)\n self.assertEqual(self.wait(), b'abc')\n conn.read_bytes(3, self.stop)\n self.assertEqual(self.wait(), b'def')\n\n def test_too_large_num_byets(self):\n conn = create_connection(b'', max_buffer_size=5)\n with self.assertRaisesRegexp(GNTPError, 'too large num_bytes'):\n conn.read_bytes(6, self.fail)\n\n\nclass HandlerCallbackTestCase(unittest.TestCase):\n\n error_response_message = (b'GNTP/1.0 -ERROR NONE\\r\\n'\n b'Error-Code: 404\\r\\n'\n b'Error-Description: foo\\r\\n'\n b'\\r\\n'\n b'\\r\\n')\n\n def create_handler(self, final_callback=None, socket_callback=None):\n result = GNTPConnectionHandler(mock.MagicMock(),\n final_callback and mock.MagicMock(),\n socket_callback and mock.MagicMock())\n result.read_message = mock.MagicMock()\n result.close = mock.MagicMock()\n return result\n\n\nclass OnOkMessageTestCase(HandlerCallbackTestCase):\n\n response_message = (b'GNTP/1.0 -OK NONE\\r\\n'\n b'Response-Action: REGISTER\\r\\n'\n b'\\r\\n'\n b'\\r\\n')\n\n def test_without_final_callback_without_socket_callback(self):\n handler = self.create_handler()\n handler.on_ok_message(self.response_message)\n\n self.assertTrue(handler.close.called)\n self.assertFalse(handler.read_message.called)\n\n def test_with_final_callback_without_socket_callback(self):\n handler = self.create_handler(final_callback=True)\n handler.on_ok_message(self.response_message)\n\n self.assertTrue(handler.close.called)\n self.assertFalse(handler.read_message.called)\n response = handler.final_callback.call_args[0][0]\n self.assertIsInstance(response, Response)\n\n def test_without_final_callback_with_socket_callback(self):\n handler = self.create_handler(socket_callback=True)\n handler.on_callback_message = mock.sentinel.callback_object\n handler.on_ok_message(self.response_message)\n\n self.assertFalse(handler.close.called)\n handler.read_message.assert_called_once_with(\n mock.sentinel.callback_object)\n\n def test_with_final_callback_with_socket_callback(self):\n handler = self.create_handler(final_callback=True,\n socket_callback=True)\n handler.on_callback_message = mock.sentinel.callback_object\n handler.on_ok_message(self.response_message)\n\n self.assertFalse(handler.close.called)\n self.assertFalse(handler.final_callback.called)\n handler.read_message.assert_called_once_with(\n mock.sentinel.callback_object)\n\n def test_error_response(self):\n with 
self.assertRaises(ResponseError):\n handler = self.create_handler()\n handler.on_ok_message(self.error_response_message)\n\n\nclass OnCallbackMessageTestCase(HandlerCallbackTestCase):\n\n response_message = (b'GNTP/1.0 -CALLBACK NONE\\r\\n'\n b'Application-Name: App\\r\\n'\n b'Notification-ID: (null)\\r\\n'\n b'Notification-Callback-Result: CLICKED\\r\\n'\n b'Notification-Callback-Timestamp: 2012-01-01\\r\\n'\n b'Notification-Callback-Context: \\r\\n'\n b'Notification-Callback-Context-Type: \\r\\n'\n b'\\r\\n'\n b'\\r\\n')\n\n def test_without_final_callback(self):\n handler = self.create_handler(socket_callback=True)\n handler.on_callback_message(self.response_message)\n\n self.assertTrue(handler.close.called)\n response = handler.socket_callback.call_args[0][0]\n self.assertIsInstance(response, Response)\n\n def test_with_final_callback(self):\n handler = self.create_handler(final_callback=True)\n handler.socket_callback = mock.MagicMock(\n return_value=mock.sentinel.rv_object)\n handler.on_callback_message(self.response_message)\n\n self.assertTrue(handler.close.called)\n response = handler.socket_callback.call_args[0][0]\n self.assertIsInstance(response, Response)\n handler.final_callback.assert_called_once_with(\n mock.sentinel.rv_object)\n\n def test_error_response(self):\n with self.assertRaises(ResponseError):\n handler = self.create_handler()\n handler.on_callback_message(self.error_response_message)\n\n\nclass GNTPConnectionHandlerTestCase(unittest.TestCase):\n\n def setUp(self):\n self.conn = mock.MagicMock()\n self.handler = GNTPConnectionHandler(self.conn)\n\n def test_write(self):\n self.handler.write(b'foo')\n\n self.conn.write.assert_called_with(b'foo')\n\n def test_read_message(self):\n self.handler.read_message(mock.sentinel.callback_object)\n\n self.conn.read_until.assert_called_with(b'\\r\\n\\r\\n',\n mock.sentinel.callback_object)\n\n def test_close(self):\n self.assertIs(self.handler.conn, self.conn)\n\n self.handler.close()\n self.assertTrue(self.conn.close.called)\n self.assertIsNone(self.handler.conn)\n\n\nclass GNTPConnectionTestCase(unittest.TestCase):\n\n def setUp(self):\n self.sock = mock.MagicMock()\n self.conn = GNTPConnection(self.sock)\n\n def test_write(self):\n self.conn.write(b'foo')\n\n self.sock.send.assert_called_with(b'foo')\n\n def test_close(self):\n self.assertIs(self.conn.sock, self.sock)\n\n self.conn.close()\n self.assertTrue(self.sock.close.called)\n self.assertIsNone(self.conn.sock)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"gntplib/tests/connection_test.py","file_name":"connection_test.py","file_ext":"py","file_size_in_byte":8546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"138224625","text":"# this is a function for finding the GCD, using the Euclidean algorithm\n\n\ndef euclid(a, b):\n if b == 0: # if one of the numbers is 0, the GCD equals the other number.\n return a\n else:\n k = a % b\n return euclid(b, k)\n\n\n# multiplicative inverse\ndef exteuclid(a, b):\n r1 = a\n r2 = b\n s1 = int(1)\n s2 = int(0)\n t1 = int(0)\n t2 = int(1)\n\n while r2 > 0:\n q = r1 // r2\n r = r1 - q * r2\n r1 = r2\n r2 = r\n s = s1 - q * s2\n s1 = s2\n s2 = s\n t = t1 - q * t2\n t1 = t2\n t2 = t\n\n if t1 < 0:\n t1 = t1 % a\n\n return r1, t1\n\n\n# Enter two large prime\n# numbers p and q\np = 823\nq = 953\nn = p * q\nPn = (p - 1) * (q - 1)\n\n# Generate encryption key\n# in range 1 2D00, 23:59:59 -> 59FF\n latest_hex = \"{:03x}{:01x}\".format(\n int(latest_struct.tm_hour*60 + 
latest_struct.tm_min),\n int(floor(latest_struct.tm_sec/3.75)))\n\n print('Latest Last Modified: {:02d}:{:02d}:{:02d} -> {} of {} files from: git status'.format(\n latest_struct.tm_hour, latest_struct.tm_min, latest_struct.tm_sec,\n latest_hex,\n len(status_mtimes)))\n\n versionCmd = ['describe']\n versionCmd.append('--tags')\n versionCmd.append('--long')\n versionCmd.append('--dirty=_' + (latest_hex or 'X'))\n versionCmd.append('--abbrev=7')\n versionCmd.append('--always')\n\n versionDesc = exec_git_line(versionCmd)\n\n reVersion = re.compile(\n r'^(.*?)(?:-0)?(?:-g?)?([0-9a-f]{7}(?:_(?:X|[0-9A-F]{4}))?)')\n\n match = reVersion.match(versionDesc)\n if match == None:\n raise Exception('could not parse version \"{}\" from: git {}'.format(\n versionDesc, ' '.join(versionCmd)))\n\n # print(versionDesc)\n\n tagVersion = match.group(1) or 'No Version'\n revPrefix = 'X:' if len(status_paths) else 'rev:'\n revision = revPrefix + match.group(2)\n\n tagVersion = re.sub(r'[<>:\",/\\\\|?*]', '_', tagVersion.strip())\n revision = re.sub(r'[<>:\",/\\\\|?*]', '_', revision.strip())\n\n safe_file_name = '{}, {}.hex'.format(tagVersion, revision)\n\n commit_dir = COMMIT_HEX_EXPERIMENT_DIR if len(\n status_mtimes) else COMMIT_HEX_COMMIT_DIR\n\n hex_path = path.join(working_folder_root, *commit_dir)\n hex_file = path.join(hex_path, safe_file_name)\n\n if not path.exists(path.dirname(hex_file)):\n try:\n makedirs(path.dirname(hex_file))\n except OSError as exc: # Guard against race condition\n if exc.errno != EEXIST:\n raise\n\n copyfile(argv[1], hex_file)\n print('**** Saved copy of hex as: \"{}\"'.format(hex_file))\n","sub_path":"git_store_hex.py","file_name":"git_store_hex.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"463819218","text":"import math\r\n\r\np0 = 70\r\np1 = None\r\np2 = None \r\npg = None\r\npg_anterior = None\r\nerror = None\r\nepsilon = math.pow(10, -5)\r\nitr_n = 0\r\n\r\ndef g_x(x):\r\n    return 2.18902 * pow((pow(x, 1.41844) - 32.6757), 0.585062)\r\n\r\n# Compute the initial p-hat (pg)\r\np1 = g_x(p0)\r\np2 = g_x(p1)\r\npg = p0 - ((p1 - p0)**2) / (p2 - 2 * p1 + p0)\r\n\r\n# Since this is the first iterate, the error cannot be computed yet\r\nprint(\"{} & {: .10f} & {: .10f} & {: .10f} & {: .10f} & - \\\\\\\\\".format(itr_n, p0, p1, p2, pg))\r\nwhile True:\r\n\r\n    itr_n = itr_n + 1\r\n    pg_anterior = pg\r\n\r\n    # Improve p_0 using the previous p-hat\r\n    p0 = pg\r\n    p1 = g_x(p0)\r\n    p2 = g_x(p1)\r\n\r\n    # Aitken's formula \r\n    pg = p0 - ((p1 - p0)**2) / (p2 - 2 * p1 + p0)\r\n\r\n    #Error between successive p-hat approximations\r\n    error = math.fabs(pg - pg_anterior)\r\n\r\n    print(\"{} | {: .10f} | {: .10f} | {: .10f} | {: .10f} | {: e} \".format(itr_n, p0, p1, p2, pg, error))\r\n    if(error < epsilon):\r\n        break","sub_path":"Parcial 1/Ejercicio_4_Steffensen_1.py","file_name":"Ejercicio_4_Steffensen_1.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"423715592","text":"# -*- encoding: utf-8 -*-\n# Copyright (c) 2016 b<>com\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nA :ref:`Strategy ` is an algorithm implementation which is\nable to find a :ref:`Solution ` for a given\n:ref:`Goal `.\n\nThere may be several potential strategies which are able to achieve the same\n:ref:`Goal `. This is why it is possible to configure which\nspecific :ref:`Strategy ` should be used for each goal.\n\nSome strategies may provide better optimization results but may take more time\nto find an optimal :ref:`Solution `.\n\"\"\"\n\nimport pecan\nfrom pecan import rest\nfrom wsme import types as wtypes\nimport wsmeext.pecan as wsme_pecan\n\nfrom watcher.api.controllers import base\nfrom watcher.api.controllers import link\nfrom watcher.api.controllers.v1 import collection\nfrom watcher.api.controllers.v1 import types\nfrom watcher.api.controllers.v1 import utils as api_utils\nfrom watcher.common import exception\nfrom watcher.common import policy\nfrom watcher.common import utils as common_utils\nfrom watcher.decision_engine import rpcapi\nfrom watcher import objects\n\n\ndef hide_fields_in_newer_versions(obj):\n \"\"\"This method hides fields that were added in newer API versions.\n\n Certain node fields were introduced at certain API versions.\n These fields are only made available when the request's API version\n matches or exceeds the versions when these fields were introduced.\n \"\"\"\n pass\n\n\nclass Strategy(base.APIBase):\n \"\"\"API representation of a strategy.\n\n This class enforces type checking and value constraints, and converts\n between the internal object model and the API representation of a strategy.\n \"\"\"\n _goal_uuid = None\n _goal_name = None\n\n def _get_goal(self, value):\n if value == wtypes.Unset:\n return None\n goal = None\n try:\n if (common_utils.is_uuid_like(value) or\n common_utils.is_int_like(value)):\n goal = objects.Goal.get(pecan.request.context, value)\n else:\n goal = objects.Goal.get_by_name(pecan.request.context, value)\n except exception.GoalNotFound:\n pass\n if goal:\n self.goal_id = goal.id\n return goal\n\n def _get_goal_uuid(self):\n return self._goal_uuid\n\n def _set_goal_uuid(self, value):\n if value and self._goal_uuid != value:\n self._goal_uuid = None\n goal = self._get_goal(value)\n if goal:\n self._goal_uuid = goal.uuid\n\n def _get_goal_name(self):\n return self._goal_name\n\n def _set_goal_name(self, value):\n if value and self._goal_name != value:\n self._goal_name = None\n goal = self._get_goal(value)\n if goal:\n self._goal_name = goal.name\n\n uuid = types.uuid\n \"\"\"Unique UUID for this strategy\"\"\"\n\n name = wtypes.text\n \"\"\"Name of the strategy\"\"\"\n\n display_name = wtypes.text\n \"\"\"Localized name of the strategy\"\"\"\n\n links = wtypes.wsattr([link.Link], readonly=True)\n \"\"\"A list containing a self link and associated goal links\"\"\"\n\n goal_uuid = wtypes.wsproperty(wtypes.text, _get_goal_uuid, _set_goal_uuid,\n mandatory=True)\n \"\"\"The UUID of the goal this audit refers to\"\"\"\n\n goal_name = wtypes.wsproperty(wtypes.text, _get_goal_name, _set_goal_name,\n mandatory=False)\n \"\"\"The name of the goal this audit refers to\"\"\"\n\n parameters_spec = {wtypes.text: types.jsontype}\n \"\"\"Parameters spec dict\"\"\"\n\n def __init__(self, **kwargs):\n super(Strategy, self).__init__()\n\n self.fields = []\n self.fields.append('uuid')\n self.fields.append('name')\n self.fields.append('display_name')\n 
self.fields.append('goal_uuid')\n self.fields.append('goal_name')\n self.fields.append('parameters_spec')\n setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset))\n setattr(self, 'name', kwargs.get('name', wtypes.Unset))\n setattr(self, 'display_name', kwargs.get('display_name', wtypes.Unset))\n setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset))\n setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset))\n setattr(self, 'parameters_spec', kwargs.get('parameters_spec',\n wtypes.Unset))\n\n @staticmethod\n def _convert_with_links(strategy, url, expand=True):\n if not expand:\n strategy.unset_fields_except(\n ['uuid', 'name', 'display_name', 'goal_uuid', 'goal_name'])\n\n strategy.links = [\n link.Link.make_link('self', url, 'strategies', strategy.uuid),\n link.Link.make_link('bookmark', url, 'strategies', strategy.uuid,\n bookmark=True)]\n return strategy\n\n @classmethod\n def convert_with_links(cls, strategy, expand=True):\n strategy = Strategy(**strategy.as_dict())\n hide_fields_in_newer_versions(strategy)\n return cls._convert_with_links(\n strategy, pecan.request.host_url, expand)\n\n @classmethod\n def sample(cls, expand=True):\n sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',\n name='DUMMY',\n display_name='Dummy strategy')\n return cls._convert_with_links(sample, 'http://localhost:9322', expand)\n\n\nclass StrategyCollection(collection.Collection):\n \"\"\"API representation of a collection of strategies.\"\"\"\n\n strategies = [Strategy]\n \"\"\"A list containing strategies objects\"\"\"\n\n def __init__(self, **kwargs):\n super(StrategyCollection, self).__init__()\n self._type = 'strategies'\n\n @staticmethod\n def convert_with_links(strategies, limit, url=None, expand=False,\n **kwargs):\n strategy_collection = StrategyCollection()\n strategy_collection.strategies = [\n Strategy.convert_with_links(g, expand) for g in strategies]\n strategy_collection.next = strategy_collection.get_next(\n limit, url=url, **kwargs)\n return strategy_collection\n\n @classmethod\n def sample(cls):\n sample = cls()\n sample.strategies = [Strategy.sample(expand=False)]\n return sample\n\n\nclass StrategiesController(rest.RestController):\n \"\"\"REST controller for Strategies.\"\"\"\n def __init__(self):\n super(StrategiesController, self).__init__()\n\n from_strategies = False\n \"\"\"A flag to indicate if the requests to this controller are coming\n from the top-level resource Strategies.\"\"\"\n\n _custom_actions = {\n 'detail': ['GET'],\n 'state': ['GET'],\n }\n\n def _get_strategies_collection(self, filters, marker, limit, sort_key,\n sort_dir, expand=False, resource_url=None):\n additional_fields = [\"goal_uuid\", \"goal_name\"]\n\n api_utils.validate_sort_key(\n sort_key, list(objects.Strategy.fields) + additional_fields)\n api_utils.validate_search_filters(\n filters, list(objects.Strategy.fields) + additional_fields)\n limit = api_utils.validate_limit(limit)\n api_utils.validate_sort_dir(sort_dir)\n\n marker_obj = None\n if marker:\n marker_obj = objects.Strategy.get_by_uuid(\n pecan.request.context, marker)\n\n need_api_sort = api_utils.check_need_api_sort(sort_key,\n additional_fields)\n sort_db_key = (sort_key if not need_api_sort\n else None)\n\n strategies = objects.Strategy.list(\n pecan.request.context, limit, marker_obj, filters=filters,\n sort_key=sort_db_key, sort_dir=sort_dir)\n\n strategies_collection = StrategyCollection.convert_with_links(\n strategies, limit, url=resource_url, expand=expand,\n sort_key=sort_key, sort_dir=sort_dir)\n\n if 
need_api_sort:\n api_utils.make_api_sort(strategies_collection.strategies,\n sort_key, sort_dir)\n\n return strategies_collection\n\n @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text,\n int, wtypes.text, wtypes.text)\n def get_all(self, goal=None, marker=None, limit=None,\n sort_key='id', sort_dir='asc'):\n \"\"\"Retrieve a list of strategies.\n\n :param goal: goal UUID or name to filter by.\n :param marker: pagination marker for large data sets.\n :param limit: maximum number of resources to return in a single result.\n :param sort_key: column to sort results by. Default: id.\n :param sort_dir: direction to sort. \"asc\" or \"desc\". Default: asc.\n \"\"\"\n context = pecan.request.context\n policy.enforce(context, 'strategy:get_all',\n action='strategy:get_all')\n filters = {}\n if goal:\n if common_utils.is_uuid_like(goal):\n filters['goal_uuid'] = goal\n else:\n filters['goal_name'] = goal\n\n return self._get_strategies_collection(\n filters, marker, limit, sort_key, sort_dir)\n\n @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text, int,\n wtypes.text, wtypes.text)\n def detail(self, goal=None, marker=None, limit=None,\n sort_key='id', sort_dir='asc'):\n \"\"\"Retrieve a list of strategies with detail.\n\n :param goal: goal UUID or name to filter by.\n :param marker: pagination marker for large data sets.\n :param limit: maximum number of resources to return in a single result.\n :param sort_key: column to sort results by. Default: id.\n :param sort_dir: direction to sort. \"asc\" or \"desc\". Default: asc.\n \"\"\"\n context = pecan.request.context\n policy.enforce(context, 'strategy:detail',\n action='strategy:detail')\n # NOTE(lucasagomes): /detail should only work agaist collections\n parent = pecan.request.path.split('/')[:-1][-1]\n if parent != \"strategies\":\n raise exception.HTTPNotFound\n expand = True\n resource_url = '/'.join(['strategies', 'detail'])\n\n filters = {}\n if goal:\n if common_utils.is_uuid_like(goal):\n filters['goal_uuid'] = goal\n else:\n filters['goal_name'] = goal\n\n return self._get_strategies_collection(\n filters, marker, limit, sort_key, sort_dir, expand, resource_url)\n\n @wsme_pecan.wsexpose(wtypes.text, wtypes.text)\n def state(self, strategy):\n \"\"\"Retrieve an information about strategy requirements.\n\n :param strategy: name of the strategy.\n \"\"\"\n context = pecan.request.context\n policy.enforce(context, 'strategy:state', action='strategy:state')\n parents = pecan.request.path.split('/')[:-1]\n if parents[-2] != \"strategies\":\n raise exception.HTTPNotFound\n rpc_strategy = api_utils.get_resource('Strategy', strategy)\n de_client = rpcapi.DecisionEngineAPI()\n strategy_state = de_client.get_strategy_info(context,\n rpc_strategy.name)\n strategy_state.extend([{\n 'type': 'Name', 'state': rpc_strategy.name,\n 'mandatory': '', 'comment': ''}])\n return strategy_state\n\n @wsme_pecan.wsexpose(Strategy, wtypes.text)\n def get_one(self, strategy):\n \"\"\"Retrieve information about the given strategy.\n\n :param strategy: UUID or name of the strategy.\n \"\"\"\n if self.from_strategies:\n raise exception.OperationNotPermitted\n\n context = pecan.request.context\n rpc_strategy = api_utils.get_resource('Strategy', strategy)\n policy.enforce(context, 'strategy:get', rpc_strategy,\n action='strategy:get')\n\n return 
Strategy.convert_with_links(rpc_strategy)\n","sub_path":"watcher/api/controllers/v1/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":12595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"356960819","text":"#Author: Emma Carli \n# =============================================================================\n# This code modifies the energies of simulations datasets with different bracketing functions. It does not modify the IRFs\n# =============================================================================\n\n\nfrom IPython import get_ipython\nget_ipython().magic('reset -f') \n\n\n#%%\n\nfrom astropy.io import fits\nimport os\n\nfrom scaling_functions import gradient, step\nfrom scipy import log10\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ctools\nimport gammalib\n\n#%%\n\ncaldb = 'prod3b-v2' #calibration database\norig_irf = 'South_z40_50h' \nfovradius = 0.8 #radius of the field of view, degrees\noffset = 0.5\nra = 83.6331 + offset \ndec = 22.0145\n\n\n#%%\n\n#Get calibration database and original IRF into gammalib format\ncaldb_gammalib = gammalib.GCaldb('cta', caldb) \nirf_gammalib = gammalib.GCTAResponseIrf(orig_irf, caldb_gammalib)\n\nemin = 0.1 #TeV #set minimum at 100Gev \n\n#find smallest max energy range\nemaxs = []\nemaxs.append(irf_gammalib.background().table().axis_hi(irf_gammalib.background().table().axis('ENERG'),irf_gammalib.background().table().axis_bins(irf_gammalib.background().table().axis('ENERG'))-1))\nemaxs.append(irf_gammalib.aeff().table().axis_hi(irf_gammalib.aeff().table().axis('ENERG'), irf_gammalib.aeff().table().axis_bins(irf_gammalib.aeff().table().axis('ENERG'))-1))\nemaxs.append(irf_gammalib.psf().table().axis_hi(irf_gammalib.psf().table().axis('ENERG'), irf_gammalib.psf().table().axis_bins(irf_gammalib.psf().table().axis('ENERG'))-1))\nemaxs.append(irf_gammalib.edisp().table().axis_hi(irf_gammalib.edisp().table().axis('ENERG'), irf_gammalib.edisp().table().axis_bins(irf_gammalib.edisp().table().axis('ENERG'))-1))\nemax = min(emaxs) #in TeV\n\nbreakpoints = log10(((0.15,0.11), (5.0, 0.06))) #breakpoints of the step function, as per paper\n\n\nbins = np.logspace(log10(emin), log10(emax), num=int(log10(emax/emin)*10)) #create ten bins per energy decade in the range for histogram\nbins = [log10(x) for x in bins]\n\n#%%\n\nfor cutoff in ['1','2','3']: #50, 100, 200 TeV\n \n for flux in ['a', 'b', 'c', 'd']: #20, 40, 60, 80 mCrab\n \n for function in ['constant', 'step', 'gradient']: #bracketing functions \n \n \n function_signs = ['minus','plus']\n function_components = ['EDisp'] \n \n for sign in function_signs:\n for component in function_components:\n \n irf = sign + '_' +function + '_' + component + '_' + orig_irf\n \n \n if sign == 'minus':\n scale = -0.06\n else:\n scale = 0.06\n \n #%%\n name = 'source'+cutoff+flux #name of the source\n \n wd = '/cta/carli/CPPM_Internship/Simulations_and_Analyses/'+ irf+'/'+name+'/'\n os.makedirs(wd)\n os.chdir(wd)\n \n logfile = open('simulation.txt', 'w')\n \n fits_file = fits.open( '/cta/carli/CPPM_Internship/Simulations_and_Analyses/South_z40_50h/'+name+'/obs.fits')\n fits_file[1].data.sort(order='ENERGY')\n \n if function == 'constant':\n fits_file[1].data['ENERGY'] = fits_file[1].data['ENERGY'] * (1 + scale)\n if function == 'step':\n fits_file[1].data['ENERGY'] = fits_file[1].data['ENERGY'] * (1 + (scale*step(log10(fits_file[1].data['ENERGY']),breakpoints)))\n if function == 'gradient':\n 
fits_file[1].data['ENERGY'] = fits_file[1].data['ENERGY'] * (1 + (scale*gradient(log10(fits_file[1].data['ENERGY']), log10(emin), log10(emax))))\n \n fits_file.writeto(wd+'obs.fits')\n \n #Plot histogram\n fig1 = plt.figure()\n ax1 = plt.gca()\n ax1.set_yscale('log')\n ax1.set_xlabel('Log10(Energy) (TeV)')\n ax1.set_ylabel('Number of events')\n \n ax1.hist(log10(fits_file[1].data['ENERGY']), bins=bins, range=(log10(emin),log10(emax)), edgecolor='black', color='0' )\n ax1.set_title('Observed counts')\n fig1.savefig(wd+'energy_histogram.pdf')\n plt.close(fig1)\n \n \n #%%\n \n #Generate skymap of the new \"simulation\"\n skymap = ctools.ctskymap()\n skymap['inobs'] = wd+'obs.fits'\n skymap[\"caldb\"] = caldb\n skymap['irf'] = irf\n skymap[\"usepnt\"] = True #use the simulation's pointing to find sky position\n skymap[\"binsz\"] = 0.02 #spatial resolution\n skymap[\"nxpix\"] = int(2*fovradius/float(skymap['binsz'].value()))\n skymap[\"nypix\"] = int(skymap['nxpix'].value())\n skymap[\"emin\"] = emin\n skymap[\"emax\"] = emax\n skymap[\"outmap\"] = wd+'skymap.fits'\n skymap['chatter'] = 4\n skymap[\"bkgsubtract\"] = \"NONE\" #or \"IRF\"\n \n #%%\n \n print(skymap)\n \n #%%\n \n skymap.logFileOpen()\n skymap.execute()\n logfile.write('ctskymap:' + str(skymap.telapse()) + 'seconds \\n')\n skymap.logFileClose()\n \n \n #%%\n #Plot the sky map of the simulation\n \n from matplotlib.colors import SymLogNorm\n #The SymLogNorm scale is a Log scale for both positive and negative values (for background subtraction)\n \n fig2 = plt.figure()\n ax2 = plt.gca()\n image = ax2.imshow(skymap.skymap().array(),origin='lower',\n extent=[ra+fovradius,ra-fovradius,dec-fovradius,dec+fovradius],\n norm=SymLogNorm(1), cmap='viridis' ) \n ax2.set_xlabel('R.A. (deg)')\n ax2.set_ylabel('Dec (deg)')\n cbar = plt.colorbar(image, ax=ax2)\n cbar.set_label('Counts')\n ax2.set_title('Map of observed counts') \n fig2.savefig(wd+'skymap.pdf')\n plt.close(fig2)\n \n #%%\n \n logfile.close()\n \n \n\n ","sub_path":"Codes/3_simulations_energy_modifications.py","file_name":"3_simulations_energy_modifications.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"396532119","text":"from django.shortcuts import render\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom utils.utils import *\nfrom django.db.models import Q\nfrom .models import *\nfrom user_backend.models import *\nimport json\nimport datetime\nfrom collections import defaultdict\nfrom django.db import transaction\nimport traceback\n\n# Create your views here.\ntry:\n redis_database.initDatabase()\nexcept:\n pass\n\nclass Login(APIView):\n def get(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n\n def post(self):\n self.checkMsg(\"username\", \"password\")\n user = auth.authenticate(request=self.request, username=self.msg[\"username\"], password=self.msg[\"password\"])\n if not user:\n raise MessageError(0, 'login failed')\n auth.login(self.request, user)\n\n\nclass Logout(APIView):\n def post(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'logout failed')\n auth.logout(self.request)\n\n\nclass UserUpdate(APIView):\n def get(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n pass\n\n\nclass UserList(APIView):\n def post(self):\n if 
not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n options = self.getMultiOption('open_id', 'identity', 'user_type')\n users = User.objects.filter(**options).values('open_id', 'identity', 'user_type')\n users = list(users)\n return {'user_list': users}\n\n\nclass RoomCreate(APIView):\n\n def post(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n self.checkMsg(\"room_num\", \"piano_type\", \"price_0\", \"price_1\", \"price_2\", \"usable\", \"artEnsemble\")\n try:\n with transaction.atomic():\n new_piano_room = Room.objects.create(\n room_num=self.msg[\"room_num\"],\n piano_type=self.msg[\"piano_type\"],\n price_0=self.msg[\"price_0\"],\n price_1=self.msg[\"price_1\"],\n price_2=self.msg[\"price_2\"],\n usable=self.msg[\"usable\"],\n artEnsemble=self.msg[\"artEnsemble\"],\n )\n if not new_piano_room:\n raise MessageError(0, 'piano room create failed')\n except:\n raise MessageError(msg='piano room create failed')\n\n\nclass RoomEdit(APIView):\n\n def post(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n self.checkMsg(\"room_num\", \"piano_type\", \"price_0\", \n \"price_1\", \"price_2\", \"usable\", \"artEnsemble\")\n\n if not Room.objects.filter(room_num=self.msg['room_num']).update(\n piano_type=self.msg[\"piano_type\"],\n price_0=self.msg[\"price_0\"],\n price_1=self.msg[\"price_1\"],\n price_2=self.msg[\"price_2\"],\n usable=self.msg[\"usable\"],\n artEnsemble=self.msg[\"artEnsemble\"]\n ):\n raise MessageError(0, 'failed to edit the piano room')\n\n\nclass RoomDelete(APIView):\n\n def post(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n self.checkMsg(\"room_num\")\n if not Room.objects.filter(room_num=self.msg['room_num']).update(usable=False):\n raise MessageError(0, 'the room does not exist')\n\n\nclass RoomList(APIView):\n\n def post(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n try:\n search_word = ''\n if \"piano_type\" in self.msg:\n search_word += 'Q(piano_type=self.msg[\"piano_type\"])&'\n if \"room_num\" in self.msg:\n search_word += 'Q(room_num=self.msg[\"room_num\"])&'\n temp = Room.objects.filter(eval(search_word[:-1])).values(\n 'room_num', 'piano_type', 'price_0', 'price_1', 'price_2', 'usable', 'artEnsemble')\n temp = list(temp)\n dd = defaultdict(list)\n for item in temp:\n dd[item['room_num']].append(item)\n return {'room_list': dd}\n except:\n raise MessageError(0, 'failed to list piano rooms: no such piano type exists')\n\n\nclass OrderList(APIView):\n\n def post(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n count = self.checkMsgMultiOption(\"order_status\", \"identity\", \"start_date\", \"end_date\", \"order_id\", \"room_num\")\n try:\n if count:\n search_word = ''\n if \"room_num\" in self.msg:\n search_word += 'Q(piano_room=getModel(Room, room_num=self.msg[\"room_num\"]))&'\n if \"order_id\" in self.msg:\n search_word += 'Q(order_id=self.msg[\"order_id\"])&'\n if \"order_status\" in self.msg:\n search_word += 'Q(order_status=self.msg[\"order_status\"])&'\n temp = Order.objects.filter(eval(search_word[:-1])).values(\n 'piano_room__room_num', 'user_id', 'start_time', 'end_time', 'payment', 'order_id',\n 'create_time', 'order_status')\n else:\n temp = Order.objects.all().values(\n 'piano_room__room_num', 'user_id', 'start_time', 'end_time', 'payment', 'order_id',\n 'create_time', 'order_status')\n temp = list(temp)\n for 
item in temp:\n user = User.objects.get(id=item['user_id'])\n if user.identity:\n item['user_id'] = user.identity\n else:\n item['user_id'] = user.open_id\n item['start_time'] = item['start_time'].timestamp()\n item['end_time'] = item['end_time'].timestamp()\n item['create_time'] = item['create_time'].timestamp()\n item['room_num'] = item['piano_room__room_num']\n return {'order_list': temp}\n except:\n traceback.print_exc()\n raise MessageError(0, 'fail to list order list')\n\n\nclass NewsList(APIView):\n\n def get(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n try:\n news_list = News.objects.all().values('newsTitle', 'id', 'publish_time')\n temp = list(news_list)\n for item in temp:\n item['publish_time'] = item['publish_time'].timestamp()\n item['news_id'] = item[\"id\"]\n return {'news_list': temp}\n except:\n raise MessageError(0, 'fail to list news')\n\n\nclass NewsCreate(APIView):\n\n def post(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n self.checkMsg(\"newsTitle\", \"newsContent\")\n new_news = News.objects.create(\n newsTitle=self.msg[\"newsTitle\"],\n newsContent=self.msg[\"newsContent\"],\n publish_time=timezone.now()\n )\n if not new_news:\n raise MessageError(0, 'fail to create')\n\n\nclass NewsDetail(APIView):\n\n def get(self):\n if not self.request.user.is_authenticated:\n raise MessageError(0, 'login first')\n self.checkMsg(\"news_id\")\n try:\n detail = News.objects.filter(id=self.msg[\"news_id\"]).values('newsTitle', 'newsContent', 'publish_time')\n temp = (list(detail))[0]\n temp['publish_time'] = temp['publish_time'].timestamp()\n return temp\n except:\n raise MessageError(0, 'does not exist')\n\n\n","sub_path":"admin_backend/admin_backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"516278682","text":"import salary\nfrom salary.create_workbook import create_sheet_with_headers, create_sheets_as_heads\n\n__author__ = 'gaa8664'\nfrom salary.get_sheet import get_sheet\nfrom salary import global_vars\nfrom salary import prod_sal\nfrom salary import create_maps\nfrom salary.connection import Connection\nfrom salary import fox_salary_map\nfrom salary import prod_salary_map\nfrom salary import parseworkbook\nimport openpyxl\n\n\ncomparison_workbook = None\ncomp_sheet = None\nhead_map = None\nrow_count = 1\nfound = False\n\n\ndef initialize():\n global head_map\n global comp_sheet\n global comparison_workbook\n comparison_workbook = openpyxl.Workbook()\n '''comp_sheet, head_map = create_sheet_with_headers(comparison_workbook, 'comparison', global_vars.FOX_PROD_SALHEADS.keys(),\n 'D://comparison.xlsx')'''\n comp_sheet = create_sheets_as_heads(comparison_workbook, global_vars.FOX_PROD_SALHEADS.keys(), 'D://comparison1.xlsx')\n\n\ndef sal_comparison(file_name, sheet, year, month):\n emp_sal_map = {}\n fox_salary = {}\n count = 0\n workbook = openpyxl.load_workbook(file_name)\n sheet = workbook.get_sheet_by_name(sheet)\n empcode_col = global_vars.HEAD_SEQUENCE['PR_NEWCODE']\n for row in sheet:\n global found_emp\n global row_count\n row_count = 1\n found_emp = False\n if found_emp == True:\n row_count += 1\n '''if count == 100:\n break'''\n fox_salary.clear()\n emp_sal_map.clear()\n # ignore first row of the sheet.\n if count == 0:\n count += 1\n continue\n empcode = row[empcode_col].value\n # employee salary result set\n emp_sal_resultset = 
prod_sal.get_employee_pro_sal(empcode, year, month)\n '''\n Process employee record from fox salary row and Prodigious Resultset to create two maps.\n 1) Foxpro Map\n 2) Prodigious Map\n Both maps will have common keys for direct and simple comparison. A common value is set to represent salary heads\n of both systems.\n '''\n if emp_sal_resultset:\n fox_salary = fox_salary_map.generate_fox_sal_map(row) # employee fox salary map\n #process result set to get salary head and value\n for val in emp_sal_resultset:\n emp_sal_map[val[2]] = val[0]\n #print(\"{1} -> {0},{2}\".format(val[0],val[1], val[2]))\n prod_salary = prod_salary_map.generate_prod_sal_map(emp_sal_map) # employee prod salary map\n compare_emp_salary(empcode, fox_salary, prod_salary,row,count)\n #print(count)\n count += 1\n Connection.connection_close()\n Connection.close_cursor()\n comparison_workbook.save('D://comparison1.xlsx')\n parseworkbook.process_workbook('D://comparison1.xlsx')\n\n\ndef enter_val_comparision_sheet(head, fox_val, prod_val, empcode, row_count):\n sheet_name_work = get_sheet_name(head)\n #print(\"{}-{}-{}-{}\".format(head,fox_val,prod_val,sheet_name_work))\n if sheet_name_work:\n sheet = comparison_workbook.get_sheet_by_name(sheet_name_work.lower())\n row_count = sheet.max_row\n row_count += 1\n sheet.cell(row=row_count, column=1).value = empcode\n sheet.cell(row = row_count, column=2).value = fox_val\n sheet.cell(row = row_count, column=3).value = prod_val\n '''comp_sheet.cell(row=row_count,column=1).value = empcode\n # map common head with fox heads\n fox_head = global_vars.COMMON_HEAD_FOX_HEAD_MAP[head]\n # get the column number where fox head will appear in excel.\n col_num = head_map.get(fox_head,None)\n if col_num:\n comp_sheet.cell(row=row_count, column=col_num).value = fox_val\n comp_sheet.cell(row=row_count, column=col_num+1).value = prod_val'''\n\n\ndef get_sheet_name(head):\n sheet_name = ''\n fox_head = global_vars.COMMON_HEAD_FOX_HEAD_MAP[head]\n prod_head = global_vars.FOX_PROD_SALHEADS.get(fox_head, None)\n if prod_head:\n sheet_name = prod_head.replace('9||','')\n loc = sheet_name.find('*')\n if loc > -1:\n str_array = sheet_name.split('*')\n sheet_name = str_array[0]\n return sheet_name\n\n\ndef compare_emp_salary(empcode, fox_salary, prod_salary, row,count):\n for head in fox_salary:\n prod_amount = prod_salary.get(head, 0.0)\n fox_amount = fox_salary[head]\n if head == 'Level':\n if fox_amount:\n try:\n fox_amount = float(fox_amount.replace('Level-', ''))\n if prod_amount == 'CONSOLIDATED SALARY' or prod_amount == 'FIXED STIPEND':\n prod_amount = '0'\n except:\n pass\n if fox_amount != prod_amount:\n\n enter_val_comparision_sheet(head, fox_amount, prod_amount,empcode, count)\n elif float(fox_amount) != float(prod_amount):\n print(empcode)\n enter_val_comparision_sheet(head, fox_amount, prod_amount,empcode, count)\n\n\nif __name__ == '__main__':\n file_name = 'D:\\Software\\Software\\HR Module\\Salary\\Salary April 2017.xlsx'\n sheet_name = 'PAYROLL'\n # create maps\n create_maps.head_mapping(file_name,'Map')\n create_maps.sequence_mapping(file_name, sheet_name)\n create_maps.common_head_fox_mapping()\n # initialize comparison sheet\n initialize()\n # compare salary\n sal_comparison(file_name, sheet_name, '2017', '04')\n\n\n","sub_path":"LearningPython/salary/gross_sal_msg.py","file_name":"gross_sal_msg.py","file_ext":"py","file_size_in_byte":5413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"513706738","text":"import quandl\nimport numpy 
as np\nimport math\nfrom sklearn import preprocessing, cross_validation,svm\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\n# df = quandl.get(\"EOD/KO\", authtoken=\"37T_cbaygisqktDcF7zZ\",start_date = \"2016-1-1\")\n\ncolumn = ['High','Low','Volume','Adj Close']\nlabel = 'Adj Close'\n\ndata = pd.read_csv(\"D:/google_driver/Code/python/machine_learning_Web_Toturial/lay_du_lieu_internet/NASDAQ_yahoo.csv\")\ndf = data[['High','Low','Volume','Adj Close']]\ndf = df.astype('float32')\nscaler = MinMaxScaler(feature_range=(0,1))\ndf = scaler.fit_transform(df)\ndf = np.array(df)\n\nscalerLabel = MinMaxScaler(feature_range=(0,1))\ndataLabel = data[['Adj Close']]\ndataLabel = scalerLabel.fit_transform(dataLabel)\n\ntrain_size = int(len(df)*0.8)\ntest_size = len(df) - train_size\ntrain,test = df[0:train_size,:],df[train_size:len(data),:]\n\n\ndef create_dataset(dataset,look_back = 1,predictNext = 0):\n dataX, dataY = [],[]\n for i in range(len(dataset)-look_back-predictNext):\n a = dataset[i:(i+look_back)]\n a = np.reshape(a, (a.shape[0]*a.shape[1]))\n dataX.append(a)\n dataY.append(dataset[i+look_back+predictNext,3])\n dataY = np.reshape(dataY, (len(dataY),1))\n return np.array(dataX),np.array(dataY)\n\n\nlook_back = 1\npredictNext = 0\nX_train,y_train = create_dataset(train,look_back,predictNext)\nX_test,y_test = create_dataset(test,look_back,predictNext)\n\n\n# LR = LinearRegression()\n# LR.fit(X_train,y_train)\nmodel = svm.SVR(kernel='poly')\nmodel.fit(X_train,y_train)\naccuracy = model.score(X_test,y_test)\nprint(accuracy)\n\n\n# forecast_set = LR.predict(X_test)\n# k= 0\n# for i in range(len(y_test)-2):\n# if((y_test[i]-y_test[i+1])*(forecast_set[i]-forecast_set[i+1]))>0:\n# k+=1\n# print(y_test[i + 1], \" \", forecast_set[i + 1], \" Dung\")\n# continue\n# print(y_test[i+1],\" \",forecast_set[i+1],\" Sai\")\n# print(k/(len(y_test)-1))\n# print(len(y_test))\n\ntestPredict = model.predict(X_test)\ntrainPredict = model.predict(X_train)\n\ntrainPredict = np.reshape(trainPredict, (len(trainPredict),1))\ntestPredict = np.reshape(testPredict, (len(testPredict),1))\n\n\ntrainPredict = scalerLabel.inverse_transform(trainPredict)\ntestPredict = scalerLabel.inverse_transform(testPredict)\n\n\n#dataPlot = data[['Adj Close']]\ndataPlot = scalerLabel.inverse_transform(dataLabel)\n\ntrainPlot = np.empty_like(dataPlot)\ntrainPlot[:,:] = np.nan\ntrainPlot[look_back+predictNext:train_size,:] = trainPredict\n\n\n\ntestPlot = np.empty_like(dataPlot)\ntestPlot[:,:] = np.nan\ntestPlot[train_size+look_back+predictNext:len(dataPlot),:] = testPredict\n\n\n\nplt.plot(dataPlot)\nplt.plot(trainPlot)\nplt.plot(testPlot)\nplt.legend(loc = 4)\nplt.title('Linear')\nplt.show()\n\n","sub_path":"one_layer_predict_baocao.py","file_name":"one_layer_predict_baocao.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"152152629","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2020 Jarosław Stańczyk \nSource code presented in the lectures \"Python programming language\"\n\n03/filter.py\n\"\"\"\nfrom __future__ import print_function\n\n\ndef even(x):\n\tif x % 2 == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\nif __name__ == \"__main__\":\n\tprint(list(filter(even, range(0, 30))))\n\tprint(list(filter(lambda x: x % 2 == 0, range(0, 30))))\n\n# 
eof.\n","sub_path":"03/02.filter.py","file_name":"02.filter.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"336138371","text":"from tusitio.models import Seccion, Articulo\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.db.models import Q\nimport random\nimport operator\n\ndef home(request):\n\n #secciones\n secciones = Seccion.objects.all().order_by('id')\n\n #ultimos articulos para slider\n articulos = Articulo.objects.all().order_by('id').reverse()[:15]\n\n return render_to_response('inicio.html',\n {\n 'secciones': secciones,\n 'articulos': articulos,\n 'menu_activo': 'Inicio'\n },\n context_instance=RequestContext(request))\n\n\n\ndef seccion(request, id_seccion):\n\n #secciones\n secciones = Seccion.objects.all().order_by('id')\n\n #Articulos\n articulos = Articulo.objects.filter(secciones=id_seccion).order_by('id').reverse()\n\n #seccion activa\n objeto_menu_activo = Seccion.objects.get(pk=id_seccion)\n menu_activo = objeto_menu_activo.nombre\n titulo = objeto_menu_activo.titulo\n\n return render_to_response('lista_seccion.html',\n {\n 'secciones': secciones,\n 'articulos': articulos,\n 'menu_activo': menu_activo,\n 'titulo': titulo,\n 'seccion': id_seccion\n },\n context_instance=RequestContext(request))\n\ndef articulo(request, id_articulo):\n\n #secciones\n sec = Seccion.objects.all().order_by('id')\n\n #Articulo\n articulo = get_object_or_404(Articulo, pk=id_articulo)\n\n secciones = list()\n\n for s in articulo.secciones.iterator():\n secciones.append(s.id)\n \n articulos_relacionados = list(Articulo.objects.filter(secciones__in= secciones).exclude(pk=id_articulo))\n random.shuffle(articulos_relacionados)\n articulos_relacionados = articulos_relacionados[:4]\n \n return render_to_response('articulo.html',\n {\n 'secciones': sec,\n 'articulo': articulo,\n 'relacionados': articulos_relacionados,\n },\n context_instance=RequestContext(request)) \n\ndef search(request):\n\n #secciones\n secciones = Seccion.objects.all().order_by('id')\n #obtenemos el string de busqueda\n query = request.GET.get('q', '')\n #obtenemos la lista de plabras que tiene q\n busqueda = query.split()\n #obtenemmos la seccion sobre la que se busca\n seccion = request.GET.get('seccion','') \n #definimos el menu activo que despues se va a reemplazar\n menu_activo = ''\n #palabras excluidas\n lista_excluidas= ['el', 'la', 'los', 'las', 'un', 'una', 'unos', 'unas', 'al', 'del', 'de', 'y']\n #definimos la lista donde guardaremos los queryset para la busqueda\n lista_qset=[] \n\n if busqueda:\n\n #por cada palabra creamos un qset, que lo anadimos a la lista\n for q in busqueda:\n\n #verificamos que la palabra no sea una palabra \"trivial\"\n if q not in lista_excluidas:\n qset = (\n Q(nombre__icontains=q) |\n Q(descripcion__icontains=q) |\n Q(marca__icontains=q) |\n Q(secciones__nombre__icontains=q)\n )\n\n lista_qset.append(qset)\n \n #buscamos por la lista de qset, verificando que no este vacia\n if len(lista_qset) != 0:\n results = Articulo.objects.filter(reduce(operator.or_,lista_qset)).distinct()\n \n #si estamos en una seccion filtramos\n if seccion != '':\n results = results.filter(secciones=seccion)\n menu_activo = Seccion.objects.get(pk=seccion).nombre\n else:\n results = []\n else:\n results = []\n return render_to_response('lista_seccion.html', \n {\n 'secciones': secciones,\n 'articulos': results,\n 'titulo': 'Resultados para ' 
+ '\"'+query +'\"', \n 'query': query,\n 'seccion': seccion,\n 'menu_activo': menu_activo\n },\n context_instance=RequestContext(request))","sub_path":"tusitio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"522489748","text":"\nimport glob\nimport pickle\nimport numpy as np\n\ncombine_file_name = 'v23_AD/combined.pkl'\npkl_files = glob.glob('v23_AD/*')\n\nX_combine = []\nY_combine = []\n\nfor i in range(750):\n skip = False\n try:\n with open(pkl_files[i], 'rb') as f:\n data = pickle.load(f)\n X_train = data['X_train']\n Y_train = data['y_train']\n except Exception:\n print('Failed to load : ' + pkl_files[i])\n skip = True\n \n if not skip:\n for j in range(np.asarray(X_train).shape[0]):\n X_combine.append(X_train[j])\n Y_combine.append(Y_train[j])\n\n# X_combine=np.asarray(X_combine) #np arrays are huge on disk, save as list\n# Y_combine=np.asarray(Y_combine)\n\n\n\n# Save Data\nimport pickle\ndata = {}\ndata['X_train'],data['Y_train'] = X_combine,Y_combine\nwith open(combine_file_name, 'wb') as f:\n pickle.dump(data,f)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# EoF","sub_path":"dl-limitedview-prior/combine_AD_dataset.py","file_name":"combine_AD_dataset.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"32544216","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport requests\n\nfrom exception import AddressException\n\n\nclass Address(object):\n\n def __init__(self, address=None, location=None):\n self._address = address\n self._location = location\n self._component = None\n self._base_url = \"http://api.map.baidu.com\"\n self._params = {'output': 'json', 'ak': 'GRlG8i8IeHcupO8GR77s5LHGPk27kBlT'}\n\n @property\n def address(self):\n return self._get_address()\n\n @property\n def location(self):\n return self._location if self._location else self._get_location()\n\n @property\n def province(self):\n self._set_address()\n return self._component['province']\n\n @property\n def city(self):\n self._set_address()\n return self._component['city']\n\n @property\n def district(self):\n self._set_address()\n return self._component['district']\n\n def route(self, address):\n return self._get_route_info(address)\n\n def _set_address(self):\n if not self._component:\n self._address = self._get_address()\n\n def _get_route_info(self, address):\n func_url = '/routematrix/v2/driving'\n origin = ','.join(map(str, self.location))\n destination = ','.join(map(str, address.location))\n params = self._params.copy()\n params.update(origins=origin, destinations=destination)\n\n result = requests.get(self._base_url + func_url, params=params)\n data = json.loads(result.text)\n if data['status'] == 0:\n return data['result'][0]['duration']['value'], data['result'][0]['distance']['value']\n else:\n raise AddressException(40000)\n\n def _get_location(self):\n func_url = '/geocoder/v2/'\n params = self._params.copy()\n params.update(address=self._address)\n\n result = requests.get(self._base_url + func_url, params)\n data = json.loads(result.text)\n if data['status'] == 0:\n return data['result']['location']['lat'], data['result']['location']['lng'],\n else:\n raise AddressException(40001)\n\n def _get_address(self):\n if not self._component:\n func_url = '/geocoder/v2/'\n params = self._params.copy()\n params.update(location=\",\".join(map(str, self.location)))\n\n result = 
requests.get(self._base_url + func_url, params)\n data = json.loads(result.text)\n if data['status'] == 0:\n if data['result']['formatted_address']:\n self._component = data['result']['addressComponent']\n self._address = data['result']['formatted_address'] + data['result']['sematic_description']\n return self._address\n else:\n raise AddressException(40001)\n else:\n raise AddressException(40000)\n\n\nif __name__ == \"__main__\":\n # example\n\n point_1 = Address(address=\"金隅嘉华大厦\")\n print(point_1.address)\n print(point_1.location)\n print(point_1.province)\n print(point_1.city)\n print(point_1.district)\n print()\n\n point_2 = Address(location=(40.07871264866282, 116.33392797379916))\n print(point_2.address)\n print(point_2.location)\n print(point_2.province)\n print(point_2.city)\n print(point_2.district)\n print()\n\n # print(point_2.route(point_1))\n # print(Address.route(point_2, point_1))\n","sub_path":"address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"478785337","text":"\n#!/usr/bin/python\nimport os\n\ndef main():\n\tcommand = input(\"enter the command of which help you want : \")\n\tos.system(\"man \"+command+\"> file_one\")\n\n\tfd = open(\"file_one\")\n\tfile = fd.read()\n\tfd.close()\n\t\n\tos.system (\"rm -rf file_one\")\n\tprint (file)\n\nif __name__ == '__main__':\n\tmain() \n","sub_path":"Programs/Assignmets/LinuxManual.py","file_name":"LinuxManual.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"257003817","text":"import json\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nfrom pymongo import MongoClient\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom utilities.json2text import TextReader\n\n\nclass DataLoader:\n def __init__(self, dataset):\n \"\"\" Creates dataset file containg text data for urls \"\"\"\n self.dataset = dataset\n self.text_reader = TextReader()\n self.mongo_db = MongoClient('cme3-mongo01-qa.lv7.leaf.io')['cme']\n\n def access_element(self, arr, element):\n try:\n return arr[element]\n except IndexError:\n return None\n\n def segment_categories(self, arr):\n \"\"\" Given array, divide into category, subcategory, and subsubcategory \"\"\"\n cat = arr[0]\n subcat = self.access_element(arr, 1)\n subsubcat = self.access_element(arr, 2)\n return cat,subcat,subsubcat\n\n def read_data(self):\n print(\"Querying mongo...\")\n query = self.mongo_db.document.find({\"type\":\"Article\", \"_type\":\"document\", \"ad_category\": {\"$exists\": True} }, {\"sections\": 1, \"ad_category\": 1} )\n with open(self.dataset, 'w') as out:\n for i, doc in enumerate(query):\n text = self.text_reader.json2text(doc)[1]\n cat,subcat,subsubcat = self.segment_categories(doc['ad_category'])\n tmp_df = pd.DataFrame({'_id': [doc['_id']], 'text': [text], 'cat': [cat], 'subcat': [subcat], 'subsubcat': [subsubcat]})\n if i == 0:\n tmp_df.to_csv(out, index=False)\n else:\n tmp_df.to_csv(out, header=False, index=False)\n\n def read_data_parquet(self):\n print(\"Querying mongo...\")\n query = self.mongo_db.document.find({\"type\":\"Article\", \"_type\":\"document\", \"ad_category\": {\"$exists\": True} }, {\"sections\": 1, \"ad_category\": 1} )\n ids = []\n texts = []\n cats = []\n subcats = []\n 
subsubcats = []\n for doc in query:\n text = self.text_reader.json2text(doc)[1].replace('\\n', ' ')\n cat,subcat,subsubcat = self.segment_categories(doc['ad_category'])\n ids.append(doc['_id'])\n texts.append(text)\n cats.append(cat)\n subcats.append(subcat)\n subsubcats.append(subsubcat)\n tmp_df = pd.DataFrame({'_id': ids, 'text': texts, 'cat': cats, 'subcat': subcats, 'subsubcat': subsubcats})\n table = pa.Table.from_pandas(tmp_df)\n with open('./data/dataset.parquet', table.schema) as writer:\n writer.write_table(table)\n\n def convert_to_parquet(self, file, output):\n df = pd.read_csv(file)\n table = pa.Table.from_pandas(df)\n with pq.ParquetWriter(f'./data/{output}', table.schema) as writer:\n writer.write_table(table)\n\n\n\n\n\nclass XGBoostLoader:\n def __init__(self, level):\n self.level = level\n self.dataset = './data/dataset.csv'\n self.dataset_parquet = './data/dataset.parquet'\n self.encoder = LabelEncoder()\n self.vectorizer = TfidfVectorizer()\n\n\n def read_data(self):\n \"\"\" Reads data in chunks \"\"\"\n data = pd.read_csv(self.dataset, chunksize=10000)\n return data\n\n def write_parquet(self, chunked_df, filename):\n \"\"\" Writes dataframe to parquet file \"\"\"\n with pq.ParquetWriter(f'./data/{filename}', self.schema) as writer:\n for chunk in chunked_df:\n tmp_table = pa.Table.from_pandas(chunk)\n writer.write_table(tmp_table)\n\n def load_parquet_chunks(self, file):\n \"\"\" Loads parquet file in chunks\n\n Yields\n -----------\n parquet_chunk: pd.Dataframe\n\n \"\"\"\n parquet_file = pq.ParquetFile(file)\n for chunk_num in range(parquet_file.num_row_groups):\n yield parquet_file.read_row_group(chunk_num).to_pandas()\n\n def write_mapping(self):\n \"\"\" Write label mapping to file \"\"\"\n mapping = dict(zip(self.encoder.classes_, list(map(lambda x: str(x), self.encoder.transform(self.encoder.classes_) ) ) ))\n with open('./data/mapping.json', 'w', encoding='utf-8') as out:\n json.dump(mapping, out)\n\n\n def encode_labels(self):\n \"\"\" Adds a column to dataframe representing encoded labels\n\n Parameters\n -----------\n level: string\n String denoting cat, subcat, or subsubcat to encode\n\n Returns\n -----------\n df: pd.Dataframe\n Pandas dataframe with new column added\n \"\"\"\n df = pd.read_parquet(self.dataset_parquet, columns=['_id', 'text', self.level])\n df.dropna(subset=[self.level], inplace=True)\n self.encoder.fit(df[self.level])\n self.write_mapping()\n df[f'{self.level}_labels'] = self.encoder.transform(df[self.level])\n self.schema = pa.Table.from_pandas(df).schema\n return df\n\n def split_dataset(self, df):\n \"\"\" Splits dataset into train-val-test\n\n Parameters\n -----------\n df: pd.Dataframe\n Dataframe containing entire dataset\n\n Returns\n ----------\n train_df: pd.Dataframe\n Train dataframe.\n val_df: pd.Dataframe\n Validation dataframe.\n test_df: pd.Dataframe\n Test dataframe.\n \"\"\"\n train_df, val_df = train_test_split(df, test_size=0.3, random_state=42)\n val_df, test_df = train_test_split(val_df, test_size=0.5, random_state=42)\n return train_df, val_df, test_df\n\n\n def prepare(self, num_splits):\n \"\"\" Prepares data with train-val-test split.\n\n Returns\n --------\n None - Will write splits to data directory\n \"\"\"\n df = self.encode_labels()\n train_df, val_df, test_df = self.split_dataset(df)\n\n self.write_parquet(np.array_split(train_df, num_splits), 'traindata.parquet')\n self.write_parquet(np.array_split(val_df, num_splits), 'valdata.parquet')\n self.write_parquet(np.array_split(test_df, num_splits), 
'testdata.parquet')\n\n\n\n def embedding(self, df, train=False):\n \"\"\" Creates embedding matrix for text. Must have 'text' and 'level_labels'\n column.\n\n Parameters\n -----------\n df: pd.Dataframe\n Pandas dataframe with shape (m x n). m is number of articles.\n\n Returns\n -----------\n embedding_matrix: np.array\n Numpy array with dimensions (m x d). m is number of articles and d\n is embedding dimensions.\n labels: pd.Series\n Series with dimensions (m x 1).\n \"\"\"\n if train == True:\n df.dropna(subset=['text'], inplace=True)\n embedding_matrix = self.vectorizer.fit(df['text'])\n return\n embedding_matrix = self.vectorizer.transform(df['text'])\n labels = df[f'{self.level}_labels']\n return embedding_matrix, labels\n\n\n\n\n\nclass NeuralLoader(XGBoostLoader):\n def __init__(self, level):\n self.level = level\n self.dataset = './data/dataset.csv'\n self.dataset_parquet = './data/dataset.parquet'\n self.encoder = LabelEncoder()\n\n\n def embedding(self, df):\n \"\"\" Creates embedding matrix for text. Must have 'text' and 'level_labels'\n column.\n\n Parameters\n -----------\n df: pd.Dataframe\n Pandas dataframe with shape (m x n). m is number of articles.\n\n Returns\n -----------\n text: np.array\n Numpy array with text.\n labels: pd.Series\n Series with dimensions (m x 1).\n \"\"\"\n df.dropna(subset=['text'], inplace=True)\n text = np.array(df['text'])\n labels = np.array(df[f'{self.level}_labels'])\n return text, labels\n","sub_path":"utilities/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":7903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"48405311","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 7 07:50:01 2019\n\n@author: eduardo\n\"\"\"\n# =============================================================================\n#\n# TODOS OS IMPORTS UTILIZADOS NO SISTEMA\n#\n# =============================================================================\nimport matplotlib\nimport numpy\nimport sys\nimport math\n\nimport tkinter\n\nimport matplotlib.backends.tkagg as tkagg\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\n\n# Se tiver alguma janela aberta, feche todas antes de executar o programa.\n#close(\"all\")\n\n\n# Cria a janelaFigureCanvasTkAgg\nroot = tkinter.Tk()\n\ndef printa():\n # Vetor de Alpha.\n # ---------------\n vetor_alpha = [flag_alpha_menos_9.get(), \n flag_alpha_menos_6.get(),\n flag_alpha_menos_3.get(),\n flag_alpha_zero.get(),\n flag_alpha_mais_3.get(),\n flag_alpha_mais_6.get(),\n flag_alpha_mais_9.get()]\n \n # Vetor de Beta.\n # ---------------\n vetor_beta = [flag_beta_zero.get(), \n flag_beta_mais_3.get(),\n flag_beta_mais_6.get()]\n \n # Vetor de Mach.\n # --------------\n vetor_mach = [flag_mach_05.get()] \n \n \n # Contador de variáveis.\n # ----------------------\n cont_alpha = 0\n cont_beta = 0\n cont_mach = 0\n index_alpha = []\n index_beta = []\n index_mach = []\n \n for i in range(7):\n if (vetor_alpha[i] == 1):\n cont_alpha = (cont_alpha + 1)\n index_alpha.append(i)\n \n for i in range(3):\n if (vetor_beta[i] == 1):\n cont_beta = (cont_beta + 1)\n index_beta.append(i)\n \n for i in range(1):\n if (vetor_mach[i] == 1):\n cont_mach = (cont_mach + 1)\n index_mach.append(i)\n \n \n # Lógica para implementação dos parâmetos para plot único.\n # --------------------------------------------------------\n if (cont_alpha > 1) or (cont_beta > 1) or (cont_mach > 1) :\n 
print(\"Mais de um está selecionado por vez.\")\n \n \n \n \n\n\n# =============================================================================\n#\n# Separa os frames do sistema. A parte da esqueda mostrará o seletor e a parte \n# da direita mostrará os gráficos.\n#\n# =============================================================================\nframe_de_alpha = tkinter.Frame(root)\nframe_de_alpha.pack(side = tkinter.LEFT,\n fill = tkinter.Y)\n\nframe_de_beta = tkinter.Frame(root)\nframe_de_beta.pack(side = tkinter.LEFT,\n fill = tkinter.Y);\n\nframe_de_mach = tkinter.Frame(root)\nframe_de_mach.pack(side = tkinter.LEFT,\n fill = tkinter.Y);\n \nframe_plota = tkinter.Frame(root)\nframe_plota.pack(side = tkinter.LEFT,\n fill = tkinter.Y); \n \n \n# =============================================================================\n#\n# ALPHA\n#\n# =============================================================================\nalpha_titulo_string = tkinter.StringVar()\nalpha = tkinter.Label(frame_de_alpha,\n textvariable = alpha_titulo_string,\n justify = \"left\")\nalpha_titulo_string.set(\"ALPHA\")\nalpha.pack()\n\n# Ângulos -- (-9)\nflag_alpha_menos_9 = tkinter.IntVar()\nalpha_menos_9 = tkinter.Checkbutton(frame_de_alpha,\n text = \"-9\",\n variable = flag_alpha_menos_9,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nalpha_menos_9.pack()\n\n# Ângulos -- (-6)\nflag_alpha_menos_6 = tkinter.IntVar()\nalpha_menos_6 = tkinter.Checkbutton(frame_de_alpha,\n text = \"-6\",\n variable = flag_alpha_menos_6,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nalpha_menos_6.pack()\n\n# Ângulos -- (-3)\nflag_alpha_menos_3 = tkinter.IntVar()\nalpha_menos_3 = tkinter.Checkbutton(frame_de_alpha,\n text = \"-3\",\n variable = flag_alpha_menos_3,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nalpha_menos_3.pack()\n\n# Ângulos -- (0)\nflag_alpha_zero = tkinter.IntVar()\nalpha_zero = tkinter.Checkbutton(frame_de_alpha,\n text = \"0\",\n variable = flag_alpha_zero,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nalpha_zero.pack()\n\n# Ângulos -- (+3)\nflag_alpha_mais_3 = tkinter.IntVar()\nalpha_mais_3 = tkinter.Checkbutton(frame_de_alpha,\n text = \"+3\",\n variable = flag_alpha_mais_3,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nalpha_mais_3.pack()\n\n# Ângulos -- (+6)\nflag_alpha_mais_6 = tkinter.IntVar()\nalpha_mais_6 = tkinter.Checkbutton(frame_de_alpha,\n text = \"+6\",\n variable = flag_alpha_mais_6,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nalpha_mais_6.pack()\n\n# Ângulos -- (+9)\nflag_alpha_mais_9 = tkinter.IntVar()\nalpha_mais_9 = tkinter.Checkbutton(frame_de_alpha,\n text = \"+9\",\n variable = flag_alpha_mais_9,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nalpha_mais_9.pack()\n\n# =============================================================================\n#\n# BETA\n#\n# =============================================================================\nbeta_titulo_string = tkinter.StringVar()\nbeta = tkinter.Label(frame_de_beta,\n textvariable = beta_titulo_string,\n justify = \"left\")\nbeta_titulo_string.set(\"BETA\")\nbeta.pack()\n\n# Ângulos -- (0)\nflag_beta_zero = tkinter.IntVar()\nbeta_zero = tkinter.Checkbutton(frame_de_beta,\n text = \"0\",\n variable = flag_beta_zero,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nbeta_zero.pack()\n\n# Ângulos -- (+3)\nflag_beta_mais_3 = tkinter.IntVar()\nbeta_mais_3 = tkinter.Checkbutton(frame_de_beta,\n text = \"+3\",\n variable = flag_beta_mais_3,\n 
onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nbeta_mais_3.pack()\n\n\n# Ângulos -- (+6)\nflag_beta_mais_6 = tkinter.IntVar()\nbeta_mais_6 = tkinter.Checkbutton(frame_de_beta,\n text = \"+6\",\n variable = flag_beta_mais_6,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nbeta_mais_6.pack()\n\n\n## =============================================================================\n##\n## MACH\n##\n## =============================================================================\nmach_titulo_string = tkinter.StringVar()\nmach = tkinter.Label(frame_de_mach,\n textvariable = mach_titulo_string,\n justify = \"left\")\nmach_titulo_string.set(\"BETA\")\nmach.pack()\n\n# Velocidade\nflag_mach_05 = tkinter.IntVar()\nmach_05 = tkinter.Checkbutton(frame_de_mach,\n text = \"0\",\n variable = flag_mach_05,\n onvalue = 1,\n offvalue = 0,\n height = 3,\n width = 10)\nmach_05.pack()\n\n# =============================================================================\n#\n# Gráfico\n#\n# =============================================================================\n# Gráfico\nGrafico = tkinter.Canvas(frame_plota,\n bg = \"white\",\n height = 600,\n width = 800)\nGrafico.pack()\n\n\nfig = matplotlib.figure.Figure(figsize = (8, 6), dpi = 100)\nax1 = fig.add_subplot(111)\nax1.plot([1, 2, 3, 4, 5, 6, 7, 8], [5, 6, 9, 2, 7, 1, 10, 11])\nax1.set_xlabel(r'$\\alpha$')\nfigure_canvas_tkagg = FigureCanvasTkAgg(fig, Grafico)\nfigure_canvas_tkagg.get_tk_widget().pack(fill = tkinter.BOTH)\n\n\n \n\n\n# =============================================================================\n#\n# Botão de Plot\n#\n# =============================================================================\nbotao = tkinter.Button(frame_de_alpha, text = \"PLOT\", command = printa)\nbotao.pack()\n\nroot.mainloop()\n","sub_path":"GUi em Python/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":10602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"311193539","text":"from operator import eq\nfrom functools import partial\nfrom flask import url_for\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_admin.form import rules\nfrom flask_admin.model.template import macro, EndpointLinkRowAction\nfrom jinja2 import escape\n\nfrom ..app import db\nfrom ..scheme.object import ObjectLink, ExternalObject, ExternalObjectType\nfrom ..scheme.platform import Platform, PlatformType\nfrom ..scheme.value import Value, ValueType\nfrom .utils import CustomAdminConverter\nfrom .filters import ExternalObjectPlatformFilter, ExternalObjectSimilarFilter\n\n\ndef links_formatter(route):\n def formatter(view, context, model, name):\n list = ''.join([\n '
<li><a href=\"{}\">{}</a></li>'\n .format(\n url_for(route),\n escape(item)\n )\n for item in getattr(model, name)\n ])\n return rules.Markup(\n '<ul>' + list + '</ul>
    '\n )\n return formatter\n\n\ndef link_formatter(route):\n def formatter(view, context, model, name):\n return rules.Markup(\n '{}'\n .format(\n url_for(route, id=getattr(model, name).id),\n escape(getattr(model, name))\n )\n )\n return formatter\n\n\ndef attribute_formatter(f=lambda _: True, show_score=False,\n filter=lambda _: True, limit=None):\n def formatter(view, context, model, name):\n attrs = [attr for attr in model.attributes\n if f(attr.type) and filter(attr.text)]\n if limit is not None:\n attrs = attrs[:limit]\n\n if context is None:\n return ','.join((attr.text for attr in attrs))\n\n m = context.resolve('attributes_link')\n return m(attributes=attrs, show_score=show_score)\n return formatter\n\n\ndef count_formatter(view, context, model, name):\n return len(getattr(model, name))\n\n\nclass DefaultView(ModelView):\n model_form_converter = CustomAdminConverter\n\n\nclass PlatformGroupView(DefaultView):\n column_list = ('id', 'name', 'platforms')\n column_formatters = {\n 'platforms': count_formatter\n }\n pass\n\n\nclass PlatformView(DefaultView):\n column_default_sort = 'id'\n can_view_details = True\n can_export = True\n column_list = ('id', 'type', 'group', 'name', 'slug', 'country', 'links')\n column_searchable_list = ['name', 'slug', 'country']\n column_filters = ['type', 'country', 'group']\n column_editable_list = ['country', 'name', 'slug', 'group']\n form_columns = ('type', 'group', 'name', 'slug', 'url', 'country',\n 'max_rating', 'base_score')\n column_formatters = {\n 'links': count_formatter\n }\n\n form_rules = [\n rules.FieldSet([\n rules.Field('type'),\n rules.Field('group'),\n rules.Field('name'),\n rules.Field('country'),\n rules.Field('url'),\n ], header=\"Basic info\"),\n rules.FieldSet([\n rules.Field('slug'),\n rules.Field('max_rating'),\n rules.Field('base_score'),\n ], header=\"Technical info\")\n ]\n\n\nclass ScrapView(DefaultView):\n can_view_details = True\n form_columns = ('platform', 'date', 'status', 'stats')\n column_list = ('platform', 'date', 'status')\n column_details_list = ('id', 'platform', 'date', 'status', 'stats')\n\n\nclass ValueView(DefaultView):\n column_list = ('id', 'external_object', 'type', 'text', 'score')\n column_details_list = ('id', 'external_object', 'type', 'text', 'score',\n 'sources')\n column_filters = ['type', 'score', 'external_object']\n column_searchable_list = ['text']\n can_view_details = True\n can_edit = False\n column_formatters = {\n 'external_object': link_formatter('allobject.details_view'),\n 'sources': links_formatter('valuesource.details_view'),\n }\n\n\nclass ValueSourceView(DefaultView):\n column_list = ('value', 'platform', 'score_factor')\n column_details_list = ('value', 'platform', 'score_factor')\n can_view_details = True\n can_edit = False\n column_formatters = {\n 'value': link_formatter('value.details_view'),\n 'platform': link_formatter('platform.details_view'),\n }\n\n\nclass ObjectLinkView(DefaultView):\n column_formatters = {\n 'external_object': link_formatter('allobject.details_view'),\n 'platform': link_formatter('platform.edit_view')\n }\n column_searchable_list = ['external_id']\n\n\nclass ExternalObjectView(DefaultView):\n def __init__(self, *args, **kwargs):\n kwargs['category'] = 'External Objects'\n kwargs['endpoint'] = kwargs['name'].lower() + 'object'\n super(ExternalObjectView, self).__init__(ExternalObject, *args,\n **kwargs)\n\n can_view_details = True\n can_export = True\n # TODO: Export formatters\n export_types = ['csv', 'xls']\n\n column_details_list = ('id', 'type', 
'attributes_list', 'links_list')\n column_formatters = {\n 'name': attribute_formatter(partial(eq, ValueType.NAME)),\n 'title': attribute_formatter(partial(eq, ValueType.TITLE)),\n 'date': attribute_formatter(partial(eq, ValueType.DATE),\n filter=lambda t: len(t) == 4),\n 'genres': attribute_formatter(partial(eq, ValueType.GENRES),\n limit=3),\n 'country': attribute_formatter(partial(eq, ValueType.COUNTRY),\n filter=lambda t: len(t) == 2),\n 'duration': attribute_formatter(partial(eq, ValueType.DURATION),\n filter=lambda t: t.replace('.', '')\n .isdigit(),\n limit=1),\n 'attributes': attribute_formatter(show_score=True),\n 'attributes_list': macro('attributes_list'),\n 'links_list': macro('links_list'),\n 'links': count_formatter\n }\n\n column_extra_row_actions = [\n EndpointLinkRowAction('glyphicon icon-search',\n 'allobject.index_view', id_arg='flt0_0')\n ]\n\n inline_models = (\n (Value, dict(form_columns=('id', 'type', 'text'))),\n (ObjectLink, dict(form_columns=('id', 'platform', 'external_id'))))\n\n column_filters = [\n ExternalObjectSimilarFilter(name='Similar'),\n ExternalObjectPlatformFilter(\n column=Platform.country,\n name='Platform',\n options=[\n (c[0], str(c[0]).upper())\n for c in db.session.query(Platform.country).distinct()\n ]\n ),\n ExternalObjectPlatformFilter(\n column=Platform.type,\n name='Platform',\n options=[(t.name, t.name) for t in PlatformType]\n ),\n ExternalObjectPlatformFilter(\n column=Platform.slug,\n name='Platform',\n options=[\n (p.slug, p.name)\n for p in db.session.query(Platform.slug, Platform.name)\n ]\n ),\n ExternalObjectPlatformFilter(\n column=Platform.slug,\n invert=True,\n name='Platform',\n options=[\n (p.slug, p.name)\n for p in db.session.query(Platform.slug, Platform.name)\n ]\n ),\n ]\n\n def get_query(self):\n q = super(ExternalObjectView, self).get_query()\n if hasattr(self, 'external_object_type'):\n q = q.filter(ExternalObject.type == self.external_object_type)\n return q\n\n def get_count_query(self):\n q = super(ExternalObjectView, self).get_count_query()\n if hasattr(self, 'external_object_type'):\n q = q.filter(ExternalObject.type == self.external_object_type)\n return q\n\n\nclass AllObjectView(ExternalObjectView):\n column_list = ('id', 'type', 'attributes', 'links')\n column_filters = ExternalObjectView.column_filters + ['type']\n\n\nclass PersonObjectView(ExternalObjectView):\n external_object_type = ExternalObjectType.PERSON\n column_list = ('id', 'name', 'links')\n\n\nclass MovieObjectView(ExternalObjectView):\n external_object_type = ExternalObjectType.MOVIE\n column_list = ('id', 'title', 'date', 'genres', 'duration', 'country',\n 'links')\n","sub_path":"matcher/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"355964362","text":"from RPi import GPIO\r\nfrom time import sleep\r\nfrom RPi import GPIO\r\n\r\nclass Lcd:\r\n def __init__(self, e, rs, d7, d6, d5, d4, d3=0, d2=0, d1=0, d0=0, achtbit=False, four_bit_instruction_hard_wired=True):\r\n self.__e = e\r\n self.__rs = rs\r\n self.__pinnen_array = [d7, d6, d5, d4, d3, d2, d1, d0]\r\n self.__delay = 0.015\r\n self.__achtbit = achtbit\r\n self.__four_bit_instruction_hard_wired =four_bit_instruction_hard_wired\r\n\r\n GPIO.setmode(GPIO.BCM)\r\n GPIO.setup(e, GPIO.OUT)\r\n GPIO.setup(rs, GPIO.OUT)\r\n\r\n for getal in range(0,8):\r\n GPIO.setup(self.__pinnen_array[getal], GPIO.OUT)\r\n\r\n\r\n\r\n def set_GPIO_bits(self, byte):\r\n 
shift_bit = 128\r\n\r\n if self.__achtbit == True or self.__four_bit_instruction_hard_wired == False:\r\n for getal in range(0, 8):\r\n te_sturen = byte & shift_bit\r\n GPIO.output(self.__pinnen_array[getal], te_sturen)\r\n # print(\"Er is vertuurd: \" + str(te_sturen) + \" naar pin: \" + str(self.__pinnen_array[getal]))\r\n shift_bit = shift_bit >> 1\r\n else:\r\n for getal in range(0, 4):\r\n te_sturen = byte & shift_bit\r\n GPIO.output(self.__pinnen_array[getal], te_sturen)\r\n # print(\"Er is vertuurd: \" + str(te_sturen) + \" naar pin: \" + str(self.__pinnen_array[getal]))\r\n shift_bit = shift_bit >> 1\r\n\r\n\r\n def send_char(self, char):\r\n byte = ord(char) #getal omzetten naar een acii waarde\r\n\r\n GPIO.output(self.__e, GPIO.HIGH)\r\n GPIO.output(self.__rs, GPIO.HIGH)\r\n self.set_GPIO_bits(byte)\r\n GPIO.output(self.__e, GPIO.LOW)\r\n GPIO.output(self.__e, GPIO.HIGH)\r\n sleep(self.__delay)\r\n\r\n if self.__achtbit == False:\r\n geshifte_byte = byte << 4\r\n\r\n GPIO.output(self.__e, GPIO.HIGH)\r\n GPIO.output(self.__rs, GPIO.HIGH)\r\n self.set_GPIO_bits(geshifte_byte)\r\n GPIO.output(self.__e, GPIO.LOW)\r\n GPIO.output(self.__e, GPIO.HIGH)\r\n sleep(self.__delay)\r\n\r\n\r\n def send_instruction(self, byte):\r\n GPIO.output(self.__e, GPIO.HIGH)\r\n GPIO.output(self.__rs, GPIO.LOW)\r\n self.set_GPIO_bits(byte)\r\n GPIO.output(self.__e, GPIO.LOW)\r\n sleep(self.__delay)\r\n\r\n if self.__achtbit == False and byte != 0x28:\r\n geshifte_byte = byte << 4\r\n\r\n GPIO.output(self.__e, GPIO.HIGH)\r\n GPIO.output(self.__rs, GPIO.LOW)\r\n self.set_GPIO_bits(geshifte_byte)\r\n GPIO.output(self.__e, GPIO.LOW)\r\n sleep(self.__delay)\r\n\r\n\r\n def init_display(self, blink_cusror = True):\r\n # print(\"1ste:\")\r\n if self.__achtbit == True:\r\n self.send_instruction(0x38) # fuction set 8bit\r\n else:\r\n self.send_instruction(0x28) # fuction set 4bit\r\n\r\n # print(\"2de:\")\r\n if blink_cusror:\r\n self.send_instruction(0x0d) # display on\r\n else:\r\n self.send_instruction(0x0c)\r\n # print(\"3de:\")\r\n self.send_instruction(0x01) # clear display and cursor home\r\n\r\n def write_word(self, word):\r\n for getal in range(0, int(len(word))):\r\n self.send_char(word[getal:getal + 1])\r\n\r\n def move_to_second_line(self):\r\n self.send_instruction(0xC0)\r\n\r\n def clear_display(self):\r\n self.send_instruction(0x01)","sub_path":"LCD.py","file_name":"LCD.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"454159436","text":"# Copyright 2017 IBM Corporation\n# Copyright 2017 The Johns Hopkins University\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, print_function\nimport os\nimport sys\nimport pytest\nimport stat\nsys.path.append('..')\nfrom objectfs.core.metadata.metastore import MetaStore\nfrom objectfs.core.metadata.inode import Inode\nfrom objectfs.settings import Settings\nsettings = Settings()\nfrom config import 
META_STORE_LIST\n\n@pytest.mark.parametrize('meta_store', META_STORE_LIST)\ndef test_object(meta_store):\n mem = Meta_Store_Test(meta_store)\n mem.test_put_get_inode()\n mem.test_inode_id_list()\n mem.test_delete_inode()\n\nclass Meta_Store_Test:\n\n def __init__(self, meta_store):\n self._meta_store = MetaStore.load('test_fs', meta_store)\n self.inode = Inode(4, stat.S_IFREG | 644, 'test_inode_name') \n \n def __del__(self):\n self._meta_store.delete_inode_id_list(self.inode.id)\n \n def test_put_get_inode(self):\n \"\"\"Test inode put and get. Check if index is built correctly\"\"\"\n response = self._meta_store.put_inode(self.inode)\n inode = self._meta_store.get_inode(self.inode.id)\n assert(inode.id == self.inode.id)\n assert(inode.name == self.inode.name)\n inode_id = self._meta_store.get_inode_id(self.inode.parent_inode_id, self.inode.name)\n assert(self.inode.id == inode_id)\n \n def test_inode_id_list(self):\n \"\"\"Test the insertion and removal of inode ids from inode id list\"\"\"\n self._meta_store.add_inode_id_to_list(self.inode.id, 5, 'five')\n self._meta_store.add_inode_id_to_list(self.inode.id, 6, 'six')\n inode_list = []\n for (inode_id, file_name) in self._meta_store.get_inode_id_list(self.inode.id):\n inode_list.append((inode_id, file_name))\n assert((5, 'five') in inode_list)\n assert((6, 'six') in inode_list)\n self._meta_store.remove_inode_id_from_list(self.inode.id, 5, 'five')\n inode_list = []\n for (inode_id, file_name) in self._meta_store.get_inode_id_list(self.inode.id):\n inode_list.append((inode_id, file_name))\n assert((5, 'five') not in inode_list)\n assert((6, 'six') in inode_list)\n self._meta_store.remove_inode_id_from_list(self.inode.id, 6, 'six')\n inode_list = []\n for (inode_id, file_name) in self._meta_store.get_inode_id_list(self.inode.id):\n inode_list.append((inode_id, file_name))\n assert((6, 'six') not in inode_list)\n \n def test_delete_inode(self):\n \"\"\"Test inode deletion\"\"\"\n self._meta_store.delete_inode(self.inode.id)\n response = self._meta_store.get_inode(self.inode.id)\n assert(response is None)\n","sub_path":"objectfs/tests/test_memory_store.py","file_name":"test_memory_store.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"583625916","text":"import RPi.GPIO as GPIO\nfrom time import sleep\nfrom datetime import datetime\nimport asyncio\nimport smbus\n\nclass Motor():\n \n #Initializations\n def __init__(self,motorRPM = 3, pinNumber = 21, pinMode = GPIO.BCM):\n self.pinNumber = pinNumber\n self.pinMode = pinMode\n self.motorRPM = motorRPM\n GPIO.setmode(self.pinMode)\n GPIO.setwarnings(False)\n GPIO.setup(self.pinNumber, GPIO.OUT)\n\n #Function to turn the motor on\n def turnOn(self):\n GPIO.output(self.pinNumber, GPIO.HIGH)\n\n #Function to turn the motor off\n def turnOff(self):\n GPIO.output(self.pinNumber, GPIO.LOW)\n\n async def feedPet(self, rotations):\n #time of motor operation\n time = 60 * rotations / self.motorRPM \n self.turnOn()\n print( str(datetime.now()) + \" | Motor On\" ) \n sleep(time)\n self.turnOff()\n print( str(datetime.now()) + \" | Motor Off\" )\n\nclass PushButton():\n #Initializations\n def __init__(self,pinNumber):\n self.pinNumber = pinNumber\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.pinNumber, GPIO.IN, pull_up_down = GPIO.PUD_UP) #pull up resistor\n \n GPIO.add_event_detect(self.pinNumber, GPIO.FALLING) # detects on falling edge\n \n # read input and return true if button was pushed, simple debounce 
implemented \n def wasPushed(self):\n \n if GPIO.event_detected(self.pinNumber):\n sleep(0.025) # debounce for 25mSec\n if GPIO.input(self.pinNumber) == 0:\n return True\n else:\n return False\n\n\n \nclass i2c_device:\n def __init__(self, addr, port=1):\n self.addr = addr\n self.bus = smbus.SMBus(port)\n\n# Write a single command\n def write_cmd(self, cmd):\n self.bus.write_byte(self.addr, cmd)\n sleep(0.0001)\n\n# Write a command and argument\n def write_cmd_arg(self, cmd, data):\n self.bus.write_byte_data(self.addr, cmd, data)\n sleep(0.0001)\n\n# Write a block of data\n def write_block_data(self, cmd, data):\n self.bus.write_block_data(self.addr, cmd, data)\n sleep(0.0001)\n\n# commands\nLCD_CLEARDISPLAY = 0x01\nLCD_RETURNHOME = 0x02\nLCD_ENTRYMODESET = 0x04\nLCD_DISPLAYCONTROL = 0x08\nLCD_CURSORSHIFT = 0x10\nLCD_FUNCTIONSET = 0x20\nLCD_SETCGRAMADDR = 0x40\nLCD_SETDDRAMADDR = 0x80\n\n# flags for display entry mode\nLCD_ENTRYRIGHT = 0x00\nLCD_ENTRYLEFT = 0x02\nLCD_ENTRYSHIFTINCREMENT = 0x01\nLCD_ENTRYSHIFTDECREMENT = 0x00\n\n# flags for display on/off control\nLCD_DISPLAYON = 0x04\nLCD_DISPLAYOFF = 0x00\nLCD_CURSORON = 0x02\nLCD_CURSOROFF = 0x00\nLCD_BLINKON = 0x01\nLCD_BLINKOFF = 0x00\n\n# flags for display/cursor shift\nLCD_DISPLAYMOVE = 0x08\nLCD_CURSORMOVE = 0x00\nLCD_MOVERIGHT = 0x04\nLCD_MOVELEFT = 0x00\n\n# flags for function set\nLCD_8BITMODE = 0x10\nLCD_4BITMODE = 0x00\nLCD_2LINE = 0x08\nLCD_1LINE = 0x00\nLCD_5x10DOTS = 0x04\nLCD_5x8DOTS = 0x00\n\n# flags for backlight control\nLCD_BACKLIGHT = 0x08\nLCD_NOBACKLIGHT = 0x00\n\nEn = 0b00000100 # Enable bit\nRw = 0b00000010 # Read/Write bit\nRs = 0b00000001 # Register select bit\n\nclass Lcd:\n #initializes objects and lcd\n def __init__(self,i2cAddress = 0x27):\n self.address = i2c_device(i2cAddress)\n\n self.write(0x03)\n self.write(0x03)\n self.write(0x03)\n self.write(0x02)\n\n self.write(LCD_FUNCTIONSET | LCD_2LINE | LCD_5x8DOTS | LCD_4BITMODE)\n self.write(LCD_DISPLAYCONTROL | LCD_DISPLAYON)\n self.write(LCD_CLEARDISPLAY)\n self.write(LCD_ENTRYMODESET | LCD_ENTRYLEFT)\n sleep(0.2)\n\n # clocks EN to latch command\n def strobe(self, data):\n self.address.write_cmd(data | En | LCD_BACKLIGHT)\n sleep(.0005)\n self.address.write_cmd(((data & ~En) | LCD_BACKLIGHT))\n sleep(.0001)\n\n def writeFourBits(self, data):\n self.address.write_cmd(data | LCD_BACKLIGHT)\n self.strobe(data)\n\n # write a command to lcd\n def write(self, cmd, mode=0):\n self.writeFourBits(mode | (cmd & 0xF0))\n self.writeFourBits(mode | ((cmd << 4) & 0xF0))\n\n # write a character to lcd\n def writeChar(self, charvalue, mode=1):\n self.writeFourBits(mode | (charvalue & 0xF0))\n self.writeFourBits(mode | ((charvalue << 4) & 0xF0))\n\n # write string function with optional char positioning\n def writeString(self, string, line=1, pos=0): #( pos = 0 to 15)\n if line == 1:\n pos_new = pos\n elif line == 2:\n pos_new = 0x40 + pos\n \n self.write(0x80 + pos_new)\n \n for char in string:\n self.write(ord(char), Rs)\n\n # clear lcd and set to home\n def clear(self):\n self.write(LCD_CLEARDISPLAY)\n self.write(LCD_RETURNHOME)\n\n # define backlight on/off (on = .backlight(1); off= .backlight(0)\n def backlight(self, state): # state 1 = on, 0 = off\n if state == 1:\n self.address.write_cmd(LCD_BACKLIGHT)\n elif state == 0:\n self.address.write_cmd(LCD_NOBACKLIGHT)\n\n # add custom characters (0 - 7)\n def loadCustomChars(self, fontdata):\n self.write(0x40)\n for char in fontdata:\n for line in char:\n self.writeChar(line)\n\n def clearLine(self,line):\n 
self.writeString(\" \",line)\n\n def clearChar(self,line,pos=0):\n self.writeString(\" \",line,pos);\n","sub_path":"GPIO_CONTROL.py","file_name":"GPIO_CONTROL.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"513878878","text":"import tensorflow as tf\nimport datetime\n\n\ntime = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\nlogs = 'logs/' + time + 'graph'\nwriter = tf.summary.create_file_writer(logs)\n\n\n@tf.function\ndef sum():\n x = tf.Variable(0.0, name=\"x\")\n\n a = tf.constant([[1, 2], [3, 4]])\n\n b = tf.constant([[5, 6], [7, 8]])\n\n sums = tf.add(a, b)\n\n print(sums)\n\n\ntf.summary.trace_on(graph=True, profiler=True)\nsum()\nwith writer.as_default():\n tf.summary.trace_export(\n name=\"sum\",\n step=0,\n profiler_outdir=logs\n )\n\n","sub_path":"深度学习/模型图.py","file_name":"模型图.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"224341680","text":"'''\nThe DhaB-DhaT model contains DhaB-DhaT reaction pathway\nin the MCP; diffusion in the cell; diffusion from the cell \nin the external volume.\n\nThis model is currently in use. The DhaB-DhaT model assumes that there \nare M identical MCPs within the cytosol and N identical cells within the \nexternal volume. From time scsle analysis, gradients in cell are removed.\n\nProgramme written by aarcher07\nEditing History:\n- 28/10/20\n'''\n\n\nimport sys\nimport sympy as sp\nfrom .model_constants import *\nfrom .data_set_constants import *\nfrom scipy.integrate import solve_ivp\n\nclass DhaBDhaTModelAlt:\n\n def __init__(self, rc = 0.375e-6, lc = 2.47e-6, external_volume = 0.002):\n \"\"\"\n Initializes parameters to be used numerial scheme\n :param ncells_per_metrecubed: number of cells per metrecubed at any given time during experiment\n :param rc: Radius of cell in metres\n :param lc: length of the cell in metres (needed if assuming cells are rods)\n :param external_volume: external volume containing cells in metres^3\n \"\"\"\n # Integration Parameters\n self.rc = rc\n self.lc = lc\n self.external_volume = external_volume\n self.nvars = 2*3\n\n self.cell_volume = (4*np.pi/3)*(self.rc)**3 + (np.pi)*(self.lc - 2*self.rc)*((self.rc)**2)\n self.cell_surface_area = 2*np.pi*self.rc*self.lc\n self.nparams_sens = len(MODEL_PARAMETER_LIST)\n\n # differential equation parameters\n self._set_param_sp_symbols()\n self._set_sens_vars()\n self._set_symbolic_state_vars()\n\n\n def _set_symbolic_state_vars(self):\n \"\"\"\n Generates the symbol state variables for the model\n \"\"\"\n self.x_sp = np.array(sp.symbols('x:' + str(self.nvars)))\n\n def _set_param_sp_symbols(self):\n \"\"\"\n sets dictionary of parameters to be analyzed using sensitivity analysis\n \"\"\"\n self.params_sens_sp_dict = {name:sp.symbols(name) for name in MODEL_PARAMETER_LIST}\n self.params_sens_sp = list((self.params_sens_sp_dict).values())\n\n def _set_sens_vars(self):\n \"\"\"\n creates a list of sympy symbols for the derivative of each state vector\n wrt to parameters\n \"\"\"\n\n self.n_sensitivity_eqs = self.nvars * self.nparams_sens\n #sensitivity variables\n self.sensitivity_sp = np.array(list(sp.symbols('s0:' + str(self.n_sensitivity_eqs))))\n\n def _sderiv(self, t, x, params):\n \"\"\"\n Computes the spatial derivative of the system at time point, t\n :param t: time\n :param x: state variables\n :param params: parameter list\n \"\"\"\n\n 
###################################################################################\n ################################# Initialization ##################################\n ###################################################################################\n \n\n # Integration Parameters\n assert len(x) == self.nvars\n # differential equation parameters\n d = np.zeros((len(x))).tolist() # convert to list to allow use of symbolic derivatives\n n_compounds_cell = 3\n\n # cell growth\n # differential equation parameters\n ncells = params['ncells']\n ratio = 1\n\n\n ###################################################################################\n ################################# cytosol reactions ###############################\n ###################################################################################\n\n\n PermCellGlycerol = params['PermCellGlycerol']\n PermCell3HPA = params['PermCell3HPA']\n PermCellPDO = params['PermCellPDO']\n\n R_DhaB = params['VmaxfDhaB']*ratio*x[0] / (params['KmDhaBG'] + ratio*x[0]) #+ (ratio*x[1])/params['KmDhaBH']) \n R_DhaT = params['VmaxfDhaT']*ratio*x[1]/(params['KmDhaTH'] + ratio*x[1]) \n R_GlpK = params['VmaxfGlpK']*ratio*x[0]/(params['KmGlpKG'] + ratio*x[0])\n\n cell_area_volume = self.cell_surface_area/self.cell_volume\n\n \n d[0] = -R_DhaB -R_GlpK + cell_area_volume * PermCellGlycerol * (x[0 + n_compounds_cell] - ratio*x[0]) # microcompartment equation for G\n d[1] = R_DhaB - R_DhaT + cell_area_volume * PermCell3HPA * (x[1 + n_compounds_cell] - ratio*x[1]) # microcompartment equation for H\n d[2] = R_DhaT + cell_area_volume * PermCellPDO * (x[2 + n_compounds_cell] - ratio*x[2]) # microcompartment equation for P\n\n\n #####################################################################################\n ######################### external volume equations #################################\n #####################################################################################\n d[3] = ncells * self.cell_surface_area * PermCellGlycerol * (ratio*x[3 - n_compounds_cell] - x[3])\n d[4] = ncells * self.cell_surface_area * PermCell3HPA * (ratio*x[4 - n_compounds_cell] - x[4])\n d[5] = ncells * self.cell_surface_area * PermCellPDO * (ratio*x[5 - n_compounds_cell] - x[5])\n\n return d\n\n def _set_symbolic_sderiv(self):\n \"\"\"\n Generates the symbol differential equation\n \"\"\"\n x_sp = getattr(self, 'x_sp', None)\n if x_sp is None:\n self._set_symbolic_state_vars()\n self.sderiv_symbolic = self._sderiv(0, self.x_sp, self.params_sens_sp_dict)\n\n def _set_symbolic_sderiv_conc_fun(self):\n \"\"\"\n Generates the symbol jacobian of the differential equation \n wrt state variables\n \"\"\"\n sderiv_symbolic = getattr(self, 'sderiv_symbolic', None)\n if sderiv_symbolic is None:\n self._set_symbolic_sderiv()\n sderiv_symbolic = self.sderiv_symbolic\n self.sderiv_jac_conc_sp = sp.Matrix(sderiv_symbolic).jacobian(self.x_sp)\n sderiv_jac_conc_fun_lam = sp.lambdify((self.x_sp,self.params_sens_sp), self.sderiv_jac_conc_sp, 'numpy')\n self._sderiv_jac_conc_fun = lambda t,x,params_sens_dict: sderiv_jac_conc_fun_lam(x,params_sens_dict.values())\n\n def QoI(self,params,init_conds,tsamples=TIME_EVALS,tol = 10**-5):\n \"\"\"\n Integrates the DhaB-DhaT model with parameter values, param, and returns external glycerol\n 1,3-PDO and cell concentration time samples, tsamples\n @param params: dictionary parameter values to run the model. keys of the dictionary are in model_constants.py\n @param init_conds: dictionary initial conditions to run the model. 
keys of the dictionary are in model_constants\n @param base_dhaB_dhaT_model: instance of the DhaBDhaTModel class\n @param tsamples: time samples to collect external glycerol, external 1,3-PDO and DCW\n @param tol: tolerance at which integrate the DhaBDhaTModel\n @return: glycerol, external 1,3-PDO and DCW sampled at time samples, tsamples (3 x |tsamples| matrix)\n \"\"\"\n if not hasattr(self, '_sderiv_jac_conc_fun'):\n self._set_symbolic_sderiv_conc_fun()\n # format inputs\n tsamplessecs = np.array([t*HRS_TO_SECS for t in tsamples])\n model_params = {key: val for key,val in params.items() if key != \"scalar\" }\n scalar = params['scalar']\n\n # run ODE\n ds = lambda t,x: self._sderiv(t, x, model_params)\n ds_jac = lambda t,x: self._sderiv_jac_conc_fun(t,x,model_params)\n y0 = np.zeros(len(VARIABLE_INIT_NAMES))\n for i,init_names in enumerate(VARIABLE_INIT_NAMES):\n y0[i] = init_conds[init_names]\n\n sol = solve_ivp(ds,[0, tsamplessecs[-1]+10], y0, method = 'BDF', jac = ds_jac, t_eval=tsamplessecs,\n atol=tol,rtol=tol)#, events=event_stop)\n\n # rescale cell conc\n fdata = sol.y[DATA_COLUMNS,:].T\n fdata[:,2] = fdata[:,2]/(scalar*DCW_TO_COUNT_CONC)\n return fdata\n","sub_path":"WholeCell/13_PDO_Pathway_Inference/base_dhaB_dhaT_model/base_dhaB_dhaT_model/dhaB_dhaT_model_alt.py","file_name":"dhaB_dhaT_model_alt.py","file_ext":"py","file_size_in_byte":7854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"603746870","text":"import os\n\nimport logging.handlers\nfrom ..config.base_config import BaseConfig\n\ndir = os.path.split(os.path.realpath(__file__))[0]\n\nlogsdir = os.path.join(dir,'..','logs')\nif not os.path.exists(logsdir):\n os.mkdirs(logsdir)\n\nlogfile = os.path.join(logsdir,'log_file.txt')\n\nfh = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=1024*30, backupCount=3)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setLevel(BaseConfig.LOG_LEVEL)\nfh.setFormatter(formatter)\n\ndef logger(name):\n log = logging.getLogger(name)\n log.setLevel(BaseConfig.LOG_LEVEL)\n log.addHandler(fh)\n return log\n","sub_path":"application/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"540522285","text":"import socket\nimport binascii\nimport time\n\nHOST = '127.0.0.1' # The remote host\nPORT = 5555 # The same port as used by the server\n\n\n#Slot 2, we disconnect after the timeout and then send the data again and wait for the data the come from the server\nts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nts.connect((HOST, PORT))\nts.send(bytearray([0x00, 0x01, 0x02]))\ndata = ts.recv(1024)\nhex = str(binascii.hexlify(data), 'ascii')\nformatted_hex = ', '.join(hex[i:i+2] for i in range(0, len(hex), 2))\nprint(\"Data received\", formatted_hex)\n","sub_path":"python_bs4/tcpclient.py","file_name":"tcpclient.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"313465607","text":"\"\"\"\nmaterial.\n\nDefines how objects interact with rays.\n\"\"\"\n\nfrom .colour import WHITE\n\nclass Material:\n \"\"\"A Material defines how an object interacts with light.\"\"\"\n\n def __init__(self, ambient, diffuse=None, reflectivity=0.0, specular=None,\n shinyness=10):\n \"\"\"Initialise a material.\"\"\"\n self.ambient = ambient\n self.diffuse = diffuse or ambient\n 
self.reflectivity = reflectivity\n self.specular = specular or WHITE\n self.shinyness = shinyness\n\n def __getstate__(self):\n return (self.ambient, self.diffuse, self.reflectivity,\n self.specular, self.shinyness)\n\n def __setstate__(self, state):\n (self.ambient, self.diffuse, self.reflectivity,\n self.specular, self.shinyness) = state\n","sub_path":"raytrace/material.py","file_name":"material.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"437471073","text":"import random\n\nimport numpy as np\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle\n\nfrom Grain import Grain\n\n\nclass World:\n def __init__(self):\n self.objects = []\n\n def add_object(self, object):\n self.objects.append(object)\n\n def plot(self):\n x = 0\n for i in self.objects:\n x += 1\n i.plot(\"Ball\" + str(x))\n\n plt.legend()\n plt.show()\n\n def animate(self, graph=True):\n # Creates an animation\n # fig, ax = plt.subplots()\n fig = plt.figure()\n ax = plt.axes(xlim=(0, 200), ylim=(0, 200))\n patches = []\n KE = []\n PE = []\n total = []\n\n for ball in self.objects:\n patches.append(Circle((ball.pos[0], ball.pos[1]), ball.radius))\n\n def init():\n for p in patches:\n ax.add_patch(p)\n return patches\n\n def run(frame):\n nparts = len(self.objects)\n for i in range(nparts):\n self.objects[i].exact_solution()\n KE.append(self.objects[i].kinetic())\n PE.append(self.objects[i].potential())\n total.append(self.objects[i].total_energy())\n for j in range(i + 1, nparts):\n self.objects[i].pair_collision1(self.objects[j])\n self.objects[i].wall_collision()\n\n patches[i].center = (self.objects[i].pos[0],\n self.objects[i].pos[1])\n patches[i].radius = self.objects[i].radius\n\n return patches\n\n ani = animation.FuncAnimation(fig, run, frames=2000, blit=False,\n interval=30,\n repeat=False, init_func=init)\n plt.show()\n\n if graph == True:\n x = np.arange(0, len(total))\n fig, ax1 = plt.subplots()\n plt.xlabel(\"Time\")\n plt.ylabel(\"Total Energy\")\n ax1.plot(x, total)\n plt.show()\n\n x = np.arange(0, len(KE))\n fig, ax1 = plt.subplots()\n plt.xlabel(\"Time\")\n plt.ylabel(\"Kinetic Energy\")\n ax1.plot(x, KE)\n plt.show()\n\n x = np.arange(0, len(PE))\n fig, ax1 = plt.subplots()\n plt.xlabel(\"Time\")\n plt.ylabel(\"Potential Energy\")\n ax1.plot(x, PE)\n plt.show()\n\n\nif __name__ == '__main__':\n # Balls horizontally collide\n balls = [Grain([90, 2], [0, 0], 2, 0.85, [50, 0, 150]),\n Grain([100, 2], [-5, 0], 2, 0.85, [50, 0, 150])]\n\n # Energy increasing demo\n # balls = [Grain([100, 100], [0, 0], 2, 1.0, [70, 0, 130])]\n\n # Particles being poured on the ground\n # balls = []\n # for i in range(100):\n # balls.append(\n # Grain([random.randint(95, 105), (200 + 11 * i)], [0, 0], 2, 0.2, [0, 10, 200]))\n\n # Particles being poured into a container\n # balls = []\n # for i in range(100):\n # balls.append(\n # Grain([random.randint(95, 105), (200 + 11 * i)], [0, 0], 2, 0.2, [70, 20, 130]))\n\n # Breaking dam demo\n # balls = []\n # for i in range(10):\n # for j in range(10):\n # balls.append(\n # Grain([i * 5 , j * 5], [0, 0], 2, 0.5, [0, 0, 120])\n # )\n\n world = World()\n for ball in balls:\n world.add_object(ball)\n world.animate()\n","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"27885934","text":"import 
RPi.GPIO as GPIO\nimport time\nimport threading\n\nclass ServoController:\n\tdef __init__(self, pin, speed, r):\n\t\tself.pin = pin\n\t\tself.speed = speed\n\t\tself.min = r[0]\n\t\tself.max = r[1]\n\t\tself.angle = 0\n\t\tself.angleChanged = threading.Condition()\n\n\tdef start(self):\n\t\tGPIO.setmode(GPIO.BCM)\n\t\tGPIO.setup(self.pin, GPIO.OUT)\n\t\tself.pwm = GPIO.PWM(self.pin, self.speed)\n\t\tself.pwm.start(5)\n\t\tself.threadChangeAngle()\n\n\tdef scale(self, value, oldmin, oldmax, newmin, newmax):\n\t\toldr = (oldmax-oldmin)\n\t\tnewr = (newmax-newmin)\n\t\tnewv = (((value - oldmin)*newr)/oldr)+newmin\n\t\treturn newv\n\n\tdef updateAngle(self, ang):\n\t\twith self.angleChanged:\n\t\t\tif ang <= self.max and ang >= self.min:\n\t\t\t\tself.angle = ang\n\t\t\t\tself.angleChanged.notifyAll()\n\t\t\telse:\n\t\t\t\tprint(\"Invalid Angle\")\n\t\t\t\tif ang > self.max:\n\t\t\t\t\tself.angle = self.max\n\t\t\t\telse:\n\t\t\t\t\tself.angle = self.min\n\n\tdef changeAngle(self):\n\t\twhile True:\n\t\t\twith self.angleChanged:\n\t\t\t\tself.angleChanged.wait()\n\t\t\t\tself.pwm.ChangeDutyCycle(self.scale(self.angle, self.min, self.max, 5, 10))\n\n\tdef threadChangeAngle(self):\n\t servo_thread = threading.Thread(target=self.changeAngle)\n\t servo_thread.daemon = True # Don't let the BNO reading thread block exiting.\n\t servo_thread.start()\n\n\tdef close(self):\n\t\tprint(\"Done\")\n\t\tGPIO.cleanup()\t\n\nif __name__ == '__main__':\n\tserv = ServoController(4, 50, (-50, 50))\n\tserv.start()\n\twhile True:\n\t\tnewAngle = float(raw_input(\"Enter the new Angle:: \"))\n\t\tif newAngle > 90:\n\t\t\tbreak\n\t\tserv.updateAngle(newAngle)\n\tserv.close()\n\n\t\n","sub_path":"IMUServoTest/ServoController.py","file_name":"ServoController.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"190360145","text":"# items.py\n\n\"\"\"\n definitions for items\n import this class and loop through the items dictionary to instantiate the items\n\"\"\"\n\nitems_dict = {\n \"destroyable\" : {\n 'apple' : [\n \"It's a shiny red APPLE. You feel hungry looking at it.\",\n \"You see an APPLE.\",\n 4,\n 20\n ],\n 'flask' : [\n \"It's a silver FLASK that belonged to the drunk GUARD.\",\n \"You see a silver FLASK.\",\n 10,\n 100\n ]\n },\n\n \"nondestroyable\" : {\n 'map' : [\n \"It's a map of the dungeon!\",\n \"You see a creased piece of parchment inked with a MAP of the dungeon.\",\n 0\n ],\n 'letter' : [\n \"It's a piece of parchment lying on the floor.\" + \\\n \"\\nIt seems to have some writing on it.\",\n \"You see a letter. Why not READ it?\",\n 0\n ],\n 'helmet' : [\n \"A HELMET that looks like it belongs to a guard.\",\n \"You see a HELMET that looks like it belongs to somebody\\nyou don't want to mess with.\",\n 15\n ],\n 'key' : [\n \"It's a KEY that can open any door.\",\n \"You see a KEY decorated to look like a bone-white skull.\",\n 0\n ],\n 'sword' : [\n \"It's a gleaming silver SWORD.\" + \\\n \"\\nIt looks pretty powerful.\",\n \"You see a SWORD that seems to vibrate with energy.\",\n 45\n ],\n 'shield' : [\n \"A SHIELD with a triangle design on it.\" + \\\n \"\\nYou feel stronger when you hold it!\",\n \"It's a SHIELD made of the hide of some beast.\",\n 30\n ],\n 'mop' : [\n \"It's a MOP. It mops.\",\n \"It looks like what seems to be a MOP.\",\n 5\n ],\n 'bucket' : [\n \"It's a BUCKET. 
What did you expect?\",\n \"One bucket.\",\n 5\n ]\n },\n\n \"nonpickuppable\" : {\n 'torch' : [\"It's a burning wooden stick set into the stone wall.\"],\n 'mirror' : [\n \"You look at yourself in the MIRROR.\" + \\\n \"\\nYou notice a LETTER on the floor behind you\" + \\\n \"\\nthat you didn't see before!\"\n ],\n 'hay' : [\n \"A pile of HAY sits in the center of the room.\" + \\\n \"\\nYou think it's supposed to be your bed.\"],\n 'portrait' : [\n \"The PORTRAIT of a kingly-looking person stares across\" + \\\n \"\\nthe room at a CHEST sitting against the wall.\"],\n 'door' : [\"It's a DOOR. You want to go through it.\"],\n 'chest' : [\"It's a golden CHEST. It looks like it's... containing something.\"],\n 'rat' : [\"It's a RAT. Gross. It probably has some kind of plague.\"]\n }\n}\n","sub_path":"dungeon_escape_application/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"398401911","text":"\"\"\"\nHelper functions for financial computations.\n\"\"\"\nimport pandas as pd\nfrom main import database_manager\nimport datetime\nfrom typing import Dict, Union, Tuple\n\nLOOKBACK_PERIOD = 252\n\n\ndef compute_annual_returns_from_daily_return(dbm: database_manager.DatabaseManager, for_bloom: bool) -> Dict[str, pd.DataFrame]:\n \"\"\"\n Computes annual returns for all assets.\n :param for_bloom: whether it is intended for bloom datasets or quandl.\n :param dbm: a DatabaseManager instance.\n :return: dictionary containing annaul returns for bloom datasets\n \"\"\"\n d = {}\n\n table_names = dbm.bloom_dataset_names if for_bloom else dbm.quandl_dataset_names\n\n for tbl_name in table_names:\n df, info = dbm.get_table(tbl_name)\n\n if df is not None:\n df['annual_ret'] = df['PX_LAST'].pct_change(periods=LOOKBACK_PERIOD)\n df['annual_ret_sign'] = (df['annual_ret'] > 0)\n df['annual_ret_sign'] *= 2\n df['annual_ret_sign'] -= 1\n d[tbl_name] = df[['annual_ret', 'annual_ret_sign']]\n\n return d\n\n\ndef compute_monthly_returns(dbm: database_manager.DatabaseManager, tbl_name: str) -> \\\n Union[Tuple[pd.DataFrame, Tuple[str, str, str, str, str], datetime.datetime], Tuple[None, None]]:\n \"\"\"\n Computes compounded return for a month.\n :param dbm: A DatabaseManager instance.\n :param tbl_name: name of the table to compute monthly return for.\n :return: tuple consisting of (monthly return, info for table, first day the asset was traded).\n \"\"\"\n tbl, info = dbm.get_table(tbl_name)\n\n if tbl is None:\n return None, None\n\n tbl.dropna(axis=0, inplace=True)\n\n first_date = tbl.index[0]\n last_date = tbl.index[-1]\n prev_month = first_date.month\n\n row_idx = 0\n curr_date, prev_date = None, None\n\n monthly_returns = []\n daily_ret = 0\n monthly_ret = 0\n\n while curr_date != last_date:\n row_idx += 1\n\n curr_date = tbl.index[row_idx]\n\n curr_month = curr_date.month\n\n curr_price = tbl.iloc[row_idx]['PX_LAST']\n prev_price = tbl.iloc[row_idx - 1]['PX_LAST']\n\n if curr_price == 0:\n daily_ret = 0\n elif prev_price == 0:\n daily_ret = tbl.iloc[row_idx - 2]['PX_LAST']\n else:\n daily_ret = (curr_price / prev_price) - 1.0\n\n monthly_ret = monthly_ret * (daily_ret + 1) if monthly_ret != 0 else daily_ret + 1\n\n if curr_month != prev_month:\n # remove compounding of last daily return\n monthly_ret /= (daily_ret + 1)\n\n monthly_returns.append((prev_date, monthly_ret - 1))\n\n # reset for next month\n monthly_ret = daily_ret + 1\n\n prev_month = curr_month\n prev_date = 
curr_date\n\n df = pd.DataFrame(monthly_returns, columns=['Dates', 'Monthly_Return'])\n df.set_index('Dates', inplace=True)\n\n return df, info, first_date\n","sub_path":"main/finance_metrics.py","file_name":"finance_metrics.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"190398059","text":"import re\n\nPHONE_NUM_REGEX = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d')\nmo = PHONE_NUM_REGEX.search('My number is : 415-555-4242')\nprint(mo.group())\n\n\n#None gelme durumu\n# HERO_REGEX = re.compile(r'Batman and Tina Fey')\nHERO_REGEX = re.compile(r'Batman|Tina Fey')\n\nmo1 = HERO_REGEX.search('Batman and Tina Fey')\nprint(mo1.group())\n\n#None gelme durumu var\nmo2 = HERO_REGEX.search(\"Tina Fey and Batman\")\n#None gelme durumu\n#print(mo2)\nprint(mo2.group())\n\n\nBAT_REGEX = re.compile(r'Bat(man|mobile|copter|bat)')\n\nmo = BAT_REGEX.search('Batman lost a wheel')\n\nprint(mo.group())\n\n\nBAT_REGEX = re.compile(r'Bat(wo)?man')\n\nmo1 = BAT_REGEX.search(\"The Adventures of Batman\")\nprint(mo1.group())\n\nmo2 = BAT_REGEX.search(\"The Adventures of Batwoman\")\nprint(mo2.group())\n\n\n#Matching Zero or More with star\nBAT_REGEX = re.compile(r'Bat(wo)*man')\n\nmo1 = BAT_REGEX.search('The Advetures of Batwowowoman')\nprint(mo1.group())\n\n\nmo2 = BAT_REGEX.search('The Advetures of Batman')\nprint(mo2.group())\n\n\n#Matching one or more with the plus\nBAT_REGEX = re.compile(r'Bat(wo)+man')\nmo1 = BAT_REGEX.search(\"The Adventures of Batwoman\")\nif mo1 != None:\n print(mo1.group())\n\n\n\n#Match specific repetitions with curly brackets\nHA_REGEX = re.compile(r'(Ha){3}')\nmo1 = HA_REGEX.search(\"HAHAHAhahahaHaHaHa\")\nprint(mo1.group())\n\n\n#greedy and nongreedy matching\nGREEDY_HA_REGEX = re.compile(r'(Ha){3,5}')\nmo1 = GREEDY_HA_REGEX.search(\"HaHaHaHaHaHaHa\")\nprint(mo1.group())\n\n\nNONGREEDY_HA_REGEX = re.compile(r'(Ha){3,5}?')\nmo2 = NONGREEDY_HA_REGEX.search('HaHaHaHaHa')\nprint(mo2.group())\n\n\n#findAll\nPHONE_NUM_REGEX = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d')\nmo = PHONE_NUM_REGEX.findall('Cell: 415-555-9999 Work: 212-555-0000')\nprint(mo)\n\n#findAll with pha.\nphoneNumRegex = re.compile(r'(\\d\\d\\d)-(\\d\\d\\d)-(\\d\\d\\d\\d)') # has groups\nphoneNumRegex.findall('Cell: 415-555-9999 Work: 212-555-0000')\n#[('415', '555', '1122'), ('212', '555', '0000')]\n\nXMAS_REGEX = re.compile(r'\\d+\\s\\w+')\ndata = XMAS_REGEX.findall('12 drummers,11 pipers, 10 lords, 9 ladies')\nprint(data)\n\n#Matching your own character classes\n\nVOWEL_REGEX = re.compile(r'[aeiouAEIOU]')\nprint(VOWEL_REGEX.findall('RoboCop eats baby food. BABY FOOD'))\n\nCONSONANT_REGEX = re.compile(r'[^aeiouAEIOU]')\n\nprint(CONSONANT_REGEX.findall('RoboCop eats baby food. 
BABY FOOD'))\n\natRegex = re.compile(r'.at')\ndata = atRegex.findall('The cat in the hat sat on the flat mat.')\nprint(data)\n\nnameRegex = re.compile(r'First Name: (.*) Last Name: (.*)')\n\nmo = nameRegex.search('First Name: Al Last Name: Sweigart')\nprint(mo.group(1))\n\nprint(mo.group(2))\n\nnoNewlineRegex = re.compile('.*',re.DOTALL)\na = noNewlineRegex.search('Serve the public trust.\\nProtect the innocent.\\nUphold the law.')\nprint(a.group())\n\n\nregex1 = re.compile('RoboCop')\nregex2 = re.compile('ROBOCOP')\nregex3 = re.compile('robOcop')\nregex4 = re.compile('RobocOp')\n\nrobocop = re.compile(r'RoboCoP', re.I)\nprint(robocop.search(\"RoboCop is part man, part machine,all cop.\").group())\nprint(robocop.search(\"robOcop is part man, part machine,all cop.\").group())\n\n#sub regex\nnamesRegex = re.compile(r'Agent \\w+')\na = namesRegex.sub('CENSORED', 'Agent Alice gave the secret documents to Agent Bob')\nprint(a)\n\n\n\nphoneRegex = re.compile(r'''(\n (\\d{3}|\\(\\d{3}\\))? # area code\n (\\s|-|\\.)? # separator\n \\d{3} # first 3 digits\n (\\s|-|\\.) # separator\n \\d{4} # last 4 digits\n (\\s*(ext|x|ext.)\\s*\\d{2,5})? # extension\n )''', re.VERBOSE)\nprint(phoneRegex.search(\"415 555 9999\"))\n","sub_path":"regex/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636123665","text":"import os\nimport time\nimport logging\nimport unittest\nfrom tempfile import mkstemp\nimport itertools\n\nfrom cgcloud.core.test import CgcloudTestCase\nfrom cgcloud.core.ui import main\nfrom cgcloud.spark.spark_box import heredoc, install_dir\nfrom cgcloud.spark import SparkBox, SparkMaster, SparkSlave\n\nlog = logging.getLogger( __name__ )\n\nmaster = SparkMaster.role( )\nslave = SparkSlave.role( )\nrole = SparkBox.role( )\n\nnum_slaves = 2\n\ncleanup = True\ncreate_image = True\n\n\nclass ClusterTests( CgcloudTestCase ):\n \"\"\"\n Tests the typical life-cycle of instances and images\n \"\"\"\n\n @classmethod\n def setUpClass( cls ):\n os.environ[ 'CGCLOUD_PLUGINS' ] = 'cgcloud.spark'\n super( ClusterTests, cls ).setUpClass( )\n if create_image:\n cls._cgcloud( 'create', role, '-I', '-T' )\n\n @classmethod\n def tearDownClass( cls ):\n if cleanup and create_image:\n cls._cgcloud( 'delete-image', role )\n super( ClusterTests, cls ).tearDownClass( )\n\n def test_wordcount( self ):\n self._create_cluster( )\n try:\n self._assert_remote_failure( )\n self._wait_for_slaves( )\n self._word_count( )\n finally:\n if cleanup:\n self._terminate_cluster( )\n\n # FIXME: Delete volumes\n\n def test_persistence( self ):\n volume_size_gb = 1\n self._create_cluster( '--ebs-volume-size', str( volume_size_gb ) )\n try:\n self._wait_for_slaves( )\n # Create and checksum a random file taking up 75% of the cluster's theoretical\n # storage capacity an replication factor of 1.\n test_file_size_mb = volume_size_gb * 1024 * num_slaves * 3 / 4\n self._ssh( master, 'dd if=/dev/urandom bs=1M count=%d '\n '| tee >(md5sum > test.bin.md5) '\n '| hdfs dfs -put -f - /test.bin' % test_file_size_mb )\n self._ssh( master, 'hdfs dfs -put -f test.bin.md5 /' )\n finally:\n self._terminate_cluster( )\n self._create_cluster( '--ebs-volume-size', str( volume_size_gb ) )\n try:\n self._wait_for_slaves( )\n self._ssh( master, 'test \"$(hdfs dfs -cat /test.bin.md5)\" '\n '== \"$(hdfs dfs -cat /test.bin | md5sum)\"' )\n finally:\n if cleanup:\n self._terminate_cluster( )\n\n def _create_cluster( self, *args ):\n 
self._cgcloud( 'create-spark-cluster', '-s', str( num_slaves ), *args )\n\n def _terminate_cluster( self ):\n for i in xrange( num_slaves ):\n self._cgcloud( 'terminate', slave )\n self._cgcloud( 'terminate', master )\n\n def _assert_remote_failure( self ):\n \"\"\"\n Proof that failed remote commands lead to test failures\n \"\"\"\n self._ssh( master, 'true' )\n try:\n self._ssh( master, 'false' )\n self.fail( )\n except SystemExit as e:\n self.assertEqual( e.code, 1 )\n\n def _wait_for_slaves( self ):\n delay = 5\n expiration = time.time( ) + 10 * 60\n commands = [ 'test $(cat %s/spark/conf/slaves | wc -l) = %s' % (install_dir, num_slaves),\n \"hdfs dfsadmin -report -live | fgrep 'Live datanodes (%s)'\" % num_slaves ]\n for command in commands:\n while True:\n try:\n self._ssh( master, command )\n except SystemExit:\n if time.time( ) + delay >= expiration:\n self.fail( \"Cluster didn't come up in time\" )\n time.sleep( delay )\n else:\n break\n\n @unittest.skip( 'Only for interactive invocation' )\n def test_word_count_only( self ):\n self._word_count( )\n\n def _word_count( self ):\n self._ssh( master, 'hdfs dfs -rm -r -f -skipTrash /test.txt /test.txt.counts' )\n self._ssh( master, 'rm -rf test.txt test.txt.counts' )\n self._ssh( master, 'curl -o test.txt https://www.apache.org/licenses/LICENSE-2.0.txt' )\n self._ssh( master, 'hdfs dfs -put -f test.txt /' )\n script, script_path = mkstemp( )\n try:\n script = os.fdopen( script, 'w' )\n script.write( heredoc( \"\"\"\n import sys\n from pyspark import SparkContext\n sc = SparkContext(appName=\"PythonPi\")\n file = sc.textFile( \"/test.txt\" )\n counts = ( file\n .flatMap( lambda line: line.split( \" \" ) )\n .map( lambda word: (word, 1) )\n .reduceByKey( lambda a, b: a + b ) )\n counts.saveAsTextFile( \"/test.txt.counts\" )\"\"\" ) )\n script.close( )\n self._rsync( master, script_path, ':wordcount.py' )\n except:\n script.close( )\n raise\n finally:\n os.unlink( script_path )\n self._ssh( master, 'spark-submit --executor-memory 512m wordcount.py' )\n self._ssh( master, 'hdfs dfs -get /test.txt.counts' )\n self._ssh( master, 'test -f test.txt.counts/_SUCCESS' )\n for i in xrange( num_slaves ):\n self._ssh( master, 'test -s test.txt.counts/part-%05d' % i )\n\n ssh_opts = [ '-o', 'UserKnownHostsFile=/dev/null', '-o', 'StrictHostKeyChecking=no' ]\n\n @classmethod\n def _ssh( cls, role, *args ):\n cls._cgcloud( 'ssh',\n '-l', 'sparkbox',\n role,\n *itertools.chain( cls.ssh_opts, args ) )\n\n @classmethod\n def _rsync( cls, role, *args ):\n cls._cgcloud( 'rsync',\n '--ssh-opts=' + ' '.join( cls.ssh_opts ),\n '-l', 'sparkbox',\n role,\n *args )\n\n @classmethod\n def _cgcloud( cls, *args ):\n log.info( \"Running %r\" % (args,) )\n main( args )\n","sub_path":"spark/src/cgcloud/spark/test/test_spark.py","file_name":"test_spark.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"466960288","text":"#!/bin/python\n'''Run the main avionics process.\n\nUsed for data collection, telemetry, and parachutes.\n'''\n\nimport argparse as arg\n\nfrom src.Avionics import Avionics\nfrom src.utilities import parse_arguments\n\ndef parse_arguments() -> \"arg.ArgumentParser.NameSpace\":\n parser = arg.ArgumentParser(\n description=\"Argument parsing for extra features\"\n )\n parser.add_argument(\n \"--gui\", \"--curses\", \"-g\", \"-G\",\n default=False, action=\"store_true\"\n )\n parser.add_argument(\n \"--sensehat\", \"--sense_hat\", \"-s\", \"-S\",\n 
default=False, action=\"store_true\"\n )\n arguments = parser.parse_args()\n return arguments\n\n# Run avionics directly\nif __name__ == \"__main__\":\n # Set up argument parsing\n arguments = parse_arguments()\n \n if arguments.gui: # Use curses gui\n visualizer = Vis.Vis(Avionics())\n visualizer.menu()\n elif arguments.sensehat: # Use sensehat interface\n s = SH_Interface.Interface()\n s.menu()\n else: # Run on the command line\n system = Avionics()\n system.main_process()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"374580697","text":"#\r\n# TRABALHO PRÁTICO FINAL DE FUP\r\n# NOMES DOS MEMBROS DA EQUIPE:\r\n# Breno Macêdo de Brito - 514513\r\n# Felipe Vieira Duarte - 509067\r\n#\r\n\r\n# Definir o placar (a posição 0 é a quantidade de vitórias,\r\n# 1 é a quantidade de empates e 2 a de derrotas)\r\n\r\nscoreboard = [0,0,0]\r\n\r\n# Definir as cores\r\n# utilizadas no código\r\n\r\nHEADER = '\\033[95m'\r\nOKBLUE = '\\033[94m'\r\nOKCYAN = '\\033[96m'\r\nOKGREEN = '\\033[92m'\r\nYELLOW = '\\033[33m'\r\nWARNING = '\\033[93m'\r\nFAIL = '\\033[91m'\r\nENDC = '\\033[0m'\r\nBOLD = '\\033[1m'\r\nUNDERLINE = '\\033[4m'\r\n\r\n# Iniciar a função do jogo\r\n\r\ndef tic_tac_toe():\r\n\r\n # Definir o tabuleiro\r\n # Definir os casos, A, B, C, D e E do computador, em que a\r\n # variável cases_plays irá armazenar as possíveis jogadas de cada caso\r\n\r\n board = [[[' ' for i in range(3)] for i in range(3)] for i in range(3)]\r\n \r\n cases_plays = [None]*5\r\n last_played = [None] * 3\r\n who_begins = 0\r\n \r\n # O turno da jogada, será utilizado para saber quem venceu,\r\n # quem perdeu ou se for empate (se o turno chegar a 27)\r\n\r\n turn = 0\r\n\r\n # Um vetor que, na primeira posição, armazenará o simbolo do jogador e, na segunda posição,\r\n # armazenará o símbolo da cpu\r\n\r\n marks = ['O','X']\r\n\r\n # A função show_mark irá verificar se o símbolo é o último símbolo jogado\r\n # e, caso for, irá mostrá-lo destacado no tabuleiro, para evitar que o jogador\r\n # fique buscando qual foi a jogada do adversário.\r\n\r\n def show_mark(x,y,z):\r\n if last_played[0] == x and last_played[1] == y and last_played[2] == z:\r\n return YELLOW + BOLD + board[x][y][z] + OKGREEN + BOLD\r\n else:\r\n return board[x][y][z]\r\n\r\n # Iniciar o jogo\r\n\r\n def start_game():\r\n\r\n nonlocal marks\r\n\r\n option = '0'\r\n\r\n show_menu()\r\n\r\n print()\r\n\r\n # Ler o símbolo que o jogador deseja jogar\r\n\r\n while option != '1' and option != '2' and option != '3':\r\n option = input(YELLOW + BOLD + 'Selecione com qual símbolo deseja jogar: ' + HEADER + ENDC)\r\n\r\n if option != '1' and option != '2' and option != '3':\r\n print(FAIL + BOLD + 'Opção inválida, tente novamente.' + HEADER + ENDC)\r\n\r\n \r\n if option == '3':\r\n show_creditos()\r\n return(True)\r\n\r\n print(OKBLUE + BOLD + 'Quem deverá começar?')\r\n print('1 - Eu')\r\n print('2 - O Computador' + HEADER + ENDC)\r\n\r\n # Ler quem deverá começar\r\n\r\n begin_option = '0'\r\n\r\n while begin_option != '1' and begin_option != '2':\r\n begin_option = input(YELLOW + BOLD + 'Quem começa? ' + ENDC + HEADER)\r\n\r\n if begin_option != '1' and begin_option != '2':\r\n print(FAIL + BOLD + 'Opção inválida! As opções válidas são 1 e 2!' 
+ HEADER + ENDC)\r\n\r\n nonlocal who_begins\r\n\r\n if begin_option == '2':\r\n who_begins = 1\r\n\r\n # Inverter as posições do vetor marks caso o jogador escolha jogar com o\r\n # símbolo X\r\n\r\n if option == '1':\r\n show_board()\r\n elif option == '2':\r\n show_board()\r\n marks = marks[::-1]\r\n\r\n # A função show_menu irá simplesmente mostrar o menu\r\n\r\n def show_menu():\r\n print()\r\n print(BOLD + OKBLUE + 'Instruções:')\r\n print('O terminal mostrará as opções disponíves na forma: \"NÚMERO - AÇÃO\"')\r\n print('Entre com o número que corresponde a ação que você quer realizar para prosseguir.')\r\n print()\r\n print('1 - Jogar com o símbolo O')\r\n print('2 - Jogar com o símbolo X')\r\n print('3 - Sair' + HEADER + ENDC)\r\n print()\r\n\r\n # A função show_board irá pegar o vetor board e mostrar ele na forma\r\n # de um jogo da velha 3D\r\n\r\n def show_board():\r\n print()\r\n print(OKGREEN + BOLD + '============== TABULEIRO ATUAL ==============')\r\n print()\r\n print(' CAMADA 1 CAMADA 2 CAMADA 3 ')\r\n print(' 1 2 3 1 2 3 1 2 3 ')\r\n print(f'1 {show_mark(0,0,0)} | {show_mark(0,0,1)} | {show_mark(0,0,2)} 1 1 {show_mark(1,0,0)} | {show_mark(1,0,1)} | {show_mark(1,0,2)} 1 1 {show_mark(2,0,0)} | {show_mark(2,0,1)} | {show_mark(2,0,2)} 1')\r\n print(' ---+---+--- ---+---+--- ---+---+--- ')\r\n print(f'2 {show_mark(0,1,0)} | {show_mark(0,1,1)} | {show_mark(0,1,2)} 2 2 {show_mark(1,1,0)} | {show_mark(1,1,1)} | {show_mark(1,1,2)} 2 2 {show_mark(2,1,0)} | {show_mark(2,1,1)} | {show_mark(2,1,2)} 2')\r\n print(' ---+---+--- ---+---+--- ---+---+--- ')\r\n print(f'3 {show_mark(0,2,0)} | {show_mark(0,2,1)} | {show_mark(0,2,2)} 3 3 {show_mark(1,2,0)} | {show_mark(1,2,1)} | {show_mark(1,2,2)} 3 3 {show_mark(2,2,0)} | {show_mark(2,2,1)} | {show_mark(2,2,2)} 3' + ENDC + HEADER)\r\n print()\r\n\r\n # A função error mostrará um erro de entrada, de acordo com o tipo\r\n # de erro.\r\n\r\n def error(index):\r\n\r\n if(index == 0):\r\n print(FAIL + BOLD + \"\\nENTRADA INVÁLIDA\\n\")\r\n print(\"\\nNúmeros de 1 a 3, separados por vírgula\")\r\n print(\"Exemplo: 3,1,2\" + ENDC + HEADER)\r\n\r\n elif(index == 1):\r\n print(FAIL + BOLD + \"\\nNão há coordenadas o suficiente, insira os três eixos\" + HEADER + ENDC)\r\n\r\n elif(index == 2):\r\n print(FAIL + BOLD + \"\\nPosição não existente, reinsira o valor\" + HEADER + ENDC)\r\n\r\n elif(index == 3):\r\n print(FAIL + BOLD + \"\\nEssa casa já está preenchida, insira uma nova posição\" + HEADER + ENDC)\r\n\r\n # A função move coloca o elemento em sua devida posição\r\n\r\n def move(layer,line,column):\r\n\r\n nonlocal turn\r\n nonlocal board\r\n\r\n elemento = marks[(turn + who_begins)%2]\r\n turn += 1\r\n board[layer][line][column] = elemento\r\n\r\n last_played[0] = layer\r\n last_played[1] = line\r\n last_played[2] = column\r\n\r\n show_board()\r\n\r\n # A função player_turn recebe os inputs do player para uma jogada e retorna a jogada\r\n # para a análise de vitória\r\n\r\n def player_turn():\r\n\r\n print('\\nSelecione onde será sua jogada:')\r\n\r\n try: coordinates = [int(i.strip()) for i in input().split(',')]\r\n\r\n except: return(error(0))\r\n\r\n if(len(coordinates)!=3):return(error(1))\r\n\r\n find_element = board\r\n\r\n for index,coordinate in enumerate(coordinates):\r\n\r\n coordinate-=1\r\n\r\n coordinates[index] = coordinate\r\n\r\n if(coordinate<0 or 2max_act:\n max_act = output.max().item()\n f.write('\\nBatch:{} Current:{:.4f} Max:{:.4f}'.format(batch_idx+1,output.max().item(),max_act))\n if batch_idx==0:\n ann_thresholds[pos] = 
max_act\n pos = pos+1\n \n model.module.threshold_init(scaling_threshold=scaling_threshold, reset_threshold=reset_threshold, thresholds = ann_thresholds[:], default_threshold=default_threshold)\n break\n return pos\n\n if architecture.lower().startswith('vgg'): \n for l in model.module.features.named_children():\n if isinstance(l[1], nn.Conv2d):\n pos = find(int(l[0]), pos)\n \n for c in model.module.classifier.named_children():\n if isinstance(c[1], nn.Linear):\n pos = find(int(l[0])+int(c[0])+1, pos)\n\n if architecture.lower().startswith('res'):\n for l in model.module.pre_process.named_children():\n if isinstance(l[1], nn.Conv2d):\n pos = find(int(l[0]), pos)\n\ndef train(epoch, loader):\n\n global learning_rate, start_time, batch_size\n learning_rate_use = learning_rate * (lr_decay_factor**((epoch)//lr_adjust_interval))\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate_use\n \n f.write('Epoch: {} Learning Rate: {:.2e}'.format(epoch,learning_rate_use))\n \n total_loss = 0.0\n total_correct = 0\n num_train = 50000\n train_loss = AverageMeter()\n model.train()\n \n current_time = start_time\n model.module.network_init(update_interval)\n\n for batch_idx, (data, target) in enumerate(loader):\n \n if torch.cuda.is_available() and use_cuda:\n data, target = data.cuda(), target.cuda()\n #data=m(data)\n \n #print(\"Epoch: {}/{};\".format(epoch, 20), \"Training batch:{}/{};\".format(batch_idx+1, math.ceil(num_train/batch_size)))\n t=0\n mem = 0\n spike =0\n mask = 0\n spike_count = 0\n \n \n optimizer.zero_grad()\n while tmax_correct:\n max_correct = correct\n is_best = True \n \n state = {\n 'accuracy' : max_correct.item()/len(test_loader.dataset),\n 'epoch' : epoch,\n 'model_state_dict' : model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'thresholds' : ann_thresholds,\n 'timesteps' : timesteps,\n 'leak_mem' : leak_mem,\n 'scaling_threshold' : scaling_threshold,\n 'activation' : activation\n }\n filename = 'snn_'+architecture.lower()+'_'+dataset.lower()+'_'+str(timesteps)+'_lr'+str(learning_rate)+'_'+str(batch_size)+'_cf16_28'+'.pth'\n torch.save(state,filename) \n \n if is_best:\n shutil.copyfile(filename, 'best_'+filename)\n\n f.write('\\nTest set: Loss: {:.6f}, Current: {:.2f}%, Best: {:.2f}%\\n'. format(\n total_loss/(batch_idx+1), \n 100. * correct.item() / len(test_loader.dataset),\n 100. 
* max_correct.item() / len(test_loader.dataset)\n )\n )\n\n \ndataset = 'CIFAR10' # {'CIFAR10', 'CIFAR100'}\nbatch_size = 16\nbatch_size1 = 512\nbatch_size_test = 64\ntimesteps = 48 #64\nupdate_interval = 48 #64\nnum_workers = 4\nleak_mem = .9901\nscaling_threshold = 1.0\nreset_threshold = 0.0\ndefault_threshold = 1.0\nactivation = 'Linear' # {'Linear', 'STDB'}\narchitecture = 'VGG9'#{'VGG9','VGG11'}\nprint_to_file = True\nlog_file = 'snn_'+architecture.lower()+'_'+str(update_interval)+'_'+str(batch_size)+'_4avgpool_cf16_28'+'.log'\npretrained = True\n\n# load pre-trained ANN if intend to train the SNN, change directory\npretrained_state = './vgg9_cifar10_ann_lr.1_.1by100_bs128_pixel_submit_ckpt.pth'\n\n\n# uncomment to load pre-trained SNN if intend to resume or just test\n#pretrained_state = './best_snn_vgg9_cifar10_48_lr0.0001_16_expnotbig_4*4_99.9_wd5e-4_acc89.94.pth'\n\n\nfind_thesholds = True\n\nfreeze_conv = False\nresume = False\n#resume = './snn_vgg5_cifar10_128_lr0.0002_32_samdct2_1e-4.pth'\nlearning_rate = 1e-4\nlr_adjust_interval = 5\nlr_decay_factor = 0.5 # {0.1, 0.5, 1.0}\nSTDP_alpha = 0.3\nSTDP_beta = 0.01\n\nif print_to_file:\n f = open(log_file, 'w', buffering=1)\nelse:\n f = sys.stdout\n\nconfigure('RUNS/'+log_file)\n\nnormalize = transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\ntransform_test = transforms.Compose([transforms.ToTensor(), normalize])\n\nif dataset == 'CIFAR10':\n trainset = datasets.CIFAR10(root = './cifar_data', train = True, download = True, transform = transform_train)\n testset = datasets.CIFAR10(root='./cifar_data', train=False, download=True, transform= transform_test)\n labels = 10\n\nelif dataset == 'CIFAR100':\n trainset = datasets.CIFAR100(root = './cifar_data', train = True, download = True, transform = transform_train)\n testset = datasets.CIFAR100(root='./cifar_data', train=False, download=True, transform= transform_test)\n labels = 100\n\nelif dataset == 'IMAGENET':\n labels = 1000\n traindir = os.path.join('/local/scratch/a/imagenet/imagenet2012/', 'train')\n valdir = os.path.join('/local/scratch/a/imagenet/imagenet2012/', 'val')\n trainset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n testset = datasets.ImageFolder(\n valdir,\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])) \n\n\ntrain_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)\ntrain_loader1 = DataLoader(trainset, batch_size=batch_size1, shuffle=True)\ntest_loader = DataLoader(testset, batch_size=batch_size_test, shuffle=False)\n\nif architecture[0:3].lower() == 'vgg':\n model = VGG_SNN_STDB_lin(vgg_name = architecture, activation = activation, labels=labels, timesteps=timesteps, leak_mem=leak_mem)\n \n\nif freeze_conv:\n for param in model.features.parameters():\n param.requires_grad = False\n\nmodel = nn.DataParallel(model) \n\n#copying weights from a pre-trained ann/snn\nif pretrained:\n \n if architecture[0:3].lower() == 'vgg':\n state = torch.load(pretrained_state, map_location='cpu')\n f.write('\\n Variables loaded from pretrained model:')\n \n for key, value in state.items():\n if isinstance(value, (int, float)):\n f.write('\\n {} : {}'.format(key, value))\n else:\n f.write('\\n {}: 
'.format(key))\n \n model.load_state_dict(state['model_state_dict'])\n\n \n \n \n\nif torch.cuda.is_available() and use_cuda:\n model.cuda()\n\noptimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=5e-4, amsgrad=False)\n#optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=5e-4, momentum=.9)\n\ncriterion = nn.CrossEntropyLoss()\nmax_correct = 0\nstart_epoch = 1\n\nf.write('\\nDataset :{} '.format(dataset))\nf.write('\\nBatch Size :{} '.format(batch_size))\nf.write('\\nTimesteps :{} '.format(timesteps))\nf.write('\\nUpdate Interval (time) :{} '.format(update_interval))\nf.write('\\nMembrane Leak :{} '.format(leak_mem))\nf.write('\\nScaling Threshold :{} '.format(scaling_threshold))\nf.write('\\nActivation :{} '.format(activation))\nf.write('\\nArchitecture :{} '.format(architecture))\nif pretrained:\n f.write('\\nPretrained Weight File :{} '.format(pretrained_state))\nelif resume:\n f.write('\\nResumed from state :{} '.format(resume))\nf.write('\\nStarting Learning Rate :{} '.format(learning_rate))\nf.write('\\nLR Adjust Interval :{} '.format(lr_adjust_interval))\nf.write('\\nLR Decay Factor :{} '.format(lr_decay_factor))\nf.write('\\nSTDP_alpha :{} '.format(STDP_alpha))\nf.write('\\nSTDP_beta :{} '.format(STDP_beta))\nf.write('\\nOptimizer :{} '.format(optimizer))\nf.write('\\nCriterion :{} '.format(criterion))\nf.write('\\n{}'.format(model))\n\nstart_time = datetime.datetime.now()\n\nann_thresholds = []\n\nif architecture.lower().startswith('vgg'):\n for l in model.module.features.named_children():\n \n if isinstance(l[1], nn.Conv2d):\n ann_thresholds.append(default_threshold)\n \n for l in model.module.classifier.named_children():\n \n if isinstance(l[1], nn.Linear):\n ann_thresholds.append(default_threshold)\n \n\n\n\n\n\n#VGG11 CIFAR100 4*4 stride2 small from pix 99.9 thresholds\n#ann_thresholds = [2.93, 1.72, 2.25, 0.85, 1.46, 1.39, 0.61, .94, 0.21, .51]\n\n\n#VGG9 CIFAR100 4*4 stride2 99.9 percentile thresholds\nann_thresholds = [2.72, 1.98, 1.98, .77, 1.56, 0.43, .71, .23, .71]\n\n\nthresholds_set = model.module.threshold_init(scaling_threshold=1.0, reset_threshold=reset_threshold, thresholds = ann_thresholds[:], default_threshold=default_threshold)\n\nf.write('\\n Threshold: {}'.format(thresholds_set))\n\n\n##Uncomment to find firing thresholds, else use pre-computed thresholds\n#if pretrained and find_thesholds:\n# find_threshold(ann_thresholds, train_loader1)\n# \n\nfor epoch in range(start_epoch, 25):\n \n train(epoch, train_loader)\n test(epoch, test_loader) \n\n#f.write('\\nHighest accuracy: {:.2f}%'.format(100*max_correct.item()/len(test_loader.dataset)))\n\n\n","sub_path":"main_cifar10_submit.py","file_name":"main_cifar10_submit.py","file_ext":"py","file_size_in_byte":16428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"392394374","text":"#Cody Wright\n#Card Class\n#9/28/2014\n#Robo Rally Project\nfrom enum.card import CARD\nfrom GUI.UIElement import UIElement\n\nclass Card(UIElement, object):\n\n #base definition of class\n def __init__(self,priority, *args):\n super(Card, self).__init__(*args) #This allows the Card class to inherit and run UIElement.__init__(*args) and absorb it. 
So you can do things like card.left!\n self._priority = priority\n self.cardEffect = self._SelectCard()\n \n\n #selects card based on priority number and selects card\n def _SelectCard(self):\n #------------------------------\n #card definitions\n #card -1 = Unkown/Error\n #card 0 = uturn\n #card 1 = rotate right\n #card 2 = rotate left\n #card 3 = backup\n #card 4 = move 1\n #card 5 = move 2\n #card 6 = move 3\n #-----------------------------\n #card type is 1 then its a uturn card\n if (self._priority>=0 and self._priority<=5):\n #returns uturn card\n return CARD.UTURN\n elif (self._priority>=6 and self._priority<=41):\n #tests if _priority number is odd/even and returns card\n if (self._priority % 2):\n return CARD.ROTATE_RIGHT\n else:\n return CARD.ROTATE_LEFT\n elif (self._priority>=42 and self._priority<=47):\n #returns backup card\n return CARD.BACKUP\n elif (self._priority>=48 and self._priority<=65):\n #returns move 1 card\n return CARD.MOVE_1\n elif (self._priority>=66 and self._priority<=77):\n #returns move 2 card\n return CARD.MOVE_2\n elif (self._priority>=78 and self._priority<=83):\n #returns move 3 card\n return CARD.MOVE_3\n else:\n return -1\n\n @property\n def value(self):\n return self._priority\n \n @value.setter\n def value(self, iValue):\n self._priority = iValue\n self._SelectCard()\n","sub_path":"Card.py","file_name":"Card.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"18499476","text":"import csv\r\nimport pandas\r\nimport numpy as np\r\nfrom sklearn.svm import SVR\r\nimport matplotlib.pyplot as plt\r\n\r\n# Initialize two empty lists\r\ndates = []\r\nopen_v = []\r\n#high_v = []\r\n#low_v = []\r\n#adj_v = []\r\n#v = []\r\n\r\ndef get_data(filename):\r\n #next(csvFileReader) # skipping column names\r\n colnames = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']\r\n csvFileReader = pandas.read_csv(filename, skiprows = [1], names=colnames)\r\n csvFileReader.readline()\r\n #dates = csvFileReader.Date.tolist();\r\n #open_v = csvFileReader.Open.tolist();\r\n for row in csvFileReader:\r\n dates.append(int(row[0].split('-')[0])) # Only gets day of the month which is at index 0\r\n open_v.append(float(row[1])) # Convert to float for more precision \r\n #high_v.append(float(row[2]))\r\n #low_v.append(float(row[3]))\r\n #close_v.append(float(row[4]))\r\n #adj_v.append(float(row[5]))\r\n #v.append(int(row[6]))\r\n return\r\n\r\ndef predict_price(dates, prices, x):\r\n\r\n reshaped_dates = np.reshape(dates, len(dates), 1) # converting to matrix of n X 1\r\n #dates = dates.reshape(1, -1)\r\n #svr_lin1 = SVR(kernel='linear', C=1e3) # 1e3 denotes 1000\r\n svr_lin2 = SVR(kernel='rbf', C=1e3)\r\n #svr_lin3 = SVR(kernel='poly', C=1e3)\r\n #svr_lin1.fit(reshaped_dates, prices)\r\n svr_lin2.fit(dates, prices)\r\n #svr_lin3.fit(dates, prices)\r\n \r\n # This plots the initial data points as black dots with the data label and plot\r\n # each of our models as well\r\n\r\n plt.scatter(reshaped_dates, open_v, color='black', label='Data') \r\n \r\n #plt.plot(open_v, svr_lin1.predict(open_v), color='red') # plotting the line made by linear kernel\r\n plt.plot(open_v, svr_lin2.predict(open_v), color='blue') # plotting the line made by linear kernel\r\n #plt.plot(open_v, svr_lin3.predict(open_v), color='green') # plotting the line made by linear kernel\r\n \r\n plt.xlabel('Date') # Setting the x-axis\r\n plt.ylabel('Price') # Setting the y-axis\r\n plt.title('Support Vector 
Regression') # Setting title\r\n plt.legend() # Add legend\r\n plt.show() # To display result on screen\r\n\r\n return svr_lin2.predict(x)[0]\r\n\r\nget_data('C://Users//theabhishekg//Desktop//ML FIN//YF.csv') \r\npredicted_price = predict_price(dates, open_v, 29)\r\n\r\nprint('The predicted prices are:', predicted_price)\r\n","sub_path":"svr.py","file_name":"svr.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"144089448","text":"\nimport time\nimport numpy\n\nfrom ..wrapper import Type, Value\nfrom .common import alarm, timeStamp\n\nfrom .scalar import ntwrappercommon\n\nclass NTNDArray(ntwrappercommon,numpy.ndarray):\n \"\"\"\n Augmented numpy.ndarray with additional attributes\n\n * .attrib - dictionary\n * .severity\n * .status\n * .timestamp - Seconds since 1 Jan 1970 UTC as a float\n * .raw_stamp - A tuple (seconds, nanoseconds)\n * .raw - The underlying :py:class:`p4p.Value`.\n \"\"\"\n Value = Value\n\n attrib = None\n def _store(self, value):\n ntwrappercommon._store(self, value)\n self.attrib = {}\n for elem in value.get('attribute', []):\n self.attrib[elem.name] = elem.value\n\n if elem.name=='ColorMode' and elem.value!=0:\n raise ValueError(\"I only know about ColorMode gray scale, not mode=%d\"%elem.value)\n\n shape = [D.size for D in value.dimension]\n shape.reverse()\n\n # in-place reshape! Isn't numpy fun\n self.shape = shape\n\n return self\n\n @staticmethod\n def buildType(extra=[]):\n \"\"\"Build type\n \"\"\"\n return Type([\n ('value', 'v'),\n ('alarm', alarm),\n ('timeStamp', timeStamp),\n ('dimension', ('S', None, [\n ('size', 'i'),\n ])),\n ('attribute', ('S', None, [\n ('name', 's'),\n ('value', 'v'),\n ])),\n ], id='epics:nt/NTNDArray:1.0')\n\n def __init__(self, **kws):\n self.type = self.buildType(**kws)\n\n #def wrap(self, value):\n #S, NS = divmod(time.time(), 1.0)\n #return Value(self.type, {\n #'value': A.ravel(),\n #'timeStamp': {\n #'secondsPastEpoch': S,\n #'nanoseconds': NS*1e9,\n #},\n #'attribute': [{'name':K, 'value':V} for K,V in value.attrib or {}],\n #'dimension': [{'size':N} for N in value.shape],\n #})\n\n @classmethod\n def unwrap(klass, value):\n \"\"\"Unwrap Value as NTNDArray\n \"\"\"\n return value.value.view(klass)._store(value)\n","sub_path":"src/p4p/nt/ndarray.py","file_name":"ndarray.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"532107499","text":"#!/usr/bin/env python3.5\n# encoding: utf-8\n# Created by leiwei on 2020/10/13 18:32\nimport re\n\n# 在re模块里,可以使用re.方法调用,还可以调用re.compile得到一个对象\n\ncontent = 'hello wrold good morining l love you baby good'\n\nm = re.search(r'good',content) # <_sre.SRE_Match object; span=(12, 16), match='good'>\npattern = re.compile(r'good')\nn = pattern.search(content)\nprint(n) # <_sre.SRE_Match object; span=(12, 16), match='good'>\n\n\n","sub_path":"day14_正则表达式/04-re.compile方法的使用.py","file_name":"04-re.compile方法的使用.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"623644481","text":"#!/usr/bin/env python3\n\nfrom advent_year import YEAR_NUMBER\nfrom command_opts import opt, main_entry\nfrom datetime import datetime, timedelta\nimport itertools\nimport json\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\nimport time\nimport utils\n\nALT_DATA_FILE = None\nSOURCE_CONTROL = 
\"p4\"\nDESC = \"\"\"\n### The suggested dail routine looks like this:\nadvent.py launch # This launches some useful links, and waits to make the next day\nadvent.py test cur # This tests the current day, keep going till it works!\nadvent.py run cur # This runs on the same data\n### And finally, when everything's done, some clean up, and make a comment to post\nadvent.py finish_day # This runs the following commands:\n # run_save cur, dl_day cur, get_index, gen_comment\n\"\"\"\n\nclass TestFailedException(Exception):\n pass\n\nclass Logger:\n def __init__(self):\n self.rows = []\n\n def __call__(self, value):\n self.show(value)\n\n def show(self, value, log_msg=True):\n global _print_catcher\n if _print_catcher is not None:\n _print_catcher.safe = True\n\n value = str(value)\n value = value.replace(\"\\r\\n\", \"\\n\")\n for cur in value.split(\"\\n\"):\n cur += \"\\n\"\n if log_msg:\n self.rows.append(cur)\n sys.stdout.write(cur)\n sys.stdout.flush()\n\n if _print_catcher is not None:\n _print_catcher.safe = False\n\n def copy_result_to_clipboard(self):\n if len(self.rows) > 0:\n import clipboard\n try:\n clipboard.copy(self.rows[-1].strip())\n self.show(\"# '\" + self.rows[-1].strip() + \"' copied to clipboard\", log_msg=False)\n except:\n self.show(\"# Unable to copy text to clipboard!\", log_msg=False)\n\n def save_to_file(self, filename):\n with open(filename, \"w\") as f:\n for cur in self.rows:\n f.write(cur)\n\n def compare_to_file(self, filename):\n is_good = True\n with open(filename) as f:\n for i, (before, after) in enumerate(itertools.zip_longest(f, self.rows)):\n if before != after:\n is_good = False\n before = \"(empty line)\" if before is None else before.rstrip('\\r\\n')\n after = \"(empty line)\" if after is None else after.rstrip('\\r\\n')\n self.show(error_msg(\"ERROR\") + f\": Line {i+1}: Got '{after}', expected: '{before}'\", log_msg=False)\n return is_good\n\n def decode_values(self, values):\n ret = values.replace(\"\\t\", \" \").split(\"\\n\")\n # Only remove empty lines at the start and end\n while len(ret) > 0 and len(ret[0].strip()) == 0:\n ret = ret[1:]\n while len(ret) > 0 and len(ret[-1].strip()) == 0:\n ret = ret[:-1]\n # Remove the common indent so extra spaces on the first line are left\n if len(ret) > 0:\n pad = min(len(x) - len(x.lstrip(' ')) for x in ret if len(x.strip()) > 0)\n if pad > 0:\n ret = [x[pad:] for x in ret]\n return ret\n\n def test(self, actual, expected):\n if str(actual) != str(expected):\n self.show(f\"Test returned {actual}, \" + error_msg(f\"expected {expected}\"))\n raise TestFailedException()\n else:\n self.show(f\"Test returned {actual}, expected {expected}\")\n\ndef error_msg(value):\n return \"\\x1b[97;101m\" + value + \"\\x1b[m\"\n\ndef edit_file(filename):\n if SOURCE_CONTROL is not None:\n if SOURCE_CONTROL == \"p4\":\n cmd = [\"p4\", \"edit\", filename]\n print(\"$ \" + \" \".join(cmd))\n subprocess.check_call(cmd)\n elif SOURCE_CONTROL == \"git\":\n pass\n else:\n raise Exception()\n\ndef add_file(filename):\n if SOURCE_CONTROL is not None:\n if SOURCE_CONTROL == \"p4\":\n cmd = [\"p4\", \"add\", filename]\n print(\"$ \" + \" \".join(cmd))\n subprocess.check_call(cmd)\n elif SOURCE_CONTROL == \"git\":\n pass\n else:\n raise Exception()\n\ndef revert_file(filename):\n if SOURCE_CONTROL is not None:\n if SOURCE_CONTROL == \"p4\":\n cmd = [\"p4\", \"revert\", filename]\n print(\"$ \" + \" \".join(cmd))\n subprocess.check_call(cmd)\n elif SOURCE_CONTROL == \"git\":\n pass\n else:\n raise Exception()\n\n@opt(\"Update all 
advent.py files\")\ndef update_selfs():\n with open(\"advent.py\", \"rb\") as f:\n source_data = f.read()\n\n for year in os.listdir(\"..\"):\n if re.search(\"^[0-9]{4}$\", year) is not None:\n year = os.path.join(\"..\", year)\n if os.path.isdir(year):\n dest = os.path.join(year, \"advent.py\")\n with open(dest, \"rb\") as f:\n dest_data = f.read()\n if dest_data == source_data:\n print(f\"{dest} is already up to date\")\n else:\n print(f\"Updating {dest}...\")\n edit_file(dest)\n with open(dest, \"wb\") as f:\n f.write(source_data)\n\n@opt(\"Finish off all items for a day\")\ndef finish_day():\n print(\"$ advent.py run_save cur\")\n run_save(\"cur\")\n print(\"$ advent.py dl_day cur\")\n dl_day(\"cur\")\n print(\"$ advent.py get_index\")\n get_index()\n print(\"$ advent.py gen_comment\")\n gen_comment()\n\n@opt(\"Use alt data file\")\ndef alt(file_number):\n global ALT_DATA_FILE\n ALT_DATA_FILE = int(file_number)\n\ndef get_input_file(helper, file_type=\"input\"):\n global ALT_DATA_FILE\n if ALT_DATA_FILE is None or ALT_DATA_FILE == 0:\n fn = f\"day_{helper.DAY_NUM:02d}_{file_type}.txt\"\n else:\n fn = f\"day_{helper.DAY_NUM:02d}_{file_type}_alt_{ALT_DATA_FILE:02d}.txt\"\n return os.path.join(\"Puzzles\", fn)\n\n@opt(\"Generate a comment based off scores\")\ndef gen_comment():\n max_day = 0\n for helper in utils.get_helpers():\n max_day = max(helper.DAY_NUM, max_day)\n \n scores_url = \"https://adventofcode.com/\" + YEAR_NUMBER + \"/leaderboard/self\"\n score_re = re.compile(r\"^ *(?P<day>\\d+) +\\d+:\\d+:\\d+ +(?P<score1>\\d+) +\\d+ +\\d+:\\d+:\\d+ +(?P<score2>\\d+) +\\d+ *$\")\n scores = get_page(scores_url)\n\n found = False\n day, score1, score2 = -1, -1, -1\n\n for cur in scores.split(\"\\n\"):\n m = score_re.search(cur)\n if m is not None:\n day, score1, score2 = int(m.group(\"day\")), int(m.group(\"score1\")), int(m.group(\"score2\"))\n if day == max_day:\n found = True\n break\n \n print(\"-\" * 70)\n\n if not found:\n print(\"Warning: Couldn't find day!\")\n print(\"\")\n \n msg = f\"Python, {score1} / {score2}\\n\"\n msg += \"\\n\"\n msg += f\"[github](https://github.com/seligman/aoc/blob/master/{YEAR_NUMBER}/Helpers/day_{max_day:02}.py)\\n\"\n\n print(msg)\n import clipboard\n clipboard.copy(msg)\n\n@opt(\"Launch website\")\ndef launch():\n import webbrowser\n urls = [\n \"https://www.reddit.com/r/adventofcode/\",\n \"https://adventofcode.com/\" + YEAR_NUMBER,\n ]\n\n for url in urls:\n print(\"Launch: \" + url)\n webbrowser.open(url, 2)\n\n if os.environ.get(\"TERM_PROGRAM\", \"\") == \"vscode\":\n print(\"Already running inside of VS Code\")\n else:\n print(\"Launching VS Code\")\n subprocess.check_call(\"code .\", shell=True)\n\n make_day_wait()\n\n@opt(\"Show other commands for a day\")\ndef show_others(helper_day):\n sys.path.insert(0, 'Helpers')\n for helper in get_helpers_id(helper_day):\n print(f\"## {helper.DAY_DESC}\")\n found = False\n for cur in dir(helper):\n if cur.startswith(\"other_\"):\n print(f\"{cur[6:]} - {getattr(helper, cur)(True, None)}\")\n found = True\n if not found:\n print(\"(No other commands found)\")\n\n@opt(\"Run other command for a day\")\ndef run_other(helper_day, command):\n sys.path.insert(0, 'Helpers')\n for helper in get_helpers_id(helper_day):\n found = False\n for cur in dir(helper):\n if cur == \"other_\" + command.lower():\n with open(get_input_file(helper)) as f:\n values = []\n for sub in f:\n values.append(sub.strip(\"\\r\\n\"))\n getattr(helper, cur)(False, values)\n found = True\n if not found:\n print(f\"## {helper.DAY_DESC}\")\n print(f\"ERROR: Unable to find '{command}'\")\n\n@opt(\"Make new day (Offline)\")\ndef make_day_offline(target_day=\"cur\"):\n make_day_helper(True, force_day=target_day)\n\n@opt(\"Make new day\")\ndef make_day(target_day=\"cur\"):\n make_day_helper(False, force_day=target_day)\n\n@opt(\"Make new day, after sleeping till midnight\")\ndef make_day_wait(target_day=\"cur\"):\n import sleeper\n import random\n resp = get_page(f\"https://adventofcode.com/{YEAR_NUMBER}\")\n m = re.search(\"var server_eta *= *(?P<eta>\\d+);\", resp)\n eta = int(m.group(\"eta\")) + random.randint(5, 10)\n if sleeper.sleep(str(eta), exit_at_end=False):\n make_day_helper(False, force_day=target_day)\n\n@opt(\"Load cookie from browser to cache\")\ndef save_cookie(browser=\"Chrome\", alt_id=\"\"):\n try:\n import browser_cookie3\n except:\n raise Exception(\"Unable to load 'browser_cookie3', please try running in a venv with requirements.txt\")\n\n browser = browser.lower().strip().replace(\" \", \"\")\n alt_id = -1 if alt_id == \"\" else int(alt_id)\n browsers = {\n \"chrome\": browser_cookie3.chrome,\n \"chromium\": browser_cookie3.chromium,\n \"opera\": browser_cookie3.opera,\n \"edge\": browser_cookie3.edge,\n \"firefox\": browser_cookie3.firefox,\n }\n if browser not in browsers:\n print(f\"Unknown choice of browser: {browser}, please use one of:\")\n for x in browsers:\n print(f\" {x}\")\n exit(1)\n fn = os.path.expanduser(os.path.join(\"~\", \".aoc_cookies.json\"))\n if os.path.isfile(fn):\n with open(fn) as f:\n data = json.load(f)\n else:\n data = {}\n\n cookie = browsers[browser](domain_name='adventofcode.com')\n cookie = ';'.join(f'{x.name}={x.value}' for x in cookie)\n data[str(alt_id)] = cookie\n\n with open(fn, \"w\") as f:\n data = json.dump(data, f, indent=2, sort_keys=True)\n f.write(\"\\n\")\n\n print(\"Done\")\n\ndef get_cookie():\n fn = os.path.expanduser(os.path.join(\"~\", \".aoc_cookies.json\"))\n if os.path.isfile(fn):\n if (datetime.utcnow() - datetime.fromtimestamp(os.path.getmtime(fn))) > timedelta(days=60):\n print(\"Warning, cookie is very old, removing it\")\n os.unlink(fn)\n if not os.path.isfile(fn):\n save_cookie(alt_id=\"-1\" if (ALT_DATA_FILE is None or ALT_DATA_FILE == 0) else ALT_DATA_FILE)\n\n with open(fn) as f:\n data = json.load(f)\n return data[str(-1 if (ALT_DATA_FILE is None or ALT_DATA_FILE == 0) else ALT_DATA_FILE)]\n\ndef make_day_helper(offline, force_day=None):\n if not offline:\n get_cookie()\n\n for cur in os.listdir(\"Puzzles\"):\n if \"DO_NOT_CHECK_THIS_FILE_IN\" in cur:\n raise Exception(\"You appear to be trying to rerun make_day before a day is done!\")\n\n if force_day is None or force_day.lower() == \"cur\":\n helper_day = 1\n while os.path.isfile(os.path.join(\"Helpers\", f\"day_{helper_day:02d}.py\")):\n helper_day += 1\n else:\n helper_day = int(force_day)\n\n files = [\n os.path.join(\"Puzzles\", f\"day_{helper_day:02d}_input.txt\"),\n os.path.join(\"Helpers\", f\"day_{helper_day:02d}.py\"),\n os.path.join(\"Puzzles\", f\"day_{helper_day:02d}.html\"),\n os.path.join(\"Puzzles\", f\"day_{helper_day:02d}.html.DO_NOT_CHECK_THIS_FILE_IN\"),\n ]\n\n for filename in files:\n if os.path.isfile(filename):\n print(f\"ERROR: '{filename}' already exists!\")\n return\n\n todo = \"TODO\"\n for pass_number in range(2):\n if pass_number == 1:\n todo = dl_day(str(helper_day))\n\n with open(os.path.join(\"Helpers\", \"example.txt\")) as f_src:\n with open(os.path.join(\"Helpers\", f\"day_{helper_day:02d}.py\"), \"w\") as f_dest:\n data = f_src.read()\n data = data.replace(\"NEED_DAY0_NUM\", 
f\"{helper_day:02d}\")\n data = data.replace(\"NEED_DAY_NUM\", str(helper_day))\n data = data.replace(\"NEED_DAY_DESC\", todo)\n f_dest.write(data)\n\n with open(os.path.join(\"Puzzles\", f\"day_{helper_day:02d}.html.DO_NOT_CHECK_THIS_FILE_IN\"), \"w\") as f:\n f.write(\"You need to rerun dl_day!\")\n\n if not offline:\n for filename in files:\n add_file(filename)\n\n for filename in files:\n if \"html\" not in filename:\n cmd = [\"code\", filename]\n if os.name == 'nt':\n cmd = [\"cmd\", \"/c\"] + cmd\n subprocess.check_call(cmd)\n\n print(f\"Created day #{helper_day}\")\n\n@opt(\"Show days\")\ndef show_days():\n for helper in utils.get_helpers():\n print(helper.DAY_DESC)\n\ndef get_helpers_id(helper_day):\n helper_day = helper_day.lower()\n if helper_day == \"all\":\n for helper in utils.get_helpers():\n yield helper\n else:\n valid = set()\n def parse_value(value):\n if value.lower() in {\"last\", \"latest\", \"cur\", \"now\"}:\n last = None\n for helper in utils.get_helpers():\n last = helper.DAY_NUM\n return last\n else:\n return int(value)\n\n for part in helper_day.split(\",\"):\n if \"-\" in part:\n part = part.split(\"-\")\n for x in range(parse_value(part[0]), parse_value(part[1]) + 1):\n valid.add(x)\n else:\n valid.add(parse_value(part))\n\n for helper in utils.get_helpers():\n if helper.DAY_NUM in valid:\n yield helper\n\n@opt(\"Test helper\")\ndef test(helper_day):\n good, bad = 0, 0\n\n sys.path.insert(0, 'Helpers')\n\n for helper in get_helpers_id(helper_day):\n print(f\"## {helper.DAY_DESC}\")\n\n try:\n helper.test(Logger())\n print(\"That worked!\")\n good += 1\n except TestFailedException:\n bad += 1\n print(error_msg(\" FAILURE! \"))\n except SystemExit as e:\n print(error_msg(f\" exit({e}) called! \"))\n raise\n except:\n import traceback\n traceback.print_exc()\n exit(1)\n\n if good + bad > 1:\n print(\"# \" + \"-\" * 60)\n\n print(f\"Done, {good} worked, {bad} failed\")\n if bad != 0:\n print(error_msg(\" THERE WERE PROBLEMS \"))\n\n_print_catcher = None\nclass PrintCatcher:\n def __init__(self):\n self.safe = False\n self.old_stdout = sys.stdout\n self.raw_used = False\n sys.stdout = self\n\n def write(self, value):\n if not self.safe:\n self.raw_used = True\n self.old_stdout.write(value)\n\n def flush(self):\n self.old_stdout.flush()\n\n def undo(self):\n sys.stdout = self.old_stdout\n return None\n\n@opt(\"Run and time duration\")\ndef run_time(helper_day):\n start = datetime.utcnow()\n run(helper_day)\n end = datetime.utcnow()\n secs = (end - start).total_seconds()\n if secs >= 90:\n pretty = f\"{secs / 60:0.2f} minutes. That's a long time!\"\n elif secs >= 15:\n pretty = f\"{secs:0.2f} seconds. 
That's a long time!\"\n elif secs >= 10:\n pretty = f\"{secs:0.2f} seconds.\"\n elif secs >= 0.01:\n pretty = f\"{int(secs * 1000):d} milliseconds.\"\n else:\n pretty = f\"no time.\"\n safe_print(f\"Done, that took {pretty}\")\n\n@opt(\"Run helper\")\ndef run(helper_day):\n global _print_catcher\n _print_catcher = PrintCatcher()\n run_helper(helper_day, False)\n if _print_catcher.raw_used:\n safe_print(\"WARNING: Raw 'print' used somewhere!\")\n _print_catcher = _print_catcher.undo() # pylint: disable=assignment-from-none\n\n@opt(\"Run helper and save output as correct\")\ndef run_save(helper_day):\n run_helper(helper_day, True)\n\ndef safe_print(value):\n global _print_catcher\n if _print_catcher is not None:\n _print_catcher.safe = True\n print(value)\n if _print_catcher is not None:\n _print_catcher.safe = False\n\ndef run_helper(helper_day, save):\n sys.path.insert(0, 'Helpers')\n\n if helper_day == \"cur\" and not save:\n copy_result = True\n else:\n copy_result = False\n\n passed = 0\n failed = []\n summary = []\n cached_runs = {\"year\": YEAR_NUMBER}\n if os.path.isfile(os.path.join(tempfile.gettempdir(), \"aoc_run_cache.json\")):\n try:\n with open(os.path.join(tempfile.gettempdir(), \"aoc_run_cache.json\")) as f:\n cached_runs = json.load(f)\n if cached_runs[\"year\"] != YEAR_NUMBER:\n cached_runs = {\"year\": YEAR_NUMBER}\n except:\n cached_runs = {\"year\": YEAR_NUMBER}\n cached_runs['changed'] = False\n\n max_len = 0\n for helper in get_helpers_id(helper_day):\n max_len = max(max_len, len(helper.DAY_DESC))\n\n for helper in get_helpers_id(helper_day):\n safe_print(f\"## {helper.DAY_DESC}\")\n with open(get_input_file(helper)) as f:\n values = []\n for cur in f:\n values.append(cur.strip(\"\\r\\n\"))\n log = Logger()\n start = datetime.utcnow()\n real_run = True\n if save and cached_runs.get(str(helper.DAY_NUM), {}).get(\"hash\", \"--\") == helper.hash:\n log.rows = cached_runs[str(helper.DAY_NUM)][\"rows\"]\n for row in log.rows:\n print(row.rstrip(\"\\r\\n\"))\n real_run = False\n else:\n helper.run(log, values)\n cached_runs[str(helper.DAY_NUM)] = {\"hash\": helper.hash, \"rows\": log.rows}\n cached_runs[\"changed\"] = True\n finish = datetime.utcnow()\n secs = (finish - start).total_seconds()\n if real_run:\n if secs >= 90:\n pretty = f\"{secs / 60:0.2f} minutes to complete. That's a long time!\"\n elif secs >= 15:\n pretty = f\"{secs:0.2f} seconds to complete. That's a long time!\"\n elif secs >= 10:\n pretty = f\"{secs:0.2f} seconds to complete.\"\n elif secs >= 0.01:\n pretty = f\"{int(secs * 1000):d} milliseconds to complete.\"\n else:\n pretty = f\"no time to complete.\"\n safe_print(f\"# That took {pretty}\")\n\n filename = get_input_file(helper, file_type=\"expect\")\n if save:\n info = \"Saved output.\"\n if copy_result:\n log.copy_result_to_clipboard()\n\n if os.path.isfile(filename):\n edit_file(filename)\n log.save_to_file(filename)\n else:\n log.save_to_file(filename)\n add_file(filename)\n else:\n if os.path.isfile(filename):\n if log.compare_to_file(filename):\n info = \"Good\"\n safe_print(\"# Got expected output!\")\n passed += 1\n else:\n info = error_msg(\"ERROR\")\n safe_print(\"# \" + error_msg(\" ERROR: Expected output doesn't match! 
\"))\n failed.append(f\"## {helper.DAY_DESC} FAILED!\")\n else:\n info = \"Unknown\"\n safe_print(\"# No expected output to check\")\n temp = helper.DAY_DESC + \":\" + \" \" * (max_len - len(helper.DAY_DESC))\n summary.append(f\"{temp} {int(secs * 1000):6d}ms {error_msg('!') if secs > 15 else ' '} {info}\")\n\n if cached_runs[\"changed\"]:\n with open(os.path.join(tempfile.gettempdir(), \"aoc_run_cache.json\"), \"wt\") as f:\n json.dump(cached_runs, f, indent=2, sort_keys=True)\n f.write(\"\\n\")\n\n if passed + len(failed) > 1:\n safe_print(\"# \" + \"-\" * 75)\n for cur in summary:\n safe_print(cur)\n safe_print(\"# \" + \"-\" * 75)\n safe_print(f\"Passed: {passed}\")\n if len(failed) > 0:\n safe_print(f\"# \" + error_msg(f\" ERROR: Failed: {len(failed)} \"))\n for cur in failed:\n safe_print(cur)\n\n@opt(\"Make a stand alone version of the day\")\ndef make_demo(helper_day):\n sys.path.insert(0, 'Helpers')\n\n blanks = 0\n for helper in get_helpers_id(helper_day):\n filename = os.path.join(\"Helpers\", f\"day_{helper.DAY_NUM:02d}.py\")\n with open(filename) as f:\n for cur in f:\n cur = cur.strip(\"\\r\\n\")\n print(cur)\n if cur.strip() == \"\":\n blanks += 1\n else:\n blanks = 0\n\n while blanks < 2:\n print(\"\")\n blanks += 1\n\n print('# These are the simple versions of a more complex harness')\n print('')\n print('class Logger:')\n print(' def __init__(self):')\n print(' pass')\n print('')\n print(' def show(self, value):')\n print(' print(value)')\n print('')\n print('')\n print('def main():')\n print(' import os')\n print(' import sys')\n print(' if (sys.version_info.major, sys.version_info.minor) != (2, 7):')\n print(' print(\"WARNING: I expect to run on Python 2.7, no clue what\\'s about to happen!\")')\n print(' filename = f\"day_{DAY_NUM:02d}_input.txt\"')\n print(' if not os.path.isfile(filename):')\n print(' print(f\"ERROR: Need \\'{filename}\\' puzzle input to continue\")')\n print(' return')\n print(' with open(filename) as f:')\n print(' values = []')\n print(' for line in f:')\n print(' values.append(line.strip(\"\\\\r\\\\n\"))')\n print(' print(f\"## Running \\'{DAY_DESC}\\'...\")')\n print(' run(Logger(), values)')\n print(' print(\"All done!\")')\n print('')\n print('')\n print('if __name__ == \"__main__\":')\n print(' main()')\n print('')\n\ndef get_header_footer():\n header = textwrap.dedent(\"\"\"\n \n \n \n \n Advent of Code \"\"\" + YEAR_NUMBER + \"\"\"\n \n \n \n \n \n \n
    \n \"\"\").strip()\n\n footer = \"\"\"
    \"\"\"\n\n return header, footer\n\ndef get_page(url):\n import urllib.request\n cookie = get_cookie()\n\n req = urllib.request.Request(\n url, \n headers={\n 'Cookie': cookie,\n 'User-Agent': 'github.com/seligman/aoc by scott.seligman@gmail.com',\n },\n )\n resp = urllib.request.urlopen(req)\n resp = resp.read().decode(\"utf-8\")\n resp = resp.encode('ascii', 'xmlcharrefreplace')\n resp = resp.decode(\"utf-8\")\n return resp\n\n@opt(\"Download Index\")\ndef get_index():\n resp = get_page(f\"https://adventofcode.com/{YEAR_NUMBER}\")\n\n resp = re.sub(\"^.*<main>\", \"\", resp, flags=re.DOTALL)\n resp = re.sub(\"</main>.*$\", \"\", resp, flags=re.DOTALL)\n\n for i in range(30, 0, -1):\n resp = resp.replace(f\"/{YEAR_NUMBER}/day/{i}\", f\"day_{i:02d}.html\")\n\n header, footer = get_header_footer()\n\n edit_file(os.path.join(\"Puzzles\", \"index.html\"))\n\n with open(os.path.join(\"Puzzles\", \"index.html\"), \"wt\", encoding=\"utf-8\") as f:\n f.write(header + resp + footer)\n\n print(\"Wrote out index\")\n\n@opt(\"Download Day\")\ndef dl_day(helper_day, input_only=\"no\"):\n input_only = input_only.lower() in {\"yes\", \"true\", \"y\"}\n ret = \"\"\n already_downloaded = False\n\n for helper in get_helpers_id(helper_day):\n if already_downloaded:\n time.sleep(0.250)\n already_downloaded = True\n helper_day = helper.DAY_NUM\n\n if not input_only:\n bad_file = os.path.join(\"Puzzles\", f\"day_{helper_day:02d}.html.DO_NOT_CHECK_THIS_FILE_IN\")\n if os.path.isfile(bad_file):\n os.unlink(bad_file)\n revert_file(bad_file)\n\n if ALT_DATA_FILE is None or ALT_DATA_FILE == 0:\n filename = os.path.join(\"Puzzles\", f\"day_{helper_day:02d}_input.txt\")\n else:\n filename = os.path.join(\"Puzzles\", f\"day_{helper_day:02d}_input_alt_{ALT_DATA_FILE:02d}.txt\")\n\n if not os.path.isfile(filename):\n resp = get_page(f\"https://adventofcode.com/{YEAR_NUMBER}/day/{helper_day}/input\")\n\n with open(filename, \"wt\", encoding=\"utf-8\") as f:\n f.write(resp)\n\n print(f\"Wrote out puzzle input for day #{helper_day}\")\n\n if not input_only:\n resp = get_page(f\"https://adventofcode.com/{YEAR_NUMBER}/day/{helper_day}\")\n\n resp = re.sub(\"^.*<main>\", \"\", resp, flags=re.DOTALL)\n resp = re.sub(\"</main>.*$\", \"\", resp, flags=re.DOTALL)\n resp = re.sub('<p>At this point, you should return to your advent calendar and try another puzzle.</p>.+', \"\", resp, flags=(re.MULTILINE | re.DOTALL))\n\n header, footer = get_header_footer()\n\n with open(os.path.join(\"Puzzles\", f\"day_{helper_day:02d}.html\"), \"wt\", encoding=\"utf-8\") as f:\n f.write(header + resp + footer)\n\n print(f\"Wrote out puzzle for day #{helper_day}\")\n\n m = re.search(\"<h2>--- (.*?) ---</h2>\", resp)\n if m:\n ret = m.group(1)\n ret = ret.replace(\"&gt;\", \">\")\n ret = ret.replace(\"&lt;\", \"<\")\n ret = ret.replace(\"&amp;\", \"&\")\n\n return ret\n\n@opt(\"Compare expected results with website\")\ndef compare_results(helper_day):\n already_downloaded = False\n\n for helper in get_helpers_id(helper_day):\n print(f\"## {helper.DAY_DESC}\")\n if already_downloaded:\n time.sleep(0.250)\n\n filename = get_input_file(helper, file_type=\"expect\")\n data = []\n if os.path.isfile(filename):\n with open(filename) as f:\n for row in f:\n row = row.strip()\n if \" \" not in row:\n data.append(row)\n\n resp = get_page(f\"https://adventofcode.com/{YEAR_NUMBER}/day/{helper.DAY_NUM}\")\n expecteds = []\n for m in re.finditer(f'Your puzzle answer was <code>(?P<answer>.*?)</code>', resp):\n expecteds.append(m.group(\"answer\"))\n\n all_good = True\n for i, (current, expected) in enumerate(itertools.zip_longest(data, expecteds)):\n current = \"(nothing)\" if current is None else current.rstrip(\"\\r\\n\")\n expected = \"(nothing)\" if expected is None else expected.rstrip(\"\\r\\n\")\n if current != expected:\n print(error_msg(\"ERROR: \") + f\"Have '{current}', but website reports '{expected}' for line {i+1}\")\n all_good = False\n if all_good:\n print(\"(all results are good)\")\n\nif __name__ == \"__main__\":\n main_entry('func', program_desc=DESC)\n","sub_path":"2019/advent.py","file_name":"advent.py","file_ext":"py","file_size_in_byte":27548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"79517782","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom ImagePreprocess import *\nfrom Methods import *\nfrom Denoise import *\nimport argparse\nimport os\nfrom skimage.io import imread, imsave\nfrom skimage.measure import compare_psnr\n\n# Here, we can specify arguments. 
Using this, the code is user-friendly\nparser = argparse.ArgumentParser()\nparser.add_argument('--sigma', default=1, type=float, help='sigma')\nparser.add_argument('--adam_beta', default=0.5, type=float, help='adam penalty parameter')\nparser.add_argument('--reg_lambda', default=1, type=float, help='regularization parameter')\nparser.add_argument('--outer_iters', default=200, type=int, help='max outer iteration times')\nparser.add_argument('--inner_iters', default=50, type=int, help='max iterations for step1')\nparser.add_argument('--inner_iters2', default=5, type=int, help='max iterations for step2')\nparser.add_argument('--multi_ch', default=1, type=int, help='color image or grey image')\nparser.add_argument('--noise_level', default=25, type=int, help='noise level')\nparser.add_argument('--method', default='ADMM', type=str, help='method')\nparser.add_argument('--denoise_engine', default='bilateral', type=str, help='denoising engine')\nparser.add_argument('--test_dir', default='data/Set5', type=str, help='Folder of test dataset')\nparser.add_argument('--result_dir', default='results/Set5', type=str, help='Folder of results')\nparser.add_argument('--show_figures', default=0, type=int, help='show the noisy/denoised figures')\nparser.add_argument('--save_result', default=0, type=int, help='save the denoised image')\nargs = parser.parse_args()\n\n\ndef solver(orig_img, input_img, forward, forward_T):\n '''\n Input:\n -orig_img: true image\n -input_img: contaminated image\n -forward: forward operator\n -forward_T: transpose of the forward operator\n\n return:\n -recon_img: reconstructed img\n -recon_img: psnr of the reconstructed img\n '''\n if args.method == 'ADMM':\n recon_img, recon_PSNR = ADMM(orig_img, input_img, args.denoise_engine, args.noise_level/255, forward, forward_T,\n args.sigma, args.inner_iters, args.outer_iters, args.adam_beta, args.multi_ch)\n elif args.method == 'RED_SGD':\n recon_img, recon_PSNR = RED_SteepestGD(orig_img, input_img, args.reg_lambda, args.denoise_engine, args.noise_level/255,\n forward, forward_T, args.sigma, args.outer_iters, args.multi_ch)\n elif args.method == 'RED_ADMM':\n recon_img, recon_PSNR = RED_ADMM(orig_img, input_img, args.reg_lambda, args.denoise_engine, args.noise_level/255,\n forward, forward_T, args.sigma, args.inner_iters, args.inner_iters2,\n args.outer_iters, args.adam_beta, args.multi_ch)\n\n elif args.method == 'RED_FP':\n recon_img, recon_PSNR = RED_FP(orig_img, input_img, args.reg_lambda, args.denoise_engine, args.noise_level/255,\n forward, forward_T, args.sigma, args.inner_iters, args.outer_iters, args.multi_ch)\n else:\n print('Method Not Found')\n return 0\n\n return recon_img, recon_PSNR\n\n\nif args.save_result == 1:\n target_path = os.path.join(args.result_dir, args.denoise_engine, args.method)\n if not os.path.exists(target_path):\n os.makedirs(target_path)\n # print(\"Directory \", target_path, \" Created \")\n # else:\n # print(\"Directory \", target_path, \" already exists\")\n\npsnr_plain = [] # results for using only the denoising algorithm\npsnr = [] # results for P3 or RED\npsnr_noisy = []\n\n\n# test on images\nfor img_name in os.listdir(os.path.join(args.test_dir)):\n orig_img = np.array(imread(os.path.join(args.test_dir, img_name)), dtype=np.float32) / 255.0\n\n # add noise\n np.random.seed(seed=0)\n noisy_img = orig_img + np.random.normal(0, args.noise_level/255, orig_img.shape).astype(np.float32) # add noise\n\n # construct nosiy image\n problem = Denoising()\n input_img = problem.forward(noisy_img)\n\n # only use 
the denoising engine\n denoised_img = denoise(np.clip(noisy_img, 0, 1), args.noise_level/255, denoise_engine=args.denoise_engine,\n multi_ch=args.multi_ch)\n\n # using P3 or RED\n recon_img, recon_PSNR = solver(orig_img, noisy_img, problem.forward, problem.forward_T)\n\n # print('For %s, the PSNR of the reconstruted image is %.4f' % (img_name, recon_PSNR))\n\n psnr_plain.append(compare_psnr(orig_img, denoised_img))\n psnr.append(recon_PSNR)\n psnr_noisy.append(compare_psnr(orig_img, noisy_img))\n\n # plot\n if args.show_figures == 1:\n plt.subplot(121)\n plt.imshow(noisy_img, cmap='gray')\n plt.title('noisy image')\n plt.subplot(122)\n plt.imshow(recon_img, cmap='gray')\n plt.title('reconstructed image')\n plt.show()\n\n # save images\n if args.save_result == 1:\n imsave(os.path.join(target_path, img_name), np.clip(recon_img, 0, 1))\n\n# save psnr results\nif args.save_result == 1:\n np.savetxt(os.path.join(target_path, 'PSNR.txt'), psnr, delimiter=',', fmt='%.7f')\n np.savetxt(os.path.join(target_path, 'PSNR_plain.txt'), psnr_plain, delimiter=',', fmt='%.7f')\n np.savetxt(os.path.join(target_path, 'PSNR_noisy.txt'), psnr_noisy, delimiter=',', fmt='%.7f')\n\n# save psnr by choosing different parameters using different schemes\nif args.method == 'ADMM':\n print(args.sigma, args.adam_beta, ' '.join('%.4f' % x for x in psnr))\nelif args.method == 'RED_SGD':\n print(args.reg_lambda, args.sigma, ' '.join('%.4f' % x for x in psnr))\nelif args.method == 'RED_ADMM':\n print(args.reg_lambda, args.sigma, ' '.join('%.4f' % x for x in psnr))\n\nelif args.method == 'RED_FP':\n print(args.reg_lambda, args.sigma, ' '.join('%.4f' % x for x in psnr))\nelse:\n print('Method Not Found')\n\n","sub_path":"663/denoise_test.py","file_name":"denoise_test.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"43944450","text":"from libcn.libcn import CypherNode\ndef exec():\n# Defined in a non default config file location\n config = \"/path/to/config/file/cn.conf\"\n cn = CypherNode(configfile=config)\n# Defined with object arguments\n# cn = CypherNode(cnid='002', key='6aeryghaerysertyuhsretytse1xstr+6451lkszDFG456584sdz', url='https://url:2009/v0')\n chain = cn.getblockchaininfo()['chain']\n print(chain)\n balance = cn.getbalancebyxpublabel('Some_receiving_wallet')['balance']\n sat = float(balance) * 100000000\n print(\"{} Satoshi\".format(int(sat)))\n balance = format(balance, '.8f')\n print(\"{} ₿\".format(balance))\n# waddr = cn.getactivewatches()\n# bbh = cn.getbestblockhash()\nexec()\n","sub_path":"exemple2.py","file_name":"exemple2.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"579779053","text":"import tensorflow as tf\nconfig = tf.ConfigProto(allow_soft_placement = True,\n log_device_placement=False)\nconfig.gpu_options.allow_growth = True\nimport keras\nkeras.backend.set_session(tf.Session(config=config))\ndel tf, config, keras\n\nimport warnings\nwarnings.filterwarnings('ignore')\ndel warnings\n\nCN = 256\nIMGSZ = (64, 64)\nPATCH = 48\nBATCH = 128\nMINI_EPOCH = 6400 // BATCH\nEPOCH = 5000\n\nBASE = None\nSAVETO = 'img 64-48, ratio 100x, wh 1.5-3.5, batch 128, reg 20.h5'\n\n","sub_path":"autoencoder-wta/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"416792504","text":"#Programa 
para obtener datos:\n\nimport serial\nimport time\nimport threading\nimport Queue as queue\n\nser = serial.Serial()\nusbport = '/dev/ttyACM0'\nser = serial.Serial(usbport,9600, timeout=1)\n\n\n\t\n\nwhile True: \t\n\ttime.sleep(2)\n\t\n\tvar = raw_input(\"Hola, soy Arduino. Introduce 1 para leer infrarrojos y 2 para leer ultrasonidos /n\")\n\t\n\tif (var =='1'):\n\t\t\n\t\tser.write(b'1')\n\t\tdata = ser.read(100)\n\t\tprint(data.decode('ascii', errors='replace'))\n\t\t\n\tif (var =='2'):\t\n\t\tser.write(b'2')\n\t\tdata = ser.read(100)\n\t\tprint(data.decode('ascii', errors='replace'))\n\t\t\nser.close()\n\t\t\n\t\n\t\n","sub_path":"docs/AVANCES/12OCT19/TELEOPERADORBASICO2.py","file_name":"TELEOPERADORBASICO2.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"19285589","text":"# -*- coding: utf-8 -*-\n\"\"\" The script that replicates the results in Alé Chilet, J. (2016).\nThe script contains the replication of the demand estimation using the following models.\n\n---------------------------------------------------------------------\n Outline of the script:\n\n 1. Simultaneous demand function estimation\n 1.1 Demand function estimation with SUR\n 1.2 Demand function estimation with SURDiff\n\n 2. Panel OLS estimation\n 2.1 PanelOLS\n 2.2 PanelOLSDiff\n\n 3. Pooled OLS estimation\n 3.1 PooledOLS\n 3.2 PooledOLSDiff\n\n 4. Tables and plots\n---------------------------------------------------------------------\nAlé Chilet, J. (2016). Gradually Rebuilding a Relationship : The Emergence of Collusion in Retail Pharmacies in Chile, 1–65. Retrieved from https://kelley.iu.edu/doc/bloomington/business-economics-and-public-policy/seminars/fall-2017/gradual-collusion.pdf\n\nTodo:\n * forecast by each type of regression\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport os\n# from linearmodels import PanelOLS\n# from linearmodels import PooledOLS\n# from linearmodels import RandomEffects\n# from linearmodels import FirstDifferenceOLS\n# from linearmodels.datasets import jobtraining\n# from statsmodels.datasets import grunfeld\n# import matplotlib.pyplot as plt\n# import seaborn as sns\n#\n# import statsmodels.api as sm\n# import statsmodels.formula.api as smf\n\nif (os.name == 'posix'):\n path = \"/Users/haoyu/Documents/GitHub/DrugStorePriceCollusion/data\"\nelse:\n path = \"C:\\\\Users\\\\Jasmine\\\\Documents\\\\Github\\\\DrugStorePriceCollusion\\\\data\"\nos.chdir(os.path.join(path,\"..\",\"pycode\"))\n\nfrom DrugStoreCollusion import *\nDSC = DrugStoreCollusion(path)\nattr = DSC.attr\nattr.ATCL4 = attr.ATCL6.apply(lambda x:x[0:4])\nattr.ATCL5 = attr.ATCL6.apply(lambda x:x[0:5])\n\nlist_to_keep = attr.columns.tolist()\nlist_to_keep = [i for i in list_to_keep if i != 'Unnamed: 0' ]\nattr = attr[list_to_keep]\n\ndata_symmetry = pd.DataFrame()\ndata_full = pd.DataFrame()\nSUR_result = pd.DataFrame()\nSUR_stderr = pd.DataFrame()\n\n\"\"\"\n---------------------------------------------------------------------\n 1. Simultaneous demand function estimation\n---------------------------------------------------------------------\n 1.1 Demand function estimation with SUR\n---------------------------------------------------------------------\n\"\"\"\nfor each_ATC,each_df in DSC.attr.groupby(\"ATCL6\"):\n try :\n index_list = each_df.index.values\n if ((42 in index_list)|(137 in index_list)|(202 in index_list)):\n continue\n else:\n result = DSC.SUR(index_list)\n SUR_result = SUR_result.append(result.params)\n SUR_stderr = SUR_stderr.append(result.std_errors)\n except:\n print(each_ATC)\n\"\"\"\n---------------------------------------------------------------------\n 1.2 Demand function estimation with SURDiff\n---------------------------------------------------------------------\n\"\"\"\nSURDiff_result = pd.DataFrame()\nfor each_ATC,each_df in DSC.attr.groupby(\"ATCL6\"):\n try :\n index_list = each_df.index.values\n if ((42 in index_list)|(137 in index_list)|(202 in index_list)):\n continue\n else:\n result = DSC.SURDiff(index_list)\n SURDiff_result = SURDiff_result.append(result.params)\n except:\n print(each_ATC)\n\"\"\"\n---------------------------------------------------------------------\n 2. Panel OLS estimation\n---------------------------------------------------------------------\n 2.1 PanelOLS\n---------------------------------------------------------------------\n\"\"\"\n\npanelols_result = pd.DataFrame(index=DSC.attr.ATCL6.unique(),columns=['beta_sb_cv','beta_fa_cv','beta_sb_fa','beta_cv_fa','beta_cv_sb','beta_fa_sb'])\n\nfor each_ATC,each_df in DSC.attr.groupby(\"ATCL6\"):\n try :\n index_list = each_df.index.values\n if ((42 in index_list)|(137 in index_list)|(202 in index_list)):\n continue\n else:\n result = DSC.PanelOLS(index_list)\n panelols_result.loc[each_ATC,'beta_sb_cv'] = result[0].params[0]\n panelols_result.loc[each_ATC,'beta_fa_cv'] = result[0].params[1]\n panelols_result.loc[each_ATC,'beta_sb_fa'] = result[1].params[0]\n panelols_result.loc[each_ATC,'beta_cv_fa'] = result[1].params[1]\n panelols_result.loc[each_ATC,'beta_cv_sb'] = result[2].params[0]\n panelols_result.loc[each_ATC,'beta_fa_sb'] = result[2].params[1]\n except:\n print(each_ATC)\n\npanelols_result.replace(0,np.nan,inplace=True)\n\n\"\"\"\n---------------------------------------------------------------------\n 2.2 PanelOLSDiff\n---------------------------------------------------------------------\n\"\"\"\n\npanelols_diff = pd.DataFrame(index=DSC.attr.ATCL6.unique(),columns=['beta_sb_cv','beta_fa_cv','beta_sb_fa','beta_cv_fa','beta_cv_sb','beta_fa_sb'])\n\nfor each_ATC,each_df in DSC.attr.groupby(\"ATCL6\"):\n try :\n index_list = each_df.index.values\n if ((42 in index_list)|(137 in index_list)|(202 in index_list)):\n continue\n else:\n result = DSC.PanelOLSDiff(index_list)\n panelols_diff.loc[each_ATC,'beta_sb_cv'] = result[0].params[0]\n panelols_diff.loc[each_ATC,'beta_fa_cv'] = result[0].params[1]\n panelols_diff.loc[each_ATC,'beta_sb_fa'] = result[1].params[0]\n panelols_diff.loc[each_ATC,'beta_cv_fa'] = result[1].params[1]\n panelols_diff.loc[each_ATC,'beta_cv_sb'] = result[2].params[0]\n panelols_diff.loc[each_ATC,'beta_fa_sb'] = result[2].params[1]\n except:\n print(each_ATC)\n\npanelols_diff.replace(0,np.nan,inplace=True)\n\n\"\"\"\n---------------------------------------------------------------------\n 3. 
Pooled OLS estimation\n---------------------------------------------------------------------\n 3.1 PooledOLS\n---------------------------------------------------------------------\n\"\"\"\n\nOLS_result = pd.DataFrame()\nattr = DSC.attr\nattr['estimation_3'] = np.nan\nfor each_ATC,each_df in DSC.attr.groupby(\"ATCL6\"):\n try :\n index_list = each_df.index.values\n if ((42 in index_list)|(137 in index_list)|(202 in index_list)):\n continue\n else:\n result = DSC.symmetricOLS(index_list)\n OLS_result = OLS_result.append(result.params,ignore_index=True)\n attr.loc[attr.ATCL6 == each_ATC,'estimation_3'] = result.params.P\n except:\n print(each_ATC)\n\n\n\"\"\"\n---------------------------------------------------------------------\n 3.2 PooledOLSDiff\n---------------------------------------------------------------------\n\"\"\"\n\nOLS_result_diff = pd.DataFrame()\nattr = DSC.attr\nfor each_ATC,each_df in DSC.attr.groupby(\"ATCL6\"):\n try :\n index_list = each_df.index.values\n if ((42 in index_list)|(137 in index_list)|(202 in index_list)):\n continue\n else:\n result = DSC.symmetricOLSDiff(index_list)\n OLS_result_diff = OLS_result_diff.append(result.params,ignore_index=True)\n attr.loc[attr.ATCL6 == each_ATC,'estimation_3'] = result.params.P\n except:\n print(each_ATC)\n\n\n\n# attr.to_csv(os.path.join(path,\"estimation_3.csv\"))\n\n\"\"\"\n---------------------------------------------------------------------\n 4. Tables and plots\n---------------------------------------------------------------------\n 4.1 Tables\n---------------------------------------------------------------------\n\"\"\"\n# SUR table\n\nSUR_table = pd.concat([SUR_result[['eq1_P_fa_cv','eq1_P_sb_cv','eq3_P_cv_fa','eq3_P_sb_fa','eq2_P_cv_sb','eq2_P_fa_sb']].median(),(SUR_result[['eq1_P_fa_cv','eq1_P_sb_cv','eq3_P_cv_fa','eq3_P_sb_fa','eq2_P_cv_sb','eq2_P_fa_sb']]>0).sum()],axis=1)\nSUR_table.columns=['SUR:median','SUR:positive count']\n\nSURDiff_table = pd.concat([SURDiff_result[['eq1_P_fa_cv','eq1_P_sb_cv','eq3_P_cv_fa','eq3_P_sb_fa','eq2_P_cv_sb','eq2_P_fa_sb']].median(),(SURDiff_result[['eq1_P_fa_cv','eq1_P_sb_cv','eq3_P_cv_fa','eq3_P_sb_fa','eq2_P_cv_sb','eq2_P_fa_sb']]>0).sum()],axis=1)\nSURDiff_table.columns=['SURDiff:median','SURDiff:positive count']\n\nSUR_tab = pd.concat([SUR_table,SURDiff_table],axis=1)\nSUR_tab.index = ['beta_fa_cv','beta_sb_cv','beta_cv_fa','beta_sb_fa','beta_cv_sb','beta_fa_sb']\n# Panel OLS table\n\nPanelOLS_table = pd.concat([panelols_result.median(),(panelols_result>0).sum()],axis=1)\nPanelOLS_table.columns=['PanelOLS:median','PanelOLS:positive count']\n\nPanelOLSDiff_table = pd.concat([panelols_diff.median(),(panelols_diff>0).sum()],axis=1)\nPanelOLSDiff_table.columns=['PanelOLSDiff:median','PanelOLSDiff:positive count']\n\nPanelOLS_tab = pd.concat([PanelOLS_table,PanelOLSDiff_table],axis=1)\n\n# Pooled OLS\n\nPooledOLS_series = pd.Series({'OLS: median': OLS_result['P'].median(),'OLS: positive count': (OLS_result['P']>0).sum(),'OLSDiff: median': OLS_result_diff['P'].median(),'OLSDiff: positive count': (OLS_result_diff['P']>0).sum()})\n\nPooledOLS_tab = pd.DataFrame({'beta_cv_fa':PooledOLS_series}).T\n\npd.concat([SUR_tab,PanelOLS_tab,PooledOLS_tab],axis=1,sort=False).to_csv(os.path.join(path,\"result\",\"DemandEstimationSummary.csv\"))\n\"\"\"\n---------------------------------------------------------------------\n 4.2 Plots\n---------------------------------------------------------------------\n\"\"\"\n\nimport seaborn as sns\nfrom statsmodels.iolib.summary2 import summary_col\nimport 
matplotlib.pyplot as plt\nsns.set_palette(\"cubehelix\")\n\nfirm_dict = {\"fa_ic\":\"FASA\",\"cv_ic\":\"Cruz Verde\",\"sb_ic\":\"Salcobrand\"}\nattr_tmp = attr[[\"estimation_3\",\"sb_ic\",\"fa_ic\",\"cv_ic\"]]\n\n# Plots: number of price increase against elasticity\nattr_tmp.columns = [\"Elasticity\",\"No. price increase(Salcobrand)\",\"No. price increase(FASA)\",\"No. price increase(CV)\"]\nsns_plot = sns.pairplot(attr_tmp,size=2.5,diag_kind=\"kde\",kind=\"reg\",markers=\"+\")\n\n# PairPlots: first date of price increase, elasticity, number of price increase\nattr_tmp_first = pd.melt(attr[[\"Prescription\",\"no\",\"beta\",\"fa_first\",\"sb_first\",\"cv_first\"]],id_vars=[\"Prescription\",\"beta\",\"no\"])\nattr_tmp_first.columns = [\"Prescription\",\"Elasticity\",\"No\",\"Firm\",\"First\"]\n\nattr_tmp_dec = pd.melt(attr[[\"Prescription\",\"no\",\"s1_1\",\"fa_dc\",\"sb_dc\",\"cv_dc\"]],id_vars=[\"Prescription\",\"s1_1\",\"no\"])\nattr_tmp_dec[\"Prescription\"] = attr_tmp_dec[\"Prescription\"].apply(lambda x: Prescription_dict[x] )\nattr_tmp_dec.columns = [\"Prescription\",\"Elasticity\",\"No\",\"Firm\",\"Dec\"]\n\nattr_tmp_inc = pd.melt(attr[[\"Prescription\",\"no\",\"beta\",\"fa_ic\",\"sb_ic\",\"cv_ic\",\"start_collude\",\"ATCL4\"]],id_vars=[\"Prescription\",\"beta\",\"no\",\"start_collude\",\"ATCL4\"])\n\nPrescription_dict = {0:\"OTC\",1:\"Prescription\",2:\"Restricted Recipe\"}\nattr_tmp_inc[\"Prescription\"] = attr_tmp_inc[\"Prescription\"].apply(lambda x: Prescription_dict[x] )\nattr_tmp_inc.columns = [\"Prescription\",\"Elasticity\",\"No\",\"Start\",\"ATCL4\",\"Firm\",\"Inc\"]\n\n\nattr_tmp_inc[\"Firm\"] = attr_tmp_inc[\"Firm\"].apply(lambda x:firm_dict[x])\nattr_tmp_inc[\"Dec\"] = attr_tmp_dec[\"Dec\"]\nattr_tmp_inc[\"First\"] = attr_tmp_first[\"First\"]\n\n\nsns_plot = sns.lmplot(x = \"Elasticity\",y = \"First\",data=attr_tmp_inc[attr_tmp_inc.Dec < 105],scatter=True)\nsns_plot = sns.lmplot(x = \"Elasticity\",y = \"First\",data=attr_tmp_inc[attr_tmp_inc.Dec > 5],scatter=True)\n\nsns_plot = sns.lmplot(x = \"First\",y = \"Start\",hue=\"Prescription\",data=attr_tmp_inc[attr_tmp_inc.Prescription!=\"OTC\"],scatter=True)\n\n\nsns_plot = sns.scatterplot(x = \"Elasticity\",y = \"First\",hue=\"Dec\",style=\"Firm\",data=attr_tmp_inc[attr_tmp_inc.Dec < 10],palette=\"Paired\")\nfigure = sns_plot.get_figure()\nfigure.set_size_inches(10, 8)\nfigure.savefig(os.path.join(path,\"figure\",\"pair_plot_price_inc_3_1.pdf\"),figsize=(16, 10))\n\n# PairPlot: elasticity, when to start collude, when does salcobrand start collude\nsns_plot = sns.pairplot(attr[[\"beta\",\"ic\",\"start_collude\",\"sb_first\"]],kind=\"reg\",markers=\"+\")\nsns_plot.savefig(os.path.join(path,\"figure\",\"pair_plot_price_inc_3.pdf\"),figsize=(16, 10))\n","sub_path":"Python/some_useful_pycode/replicate_panelOLS.py","file_name":"replicate_panelOLS.py","file_ext":"py","file_size_in_byte":11928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"256276384","text":"import pandas as pd\nfrom typing import List\nfrom ebaysdk.exception import ConnectionError\nfrom ebaysdk.finding import Connection as Finding\n\n\n# TODO:\n# TO Support In Future:\n# findItemsByCategory\n# findItemsByKeywords\n# getHistograms\n# getSearchKeywordsRecommendation\n# Eventually use this: 'GetCategoryInfo' to get valid category ids\n# findItemsByCategory (max: 3, will need to be specified separately for each one i)\n# search variation:\n# baseball card (both words) baseball,card (exact phrase baseball 
card)\n# (baseball,card) (items with either baseball or card) baseball -card (baseball but NOT card)\n# baseball -(card,star) (baseball but NOT card or star)\n\n\nclass EasyEbayData:\n\n def __init__(self, api_id: str, keywords: str, excluded_words: str, sort_order: str,\n search_type: str = \"findItemsByKeywords\", wanted_pages: int = False,\n usa_only: bool = True, min_price: float = 0.0, max_price: float = None):\n \"\"\"\n A class that returns a clean data set of items for sale based on a keyword search from ebay\n :param api_id: eBay developer app's ID\n :param keywords: Keywords should be between 2 & 350 characters, not case sensitive\n :param wanted_pages: The number of desired pages to return w/ 100 items per page\n :param search_type: Search type, for now only findItemsbyKeywords accepted\n \"\"\"\n self.api = Finding(appid=api_id, config_file=None)\n self.search_type = search_type\n self.keywords = keywords # keywords only search item titles\n self.exclude_words = excluded_words\n self.wanted_pages = wanted_pages # must be at least 1 & integer\n self.usa_only = True if usa_only else False\n self.min_price = min_price if min_price else 0.0\n self.max_price = max_price\n self.sort_order = sort_order\n self.search_url = \"\" # will be the result url of the first searched page\n self.total_pages = 0 # the total number of available pages\n self.total_entries = 0 # the total number of items available given keywords\n if len(excluded_words) > 2:\n excluded_words = \",\".join(word for word in excluded_words.split(\" \"))\n self.full_query = keywords + \" -(\" + excluded_words + \")\"\n else:\n self.full_query = keywords\n self.item_filter = self._create_item_filter()\n\n def _create_item_filter(self):\n item_filter = list()\n item_filter.append({'name': 'MinPrice', 'value': self.min_price})\n if self.max_price and self.max_price > self.min_price:\n item_filter.append({'name': 'MaxPrice', 'value': self.max_price})\n if self.usa_only:\n item_filter.append({'name': 'LocatedIn', 'value': 'US'})\n return item_filter\n\n def unembed_ebay_item_data(self, list_of_item_dics):\n unembedded = []\n for ebay_item in list_of_item_dics:\n assert isinstance(ebay_item, dict), \"The data should be returning a list of dictionaries.\"\n unembedded_dict = dict()\n for key, val in ebay_item.items():\n if isinstance(val, dict):\n for key2, val2 in val.items():\n if isinstance(val2, dict):\n for key3, val3 in val2.items():\n unembedded_dict[key2 + '_' + key3] = val3\n else:\n unembedded_dict[key + '_' + key2] = val2\n else:\n unembedded_dict[key] = val\n unembedded.append(unembedded_dict)\n return unembedded\n\n def test_connection(self):\n \"\"\"Tests that an initial API connection is successful\"\"\"\n try:\n # Might simplify this\n response = self.api.execute(self.search_type, {'keywords': self.full_query,\n 'paginationInput': {'pageNumber': 1,\n 'entriesPerPage': 100},\n 'itemFilter': self.item_filter,\n 'sortOrder': self.sort_order})\n assert response.reply.ack == 'Success'\n print('Successfully Connected to API!')\n self.search_url = response.dict()['itemSearchURL']\n return response\n except ConnectionError:\n print('Connection Error! 
Ensure that your API key was correctly entered.')\n return \"connection_error\"\n except AssertionError:\n print('There are no results for that search!')\n return \"no_results_error\"\n\n def get_wanted_pages(self, response):\n \"\"\"response comes from test_connection to access total pages without making another API call\"\"\"\n self.total_pages = int(response.reply.paginationOutput.totalPages)\n self.total_entries = int(response.reply.paginationOutput.totalEntries)\n if self.wanted_pages:\n # can't pull more than max pages\n pages2pull = min([self.total_pages, self.wanted_pages])\n else:\n pages2pull = self.total_pages\n return pages2pull\n\n def get_ebay_item_info(self):\n all_items = []\n\n response = self.test_connection()\n\n if response in [\"connection_error\", \"no_results_error\"]:\n return response\n\n # Add initial items from test\n data = response.dict()['searchResult']['item']\n all_items.extend(self.unembed_ebay_item_data(data))\n\n pages2pull = self.get_wanted_pages(response)\n\n if pages2pull < 2: # stop if only pulling one page or only one page exists\n return pd.DataFrame(all_items)\n\n total_errors = 0\n\n for page in range(2, pages2pull + 1):\n response = self.api.execute(self.search_type, {'keywords': self.full_query,\n 'paginationInput': {'pageNumber': page,\n 'entriesPerPage': 100},\n 'itemFilter': self.item_filter,\n 'sortOrder': self.sort_order\n })\n if response.reply.ack == 'Success':\n data = response.dict()['searchResult']['item']\n all_items.extend(self.unembed_ebay_item_data(data))\n\n else:\n print('Unable to connect to page #: ', page)\n total_errors += 1\n if total_errors == 2:\n print('API limit reached or pull finished. Pulled {} pages'.format(page - 2))\n return pd.DataFrame(all_items)\n\n return pd.DataFrame(all_items)\n","sub_path":"ebaydata/ebaydata.py","file_name":"ebaydata.py","file_ext":"py","file_size_in_byte":6886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"411932170","text":"\"\"\"\n Random number generation using linear congruent method\n \n Lev Kaplan 2019\n\"\"\"\n\n# rand1.py: experimenting with random numbers \n\nfrom pylab import *\n\ndef drand48():\n global rnd\n rnd = (0o273673163155 * rnd + 11) % 2**48 # 0o means octal notation\n return rnd/2**48 # return number between 0 and 1\n \nrnd = 1 # set seed to 1\n\nN = 10000000\n\nxlist = []\nylist = []\n\nx1 = drand48()\nfor i in range(0,N): # collect N pairs of adjacent random numbers\n x2 = x1\n x1 = drand48()\n if x1<=0.01 and x2<0.01:\n xlist.append(x1)\n ylist.append(x2)\n \nscatter(xlist,ylist,s=1) # scatter plot with points of size 1\nshow()\n ","sub_path":"04/rand1.py","file_name":"rand1.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"335194672","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport gin\nfrom dl.modules import *\nfrom collections import namedtuple\nimport numpy as np\n\n\n\"\"\"\nThis file defines torch modules for ValueFunction, QFunction, and Policy.\nThe constructors for these modules generally take as input:\n obs_shape (tuple):\n the shape of the observation space of the environment\n action_space (gym.Space):\n the action space of the enironment. QFunction and Policy only.\n base (nn.Module):\n the base module whose output is the features used by the\n QFunction or Policy. 
These modules place minor assumptions\n on the base:\n\n - base.__init__ takes one argument:\n Args:\n in_shape (tuple):\n the shape of the observation space\n\n For a non-recurrent base:\n - base.forward has the following interface:\n Args:\n X (torch.Tensor):\n The input to base. Usually the current observation.\n Returns:\n out (torch.Tensor):\n A 2-d Tensor containing the output of the module\n (i.e. the output of the penultimate hidden layer\n of a DQN)\n\n For a recurrent base:\n - base has a method named recurrent_state_size, which returns\n a list of shapes for each temporal state of the model.\n - base.forward has the following interface:\n Args:\n X (torch.Tensor):\n The input to base. Usually the current observation.\n mask (torch.Tensor):\n The \"done mask\" to reset the hidden state when an\n episode ends.\n state_in (list):\n The hidden state of the base.\n Returns:\n out (torch.Tensor):\n A 2-d Tensor containing the output of the module\n (Usually the output of the penultimate hidden layer)\n state_out (list):\n A list of the temporal state of the module.\n Algorithms will pass the returned state as input\n to the forward method.\n\n If the base is not specified, a standard MLP or CNN will\n be used.\n\"\"\"\n@gin.configurable(whitelist=['base'])\nclass ValueFunction(nn.Module):\n def __init__(self, obs_shape, base=None):\n \"\"\"\n Args:\n See above. base is assumed to be not recurrent.\n \"\"\"\n super().__init__()\n if base:\n self.base = base(obs_shape)\n else:\n self.base = get_default_base(obs_shape)\n with torch.no_grad():\n in_shape = self.base(torch.zeros(obs_shape)[None]).shape[-1]\n\n self.vf = nn.Linear(in_shape, 1)\n nn.init.orthogonal_(self.vf.weight.data, gain=1.0)\n nn.init.constant_(self.vf.bias.data, 0)\n\n self.outputs = namedtuple('Outputs', ['value'])\n\n def forward(self, x):\n \"\"\"\n Computes the state value.\n Args:\n Same as self.base.forward (see above)\n Returns:\n out (namedtuple):\n out.value: The value estimate for each input observation\n \"\"\"\n x = self.base(x)\n value = self.vf(x).squeeze(-1)\n return self.outputs(value=value)\n\n\n@gin.configurable(whitelist=['base'])\nclass QFunction(nn.Module):\n def __init__(self, obs_shape, action_space, base=None):\n \"\"\"\n Args:\n See above. base is assumed to be not recurrent.\n\n When using a continuous action space, standard qfunction parameterizations\n take two inputs, state and action. 
In this case, the interface of\n 'base.forward' is assumed to take both arguements.\n \"\"\"\n super().__init__()\n self.action_space = action_space\n self.discrete = action_space.__class__.__name__ == 'Discrete'\n base_args = [obs_shape] if self.discrete else [obs_shape, action_space.shape]\n if base:\n self.base = base(*base_args)\n else:\n self.base = get_default_base(*base_args)\n if self.discrete:\n with torch.no_grad():\n in_shape = self.base(torch.zeros(obs_shape)[None]).shape[-1]\n self.qvals = nn.Linear(in_shape, self.action_space.n)\n else:\n with torch.no_grad():\n ac = torch.from_numpy(np.array([self.action_space.sample()]))\n in_shape = self.base(torch.zeros(obs_shape)[None], ac).shape[-1]\n self.qvals = nn.Linear(in_shape, 1)\n nn.init.orthogonal_(self.qvals.weight.data, gain=1.0)\n nn.init.constant_(self.qvals.bias.data, 0)\n\n self.outputs = namedtuple('Outputs', ['action', 'value', 'max_a', 'max_q', 'qvals'])\n\n def forward(self, x, action=None):\n \"\"\"\n Computes Q-value.\n Args:\n Same as self.base.forward (see above)\n Returns:\n out (namedtuple):\n out.action: If an action is specified, out.action is the same, otherwise it is the argmax of the Q-values\n out.value: The q value of (x, out.action)\n out.max_a: The argmax of the Q-values (only available for discrete action spaces)\n out.max_q: The max of the Q-values (only available for discrete action spaces)\n out.qvals: The Q-value for each action (only available for discrete action spaces)\n \"\"\"\n if action is None:\n assert self.discrete, \"You must provide an action for a continuous action space\"\n x = self.base(x)\n qvals = self.qvals(x)\n maxq, ac = qvals.max(dim=-1)\n return self.outputs(action=ac, value=maxq, max_a=ac, max_q=maxq, qvals=qvals)\n elif self.discrete:\n x = self.base(x)\n qvals = self.qvals(x)\n maxq, maxa = qvals.max(dim=-1)\n if len(action.shape) == 1:\n inds = action.long().unsqueeze(1)\n else:\n inds = action.long()\n value = qvals.gather(1, inds).squeeze(1)\n return self.outputs(action=action, value=value, max_a=maxa, max_q=maxq, qvals=qvals)\n else:\n x = self.base(x, action)\n value = self.qvals(x).squeeze(1)\n return self.outputs(action=action, value=value, max_a=None, max_q=None, qvals=None)\n\n\n\n\n\n@gin.configurable(blacklist=['obs_shape', 'action_space'])\nclass Policy(nn.Module):\n def __init__(self, obs_shape, action_space, base=None, critic=True, critic_base=None, norm_observations=False, dist=None):\n \"\"\"\n Args:\n obs_shape (tuple): See above\n action_space (gym.Space): See above\n base (nn.Module): See above\n critic (bool, optional):\n If False, no critic will be used.\n critic_base (nn.Module, optional):\n The base network for the value function.\n If not specified, critic_base will be the same as base.\n If specified, critic_base is assumed to be not recurrent.\n running_ob_norm (bool):\n If True, normalize observations passed to forward.\n dist (nn.Module):\n If specified, overrides the default distribution.\n \"\"\"\n super().__init__()\n if base:\n self.base = base(obs_shape)\n else:\n self.base = get_default_base(obs_shape)\n if critic and critic_base:\n self.critic_base = critic_base(obs_shape)\n else:\n self.critic_base = None\n self.critic = critic\n self.action_space = action_space\n with torch.no_grad():\n in_shape = self.base(torch.zeros(obs_shape)[None]).shape[-1]\n\n # init distribution\n if self.action_space.__class__.__name__ == 'Discrete':\n args = [in_shape, self.action_space.n]\n defualt_dist = Categorical\n elif self.action_space.__class__.__name__ 
== 'Box':\n args = [in_shape, np.prod(self.action_space.shape).item()]\n defualt_dist = DiagGaussian\n else:\n assert False, f\"Uknown action space {self.action_space.__class__.__name__}\"\n self.dist = dist(*args) if dist else defualt_dist(*args)\n\n # init value function haed\n if critic:\n if critic_base:\n with torch.no_grad():\n in_shape = self.critic_base(torch.zeros(obs_shape)[None]).shape[-1]\n self.vf = nn.Linear(in_shape, 1)\n\n\n if norm_observations:\n self.running_norm = RunningObNorm(obs_shape)\n else:\n self.running_norm = None\n\n self.outputs = namedtuple('Outputs', ['action', 'value', 'logp', 'logstd', 'dist', 'state_out'])\n\n def _run_bases(self, x, mask, state_in):\n if state_in is None:\n state_out = None\n out = self.base(x)\n else:\n out, state_out = self.base(x, mask=mask, state_in=state_in)\n if self.critic and self.critic_base:\n vf_out = self.critic_base(x)\n elif self.critic:\n vf_out = out\n else:\n vf_out = None\n return out, vf_out, state_out\n\n def forward(self, X, mask=None, state_in=None, deterministic=False, reparameterization_trick=False):\n \"\"\"\n Computes the action of the policy and the value of the input.\n Args:\n deterministic (bool): True => return mode of action dist,\n False => sample from action dist.\n Other args are the same as self.base.forward (see above)\n Returns:\n out (namedtuple):\n out.action: The sampled action, or the mode if deterministic=True\n out.value: The value of the current observation\n out.logp: The log probability of out.action\n out.logstd: The log std deviation of out.dist\n out.dist: The action distribution\n out.state_out: The temporal state of base (See above for details)\n \"\"\"\n if self.running_norm:\n X = self.running_norm(X)\n out, vf_out, state_out = self._run_bases(X, mask, state_in)\n\n if isinstance(self.dist, DiagGaussian):\n dist, logstd = self.dist(out, return_logstd=True)\n else:\n dist = self.dist(out)\n logstd = None\n if deterministic:\n action = dist.mode()\n elif reparameterization_trick:\n try:\n action = dist.rsample()\n except:\n assert False, f\"{dist.__class__.__name__} distribution does not have a reparameterization trick.\"\n else:\n action = dist.sample()\n\n if self.critic:\n value = self.vf(vf_out).squeeze(-1)\n else:\n value = None\n\n return self.outputs(value=value, action=action, logp=dist.log_prob(action), logstd=logstd, dist=dist, state_out=state_out)\n\n def recurrent_state_size(self):\n if not hasattr(self.base, 'recurrent_state_size'):\n return None\n else:\n return self.base.recurrent_state_size()\n\n\nfrom dl.util import conv_out_shape\n\n@gin.configurable\nclass NatureDQN(nn.Module):\n \"\"\"\n Deep network from https://www.nature.com/articles/nature14236\n \"\"\"\n def __init__(self, img_shape):\n super().__init__()\n self.conv1 = nn.Conv2d(img_shape[0], 32, 8, 4)\n self.conv2 = nn.Conv2d(32, 64, 4, 2)\n self.conv3 = nn.Conv2d(64, 64, 3, 1)\n\n shape = img_shape[1:]\n for c in [self.conv1, self.conv2, self.conv3]:\n shape = conv_out_shape(shape, c)\n self.nunits = 64 * np.prod(shape)\n self.fc = nn.Linear(self.nunits, 512)\n\n def forward(self, x):\n x = x.float() / 255.\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n return F.relu(self.fc(x.view(-1, self.nunits)))\n\n\n\n@gin.configurable\nclass FeedForwardBase(nn.Module):\n def __init__(self, ob_shape, *args, **kwargs):\n super().__init__()\n self.net = FeedForwardNet(ob_shape[0], *args, **kwargs)\n\n def forward(self, x):\n return self.net(x.float())\n\n\n@gin.configurable\nclass 
AppendActionFeedForwardBase(nn.Module):\n def __init__(self, ob_shape, ac_shape, *args, **kwargs):\n super().__init__()\n self.net = FeedForwardNet(ob_shape[0] + ac_shape[0], *args, **kwargs)\n\n def forward(self, x, a):\n return self.net(torch.cat([x.float(), a.float()], -1))\n\n\ndef get_default_base(obs_shape, ac_shape=None):\n if ac_shape:\n assert len(obs_shape) == 1, \"Default base for continuous action spaces requires one dimensional observations.\"\n return AppendActionFeedForwardBase(obs_shape, ac_shape, units=[64,64], activation_fn=torch.tanh, activate_last=True)\n if len(obs_shape) == 1:\n return FeedForwardBase(obs_shape, units=[64,64], activation_fn=torch.tanh, activate_last=True)\n if len(obs_shape) == 3:\n return NatureDQN(obs_shape)\n assert False, f\"No default network for inputs of {len(obs_shape)} dimensions\"\n\n\n\nimport unittest\nfrom dl.util import atari_env\nimport gym\n\nclass TestRLModules(unittest.TestCase):\n def testValueFunction(self):\n env = atari_env('Pong')\n net = ValueFunction(env.observation_space.shape)\n ob = env.reset()\n for _ in range(10):\n outs = net(torch.from_numpy(ob[None]))\n assert outs.value.shape == (1,)\n ob, r, done, _ = env.step(env.action_space.sample())\n\n def testQFunction(self):\n env = atari_env('Pong')\n net = QFunction(env.observation_space.shape, env.action_space)\n ob = env.reset()\n for _ in range(10):\n outs = net(torch.from_numpy(ob[None]))\n assert outs.action.shape == (1,)\n assert outs.max_q.shape == (1,)\n assert outs.qvals.shape == (1,env.action_space.n)\n ob, r, done, _ = env.step(outs.action[0])\n\n out1 = net(torch.from_numpy(ob[None]))\n outs = net(torch.from_numpy(ob[None]), (out1.action + 1) % env.action_space.n)\n assert outs.action.shape == (1,)\n assert outs.max_q.shape == (1,)\n assert outs.qvals.shape == (1,env.action_space.n)\n assert outs.action != out1.action\n assert outs.value != out1.value\n\n class SABase(nn.Module):\n def __init__(self, ob_shape, ac_shape):\n super().__init__()\n\n def forward(self, x, a):\n return x\n\n env = gym.make('MountainCarContinuous-v0')\n net = QFunction(env.observation_space.shape, env.action_space, base=SABase)\n ob = env.reset()\n for _ in range(10):\n ac = torch.from_numpy(np.array([env.action_space.sample()])).float()\n outs = net(torch.from_numpy(ob[None]).float(), ac)\n assert outs.action.shape == (1,*env.action_space.shape)\n assert outs.value.shape == (1,)\n assert outs.max_q == None\n assert outs.qvals == None\n ob, r, done, _ = env.step(outs.action[0])\n\n\n def testPolicy(self):\n env = atari_env('Pong')\n net = Policy(env.observation_space.shape, env.action_space, norm_observations=True)\n ob = env.reset()\n for _ in range(10):\n outs = net(torch.from_numpy(ob[None]))\n assert outs.action.shape == (1,1)\n assert outs.value.shape == (1,)\n assert outs.state_out is None\n ob, r, done, _ = env.step(outs.action[0])\n state = net.state_dict()\n assert 'running_norm.mean' in state\n assert 'running_norm.var' in state\n assert 'running_norm.count' in state\n\n env = gym.make('MountainCarContinuous-v0')\n net = Policy(env.observation_space.shape, env.action_space)\n ob = env.reset()\n for _ in range(10):\n outs = net(torch.from_numpy(ob[None]).float())\n assert outs.action.shape == (1,*env.action_space.shape)\n assert outs.value.shape == (1,)\n assert outs.state_out is None\n ob, r, done, _ = env.step(outs.action[0])\n\n net = Policy(env.observation_space.shape, env.action_space, dist=TanhDiagGaussian)\n ob = env.reset()\n for _ in range(10):\n outs = 
net(torch.from_numpy(ob[None]).float())\n assert outs.action.shape == (1,*env.action_space.shape)\n assert outs.value.shape == (1,)\n assert outs.state_out is None\n assert outs.dist.__class__.__name__ == 'TanhNormal'\n assert torch.abs(outs.action) < 1\n ob, r, done, _ = env.step(outs.action[0])\n\n\n\nif __name__=='__main__':\n unittest.main()\n","sub_path":"dl/modules/rl_modules.py","file_name":"rl_modules.py","file_ext":"py","file_size_in_byte":17243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"638778063","text":"class PlayerItem(object):\n def __init__(self, name, num):\n self.name = name\n self.quantity = num\n\nclass Character(object):\n def __init__(self, name, HP, MP, attack, defense, acc, eva):\n self.name = name\n self.HP = HP\n self.MaxHP = HP\n self.MP = MP\n self.attack = attack\n self.defense = defense\n self.acc = acc\n self.eva = eva\n self.CT = 0\n self.hasMoved = False\n self.hasActed = False\n self.hasUsedItem = False\n self.hasItems = False\n self.abilitiesUsed = 0\n self.id = None\n self.inventory = []\n\n def getName(self):\n return self.name\n\n def addItem(self, name, num):\n item = PlayerItem(name, num)\n self.inventory.append(item)\n\n def checkItems(self):\n self.hasItems = False\n for item in self.inventory:\n if item.quantity > 0:\n self.hasItems = True\n","sub_path":"Character.py","file_name":"Character.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"110776302","text":"import pygame\nimport os\nfrom video_data import *\nfrom button import *\n\npygame.init()\npygame.display.set_caption(GAME_CAPTION)\n\nwindow = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\nDIR = os.path.dirname(os.path.realpath(__file__))\nIMAGES_DIR = os.path.join(DIR, \"images\")\nmove_button_image = pygame.image.load(os.path.join(IMAGES_DIR,\"move_button.png\"))\n\ndef main():\n run = True\n\n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n clock.tick(FPS)\n update_display(\"battle\")\n pygame.quit()\n\ndef update_display(display_type):\n if display_type == \"battle\":\n update_display_battle()\n\ndef update_display_battle():\n window.fill(WHITE)\n\n m_1 = Button(move_button_image, (0,0), None)\n\n move_1 = pygame.Rect(0, HEIGHT - 2 * BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT)\n move_2 = pygame.Rect(BUTTON_WIDTH, HEIGHT - 2 * BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT)\n move_3 = pygame.Rect(0, HEIGHT - 1 * BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT)\n move_4 = pygame.Rect(BUTTON_WIDTH, HEIGHT - 1 * BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT)\n\n button_1 = pygame.Rect(WIDTH - BUTTON_WIDTH, HEIGHT - 2 * BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT)\n button_2 = pygame.Rect(WIDTH - BUTTON_WIDTH, HEIGHT - 1 * BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT)\n\n window.blit(m_1.image, m_1.rect)\n\n pygame.draw.rect(window, RED, move_1)\n pygame.draw.rect(window, GREEN, move_2)\n pygame.draw.rect(window, BLUE, move_3)\n pygame.draw.rect(window, RED, move_4)\n pygame.draw.rect(window, CYAN, button_1)\n pygame.draw.rect(window, MAGENTA, button_2)\n pygame.display.update()\n\nif __name__ == \"__main__\":\n main()","sub_path":"Unofficial Releases/Pygame_Monster_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450557751","text":"import 
subprocess\nfrom pathlib import Path\nimport json\nimport re\n\np = Path('.')\nlibrary = list(p.glob('**/*.cpp'))\ntest = list(p.glob('**/*.test.cpp'))\n\nd = {}\nd['library'] = []\nfor l in library:\n if l not in test:\n if 'library-checker-problems' not in str(l):\n d['library'].append('/' + str(l))\n\nd['test'] = ['/' + str(t) for t in test]\n\nfor t in test:\n ignore = False\n with open(str(t)) as f:\n s = f.read()\n if '#define IGNORE' in s:\n ignore = True\n\n if ignore:\n continue\n\n proc1 = subprocess.Popen(\n [\"g++\", \"-I\", \".\", \"-MD\", \"-MF\", \"/dev/stdout\", \"-MM\",\n str(t)],\n stdout=subprocess.PIPE)\n out1, _ = proc1.communicate()\n proc2 = subprocess.Popen(\n [\"sed\", \"1s/[^:].*: // ; s/\\\\\\\\$//\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n out2, _ = proc2.communicate(input=out1)\n l = out2.decode('utf-8').split(' ')\n l.remove(str(t))\n u = [s.replace('\\n', '') for s in l if s != '\\n']\n v = [re.sub(r'.*\\.\\./', '/', s) for s in u]\n d['/' + str(t)] = v\n\n for s in v:\n if not s in d:\n d[s] = []\n d[s].append('/' + str(t))\n\nfor val in d.values():\n val.sort()\n\nprint(json.dumps(d, indent=2))\n","sub_path":"test/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"218422688","text":"import sys\n\nsys.stdin = open('in2.txt', 'rt')\n\nn, k = map(int, input().split())\nprint(n, k)\n\nsamples = list(map(int, input().split()))\nprint(samples)\n\nres = []\n\nfor i in range(n):\n for j in range(i + 1, n):\n for z in range(j + 1, n):\n res.append(samples[i] + samples[j] + samples[z])\n\nres = set(res)\nres = list(res)\nres.sort()\nprint(res)\nprint(res[len(res) - k])\n","sub_path":"Inflearn_Py/섹션 2/3. k번째 큰 수/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"79494302","text":"#-------------------------------------------------------------------------------\n#\n# Data filters - scalar and vector component range filters\n#\n# Authors: Martin Paces \n#-------------------------------------------------------------------------------\n# Copyright (C) 2016 EOX IT Services GmbH\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies of this Software or works derived from this Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#-------------------------------------------------------------------------------\n# pylint: disable=too-many-arguments\n\nfrom logging import getLogger, LoggerAdapter\nfrom vires.util import between\nfrom .base import Filter\nfrom .exceptions import FilterError\n\n\nclass BaseRangeFilter(Filter):\n \"\"\" Base scalar value range filter. \"\"\"\n # pylint: disable=abstract-method\n\n class _LoggerAdapter(LoggerAdapter):\n def process(self, msg, kwargs):\n return 'filter %s: %s' % (self.extra[\"variable\"], msg), kwargs\n\n def __init__(self, variable, vmin, vmax, logger):\n self.variable = variable\n self.vmin = vmin\n self.vmax = vmax\n self.logger = logger\n\n @property\n def label(self):\n \"\"\" Get filter label. \"\"\"\n return self.variable\n\n @property\n def required_variables(self):\n return (self.variable,)\n\n def _filter(self, data):\n \"\"\" Low level filter. \"\"\"\n self.logger.debug(\"value range: %s %s\", self.vmin, self.vmax)\n self.logger.debug(\"initial size: %d\", data.shape[0])\n return between(data, self.vmin, self.vmax)\n\n def __str__(self):\n return \"%s:%.17g,%.17g\" % (self.label, self.vmin, self.vmax)\n\n\nclass ScalarRangeFilter(BaseRangeFilter):\n \"\"\" Simple scalar value range filter. \"\"\"\n\n def __init__(self, variable, vmin, vmax, logger=None):\n BaseRangeFilter.__init__(\n self, variable, vmin, vmax, self._LoggerAdapter(\n logger or getLogger(__name__), {\"variable\": variable}\n )\n )\n\n def filter(self, dataset, index=None):\n data = dataset[self.variable]\n if data.ndim != 1:\n raise FilterError(\n \"An attempt to apply a scalar range filter to a non-scalar \"\n \"variable %s!\" % self.variable\n )\n if index is None:\n index = self._filter(data).nonzero()[0]\n else:\n index = index[self._filter(data[index])]\n self.logger.debug(\"filtered size: %d\", index.size)\n return index\n\n\nclass VectorComponentRangeFilter(BaseRangeFilter):\n \"\"\" Single vector component range filter. 
\"\"\"\n\n def __init__(self, variable, component, vmin, vmax, logger=None):\n BaseRangeFilter.__init__(\n self, variable, vmin, vmax, self._LoggerAdapter(\n logger or getLogger(__name__), {\n \"variable\": \"%s[%s]\" % (variable, component)\n }\n )\n )\n self.component = component\n\n @property\n def label(self):\n return \"%s[%d]\" % (self.variable, self.component)\n\n def filter(self, dataset, index=None):\n data = dataset[self.variable]\n if data.ndim != 2:\n raise FilterError(\n \"An attempt to apply a vector component range filter to a \"\n \"non-vector variable %s!\" % self.variable\n )\n if index is None:\n index = self._filter(data[:, self.component]).nonzero()[0]\n else:\n index = index[self._filter(data[index, self.component])]\n self.logger.debug(\"filtered size: %d\", index.size)\n return index\n","sub_path":"vires/vires/processes/util/filters/range.py","file_name":"range.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"49793646","text":"def Reconstruct_Fields_With_Elastic_Part(temps,Ux_tot, Uy_tot, KI_tild, KII_tild, Uref , listN_F_len ):\n\tUx_EL_ref_I = Uref.EL.I.x\n\tUy_EL_ref_I = Uref.EL.I.y\n\tUx_EL_ref_II = Uref.EL.II.x\n\tUy_EL_ref_II = Uref.EL.II.y\n\tUx_Res = []\n\tUy_Res = []\n\tCeR_tmp = []\n\tSum_EL = 0\n\tSum = 0\n\tNorme_tot = 0\n\tfor t in range(len(temps)-1):\n\t\tUx_R = []\n\t\tUy_R = []\n\t\tSum_tmp = 0\n\t\tSum_EL_tmp = 0\n\t\tNorme_tot_tmp = 0\n\t\tfor j in range(listN_F_len):\n\t\t\tUx_decomp_EL = KI_tild[t] * Ux_EL_ref_I[j] + KII_tild[t] * Ux_EL_ref_II[j] \n\t\t\tUy_decomp_EL = KI_tild[t] * Uy_EL_ref_I[j] + KII_tild[t] * Uy_EL_ref_II[j]\t\n\t\t\tUx_R += [Ux_tot[j][t] - Ux_decomp_EL]\n\t\t\tUy_R += [Uy_tot[j][t] - Uy_decomp_EL]\n\t\t\tSum_EL_tmp += pow(Ux_tot[j][t]- Ux_decomp_EL , 2) + pow(Uy_tot[j][t]-Uy_decomp_EL , 2)\n\t\t\tNorme_tot_tmp += pow(Ux_tot[j][t] , 2) + pow(Uy_tot[j][t] , 2)\n\t\tNorme_tot += Norme_tot_tmp\n\t\tSum_EL += Sum_EL_tmp\n\t\tCeR_tmp += [sqrt(Sum_EL_tmp / Norme_tot_tmp)]\n\t\tUx_Res.append(Ux_R)\n\t\tUy_Res.append(Uy_R)\n\tUx_Res = transpose(Ux_Res)\n\tUy_Res = transpose(Uy_Res)\n\tCeR = sqrt(Sum_EL / Norme_tot)\n\treturn CeR, CeR_tmp, Ux_Res, Uy_Res\n","sub_path":"src_Post_Proc/Reconstruct_Fields_With_Elastic_Part.py","file_name":"Reconstruct_Fields_With_Elastic_Part.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"645081667","text":"from datetime import datetime\n\nfrom pytezos.interop import Interop\nfrom pytezos.repl.control import instruction\nfrom pytezos.repl.context import Context\nfrom pytezos.repl.types import assert_stack_type, Mutez, ChainID, Address, Contract, Option, assert_equal_types, \\\n KeyHash, Timestamp, expr_equal, Operation\nfrom pytezos.repl.parser import get_entry_expr\n\nINITIAL_BALANCE = 257000000\nMAINNET_CHAIN_ID = 'NetXdQprcVkpaWU'\nUNIT_TYPE_EXPR = {'prim': 'unit'}\n\n\n@instruction('parameter', args_len=1)\ndef do_parameter(ctx: Context, prim, args, annots):\n ctx.set('parameter', args[0])\n\n\n@instruction('storage', args_len=1)\ndef do_parameter(ctx: Context, prim, args, annots):\n ctx.set('storage', args[0])\n\n\n@instruction('code', args_len=1)\ndef do_parameter(ctx: Context, prim, args, annots):\n ctx.set('code', args[0])\n\n\n@instruction('AMOUNT')\ndef do_amount(ctx: Context, prim, args, annots):\n res = ctx.get('AMOUNT', Mutez(0))\n ctx.push(res, annots=['@amount'])\n\n\ndef get_balance(ctx: 
Context):\n res = ctx.get('BALANCE')\n if res is None:\n res = Mutez(INITIAL_BALANCE)\n ctx.set('BALANCE', res)\n return res\n\n\n@instruction('BALANCE')\ndef do_balance(ctx: Context, prim, args, annots):\n res = get_balance(ctx)\n ctx.push(res, annots=['@balance'])\n\n\n@instruction('CHAIN_ID')\ndef do_chain_id(ctx: Context, prim, args, annots):\n res = ctx.get('CHAIN_ID', ChainID(MAINNET_CHAIN_ID))\n ctx.push(res, annots=[f'@{ctx.get(\"NETWORK\", \"mainnet\")}'])\n\n\n@instruction('SELF')\ndef do_self(ctx: Context, prim, args, annots):\n p_type_expr = ctx.get('parameter')\n assert p_type_expr, f'parameter type is not initialized'\n\n entry_annot = next((a for a in annots if a[0] == '%'), '%default')\n ctx.print(f'use {entry_annot}')\n\n p_type_expr, _ = get_entry_expr(p_type_expr, entry_annot)\n res = Contract.new(ctx.dummy_gen.self + entry_annot, type_expr=p_type_expr)\n ctx.push(res, annots=['@self'])\n\n\n@instruction('SENDER')\ndef do_sender(ctx: Context, prim, args, annots):\n res = ctx.get('SENDER')\n assert res is not None, f'SENDER is not initialized'\n ctx.push(res, annots=['@sender'])\n\n\n@instruction('SOURCE')\ndef do_source(ctx: Context, prim, args, annots):\n res = ctx.get('SOURCE')\n assert res is not None, f'SOURCE is not initialized'\n ctx.push(res, annots=['@source'])\n\n\n@instruction('NOW')\ndef do_now(ctx: Context, prim, args, annots):\n res = ctx.get('NOW')\n if res is None:\n network = ctx.get('NETWORK')\n if network:\n interop = Interop().using(network)\n constants = interop.shell.block.context.constants() # cached\n ts = interop.shell.head.header()['timestamp']\n dt = datetime.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')\n first_delay = constants['time_between_blocks'][0]\n return int((dt - datetime(1970, 1, 1)).total_seconds()) + int(first_delay)\n else:\n now = int(datetime.utcnow().timestamp())\n res = Timestamp(now)\n ctx.push(res, annots=['@now'])\n\n\ndef check_contract(ctx: Context, address, entry_annot, type_expr):\n network = ctx.get('NETWORK')\n if not network:\n ctx.print('skip check')\n return True\n try:\n script = Interop().using(network).shell.contracts[address].script()\n p_type_expr = next(s for s in script['code'] if s['prim'] == 'parameter')\n actual, _ = get_entry_expr(p_type_expr, entry_annot)\n if expr_equal(type_expr, actual):\n return True\n else:\n ctx.print('entry type mismatch')\n except Exception:\n ctx.print('not found')\n\n return False\n\n\n@instruction('ADDRESS')\ndef do_address(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, Contract)\n res = Address.new(top.get_address())\n if top.name:\n annots.append(f'@{top.name}.address')\n ctx.push(res, annots=annots)\n\n\n@instruction('CONTRACT', args_len=1)\ndef do_contract(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, Address)\n\n entry_annot = next((a for a in annots if a[0] == '%'), '%default')\n contract = Contract.new(str(top) + entry_annot, type_expr=args[0])\n\n if check_contract(ctx, address=str(top), entry_annot=entry_annot, type_expr=args[0]):\n res = Option.some(contract)\n else:\n res = Option.none(contract.type_expr)\n\n if top.name:\n annots.append(f'@{top.name}.contract')\n ctx.push(res, annots=[a for a in annots if a[0] != '%'])\n\n\n@instruction('IMPLICIT_ACCOUNT')\ndef do_implicit_account(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, KeyHash)\n res = Contract.new(str(top), type_expr=UNIT_TYPE_EXPR)\n ctx.push(res, annots=annots)\n\n\ndef decrease_balance(ctx: Context, amount: Mutez):\n 
balance = get_balance(ctx)\n if int(balance) > 0:\n assert int(amount) <= int(balance), f'needed {int(amount)} utz, got only {int(balance)} utz'\n ctx.set('BALANCE', Mutez(int(balance) - int(amount)))\n\n\n@instruction('CREATE_CONTRACT', args_len=1)\ndef do_create_contract(ctx: Context, prim, args, annots):\n assert len(args[0]) == 3, 'expected { parameter ; storage ; code }'\n parameter_type, storage_type, code = args[0]\n delegate, amount, storage = ctx.pop3()\n\n assert_stack_type(amount, Mutez)\n decrease_balance(ctx, amount)\n\n assert_stack_type(delegate, Option)\n assert_equal_types(storage_type, storage.type_expr)\n\n originated_address = Address.new(ctx.dummy_gen.get_fresh_address())\n content = {\n 'kind': 'origination',\n 'source': ctx.dummy_gen.self,\n 'balance': str(int(amount)),\n 'script': {\n 'storage': storage.val_expr,\n 'code': code\n },\n 'originated_contract': str(originated_address)\n }\n\n if not delegate.is_none():\n content['delegate'] = str(delegate.get_some())\n\n orig = Operation.new(content)\n ctx.push(originated_address)\n ctx.push(orig)\n\n\n@instruction('SET_DELEGATE')\ndef do_set_delegate(ctx: Context, prim, args, annots):\n delegate = ctx.pop1()\n assert_stack_type(delegate, Option)\n\n content = {\n 'kind': 'delegation',\n 'source': ctx.dummy_gen.self,\n 'delegate': None if delegate.is_none() else str(delegate.get_some())\n }\n res = Operation.new(content)\n ctx.push(res)\n\n\n@instruction('TRANSFER_TOKENS')\ndef do_transfer_tokens(ctx: Context, prim, args, annots):\n param, amount, dest = ctx.pop3()\n\n assert_stack_type(amount, Mutez)\n decrease_balance(ctx, amount)\n\n assert_stack_type(dest, Contract)\n dest.assert_param_type(param)\n\n content = {\n 'kind': 'transaction',\n 'source': ctx.dummy_gen.self,\n 'amount': str(int(amount)),\n 'destination': dest.get_address(),\n 'parameters': {\n 'entrypoint': dest.get_entrypoint(),\n 'value': param.val_expr\n }\n }\n res = Operation.new(content)\n ctx.push(res)\n","sub_path":"pytezos/repl/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":6956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"305658270","text":"# Import data types\nfrom pyspark.sql.types import *\nfrom pyspark.sql import Row,SparkSession\nimport os\nos.environ['SPARK_HOME'] = \"/home/csk/sparkscala/spark-2.4.0-bin-hadoop2.6/\"\nspark = SparkSession \\\n .builder \\\n .appName(\"Python Spark SQL basic example\") \\\n .config(\"spark.some.config.option\", \"some-value\") \\\n .getOrCreate()\n\nprint(spark)\n\n# Generic Load/Save Functions\ndf = spark.read.load(\"data/users.parquet\")\ndf.show()\n#df.select(\"name\", \"favorite_color\").write.save(\"namesAndFavColors.parquet\")\n\n# Manually Specifying Options\ndf = spark.read.load(\"data/customers.json\", format=\"json\")\ndf.show()\n#df.select(\"name\", \"age\").write.save(\"namesAndAges.parquet\", format=\"parquet\")\n\n\n# csv file\n\ndf = spark.read.load(\"data/people.csv\",format=\"csv\", sep=\",\", inferSchema=\"true\", header=\"true\")\ndf.show()\n\n# run sql directly on files\n\ndf = spark.sql(\"SELECT * FROM parquet.`data/users.parquet`\")\ndf.show()","sub_path":"scripts/spark/dataframe/spark_df_datasources.py","file_name":"spark_df_datasources.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"183162062","text":"#-*- coding: utf-8 -*-\n\nimport sys, requests\nfrom bs4 import 
BeautifulSoup\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nparPage = sys.argv[1]\nparTitle = sys.argv[2]\nparContent = sys.argv[3]\n\nrequest = requests.get(parPage)\nsoup = BeautifulSoup(request.text, 'html.parser')\n\ntitle = soup.select(parTitle)[0].text\nprint (title + '
    ')\nfor content in soup.select(parContent) :\n\tprint (content.text)\n\n","sub_path":"jsp/webpage.py","file_name":"webpage.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"299720093","text":"'''\nCopyright\n\nJelen forráskód a Budapesti Műszaki és Gazdaságtudományi Egyetemen tartott\n\"Deep Learning a gyakorlatban Python és LUA alapon\" tantárgy segédanyagaként készült.\n\nA tantárgy honlapja: http://smartlab.tmit.bme.hu/oktatas-deep-learning\nDeep Learning kutatás: http://smartlab.tmit.bme.hu/deep-learning\n\nA forráskódot GPLv3 licensz védi. Újrafelhasználás esetén lehetőség szerint kérjük\naz alábbi szerzőt értesíteni.\n\n2016 (c) Csapó Tamás Gábor (csapot kukac tmit pont bme pont hu)\n\nOriginal Variational AutoEncoder example from:\nLinks:\n [MNIST Dataset] http://yann.lecun.com/exdb/mnist/\n [VAE] https://jmetzen.github.io/2015-11-27/vae.html\n'''\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n\nfrom VAE import VariationalAutoencoder\n\nnp.random.seed(0)\ntf.set_random_seed(0)\n\n# MNIST adatok betöltése\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\nn_samples = mnist.train.num_examples\n\n\ndef train(network_architecture, learning_rate=0.001,\n batch_size=100, training_epochs=10, display_step=5):\n \n vae = VariationalAutoencoder(network_architecture, \n learning_rate=learning_rate, \n batch_size=batch_size)\n total_batch = int(n_samples / batch_size)\n \n # tanítási ciklus\n for epoch in range(training_epochs):\n avg_cost = 0. \n # batch-ek\n for i in range(total_batch):\n batch_xs, _ = mnist.train.next_batch(batch_size)\n\n # tanítási lépés\n cost = vae.partial_fit(batch_xs)\n \n # hiba számítás\n avg_cost += cost / n_samples * batch_size\n\n # adott epoch után log kiírása\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \\\n \"cost=\", \"{:.9f}\".format(avg_cost))\n return vae\n\n\n### VAE tanítás, 20D látens tér\n\nnetwork_architecture = \\\n dict(n_hidden_recog_1=500, # 1. réteg encoder neuronok\n n_hidden_recog_2=300, # 2. réteg encoder neuronok\n n_hidden_gener_1=300, # 1. réteg decoder neuronok\n n_hidden_gener_2=500, # 2. réteg decoder neuronok\n n_input=784, # MNIST bemenet (img shape: 28*28)\n n_z=20) # látens tér dimenziója\n\nvae = train(network_architecture, training_epochs=1)\n\n# visszaállított adatok megjelenítése\nx_sample = mnist.test.next_batch(100)[0]\nx_reconstruct = vae.reconstruct(x_sample)\nplt.figure(figsize=(8, 12))\nfor i in range(5):\n plt.subplot(5, 2, 2*i + 1)\n plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1)\n plt.title(\"Test input\")\n plt.colorbar()\n plt.subplot(5, 2, 2*i + 2)\n plt.imshow(x_reconstruct[i].reshape(28, 28), vmin=0, vmax=1)\n plt.title(\"Reconstruction\")\n plt.colorbar()\nplt.tight_layout()\nplt.show()\n\n\n### VAE tanítás, 2D látens sík\n# Gauess eloszlás összenyomása a látens térbe, az átlag közelébe\n\nnetwork_architecture = \\\n dict(n_hidden_recog_1=500, # 1. réteg encoder neuronok\n n_hidden_recog_2=300, # 2. réteg encoder neuronok\n n_hidden_gener_1=300, # 1. réteg decoder neuronok\n n_hidden_gener_2=500, # 2. 
réteg decoder neuronok\n n_input=784, # MNIST bemenet (img shape: 28*28)\n n_z=2) # látens tér dimenziója\n\nvae_2d = train(network_architecture, training_epochs=5)\n\n# 2D látens sík megjelenítése\n\nx_sample, y_sample = mnist.test.next_batch(5000)\n# VAE.transform: adat transzformálása a látens térbe\nz_mu = vae_2d.transform(x_sample)\nplt.figure(figsize=(8, 6)) \nplt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))\nplt.colorbar()\nplt.show()\n\n# generáló hálózattal visszaállítunk 20x20 db képet, \n# és a látens tér megfelelő helyén ábrázoljuk azokat\nnx = ny = 20\nx_values = np.linspace(-3, 3, nx)\ny_values = np.linspace(-3, 3, ny)\n\n# képek rárajzolása a 2D látens síkra\ncanvas = np.empty((28*ny, 28*nx))\nfor i, yi in enumerate(x_values):\n for j, xi in enumerate(y_values):\n # bug a VAE forráskódjában, ezért fura méretű z_mu kell\n z_mu = np.zeros((100, 2))\n z_mu[0,0] = xi\n z_mu[0,1] = yi\n # VAE.generate: adat generálása a látens térből történő mintavételezéssel\n x_mean = vae_2d.generate(z_mu)\n canvas[(nx-i-1)*28:(nx-i)*28, j*28:(j+1)*28] = x_mean[0].reshape(28, 28)\n\n# ábra kirajzolása\nplt.figure(figsize=(8, 10)) \nXi, Yi = np.meshgrid(x_values, y_values)\nplt.imshow(canvas, origin=\"upper\")\nplt.tight_layout()\nplt.show()\n","sub_path":"2016/12/12-06-VariationalAutoencoder-solution.py","file_name":"12-06-VariationalAutoencoder-solution.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"73535919","text":"import numpy as np \nfrom numpy.ctypeslib import ndpointer \nimport ctypes \n\n_intpp = ndpointer(dtype=np.int32, ndim=2, flags='C')\n\n_dll = ctypes.CDLL('max_flow/bfs.so')\n\n_mincut = _dll.mincut\n_mincut.argtypes = [ctypes.c_int, ctypes.c_int, _intpp,ctypes.c_int,ctypes.c_int,ctypes.c_int ]\n_mincut.restype = ctypes.POINTER(ctypes.c_bool)\n\ndef mincut(x,src,sink, V):\n m = ctypes.c_int(x.shape[0])\n n = ctypes.c_int(x.shape[1])\n res = _mincut(m, n, x, src,sink,V)\n # print [res[i] for i in range(6)]\n return res\n\n# if __name__ == '__main__':\n# x = np.array([\n# [0, 16,13,0 ,0, 0 ],\n# [0, 0, 10,12,0, 0 ],\n# [0, 4, 0, 0, 14,0 ],\n# [0, 0, 9, 0, 0, 20],\n# [0, 0, 0, 7, 0, 4 ],\n# [0, 0, 0, 0, 0, 0 ]\n# ],np.int32)\n#\n# res = mincut(x)\n# #print type(res)","sub_path":"max_flow/mincut_clib.py","file_name":"mincut_clib.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"370320982","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .forms import RawEnterpriseForm, EnterpriseModelForm\nfrom .models import Enterprise\n\n# Create your views here.\n\ndef dcf_index_view(request):\n return render(request, 'dcf_calculator/dcf_index.html', context={})\n\n\n# using POST.get instead of GET\n# sly= second last year; ly=last year\ndef dcf_results_view(request):\n\n company_name = request.POST.get('company_name')\n stock_ticker = request.POST.get('stock_ticker')\n sector = request.POST.get('sector')\n currency = request.POST.get('currency')\n\n total_cash_flows_operating_activities_sly = int(request.POST.get('tcfoa_sly'))\n total_cash_flows_operating_activities_ly = int(request.POST.get('tcfoa_ly'))\n capex_sly = int(request.POST.get('ce_sly'))\n capex_ly = int(request.POST.get('ce_ly'))\n fcf_sly = total_cash_flows_operating_activities_sly - capex_sly\n fcf_ly = total_cash_flows_operating_activities_ly - capex_ly\n\n revenue_sly = 
int(request.POST.get('rev_sly'))\n revenue_ly = int(request.POST.get('rev_ly'))\n #interest_expenses_sly = int(request.POST.get('ie_sly'))\n #interest_expenses_ly = int(request.POST.get('ie_ly'))\n #income_before_tax_sly = int(request.POST.get('ibt_sly'))\n #income_before_tax_ly = int(request.POST.get('ibt_ly'))\n net_income_sly = int(request.POST.get('ni_sly'))\n net_income_ly = int(request.POST.get('ni_ly'))\n net_profit_margins_sly = net_income_sly / revenue_sly\n net_profit_margins_ly = net_income_ly / revenue_ly\n fcf_profit_margins_sly = fcf_sly / net_income_sly\n fcf_profit_margins_ly = fcf_ly / net_income_ly\n\n # estimators needed for forecasting revenues and cash flows\n revenue_growth_rate = (revenue_ly - revenue_sly) / revenue_sly\n average_net_income_margins = (net_profit_margins_sly + net_profit_margins_ly) / 2\n average_fcf_income_margins = (fcf_profit_margins_sly + fcf_profit_margins_ly) / 2\n\n # rate assumptions\n wacc = float(request.POST.get('wacc'))\n perpetual_growth_rate = float(request.POST.get('pgr')) # this should be the growth rate of the economy\n shares_outstanding = int(request.POST.get('so'))\n\n # project revenues\n revenue_projections = [revenue_sly, revenue_ly]\n while len(revenue_projections) <= 5:\n next_revenue = int(revenue_projections[-1] * (1 + revenue_growth_rate))\n revenue_projections.append(next_revenue)\n\n # project net income:\n i = 2\n net_income_projections = [net_income_sly, net_income_ly]\n while len(net_income_projections) <= 5:\n next_net_income = int(revenue_projections[i] * average_net_income_margins)\n net_income_projections.append(next_net_income)\n i += 1\n\n # project free cash flows\n i = 2\n fcf_projections = [fcf_sly, fcf_ly]\n while len(fcf_projections) <= 5:\n next_fcf = int(net_income_projections[i] * average_fcf_income_margins)\n fcf_projections.append(next_fcf)\n i += 1\n\n # calculation steps\n terminal_value = fcf_projections[-1] * (1 + perpetual_growth_rate) / (wacc - perpetual_growth_rate)\n pv_future_cash_flows = []\n i = 1\n for cash_flow in fcf_projections[2:]:\n pv_cash_flow = cash_flow / (1 + wacc) ** i\n pv_future_cash_flows.append(pv_cash_flow)\n i += 1\n pv_future_cash_flows.append(terminal_value/ (1 + wacc) ** i)\n todays_value = sum(pv_future_cash_flows)\n fair_equity_value = round(todays_value / shares_outstanding, 2)\n\n projected_years = ['2018A', '2019A', '2020E', '2021E', '2022E', '2023E']\n\n revenue_dict = dict(zip(projected_years, revenue_projections))\n net_income_dict = dict(zip(projected_years, net_income_projections))\n fcf_dict = dict(zip(projected_years, fcf_projections))\n\n context = {\n 'company_name': company_name,\n 'stock_ticker': stock_ticker,\n 'currency': currency,\n 'revenue_dict': revenue_dict,\n 'net_income_dict': net_income_dict,\n 'fcf_dict': fcf_dict,\n 'fair_equity_value': fair_equity_value,\n 'projected_years': projected_years\n }\n\n Enterprise.objects.create(company_name=company_name, stock_ticker=stock_ticker, sector=sector, total_cash_flows_operating_activities_sly = total_cash_flows_operating_activities_sly, total_cash_flows_operating_activities_ly = total_cash_flows_operating_activities_ly, capex_sly = capex_sly, capex_ly = capex_ly, revenue_sly = revenue_sly, revenue_ly = revenue_ly, net_income_sly = net_income_sly, net_income_ly = net_income_ly, wacc = wacc, perpetual_growth_rate = perpetual_growth_rate, shares_outstanding = shares_outstanding, terminal_value = terminal_value, todays_value = todays_value, fair_equity_value = fair_equity_value)\n\n \"\"\"\n qs = 
Enterprise.objects.all().values()\n data = pd.DataFrame(qs)\n\n context = {'df': data.html()}\n\n # in template : {{ df | save }}\n\n \"\"\"\n\n return render(request, 'dcf_calculator/dcf_results.html', context)\n\n\ndef enterprise_database_view(request):\n queryset = Enterprise.objects.all()\n context = {\n \"object_list\": queryset\n }\n return render(request, 'dcf_calculator/enterprise_database.html', context=context)\n\ndef dynamic_lookup_view(request, id):\n obj = get_object_or_404(Enterprise, id=id)\n context = {\n \"object\":obj,\n }\n return render(request, 'dcf_calculator/enterprise_detail.html', context)\n\ndef individual_stock_view(request):\n return render(request, 'dcf_calculator/individual_stock.html', context={})\n\ndef enterprise_delete_view(request, id):\n obj = get_object_or_404(Enterprise, id=id)\n if request.method == \"POST\":\n obj.delete()\n return redirect('../../')\n context = {\n \"object\": obj\n }\n return render(request, \"dcf_calculator/enterprise_delete.html\", context)","sub_path":"securityvaluator/dcf_calculator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"33520011","text":"import numpy as np\r\nimport tensorflow as tf\r\nfrom sklearn.model_selection import train_test_split\r\ndef nearest_neighbor(non_cancer_data, cancer_data):\r\n \"\"\"\r\n Compute the close non-cancer and cancer neighbors to the input to determine the status of the input\r\n \"\"\"\r\n #status is based on the mean of smallest elements of cancer and non_cancer groups\r\n #sort\r\n non_cancer_data = sorted(non_cancer_data)\r\n cancer_data = sorted(cancer_data)\r\n\r\n #choose nearest neighbors\r\n non_cancer_neighbor = np.mean(non_cancer_data[:2])\r\n cancer_neighbor = np.mean(cancer_data[:2])\r\n\r\n #etermine cell status\r\n if non_cancer_neighbor > cancer_neighbor:\r\n print(\"Negative\")\r\n else:\r\n print(\"Positive\")\r\n","sub_path":"tumorstoppy/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"320522296","text":"\"\"\"\nkombu.utils.compat\n==================\n\nHelps compatibility with older Python versions.\n\n\"\"\"\nfrom __future__ import absolute_import\n\n\n############## timedelta_seconds() -> delta.total_seconds ####################\nfrom datetime import timedelta\n\nHAVE_TIMEDELTA_TOTAL_SECONDS = hasattr(timedelta, 'total_seconds')\n\n\nif HAVE_TIMEDELTA_TOTAL_SECONDS: # pragma: no cover\n\n def timedelta_seconds(delta):\n \"\"\"Convert :class:`datetime.timedelta` to seconds.\n\n Doesn't account for negative values.\n\n \"\"\"\n return max(delta.total_seconds(), 0)\n\nelse: # pragma: no cover\n\n def timedelta_seconds(delta): # noqa\n \"\"\"Convert :class:`datetime.timedelta` to seconds.\n\n Doesn't account for negative values.\n\n \"\"\"\n if delta.days < 0:\n return 0\n return delta.days * 86400 + delta.seconds + (delta.microseconds / 10e5)\n\n############## socket.error.errno ############################################\n\n\ndef get_errno(exc):\n \"\"\":exc:`socket.error` and :exc:`IOError` first got\n the ``.errno`` attribute in Py2.7\"\"\"\n try:\n return exc.errno\n except AttributeError:\n try:\n # e.args = (errno, reason)\n if isinstance(exc.args, tuple) and len(exc.args) == 2:\n return exc.args[0]\n except AttributeError:\n pass\n return 0\n\n############## collections.OrderedDict 
#######################################\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict # noqa\n\n############## time.monotonic ################################################\n\nimport platform\nSYSTEM = platform.system()\n\nif SYSTEM == 'Darwin':\n import ctypes\n libSystem = ctypes.CDLL('libSystem.dylib')\n CoreServices = ctypes.CDLL(\n '/System/Library/Frameworks/CoreServices.framework/CoreServices',\n use_errno=True,\n )\n mach_absolute_time = libSystem.mach_absolute_time\n mach_absolute_time.restype = ctypes.c_uint64\n absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds\n absolute_to_nanoseconds.restype = ctypes.c_uint64\n absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]\n\n def monotonic():\n return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9\nelif SYSTEM == 'Linux':\n # from stackoverflow:\n # questions/1205722/how-do-i-get-monotonic-time-durations-in-python\n import ctypes\n import os\n\n CLOCK_MONOTONIC = 1 # see \n\n class timespec(ctypes.Structure):\n _fields_ = [\n ('tv_sec', ctypes.c_long),\n ('tv_nsec', ctypes.c_long),\n ]\n\n librt = ctypes.CDLL('librt.so.1', use_errno=True)\n clock_gettime = librt.clock_gettime\n clock_gettime.argtypes = [\n ctypes.c_int, ctypes.POINTER(timespec),\n ]\n\n def monotonic(): # noqa\n t = timespec()\n if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:\n errno_ = ctypes.get_errno()\n raise OSError(errno_, os.strerror(errno_))\n return t.tv_sec + t.tv_nsec * 1e-9\nelse:\n from time import time as monotonic # noqa\n","sub_path":"kombu/utils/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"521185547","text":"#!/bin/python\n#calculador de 3n1\nimport sys\n\nnums = list(map(int, sys.argv[1:]))\n\nx = nums[0]\ny = nums[1]\n\nif (x > y):\n\tswap_var = x\n\tx = y\n\ty = swap_var\n\nkeeper = 0\n\nfor n in range(x, y+1):\n\n\tcount = 1\n\n\twhile(n!=1):\n\t\tif n % 2 == 1:\n\t\t\tn = 3*n + 1\n\t\telse:\n\t\t\tn = n/2\n\t\tcount = count + 1\n\tif count > keeper:\n\t\tkeeper = count\n\nprint (keeper)\n","sub_path":"part2-exercicio100.py","file_name":"part2-exercicio100.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"470250798","text":"# Copyright 2015 Observable Networks\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division, print_function\n\nfrom calendar import timegm\nfrom datetime import datetime, timedelta\n\nimport boto3\nfrom botocore.exceptions import NoRegionError, PaginationError\n\nDEFAULT_FILTER_PATTERN = (\n '[version=\"2\", account_id, interface_id, srcaddr, dstaddr, '\n 'srcport, dstport, protocol, packets, bytes, '\n 'start, end, action, log_status]'\n)\nDEFAULT_REGION_NAME = 'us-east-1'\nDUPLICATE_NEXT_TOKEN_MESSAGE = 'The same next token was received twice'\n\nACCEPT = 'ACCEPT'\nREJECT = 
'REJECT'\nSKIPDATA = 'SKIPDATA'\nNODATA = 'NODATA'\n\n\nclass FlowRecord(object):\n \"\"\"\n Given a VPC Flow Logs event dictionary, returns a Python object whose\n attributes match the field names in the event record. Integers are stored\n as Python int objects; timestamps are stored as Python datetime objects.\n \"\"\"\n __slots__ = [\n 'version',\n 'account_id',\n 'interface_id',\n 'srcaddr',\n 'dstaddr',\n 'srcport',\n 'dstport',\n 'protocol',\n 'packets',\n 'bytes',\n 'start',\n 'end',\n 'action',\n 'log_status',\n ]\n\n def __init__(self, event, EPOCH_32_MAX=2147483647):\n fields = event['message'].split()\n self.version = int(fields[0])\n self.account_id = fields[1]\n self.interface_id = fields[2]\n\n # Contra the docs, the start and end fields can contain\n # millisecond-based timestamps.\n # http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html\n start = int(fields[10])\n if start > EPOCH_32_MAX:\n start /= 1000\n\n end = int(fields[11])\n if end > EPOCH_32_MAX:\n end /= 1000\n\n self.start = datetime.utcfromtimestamp(start)\n self.end = datetime.utcfromtimestamp(end)\n\n self.log_status = fields[13]\n if self.log_status in (NODATA, SKIPDATA):\n self.srcaddr = None\n self.dstaddr = None\n self.srcport = None\n self.dstport = None\n self.protocol = None\n self.packets = None\n self.bytes = None\n self.action = None\n else:\n self.srcaddr = fields[3]\n self.dstaddr = fields[4]\n self.srcport = int(fields[5])\n self.dstport = int(fields[6])\n self.protocol = int(fields[7])\n self.packets = int(fields[8])\n self.bytes = int(fields[9])\n self.action = fields[12]\n\n def __eq__(self, other):\n try:\n return all(\n getattr(self, x) == getattr(other, x) for x in self.__slots__\n )\n except AttributeError:\n return False\n\n def __hash__(self):\n return hash(tuple(getattr(self, x) for x in self.__slots__))\n\n def __str__(self):\n ret = ['{}: {}'.format(x, getattr(self, x)) for x in self.__slots__]\n return ', '.join(ret)\n\n def to_dict(self):\n return {x: getattr(self, x) for x in self.__slots__}\n\n def to_message(self):\n D_transform = {\n 'start': lambda dt: str(timegm(dt.utctimetuple())),\n 'end': lambda dt: str(timegm(dt.utctimetuple())),\n }\n\n ret = []\n for attr in self.__slots__:\n transform = D_transform.get(attr, lambda x: str(x) if x else '-')\n ret.append(transform(getattr(self, attr)))\n\n return ' '.join(ret)\n\n @classmethod\n def from_message(cls, message):\n return cls({'message': message})\n\n\nclass FlowLogsReader(object):\n \"\"\"\n Returns an object that will yield VPC Flow Log records as Python objects.\n * `log_group_name` is the name of the CloudWatch Logs group that stores\n your VPC flow logs.\n * `region_name` is the AWS region.\n * `profile_name` is the AWS boto3 configuration profile to use.\n * `start_time` is a Python datetime.datetime object; only the log events\n from at or after this time will be considered.\n * `end_time` is a Python datetime.datetime object; only the log events\n before this time will be considered.\n * `filter_pattern` is a string passed to CloudWatch as a filter pattern\n * `boto_client_kwargs` - keyword arguments to pass to the boto3 client\n * `boto_client` - your own boto3 client object. 
If given then region_name,\n profile_name, and boto_client_kwargs will be ignored.\n \"\"\"\n\n def __init__(\n self,\n log_group_name,\n region_name=None,\n profile_name=None,\n start_time=None,\n end_time=None,\n filter_pattern=DEFAULT_FILTER_PATTERN,\n boto_client_kwargs=None,\n boto_client=None,\n ):\n if boto_client is not None:\n self.logs_client = boto_client\n else:\n self.logs_client = self._get_client(\n region_name, profile_name, boto_client_kwargs\n )\n self.log_group_name = log_group_name\n\n self.paginator_kwargs = {}\n\n if filter_pattern is not None:\n self.paginator_kwargs['filterPattern'] = filter_pattern\n\n # If no time filters are given use the last hour\n now = datetime.utcnow()\n start_time = start_time or now - timedelta(hours=1)\n end_time = end_time or now\n\n self.start_ms = timegm(start_time.utctimetuple()) * 1000\n self.end_ms = timegm(end_time.utctimetuple()) * 1000\n\n self.iterator = self._reader()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.iterator)\n\n def next(self):\n # For Python 2 compatibility\n return self.__next__()\n\n def _get_client(self, region_name, profile_name, boto_client_kwargs):\n session_kwargs = {}\n if region_name is not None:\n session_kwargs['region_name'] = region_name\n\n if profile_name is not None:\n session_kwargs['profile_name'] = profile_name\n\n client_kwargs = boto_client_kwargs or {}\n\n session = boto3.session.Session(**session_kwargs)\n try:\n logs_client = session.client('logs', **client_kwargs)\n except NoRegionError:\n logs_client = session.client(\n 'logs', region_name=DEFAULT_REGION_NAME, **client_kwargs\n )\n\n return logs_client\n\n def _read_streams(self):\n paginator = self.logs_client.get_paginator('filter_log_events')\n response_iterator = paginator.paginate(\n logGroupName=self.log_group_name,\n startTime=self.start_ms,\n endTime=self.end_ms,\n interleaved=True,\n **self.paginator_kwargs\n )\n\n try:\n for page in response_iterator:\n for event in page['events']:\n yield event\n except PaginationError as e:\n if e.kwargs['message'].startswith(DUPLICATE_NEXT_TOKEN_MESSAGE):\n pass\n else:\n raise\n\n def _reader(self):\n # Loops through each log stream and its events, yielding a parsed\n # version of each event.\n for event in self._read_streams():\n yield FlowRecord(event)\n","sub_path":"flowlogs_reader/flowlogs_reader.py","file_name":"flowlogs_reader.py","file_ext":"py","file_size_in_byte":7709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"624492","text":"#!/usr/bin/python\n# autovivification - extends dict\n# author: Kuan-lin Huang (khuang@genome.wustl.edu) & Adam D Scott (ascott@genome.wustl.edu)\n# version: v0.0 - 2016*01*12\n\nclass autovivification(dict):\n\t'''Implementation of perl's autovivification feature.'''\n\tdef __init__( self , *args , **kwargs ):\n\t\tsuper( autovivification , self ).__init__( *args , **kwargs )\n\t\tself.itemlist = super( autovivification , self ).keys()\n\tdef __getitem__(self, item):\n\t\ttry:\n\t\t\treturn dict.__getitem__(self, item)\n\t\texcept KeyError:\n\t\t\tvalue = self[item] = type(self)()\n\t\t\treturn value\n","sub_path":"charger/autovivification.py","file_name":"autovivification.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450746035","text":"#!/usr/bin/env python3\n# encoding: utf-8\n#\n# Copyright (c) 2019 SAP SE or an SAP affiliate company. 
All rights reserved.\n#\n# This file is part of ewm-cloud-robotics\n# (see https://github.com/SAP/ewm-cloud-robotics).\n#\n# This file is licensed under the Apache Software License, v. 2 except as noted\n# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/main/LICENSE)\n#\n\n\"\"\"Maps, positions etc. in FetchCore.\"\"\"\n\nimport logging\n\nfrom typing import Dict\n\nimport attr\n\nfrom requests import RequestException\n\nfrom .fetchcore import FetchInterface, HTTPstatusNotFound\n\n_LOGGER = logging.getLogger(__name__)\n\n\n@attr.s\nclass FetchPose:\n \"\"\"Representation of a FetchCore pose.\"\"\"\n\n name: str = attr.ib(validator=attr.validators.instance_of(str))\n id: int = attr.ib(validator=attr.validators.instance_of(int))\n x: float = attr.ib(default=0.0, validator=attr.validators.instance_of(float))\n y: float = attr.ib(default=0.0, validator=attr.validators.instance_of(float))\n theta: float = attr.ib(default=0.0, validator=attr.validators.instance_of(float))\n\n def get_xytheta(self) -> Dict:\n \"\"\"Get dictionary with coordinates of this pose.\"\"\"\n return {'x': self.x, 'y': self.y, 'theta': self.theta}\n\n\nclass FetchMap:\n \"\"\"Representation of a FetchCore Map.\"\"\"\n\n def __init__(self, map_id: str, fetch_api: FetchInterface) -> None:\n \"\"\"Construct.\"\"\"\n self.map_id = map_id\n self.name = ''\n self._fetch_api = fetch_api\n self._poses: Dict[str, FetchPose] = {}\n\n @property\n def poses(self) -> Dict:\n \"\"\"Get poses.\"\"\"\n return self._poses\n\n def get_pose(self, name: str) -> FetchPose:\n \"\"\"Get instance of one pose.\"\"\"\n try:\n pose = self._poses[name]\n except KeyError:\n raise ValueError('Pose {} is unknown on map {}'.format(name, self.name))\n\n return pose\n\n def update_map(self) -> None:\n \"\"\"Update map data.\"\"\"\n # Call FetchcCore API\n endpoint = '/api/v1/maps/{}/'.format(self.map_id)\n try:\n fetch_map = self._fetch_api.http_get(endpoint)\n except HTTPstatusNotFound:\n _LOGGER.error('Map ID %s not found in FetchCore', self.map_id)\n except RequestException as err:\n _LOGGER.error('Exception %s when connecting to FetchCore endpoint %s', err, endpoint)\n else:\n self.name = fetch_map['name']\n\n def update_poses(self) -> None:\n \"\"\"Update poses of this map.\"\"\"\n # Call FetchcCore API\n endpoint = '/api/v1/maps/{}/annotations/poses/'.format(self.map_id)\n try:\n fetch_poses = self._fetch_api.http_get(endpoint)\n except HTTPstatusNotFound:\n _LOGGER.error('Map ID %s not found in FetchCore', self.map_id)\n except RequestException as err:\n _LOGGER.error('Exception %s when connecting to FetchCore endpoint %s', err, endpoint)\n else:\n poses = {}\n for result in fetch_poses['results']:\n pose = FetchPose(\n result['name'], result['id'], result['x'], result['y'], result['theta'])\n poses[pose.name] = pose\n self._poses = poses\n","sub_path":"python-modules/fetchcontroller/fetchcontroller/fetchlocation.py","file_name":"fetchlocation.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87709465","text":"filepath = './Exercise 2/dataSets/gmm.txt'\n\nimport gauss_MLE\nimport numpy as np\nprint('')\nprint('######## EM - ALGORITHM ########')\nprint('')\n\ndata = gauss_MLE.read_file(filepath)\n\n\nprint('Mean of all Data:')\nmean = np.mean(data, 0)\nprint(mean)\n\nprint('Maximum of Data:')\nmaximum = np.max(data, 0)\nprint(maximum)\n\nprint('Minimum of Data:')\nminimum = np.min(data, 0)\nprint(minimum)\n\n\n# currently 
using abs with det because of value initialization\ndef multivariate_normal(x, mu, cov):\n x = np.matrix(x)\n cov = np.matrix(cov)\n det_cov = np.abs(np.linalg.det(cov))\n und_sqr = (2*np.pi)**2*det_cov\n under = np.sqrt(und_sqr)\n first_half = 1/under\n second_half = np.exp(-1/2*(x-mu)*np.linalg.inv(cov)*np.transpose((x-mu)))\n return first_half*second_half\n\ndef m_step(data_list, posteriors, parameter_list):\n for idx, parameters in enumerate(parameter_list):\n sum_mu = 0\n soft_count = 0\n sum_cov = 0\n for i, x in enumerate(data_list):\n sum_mu += posteriors[i][idx]*x\n soft_count += posteriors[i][idx]\n parameters[0] = soft_count/len(data_list)\n parameters[1] = (1/soft_count)*sum_mu\n for i, x in enumerate(data_list):\n dist = np.matrix(x-parameters[1])\n dist_2 = np.matmul(np.transpose(dist), dist)\n sum_cov += np.multiply(posteriors[i][idx], dist_2)\n parameters[2] = np.multiply((1/soft_count),sum_cov)\n return parameter_list\n\ndef e_step(data_list, posteriors, parameter_list):\n for i, x in enumerate(data_list):\n norm = 0\n for param in parameter_list:\n mul_norm = multivariate_normal(x, param[1], param[2])\n norm += param[0]*mul_norm\n for idx, parameters in enumerate(parameter_list):\n posteriors[i][idx] = (parameters[0]*multivariate_normal(x, parameters[1], parameters[2]))/norm\n return posteriors\n\ndef init_values():\n output = []\n for i in range(0,4):\n temp = [0,0,0]\n for j in range(0,3):\n if j == 1:\n temp[j] = np.random.rand(1,2)\n continue\n if j == 2:\n temp[j] = np.random.rand(2,2)\n continue\n temp[j] = np.random.uniform(0,1)\n output.append(temp)\n return output\n \ndef likelihood(data_list, parameter_list):\n sum_data = 0\n for data in data_list:\n sum_dists = 0\n for parameters in parameter_list:\n sum_dists += parameters[0]*multivariate_normal(data, parameters[1], parameters[2])\n sum_data += np.log(sum_dists)\n return sum_data\n\ndef em_loop(data_list, it_amount):\n iterations = 0\n posteriors = []\n likelihoods = []\n for x in data_list:\n posteriors.append([0,0,0,0])\n # * index 0 is pi\n # * index 1 is mu\n # * index 2 is cov\n parameter_list = init_values()\n notfinished = True\n while notfinished:\n old_parameters = parameter_list\n new_posteriors = e_step(data_list, posteriors, parameter_list)\n posteriors = new_posteriors\n parameter_list = m_step(data_list, new_posteriors, parameter_list)\n sum1 = old_parameters[0][0]\n sum2 = parameter_list[0][0]\n diff = sum1 - sum2\n likelihoods.append(likelihood(data_list, parameter_list))\n iterations += 1\n if iterations == it_amount:\n notfinished = False\n return parameter_list, posteriors, likelihoods\n\nparameters, posteriors, likelihoods = em_loop(data, 30)\n\nlikelihoods = np.reshape(likelihoods, [30,-1])\n\nimport matplotlib.pyplot as plt\n\ndef estimated_dist(x_list, posteriors=posteriors, parameters=parameters):\n output = []\n value = 0\n for x in x_list:\n for param in parameters:\n value += param[0]*multivariate_normal(x, param[1], param[2])\n output.append(value)\n value = 0\n return output\n\nX = np.arange(-2, 5, 0.1)\nY = np.arange(-2, 5, 0.1)\n\n#21 per x coordinate\nxy = np.mgrid[-2:5:0.1, -2:5:0.1].reshape(2, -1).T\n\nZ = estimated_dist(xy)\nZ = np.reshape(Z, [70, 70])\n\nplt.figure()\nplt.contour(X, Y, Z)\nplt.title('EM Mixed Model - Iterations:1')\n\nitera = range(30)\n\nplt.figure()\nplt.plot(itera, likelihoods)\nplt.title('Likelihoods EM 1-30 Iterations')\nplt.show()\n","sub_path":"Exercise 
2/em.py","file_name":"em.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"403330886","text":"import re\n\nclass SentTokenizerWrapper():\n \n def __init__(self, PunktTokenizer):\n self.PunktTokenizer = PunktTokenizer\n \n def tokenize(self, s):\n \n pre = self.PunktTokenizer.tokenize(s)\n pre = [item.strip() for sublist in \n [re.split(\"(\\n|:|;)\", item) for item in pre] \n for item in sublist if item != \"\\n\" and item and item != \"\"]\n return_val = []\n merge_next = False\n \n for index, item in enumerate(pre): \n if merge_next == True:\n return_val[-1] = \" \".join((return_val[-1], item))\n merge_next = False\n continue\n \n if item in [\";\", \":\"]:\n if index < len(pre) - 1:\n try:\n if not pre[index + 1][0].isupper() or index == 2:\n merge_next = True\n except IndexError as e:\n if pre[index + 1] == \"\":\n pass\n else:\n raise\n if len(return_val) == 0:\n return_val = [item]\n else:\n return_val[-1] = \"\".join((return_val[-1], item))\n \n else:\n return_val.append(item)\n \n return return_val","sub_path":"code/lib/tokenizers.py","file_name":"tokenizers.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"115710617","text":"# -*- coding: utf-8 -*-\nfrom plone.app.blob.tests.base import BlobSchemaExtenderTestCase\nfrom plone.app.blob.tests.utils import hasSchemaExtender\nfrom unittest import defaultTestLoader\nfrom unittest import TestSuite\n\n\nclass ExtenderTests(BlobSchemaExtenderTestCase):\n\n def testImageOnDocument(self):\n \"\"\"Test that adding an image blob field to a document doesn't\n error for lack of EXIF helper functions\"\"\"\n document_id = self.folder.invokeFactory('Document', id='doc')\n document = self.folder[document_id]\n document.Schema().get('image').set(document, 'f')\n\n def testImageOnImage(self):\n \"\"\"Test that an extension image field works on a class that has image\n helper methods\"\"\"\n img_id = self.folder.invokeFactory('Image', id='img')\n img = self.folder[img_id]\n img.Schema().get('new_image').set(img, 'f')\n\n\ndef test_suite():\n if hasSchemaExtender():\n return defaultTestLoader.loadTestsFromName(__name__)\n else:\n return TestSuite()\n","sub_path":"src/plone/app/blob/tests/test_extensionblobfield.py","file_name":"test_extensionblobfield.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"96740794","text":"from abc import ABC, abstractmethod\nfrom typing import Type, Union, Iterable, Any\n\nfrom vkbottle_types.events import BaseGroupEvent, BaseUserEvent\n\n\nclass ABCRule(ABC):\n config: dict = {}\n\n @classmethod\n def with_config(cls, config: dict) -> Type[\"ABCRule\"]:\n cls.config = config\n return cls\n\n @abstractmethod\n async def check(self, event: Union[BaseUserEvent, BaseGroupEvent]):\n pass\n\n def __and__(self, other: \"ABCRule\") -> \"ABCFilter\":\n return AndFilter(self, other)\n\n def __or__(self, other: \"ABCRule\") -> \"ABCFilter\":\n return OrFilter(self, other)\n\n def __repr__(self):\n return f\"<{self.__class__.__qualname__}>\"\n\n\nclass ABCFilter(ABCRule):\n @property\n @abstractmethod\n def rules(self) -> Iterable[ABCRule]:\n pass\n\n\nclass AndFilter(ABCFilter):\n def __init__(self, *rules: ABCRule):\n self._rules = rules\n\n async def check(self, event: Any):\n context = {}\n\n for rule in self.rules:\n check_response = 
await rule.check(event)\n if check_response is False:\n return False\n elif isinstance(check_response, dict):\n context.update(check_response)\n\n return context\n\n @property\n def rules(self) -> Iterable[ABCRule]:\n return self._rules\n\n\nclass OrFilter(ABCFilter):\n def __init__(self, *rules: ABCRule):\n self._rules = rules\n\n async def check(self, event: Any):\n for rule in self.rules:\n check_response = await rule.check(event)\n if check_response is not False:\n return check_response\n return False\n\n @property\n def rules(self) -> Iterable[ABCRule]:\n return self._rules\n","sub_path":"vkbottle/dispatch/rules/abc.py","file_name":"abc.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"337944069","text":"from model.contact import Contact\nimport random\n\n\ndef test_modify_some_contact(app, db, check_ui):\n if app.contact.count() == 0:\n app.contact.create(Contact(first_name=\"test\"))\n old_contacts = db.get_contact_list()\n contact = random.choice(old_contacts)\n app.contact.modify_contact_by_id(contact.ident, Contact(first_name=\"test\"))\n new_contacts = db.get_contact_list()\n assert len(old_contacts) == len(new_contacts)\n if check_ui:\n sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)","sub_path":"test/test_modify_contact.py","file_name":"test_modify_contact.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"427502424","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^insertdata', views.insert_data ),\n url(r'^insertCourseData', views.insert_course_data ),\n url(r'^$', views.index , name='index'),\n url(r'^index', views.index , name='index'),\n url(r'^jobDataForm', views.job_data , name='job_data'),\n url(r'^course_content_form', views.course_data , name='course_data'),\n url(r'^selJobCategory', views.jobCategory_Select, name='jobCategory_Select'),\n url(r'^selectUni', views.university_Select, name='university_Select'),\n url(r'^similarity', views.similarity, name='similarity'),\n url(r'^showUni', views.showUni, name='showUni'),\n url(r'^uniDataShow', views.uniDataShow, name='uniDataShow'),\n url(r'^extractedKeywordJob', views.extractedKeywordJob, name='extractedKeywordJob'),\n url(r'^extractedKeywordUniversity', views.extractedKeywordUniversity, name='extractedKeywordUniversity'),\n url(r'^comparison', views.comparison , name='comparison'),\n\n\n\n\n]","sub_path":"thesis_implementation/education_gap_analyzer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"247647947","text":"from math import *\nn = int(input(\"Informe a precisão n:\"))\ncont = 0\nsoma = 0.0\nnum = 1\nwhile (cont < n):\n\tif (cont % 2 == 0):\n\t\tsoma = soma + (1.0/ (num * (3 ** cont)))\n\telse:\n\t\tsoma = soma - (1.0/ (num * (3 ** cont)))\n\tcont = cont + 1\n\tnum = num + 2\nmeu_pi = sqrt(12) * soma\nprint(round(meu_pi,8))","sub_path":"exs/1432-1141.py","file_name":"1432-1141.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"503609724","text":"#!/usr/bin/env python3\n\n# This code contains an implementation of the edge_betweenness\n# calculation necessary to perform the Girvan Newman algorithm 
for\n# graph partitioning.\n# \n# Made by Leonardo Tamiano on 04/06/2020.\n\n# ---------------------------------------------------------\n# Data Structures Description\n# -----------------\n\n# Graph represent by adjacent lists using dictionaries. In particular, we let\n# G[u] := {nodes v which are adjacent to u}\n\nG = {\n 1: [2, 3, 4],\n 2: [1, 3, 4],\n 3: [1, 2],\n 4: [1, 2]\n}\n\n# ---------------------------------------------------------\n# I/O with graphviz function\n# -----------------\n\n# writes graph to filename using grapvhiz format\n#\n# TODO: improve efficiency by removing the use of found_edges.\ndef write_graph_to_file(G, filename):\n found_edges = {}\n with open(filename, \"w\") as f:\n f.write(\"graph G {\\n\")\n for u in G:\n for v in G[u]:\n if (u, v) not in found_edges.keys() and (v, u) not in found_edges.keys():\n found_edges[(u, v)] = True\n f.write(f\"{u} -- {v} \\n\")\n f.write(\"}\\n\")\n\n# ---------------------------------------------------------\n# Helper functions\n# -----------------\n\n# computes the tree obtained by doing a BFS (Breath First Search)\n# starting from node s in the graph G\ndef bfs(G, s):\n T = {}\n border = [s]\n\n # continue as long as there are nodes to explore\n while border != []:\n u = border.pop(0)\n T[u] = []\n \n # explore new node\n for v in G[u]:\n # add it in the tree only if its the first time I reach this node\n if v not in T.keys():\n T[u].append(v)\n T[v] = []\n border.append(v)\n return T\n\n# computes sets V_1, V_2, ..., V_d, where\n# V_i := {nodes u which sit at distance i from the root of the tree T}\ndef compute_sets_by_distance(T, r, d, sets):\n for u in T[r]:\n if d+1 in sets.keys():\n sets[d+1].append(u)\n else:\n sets[d+1] = [u]\n compute_sets_by_distance(T, u, d+1, sets)\n\n# returns True if the graph is connected, and False otherwise.\ndef is_graph_connected(G):\n s = list(G.keys())[0]\n\n # execute a BFS starting from s\n T = bfs(G, s)\n\n # return True only if every node has been explored\n return len(T.keys()) == len(G.keys())\n\n# ---------------------------------------------------------\n# Main functions\n# -----------------\n\n# computes the function b_s(u, v), over all edges (u, v), where b_s(u,\n# v) is defined as the sum over all t in V of the fraction of the\n# shortest paths that go from s to t which pass over the edge (u, v)\n# over all the shortest paths that go from s to t\ndef _edge_betwenness_s(G, s):\n # ---------------------------------------------------\n # 1) BFS on s\n \n T = bfs(G, s)\n\n # ---------------------------------------------------\n # 2) Compute number of shortest paths from s to t\n\n # sigma_s[u] := number of shortest paths from s to u\n # sets[i] := V_i = {nodes at distances i from root of tree T}\n \n sigma_s = {} \n sets = {} \n compute_sets_by_distance(T, s, 0, sets)\n\n # -- base case\n for u in sets[1]:\n sigma_s[u] = 1\n \n ## -- general case\n if len(sets) > 1:\n for d in range(2, len(sets)+1):\n for u in sets[d]:\n sigma_s[u] = 0\n for v in sets[d-1]:\n if v in G[u]:\n sigma_s[u] += sigma_s[v]\n\n # ---------------------------------------------------\n # 3) Final bottom-up step\n\n # flux_node_s[u] := flux that node s sends to node u\n # flux_edge_s[(u, v)] := flux from node s sends through edge (u, v)\n \n flux_node_s = {} \n flux_edge_s = {} \n \n # -- base case\n # compute flux_node_s\n for t in sets[len(sets)]:\n flux_node_s[t] = 1\n\n # compute flux_edge_s\n if len(sets) == 1:\n for t in sets[1]:\n if t in G[s]:\n flux_edge_s[(s, t)] = 1\n else:\n for v in 
sets[len(sets)-1]:\n for t in sets[len(sets)]:\n if t in G[v]:\n flux_edge_s[(v, t)] = 1 * sigma_s[v] / sigma_s[t]\n\n ## -- general case\n for j in range(len(sets)-1, 0, -1):\n \n # compute flux_node_s\n for u in sets[j]:\n flux_node_s[u] = 1\n for v in sets[j+1]:\n if v in G[u]:\n flux_node_s[u] += flux_edge_s[(u, v)]\n\n # compute flux_edge_s\n if j > 1:\n for u in sets[j-1]:\n for v in sets[j]:\n if v in G[u]:\n flux_edge_s[(u, v)] = flux_node_s[v] * sigma_s[u] / sigma_s[v]\n elif j == 1:\n for v in sets[1]:\n if v in G[s]:\n flux_edge_s[(s, v)] = flux_node_s[v] * 1 / sigma_s[v]\n\n return flux_edge_s\n\n# computes betweenness of edge (u, v) in graph G\ndef edge_betwenness(G, u, v):\n res = 0\n for s in G.keys():\n flux_edge_s = _edge_betwenness_s(G, s)\n if (u, v) in flux_edge_s:\n res += flux_edge_s[(u, v)]\n elif (v, u) in flux_edge_s:\n res += flux_edge_s[(v, u)]\n \n return res/2\n\n\n# partition graph using girvan newman betwenness centrality measure.\ndef girvan_newman(G):\n connected = is_graph_connected(G)\n\n # graph is already disconnected, no need to partition it \n if not connected:\n return G\n \n # iterate over all edges of the graph to compute the\n # edge_betweenness\n betweenness_value = {}\n for u in G:\n for v in G[u]:\n betweenness_value[(u, v)] = edge_betwenness(G, u, v)\n\n # start removing edges with highest edge-betweenness until graph\n # disconnects\n while connected:\n # find edge with max betweenness value\n max_edge = max(betweenness_value, key=betweenness_value.get)\n u, v = max_edge\n \n # remove edge from graph\n G[u].remove(v)\n G[v].remove(u)\n \n # remove edge from betweennes dictionary\n betweenness_value.pop(max_edge, None)\n \n # check if graph is still connected\n connected = is_graph_connected(G)\n\n return G\n \n# ---------------------------------------------------------\n\nif __name__ == \"__main__\":\n G1 = {\n 1: [2, 4],\n 2: [1, 3],\n 3: [2, 4, 5],\n 4: [1, 3],\n 5: [3, 6, 8],\n 6: [5, 7],\n 7: [6, 8],\n 8: [5, 7]\n }\n\n G2 = girvan_newman(G1)\n","sub_path":"university/master/AR/code/community_partition/edge_betweenness.py","file_name":"edge_betweenness.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"447398655","text":"#!/usr/bin/env python2.7 \n\nimport datetime\n\n##can be used for Temperature and Humidity Readings\n\nclass measurementThreshold:\n def __init__(self, name, measurement_lower, measurement_hysterysis, average_over, overall_averages_array_size, initial_temp):\n self.name = name\n self.measurement_lower = measurement_lower\n self.measurement_hysterysis = measurement_hysterysis\n self.average_over = average_over\n self.averages = [initial_temp]\n self.overall_averages = [initial_temp]\n self.overall_averages_time = []\n self.overall_averages_array_size = overall_averages_array_size\n self.overall_average = \"initial_temp\"\n\n##############################\n##Function to Get all the temps, and average everything\n \n def combine_measurements(self, lists): # where lists is a list of lists \n for measurement_array in lists:\n \n if len(measurement_array) >= self.average_over:\n average = sum(measurement_array[len(measurement_array)-self.average_over:])\n average = average / self.average_over\n self.averages.append(average)\n else:\n average = sum(measurement_array) / len(measurement_array)\n self.averages.append(self.average)\n\n self.overall_average = sum(self.averages) / len(self.averages)\n \n if len(self.overall_averages) >= 
self.overall_averages_array_size:\n self.overall_average.pop(0)\n self.overall_averages_time.pop(0)\n self.overall_averages.append(average)\n self.overall_averages_time.append(datetime.datetime.now())\n \n\n\n########################\n##Check if temperature of averages is lower than the temperature to trigger minus the hysterisis \n def check_if_under(self):\n if (self.measurement_lower - self.measurement_hysterysis) > self.overall_average:\n return True\n else:\n return False\n \n \n \n \n \n","sub_path":"thresholdsClass.py","file_name":"thresholdsClass.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"211814809","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\npython yield的使用\n\nsend方法用于与生成器的交互\n\n- 运行至yield函数返回一个值并暂停执行,相当于f.send(None)\n- next(f)也等价于f.send(None)\n\nf.send()有三个步骤(作用):\n1. 将send中的数值赋给yield的结果 ret = yield i中的ret\n2. 重新启动生长器往下走\n3. 再次执行next(f),相当于函数又返回了一次值\n\n将生成器重置,可以通过重新定义的方式:\na = Generator()\nb = Generator()\n\n\nyield简单介绍: https://blog.csdn.net/mieleizhi0522/article/details/82142856/\n\"\"\"\n\nimport doctest\n\n\nclass YieldDemo1:\n \"\"\"\n 基础应用\n >>> y1 = YieldDemo1()\n >>> y1.yield_demo1()\n 0\n 1\n 2\n over\n iter is over\n \"\"\"\n\n def generator_basic(self, n=3):\n \"\"\"自定义生成器\"\"\"\n # yield # 类似于return None\n for i in range(n):\n yield i # 类似于return i\n yield \"over\"\n\n def yield_demo1(self):\n \"\"\"yield基本使用\"\"\"\n a = self.generator_basic()\n try:\n for _ in range(100): # 为了让迭代结束\n print(next(a))\n except StopIteration:\n print(\"iter is over\")\n\n\nclass YieldDemo2:\n \"\"\"\n yield send的使用\n >>> y2 = YieldDemo2()\n >>> y2.yield_demo2()\n\n \"\"\"\n\n def generator_send(self):\n \"\"\"用send与生成器交互的示例\"\"\"\n for i in range(10):\n ret = yield i # ret接受send发送过来的数据\n if ret == \"break\": # 根据接收到的交互进行不同的操作\n print(\"break, over\")\n break\n print(\"generator over\")\n\n def yield_demo2(self):\n \"\"\"yield使用2之send方法\"\"\"\n a = self.generator_send()\n # a.send(111)\n print(next(a))\n a.send(111) # 将111赋值给了ret\n print(a.send(111)) # 将111赋值给了ret\n print(next(a))\n print(next(a))\n print(next(a))\n a.send(\"break\")\n # try:\n # a.send(\"break\")\n # # print(next(a))\n # # print(next(a))\n # except StopIteration:\n # print(\"iteration stopped.\")\n\n\ndef main():\n # y1 = YieldDemo1()\n # y1.yield_demo1()\n\n y2 = YieldDemo2()\n y2.yield_demo2()\n\n\nif __name__ == '__main__':\n # doctest.testmod()\n main()\n","sub_path":"language/python/python/yield/yield_demo.py","file_name":"yield_demo.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"245239607","text":"import os\n\nfrom trame import start, update_state, change, get_app_instance\nfrom trame.html import vuetify, vtk\nfrom trame.layouts import SinglePage\n\nfrom vtkmodules.vtkIOXML import vtkXMLImageDataReader\nfrom vtkmodules.vtkFiltersCore import vtkContourFilter\n\n# -----------------------------------------------------------------------------\n# VTK pipeline\n# -----------------------------------------------------------------------------\n\ndata_directory = os.path.join(\n os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),\n \"data\",\n)\nhead_vti = os.path.join(data_directory, \"head.vti\")\n\nreader = vtkXMLImageDataReader()\nreader.SetFileName(head_vti)\nreader.Update()\n\ncontour = 
vtkContourFilter()\ncontour.SetInputConnection(reader.GetOutputPort())\ncontour.SetComputeNormals(1)\ncontour.SetComputeScalars(0)\n\n# Extract data range => Update store/state\ndata_range = reader.GetOutput().GetPointData().GetScalars().GetRange()\ncontour_value = 0.5 * (data_range[0] + data_range[1])\nupdate_state(\"data_range\", data_range)\n\n# Configure contour with valid values\ncontour.SetNumberOfContours(1)\ncontour.SetValue(0, contour_value)\n\n\n# -----------------------------------------------------------------------------\n# Callbacks\n# -----------------------------------------------------------------------------\n\n\n@change(\"contour_value\")\ndef update_contour(contour_value, **kwargs):\n contour.SetValue(0, contour_value)\n html_polydata.update()\n\n\n# -----------------------------------------------------------------------------\n# GUI\n# -----------------------------------------------------------------------------\nhtml_polydata = vtk.VtkPolyData(\"contour\", dataset=contour)\n\nlayout = SinglePage(\"VTK contour - Remote/Local rendering\")\nlayout.title.content = \"Contour Application - Local rendering\"\nlayout.logo.content = \"mdi-virus-outline\"\nlayout.logo.click = \"$refs.view.resetCamera()\"\nlayout.toolbar.children += [\n vuetify.VSpacer(),\n vuetify.VSlider(\n value=(\"contour_value\", contour_value),\n min=[\"data_range[0]\"],\n max=[\"data_range[1]\"],\n hide_details=True,\n dense=True,\n style=\"max-width: 300px\",\n change=\"contour_value = $event\",\n ),\n vuetify.VSwitch(\n v_model=\"$vuetify.theme.dark\",\n hide_details=True,\n ),\n vuetify.VBtn(\n vuetify.VIcon(\"mdi-crop-free\"),\n icon=True,\n click=\"$refs.view.resetCamera()\",\n ),\n vuetify.VProgressLinear(\n indeterminate=True,\n absolute=True,\n bottom=True,\n active=[\"busy\"],\n ),\n]\n\nlayout.content.children += [\n vuetify.VContainer(\n fluid=True,\n classes=\"pa-0 fill-height\",\n children=[vtk.VtkView([vtk.VtkGeometryRepresentation([html_polydata])])],\n )\n]\n\n# -----------------------------------------------------------------------------\n# Main\n# -----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n # print(layout.html)\n start(layout, on_ready=update_contour)\n","sub_path":"examples/VTK/ContourGeometry/LocalRendering.py","file_name":"LocalRendering.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"141595383","text":"from asyncio import get_event_loop\n\nfrom client.utils import register_transform_event\nfrom dread_snarfle.creature_encounter import CreatureEncounter\n\n\ndef encounter(client, player, creature):\n register_transform_event(CreatureEncounter)\n\n loop = get_event_loop()\n\n response = loop.run_until_complete(client.broadcast_transform(CreatureEncounter(\n player=player,\n creature=creature\n )))\n\n print(response)\n","sub_path":"client/encounter.py","file_name":"encounter.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"557170475","text":"import random\n\n# lst représente la liste à trier, l l'index 0 et r la taille de la liste -1\ndef sort(lst, l, r):\n # mettre une condition pour arrêter la récursivité\n\n \n pivot_index = ... 
# Partie à compléter: Choisissez un pivot compris entre 0 et la longueur de votre liste - 1\n\n # Déplacer votre pivot dans votre liste\n\n # Partitionnez votre liste de telle sorte que les éléments plus petits que le pivot soient placés avant celui-ci et les éléments plus grands soient placés après\n \n # Replacer votre pivot à l'endroit adéquat\n\n # Effectuez le tri de façon récursive sur les parties gauches et droites de la liste\n\ndef quicksort(items): \n if items is None or len(items) < 2:\n return\n sort(items, 0, len(items) - 1)\n\nl = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\nquicksort(l)\nprint('Liste triée: ', l)\n","sub_path":"2021/week11/resources/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"99442489","text":"import psycopg2\nimport json\nimport collections\nimport datetime\nimport sys\nimport numpy as np\nimport ast\n\nfrom models.extractdata import *\n\nclass alexa_skill:\n\n def speak_populardestinations(self,list_destinations):\n session_attributes = {}\n card_title = \"Popularity\"\n reprompt_text = \"\"\n should_end_session = True\n\n dest_String = \",\".join(list_destinations)\n\n speech_output = \"The most popular destinations are \" + dest_String\n\n return self.build_response(session_attributes, self.build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n def speak_popularhotels(self,list_destinations):\n session_attributes = {}\n card_title = \"Popular Hotels\"\n reprompt_text = \"\"\n should_end_session = True\n\n dest_String = \",\".join(list_destinations)\n\n speech_output = \"The most popular hotels are \" + dest_String\n\n return self.build_response(session_attributes, self.build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\n\n def build_speechlet_response(self,title, output, reprompt_text, should_end_session):\n return {\n \"outputSpeech\": {\n \"type\": \"PlainText\",\n \"text\": output\n },\n \"card\": {\n \"type\": \"Simple\",\n \"title\": title,\n \"content\": output\n },\n \"reprompt\": {\n \"outputSpeech\": {\n \"type\": \"PlainText\",\n \"text\": reprompt_text\n }\n },\n \"shouldEndSession\": should_end_session\n }\n\n def build_response(self,session_attributes, speechlet_response):\n return {\n \"version\": \"1.0\",\n \"sessionAttributes\": session_attributes,\n \"response\": speechlet_response\n }\n\ndef __init__(self):\n print (\"in init\")\n","sub_path":"models/alexa.py","file_name":"alexa.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"45689580","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom .views import *\n\nfrom django.contrib.auth import views as auth_views\n\n\nurlpatterns = [\n\tpath('',login, name='login'),\n\tpath('iosLogin/',iosLogin),\n\tpath('main/',main,name='list'),\n\tpath('admin-panel/', admin.site.urls),\n\tpath('accounts/profile/', profile, name='profile'),\n path('register/', register, name='register'),\n path('logout/', auth_views.LogoutView.as_view(template_name='toy/logout.html'), name='logout'),\n\n path('childs/',childs_list,name='childs_list_url'),\n path('child/create/',ChildCreate.as_view(),name='child_create_url'),\n path('child//', ChildDetail.as_view(), name='child_detail_url'),\n path('child//update/', ChildUpdate.as_view(), name='child_update_url'),\n 
path('child/<int:pk>/delete/', ChildDelete.as_view(), name='child_delete_url'),\n\n path('illnesses/',illnesses_list,name='illnesses_list_url'),\n path('illness/create/',IllnessCreate.as_view(),name='illness_create_url'),\n path('illness/<int:pk>/', IllnessDetail.as_view(), name='illness_detail_url'),\n path('illness/<int:pk>/update/', IllnessUpdate.as_view(), name='illness_update_url'),\n path('illness/<int:pk>/delete/', IllnessDelete.as_view(), name='illness_delete_url'),\n\n path('persons/',persons_list,name='persons_list_url'),\n path('person/create/',PersonCreate.as_view(),name='person_create_url'),\n path('person/<int:pk>/', PersonDetail.as_view(), name='person_detail_url'),\n path('person/<int:pk>/update/', PersonUpdate.as_view(), name='person_update_url'),\n path('person/<int:pk>/delete/', PersonDelete.as_view(), name='person_delete_url'),\n\n path('toys/',toys_list,name='toys_list_url'),\n path('toy/create/',ToyCreate.as_view(), name='toy_create_url'),\n path('toy/<int:pk>/', ToyDetail.as_view(), name='toy_detail_url'),\n path('toy/<int:pk>/update/', ToyUpdate.as_view(), name='toy_update_url'),\n path('toy/<int:pk>/delete/', ToyDelete.as_view(), name='toy_delete_url'),\n\n path('medicalhistorys/',medicalhistory_list,name='medicalhistorys_list_url'),\n path('medicalhistory/create/',MedicalHistoryCreate.as_view(),name='medicalhistory_create_url'),\n path('medicalhistory/<int:pk>/', MedicalHistoryDetail.as_view(), name='medical_history_detail_url'),\n path('medicalhistory/<int:pk>/update/', MedicalHistoryUpdate.as_view(), name='medical_history_update_url'),\n path('medicalhistory/<int:pk>/delete/', MedicalHistoryDelete.as_view(), name='medical_history_delete_url'),\n]","sub_path":"Toy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"431401166","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 29 11:07:44 2020\nscript to define constants for file read-in and computation as below\n@author: dinos\n\"\"\"\n\nmy_apportionment = \"SEND\" #type of district boundaries to calculate - eg US congressional, state senate, house etc.\nmy_electionproxy = \"EL12G_GV_\" #pick the election to use as a statewide proxy for partisan voting for districted seats\nmy_electiondatafile = \"./shapefiles_multistate/NC-shapefiles-master/NC_VTD/NC_VTD_buf_SEND.shp\" #PATH to the election data\n\nstate = \"NC\"\nmy_electionproxy_alternate=\"EL12G_GV_\"\nmaxsplitlist=[80, 50 , 30, 25, 23, 21,20]\ncutedgemaxlist = [2,2, 2, 1.2, 1.2, 1.2,1.2]\npoptol=0.06\n","sub_path":"input_templates/NC_send_EL12G_GV.py","file_name":"NC_send_EL12G_GV.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"112913460","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Step 1 - Import the library\n\n# In[34]:\n\n\nimport numpy as np\nimport pandas as pd\n\n\n# In[36]:\n\n\ndetail ='C:\\\\Users\\\\Nivas\\\\Downloads\\\\detail.csv'\ndf=pd.read_csv(detail)\ndf.head(10)\n\n\n# Step 2 - Setting up the Data\n# This dataset is not biased so we are making it biased for a better understanding of the functions\n\n# In[37]:\n\n\ndf.Status = pd.factorize(df.Status)[0]\n\n\n# In[38]:\n\n\nX=df.iloc[:,0:12]\ny = df['Status']\n\ny = np.where((y == 0), 0, 1)\nprint(\"Viewing the imbalanced target vector:\\n\", y)\n\n\n# Step 3 - Downsampling the dataset\n# First we are selecting the rows where target values are 0 and 1 in two different objects and then printing the number of observations in the two 
objects.\n\n# In[39]:\n\n\nw_class0 = np.where(y == 0)[0]\nw_class1 = np.where(y == 1)[0]\n\nn_class0 = len(w_class0) \nn_class1 = len(w_class1)\n \n\nprint(\"n_class0: \", n_class0)\nprint(\"n_class1: \", n_class1)\n\n\n# In the output we will see that the number of samples having target value 1 is much greater than the number having 0. So in downsampling we will randomly select rows having target 1 and make their count equal to the number of rows having target value 0.\n# Then we have printed the joint dataset having target classes 0 and 1.\n# \n\n# In[40]:\n\n\nw_class1_downsampled = np.random.choice(w_class1, size=n_class0, replace=False)\n\nprint(); print(np.hstack((y[w_class0], y[w_class1_downsampled])))\n\n\n# In[49]:\n\n\ndetailTemp ='C:\\\\Users\\\\Nivas\\\\Downloads\\\\detailTemp.csv'\ndf1=pd.read_csv(detailTemp)\ndf1.head(10)\n\n\n# In[50]:\n\n\ndf1.Step_Name = pd.factorize(df1.Step_Name)[0]\n\n\n# In[52]:\n\n\nX = df1.iloc[:,0:6]\ny = df1['Step_Name']\n\ny = np.where((y == 0), 0, 1)\nprint(\"Viewing the imbalanced target vector:\\n\", y)\n\n\n# In[53]:\n\n\nw_class0 = np.where(y == 0)[0]\nw_class1 = np.where(y == 1)[0]\n\nn_class0 = len(w_class0) \nn_class1 = len(w_class1)\n \n\nprint(\"n_class0: \", n_class0)\nprint(\"n_class1: \", n_class1)\n\n\n# In[54]:\n\n\nw_class1_downsampled = np.random.choice(w_class1, size=n_class0, replace=False)\n\nprint(); print(np.hstack((y[w_class0], y[w_class1_downsampled])))\n\n\n# In[57]:\n\n\ndetailVol ='C:\\\\Users\\\\Nivas\\\\Downloads\\\\detailVol.csv'\ndf2=pd.read_csv(detailVol)\ndf2.head(10)\n\n\n# In[60]:\n\n\ndf2.Step_Name = pd.factorize(df2.Step_Name)[0]\n\n\n# In[61]:\n\n\nX = df2.iloc[:,0:6]\ny = df2['Step_Name']\n\ny = np.where((y == 0), 0, 1)\nprint(\"Viewing the imbalanced target vector:\\n\", y)\n\n\n# In[62]:\n\n\nw_class0 = np.where(y == 0)[0]\nw_class1 = np.where(y == 1)[0]\n\nn_class0 = len(w_class0) \nn_class1 = len(w_class1)\n \n\nprint(\"n_class0: \", n_class0)\nprint(\"n_class1: \", n_class1)\n\n\n# In[63]:\n\n\nw_class1_downsampled = np.random.choice(w_class1, size=n_class0, replace=False)\n\nprint(); print(np.hstack((y[w_class0], y[w_class1_downsampled])))\n\n\n# Apply low pass filter technique for noise removal on the data set for 'detailVol.csv' \n\n# In[64]:\n\n\n# import required library\nimport numpy as np\nimport scipy.signal as signal\nimport matplotlib.pyplot as plt\n\n\n# In[72]:\n\n\n# Specifications of the filter\nf1 = 25 # Frequency of 1st signal\nf2 = 50 # Frequency of 2nd signal\nN = 10 # Order of the filter\nfs=1000\n \n# Generate the time vector of 1 sec duration\nt = np.linspace(0, 1, 1000) # Generate 1000 samples in 1 sec\n \n# Generate the signal containing f1 and f2\nsig = np.sin(2*np.pi*f1*t) + np.sin(2*np.pi*f2*t)\n\n\n# In[73]:\n\n\n# Display the signal\nfig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\nax1.plot(t, sig)\nax1.set_title('25 Hz and 50 Hz sinusoids')\nax1.axis([0, 1, -2, 2])\n \n\n\n# In[ ]:\n\n\n# Design the Butterworth filter using signal.butter and output='sos'\n# START CODE HERE ### (≈ 1 line of code)\nsos = signal.butter(50, 35, 'lp', fs=1000, output='sos')\n\n# Filter the signal by the filter using signal.sosfilt\n# START CODE HERE ### (≈ 1 line of code)\n# Use signal.sosfiltfilt to get output in phase with input\nfiltered = signal.sosfiltfilt(sos, sig)\n \n \n# Display the output signal\nax2.plot(t, filtered)\nax2.set_title('After 35 Hz Low-pass filter')\nax2.axis([0, 1, -2, 2])\nax2.set_xlabel('Time 
[seconds]')\nplt.tight_layout()\nplt.show()\n\n","sub_path":"Assignment.py","file_name":"Assignment.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"426711618","text":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\nimport os\n\n\nversion = '0.1'\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\ndef read_file(*pathes):\n path = os.path.join(here, *pathes)\n if os.path.isfile(path):\n with open(path, 'r') as desc_file:\n return desc_file.read()\n else:\n return ''\n\ndesc_files = (('README.rst',), ('docs', 'CHANGES.rst'),\n ('docs', 'CONTRIBUTORS.rst'))\n\nlong_description = '\\n\\n'.join([read_file(*pathes) for pathes in desc_files])\nrequires = ['Sphinx>=0.6']\n\nsetup(\n name='sphinxcontrib-gen_node',\n version=version,\n download_url='http://pypi.python.org/pypi/sphinxcontrib-gen_node',\n license='BSD',\n author='Jean-Philippe Camguilhem',\n author_email='jean-philippe.camguilhem__at__makina-corpus.com',\n description='Sphinx generic nodes \"todo like\" extension',\n long_description=long_description,\n url='https://github.com/jpcw/sphinxcontrib-gen_node',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Documentation',\n 'Topic :: Utilities',\n ],\n platforms='any',\n packages=find_packages('src', exclude=['ez_setup']),\n package_dir = {'': 'src'},\n install_requires=requires,\n namespace_packages=['sphinxcontrib'],\n)\n","sub_path":"pypi_install_script/sphinxcontrib-gen_node-0.1-20131031/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"209050648","text":"import discord\r\nfrom discord.ext.commands import Bot\r\nfrom discord.ext import commands\r\nimport asyncio\r\nimport time\r\nimport random\r\n\r\n\r\n\r\n#BOT\r\n\r\nbot = commands.Bot(command_prefix='!')\r\n\r\nclient = discord.Client()\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print(\"name: <%s>\" %(bot.user.name) )\r\n print(\"id: <%s>\" %(bot.user.id) )\r\n print('------')\r\n\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n server = member.server\r\n fmt = 'Welcome {0.mention} to {1.name}!'\r\n await client.send_message(server, fmt.format(member, server))\r\n\r\n@client.event\r\nasync def on_voice_state_update(member, before, after):\r\n # if before.voice.voice_channel is None and after.voice.voice_channel is not None:\r\n print(\"name: <%s>\" %(member.name) )\r\n\r\n\r\n@bot.event\r\nasync def on_message(message):\r\n\r\n if \"skpd\" in message.content:\r\n lerasid = 222753236686209024\r\n await bot.send_message(message.channel, \"VASILIAS TWN SKOUPIDIWN <@%s> \" %(lerasid))\r\n\r\n elif \"prodotis\" in message.content:\r\n mafrikiId = 374865385909911553\r\n await bot.send_message(message.channel, \" o megaliteros apo olous <@%s> \" %(mafrikiId))\r\n\r\n elif \"kollitos\" in message.content:\r\n if \"mavriki\" in message.content:\r\n pyrroid = 211120234906648577\r\n await bot.send_message(message.channel, \"<@%s>\" %(pyrroid))\r\n\r\n elif \"tactician\" in message.content:\r\n manosid = 370277653812477973\r\n await bot.send_message(message.channel, \"<@%s>\" %(manosid))\r\n\r\n elif 
message.content.startswith('game?'):\r\n        leras = 222753236686209024\r\n        manoz = 370277653812477973\r\n        alfert = 433795733230059520\r\n        psycho = 408955131690876938\r\n        tsiakkas = 228666893068795905\r\n        bleiz = 230995663607824386\r\n\r\n        await bot.send_message(message.channel, \"<@%s>\\n<@%s>\\n<@%s>\\n<@%s>\\n<@%s>\\n<@%s>\" %(leras,manoz,alfert,psycho,tsiakkas,bleiz) )\r\n\r\n    await bot.process_commands(message)\r\n    \r\n    \r\n@bot.command()\r\nasync def choose(*choices : str):\r\n    await bot.say(random.choice(choices))\r\n\r\n\r\n@bot.command()\r\nasync def joined(member : discord.Member):\r\n    \"\"\"Says when a member joined.\"\"\"\r\n    await bot.say(\"kalwstarxidiamastadyo\")\r\n\r\n@bot.command()\r\nasync def jj(member : discord.Member):\r\n    \"\"\"Says when a member joined.\"\"\"\r\n    \r\n\r\n\r\n\r\n\r\n##################################################################################\r\n\r\nclass Test:\r\n    def __init__(self, bot):\r\n        self.bot = bot\r\n\r\n    @commands.command(pass_context=True, no_pm=True)\r\n    async def qq(self, ctx):\r\n        if ctx.message.author.id == '370147462893142017':  # user IDs are strings in the async (pre-1.0) discord.py API\r\n            await self.bot.logout()\r\n\r\n\r\n\r\nbot.add_cog(Test(bot))\r\n\r\nbot.run('token')\r\n","sub_path":"kongo.py","file_name":"kongo.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"28902291","text":"class Solution(object):\n    def dfs(self, graph, visited, node, parent):\n        if node in visited:\n            return False\n\n        visited.add(node)\n\n        for neighbor in graph[node]:\n            if neighbor == parent:\n                continue\n            if self.dfs(graph, visited, neighbor, node) is False:\n                return False\n        return True\n\n    def validTree(self, n, edges):\n        if not edges:\n            return n == 1\n\n        from collections import defaultdict\n        graph = defaultdict(list)\n        for s, d in edges:\n            graph[s].append(d)\n            graph[d].append(s)\n\n        visited = set()\n        if not self.dfs(graph, visited, 0, -1):\n            return False\n        return len(visited) == n\n\n\nclass Solution(object):\n    def validTree(self, n, edges):\n        parent = range(n)\n        for s, d in edges:\n            while s != parent[s]:\n                s = parent[s]\n            while d != parent[d]:\n                d = parent[d]\n            if s == d:\n                return False\n            parent[s] = d\n        return len(edges) == n - 1\n\n\nclass Solution(object):\n    def validTree(self, n, edges):\n        if len(edges) != n - 1:\n            return False\n        parent = range(n)\n        def find(x):\n            return x if parent[x] == x else find(parent[x])\n        def union(xy):\n            x, y = map(find, xy)\n            parent[x] = y\n            return x != y\n        return all(map(union, edges))\n","sub_path":"algorithms/GraphValidTree/GraphValidTree.py","file_name":"GraphValidTree.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"388752862","text":"#!/usr/bin/env python\n#\n# Copyright 2017 Okinawa Open Laboratory\n#\n# All rights reserved. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"VBSP Connection.\"\"\"\n\nimport time\nimport tornado.ioloop\nimport socket\nimport sys\n\nfrom protobuf_to_dict import protobuf_to_dict\n\nfrom empower.vbsp import EMAGE_VERSION\nfrom empower.vbsp import PRT_UE_JOIN\nfrom empower.vbsp import PRT_UE_LEAVE\nfrom empower.vbsp import PRT_VBSP_HELLO\nfrom empower.vbsp import PRT_VBSP_BYE\nfrom empower.vbsp import PRT_VBSP_REGISTER\nfrom empower.vbsp import PRT_VBSP_TRIGGER_EVENT\nfrom empower.vbsp import PRT_VBSP_AGENT_SCHEDULED_EVENT\nfrom empower.vbsp import PRT_VBSP_SINGLE_EVENT\nfrom empower.vbsp.messages import main_pb2\nfrom empower.vbsp.messages import configs_pb2\nfrom empower.core.utils import hex_to_ether\nfrom empower.core.utils import ether_to_hex\nfrom empower.core.ue import UE\n\nfrom empower.main import RUNTIME\n\nimport empower.logger\nLOG = empower.logger.get_logger()\n\n\ndef create_header(t_id, b_id, header):\n \"\"\"Create message header.\"\"\"\n\n if not header:\n LOG.error(\"header parameter is None\")\n\n header.vers = EMAGE_VERSION\n # Set the transaction identifier (module id).\n header.t_id = t_id\n # Set the Base station identifier.\n header.b_id = b_id\n # Start the sequence number for messages from zero.\n header.seq = 0\n\n\ndef serialize_message(message):\n \"\"\"Serialize message.\"\"\"\n\n if not message:\n LOG.error(\"message parameter is None\")\n return None\n\n return message.SerializeToString()\n\n\ndef deserialize_message(serialized_data):\n \"\"\"De-Serialize message.\"\"\"\n\n if not serialized_data:\n LOG.error(\"Received serialized data is None\")\n return None\n\n msg = main_pb2.emage_msg()\n msg.ParseFromString(serialized_data)\n\n return msg\n\n\nclass VBSPConnection(object):\n \"\"\"VBSP Connection.\n\n Represents a connection to a ENB (EUTRAN Base Station) using\n the VBSP Protocol. One VBSPConnection object is created for every\n ENB in the network. The object implements the logic for handling\n incoming messages. The currently supported messages are:\n\n Attributes:\n stream: The stream object used to talk with the ENB.\n address: The connection source address, i.e. 
the ENB IP address.\n server: Pointer to the server object.\n vbs: Pointer to a VBS object.\n \"\"\"\n\n def __init__(self, stream, addr, server):\n self.stream = stream\n self.stream.set_nodelay(True)\n self.addr = addr\n self.server = server\n self.vbs = None\n self.seq = 0\n self.stream.set_close_callback(self._on_disconnect)\n self.__buffer = b''\n self._hb_interval_ms = 500\n self._hb_worker = tornado.ioloop.PeriodicCallback(self._heartbeat_cb,\n self._hb_interval_ms)\n self.endian = sys.byteorder\n self._hb_worker.start()\n self._wait()\n\n def to_dict(self):\n \"\"\"Return dict representation of object.\"\"\"\n\n return self.addr\n\n def _heartbeat_cb(self):\n \"\"\"Check if connection is still active.\"\"\"\n\n if self.vbs and not self.stream.closed():\n timeout = (self.vbs.period / 1000) * 3\n if (self.vbs.last_seen_ts + timeout) < time.time():\n LOG.info('Client inactive %s at %r', self.vbs.addr, self.addr)\n self.stream.close()\n\n def stream_send(self, message):\n \"\"\"Send message.\"\"\"\n\n # Update the sequence number of the messages\n message.head.seq = self.seq + 1\n\n size = message.ByteSize()\n\n print(message.__str__())\n\n size_bytes = (socket.htonl(size)).to_bytes(4, byteorder=self.endian)\n send_buff = serialize_message(message)\n buff = size_bytes + send_buff\n\n if buff is None:\n LOG.error(\"errno %u occured\")\n\n self.stream.write(buff)\n\n def _on_read(self, line):\n \"\"\" Appends bytes read from socket to a buffer. Once the full packet\n has been read the parser is invoked and the buffers is cleared. The\n parsed packet is then passed to the suitable method or dropped if the\n packet type in unknown. \"\"\"\n\n self.__buffer = b''\n\n if line is not None:\n\n self.__buffer = self.__buffer + line\n\n if len(line) == 4:\n temp_size = int.from_bytes(line, byteorder=self.endian)\n size = socket.ntohl(int(temp_size))\n self.stream.read_bytes(size, self._on_read)\n return\n\n deserialized_msg = deserialize_message(line)\n\n # Update the sequency number from received message\n self.seq = deserialized_msg.head.seq\n\n print(deserialized_msg.__str__())\n\n self._trigger_message(deserialized_msg)\n self._wait()\n\n def _trigger_message(self, deserialized_msg):\n\n event_type = deserialized_msg.WhichOneof(\"event_types\")\n\n if event_type == PRT_VBSP_SINGLE_EVENT:\n msg_type = deserialized_msg.se.WhichOneof(\"events\")\n elif event_type == PRT_VBSP_AGENT_SCHEDULED_EVENT:\n msg_type = deserialized_msg.sche.WhichOneof(\"events\")\n elif event_type == PRT_VBSP_TRIGGER_EVENT:\n msg_type = deserialized_msg.te.WhichOneof(\"events\")\n else:\n LOG.error(\"Unknown message event type %s\", event_type)\n\n if not msg_type or msg_type not in self.server.pt_types:\n LOG.error(\"Unknown message type %s\", msg_type)\n return\n\n if msg_type != PRT_VBSP_HELLO and not self.vbs:\n return\n\n handler_name = \"_handle_%s\" % self.server.pt_types[msg_type]\n\n if hasattr(self, handler_name):\n handler = getattr(self, handler_name)\n handler(deserialized_msg)\n\n if msg_type in self.server.pt_types_handlers:\n for handler in self.server.pt_types_handlers[msg_type]:\n handler(deserialized_msg)\n\n def _handle_hello(self, main_msg):\n \"\"\"Handle an incoming HELLO message.\n\n Args:\n main_msg, a emage_msg containing HELLO message\n Returns:\n None\n \"\"\"\n\n enb_id = main_msg.head.b_id\n vbs_id = hex_to_ether(enb_id)\n\n try:\n vbs = RUNTIME.vbses[vbs_id]\n except KeyError:\n LOG.error(\"Hello from unknown VBS (%s)\", (vbs_id))\n return\n\n LOG.info(\"Hello from %s VBS %s seq %u\", 
self.addr[0], vbs.addr,\n main_msg.head.seq)\n\n # New connection\n if not vbs.connection:\n\n # set pointer to pnfdev object\n self.vbs = vbs\n\n # set connection\n vbs.connection = self\n\n # request registered UEs\n self.send_UEs_id_req()\n\n # generate register message\n self.send_register_message_to_self()\n\n # Update VBSP params\n vbs.period = main_msg.se.mHello.repl.period\n vbs.last_seen = main_msg.head.seq\n vbs.last_seen_ts = time.time()\n\n def _handle_UEs_id_repl(self, main_msg):\n \"\"\"Handle an incoming UEs ID reply.\n\n Args:\n message, a emage_msg containing UE IDs (RNTIs)\n Returns:\n None\n \"\"\"\n\n active_ues = {}\n inactive_ues = {}\n\n event_type = main_msg.WhichOneof(\"event_types\")\n msg = protobuf_to_dict(main_msg)\n ues_id_msg_repl = msg[event_type][\"mUEs_id\"][\"repl\"]\n\n if ues_id_msg_repl[\"status\"] != configs_pb2.CREQS_SUCCESS:\n return\n\n # List of active UEs\n if \"active_ue_id\" in ues_id_msg_repl:\n for ue in ues_id_msg_repl[\"active_ue_id\"]:\n active_ues[(self.vbs.addr, ue[\"rnti\"])] = {}\n if \"imsi\" in ue:\n active_ues[(self.vbs.addr, ue[\"rnti\"])][\"imsi\"] = ue[\"imsi\"]\n else:\n active_ues[(self.vbs.addr, ue[\"rnti\"])][\"imsi\"] = None\n if \"plmn_id\" in ue:\n active_ues[(self.vbs.addr, ue[\"rnti\"])][\"plmn_id\"] = \\\n ue[\"plmn_id\"]\n else:\n active_ues[(self.vbs.addr, ue[\"rnti\"])][\"plmn_id\"] = None\n\n # List of inactive UEs\n if \"inactive_ue_id\" in ues_id_msg_repl:\n for ue in ues_id_msg_repl[\"inactive_ue_id\"]:\n inactive_ues[(self.vbs.addr, ue[\"rnti\"])] = {}\n if \"imsi\" in ue:\n inactive_ues[(self.vbs.addr, ue[\"rnti\"])][\"imsi\"] = \\\n ue[\"imsi\"]\n else:\n inactive_ues[(self.vbs.addr, ue[\"rnti\"])][\"imsi\"] = None\n if \"plmn_id\" in ue:\n inactive_ues[(self.vbs.addr, ue[\"rnti\"])][\"plmn_id\"] = \\\n ue[\"plmn_id\"]\n else:\n inactive_ues[(self.vbs.addr, ue[\"rnti\"])][\"plmn_id\"] = None\n\n for vbs_id, rnti in active_ues.keys():\n\n ue_id = (self.vbs.addr, rnti)\n\n if ue_id not in RUNTIME.ues:\n new_ue = UE(ue_id, ue_id[1], self.vbs)\n RUNTIME.ues[ue_id] = new_ue\n\n ue = RUNTIME.ues[ue_id]\n\n imsi = active_ues[ue_id][\"imsi\"]\n plmn_id = int(active_ues[ue_id][\"plmn_id\"])\n\n # Setting IMSI of UE\n ue.imsi = imsi\n\n if not ue.plmn_id and plmn_id:\n\n # Setting tenant\n ue.tenant = RUNTIME.load_tenant_by_plmn_id(plmn_id)\n\n if ue.tenant:\n\n # Adding UE to tenant\n LOG.info(\"Adding %s to tenant %s\", ue.addr,\n ue.tenant.plmn_id)\n ue.tenant.ues[ue.addr] = ue\n\n # Raise UE join\n self.server.send_ue_join_message_to_self(ue)\n\n # Create a trigger for reporting RRC measurements config.\n from empower.ue_confs.ue_rrc_meas_confs import ue_rrc_meas_confs\n\n conf_req = {\n \"event_type\": \"trigger\"\n }\n\n ue_rrc_meas_confs(tenant_id=ue.tenant.tenant_id,\n vbs=ue.vbs.addr,\n ue=ue.rnti,\n conf_req=conf_req)\n\n if ue.plmn_id and not plmn_id:\n\n # Raise UE leave\n self.server.send_ue_leave_message_to_self(ue)\n\n # Removing UE from tenant\n LOG.info(\"Removing %s from tenant %s\", ue.addr,\n ue.tenant.plmn_id)\n del ue.tenant.ues[ue.addr]\n\n # Resetting tenant\n ue.tenant = None\n\n existing_ues = []\n existing_ues.extend(RUNTIME.ues.keys())\n\n for ue_addr in existing_ues:\n if ue_addr not in active_ues:\n RUNTIME.remove_ue(ue_addr)\n\n def _handle_rrc_meas_conf_repl(self, main_msg):\n \"\"\"Handle an incoming UE's RRC Measurements configuration reply.\n\n Args:\n message, a message containing RRC Measurements configuration in UE\n Returns:\n None\n \"\"\"\n\n event_type = 
main_msg.WhichOneof(\"event_types\")\n msg = protobuf_to_dict(main_msg)\n rrc_m_conf_repl = msg[event_type][\"mUE_rrc_meas_conf\"][\"repl\"]\n\n rnti = rrc_m_conf_repl[\"rnti\"]\n\n ue_id = (self.vbs.addr, rnti)\n\n if ue_id not in RUNTIME.ues:\n return\n\n ue = RUNTIME.ues[ue_id]\n\n if rrc_m_conf_repl[\"status\"] != configs_pb2.CREQS_SUCCESS:\n return\n\n del rrc_m_conf_repl[\"rnti\"]\n del rrc_m_conf_repl[\"status\"]\n\n if \"ue_rrc_state\" in rrc_m_conf_repl:\n ue.rrc_state = rrc_m_conf_repl[\"ue_rrc_state\"]\n del rrc_m_conf_repl[\"ue_rrc_state\"]\n\n if \"capabilities\" in rrc_m_conf_repl:\n ue.capabilities = rrc_m_conf_repl[\"capabilities\"]\n del rrc_m_conf_repl[\"capabilities\"]\n\n ue.rrc_meas_config = rrc_m_conf_repl\n\n def send_UEs_id_req(self):\n \"\"\" Send request for UEs ID registered in VBS \"\"\"\n\n ues_id_req = main_pb2.emage_msg()\n\n enb_id = ether_to_hex(self.vbs.addr)\n # Transaction identifier is zero by default.\n create_header(0, enb_id, ues_id_req.head)\n\n # Creating a trigger message to fetch UE RNTIs\n trigger_msg = ues_id_req.te\n trigger_msg.action = main_pb2.EA_ADD\n\n UEs_id_msg = trigger_msg.mUEs_id\n UEs_id_req_msg = UEs_id_msg.req\n\n UEs_id_req_msg.dummy = 1\n\n LOG.info(\"Sending UEs request to VBS %s (%u)\",\n self.vbs.addr, enb_id)\n\n self.stream_send(ues_id_req)\n\n def send_rrc_meas_conf_req(self, ue):\n \"\"\" Sends a request for RRC measurements configuration of UE \"\"\"\n\n rrc_m_conf_req = main_pb2.emage_msg()\n enb_id = ether_to_hex(self.vbs.addr)\n\n # Transaction identifier is zero by default.\n create_header(0, enb_id, rrc_m_conf_req.head)\n\n # Creating a trigger message to fetch UE RNTIs\n trigger_msg = rrc_m_conf_req.te\n trigger_msg.action = main_pb2.EA_ADD\n\n rrc_m_conf_msg = trigger_msg.mUE_rrc_meas_conf\n rrc_m_conf_req_msg = rrc_m_conf_msg.req\n\n rrc_m_conf_req_msg.rnti = ue.rnti\n\n LOG.info(\"Sending UEs RRC measurement config request to VBS %s (%u)\",\n self.vbs.addr, enb_id)\n\n self.stream_send(rrc_m_conf_req)\n\n def _wait(self):\n \"\"\" Wait for incoming packets on signalling channel \"\"\"\n self.stream.read_bytes(4, self._on_read)\n\n def _on_disconnect(self):\n \"\"\"Handle VBSP disconnection.\"\"\"\n\n if not self.vbs:\n return\n\n LOG.info(\"VBS disconnected: %s\", self.vbs.addr)\n\n # remove hosted ues\n for addr in list(RUNTIME.ues.keys()):\n ue = RUNTIME.ues[addr]\n if ue.vbs == self.vbs:\n RUNTIME.remove_ue(ue.addr)\n\n # reset state\n self.vbs.last_seen = 0\n self.vbs.connection = None\n self.vbs.ues = {}\n self.vbs.period = 0\n self.vbs = None\n\n def send_bye_message_to_self(self):\n \"\"\"Send a unsollicited BYE message to self.\"\"\"\n\n for handler in self.server.pt_types_handlers[PRT_VBSP_BYE]:\n handler(self.vbs)\n\n def send_register_message_to_self(self):\n \"\"\"Send a REGISTER message to self.\"\"\"\n\n for handler in self.server.pt_types_handlers[PRT_VBSP_REGISTER]:\n handler(self.vbs)\n","sub_path":"empower/vbsp/vbspconnection.py","file_name":"vbspconnection.py","file_ext":"py","file_size_in_byte":15120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"463813970","text":"import random\nimport numpy as np\nfrom keras import Sequential\nfrom keras.layers import Dense, Activation\n\n\nNUM_ITER = 10000\nPERCENT = 10\nREWARD = 2\n\n\nclass World:\n\n def __init__(self):\n self.state = (0, 0)\n self.available_actions = []\n self.win = \"win\"\n self.loss = \"loss\"\n self.cont = \"cont\"\n self.reward_from_last_played = 0\n\n def execute(self, 
action):\n self.state = action\n # self.state[action[0]][action[1]] = player_num\n # self.available_actions.remove(action)\n\n result = self.check_game()\n if result == self.win:\n self.reward_from_last_played = REWARD\n self.reset()\n elif result == self.loss:\n self.reward_from_last_played = -1\n else:\n self.reward_from_last_played = 0\n\n def get_state_and_reward(self):\n return self.state, self.reward_from_last_played\n\n def check_game(self):\n if self.state == (10, 10):\n return self.win\n\n # if self.state[1] == 9:\n # return self.loss\n\n return self.cont\n\n def reset(self):\n self.state = (0, 0)\n\n def get_actions(self):\n self.available_actions = []\n if 0 < self.state[0]:\n self.available_actions.append((self.state[0] - 1, self.state[1]))\n if 20 > self.state[0]:\n self.available_actions.append((self.state[0] + 1, self.state[1]))\n if 0 < self.state[1]:\n self.available_actions.append((self.state[0], self.state[1] - 1))\n if 20 > self.state[1]:\n self.available_actions.append((self.state[0], self.state[1] + 1))\n\n return self.available_actions\n\n def get_initial_state(self):\n return 0, 0\n\n\nclass ConsoleOutput:\n\n def __init__(self):\n self.world = World()\n\n def display_state(self):\n print(self.world.state)\n\n def make_action(self, action):\n self.world.execute(action)\n\n\nclass QLearning:\n\n def __init__(self):\n self.world = World()\n self.Q = {}\n self.epsilon = .2\n self.discount_factor = .6\n self.learning_rate = .8\n self.state = self.world.state\n\n def get_qvalue(self, state, action):\n if (state, action) in self.Q:\n return self.Q[(state, action)]\n else:\n self.Q[(state, action)] = 0\n return 0\n\n def set_qvalue(self, state, action, value):\n self.Q[(state, action)] = value\n\n def select_action(self, state):\n if random.random() > self.epsilon:\n best_q = 0\n best_actions = []\n for action in self.world.get_actions():\n qval = self.get_qvalue(state, action)\n if qval > best_q:\n best_q = self.get_qvalue(state, action)\n best_actions = [action]\n elif qval == best_q:\n best_actions.append(action)\n\n return random.sample(best_actions, 1)[0]\n\n else:\n return random.sample(self.world.get_actions(), 1)[0]\n\n def get_max_q(self, state):\n best_q = 0\n for action in self.world.get_actions():\n if self.get_qvalue(state, action) >= best_q:\n best_q = self.get_qvalue(state, action)\n return best_q\n\n def run_learning(self):\n self.state = self.world.get_initial_state()\n\n for iteration in range(1, NUM_ITER):\n if iteration % (NUM_ITER / PERCENT) == 0:\n print(str(float(iteration / (NUM_ITER / PERCENT)) * PERCENT) + \"% done with training\")\n\n # player one\n action = self.select_action(self.state)\n self.world.execute(action)\n next_state, reward = self.world.get_state_and_reward()\n\n # reward player 1\n value = self.get_qvalue(self.state, action) - \\\n self.learning_rate * (self.get_qvalue(self.state, action) - (\n reward + self.discount_factor * self.get_max_q(next_state)))\n\n self.set_qvalue(self.state, action, value)\n self.state = next_state\n\n num_explored = 0\n for k, v in self.Q.items():\n if v != 0:\n num_explored += 1\n # print \"in state \" + str(k[0]) + \" the move \" + str(k[1]) + \" had value \" + str(v)\n print(\"number of explored states is \" + str(num_explored))\n\n print(\"#TRAINING COMPLETE#\")\n\n def choose_best_move(self, world):\n best_q = -np.inf\n best_actions = []\n for action in world.get_actions():\n qval = self.get_qvalue(world.state, action)\n if qval > best_q:\n best_q = self.get_qvalue(world.state, action)\n 
best_actions = [action]\n elif qval == best_q:\n best_actions.append(action)\n\n return random.sample(best_actions, 1)[0]\n\n\nclass NN_QLearning:\n\n def __init__(self):\n self.experience = []\n self.world = World()\n\n self.epsilon = .3\n self.discount_factor = 1\n self.len_exp = 1000\n\n self.input_shape = 84\n self.output_shape = 1\n self.model = self.create_model()\n\n def create_model(self):\n model = Sequential()\n model.add(Dense(100, input_shape=(self.input_shape,)))\n model.add(Activation('relu')),\n model.add(Dense(100, input_shape=(self.input_shape,)))\n model.add(Activation('relu')),\n model.add(Dense(self.output_shape)),\n model.add(Activation('linear'))\n model.compile(optimizer='rmsprop',\n loss='mse')\n\n return model\n\n def state_to_vec(self, s):\n a = np.zeros((21,))\n a[s] = 1\n return a\n\n def gen_board_action_array(self, state, action):\n new_state = [self.state_to_vec(state[0]), self.state_to_vec(state[1]),\n self.state_to_vec(action[0]), self.state_to_vec(action[1])]\n vector = np.concatenate(new_state)\n vector = np.array([vector])\n return vector\n\n def generate_experience(self, size):\n # generate (s, a, r, s') tuples using current model (NN_Q)\n\n experiences = []\n for _ in range(0, size):\n tidbit = [None, None, None, None]\n tidbit[0] = self.world.state\n action = self.select_action(self.world.state)\n tidbit[1] = action\n self.world.execute(action)\n tidbit[3], tidbit[2] = self.world.get_state_and_reward()\n\n experiences.append(tidbit)\n\n return experiences\n\n def generate_training_pairs(self, experiences):\n data = np.zeros((len(experiences), self.input_shape))\n labels = np.zeros((len(experiences), self.output_shape))\n\n for i in range(len(experiences)):\n exp = experiences[i]\n data[i] = np.array(self.gen_board_action_array(exp[0], exp[1]))\n\n value = exp[2] + self.discount_factor * self.get_max_q(exp[3]) # reward + max Q of s'\n labels[i] = np.array([value])\n\n return data, labels\n\n def train_model(self, iterations):\n for i in range(iterations):\n print(str(i/iterations * 100) + \"% through training\")\n\n experiences = self.generate_experience(self.len_exp)\n data, labels = self.generate_training_pairs(experiences)\n\n self.model.fit(data, labels, batch_size=100, epochs=1)\n\n def get_qvalue(self, state, action):\n v=self.gen_board_action_array(state, action)\n return self.model.predict(v)\n\n def select_action(self, state):\n if random.random() > self.epsilon:\n best_q = -np.inf\n best_actions = []\n for action in self.world.get_actions():\n qval = self.get_qvalue(state, action)\n if qval > best_q:\n best_q = self.get_qvalue(state, action)\n best_actions = [action]\n elif qval == best_q:\n best_actions.append(action)\n\n return random.sample(best_actions, 1)[0]\n\n else:\n return random.sample(self.world.get_actions(), 1)[0]\n\n def get_max_q(self, state):\n best_q = 0\n for action in self.world.get_actions():\n if self.get_qvalue(state, action) >= best_q:\n best_q = self.get_qvalue(state, action)\n return best_q\n\n def choose_best_move(self, world):\n best_q = -np.inf\n best_actions = []\n for action in world.get_actions():\n qval = self.get_qvalue(world.state, action)\n if qval > best_q:\n best_q = self.get_qvalue(world.state, action)\n best_actions = [action]\n elif qval == best_q:\n best_actions.append(action)\n\n return random.sample(best_actions, 1)[0]\n\n\nif __name__ == \"__main__\":\n\n nn = NN_QLearning()\n nn.train_model(iterations=40)\n\n console = ConsoleOutput()\n print(\"#STARTING GAME#\")\n\n iter = 0\n\n while iter < 
500:\n iter += 1\n console.make_action(nn.choose_best_move(console.world))\n if console.world.get_state_and_reward()[1] == REWARD:\n break\n console.display_state()\n\n print(iter)","sub_path":"locations.py","file_name":"locations.py","file_ext":"py","file_size_in_byte":9204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"521502585","text":"\"\"\"Quick exercise to build intuition for students on the topic of for loops.\"\"\"\n\n# Welcome to loop practice!\nprint(\"Welcome to loop practice!\")\nprint(\"________________________\")\nprint(\"For each question, type the answer as a number.\")\n\nscore = 0\n\nans_1 = int(input(\"What is x? \"))\nif ans_1 == 20:\n score += 1\n\nans_2 = int(input(\"What is x? \"))\nif ans_2 == 24:\n score += 1\n\nans_3 = int(input(\"What is x? \"))\nif ans_3 == 9:\n score += 1\n\nans_4 = int(input(\"What is x+y+z? \"))\nif ans_4 == 70:\n score += 1\n\nprint(\"Using a for loop, let's write a program that finds a sum of squares!\")\n","sub_path":"python/intro_to_python/for_loop_exercises2.py","file_name":"for_loop_exercises2.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"334380437","text":"#!/usr/bin/python\nimport argparse\nimport socket\nimport sys\n \n# We can't use regular python-memcache since it doesn't enable explicit flag setting. Implememnt my own.\nclass MemcacheRaw:\n def __init__(self, addr='localhost', port=11211, timeout=None):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.settimeout(timeout)\n self.sock.connect((addr, port))\n \n def set(self, key, val, flags=0, expiry=0):\n self.sock.sendall('set %s %d %d %d\\r\\n%s\\r\\n'%(key, flags, expiry, len(val), val))\n if not self.sockExpect('STORED\\r\\n'):\n raise Exception('Failed to store key %s'%key)\n \n def sockExpect(self, expect):\n buf = ''\n while True:\n d = self.sock.recv(len(expect) - len(buf))\n if not d:\n raise Exception('Socket closed')\n buf += d\n if len(buf) == len(expect):\n if buf == expect:\n return True\n else:\n return False\n\nif __name__ == '__main__':\n argParser = argparse.ArgumentParser(description='memcache populator.')\n argParser.add_argument('--addr', default='localhost', help='Server address. Defaults to localhost.')\n argParser.add_argument('--port', default=11211, type=int, help='Server port. Defaults to 11211.')\n argParser.add_argument('csv_file', help='CSV File to read data from. 
See: https://github.com/RedisLabs/memcache_populator for details.')\n \n args = argParser.parse_args()\n \n # Open input file\n try:\n f = open(args.csv_file)\n except Exception as e:\n print >> sys.stderr, 'Error %s opening file: %s'%e\n exit(1)\n\n # Connect to server\n try:\n srv = MemcacheRaw(addr=args.addr, port=args.port)\n except Exception as e:\n print >> sys.stderr, 'Error connecting to server: %s'%e\n exit(1)\n\n # Parse CSV file and fill server\n try:\n while True:\n line = f.readline()\n if not line:\n break\n parts = line.strip().split(',')\n srv.set(parts[0].decode('string_escape'), parts[1].decode('string_escape'), int(parts[2], 16), int(parts[3]))\n except Exception as e:\n print >> sys.stderr, 'Failed populating server from csv file: %s'%e\n exit(1)\n","sub_path":"mcpopulator.py","file_name":"mcpopulator.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"194101959","text":"# -*- coding:utf-8 -*-\n\"\"\" Embers Meshblu Rest API class\n\"\"\"\n\nimport sys\nimport requests\n# pylint: disable=import-error,no-name-in-module\n# pylint: disable=wrong-import-order\ntry: # pragma: no cover\n from urllib.parse import urljoin\n from urllib.error import HTTPError\nexcept ImportError: # pragma: no cover\n # pylint: disable=import-error,no-name-in-module\n from urlparse import urljoin\n from urllib2 import HTTPError\n\n\nBROKER_URL = 'http://msg.embers.citibrain.com'\n\n# pylint: disable=maybe-no-member,no-member\nclass MeshbluApi(object):\n \"\"\" Meshblu REST API \"\"\"\n\n status_codes = [requests.codes.ok, requests.codes.created]\n\n @classmethod\n def get_headers(cls, auth_uuid, auth_token):\n \"\"\" get headers that will be sent with HTTP request\n \"\"\"\n return {'meshblu_auth_uuid':auth_uuid, 'meshblu_auth_token':auth_token}\n\n def get_status(self):\n \"\"\" Returns the broker status.\n \"\"\"\n return self.method('status')\n\n def add_device(self, payload, auth_uuid=None, auth_token=None):\n \"\"\" Register a device. Meshblu returns an UUID device id\n and security token. 
You can pass any key/value pairs and\n even override Meshblu's auto-generated UUID and token\n by passing your own uuid and token in the payload.\n\n :param payload: key/value pair dictionary\n :param auth_uuid: uuid authentication credential\n :param auth_token: secret token authentication credential\n \"\"\"\n if auth_uuid is not None and auth_token is not None:\n # dict.update() mutates in place and returns None, so do not reassign payload\n payload.update({'meshblu_auth_uuid': auth_uuid,\n 'meshblu_auth_token': auth_token})\n return self.method('devices',\n 'post',\n params=payload)\n\n def delete_device(self, uuid, auth_uuid, auth_token):\n \"\"\" Delete a device currently registered that you have\n access to update.\n\n :param uuid : device uuid\n :param auth_uuid: uuid authentication credential\n :param auth_token: secret token authentication credential\n \"\"\"\n headers = self.get_headers(auth_uuid, auth_token)\n return self.method('devices/' + uuid,\n 'delete',\n headers=headers)\n\n def get_device(self, uuid, auth_uuid, auth_token):\n \"\"\" Returns all information (except the token) of a\n specific device or node\n \n :param uuid : device uuid\n :param auth_uuid: uuid authentication credential\n :param auth_token: secret token authentication credential\n \"\"\"\n headers = self.get_headers(auth_uuid, auth_token)\n return self.method('devices/' + uuid,\n headers=headers)\n \n def get_devices(self, payload, auth_uuid, auth_token):\n \"\"\" Returns an array of devices based on key/value\n query criteria (except the token)\n\n :param payload: key/value pair dictionary\n :param auth_uuid: uuid authentication credential\n :param auth_token: secret token authentication credential\n \"\"\"\n headers = self.get_headers(auth_uuid, auth_token)\n return self.method('devices',\n params=payload,\n headers=headers)\n\n def update_device(self, uuid, payload, auth_uuid, auth_token):\n \"\"\" Update a device currently registered.\n You can pass any key/value pairs to update object as well as\n null to remove a property (i.e. 
uid=null).\n\n :param uuid : device uuid\n :param payload: key/value pair dictionary\n :param auth_uuid: uuid authentication credential\n :param auth_token: secret token authentication credential\n \"\"\"\n headers = self.get_headers(auth_uuid, auth_token)\n return self.method('devices/' + uuid,\n 'put',\n params=payload,\n headers=headers)\n\n def claim_device(self, uuid, auth_uuid, auth_token):\n \"\"\" claim ownership of another device\n \n :param uuid : device uuid\n :param auth_uuid: uuid authentication credential\n :param auth_token: secret token authentication credential\n \"\"\"\n headers = self.get_headers(auth_uuid, auth_token)\n return self.method('claimdevice/' + uuid,\n 'put',\n headers=headers)\n \n \n def subscribe_device(self, uuid, auth_uuid, auth_token):\n \"\"\" Subscribe to device\n \n :param uuid : device uuid\n :param auth_uuid: uuid authentication credential\n :param auth_token: secret token authentication credential\n \"\"\"\n headers = self.get_headers(auth_uuid, auth_token)\n return self.method('subscribe/' + uuid,\n headers=headers)\n \n \n def send_message(self, payload, auth_uuid, auth_token):\n \"\"\"Send a message to devices\n\n :param devices: gateway devices (\"uuid\", \"*\", [\"uuid1\", \"uuid2\"])\n :param payload: key/value pair dictionary\n :param auth_uuid: uuid authentication credential\n :param auth_token: secret token authentication credential\n \"\"\"\n headers = self.get_headers(auth_uuid, auth_token)\n return self.method('messages', 'post',\n json=payload,\n headers=headers)\n\n\n def send_data(self, uuid, payload, auth_uuid, auth_token):\n \"\"\"Store sensor data for a device\n\n :param uuid : device uuid\n :param payload: key/value pair dictionary\n :param auth_uuid: uuid authentication credential\n :param auth_token: secret token authentication credential\n \"\"\"\n headers = self.get_headers(auth_uuid, auth_token)\n self.method('data/' + uuid, 'post', params=payload, headers=headers)\n\n\n def method(self, url, method='get', # pylint:disable=too-many-arguments\n json=None, params=None, headers=None, raw=False):\n \"\"\"\n Call http `method`\n\n :param url: url of API.\n :param method: request method\n :param json: send as 'post' json encoded data\n :param params : dictionary sent in the query string\n :param headers : dictionary of HTTP headers\n :param raw: Should data be loaded as json or not\n \"\"\"\n assert method in ('get', 'post', 'delete', 'put')\n _url = urljoin(BROKER_URL, url)\n try:\n req = requests.request(\n method,\n _url,\n json=json,\n params=params,\n headers=headers)\n\n if req.status_code in self.status_codes:\n return req.content if raw else req.json()\n except: # show issue with old requests versions\n raise RuntimeError(sys.exc_info())\n else:\n # Indent req.text to pretty print it later\n indented_lines = ['\\t' + l for l in req.text.splitlines(True)]\n msg = '\\n' + ''.join(indented_lines)\n raise HTTPError(_url, req.status_code, msg, req.headers, None)\n\n ","sub_path":"rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"277007856","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport sqlite3\nimport re\nimport json\n\nDB_PATH = sys.argv[1]\nBASE_PATH = sys.argv[2]\n\ndef get_file_title(fpath):\n    with open(fpath) as f:\n        s = re.findall(r\"#\\+title:\\s(.*)\", f.read(), flags=re.IGNORECASE)\n\n    if len(s) > 0:\n        return s[0]\n    return \"\"\n\ndef is_note_file(fpath):\n    return (\n        
os.path.dirname(fpath) == BASE_PATH and\n        os.path.splitext(fpath)[1] == \".org\"\n    )\n\ndef make_slug(fpath):\n    return os.path.splitext(os.path.basename(fpath))[0]\n\ndef make_url(fpath):\n    return f\"/{make_slug(fpath)}\"\n\ndef make_node(fpath, nodes):\n    slug = make_slug(fpath)\n    url = make_url(fpath)\n    title = get_file_title(fpath)\n    nodes.add(tuple({\"id\": slug, \"title\":title, \"url\":url}.items()))\n\n    return slug\n\nclass my_default_dict(dict):\n    def __missing__(self, key):\n        res = self[key] = {\"name\": key, \"children\": list()}\n        return res\n\nconn = sqlite3.connect(DB_PATH)\nc = conn.cursor()\n\nedges = list()\nnodes = set()\nedge_dict = my_default_dict()\n\nfor (source, dest) in c.execute(\"SELECT source, dest FROM links WHERE type == '\\\"file\\\"'\"):\n    source = os.path.expanduser(source.replace('\"', \"\"))\n    dest = os.path.expanduser(dest.replace('\"', \"\"))\n\n    if not (is_note_file(source) and is_note_file(dest)):\n        continue\n\n    src_slug = make_node(source, nodes)\n    dest_slug = make_node(dest, nodes)\n\n    edges.append({\"source\": src_slug, \"target\": dest_slug})\n    edge_dict[src_slug][\"children\"].append(dest_slug)\n\ngraph = {\"nodes\": [dict(_) for _ in nodes], \"edges\": edges, \"edge_dict\": edge_dict}\n\nprint(json.dumps(graph))\n","sub_path":"scripts/make-graph.py","file_name":"make-graph.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"531483317","text":"from sanic import Sanic\nfrom sanic.log import logger\nimport os\n\nimport multiprocessing\n\nimport gunicorn.app.base\n\nfrom function.handler import handle\n\napp = Sanic(__name__)\n\nasync def event_handler(request):\n    resp = await event_handler_path(request, '')\n    return resp\n\n\nasync def event_handler_path(request, path):\n    response_data = await handle(request)\n    return response_data\n\n\napp.add_route(event_handler, '/', methods=['GET', 'PUT', 'POST', 'PATCH', 'DELETE'])\napp.add_route(event_handler_path, '/<path:path>', methods=['GET', 'PUT', 'POST', 'PATCH', 'DELETE'])\n\n\ndef number_of_workers():\n    return (multiprocessing.cpu_count() * 2) + 1\n\n\nclass StandaloneApplication(gunicorn.app.base.BaseApplication):\n\n    def __init__(self, app, options=None):\n        self.options = options or {}\n        self.application = app\n        super(StandaloneApplication, self).__init__()\n\n    def load_config(self):\n        config = dict([(key, value) for key, value in self.options.items()\n                       if key in self.cfg.settings and value is not None])\n        for key, value in config.items():\n            self.cfg.set(key.lower(), value)\n\n    def load(self):\n        return self.application\n\n\nif __name__ == '__main__':\n    options = {\n        'bind': '%s:%s' % ('0.0.0.0', '5000'),\n        'workers': number_of_workers(),\n        'worker_class': 'sanic.worker.GunicornWorker',\n    }\n    StandaloneApplication(app, options).run()","sub_path":"python/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"60200194","text":"import pytest\n\nfrom .._tensor_dropout import cp_dropout, remove_cp_dropout\nfrom .._tensor_dropout import tt_dropout, remove_tt_dropout\nfrom .._tensor_dropout import tucker_dropout, remove_tucker_dropout\n\nfrom .._trl import TuckerTRL, CPTRL, TensorTrainTRL\n\nimport tensorly as tl\ntl.set_backend('pytorch')\n\ndef test_tucker_dropout():\n    \"\"\"Test for Tucker Dropout\"\"\"\n    in_shape = (10, 10)\n    out_shape = (10, )\n    rank = (7, 8, 9)\n    trl = TuckerTRL(in_shape, 
out_shape, rank=rank)\n trl = tucker_dropout(trl, 1)\n core, _ = trl._process_decomposition()\n assert (tl.shape(core) == (1, 1, 1))\n\n remove_tucker_dropout(trl)\n assert (not trl._decomposition_forward_pre_hooks)\n\n trl = tucker_dropout(trl, 0)\n core, _ = trl._process_decomposition()\n assert (tl.shape(core) == rank)\n\ndef test_cp_dropout():\n \"\"\"Test for CP Dropout\"\"\"\n in_shape = (10, 10)\n out_shape = (10, )\n rank = 8\n trl = CPTRL(in_shape, out_shape, rank=rank)\n trl = cp_dropout(trl, 1)\n weights, _ = trl._process_decomposition()\n assert (len(weights) == (1))\n\n remove_cp_dropout(trl)\n assert (not trl._decomposition_forward_pre_hooks)\n\n trl = cp_dropout(trl, 0)\n weights, _ = trl._process_decomposition()\n assert (len(weights) == rank)\n\n\ndef test_tt_dropout():\n \"\"\"Test for TT Dropout\"\"\"\n in_shape = (10, 10)\n out_shape = (10, )\n # Use the same rank for all factors\n rank = 4\n trl = TensorTrainTRL(in_shape, out_shape, rank=rank)\n trl = tt_dropout(trl, 1)\n factors = trl._process_decomposition()\n for f in factors:\n assert (f.shape[0] == f.shape[-1] == 1)\n\n remove_tt_dropout(trl)\n assert (not trl._decomposition_forward_pre_hooks)\n\n trl = tt_dropout(trl, 0)\n factors = trl._process_decomposition()\n for i, f in enumerate(factors):\n if i:\n assert (f.shape[0] == rank)\n else: # boundary conditions: first and last rank are equal to 1\n assert (f.shape[-1] == rank)","sub_path":"tltorch/tests/test_tensor_dropout.py","file_name":"test_tensor_dropout.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"162384080","text":"import os\nimport sys\npath = os.path.dirname(os.path.abspath(__file__))\nup_path, _ = os.path.split(path)\nsys.path.append(up_path)\nimport numpy as np\n\nfrom Plate_Recognition.common.characters import *\nfrom Plate_Recognition.common.read_dirfile import read_dirfile_name, license_plate\nfrom Plate_Recognition.common.generator import train_generator, valid_generator\nfrom sklearn.cross_validation import train_test_split\n\nfrom keras import backend as K\nfrom keras.layers.merge import add, concatenate\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers import Input, Dense, Activation, Reshape, Lambda, Dropout, merge\nfrom keras.layers.recurrent import GRU\nfrom keras.models import Model\nfrom keras.optimizers import SGD\nfrom keras.callbacks import *\n\nDATA_DIR = os.path.join(os.getcwd() + '/data/train/')\nALL_FILES = read_dirfile_name(DATA_DIR)\ntrain_data, valid_data = train_test_split(ALL_FILES, test_size=0.1)\nOUTPUT_DIR = 'image_ocr'\n\n# image size\nwidth, height = 320, 240\n\nepoch = 200\nbatch_size = 64\n\n# Network parameters\nconv_filters = 16\nkernel_size = (3, 3)\npool_size = 2\ntime_dense_size = 32\nrnn_size = 512\ninput_shape = (width, height, 3)\n\n\ndef ctc_lambda_func(args):\n y_pred, labels, input_length, label_length = args\n y_pred = y_pred[:, 2:, :]\n return K.ctc_batch_cost(labels, y_pred, input_length, label_length)\n\n\ninput_data = Input(name='the_input', shape=input_shape, dtype='float32')\nx = input_data\nfor i in range(3):\n x = Conv2D(32, (3, 3), activation='relu')(x)\n x = Conv2D(32, (3, 3), activation='relu')(x)\n x = MaxPooling2D(pool_size=(2, 2))(x)\n\nconv_shape = x.get_shape()\nx = Reshape(target_shape=(int(conv_shape[1]), int(conv_shape[2]*conv_shape[3])))(x)\n\nx = Dense(32, activation='relu')(x)\ngru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', 
name='gru1')(x)\ngru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True,\n kernel_initializer='he_normal', name='gru1_b')(x)\ngru1_merged = add([gru_1, gru_1b])\ngru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)\ngru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True,\n kernel_initializer='he_normal', name='gru2_b')(gru1_merged)\nx = concatenate([gru_2, gru_2b])\nx = Dropout(0.25)(x)\nx = Dense(len(characters)+1, kernel_initializer='he_normal', activation='softmax')(x)\nbase_model = Model(inputs=input_data, outputs=x)\n\nlabels = Input(name='the_labels', shape=[7], dtype='float32')\ninput_length = Input(name='input_length', shape=[1], dtype='int64')\nlabel_length = Input(name='label_length', shape=[1], dtype='int64')\nloss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([x, labels, input_length, label_length])\n\n\nsgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)\nmodel = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)\nmodel.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd) #optimizer='adadelta'\n\n\ndef evaluate(model, batch_num=5):\n batch_acc = 0\n generator = valid_generator(valid_data)\n for i in range(batch_num):\n inputs, output = next(generator)\n y_pred = base_model.predict(inputs['the_input'])\n shape = y_pred[:, 2:, :].shape\n ctc_decode = K.ctc_decode(y_pred[:, 2:, :], input_length=np.ones(shape[0]) * shape[1])[0][0]\n out = K.get_value(ctc_decode)[:, :4]\n print('output', out)\n if out.shape[1] == 7:\n batch_acc += ((inputs['the_labels'] == out).sum(axis=1) == 7).mean()\n return batch_acc / batch_num\n\n\nclass Evaluate(Callback):\n def __init__(self):\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n acc = evaluate(base_model) * 100\n self.accs.append(acc)\n print()\n print('acc: %f%%' % acc)\n\n\nevaluator = Evaluate()\n\n\nfile_name = str(epoch) + '_' + str(batch_size)\ncallbacks = [\n EarlyStopping(monitor='val_loss',\n patience=5,\n verbose=1,\n min_delta=0.01,\n mode='min'),\n TensorBoard(log_dir='logs/' + file_name),\n evaluator\n]\n\nmodel.fit_generator(generator=train_generator(train_data, batch_size),\n steps_per_epoch=int(np.ceil(len(train_data)/64)),\n epochs=epoch,\n verbose=1,\n callbacks=callbacks,\n validation_data=valid_generator(valid_data, batch_size),\n validation_steps=int(np.ceil(len(valid_data)/64)),\n )\nmodel.save('h5/' + file_name + '.h5')\n","sub_path":"Plate_Recognition_v2.py","file_name":"Plate_Recognition_v2.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"295734520","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Tablib - ODF Support.\n\"\"\"\n\nimport sys\n\n\nif sys.version_info[0] > 2:\n from io import BytesIO\nelse:\n from cStringIO import StringIO as BytesIO\n\nfrom tablib.compat import opendocument, style, table, text, unicode\n\ntitle = 'ods'\nextentions = ('ods',)\n\nbold = style.Style(name=\"bold\", family=\"paragraph\")\nbold.addElement(style.TextProperties(fontweight=\"bold\", fontweightasian=\"bold\", fontweightcomplex=\"bold\"))\n\ndef detect(stream):\n \"\"\"Returns True if given stream is a readable excel file.\"\"\"\n try:\n doc = opendocument.load(stream)\n return True\n except:\n return False\n\ndef export_set(dataset):\n \"\"\"Returns ODF representation of Dataset.\"\"\"\n\n wb = opendocument.OpenDocumentSpreadsheet()\n wb.automaticstyles.addElement(bold)\n\n ws = 
table.Table(name=dataset.title if dataset.title else 'Tablib Dataset')\n wb.spreadsheet.addElement(ws)\n dset_sheet(dataset, ws)\n\n stream = BytesIO()\n wb.save(stream)\n return stream.getvalue()\n\n\ndef export_book(databook):\n \"\"\"Returns ODF representation of DataBook.\"\"\"\n\n wb = opendocument.OpenDocumentSpreadsheet()\n wb.automaticstyles.addElement(bold)\n\n for i, dset in enumerate(databook._datasets):\n ws = table.Table(name=dset.title if dset.title else 'Sheet%s' % (i))\n wb.spreadsheet.addElement(ws)\n dset_sheet(dset, ws)\n\n\n stream = BytesIO()\n wb.save(stream)\n return stream.getvalue()\n\n\ndef import_set(dset, in_stream, headers=True):\n \"\"\"Returns dataset from ODS stream. Default sheet 1\"\"\"\n dset.wipe()\n\n doc = opendocument.load(in_stream)\n sheet = doc.spreadsheet.childNodes[0]\n rows = sheet.getElementsByType(table.TableRow)\n row_count = 0\n for row in rows:\n cells = row.getElementsByType(table.TableCell)\n arrCells = []\n cell_count = 0\n for cell in cells:\n # repeated value?\n repeat = cell.getAttribute(\"numbercolumnsrepeated\")\n if(not repeat):\n repeat = 1\n\n ps = cell.getElementsByType(text.P)\n textContent = \"\"\n\n # for each text node\n for p in ps:\n c = p.firstChild # TODO: Where is it used?\n textContent = textContent + unicode(p)\n\n if textContent and textContent[0] != \"#\": # ignore comments cells\n for rr in range(int(repeat)): # repeated?\n arrCells.append(textContent)\n cell_count += 1\n else:\n arrCells.append(\"\")\n\n if row_count == 0 and headers:\n dset.headers = arrCells\n elif cell_count > 1:\n # empty cells are needed, but last string == ['']\n dset.append(arrCells)\n else:\n pass\n row_count += 1\n\n\ndef dset_sheet(dataset, ws):\n \"\"\"Completes given worksheet from given Dataset.\"\"\"\n _package = dataset._package(dicts=False)\n\n for i, sep in enumerate(dataset._separators):\n _offset = i\n _package.insert((sep[0] + _offset), (sep[1],))\n\n for i, row in enumerate(_package):\n row_number = i + 1\n odf_row = table.TableRow(stylename=bold, defaultcellstylename='bold')\n for j, col in enumerate(row):\n try:\n col = unicode(col, errors='ignore')\n except TypeError:\n ## col is already unicode\n pass\n ws.addElement(table.TableColumn())\n\n # bold headers\n if (row_number == 1) and dataset.headers:\n odf_row.setAttribute('stylename', bold)\n ws.addElement(odf_row)\n cell = table.TableCell()\n p = text.P()\n p.addElement(text.Span(text=col, stylename=bold))\n cell.addElement(p)\n odf_row.addElement(cell)\n\n # wrap the rest\n else:\n try:\n if '\\n' in col:\n ws.addElement(odf_row)\n cell = table.TableCell()\n cell.addElement(text.P(text=col))\n odf_row.addElement(cell)\n else:\n ws.addElement(odf_row)\n cell = table.TableCell()\n cell.addElement(text.P(text=col))\n odf_row.addElement(cell)\n except TypeError:\n ws.addElement(odf_row)\n cell = table.TableCell()\n cell.addElement(text.P(text=col))\n odf_row.addElement(cell)\n","sub_path":"tablib/formats/_ods.py","file_name":"_ods.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"509292887","text":"#!/usr/bin/env python\n#\n# Copyright 2017 Okinawa Open Laboratory\n#\n# All rights reserved. 
This program and the accompanying materials\n# are made available under the terms of the Apache License, Version 2.0\n# which accompanies this distribution, and is available at\n# http://www.apache.org/licenses/LICENSE-2.0\nimport functest.core.feature as base\nimport json\nimport os\n\nRESULT_DETAILS_FILE = \"test_result.json\"\n\n\nclass VrouterVnf(base.Feature):\n def __init__(self):\n super(VrouterVnf, self).__init__(project='functest',\n case='vyos_vrouter',\n repo='dir_repo_vrouter')\n self.cmd = 'cd %s && ./run.sh' % self.repo\n\n def set_result_details(self):\n filepath = os.path.join(self.repo, RESULT_DETAILS_FILE)\n if os.path.exists(filepath):\n f = open(filepath, 'r')\n self.details = json.load(f)\n f.close()\n\n def log_results(self):\n if self.criteria == 'PASS':\n self.set_result_details()\n super(VrouterVnf, self).log_results()\n","sub_path":"functest/opnfv_tests/vnf/router/vyos_vrouter.py","file_name":"vyos_vrouter.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"462431672","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\nfrom scrapy import Item, Field\n\n\nclass ProductAdsItem(scrapy.Item):\n table = 'product_targeting_ads_asins'\n\n asin = Field()\n price = Field()\n review_star = Field()\n review_num = Field()\n ads_asin = Field()\n update_time = Field()\n title = Field()\n bullet_points = Field()\n category = Field()\n","sub_path":"product_ads/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"186412324","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nThis script uses the csv file with the data and the json file for the metadata to produce an sqlite DB.\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport csv\r\nimport sqlite3\r\nimport json\r\nfrom argparse import ArgumentParser\r\n\r\n\r\ndef createMetadataDictionary(variablesMetadataPath):\r\n variablesMetadata = open(variablesMetadataPath)\r\n metadataJSON = json.load(variablesMetadata)\r\n\r\n metadataDictionary = {}\r\n metadataDictionary['subjectcode'] = 'text'\r\n metadataDictionary = addGroupVariablesToDictionary(metadataJSON,\r\n metadataDictionary)\r\n return metadataDictionary\r\n\r\n\r\ndef addGroupVariablesToDictionary(groupMetadata, metadataDictionary):\r\n if 'variables' in groupMetadata:\r\n for variable in groupMetadata['variables']:\r\n metadataDictionary[variable['code']] = variable['sql_type']\r\n if 'groups' in groupMetadata:\r\n for group in groupMetadata['groups']:\r\n metadataDictionary = addGroupVariablesToDictionary(group,\r\n metadataDictionary)\r\n return metadataDictionary\r\n\r\n\r\ndef createMetadataList(variablesMetadataPath):\r\n variablesMetadata = open(variablesMetadataPath)\r\n metadataJSON = json.load(variablesMetadata)\r\n\r\n metadataList = []\r\n metadataList = addGroupVariablesToList(metadataJSON,\r\n metadataList)\r\n return metadataList\r\n\r\n\r\ndef addGroupVariablesToList(groupMetadata, metadataList):\r\n if 'variables' in groupMetadata:\r\n for variable in groupMetadata['variables']:\r\n variableDictionary = {}\r\n variableDictionary['code'] = variable['code']\r\n variableDictionary['sql_type'] = variable['sql_type']\r\n variableDictionary['isCategorical'] = '1' if variable['isCategorical'] else 
'0'\r\n if 'enumerations' in variable: \r\n enumerations = []\r\n for enumeration in variable['enumerations']:\r\n enumerations.append(unicode(enumeration['code']))\r\n variableDictionary['enumerations'] = ','.join(enumerations)\r\n\r\n else:\r\n variableDictionary['enumerations'] = 'null'\r\n if 'min' in variable:\r\n variableDictionary['min'] = variable['min']\r\n else:\r\n variableDictionary['min'] = 'null'\r\n if 'max' in variable:\r\n variableDictionary['max'] = variable['max']\r\n else:\r\n variableDictionary['max'] = 'null'\r\n metadataList.append(variableDictionary)\r\n if 'groups' in groupMetadata:\r\n for group in groupMetadata['groups']:\r\n metadataList = addGroupVariablesToList(group,\r\n metadataList)\r\n return metadataList\r\n\r\n\r\ndef main():\r\n\r\n # Read the parameters\r\n\r\n parser = ArgumentParser()\r\n parser.add_argument('-c', '--csvFilePath', required=True,\r\n help='The folder of the csv dataset.')\r\n parser.add_argument('-v', '--variablesMetadataPath', required=True,\r\n help='The folder of the metadata file.')\r\n parser.add_argument('-o', '--outputDBAbsPath', required=True,\r\n help='The folder where the output db file is going to be.'\r\n )\r\n args = parser.parse_args()\r\n\r\n csvFilePath = os.path.abspath(args.csvFilePath)\r\n variablesMetadataPath = os.path.abspath(args.variablesMetadataPath)\r\n outputDBAbsPath = args.outputDBAbsPath\r\n\r\n # Transform the metadata json into a column name -> column type dictionary\r\n\r\n variablesTypesDict = createMetadataDictionary(variablesMetadataPath)\r\n\r\n # Create the query for the sqlite data table\r\n\r\n createDataTableQuery = 'CREATE TABLE DATA('\r\n\r\n csvFile = open(csvFilePath, 'r')\r\n csvReader = csv.reader(csvFile)\r\n csvHeader = next(csvReader)\r\n subjectcode = csvHeader[0]\r\n createDataTableQuery += ' ' + subjectcode + ' TEXT'\r\n for column in csvHeader[1:]:\r\n columnType = variablesTypesDict[column]\r\n createDataTableQuery += ', ' + column + ' ' + columnType\r\n createDataTableQuery += ')'\r\n\r\n # Create the data table\r\n con = sqlite3.connect(outputDBAbsPath)\r\n cur = con.cursor()\r\n cur.execute('DROP TABLE IF EXISTS DATA')\r\n cur.execute(createDataTableQuery)\r\n\r\n # Add data\r\n columnsString = csvHeader[0]\r\n for column in csvHeader[1:]:\r\n columnsString += ', ' + column\r\n columnsQuery = 'INSERT INTO DATA (' + columnsString + ') VALUES ('\r\n\r\n for row in csvReader:\r\n insertRowQuery = columnsQuery + \"'\" + row[0] + \"'\"\r\n for (value, column) in zip(row[1:], csvHeader[1:]):\r\n if variablesTypesDict[column] == 'text':\r\n insertRowQuery += \", '\" + value + \"'\"\r\n elif value == '':\r\n insertRowQuery += ', null'\r\n else:\r\n insertRowQuery += ', ' + value\r\n insertRowQuery += ');'\r\n try:\r\n cur.execute(insertRowQuery)\r\n except:\r\n raise ValueError('Row: ' + str(row) + ', Query: ' + str(insertRowQuery))\r\n\r\n # Transform the metadata JSON to a list\r\n metadataList = createMetadataList(variablesMetadataPath)\r\n\r\n # Create the query for the metadata table\r\n createMetadataTableQuery = 'CREATE TABLE METADATA('\r\n createMetadataTableQuery += ' code TEXT PRIMARY KEY ASC'\r\n createMetadataTableQuery += ', sql_type TEXT'\r\n createMetadataTableQuery += ', isCategorical INTEGER'\r\n createMetadataTableQuery += ', enumerations TEXT'\r\n createMetadataTableQuery += ', min INTEGER'\r\n createMetadataTableQuery += ', max INTEGER)'\r\n\r\n # Create the metadata table\r\n cur.execute('DROP TABLE IF EXISTS METADATA')\r\n 
cur.execute(createMetadataTableQuery)\r\n\r\n # Add data to the metadata table\t\tTODO\r\n columnsQuery = 'INSERT INTO METADATA (code, sql_type, isCategorical, enumerations, min, max) VALUES ('\r\n\r\n for variable in metadataList:\r\n insertVariableQuery = columnsQuery\r\n insertVariableQuery += \"'\" + variable['code'] + \"'\"\r\n insertVariableQuery += \", '\" + variable['sql_type'] + \"'\"\r\n insertVariableQuery += \", '\" + variable['isCategorical'] + \"'\"\r\n insertVariableQuery += \", '\" + variable['enumerations'] + \"'\"\r\n insertVariableQuery += \", '\" + variable['min'] + \"'\"\r\n insertVariableQuery += \", '\" + variable['max'] + \"'\"\r\n insertVariableQuery += \");\"\r\n cur.execute(insertVariableQuery)\r\n\r\n con.commit()\r\n con.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"Exareme-Docker/files/root/exareme/convert-csv-dataset-to-db.py","file_name":"convert-csv-dataset-to-db.py","file_ext":"py","file_size_in_byte":6559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"370885699","text":"import svm_ovo as f\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom scipy.stats import mode\n\nxLabel2, yLabel2, xLabel4, yLabel4, xLabel6, yLabel6 = f.data_split(\"../Data\")\n\nlabels = [(1,2),(1,3),(2,3)]\npermutations_x = [[xLabel2, xLabel4], [xLabel2, xLabel6], [xLabel4, xLabel6]]\npermutations_y = [[yLabel2, yLabel4], [yLabel2, yLabel6], [yLabel4, yLabel6]]\nhyperparameters = [(0.01,2,\"gauss\"), (0.1,1,\"gauss\"), (0.02,2,\"gauss\")]\n\ny_all_train = []\ny_all_test = []\nvotes_train = []\nvotes_test = []\n\niterations = 0\nrunning_time = 0\ndiffs = []\nfor i in range(len(labels)):\n \n permutations_y[i][0][:] = +1\n permutations_y[i][1][:] = -1\n X = np.concatenate(permutations_x[i])\n y = np.concatenate([permutations_y[i][0], permutations_y[i][1]])\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y, test_size=0.2, random_state=1696995) \n \n scaler = MinMaxScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.fit_transform(X_test)\n \n comb = hyperparameters[i]\n \n svm = f.Svm(gamma = comb[0], C = comb[1], kernel = comb[2])\n its, time_elapsed, diff, objective = svm.fit(X_train, y_train)\n \n iterations += its\n running_time += time_elapsed\n \n y_pred = svm.predict(X_train)\n y_pred[y_pred == 1] = labels[i][0]\n y_pred[y_pred == -1] = labels[i][1]\n votes_train.append(y_pred)\n \n y_pred = svm.predict(X_test)\n y_pred[y_pred == 1] = labels[i][0]\n y_pred[y_pred == -1] = labels[i][1]\n votes_test.append(y_pred)\n \n y_train[y_train == 1] = labels[i][0]\n y_train[y_train == -1] = labels[i][1]\n y_all_train.append(y_train)\n \n y_test[y_test == 1] = labels[i][0]\n y_test[y_test == -1] = labels[i][1]\n y_all_test.append(y_test)\n diffs.append(diff)\n# predicted labels\nvotes_train = np.array(votes_train).reshape(-1,1)\nvotes_test = np.array(votes_test).reshape(-1,1)\ny_all_train = np.array(y_all_train).reshape(-1,1)\ny_all_test = np.array(y_all_test).reshape(-1,1)\n\n# majority voting\nmajorities_test = mode(votes_test, axis = 0)[0].reshape(-1,1)\nmajorities_train = mode(votes_train, axis = 0)[0].reshape(-1,1)\n\nprint(\"- gamma_1 :\", hyperparameters[0][0], \"\\t C_1 :\", hyperparameters[0][1], \"\\t kernel_1 :\", hyperparameters[0][2])\nprint(\"\\n gamma_2 :\", hyperparameters[1][0], \"\\t C_2 :\", hyperparameters[1][1], \"\\t kernel_2 :\", hyperparameters[1][2])\nprint(\"\\n gamma_3 
:\", hyperparameters[2][0], \"\\t C_3 :\", hyperparameters[2][1], \"\\t kernel_3 :\", hyperparameters[2][2])\nprint(\"- accuracy on train :\",np.mean(votes_train == y_all_train))\nprint(\"- accuracy on test :\",np.mean(votes_test == y_all_test))\nprint(\"- confusion matrix :\\n\",f.confusion_matrix(y_all_test, votes_test))\nprint(\"- time elapsed :\", running_time)\nprint(\"- iterations :\", iterations)\n#print(\"- m - M :\", diff)\nprint(\"- m - M Kernel_1: \",diffs[0])\nprint(\"- m - M Kernel_2: \",diffs[1])\nprint(\"- m - M Kernel_3: \",diffs[2])\n\n","sub_path":"omml homework 2 OptimusPrime/omml homework 2 OptimusPrime/Q4/run_4_OptimusPrime.py","file_name":"run_4_OptimusPrime.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"199519372","text":"# _*_ coding:utf-8 _*_\nimport tensorflow as tf\nfrom discriminator import Discriminator\nfrom unet import Unet\n\n\nclass GAN:\n def __init__(self,\n image_size,\n learning_rate=2e-5,\n batch_size=1,\n ngf=64,\n ):\n \"\"\"\n Args:\n input_size:list [N, H, W, C]\n batch_size: integer, batch size\n learning_rate: float, initial learning rate for Adam\n ngf: number of base gen filters in conv layer\n \"\"\"\n self.learning_rate = learning_rate\n self.input_shape = [int(batch_size / 4), image_size[0], image_size[1], image_size[2]]\n self.code_shape = [int(batch_size / 4), int(image_size[0] / 8), int(image_size[1] / 8), 4]\n self.ones_code = tf.ones(self.code_shape, name=\"ones_code\")\n self.tenaor_name = {}\n\n self.G_X = Unet('G_X', ngf=ngf)\n self.D_X = Discriminator('D_X', ngf=ngf)\n self.G_Y = Unet('G_Y', ngf=ngf)\n self.D_Y = Discriminator('D_Y', ngf=ngf)\n self.G_Z = Unet('G_Z', ngf=ngf)\n self.D_Z = Discriminator('D_Z', ngf=ngf)\n self.G_W = Unet('G_W', ngf=ngf)\n self.D_W = Discriminator('D_W', ngf=ngf)\n\n def model(self, s, x, y, z, w):\n self.tenaor_name[\"s\"] = str(s)\n\n x_g = self.G_X(1.0-s)\n y_g = self.G_Y(1.0-s)\n z_g = self.G_Z(1.0-s)\n w_g = self.G_W(1.0-s)\n self.tenaor_name[\"x_g\"] = str(x_g)\n self.tenaor_name[\"y_g\"] = str(y_g)\n self.tenaor_name[\"z_g\"] = str(z_g)\n self.tenaor_name[\"w_g\"] = str(w_g)\n\n j_x_g, j_x_g_c = self.D_X(x_g)\n j_y_g, j_y_g_c = self.D_Y(y_g)\n j_z_g, j_z_g_c = self.D_Z(z_g)\n j_w_g, j_w_g_c = self.D_W(w_g)\n\n j_x, j_x_c = self.D_X(x)\n j_y, j_y_c = self.D_Y(y)\n j_z, j_z_c = self.D_Z(z)\n j_w, j_w_c = self.D_W(w)\n\n D_loss = 0.0\n G_loss = 0.0\n D_loss += self.mse_loss(j_x, 1.0) * 5\n D_loss += self.mse_loss(j_x_g, 0.0) * 3\n G_loss += self.mse_loss(j_x_g, 1.0) * 3\n\n D_loss += self.mse_loss(j_y, 1.0) * 5\n D_loss += self.mse_loss(j_y_g, 0.0) * 3\n G_loss += self.mse_loss(j_y_g, 1.0) * 3\n\n D_loss += self.mse_loss(j_z, 1.0) * 5\n D_loss += self.mse_loss(j_z_g, 0.0) * 3\n G_loss += self.mse_loss(j_z_g, 1.0) * 3\n\n D_loss += self.mse_loss(j_w, 1.0) * 5\n D_loss += self.mse_loss(j_w_g, 0.0) * 3\n G_loss += self.mse_loss(j_w_g, 1.0) * 3\n\n G_loss += self.mse_loss(x_g, x) * 5\n G_loss += self.mse_loss(y_g, y) * 5\n G_loss += self.mse_loss(z_g, z) * 5\n G_loss += self.mse_loss(w_g, w) * 5\n\n image_list={}\n judge_list={}\n image_list[\"x_g\"] = x_g\n image_list[\"y_g\"] = y_g\n image_list[\"z_g\"] = z_g\n image_list[\"w_g\"] = w_g\n\n judge_list[\"j_x_g\"], judge_list[\"j_x_g_c\"] = j_x_g, j_x_g_c\n judge_list[\"j_y_g\"], judge_list[\"j_y_g_c\"] = j_y_g, j_y_g_c\n judge_list[\"j_z_g\"], judge_list[\"j_z_g_c\"] = j_z_g, j_z_g_c\n judge_list[\"j_w_g\"], judge_list[\"j_w_g_c\"] = j_w_g, 
j_w_g_c\n\n        judge_list[\"j_x\"], judge_list[\"j_x_c\"] = j_x, j_x_c\n        judge_list[\"j_y\"], judge_list[\"j_y_c\"] = j_y, j_y_c\n        judge_list[\"j_z\"], judge_list[\"j_z_c\"] = j_z, j_z_c\n        judge_list[\"j_w\"], judge_list[\"j_w_c\"] = j_w, j_w_c\n\n        loss_list = [G_loss, D_loss]\n\n        return loss_list,image_list,judge_list\n\n    def get_variables(self):\n        return [self.G_X.variables+\n                self.G_Y.variables+\n                self.G_Z.variables+\n                self.G_W.variables,\n                self.D_X.variables+\n                self.D_Y.variables+\n                self.D_Z.variables+\n                self.D_W.variables\n                ]\n\n    def optimize(self):\n        def make_optimizer(name='Adam'):\n            learning_step = (\n                tf.train.AdamOptimizer(self.learning_rate, beta1=0.5, name=name)\n            )\n            return learning_step\n\n        G_optimizer = make_optimizer(name='Adam_G')\n        D_optimizer = make_optimizer(name='Adam_D')\n\n        return G_optimizer, D_optimizer\n\n    def acc(self, x, y):\n        correct_prediction = tf.equal(x, y)\n        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n        return accuracy\n\n    def auc(self, x, y):\n        return tf.metrics.auc(x, y)\n\n    def sensitivity(self, labels, predictions, specificity):\n        return tf.metrics.sensitivity_at_specificity(labels, predictions, specificity)\n\n    def precision(self, labels, predictions):\n        return tf.metrics.precision( labels, predictions)\n    def precision_at_k(self, labels, predictions,k):\n        return tf.metrics.precision_at_k( labels, predictions,k)\n\n    def recall(self, labels, predictions):\n        return tf.metrics.recall( labels, predictions)\n    def recall_at_k(self, labels, predictions,k):\n        return tf.metrics.recall_at_k( labels, predictions,k)\n\n    def iou(self, labels, predictions, num_classes):\n        return tf.metrics.mean_iou( labels, predictions,num_classes)\n\n    def dice_score(self, output, target, loss_type='jaccard', axis=(1, 2, 3, 4), smooth=1e-5):\n        inse = tf.reduce_sum(output * target, axis=axis)\n        if loss_type == 'jaccard':\n            l = tf.reduce_sum(output * output, axis=axis)\n            r = tf.reduce_sum(target * target, axis=axis)\n        elif loss_type == 'sorensen':\n            l = tf.reduce_sum(output, axis=axis)\n            r = tf.reduce_sum(target, axis=axis)\n        else:\n            raise Exception(\"Unknown loss_type\")\n        dice = (2. 
* inse + smooth) / (l + r + smooth)\n dice = tf.reduce_mean(dice)\n return dice\n\n def cos_score(self, output, target, axis=(1, 2, 3, 4), smooth=1e-5):\n pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.square(output), axis))\n pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.square(target), axis))\n pooled_mul_12 = tf.reduce_sum(tf.multiply(output, target), axis)\n score = tf.reduce_mean(tf.div(pooled_mul_12, pooled_len_1 * pooled_len_2 + smooth))\n return score\n\n def euclidean_distance(self, output, target, axis=(1, 2, 3, 4)):\n euclidean = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(output - target), axis)))\n return euclidean\n\n def MSE(self, output, target):\n mse = tf.reduce_mean(tf.square(output - target))\n return mse\n\n def MAE(self, output, target):\n mae = tf.reduce_mean(tf.abs(output - target))\n return mae\n\n def mse_loss(self, x, y):\n loss = tf.reduce_mean(tf.square(x - y))\n return loss\n\n def ssim_loss(self, x, y):\n loss = (1.0 - self.SSIM(x, y)) * 20\n return loss\n\n def PSNR(self, output, target):\n psnr = tf.reduce_mean(tf.image.psnr(output, target, max_val=1.0, name=\"psnr\"))\n return psnr\n\n def SSIM(self, output, target):\n ssim = tf.reduce_mean(tf.image.ssim(output, target, max_val=1.0))\n return ssim\n\n def norm(self, input):\n output = (input - tf.reduce_min(input, axis=[1, 2, 3])\n ) / (tf.reduce_max(input, axis=[1, 2, 3]) - tf.reduce_min(input, axis=[1, 2, 3]))\n return output\n","sub_path":"code_demo/BRATS2015/SkrGAN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"363747153","text":"import random\n\ndiamonds = [\"AD\", \"2D\", \"3D\", \"4D\", \"5D\", \"6D\",\n \"7D\", \"8D\", \"9D\", \"10D\", \"JD\", \"QD\", \"KD\"]\n\nhand = []\n\nwhile diamonds:\n choice = input(\"Press ENTER to pick a card or Q and enter to quit: \")\n\n if choice == \"Q\" or choice == \"q\":\n break\n\n card = random.choice(diamonds)\n diamonds.remove(card)\n hand.append(card)\n print(\"Your hand: \", hand)\n print(\"Remaining Cards: \", diamonds)\n print()\n\nif not diamonds:\n print(\"There are no more cards to pick\")\n","sub_path":"week3/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"596726206","text":"# Suppose an array of length n sorted in ascending order is rotated between 1 \n# and n times. For example, the array nums = [0,1,2,4,5,6,7] might become: \n# \n# \n# [4,5,6,7,0,1,2] if it was rotated 4 times. \n# [0,1,2,4,5,6,7] if it was rotated 7 times. \n# \n# \n# Notice that rotating an array [a[0], a[1], a[2], ..., a[n-1]] 1 time results \n# in the array [a[n-1], a[0], a[1], a[2], ..., a[n-2]]. \n# \n# Given the sorted rotated array nums of unique elements, return the minimum \n# element of this array. \n# \n# You must write an algorithm that runs in O(log n) time. \n# \n# \n# Example 1: \n# \n# \n# Input: nums = [3,4,5,1,2]\n# Output: 1\n# Explanation: The original array was [1,2,3,4,5] rotated 3 times.\n# \n# \n# Example 2: \n# \n# \n# Input: nums = [4,5,6,7,0,1,2]\n# Output: 0\n# Explanation: The original array was [0,1,2,4,5,6,7] and it was rotated 4 \n# times.\n# \n# \n# Example 3: \n# \n# \n# Input: nums = [11,13,15,17]\n# Output: 11\n# Explanation: The original array was [11,13,15,17] and it was rotated 4 times. 
\n# \n# \n# \n# \n# Constraints: \n# \n# \n# n == nums.length \n# 1 <= n <= 5000 \n# -5000 <= nums[i] <= 5000 \n# All the integers of nums are unique. \n# nums is sorted and rotated between 1 and n times. \n# \n# Related Topics Array Binary Search 👍 4795 👎 347\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List\n\n\nclass Solution:\n def findMin(self, nums: List[int]) -> int:\n low, high = 0, len(nums) - 1\n\n while low < high:\n mid = (low + high) // 2\n\n if nums[mid] <= nums[high]:\n # else right half is normal and we go to the left half\n # note we are using high = mid, since this else condition is\n # nums[mid] <= nums[high], with possibility of mid also being the answer\n high = mid\n else:\n # the pivot must be in the right half, as its not sorted half\n # so we shrink to the right part\n low = mid + 1\n\n return nums[low]\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\nsolution = Solution()\nprint(solution.findMin([4, 5, 6, 7, 0, 1, 2]))\n","sub_path":"leetcode/editor/en/[153]Find Minimum in Rotated Sorted Array.py","file_name":"[153]Find Minimum in Rotated Sorted Array.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"230322946","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport fnmatch\nimport logging\nimport yaml\nimport jsonpath_rw\nfrom openstack_notifier.config import CONF\nfrom openstack_notifier.common.log import logger\n\nlogger.setLevel(logging.INFO)\n\nFILE_PATH = CONF.get('event', 'definitions_cfg_file')\n\n\ndef setup_events():\n if FILE_PATH is not None:\n logger.info(\"Event Definitions configuration file: %s\"\n % FILE_PATH.split('/')[-1])\n\n with open(FILE_PATH) as cf:\n config = cf.read()\n try:\n events_config = yaml.safe_load(config)\n except yaml.YAMLError as err:\n if hasattr(err, 'problem_mark'):\n mark = err.problem_mark\n errmsg = (\"Invalid YAML syntax in Event Definitions file \"\n \"%(file)s at line: %(line)s, column: %(column)s.\"\n % dict(file=FILE_PATH.split('/')[-1],\n line=mark.line + 1,\n column=mark.column + 1))\n else:\n errmsg = (\"YAML error reading Event Definitions file: %s\"\n % FILE_PATH.split('/')[-1])\n logger.error(errmsg)\n raise\n else:\n return EventConverter(events_config)\n else:\n logger.error(\"No Event Definitions configuration file found!\")\n\n\nclass EventConverter(object):\n def __init__(self, events_config):\n self.definitions = [EventDefinition(event_def) for event_def\n in reversed(events_config)]\n\n def to_event(self, notification_body):\n event_type = notification_body['event_type']\n edef = None\n for d in self.definitions:\n if d.match_type(event_type):\n edef = d\n break\n if edef is None:\n logger.info(\"Dropping Notification %s\" % event_type)\n return None\n logger.info(\"Event Notification is about to convert : %s from\"\n \" Module EventConverter.\" % event_type)\n return edef.to_event(notification_body)\n\n\nclass EventDefinition(object):\n def __init__(self, definition_cfg):\n self.traits = dict()\n self.include_types = []\n self.cfg = definition_cfg\n self.invalid_keys = {\"event_type\", \"Action\"}\n\n event_type = definition_cfg['event_type']\n self.include_types.append(event_type)\n for t in self.include_types:\n logger.info(\"Monitored resources from \"\n \"event definition yaml file: %s\" % t.split('.')[0])\n\n for trait_name in self.exclude_keys(self.cfg, self.invalid_keys):\n self.traits[trait_name] = TraitDefinition(\n 
trait_name, definition_cfg[trait_name])\n\n    def exclude_keys(self, d, keys):\n        return {x: d[x] for x in d if x not in keys}\n\n    def match_type(self, event_type):\n        for t in self.include_types:\n            if fnmatch.fnmatch(event_type, t):\n                return True\n        else:\n            return False\n\n    def to_event(self, notification_body):\n        event_type = notification_body['event_type']\n        logger.info(\"Event_type from notification_body %s from\"\n                    \" Module EventDefinition.\" % event_type)\n        traits = (self.traits[t].to_trait(notification_body)\n                  for t in self.traits)\n        trait_dict = {}\n        try:\n            for trait in traits:\n                trait_dict[trait.name] = trait.value or None\n        except Exception as err:\n            logger.error(\"The trait error is: %s\" % err)\n\n        trait_dict['Action'] = self.cfg['Action']\n        return trait_dict\n\n\nclass TraitDefinition(object):\n    def __init__(self, name, trait_cfg):\n        self.cfg = trait_cfg\n        self.name = name\n\n        if 'fields' not in trait_cfg:\n            logger.error(\"Required fields in trait definition not \"\n                         \"specified:'%s': %s\" % ('fields', self.cfg))\n            return None\n\n        fields = trait_cfg['fields']\n        try:\n            self.fields = jsonpath_rw.parse(fields)\n            logger.info(\"Jsonpath_rw's fields: %s\" % self.fields)\n        except Exception as e:\n            logger.error(\"Parse error in JSONPath specification \"\n                         \"'%(jsonpath)s' for %(trait)s: %(err)s\"\n                         % dict(jsonpath=fields, trait=name, err=e))\n\n    def to_trait(self, notification_body):\n        event_type = notification_body['event_type']\n\n        values = [match for match in self.fields.find(notification_body)]\n        value_list = [match.value or '' for match in values]\n        value = ''.join(value_list)\n        return Trait(self.name, value)\n\n\nclass Trait(object):\n    def __init__(self, name, value):\n        self.name = name\n        self.value = value\n\n    def __str__(self):\n        return ('%s, %s') % (self.name, self.value)\n    __repr__ = __str__\n","sub_path":"openstack_notifiler/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"234797645","text":"from PyQt4.QtGui import *\r\nfrom PyQt4 import QtCore, QtGui\r\nfrom xml.dom.minidom import *\r\nfrom xml.etree import ElementTree\r\nfrom urllib import request, parse\r\nfrom xml.etree import ElementTree\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nfrom mail import *\r\nfrom map import *\r\nfrom urllib import request\r\nimport webbrowser\r\nimport sys\r\nimport gui\r\nimport detailPopup\r\nimport datetime\r\nimport webbrowser\r\nimport smtplib\r\nimport folium\r\nimport urllib.parse\r\n\r\nglobal x, y\r\n\r\n\r\nhost = \"smtp.gmail.com\"\r\nport = \"587\"\r\npgNm = 1\r\nurl_home = 'http://openapi.animal.go.kr/openapi/service/rest/abandonmentPublicSrvc/'\r\nserviceKey = 'serviceKey=OyfS4qqxnYyHXNdGgHg%2Bem2F%2FLAjaG4C0X2kgqycc%2B2G3%2F0flCjg9GIptnv23C3UXWRH3wjd3EuE31%2FGSX71ZA%3D%3D'\r\nurl_sido = url_home + \"sido?\" + serviceKey\r\nurl_dog = url_home + \"kind?\" + serviceKey + \"&up_kind_cd=417000\"\r\n\r\nclass DetailPopupDialog(QDialog, detailPopup.Ui_Dialog):\r\n    def __init__(self,item):\r\n        global first\r\n        QDialog.__init__(self)\r\n        # the setupUi() method draws the dialog on screen\r\n        self.setupUi(self)\r\n        self.InitDraw(item)\r\n\r\n        self.MapPushButton.clicked.connect(self.pushAction)\r\n        self.sendMailPushButton.clicked.connect(self.pushActionMail)\r\n\r\n\r\n\r\n    def pushAction(self,item):\r\n        global loc\r\n        self.show_map(loc)\r\n\r\n    def pushActionMail(self,item):\r\n        global mailData\r\n        self.sendMail(mailData)\r\n\r\n    def sendMail(self, data):\r\n        global host, port\r\n        senderAddr = \"eomdyeon@gmail.com\"\r\n        passwd = \"**\" # password\r\n\r\n        now = datetime.datetime.now()\r\n        nowDate = now.strftime('%Y-%m-%d')\r\n\r\n        recipientAddr = self.sendMailLineEdit.text()\r\n        title = '[유기동물정보] ' + nowDate\r\n\r\n        msg = MIMEMultipart('alternative')\r\n\r\n        msg['Subject'] = title\r\n        msg['From'] = senderAddr\r\n        msg['To'] = recipientAddr\r\n\r\n        msgPart = MIMEText(data, 'plain')\r\n        msg.attach(msgPart)\r\n\r\n        print(\"서버 연결중 ... \")\r\n        s = smtplib.SMTP(host, port)\r\n        s.ehlo()\r\n        s.starttls()\r\n        s.ehlo()\r\n        s.login(senderAddr, passwd) # log in\r\n        s.sendmail(senderAddr, [recipientAddr], msg.as_string())\r\n        s.close()\r\n\r\n        print(\"메일 보내기 성공!\")\r\n\r\n    def InitDraw(self, item):\r\n        global desertionNo, loc, mailData\r\n\r\n        processState = item.find(\"processState\")\r\n        kindCd = item.find(\"kindCd\")\r\n        age = item.find(\"age\")\r\n        sexCd = item.find(\"sexCd\")\r\n        colorCd = item.find(\"colorCd\")\r\n        neuterYn = item.find(\"neuterYn\")\r\n        specialMark = item.find(\"specialMark\")\r\n        weight = item.find(\"weight\")\r\n        happenPlace = item.find(\"happenPlace\")\r\n        photo = item.find(\"popfile\")\r\n        happenDt = item.find(\"happenDt\")\r\n        careNm = item.find(\"careNm\")\r\n        careAddr = item.find(\"careAddr\")\r\n        careTel = item.find(\"careTel\")\r\n        chargeNm = item.find(\"chargeNm\")\r\n        officetel = item.find(\"officetel\")\r\n        orgNm = item.find(\"orgNm\")\r\n        noticeNo = item.find(\"noticeNo\")\r\n        dNo = item.find(\"desertionNo\")\r\n\r\n        self.noticeNoLabel.setText(\"공고번호: \"+ noticeNo.text)\r\n        self.desertionNoLabel.setText(\"유기번호: \" + dNo.text)\r\n        self.label_17.setText(\"접수일: \" + happenDt.text)\r\n        self.happenPlaceLabel.setText(\"발견장소: \" + happenPlace.text)\r\n        self.careNmLabel.setText(\"보호소 이름: \" + careNm.text)\r\n        self.careAddrLabel.setText(\"보호주소: \" + careAddr.text)\r\n        self.careTelLabel.setText(\"보호소 전화번호: \" + careTel.text)\r\n        self.chargeNameLabel.setText(\"담당자: \" + chargeNm.text)\r\n        self.officetelLabel.setText(\"담당자 연락처: \" + officetel.text)\r\n        self.orgNmLabel.setText(\"관할기관: \" + orgNm.text)\r\n        self.KindCdLabel.setText(\"품종: \" + kindCd.text)\r\n        self.neuterYnLabel.setText(\"중성화 여부: \" + neuterYn.text)\r\n        self.ageLabel.setText(\"나이 : \" + age.text)\r\n        self.sexCdLabel.setText(\"성별 : \" + sexCd.text)\r\n        self.colorCdLabel.setText(\"색상: \" + colorCd.text)\r\n        self.label_8.setText(\"체중: \" + weight.text)\r\n        self.specialMarkLabel.setText(\"특징: \" + specialMark.text)\r\n        self.webView.setUrl(QtCore.QUrl(photo.text))\r\n        print(specialMark.text)\r\n\r\n        mailData = \"-----동물정보-----\" + \"\\n상태: \" + processState.text + \"\\n품종: \" + kindCd.text + \"\\n나이: \" + \\\r\n                   age.text + \"\\n성별: \" + sexCd.text + \"\\n색상: \" + colorCd.text + \"\\n중성화 여부: \" + neuterYn.text + \\\r\n                   \"\\n특징: \" + specialMark.text + \"\\n체중: \" + weight.text + \"\\n발견장소: \" + happenPlace.text \\\r\n                   + \"\\n사진: \" + photo.text + \"\\n-----보호 정보-----\" + \"\\n접수일: \" + happenDt.text + \"\\n보호소 이름: \" + \\\r\n                   careNm.text + \"\\n보호 주소: \" + careAddr.text + \"\\n보호소 전화번호: \" + careTel.text + \"\\n담당자: \" + \\\r\n                   chargeNm.text + \"\\n담당자 연락처: \" + officetel.text + \"\\n관할기관: \" + orgNm.text + \"\\n공고번호: \" + \\\r\n                   noticeNo.text + \"\\n유기번호: \" + dNo.text\r\n\r\n        loc = careAddr.text\r\n\r\n    def show_map(self,loc):\r\n        global x, y\r\n        address = parse.quote(loc)\r\n        url = 
\"http://api.vworld.kr/req/address?service=address&version=2.0&request=getcoord&key=483E0418-2F46-3223-80A1-F66D16A24685&format=xml&type=road&address=\"+str(address)+\"&refine=true&simple=false&crs=epsg:4326\"\r\n res = request.urlopen(url).read()\r\n tree = ElementTree.fromstring(res)\r\n itemElements = tree.getiterator(\"point\")\r\n for item in itemElements:\r\n x = item.find('x')\r\n y = item.find('y')\r\n\r\n # 위도 경도 지정\r\n map_osm = folium.Map(location=[y.text, x.text], zoom_start=13)\r\n # 마커 지정\r\n folium.Marker([y.text, x.text], popup='Mt. Hood Meadows').add_to(map_osm)\r\n # html 파일로 저장\r\n map_osm.save('osm.html')\r\n\r\n # 지도 열기\r\n webbrowser.open('osm.html')\r\n\r\nclass XDialog(QDialog, gui.Ui_Dialog):\r\n def __init__(self):\r\n QDialog.__init__(self)\r\n # setupUi() 메서드는 화면에 다이얼로그 보여줌\r\n self.setupUi(self)\r\n self.SearchPushButton.clicked.connect(self.seletMenu)\r\n self.detailPushButton.clicked.connect(self.InitDetailPopup)\r\n self.NextPagePushButton.clicked.connect(self.seletNext)\r\n self.PrevPagePushButton.clicked.connect(self.seletPrev)\r\n\r\n def InitDetailPopup(self):\r\n global dlg2, desertionNo\r\n self.getDesertionNo(self.listWidget.currentItem())\r\n item = self.searchDesertionNo()\r\n dlg2 = DetailPopupDialog(item)\r\n dlg2.show()\r\n\r\n def seletMenu(self):\r\n sel = self.selectMenuComboBox.currentText()\r\n if(sel == \"보호소 찾기\"):\r\n self.searchShelter()\r\n else:\r\n self.searchAnimal()\r\n\r\n def seletNext(self):\r\n global pgNm\r\n sel = self.selectMenuComboBox.currentText()\r\n if(sel == \"보호소 찾기\"):\r\n pass\r\n else:\r\n pgNm += 1\r\n print(pgNm)\r\n self.searchAnimal()\r\n\r\n def seletPrev(self):\r\n global pgNm\r\n sel = self.selectMenuComboBox.currentText()\r\n if (sel == \"보호소 찾기\"):\r\n pass\r\n else:\r\n pgNm -= 1\r\n print(pgNm)\r\n self.searchAnimal()\r\n\r\n def printItem(self, url, item_name):\r\n response = request.urlopen(url).read()\r\n tree = ElementTree.fromstring(response)\r\n itemElements = tree.getiterator(\"item\")\r\n for item in itemElements:\r\n name = item.find(item_name)\r\n self.listWidget.addItem(name.text)\r\n\r\n def getDesertionNo(self,item):\r\n global desertionNo\r\n value = item.text()\r\n desertionNo = value[6:21]\r\n\r\n def searchDesertionNo(self):\r\n global sido_code, sigungu_code, desertionNo, kind_code, animal_code, bgn_date, end_date\r\n\r\n url_searchAbandonment = \\\r\n url_home + 'abandonmentPublic?' 
+ serviceKey + '&bgnde=' + bgn_date + '&endde=' + end_date \\\r\n            + animal_code + kind_code + '&org_cd=' + sigungu_code+ '&pageNo=' + str(pgNm) + '&numOfRows=50'\r\n        response = request.urlopen(url_searchAbandonment).read()\r\n        tree = ElementTree.fromstring(response)\r\n        itemElements = tree.getiterator(\"item\")\r\n\r\n        print(desertionNo)\r\n\r\n        for item in itemElements:\r\n            num = item.find(\"desertionNo\")\r\n            if (num.text == desertionNo):\r\n                print(\"찾음\")\r\n                print(num.text)\r\n                return item\r\n\r\n    def getRegionCode(self, url, search_name):\r\n        response = request.urlopen(url).read()\r\n        tree = ElementTree.fromstring(response)\r\n        itemElements = tree.getiterator(\"item\")\r\n        for item in itemElements:\r\n            name = item.find('orgdownNm')\r\n            name = name.text\r\n            if name == search_name:\r\n                result = item.find(\"orgCd\")\r\n                return result.text\r\n\r\n    def getDogKindCode(self, search_name):\r\n        response = request.urlopen(url_dog).read()\r\n        tree = ElementTree.fromstring(response)\r\n        itemElements = tree.getiterator(\"item\")\r\n        for item in itemElements:\r\n            name = item.find('KNm')\r\n            name = name.text\r\n            if name == search_name:\r\n                result = item.find(\"kindCd\")\r\n                return str(result.text)\r\n\r\n    def searchShelter(self):\r\n        global sido_code, sigungu_code\r\n        self.listWidget.clear()\r\n        # find the si/do (province) code\r\n        sido_name = self.sidoLineEdit.text()\r\n        sido_code = self.getRegionCode(url_sido, sido_name)\r\n\r\n        # find the si/gun/gu (city/county/district) code\r\n        sigungu_name = self.sigunguLineEdit.text()\r\n        url_sigungu = url_home + 'sigungu?' + serviceKey + '&upr_cd=' + sido_code\r\n        sigungu_code = self.getRegionCode(url_sigungu, sigungu_name)\r\n\r\n        # find shelters\r\n        url_shelter = url_home + 'shelter?' + serviceKey + '&upr_cd=' + sido_code + '&org_cd=' + sigungu_code\r\n        # print the shelters\r\n        self.printItem(url_shelter, \"careNm\")\r\n\r\n    def searchAnimal(self):\r\n        global bgn_date, end_date, mailData, sido_code, sigungu_code, animal_kind, animal_code, kind_code\r\n        self.listWidget.clear()\r\n\r\n        # si/do (province) condition\r\n        sido_name = self.sidoLineEdit.text()\r\n        sido_code = self.getRegionCode(url_sido, sido_name)\r\n\r\n        # si/gun/gu (city/county/district) condition\r\n        sigungu_name = self.sigunguLineEdit.text()\r\n        url_sigungu = url_home + 'sigungu?' + serviceKey + '&upr_cd=' + sido_code\r\n        sigungu_code = self.getRegionCode(url_sigungu, sigungu_name)\r\n\r\n\r\n        tmpS = self.startDateEdit.textFromDateTime (self.startDateEdit.dateTime())\r\n        tmpE = self.startDateEdit.textFromDateTime(self.endDateEdit.dateTime())\r\n        tmpS = tmpS.split('-')\r\n        tmpE = tmpE.split('-')\r\n        dateS = tmpS[0] + tmpS [1] + tmpS[2]\r\n        dateE = tmpE[0] + tmpE[1] + tmpE[2]\r\n        bgn_date = dateS\r\n        end_date = dateE\r\n\r\n\r\n        # animal type condition\r\n        if(self.radioButtonDog.isChecked() == True):\r\n            animal_kind = '1'\r\n        elif(self.radioButtonCat.isChecked() == True):\r\n            animal_kind = '2'\r\n        elif (self.radioButtonEtc.isChecked() == True):\r\n            animal_kind = '3'\r\n        else:\r\n            animal_kind = '4'\r\n\r\n        print(animal_kind)\r\n        if animal_kind == '1': # dog breed condition\r\n            animal_code = \"&upkind=417000\"\r\n            kind_code = \"\"\r\n        elif animal_kind == '2': # cat\r\n            animal_code = \"&upkind=422400\"\r\n            kind_code = \"\"\r\n        elif animal_kind == '3': # other\r\n            animal_code = \"&upkind=429900\"\r\n            kind_code = \"\"\r\n        elif animal_kind == '4': # any\r\n            animal_code = \"\"\r\n            kind_code = \"\"\r\n\r\n        # self.label_3.setText(\"변경\")\r\n        url_searchAbandonment = \\\r\n            url_home + 'abandonmentPublic?' 
+ serviceKey + '&bgnde=' + bgn_date + '&endde=' + end_date \\\r\n + animal_code + kind_code + '&org_cd=' + sigungu_code+ '&pageNo=' + str(pgNm) + '&numOfRows=50'\r\n response = request.urlopen(url_searchAbandonment).read()\r\n tree = ElementTree.fromstring(response)\r\n itemElements = tree.getiterator(\"item\")\r\n\r\n for item in itemElements:\r\n self.printAnimal(item)\r\n\r\n def printAnimal(self,item):\r\n global mailData, loc\r\n processState = item.find(\"processState\")\r\n kindCd = item.find(\"kindCd\")\r\n age = item.find(\"age\")\r\n sexCd = item.find(\"sexCd\")\r\n colorCd = item.find(\"colorCd\")\r\n neuterYn = item.find(\"neuterYn\")\r\n specialMark = item.find(\"specialMark\")\r\n weight = item.find(\"weight\")\r\n happenPlace = item.find(\"happenPlace\")\r\n photo = item.find(\"popfile\")\r\n # -----보호 정보-----\r\n happenDt = item.find(\"happenDt\")\r\n careNm = item.find(\"careNm\")\r\n careAddr = item.find(\"careAddr\")\r\n careTel = item.find(\"careTel\")\r\n chargeNm = item.find(\"chargeNm\")\r\n officetel = item.find(\"officetel\")\r\n orgNm = item.find(\"orgNm\")\r\n noticeNo = item.find(\"noticeNo\")\r\n desertionNo = item.find(\"desertionNo\")\r\n self.listWidget.addItem(\"유기번호 [\" + desertionNo.text + \"]\\n\" + \"접수일: \" + happenDt.text + \"\\n\" + \"상태: \" + processState.text + \"\\n\" + \"품종: \" + kindCd.text + \"\\n\" +\"발견장소: \" + happenPlace.text + \"\\n\" )\r\n\r\n\r\n\r\n mailData = \"-----동물정보-----\" + \"\\n상태: \" + processState.text + \"\\n품종: \" + kindCd.text + \"\\n나이: \" + \\\r\n age.text + \"\\n성별: \" + sexCd.text + \"\\n색상: \" + colorCd.text + \"\\n중성화 여부: \" + neuterYn.text + \\\r\n \"\\n특징: \" + specialMark.text + \"\\n체중: \" + weight.text + \"\\n발견장소: \" + happenPlace.text \\\r\n + \"\\n사진: \" + photo.text + \"\\n-----보호 정보-----\" + \"\\n접수일: \" + happenDt.text + \"\\n보호소 이름: \" + \\\r\n careNm.text + \"\\n보호 주소: \" + careAddr.text + \"\\n보호소 전화번호: \" + careTel.text + \"\\n담당자: \" + \\\r\n chargeNm.text + \"\\n담당자 연락처: \" + officetel.text + \"\\n관할기관: \" + orgNm.text + \"\\n공고번호: \" + \\\r\n noticeNo.text + \"\\n유기번호: \" + desertionNo.text\r\n\r\n loc = careAddr.text\r\n\r\napp = QApplication(sys.argv)\r\ndlg = XDialog()\r\ndlg.show()\r\n\r\n\r\napp.exec_()\r\n","sub_path":"이민옥/2017_script_termProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"506230585","text":"from __future__ import division\nfrom matplotlib import pyplot as plt\n\nimport math\n\ndef uniform_pdf(x):\n if x >= 0 and x < 1: return 1\n else: return 0\n\ndef uniform_cdf(x):\n if x < 0: return 0\n elif x < 1: return x\n else: return 1\n\ndef normal_pdf(x, mu=0, sigma=1):\n sqrt_two_pi_sigma = math.sqrt(2*math.pi) * sigma\n x_mu_square = (x-mu) ** 2\n two_sigma_squre = 2 * sigma ** 2\n return math.exp(- x_mu_square / two_sigma_squre) / sqrt_two_pi_sigma\n\ndef normal_cdf(x, mu=0, sigma=1):\n '''https://ko.wikipedia.org/wiki/%EC%98%A4%EC%B0%A8_%ED%95%A8%EC%88%98'''\n sqrt_two = math.sqrt(2)\n return (1 + math.erf( (x-mu)/sqrt_two/sigma )) / 2\n\ndef inverse_normal_cdf(p, mu=0, sigma=1, tolerance=0.00001):\n ''' change to normal'''\n if mu != 0 or sigma != 1:\n return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)\n \n low_z, low_p = -10.0, 0\n hi_z, hi_p = 10.0, 1\n while hi_z - low_z > tolerance:\n mid_z = (low_z + hi_z) / 2\n mid_p = normal_cdf(mid_z)\n if mid_p < p:\n low_z, low_p = mid_z, mid_p\n elif mid_p > p:\n hi_z, hi_p = mid_z, mid_p\n 
else:\n break\n \n return mid_z\n\ndef plot_normal_func(func, xs, line, mu, sigma):\n plt.plot(xs, [func(x, mu, sigma) for x in xs], line, label=\"mu={},sigma={}\".format(mu, sigma))\n\ndef plot_show(title):\n plt.legend()\n plt.title(title)\n plt.show()\n \nif __name__ == '__main__':\n print(uniform_pdf(0.2))\n print(uniform_cdf(0.2))\n\n xs = [x/10.0 for x in range(-50, 50)]\n\n# plot_normal_func(normal_pdf, xs, '-', 0, 1)\n# plot_normal_func(normal_pdf, xs, '--', 0, 2)\n# plot_normal_func(normal_pdf, xs, ':', 0, 0.5)\n# plot_normal_func(normal_pdf, xs, '-.', -1, 1)\n# plot_show(\"Various Normal pdfs\")\n\n plot_normal_func(normal_cdf, xs, '-', 0, 1)\n plot_normal_func(normal_cdf, xs, '--', 0, 2)\n plot_normal_func(normal_cdf, xs, ':', 0, 0.5)\n plot_normal_func(normal_cdf, xs, '-.', -1, 1)\n plot_show(\"Various Normal cdfs\")\n","sub_path":"HelloPython/d06.py","file_name":"d06.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"100279423","text":"import xml.etree.ElementTree as ET\nimport os\nimport json\nimport logging\n\n\nclass clsGetIsDatabaseMessages:\n \"\"\"\n This class declares all functions that will be used for auto Scripting\n Args:\n\n \"\"\"\n def __init__(self):\n self.G_dctADCS_common_types = dict()\n self.G_dctDatabaseMessages = dict()\n self.G_lstTypeDict = ['ST', 'CH', 'I1', 'I2', 'I4', 'I8', 'U1', 'U2', 'U4', 'U8', 'F4', 'F8']\n self.G_dctTypeDict = {'ST': \"ac\", 'CH': \"ac\", 'I1': \"i1\", 'I2': \"i2\", 'I4': \"i4\", 'I8': \"i8\", 'U1': \"u1\", 'U2': \"u2\", 'U4': \"u4\", 'U8': \"u8\", 'F4': \"f4\", 'F8': \"f8\", 'E1': \"e1\", 'E2': \"e2\"}\n self.G_dctTypeValues = {'ST': \"\", 'CH': \"\", 'I1': 0, 'I2': 0, 'I4': 0, 'I8': 0, 'U1': 0, 'U2': 0, 'U4': 0, 'U8': 0, 'F4': 0.0, 'F8': 0.0}\n self.G_dctEnumToBaseType = {'E1': \"U1\", 'E2': \"U2\"}\n self.G_dctTypeFileName = dict()\n self.G_lstIntegers = ['I1', 'I2', 'I4', 'I8', 'U1', 'U2', 'U4', 'U8']\n self.G_lstFloats = ['F4', 'F8']\n self.G_dctEnumValues = dict()\n\n def vGetCommonTypes(self):\n \"\"\"\n get all common type files and pass to vGenerateCommonTypes\n Args:\n \n Returns:\n \n Raises:\n Raises no exceptions\n \"\"\"\n acPathToSICD = os.path.join(os.path.dirname(__file__), \"../SICD/\")\n lstFiles = os.listdir(acPathToSICD)\n\n for i in range(len(lstFiles)):\n acFileName = str(lstFiles[i])\n if 'common_types'.lower() in acFileName.lower():\n try:\n acTree = ET.parse(os.path.join(os.path.dirname(__file__), \"../SICD/\" + lstFiles[i]))\n objRoot = acTree.getroot()\n acFileName = \"Autogen.\" + lstFiles[i][:-4].lower()\n self.vGenerateCommonTypes(acFileName, objRoot)\n except Exception as E:\n logging.error(\"Error getting enums from adcs_common_types.xml, error -> %s\", E)\n\n def vGenerateCommonTypes(self, acFileNamePar, objRootPar):\n \"\"\"\n Autogen common types, get all enums and and structures and store in to a global dictionary\n Args:\n \n Returns:\n \n Raises:\n Raises no exceptions\n \"\"\"\n for objTypedef in objRootPar:\n if objTypedef.tag == 'Typedef':\n # check if the attribute is an Enum or Structure\n if objTypedef.attrib['Name'][:1] == 'E':\n # Store the enum name with the file name for later reference to a global dictionary\n self.G_dctTypeFileName.update({objTypedef.attrib['Name']: acFileNamePar})\n for objRecord in objTypedef:\n if objRecord.tag == 'Record':\n # Add all enums to a dictionary, with the enum type, enum name and value\n self.G_dctEnumValues.update({objTypedef.attrib['Name']: {}})\n 
dctEnums = dict()\n for objEnum in objRecord:\n if objEnum.tag == \"Enumeration\":\n dctEnums.update({objEnum.attrib['Name']: objEnum.attrib[\"Value\"]})\n self.G_dctEnumValues[objTypedef.attrib['Name']].update({\"enums\": dctEnums, \"Type\": objRecord.attrib[\"Type\"]})\n\n elif objTypedef.attrib['Name'][:1] == 's':\n self.G_dctADCS_common_types[objTypedef.attrib['Name']] = dict({\"Attr\": {}})\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"] = list()\n self.G_dctTypeFileName.update({objTypedef.attrib['Name']: acFileNamePar})\n objIndex = 0\n # Loop through the object to get all records\n for objRecord in objTypedef:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"].append(dict())\n if objRecord.tag == 'Record':\n # If the tag name of is 'Record', read its attribute \"Name, Type, Default, count, value, min value, max value ...\"\n if \"Name\" in objRecord.attrib:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Name\": objRecord.attrib[\"Name\"]})\n if \"Type\" in objRecord.attrib:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Type\": acFileNamePar + \".\" + objRecord.attrib[\"Type\"]})\n # Check if \"Type\" if of base type, enum or structure\n if objRecord.attrib[\"Type\"] in self.G_lstTypeDict:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Name\": self.G_dctTypeDict[objRecord.attrib[\"Type\"]] + objRecord.attrib[\"Name\"]})\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Type\": \"Autogen.adcs_base_types.clsAdcsBaseType\"})\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"acBaseType\": objRecord.attrib[\"Type\"]})\n if objRecord.attrib[\"Type\"] == \"CH\" or objRecord.attrib[\"Type\"] == \"ST\":\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"acBaseType\": objRecord.attrib[\"Type\"] + \":\" + objRecord.attrib[\"Count\"]})\n\n if \"Default\" in objRecord.attrib:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": objRecord.attrib[\"Default\"]})\n if objRecord.attrib[\"Type\"] in self.G_lstIntegers:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": int(str(objRecord.attrib[\"Default\"]), 0)})\n elif objRecord.attrib[\"Type\"] in self.G_lstFloats:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": float(objRecord.attrib[\"Default\"])})\n else:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": self.G_dctTypeValues[objRecord.attrib[\"Type\"]]})\n if objRecord.attrib[\"Type\"] in self.G_lstIntegers:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": int(str(self.G_dctTypeValues[objRecord.attrib[\"Type\"]]), 0)})\n elif objRecord.attrib[\"Type\"] in self.G_lstFloats:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": float(self.G_dctTypeValues[objRecord.attrib[\"Type\"]])})\n\n elif objRecord.attrib[\"Type\"][:1] == 'E':\n enumType = objRecord.attrib[\"Type\"].split(\"_\")[0]\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Name\": self.G_dctTypeDict[enumType] + objRecord.attrib[\"Name\"]})\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"acBaseType\": 
self.G_dctEnumToBaseType[enumType]})\n\n elif objRecord.attrib[\"Type\"][:1] == 's':\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Name\": \"s\" + objRecord.attrib[\"Name\"]})\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Type\": acFileNamePar + \".\" + objRecord.attrib[\"Type\"]})\n \n if \"Default\" in objRecord.attrib:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": objRecord.attrib[\"Default\"]})\n\n if objRecord.attrib[\"Type\"] in self.G_lstIntegers:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": int(str(objRecord.attrib[\"Default\"]), 0)})\n elif objRecord.attrib[\"Type\"] in self.G_lstFloats:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": float(objRecord.attrib[\"Default\"])})\n\n if objRecord.attrib[\"Type\"][:1] == 'E':\n if objRecord.attrib[\"Type\"] in self.G_dctEnumValues:\n enumVal = int(str(self.G_dctEnumValues[objRecord.attrib[\"Type\"]][\"enums\"][objRecord.attrib[\"Default\"]]), 0)\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": enumVal})\n else:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": 0})\n\n if \"Count\" in objRecord.attrib:\n if \"UseStruct\" not in objRecord.attrib:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"iBaseTypeCount\": int(str(objRecord.attrib[\"Count\"]), 0)})\n\n elif objRecord.attrib[\"Type\"][:1] != 's':\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"iBaseTypeCount\": 1})\n\n if \"Min\" in objRecord.attrib:\n if \"Default\" not in objRecord.attrib and \"Max\" not in objRecord.attrib:\n if objRecord.attrib[\"Type\"] in self.G_lstIntegers:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": int(str(objRecord.attrib[\"Min\"]), 0)})\n elif objRecord.attrib[\"Type\"] in self.G_lstFloats:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": float(objRecord.attrib[\"Min\"])})\n if \"Max\" in objRecord.attrib:\n if \"Default\" not in objRecord.attrib:\n if objRecord.attrib[\"Type\"] in self.G_lstIntegers:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": int(str(objRecord.attrib[\"Max\"]), 0)})\n elif objRecord.attrib[\"Type\"] in self.G_lstFloats:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Value\": float(objRecord.attrib[\"Max\"])})\n\n # if the Record is of struct type, get the count if exist and repeat the structure 'Count' times else get the structure as it is.\n if \"UseStruct\" in objRecord.attrib:\n if objRecord.attrib[\"UseStruct\"] == \"True\":\n if \"Count\" in objRecord.attrib and int(objRecord.attrib[\"Count\"]) > 1:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Name\": \"as\" + objRecord.attrib[\"Name\"]})\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Type\": \"Autogen.adcs_base_types.clsAdcsStructArrayType\"})\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Attr\": {\"Attr\": list()}})\n for structCount in range(int(objRecord.attrib[\"Count\"])):\n if objRecord.attrib['Type'] in self.G_dctADCS_common_types:\n 
self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex][\"Attr\"][\"Attr\"].append({\"Attr\": list()})\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex][\"Attr\"][\"Attr\"][structCount][\"Attr\"].append(self.G_dctADCS_common_types[objRecord.attrib['Type']])\n\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex][\"Attr\"][\"Attr\"][structCount].update({\"Name\": \"Autogen.adcs_base_types.clsAdcsStructArrayType\" + str(structCount)})\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex][\"Attr\"][\"Attr\"][structCount].update({\"Type\": self.G_dctTypeFileName[objRecord.attrib[\"Type\"]] + \".\" + objRecord.attrib[\"Type\"]})\n\n else:\n if objRecord.attrib['Type'] in self.G_dctADCS_common_types:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Attr\": self.G_dctADCS_common_types[objRecord.attrib['Type']]})\n\n else:\n self.G_dctADCS_common_types[objTypedef.attrib['Name']][\"Attr\"][objIndex].update({\"Attr\": None})\n \n objIndex += 1\n\n def vGetAllMessages(self, acDatabaseFileNamePar):\n \"\"\"\n This function reads the message def files and get the message payload\n Args:\n\n Returns:\n\n Raises:\n Raises no exceptions\n \"\"\"\n acPathToSICD = os.path.join(os.path.dirname(__file__), \"../SICD/\")\n lstFiles = os.listdir(acPathToSICD)\n for i in range(len(lstFiles)):\n if lstFiles[i][-10:] != \"_Types.xml\" and lstFiles[i] != \".gitignore\" and lstFiles[i][-4:] == \".xml\":\n try:\n acTree = ET.parse(os.path.join(os.path.dirname(__file__), \"../SICD/\" + lstFiles[i]))\n objRoot = acTree.getroot()\n if objRoot.tag == \"MessageFile\":\n for objMessageFile in objRoot:\n if objMessageFile.tag == \"Section\":\n if \"struct\" not in objMessageFile.attrib[\"Name\"]:\n for objMessages in objMessageFile:\n # Only get isDatabase messages\n if objMessages.tag == \"Message\" and \"IsDatabaseMsg\" in objMessages.attrib:\n self.G_dctDatabaseMessages.update({objMessages.attrib[\"Name\"]: {}})\n self.G_dctDatabaseMessages[objMessages.attrib[\"Name\"]].update({\"acMessageName\": objMessages.attrib[\"Name\"], \"dctMessagePayload\": {\"Attr\": []}})\n for objPayload in objMessages:\n if objPayload.tag == \"Payload\":\n for objField in objPayload:\n if objField.tag == \"Field\":\n dctFieldAttr = dict()\n if \"Name\" in objField.attrib:\n dctFieldAttr[\"Name\"] = objField.attrib[\"Name\"]\n if \"Type\" in objField.attrib:\n dctFieldAttr[\"Type\"] = objField.attrib[\"Type\"]\n if objField.attrib[\"Type\"] in self.G_lstTypeDict:\n dctFieldAttr[\"Name\"] = self.G_dctTypeDict[objField.attrib[\"Type\"]] + objField.attrib[\"Name\"]\n dctFieldAttr[\"Type\"] = \"Autogen.adcs_base_types.clsAdcsBaseType\"\n dctFieldAttr[\"acBaseType\"] = objField.attrib[\"Type\"]\n if \"Default\" in objField.attrib:\n dctFieldAttr[\"Value\"] = (objField.attrib[\"Default\"])\n if objField.attrib[\"Type\"] in self.G_lstIntegers:\n dctFieldAttr[\"Value\"] = int(str(objField.attrib[\"Default\"]), 0)\n elif objField.attrib[\"Type\"] in self.G_lstFloats:\n dctFieldAttr[\"Value\"] = float(objField.attrib[\"Default\"])\n\n else:\n dctFieldAttr[\"Value\"] = 0\n if objField.attrib[\"Type\"] in self.G_lstIntegers:\n dctFieldAttr[\"Value\"] = 0\n elif objField.attrib[\"Type\"] in self.G_lstFloats:\n dctFieldAttr[\"Value\"] = 0.0\n else:\n dctFieldAttr[\"Name\"] = \"s\" + objField.attrib[\"Name\"]\n\n if objField.attrib[\"Type\"] in self.G_dctTypeFileName:\n dctFieldAttr[\"Type\"] = 
self.G_dctTypeFileName[objField.attrib[\"Type\"]] + \".\" + objField.attrib[\"Type\"]\n else:\n dctFieldAttr[\"Type\"] = objField.attrib[\"Type\"]\n\n if objField.attrib[\"Type\"][:1] == \"E\":\n fieldType = objField.attrib[\"Type\"].split(\"_\")[0]\n if fieldType in self.G_dctEnumToBaseType:\n dctFieldAttr[\"Name\"] = self.G_dctTypeDict[fieldType] + objField.attrib[\"Name\"]\n baseType = \"U\" + fieldType[1:]\n dctFieldAttr[\"acBaseType\"] = baseType\n if \"Default\" in objField.attrib:\n dctFieldAttr[\"Value\"] = objField.attrib[\"Default\"]\n if objField.attrib[\"Type\"] in self.G_lstIntegers:\n dctFieldAttr[\"Value\"] = int(str(objField.attrib[\"Default\"]), 0)\n elif objField.attrib[\"Type\"] in self.G_lstFloats:\n dctFieldAttr[\"Value\"] = float(objField.attrib[\"Default\"])\n\n if objField.attrib[\"Type\"][:1] == 'E':\n if objField.attrib[\"Type\"] in self.G_dctEnumValues:\n iEnumVal = int(str(self.G_dctEnumValues[objField.attrib[\"Type\"]][\"enums\"][objField.attrib[\"Default\"]]), 0)\n dctFieldAttr[\"Value\"] = iEnumVal\n\n if \"Count\" in objField.attrib and \"UseStruct\" not in objField.attrib:\n dctFieldAttr[\"iBaseTypeCount\"] = int(str(objField.attrib[\"Count\"]), 0)\n else:\n if objField.attrib[\"Type\"] in self.G_lstTypeDict and \"UseStruct\" not in objField.attrib:\n dctFieldAttr[\"iBaseTypeCount\"] = 1\n elif objField.attrib[\"Type\"][:1] == \"E\":\n fieldType = objField.attrib[\"Type\"].split(\"_\")[0]\n if fieldType in self.G_dctEnumToBaseType:\n dctFieldAttr[\"iBaseTypeCount\"] = 1\n\n if \"Min\" in objField.attrib:\n if \"Default\" not in objField.attrib and \"Max\" not in objField.attrib:\n if objField.attrib[\"Type\"] in self.G_lstIntegers:\n dctFieldAttr[\"Value\"] = int(str(objField.attrib[\"Min\"]), 0)\n elif objField.attrib[\"Type\"] in self.G_lstFloats:\n dctFieldAttr[\"Value\"] = float(objField.attrib[\"Min\"])\n\n if \"Max\" in objField.attrib:\n if \"Default\" not in objField.attrib:\n if objField.attrib[\"Type\"] in self.G_lstIntegers:\n dctFieldAttr[\"Value\"] = int(str(objField.attrib[\"Max\"]), 0)\n elif objField.attrib[\"Type\"] in self.G_lstFloats:\n dctFieldAttr[\"Value\"] = float(objField.attrib[\"Max\"])\n if \"UseStruct\" in objField.attrib:\n if objField.attrib[\"UseStruct\"] == \"True\":\n dctFieldAttr[\"Attr\"] = dict()\n dctFieldAttr[\"Name\"] = \"s\" + objField.attrib[\"Name\"]\n dctFieldAttr[\"Type\"] = self.G_dctTypeFileName[objField.attrib[\"Type\"]] + \".\" + objField.attrib['Type']\n if \"Count\" in objField.attrib:\n dctFieldAttr[\"Attr\"].update({\"Attr\": list()})\n dctFieldAttr[\"Name\"] = \"as\" + objField.attrib[\"Name\"]\n dctFieldAttr[\"Type\"] = \"Autogen.adcs_base_types.clsAdcsStructArrayType\"\n for structCount in range(int(objField.attrib[\"Count\"])):\n if objField.attrib['Type'] in self.G_dctADCS_common_types:\n dctFieldAttr[\"Attr\"][\"Attr\"].append({\"Attr\": list()})\n dctFieldAttr[\"Attr\"][\"Attr\"][structCount][\"Attr\"].append(self.G_dctADCS_common_types[objField.attrib['Type']])\n dctFieldAttr[\"Attr\"][\"Attr\"][structCount].update({\"Name\": \"Autogen.adcs_base_types.clsAdcsStructArrayType\" + str(structCount)})\n dctFieldAttr[\"Attr\"][\"Attr\"][structCount].update({\"Type\": self.G_dctTypeFileName[objField.attrib[\"Type\"]] + \".\" + objField.attrib[\"Type\"]})\n else:\n if objField.attrib['Type'] in self.G_dctADCS_common_types:\n dctFieldAttr[\"Attr\"] = self.G_dctADCS_common_types[objField.attrib['Type']]\n else:\n dctFieldAttr.update({\"Attr\": None})\n else:\n dctFieldAttr.update({\"Attr\": None})\n\n 
self.G_dctDatabaseMessages[objMessages.attrib[\"Name\"]][\"dctMessagePayload\"][\"Attr\"].append(dctFieldAttr)\n\n elif \"enumerations\" not in objMessageFile.attrib[\"Name\"] and \"Enum\" not in objMessageFile.attrib[\"Name\"] and \"struct\" in objMessageFile.attrib[\"Name\"]:\n self.vGenerateCommonTypes(\"\", objMessageFile)\n\n except Exception as E:\n logging.error(\"error -> %s\", E)\n\n with open(os.path.join(os.path.dirname(__file__), \"../databaseFiles/\" + acDatabaseFileNamePar + \".json\"), 'w') as f:\n f.write(json.dumps({\"factory\": self.G_dctDatabaseMessages, \"operational\": {}}, indent=1, sort_keys=True))\n f.close()\n","sub_path":"webmms/Source/BackEnd/Comms/getIsDatabaseMessages.py","file_name":"getIsDatabaseMessages.py","file_ext":"py","file_size_in_byte":26656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"228346346","text":"# To create the JSON file, run\n# pandoc --smart --parse-raw --to=json fenced/input.md > fenced/input.json\n\nimport panflute as pf\nimport pandocfilters, json\n\n\ndef fenced_action(options, data, element, doc):\n bar = options.get('foo')\n assert bar is None or bar == 'bar'\n assert not data or data == 'raw text' or \\\n data == \"\"\"raw1\\nraw2\\nthis\\n...\\nis\\n...\\nall raw\"\"\"\n # assert bar or data, (bar,data)\n return\n\n\ndef empty_filter(element, doc):\n return\n\n\ndef test_all():\n input_fn = './tests/fenced/input.json'\n output_fn = './tests/fenced/output.json'\n\n # Test fenced filter\n\n print('\\nLoading JSON...')\n with open(input_fn, encoding='utf-8') as f:\n doc = pf.load(f)\n print('Dumping JSON...')\n with open(output_fn, mode='w', encoding='utf-8') as f:\n pf.dump(doc, f)\n f.write('\\n')\n print(' - Done!')\n\n print('\\nComparing...')\n with open(input_fn, encoding='utf-8') as f:\n input_data = f.read()\n with open(output_fn, encoding='utf-8') as f:\n output_data = f.read()\n\n print('Are both files the same?')\n print(' - Length:', len(input_data) == len(output_data), len(input_data), len(output_data))\n print(' - Content:', input_data == output_data)\n\n print('\\nApplying trivial filter...')\n pf.run_filter(empty_filter, doc=doc)\n print(' - Done!')\n dump_and_compare(doc, input_fn, output_fn)\n\n print('\\nApplying YAML filter...')\n pf.run_filter(pf.yaml_filter, tag='spam', function=fenced_action, doc=doc)\n print(' - Done!')\n dump_and_compare(doc, input_fn, output_fn)\n\n print('\\nApplying Strict YAML filter...')\n pf.run_filter(pf.yaml_filter, tag='eggs', function=fenced_action, doc=doc, strict_yaml=True)\n print(' - Done!')\n dump_and_compare(doc, input_fn, output_fn)\n\n\ndef dump_and_compare(doc, input_fn, output_fn):\n print(' - Dumping JSON...')\n with open(output_fn, mode='w', encoding='utf-8') as f:\n pf.dump(doc, f)\n f.write('\\n')\n print(' - Done!')\n print(' - Comparing...')\n with open(input_fn, encoding='utf-8') as f:\n input_data = f.read()\n with open(output_fn, encoding='utf-8') as f:\n output_data = f.read()\n print(' - Are both files the same?')\n print(' - Length:', len(input_data) == len(output_data), len(input_data), len(output_data))\n print(' - Content:', input_data == output_data)\n assert input_data == output_data\n\n\nif __name__ == \"__main__\":\n test_all()\n","sub_path":"tests/test_fenced.py","file_name":"test_fenced.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"105637057","text":"\"\"\"\n@author:lijx\n@contact: 
360595252@qq.com\n@site: http://blog.51cto.com/breaklinux\n@version: 1.0\n\"\"\"\nfrom __init__ import create_app\napp = create_app()\nif __name__ == '__main__':\n app.run(\n host=\"0.0.0.0\",\n port=5000,\n debug=True\n )\n","sub_path":"boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"219106439","text":"\"\"\"\nOverview\nMany people know that Apple uses the letter \"i\" in almost all of its devices to emphasize its personality.\n\nAnd so John, a programmer at Apple, was given the task of making a program that would add that letter to every word. Let's help him do it, too.\n\nTask:\nYour task is to make a function that takes the value of word and returns it with an \"i\" at the beginning of the word. For example we get the word \"Phone\", so we must return \"iPhone\". But we have a few rules:\n\nThe word should not begin with the letter \"I\", for example Inspire.\nThe number of vowels should not be greater than or equal to the number of consonants, for example East or Peace. (\"y\" is considered a consonant)\nThe first letter should not be lowercase, for example road.\nIf the word does not meet the rules, we return \"Invalid word\".\n\ndocker:\ndocker run -it --name iword -v $PWD:/home/app -w /home/app -p 5000:5000 python:3.8-slim python imeverywhere.py\n\ndockerfile:\n docker build -t iword:v1 .\n docker run -it image_id\n\"\"\"\nimport re\n\ndef i(word):\n invalid='Invalid word'\n if(word==\"\" or word[0].islower() or word[0]==\"I\"): return invalid\n\n\n vcount=len(re.findall(\"[aeiou]\",word.lower()))\n ccount=len(re.findall(r\"[^aeiou]\",word.lower()))\n if(vcount>=ccount): return invalid\n return (\"i\"+word)\n\n\n\nassert(i('')== 'Invalid word')\nassert(i('Inspire')== 'Invalid word')\nassert(i('East')== 'Invalid word')\nassert(i('Peace')== 'Invalid word')\nassert(i('Phone')== 'iPhone')\nassert(i('road')== 'Invalid word')\nprint(\"done\")","sub_path":"imeverywhere/imeverywhere.py","file_name":"imeverywhere.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"291346978","text":"#Note: since normals are flat across the cube, we don't interpolate\n# surface normals (Phong-style) over the primitives.\n# We DO interpolate the eye and light vectors for the frag shader\n\nimport os\n\nfrom OpenGL.GL import *\nfrom OpenGL.GL.shaders import compileShader, compileProgram\n\nimport pygame\nfrom pygame.locals import *\n\nimport numpy as N\nfrom ctypes import c_void_p\n\nfrom transforms import *\nfrom databuffer import DataBuffer\nfrom cylinder import Cylinder\nfrom dome import Dome\nfrom floor import Floor\nfrom frame import Frame\n\ndef loadFile(filename):\n with open(os.path.join(os.getcwd(), filename)) as fp:\n return fp.read()\n\ndef display():\n global time, light, frame, cyl, dome, fogEnd, pillarSeparation, pillarN\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n pMatrix = projectionMatrix(0.1, 1000.0, .10, 0.075)\n\n mvMatrix = frame.matrix\n\n xoffset = pillarSeparation*N.floor(frame.translation[0][3]/pillarSeparation)\n zoffset = pillarSeparation*N.floor(frame.translation[2][3]/pillarSeparation)\n\n trans = translationMatrix(-xoffset,0,-zoffset)\n floor.draw({'time':time,\n 'useKnot':0.0,\n 'fogEnd':fogEnd},\n {},\n {'vMatrix':frame.matrix,\n 'pMatrix':pMatrix,\n 'mMatrix':trans})\n for row in N.arange(-pillarN*pillarSeparation, pillarN*pillarSeparation, 
pillarSeparation):\n trans[0][3] = row-xoffset\n for col in N.arange(-pillarN*pillarSeparation, pillarN*pillarSeparation, pillarSeparation):\n trans[2][3] = col-zoffset\n cyl.draw({'time':time,\n 'useKnot':1.0,\n 'fogEnd':fogEnd},\n {},\n {'vMatrix':frame.matrix,\n 'pMatrix':pMatrix,\n 'mMatrix':trans})\n dome.draw({'time':time, 'useKnot':0.0, 'fogEnd':fogEnd},\n {},\n {'vMatrix':frame.matrix,\n 'pMatrix':pMatrix,\n 'mMatrix':trans})\n \ndef initializeVertexArray():\n # Must have a vertex array object to use vertex buffer objects.\n # Just one will do:\n global vao_array\n vao_array = N.zeros(1, dtype=N.uint)\n glGenVertexArrays(1, vao_array)\n glBindVertexArray(vao_array[0])\n\n\n# Must be called after we have an OpenGL context, i.e. after the pygame\n# window is created\ndef init():\n global cubeBuffers,textureBuffers, cyl, dome, floor, pillarN, pillarSeparation\n pillarN = 5\n pillarSeparation = 10.0\n cyl = Cylinder()\n dome = Dome(width=pillarSeparation)\n floor = Floor()\n initializeVertexArray() \n glClearColor(0.0, 0.0, 0.0, 0.0)\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_CULL_FACE)\n \ndef main():\n global window, time, light, inc, whichTex, frame, fogEnd\n pygame.init()\n screen = pygame.display.set_mode((640,480), OPENGL|DOUBLEBUF)\n screentoggle = False\n clock = pygame.time.Clock()\n time = 0.0\n light = N.array((10,10,10,0), dtype = N.float32)\n inc = 0.05\n whichTex = 0\n init()\n frame = Frame()\n fogEnd = 50.0\n while True: \n for event in pygame.event.get():\n if event.type == QUIT:\n return\n if event.type == KEYUP and event.key == K_ESCAPE:\n return\n if event.type == KEYDOWN:\n if event.key == K_s:\n if inc == 0.0:\n inc = 0.05\n else:\n inc = 0.0\n\n pressed = pygame.key.get_pressed()\n if pressed[K_UP]:\n frame.move(0.25)\n if pressed[K_DOWN]:\n frame.move(-0.25)\n if pressed[K_a]:\n frame.tilt(1.0)\n if pressed[K_z]:\n frame.tilt(-1.0)\n if pressed[K_LEFT]:\n if pressed[K_LSHIFT]:\n frame.strafe(0.25)\n else:\n frame.rotate(1.0)\n if pressed[K_RIGHT]:\n if pressed[K_LSHIFT]:\n frame.strafe(-0.25)\n else:\n frame.rotate(-1.0)\n if pressed[K_f]:\n fogEnd *= 1.1\n if pressed[K_v]:\n fogEnd *= 0.9\n \n clock.tick(30)\n time += inc\n display()\n pygame.display.flip()\n\nif __name__ == '__main__':\n try:\n main()\n finally:\n pygame.quit()\n","sub_path":"lectures/110opengl/examples/dungeon/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"215977624","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ndef init_weights(shape):\n return tf.Variable(tf.random_normal(shape, stddev=0.01))\n\nbatch_size=128\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\nx_train, y_train, x_test, y_test = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels\n\nx = tf.placeholder(tf.float32, shape=[None, 784])\ny = tf.placeholder(tf.float32, shape=[None, 10])\n\nw = init_weights([784, 10])\npy_x = tf.matmul(x, w)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=y))\ntrain_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)\npredict_op = tf.argmax(py_x, axis=1)\n\nwith tf.Session() as sess:\n tf.global_variables_initializer().run()\n for step in range(100):\n for start,end in zip(range(0, len(x_train), batch_size), range(batch_size, len(x_train)+1, batch_size)):\n 
sess.run(train_op, feed_dict={x: x_train[start:end], y:y_train[start:end]})\n print(\"Step {0}, Acc: {1}\".format(step, np.mean(np.argmax(y_test, axis=1)==sess.run(predict_op, feed_dict={x: x_test}))))\n\n","sub_path":"deeplearning/src/tensorflow/tutorials/t02-logistic-regression.py","file_name":"t02-logistic-regression.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148450687","text":"import re\nfrom sympy.assumptions.handlers import (CommonHandler, test_closed_group)\nfrom sympy.assumptions import ask, Q\nfrom checks_lib.regexes import (integer,number,sci_num_type_re)\nfrom checks_lib.default_values import separator_functions\n\nclass AskRationalHandler2(CommonHandler):\n \"\"\"\n Handler for Q.rational_2\n Test that an expression belongs to the field of rational numbers\n \"\"\"\n\n\n @staticmethod\n def Expr(expr, assumptions):\n return True\n\n @staticmethod\n def Add(expr, assumptions):\n \"\"\"\n Rational + Rational -> Rational\n Rational + !Rational -> !Rational\n !Rational + !Rational -> ?\n \"\"\"\n if expr.is_number:\n if expr.as_real_imag()[1]:\n return False\n return test_closed_group(expr, assumptions, Q.rational_2)\n\n\n Mul = Add\n \n @staticmethod\n def Pow(expr, assumptions):\n if ask(Q.integer(expr.exp), assumptions):\n return ask(Q.rational_2(expr.base), assumptions)\n else:\n return False\n Rational = staticmethod(CommonHandler.AlwaysTrue)\n\n Float = staticmethod(CommonHandler.AlwaysNone)\n\n ImaginaryUnit, Infinity, NegativeInfinity, Pi, Exp1,\\\n GoldenRatio, TribonacciConstant = \\\n [staticmethod(CommonHandler.AlwaysFalse)]*7\n\n @staticmethod\n def exp(expr, assumptions):\n x = expr.args[0]\n if ask(Q.rational(x), assumptions):\n return ask(~Q.nonzero(x), assumptions)\n\n @staticmethod\n def cot(expr, assumptions):\n x = expr.args[0]\n if ask(Q.rational(x), assumptions):\n return False\n\n @staticmethod\n def log(expr, assumptions):\n x = expr.args[0]\n if ask(Q.rational(x), assumptions):\n return ask(~Q.nonzero(x - 1), assumptions)\n\n sin, cos, tan, asin, atan = [exp]*5\n acos, acot = log, cot\n\n\ndef number_type(input,options):\n '''\n Checking for specific type of number\n Might be swaped to AskHandler later,\n right now we use regex\n '''\n equiv = True\n if 'complexType' in options:\n equiv = (re.search(r'(?= 1) & (df['CNT_CHILDREN'] <= 2)]\nGroupC = df[(df['CNT_CHILDREN'] >= 3) & (df['CNT_CHILDREN'] <= 5)]\nGroupD = df[ df['CNT_CHILDREN'] > 5 ]\n\n\n# In[]\n\nz_column = ['AMT_INCOME_TOTAL']\n\n\nsc.df_standardized(GroupA, z_column, parameter_save_folder_path = 'None')\nsc.df_standardized(GroupB, z_column, parameter_save_folder_path = 'None')\nsc.df_standardized(GroupC, z_column, parameter_save_folder_path = 'None')\nsc.df_standardized(GroupD, z_column, parameter_save_folder_path = 'None')","sub_path":"homework/Day_013_HW.py","file_name":"Day_013_HW.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"271909244","text":"\"\"\"\nCreated on Sun Mar 15 15:01:21 2020\n\n@author: jan-philippfranken\n\"\"\"\n###############################################################################\n########################### Main File #########################################\n###############################################################################\n\n\n####################### General imports #######################################\nimport numpy as np\nimport pandas as 
pd\nimport random\nfrom scipy.optimize import minimize\n# print(pd.__version__) # needs to be a relatively recent version (older versions coming with python <= 3.6 do not work)\n\n\n\n####################### Custom imports ########################################\nfrom mcmc_cond_1_new import mcmc_sampler, mcmc_sampler_map_surgery, mcmc_sampler_map\nfrom pcfg_generator import pcfg_generator\nfrom tree_regrower import tree_regrower\nfrom transform_functions import compute_orientation, check_structure, get_production_probs_prototype\nfrom tau_ml_estimation import get_tau, fitted_probs, compare_BIC, hard_max_selections, compute_acc, compute_distance\nfrom create_random_scenes import rand_scene_creator\nfrom reverse_rule import reverse_rule\n\n\n\n###################### Preliminaries ##########################################\nZ = pcfg_generator() # instantiating pcfg generator\ntg = tree_regrower()\nrr = reverse_rule()\nrandom.seed(1) # setting random.seed to allow replication of mcmc chain\nmain_data_formatted = pd.read_csv('main_data_formatted_cond_three_cut_rules_second_rule.csv') # getting the preprocessed data file\n\n\n####################### grammar ##############################################\nS = ['Z.exists(lambda xN: A,X)', 'Z.forall(lambda xN: A,X)', 'L(lambda xN: A,M,X)']\nA = ['B', 'S']\nB = ['C', 'J(B,B)', 'Z.not_operator(B)']\nC = ['Z.equal(xN, D)', 'K(xN, E)', 'Z.equal(xN,xO,G)', 'K(xN, xO, H)', 'Z.hor_operator(xN,xO,I)']\nD = {\"colour\": [\"'red'\", \"'blue'\", \"'green'\"], \"size\": [1, 2, 3], \"xpos\": np.arange(9),\"ypos\": np.arange(2, 6), \"rotation\": np.arange(0, 6.5, 0.5),\"orientation\": [\"'upright'\", \"'lhs'\", \"'rhs'\", \"'strange'\"], \"grounded\": [\"'no'\", \"'yes'\"]}\nE = {\"size\": [1, 2, 3], \"xpos\": np.arange(9), \"ypos\": np.arange(2, 6), \"rotation\": np.arange(0, 6.3, 0.1)}\nG = [\"'colour'\", \"'size'\", \"'xpos'\", \"'ypos'\", \"'rotation'\", \"'orientation'\", \"'grounded'\"]\nH = [\"'size'\", \"'xpos'\", \"'ypos'\", \"'rotation'\"]\nI = [\"'contact'\"]\nJ = ['Z.and_operator', 'Z.or_operator']\nK = ['Z.lequal', 'Z.grequal', 'Z.less', 'Z.greater']\nL = ['Z.atleast', 'Z.atmost', 'Z.exactly']\nM = [1, 2, 3]\n\n# summarizing grammar in dictionary\nproductions = {\"S\": S, \"A\": A, \"B\": B, \"C\": C, \"D\": D, \"E\": E, \"G\": G, \"H\": H, \"I\": I, \"J\": J, \"K\": K, \"L\": L, \"M\": M}\n\n# replacement dictionary\nreplacements = {\"S\": [\"S\"],\n \"A\": ['Z.exists','Z.forall','Z.atleast','Z.atmost','Z.exactly'],\n \"B\": ['Z.equal', 'Z.hor_operator', 'Z.lequal', 'Z.grequal', 'Z.less', 'Z.greater','Z.and_operator','Z.or_operator','Z.not_operator'],\n \"C\": ['Z.equal', 'Z.hor_operator', 'Z.lequal', 'Z.grequal', 'Z.less', 'Z.greater']}\n\n\nZ = pcfg_generator() # instantiating grammar generator (Z is an arbitrary choice, the letter G is already used in the grammar...)\ntg = tree_regrower() # instantiating tree regrower\n\n\n############################# rules used in the experiment ##################################\n# simple booleans\nthere_is_a_red = \"Z.exists(lambda x1: Z.equal(x1,'red','colour'),X)\"\nnothing_is_upright = \"Z.forall(lambda x1: Z.not_operator(Z.equal(x1,'upright','orientation')),X)\"\none_is_blue = \"Z.exactly(lambda x1: Z.equal(x1,'blue','colour'),1,X)\"\nthere_is_a_blue_and_small = \"Z.exists(lambda x1: Z.and_operator(Z.equal(x1,1,'size'),Z.equal(x1,'blue','colour')),X)\"\nall_are_blue_or_small = \"Z.forall(lambda x1: Z.or_operator(Z.equal(x1,1,'size'),Z.equal(x1,'blue','colour')),X)\"\n\n\n# more complex rules (not 
relevant for the present comparison focusing only on simple booleans)\nall_are_the_same_size = \"ob.forall(lambda x1: ob.forall(lambda x2: ob.equal(x1,x2,'size'), X), X)\"\ncontact = \"ob.exists(lambda x1: ob.exists(lambda x2: ob.hor_operator(x1,x2,'contact'), X), X)\"\nblue_to_red_contact = \"ob.exists(lambda x1: ob.exists(lambda x2: ob.and_operator(ob.and_operator(ob.equal(x1, 'blue','colour'), ob.equal(x2 , 'red', 'colour')), ob.hor_operator(x1,x2,'contact')), X), X)\"\nred_bigger_than_all_nonred = \"ob.exists(lambda x1: ob.forall(lambda x2: ob.or_operator(ob.and_operator(ob.equal(x1,'red','colour'), ob.greater(x1,x2,'size')), ob.equal(x2, 'red', 'colour')), X), X)\"\nstacked = \"ob.exists(lambda x1: ob.exists(lambda x2: ob.and_operator(ob.and_operator(ob.and_operator(ob.and_operator(ob.and_operator(ob.equal(x1,'upright','orientation'),ob.equal(x1,'yes','grounded')),ob.equal(x2,'upright','orientation')),ob.equal(x2,'no','grounded')),ob.equal(x1,x2,'xpos')),ob.hor_operator(x1,x2,'contact')),X),X)\"\n\n# summarising rules in dictionary\nrules_dict = { # simple booleans\n               'Zeta': there_is_a_red,\n               'Upsilon': nothing_is_upright,\n               'Iota': one_is_blue,\n               'Kappa': there_is_a_blue_and_small,\n               'Omega': all_are_blue_or_small,\n               # complex rules\n               'Phi': all_are_the_same_size,\n               'Nu': contact,\n               'Xi': blue_to_red_contact,\n               'Mu': red_bigger_than_all_nonred,\n               'Psi': stacked}\n\nmain_data_formatted = main_data_formatted.query(\"post_resp != 's'\")\nmain_data_formatted = main_data_formatted.reset_index(drop=True)\n# removing all complex rules from the data frame, allowing only the five simple booleans to remain (Zeta, Upsilon, Iota, Kappa, Omega)\nmain_data_formatted = main_data_formatted.query(\"rule_name == 'Zeta' or rule_name == 'Upsilon' or rule_name == 'Iota' or rule_name == 'Kappa' or rule_name == 'Omega'\")\nmain_data_formatted = main_data_formatted.reset_index(drop=True)\n\n# print(len(main_data_formatted['rule_name'])) # remaining number of trials (450/450)\n\n# creating dictionary with subjects' tokens as keys and the number of trials each subject completed after removing complex rules as values\ntrial_counts = main_data_formatted.groupby('token_id').size()\n\ntrial_counts = dict(trial_counts)\n\nprint(trial_counts)\nprint(sum(list(trial_counts.values())))\n\n####################### Sampling Algorithm #########################################\ndef predicted_selections(main_data_formatted, rules_dict, replacements, trial_counts, n_rep = 1, n_1=1, n_2=5): # computes the ll for getting participants' responses to initial generalizations (n_1 * n_2 determines number of MCMC iterations)\n    rep = 0 # index for number of repetitions of the whole sampling procedure\n\n    for repeat in np.arange(n_rep): # for each repeat, an independent output file will be created\n        i = 0 # index over trials (= n_subjects * n_trials per subject)\n        gt = [1,1,1,1,0,0,0,0] # ground truth for generalisations (first four are always correct, last four always wrong)\n\n        # computing additional variables for a single trial for subjects' prior responses\n        fitted_taus_prior = [] # fitted temperature parameters for each trial for each participant (dictates soft vs hard maximisation)\n        raw_probs_prior = [] # raw probabilities for each scene (i.e. if they follow a rule or not)\n        select_probs_prior = [] # selection probabilities for each scene based on tau\n        ll_model_prior = [] # negative log likelihood of observing participant data for a given rule (i.e. 
sum of all log ll for each scene for one rule)\n        BICs_model_prior = [] # BIC\n        ll_baseline_prior = [] # same for baseline\n        BICs_baseline_prior = []\n\n\n        # computing the same variables for all 5 trials for each subject (just aggregating over trials for later model comparison)\n        raw_probs_all_trials_prior = []\n        raw_probs_all_trials_one_list_prior = []\n        select_probs_all_trials_prior = []\n        prior_resp_all_trials_prior = []\n        fitted_taus_all_trials_prior = []\n        ll_model_all_trials_prior = []\n        BICs_model_all_trials_prior = []\n        ll_baseline_all_trials_prior = []\n        BICs_baseline_all_trials_prior = []\n\n        # repeating above for prior labels predicting posterior labels\n        fitted_taus_prior_labels = [] # fitted temperature parameters for each trial for each participant (dictates soft vs hard maximisation)\n        raw_probs_prior_labels = [] # raw probabilities for each scene (i.e. if they follow a rule or not)\n        select_probs_prior_labels = [] # selection probabilities for each scene based on tau\n        ll_model_prior_labels = [] # negative log likelihood of observing participant data for a given rule (i.e. sum of all log ll for each scene for one rule)\n        BICs_model_prior_labels = [] # BIC\n\n\n        # computing the same variables for all 5 trials for each subject (just aggregating over trials for later model comparison)\n        raw_probs_all_trials_prior_labels = []\n        raw_probs_all_trials_one_list_prior_labels = []\n        select_probs_all_trials_prior_labels = []\n        prior_resp_all_trials_prior_labels = []\n        fitted_taus_all_trials_prior_labels = []\n        ll_model_all_trials_prior_labels = []\n        BICs_model_all_trials_prior_labels = []\n\n\n        # repeating the above for subjects' posteriors based on map estimates\n\n        fitted_taus_post_map = [] # fitted temperature parameters for each trial for each participant (dictates soft vs hard maximisation)\n        raw_probs_post_map = [] # raw probabilities for each scene (i.e. if they follow a rule or not)\n        select_probs_post_map = [] # selection probabilities for each scene based on tau\n        ll_model_post_map = [] # negative log likelihood of observing participant data for a given rule (i.e. sum of all log ll for each scene for one rule)\n        BICs_model_post_map = [] # BIC\n        ll_baseline_post_map = [] # same for baseline\n        BICs_baseline_post_map = []\n\n\n        raw_probs_all_trials_post_map = []\n        raw_probs_all_trials_one_list_post_map = []\n        select_probs_all_trials_post_map = []\n        post_resp_all_trials_post_map = []\n        fitted_taus_all_trials_post_map = []\n        ll_model_all_trials_post_map = []\n        BICs_model_all_trials_post_map = []\n        ll_baseline_all_trials_post_map = []\n        BICs_baseline_all_trials_post_map = []\n\n        # # repeating the above for subjects' posteriors based on all 16 data points\n        fitted_taus_post_all = [] # fitted temperature parameters for each trial for each participant (dictates soft vs hard maximisation)\n        raw_probs_post_all = [] # raw probabilities for each scene (i.e. if they follow a rule or not)\n        select_probs_post_all = [] # selection probabilities for each scene based on tau\n        ll_model_post_all = [] # negative log likelihood of observing participant data for a given rule (i.e. 
sum of all log ll for each scene for one rule)\n        BICs_model_post_all = [] # BIC\n        ll_baseline_post_all = [] # same for baseline\n        BICs_baseline_post_all = []\n\n\n        raw_probs_all_trials_post_all = []\n        raw_probs_all_trials_one_list_post_all = []\n        select_probs_all_trials_post_all = []\n        post_resp_all_trials_post_all = []\n        fitted_taus_all_trials_post_all = []\n        ll_model_all_trials_post_all = []\n        BICs_model_all_trials_post_all = []\n        ll_baseline_all_trials_post_all = []\n        BICs_baseline_all_trials_post_all = []\n\n        fitted_taus_post_all_seed = [] # fitted temperature parameters for each trial for each participant (dictates soft vs hard maximisation)\n        raw_probs_post_all_seed = [] # raw probabilities for each scene (i.e. if they follow a rule or not)\n        select_probs_post_all_seed = [] # selection probabilities for each scene based on tau\n        ll_model_post_all_seed = [] # negative log likelihood of observing participant data for a given rule (i.e. sum of all log ll for each scene for one rule)\n        BICs_model_post_all_seed = [] # BIC\n        ll_baseline_post_all_seed = [] # same for baseline\n        BICs_baseline_post_all_seed = []\n\n\n        raw_probs_all_trials_post_all_seed = []\n        raw_probs_all_trials_one_list_post_all_seed = []\n        select_probs_all_trials_post_all_seed = []\n        post_resp_all_trials_post_all_seed = []\n        fitted_taus_all_trials_post_all_seed = []\n        ll_model_all_trials_post_all_seed = []\n        BICs_model_all_trials_post_all_seed = []\n        ll_baseline_all_trials_post_all_seed = []\n        BICs_baseline_all_trials_post_all_seed = []\n\n\n        # accuracy of different models\n        prior_accs = []\n        prior_accs_single = []\n        prior_label_accs = []\n        prior_label_accs_single = []\n        post_map_accs = []\n        post_map_accs_single = []\n        post_all_accs = []\n        post_all_accs_single = []\n        post_all_accs_seed = []\n        post_all_accs_single_seed = []\n\n\n        # labels (= 1 if model is better fit for subject and 0 if baseline is better fit for subject)\n        prior_labels = []\n        prior_labels_labels = []\n        post_map_labels = []\n        post_all_labels = []\n        post_all_labels_seed = []\n\n        rules_prior = []\n        rules_prior_label = []\n        rules_post_map = []\n        rules_post_all = []\n        rules_post_all_seed = []\n\n        # maps of each sampling procedure (= the hypothesis that occurred most often)\n        map_prior = []\n        map_prior_label = []\n        map_post_map = []\n        map_post_all = []\n        map_post_all_seed = []\n\n        # accuracy of maps\n        map_prior_acc = []\n        map_prior_acc_label = []\n        map_post_map_acc = []\n        map_post_all_acc = []\n        map_post_all_acc_seed = []\n\n\n        # count how often correct rule occurs\n        correct_rule_perc_prior = []\n        correct_rule_perc_prior_labels = []\n        correct_rule_perc_post_map = []\n        correct_rule_perc_post_all = []\n        correct_rule_perc_post_all_seed = []\n\n\n        n_rows = 82\n        n_trials_counter = 1\n\n        # looping over each trial for each subject (n_trials * n_subjects iterations and run mcmc chains)\n        for data in main_data_formatted['data'][:n_rows]:\n\n            Dwin = [.25,.25,0,0,0,.25,.25]\n\n            # getting name and id to create a unique csv file for each mcmc chain for each subject and trial\n            rule_name = main_data_formatted['rule_name'][i]\n            # print(rule_name)\n            rule_string = rules_dict[rule_name]\n            # print(rule_string)\n\n            subj_rule = main_data_formatted['prior_resp'][i]\n            subj_rev_rule = main_data_formatted['post_resp'][i]\n            bv_prior = eval(main_data_formatted['bound_vars'][i])\n            # print(subj_rule)\n            # print(bv_prior)\n\n            correct_rule = rules_dict[rule_name]\n            token_id = main_data_formatted['token_id'][i]\n            n_trials = trial_counts[token_id]\n\n            # getting subjects' prior and posterior 
responses\n prior_response = eval(main_data_formatted['prior'][i])\n posterior_response = eval(main_data_formatted['posterior'][i])\n\n # getting the data\n init_trials = eval(data)[:8] # trials = scenes created by participants\n rev_trials = eval(main_data_formatted['data'][i])[:8]\n generalizations = eval(data)[8:]\n full_data = init_trials + rev_trials # full data used for the third mcmc chain combining trials and generalizations\n\n\n # getting additional training data for evaluation of model\n training_data = rand_scene_creator(correct_rule, n_scenes=0)\n full_training_dat_prior = init_trials + training_data\n label_dat_post_map = generalizations\n full_training_dat_post_map = rev_trials + label_dat_post_map\n full_training_dat_post_all = full_data + training_data\n\n\n prior_probs = get_production_probs_prototype(full_training_dat_prior,'prior',cond='1',feat_only=False)\n Dwin_prior = prior_probs[0]\n feat_probs_prior = prior_probs[1]\n\n\n prob_gen_follow_rule_prior_label = []\n\n map_res_prior_label = []\n\n\n # evaluating the rules based on the generalization data shown to subjects\n for gen in generalizations: # looping over all 8 generalization scenes\n\n\n\n res_prior_label = []\n\n\n\n\n global X # defining a global variable X\n X = [] # for each scene, X will include the objects (i.e., cones) of the scene\n\n # looping over the number of objects (ie cones) in each scene\n for i_3 in range(0, len(gen['ids'])):\n# # print(gen['contact'])\n contact = check_structure(gen['contact'], i_3) # converting misrepresented contact dictionaries into lists (see transform_functions.py for details)\n# # print(contact)\n # getting the properties for each object (triangle / cone) in the scene\n object = {\"id\": gen['ids'][i_3], \"colour\": gen['colours'][i_3] , \"size\": gen['sizes'][i_3], \"xpos\": int(np.round(gen['xpos'][i_3])),\n \"ypos\": int(np.round(gen['ypos'][i_3])), \"rotation\": np.round(gen['rotations'][i_3],1), \"orientation\": compute_orientation(gen['rotations'][i_3])[0],\n \"grounded\": gen['grounded'][i_3], \"contact\": contact}\n\n X.append(object) # appending the object to X which includes all objects for a scene\n# # print(X)\n# # print(ob.exists(lambda x1: ob.forall(lambda x2: ob.and_operator(ob.and_operator(ob.equal(x1,'red','colour'), ob.not_operator(ob.equal(x2, 'red', 'colour'))), ob.greater(x1,x2,'size')), X), X))\n # evaluating all sampled rules against the scenes for each of the different mcmc chains and appending results\n res_prior_label.append(eval(subj_rev_rule))\n# # print(eval(rule))\n# # print(X)\n# # print(ob.forall(lambda x1: ob.not_operator(ob.equal(x1, 'upright', 'orientation')), X))\n # for rule in df_prior_labels['rulestring']:\n# # print(subj_rule)\n\n\n map_res_prior_label.append(eval(subj_rev_rule))\n\n\n # computing the raw probabilities that the scenes follow a rule for each chain\n\n p_follow_rule_prior_label = (1 / len(res_prior_label)) * sum(res_prior_label) # len(res) = number of rules; sum(res) = number of rules matching the scene\n# # print(p_follow_rule_prior)\n print(len(res_prior_label))\n print(sum(res_prior_label))\n prob_gen_follow_rule_prior_label.append(p_follow_rule_prior_label)\n\n\n\n\n map_prior_acc_label.append(compute_acc(gt, map_res_prior_label))\n\n # return 1\n # fitting tau to the data using a generic minimize function from scipy.optimize for all chains\n\n\n # prior chain\n raw_probs_prior_labels.append(prob_gen_follow_rule_prior_label) # only used for single trial examples\n 
raw_probs_all_trials_prior_labels.append(prob_gen_follow_rule_prior_label)\n prior_resp_all_trials_prior_labels.append(posterior_response)\n fitted_tau_prior_label = minimize(get_tau, 1, bounds=[(0.01, 10000.00)],args=(prob_gen_follow_rule_prior_label,posterior_response), method='L-BFGS-B')\n fitted_taus_prior_labels.append(fitted_tau_prior_label.x[0])\n fitted_results_mod_prior_label = fitted_probs(fitted_tau_prior_label.x[0], prob_gen_follow_rule_prior_label, posterior_response)\n select_probs_prior_labels.append(fitted_results_mod_prior_label[0])\n ll_model_prior_labels.append(fitted_results_mod_prior_label[1])\n BICs_model_prior_labels.append(-2 * fitted_results_mod_prior_label[1] + 1 * np.log(8))\n\n\n\n\n\n #\n ####################### ALL TRIALS FOR ONE SUBJECT ##########################################\n # print(len(fitted_taus_prior))\n # print(n_trials)\n # print('ntrialsabove')\n if n_trials_counter % n_trials == 0:\n n_trials_counter = 0\n # print(n_trials_counter)\n\n raw_probs_all_trials_label_1 = [prob for sublist in raw_probs_all_trials_prior_labels[i-n_trials+1:] for prob in sublist]\n prior_resp_all_trials_label_1 = [response for sublist in prior_resp_all_trials_prior_labels[i-n_trials+1:] for response in sublist]\n\n\n overall_fitted_tau_label = minimize(get_tau, 1, bounds=[(0.01, 10000.00)],args=(raw_probs_all_trials_label_1,prior_resp_all_trials_label_1), method='L-BFGS-B')\n fitted_results_all_trials_mod_label = fitted_probs(overall_fitted_tau_label.x[0], raw_probs_all_trials_label_1, prior_resp_all_trials_label_1)\n\n\n\n\n\n # print(len(fitted_taus_prior))\n # print('fittedtausprior')\n if len(fitted_taus_prior) % len(main_data_formatted['rule_name'][:n_rows]) == 0:\n # print('vat')\n # len(main_data_formatted['rule_name'][:n_rows])\n\n # prior\n# # print('potato')\n raw_probs_all_subjects = [prob for sublist in raw_probs_all_trials_prior for prob in sublist]\n print(raw_probs_all_subjects)\n prior_resp_all_subjects = [response for sublist in prior_resp_all_trials_prior for response in sublist]\n\n # labels\n raw_probs_all_subjects_label = [prob for sublist in raw_probs_all_trials_prior_labels for prob in sublist]\n\n prior_resp_all_subjects_label = [response for sublist in prior_resp_all_trials_prior_labels for response in sublist]\n\n fitted_tau_all_subjects_prior_label = minimize(get_tau, 1, bounds=[(0.1, 100.00)],args=(raw_probs_all_subjects_label,prior_resp_all_subjects_label), method='L-BFGS-B')\n print(fitted_tau_all_subjects_prior_label)\n print(raw_probs_all_subjects_label)\n print(prior_resp_all_subjects_label)\n fitted_results_all_subjects_prior_label = fitted_probs(fitted_tau_all_subjects_prior_label.x[0], raw_probs_all_subjects_label,prior_resp_all_subjects_label)\n\n\n fitted_taus_all_subjects_prior_label = []\n fitted_taus_all_subjects_prior_label.append(float(fitted_tau_all_subjects_prior_label.x[0]))\n\n select_probs_all_subjects_prior_label = []\n select_probs_all_subjects_prior_label.append(fitted_results_all_subjects_prior_label[0])\n\n ll_model_all_subjects_prior_label = []\n ll_model_all_subjects_prior_label.append(fitted_results_all_subjects_prior_label[1])\n\n BICs_model_all_subjects_prior_label = []\n BICs_model_all_subjects_prior_label.append(-2 * fitted_results_all_subjects_prior_label[1] + 1 * np.log(8 * len(main_data_formatted['rule_name'][:n_rows])))\n\n # post map\n\n gt_all_subjects = gt * len(fitted_taus_prior) # ground truth\n\n\n prior_mod_select_all_subj_label = hard_max_selections(raw_probs_all_subjects_label)\n\n 
prior_acc_all_subj_label = sum([a and b or not a and not b for a, b in zip(gt_all_subjects, prior_mod_select_all_subj_label)]) / len(prior_mod_select_all_subj_label)\n # print(len(fitted_taus_prior))\n\n\n\n\n\n\n\n\n for trial in range(n_trials):\n\n\n\n fitted_taus_all_trials_prior_labels.append(overall_fitted_tau_label.x[0])\n select_probs_all_trials_prior_labels.append(fitted_results_all_trials_mod_label[0])\n ll_model_all_trials_prior_labels.append(fitted_results_all_trials_mod_label[1])\n BICs_model_all_trials_prior_labels.append(-2 * fitted_results_all_trials_mod_label[1] + 1 * np.log(8*n_trials))\n raw_probs_all_trials_one_list_prior_labels.append(raw_probs_all_trials_prior_labels)\n\n\n\n\n\n # computing accuracy of model predictions using hard maximization for selection probs\n gt_all = gt * n_trials # ground truth\n\n\n prior_mod_select_labels = hard_max_selections(raw_probs_all_trials_label_1)\n\n\n\n prior_acc_label = sum([a and b or not a and not b for a, b in zip(gt_all, prior_mod_select_labels)]) / len(prior_mod_select_labels)\n\n low_bound = 0\n up_bound = 8\n for acc in range(n_trials):\n\n prior_label_accs.append(prior_acc_label)\n\n\n prior_label_accs_single.append(sum([a and b or not a and not b for a, b in zip(gt, prior_mod_select_labels[low_bound:up_bound])]) / 8)\n\n low_bound+=8\n up_bound+=8\n\n\n\n\n\n\n\n i+=1 # pr\n n_trials_counter+=1\n print(i)\n # oceeding to next trial\n\n\n ################ APPENDING ALL DATA TO MAIN DATA FRAME ########################\n\n\n# # print(raw_probs_all_trials_one_list_prior)\n # prior trial specific data\n\n main_data_formatted = main_data_formatted[:n_rows]\n\n\n # main_data_formatted['select_probs_fitted_tau_prior'] = select_probs_prior\n\n #now labels\n # main_data_formatted['raw_probs_prior'] = raw_probs_prior\n main_data_formatted['fitted_tau_prior_label'] = fitted_taus_prior_labels\n main_data_formatted['log_ll_model_prior_label'] = ll_model_prior_labels\n main_data_formatted['BIC_model_prior_label'] = BICs_model_prior_labels\n\n # main_data_formatted['select_probs_fitted_tau_prior'] = select_probs_prior\n\n # prior data across trials\n # main_data_formatted['raw_probs_all_trials_prior'] = raw_probs_all_trials_one_list_prior\n main_data_formatted['fitted_tau_all_trials_prior_label'] = fitted_taus_all_trials_prior_labels\n main_data_formatted['log_ll_model_all_trials_prior_label'] = ll_model_all_trials_prior_labels\n main_data_formatted['BIC_model_all_trials_prior_label'] = BICs_model_all_trials_prior_labels\n\n # main_data_formatted['select_probs_fitted_tau_all_trials_prior'] = select_probs_all_trials_prior\n\n # print('giantpotato')\n # # print(fitted_taus_all_subjects_prior)\n # # print(len(select_probs_all_subjects_prior))\n # # print(select_probs_all_subjects_prior)\n # # print(len(select_probs_all_subjects_prior[0]))\n # # print(ll_model_all_subjects_prior)\n # # print(BICs_model_all_subjects_prior)\n # # print(ll_baseline_all_subjects_prior)\n # # print(BICs_baseline_all_subjects_prior)\n main_data_formatted['fitted_taus_all_subjects_prior_label'] = fitted_taus_all_subjects_prior_label * len(main_data_formatted['rule_name'][:n_rows])\n main_data_formatted['raw_probs_all_subjects_prior_label'] = [raw_probs_all_subjects_label] * len(main_data_formatted['rule_name'][:n_rows])\n# # print(len(select_probs_all_subjects_prior * 450)\n main_data_formatted['ll_model_all_subjects_prior_label'] = ll_model_all_subjects_prior_label* len(main_data_formatted['rule_name'][:n_rows])\n 
main_data_formatted['BICs_model_all_subjects_prior_label'] = BICs_model_all_subjects_prior_label* len(main_data_formatted['rule_name'][:n_rows])\n # main_data_formatted['ll_baseline_all_subjects_prior'] = ll_baseline_all_subjects_prior* len(main_data_formatted['rule_name'][:n_rows])\n # main_data_formatted['BICs_baseline_all_subjects_prior'] = BICs_baseline_all_subjects_prior* len(main_data_formatted['rule_name'][:n_rows])\n\n\n\n\n main_data_formatted['prior_label_acc'] = prior_label_accs\n main_data_formatted['prior_acc_single_label'] = prior_label_accs_single\n\n\n\n main_data_formatted['all_sub_prior_acc_label'] = prior_acc_all_subj_label\n\n\n\n\n main_data_formatted.to_csv('model_results/normative_res_three_rev_rule_only.csv') # writing main data to new csv file including all relevant data for analysis\n\n rep+=1\n\npredicted_selections(main_data_formatted, rules_dict, replacements, trial_counts, n_rep=1)\n","sub_path":"model/main_code/everything/revised_rule_BIC_cond_3.py","file_name":"revised_rule_BIC_cond_3.py","file_ext":"py","file_size_in_byte":29672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"40911178","text":"import logging\n\nfrom plugin import Plugin\n\nlog = logging.getLogger('discord')\n\nclass Welcome(Plugin):\n plugin_name = 'Welcome'\n plugin_version = '0.0.1'\n plugin_description = 'Gera menssagem de ajuda.'\n is_global = True\n is_beta = False\n\n async def on_member_join(self, member):\n message = 'Bem vindo ao server {} :wink:'.format(member.mention)\n destination = member.server\n await self.ene.send_message(destination, message)\n\n async def on_member_remove(self, member):\n message = 'Bye Bye {} :kissing_smiling_eyes:'.format(member.name)\n destination = member.server\n await self.ene.send_message(destination, message)\n","sub_path":"plugins/welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"188006876","text":"__author__ = 'lmxiang'\r\n\r\nimport unittest\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom autotest.lib.common_elements import *\r\nimport autotest.lib.webui_pages.site2cloud.s2c_conn as pages\r\nfrom autotest.lib.test_utils import testcases\r\n\r\n\r\nclass S2C_Conn_Add(unittest.TestCase):\r\n \"\"\"\r\n Add new site2cloud connection\r\n \"\"\"\r\n cases = testcases(__name__)\r\n case_list = cases.data\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n cls.logger = logging.getLogger(pages.__name__)\r\n #cls.logger =avx_logger()\r\n chrome_options = Options()\r\n chrome_options.add_argument(\"--disable-extensions\")\r\n cls.driver = webdriver.Chrome(chrome_options=chrome_options)\r\n #cls.driver.maximize_window()\r\n\r\n def test_s2c_add(self):\r\n self.logger.info(\"Start Adding Site2Cloud Connection\")\r\n\r\n s2c_view = pages.S2C_New(self.driver, login_required=True)\r\n self.logger.info(\"Navigating to Site2Cloud\")\r\n s2c_view.navigate_to_s2c()\r\n time.sleep(10)\r\n\r\n self.logger.info(\"Check if Site2Cloud is present in the current view area...\")\r\n assert s2c_view.match_view_title(),\"Site2Cloud view is not present\"\r\n\r\n self.logger.info(\"Fill site2cloud connection creation data\")\r\n\r\n for case in sorted(self.case_list):\r\n\r\n self.cases.start_test(case)\r\n \"\"\"\r\n if data['HA'].lower() == \"enable\" and s2c_view.is_ha_enabled():\r\n self.logger.info(\"Site2Cloud HA is already enabled. 
No action needed...\")\r\n elif data['HA'].lower() == \"enable\" and not s2c_view.is_ha_enabled():\r\n self.logger.info(\"Site2Cloud HA is disabled. Enable it now...\")\r\n time.sleep(5)\r\n s2c_view.ha_switch_button = \"select\"\r\n toaster_result = s2c_view.s2c_toaster.lower()\r\n assert (\"success\" in toaster_result), \"Fail to enable HA\"\r\n time.sleep(5)\r\n elif data['HA'].lower() == \"disable\" and not s2c_view.is_ha_enabled():\r\n self.logger.info(\"Site2Cloud HA is already disabled. No action needed...\")\r\n elif data['HA'].lower() == \"disable\" and s2c_view.is_ha_enabled():\r\n self.logger.info(\"Site2Cloud HA is enabled. Disable it now...\")\r\n self.logger.info(\"Disable Site2Cloud HA...\")\r\n time.sleep(5)\r\n s2c_view.ha_switch_button = \"deselect\"\r\n time.sleep(10)\r\n toaster_result = s2c_view.s2c_toaster.lower()\r\n assert (\"success\" in toaster_result), \"Fail to disable HA\"\r\n time.sleep(5)\r\n else:\r\n self.logger.error(\"Wrong value for HA. Either 'enable' or 'disable'. Abort...\")\r\n return False\r\n \"\"\"\r\n self.logger.info(\"Click 'Add New' button to create a new site2cloud connection...\")\r\n s2c_view.new_button = \"new\"\r\n time.sleep(10)\r\n\r\n assert s2c_view.fill_conn_fields(**self.cases.case_data),\"Fail to fill in Site2Cloud connection fields\"\r\n s2c_view.ok_button = \"ok\"\r\n time.sleep(10)\r\n toaster_result = s2c_view.s2c_toaster.lower()\r\n assert (self.cases.expected_result['toaster'] in toaster_result),\"Fail to create Site2Cloud connection: \"+toaster_result\r\n time.sleep(10)\r\n\r\n self.cases.end_test(case)\r\n\r\n @classmethod\r\n def tearDownClass(cls):\r\n cls.driver.close()\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()","sub_path":"autotest/frontend/site2cloud/s2c_conn_add.py","file_name":"s2c_conn_add.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"614821927","text":"def mayorDeDos(x, y):\n if x > y:\n return x\n\ndef mayorDeTres(x, y, z):\n if mayorDeDos(x, y) and mayorDeDos(x, z):\n return x\n elif mayorDeDos(y, x) and mayorDeDos(y, z):\n return y\n elif mayorDeDos(z, x) and mayorDeDos(z, y):\n return z\n\nn1=int(input(\"Primer número:\"))\nn2=int(input(\"Segundo número:\"))\nn3=int(input(\"Tercer número:\"))\nprint(mayorDeTres(n1,n2,n3))","sub_path":"Ejercicio_31.py","file_name":"Ejercicio_31.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"415557919","text":"import picamera\nimport time\nimport json\nimport csv\nimport serial\nimport dropbox\nimport moodstocks\nimport pprint\n\n#Serial Setup\nser = serial.Serial('/dev/ttyACM0', 9600, timeout = 1)\n\n#Moodstocks Setup\nms = moodstocks.APIClient(\"ezjhdtc0nugtxkc0s2px\", \"nbIWc1MTgoOi7x67\")\n\n#Dropbox Setup\ndb = 'CyMbtBwXsj0AAAAAAAAABi0yYGga1x0zy9Zn32_QgJHrxLDmFwnLb-LY7yD9T0a3'\nclient = dropbox.client.DropboxClient(db)\n\ndef rotateButton():\n if(GPIO.input(16) == 1):\n return True\n else:\n return False\n\ndef pictureButton():\n if(GPIO.input(18) == 1):\n return True\n else:\n return False\n\ndef startPreview():\n camera.start_preview()\n\ndef stopPreview():\n camera.stop_preview()\n\ndef takePicture():\n time.sleep(2)\n camera.capture('/home/pi/pantory/python/picture.jpg')\n return 'picture.jpg'\n\ndef searchPicture(pictureName): #returns a dictionary data structure\n return ms.search_image(pictureName)\n\ndef mapPicture(msPicName):\n msdb = {'skippy1': 
['Skippy Peanut Butter','Misc.'],\n 'skippy2': ['Skippy Peanut Butter', 'Misc.'],\n 'honeybunch1': 'Honey Bunches of Oats',\n 'honeybunch2': 'Honey Bunches of Oats',\n 'honey1': 'Norcal Honey',\n 'honey2': 'Norcal Honey',\n 'tabasco1': 'Tabasco Hot Sauce',\n 'tabasco2': 'Tabasco Hot Sauce'}\n return msdb[msPicName]\n\ndef appendCSV(category, name, weight, rfid, picname):\n f = client.get_file('/csvtest.csv')\n out = open('/home/pi/csvtest.csv','wb')\n out.write(f.read())\n out.close()\n f = '/home/pi/csvtest.csv'\n with open(f, 'a') as csvfile:\n writeto = csv.writer(csvfile, delimiter=',', quotechar='|', lineterminator='\\n')\n writeto.writerow([category,name,weight,rfid,picname])\n\ndef upload(pictureName):\n csvfile = open('/home/pi/csvtest.csv', 'rb')\n picfile = open('/home/pi/temppic.jpeg', 'rb')\n response = client.file_delete('/csvtest.csv')\n time.sleep(0.5)\n response = client.put_file('/csvtest.csv', csvfile)\n response = client.put_file('/'+pictureName, picfile)\n\ndef rotatePantry():\n ser.write('1'.encode())\n\ndef rotateScan():\n ser.write('2'.encode())\n\ndef getWeight():\n ser.write('5'.encode())\n line = ser.readline()\n data = int(line)\n return data\n\ndef scanRFID():\n ser.write('6'.encode())\n done = ser.readline()\n while done != 'scanned\\r\\n':\n print(done)\n done = ser.readline()\n print(\"stopsig\")\n\ndef getRFID():\n ser.write('7'.encode())\n line = ser.readline()\n data1 = int(line)\n\n line = ser.readline()\n data2 = int(line)\n\n line = ser.readline()\n data3 = int(line)\n \n line = ser.readline()\n data4 = int(line)\n\n rfid = str(data1)+str(data2)+str(data3)+str(data4)\n return rfid\n","sub_path":"python/pantory.py","file_name":"pantory.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"307331335","text":"import tensorflow as tf\nimport numpy as np\n\n# ============================ACTIVATION FUNCTION=====================\ndef lrelu(x):\n #leaky relu\n return tf.maximum(x * 0.2, x)\n\ndef prelu(_x):\n # leaky relu\n alphas = tf.get_variable('alpha', _x.get_shape()[-1], dtype=tf.float32,\n initializer=tf.constant_initializer(0.0), collections=xnet_collections)\n pos = tf.nn.relu(_x)\n neg = alphas * (_x - abs(_x)) * 0.5\n return pos + neg\n\n# ============================WEIGHTS HELPER=========================\ndef write_biases(pf, param_dict, num ):\n nparam = 1\n for param in param_dict:\n\n wtmp = param.reshape([-1])\n for ntmp in wtmp:\n if nparam % num == 0:\n pf.write('\\n')\n pf.write('%f, ' % ntmp)\n nparam += 1\n\ndef write_weights(pf, param_dict, num):\n nparam = 1\n for param in param_dict:\n k_w = param.shape[0]\n\n for o in range(k_w):\n wtmp = param[o]\n if nparam % num == 0:\n pf.write('\\n')\n pf.write('%f, ' % wtmp)\n nparam += 1\n\ndef read_from_txt(txt):\n f = open(txt, 'r')\n\n weights = []\n\n lines = f.readlines()\n for line in lines:\n line = line.strip()\n line = line.split(' ')\n line = [float(tmp) for tmp in line]\n\n weights += line\n\n weights = np.asarray(weights).astype(np.float32).reshape([-1])\n\n return weights\n\n\n\n# ============================VARIABLE HELPER=========================\ndef get_variables_with_name(name, train_only=True, printable=False):\n \"\"\"Get variable list by a given name scope.\n\n Examples\n ---------\n >>> vars = get_variable_with_name('dense', True, True)\n \"\"\"\n\n print(\" [*] geting variables with %s\" % name)\n\n if train_only:\n t_vars = tf.trainable_variables()\n else:\n t_vars = 
tf.global_variables()\n\n\n d_vars = [var for var in t_vars if name in var.name]\n if printable:\n for idx, v in enumerate(d_vars):\n print(\" got {:3}: {:15} {}\".format(idx, v.name, str(v.get_shape())))\n\n return d_vars","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"371616983","text":"#!/usr/bin/env python\n\nimport argparse\nimport logging\nimport os\nimport sys\n\n# CCPP framework imports\nfrom parse_tools import init_log, set_log_level\nfrom metadata_table import MetadataHeader\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--metafile', '-m', action='store',\n help='name of metadata file to convert',\n required=True)\nparser.add_argument('--outputdir', '-o', action='store',\n help='directory where to write the html files',\n required=True)\n\nattributes = [ 'local_name', 'standard_name', 'long_name', 'units',\n 'type', 'dimensions', 'kind', 'intent', 'optional' ]\n\ndef parse_arguments():\n \"\"\"Parse command line arguments.\"\"\"\n args = parser.parse_args()\n filename = args.metafile\n outdir = args.outputdir\n return (filename, outdir)\n\ndef convert_to_html(filename_in, outdir, logger):\n \"\"\"Convert a metadata file into html (one html file for each table)\"\"\"\n if not os.path.isfile(filename_in):\n raise Exception(\"Metadata file {} not found\".format(filename_in))\n logger.info(\"Converting file {} to HTML\".format(filename_in))\n metadata_headers = MetadataHeader.parse_metadata_file(filename_in)\n for metadata_header in metadata_headers:\n filename_out = metadata_header.to_html(outdir, attributes)\n if filename_out:\n logger.info(\" ... wrote {}\".format(filename_out))\n\ndef main():\n # Initialize logging\n logger = init_log('metadata2html')\n set_log_level(logger, logging.INFO)\n # Convert metadata file\n (filename, outdir) = parse_arguments()\n convert_to_html(filename, outdir, logger)\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/metadata2html.py","file_name":"metadata2html.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"70865027","text":"import datetime\nfrom typing import ChainMap\nfrom app import db\nfrom app.models.task import Task\nfrom app.models.goal import Goal\nfrom flask import request, Blueprint, make_response, jsonify\nfrom datetime import datetime\nfrom dotenv import load_dotenv\nimport os\nimport requests\n\n\nload_dotenv()\n\n\ntask_bp = Blueprint(\"task_bp\", __name__, url_prefix=\"/tasks\")\ngoal_bp = Blueprint(\"goal_bp\", __name__, url_prefix=\"/goals\")\n\n\n@task_bp.route(\"\", methods=[\"GET\", \"POST\"])\ndef handle_tasks():\n task_response = []\n if request.method == \"GET\":\n if request.args.get(\"sort\") == \"asc\":\n tasks = Task.query.order_by(Task.title.asc())\n elif request.args.get(\"sort\") == \"desc\":\n tasks = Task.query.order_by(Task.title.desc())\n else:\n tasks = Task.query.all()\n task_response = [task.to_dict() for task in tasks]\n return jsonify(task_response), 200\n elif request.method == \"POST\":\n request_body = request.get_json()\n if \"title\" not in request_body or \"description\" not in request_body or \"completed_at\" not in request_body:\n return make_response({\"details\": \"Invalid data\"}, 400) \n new_task = Task(title=request_body[\"title\"],\n description=request_body[\"description\"], \n completed_at=request_body[\"completed_at\"])\n 
db.session.add(new_task)\n        db.session.commit()\n        return make_response({\"task\": new_task.to_dict()}, 201)\n\n\n@task_bp.route(\"/<task_id>\", methods=[\"GET\", \"DELETE\", \"PUT\"])\ndef handle_task(task_id):\n    task = Task.query.get(task_id)\n    if request.method == \"GET\":\n        if not task:\n            return make_response(f\"Task {task_id} not found\", 404)\n        return {\"task\": task.to_dict()}\n    elif request.method == \"DELETE\":\n        if not task:\n            return make_response(\"\", 404)\n        db.session.delete(task)\n        db.session.commit()\n        return {\"details\": f'Task {task.task_id} \"{task.title}\" successfully deleted'}\n    elif request.method == \"PUT\":\n        if not task:\n            return make_response(\"\", 404)\n        request_body = request.get_json()\n        task.title = request_body[\"title\"] if \"title\" in request_body else task.title\n        task.description = request_body[\"description\"] if \"description\" in request_body else task.description\n        task.completed_at = request_body[\"completed_at\"] if \"completed_at\" in request_body else task.completed_at\n        db.session.commit()\n        return make_response({\"task\": task.to_dict()}, 200)\n\n\n@task_bp.route(\"/<task_id>/mark_complete\", methods=[\"PATCH\"])\ndef handle_task_complete(task_id):\n    task = Task.query.get(task_id)\n    if not task:\n        return make_response(f\"Task {task_id} not found\", 404)\n    task.completed_at = datetime.utcnow()\n    db.session.commit()\n    initiate_slack_message(task)\n    return make_response({\"task\": task.to_dict()}, 200)\n\n\n@task_bp.route(\"/<task_id>/mark_incomplete\", methods=[\"PATCH\"])\ndef handle_task_incomplete(task_id):\n    task = Task.query.get(task_id)\n    if not task:\n        return make_response(f\"Task {task_id} not found\", 404)\n    task.completed_at = None\n    db.session.commit()\n    return make_response({\"task\": task.to_dict()}, 200)\n\n\n@goal_bp.route(\"\", methods=[\"GET\", \"POST\"])\ndef handle_goals():\n    goal_response = []\n    if request.method == \"GET\":\n        goals = Goal.query.all()\n        goal_response = [goal.to_dict() for goal in goals]\n        return jsonify(goal_response), 200\n    elif request.method == \"POST\":\n        request_body = request.get_json()\n        if \"title\" not in request_body:\n            return make_response({\"details\": \"Invalid data\"}, 400) \n        new_goal = Goal(title=request_body[\"title\"])\n        db.session.add(new_goal)\n        db.session.commit()\n        return make_response({\"goal\": new_goal.to_dict()}, 201)\n\n\n@goal_bp.route(\"/<goal_id>\", methods=[\"GET\", \"DELETE\", \"PUT\"])\ndef handle_goal(goal_id):\n    goal = Goal.query.get(goal_id)\n    if request.method == \"GET\":\n        if not goal:\n            return make_response(f\"Goal {goal_id} not found\", 404)\n        return {\"goal\": goal.to_dict()}\n    elif request.method == \"DELETE\":\n        if not goal:\n            return make_response(\"\", 404)\n        db.session.delete(goal)\n        db.session.commit()\n        return {\"details\": f'Goal {goal.id} \"{goal.title}\" successfully deleted'}\n    elif request.method == \"PUT\":\n        if not goal:\n            return make_response(\"\", 404)\n        request_body = request.get_json()\n        goal.title = request_body[\"title\"] if \"title\" in request_body else goal.title\n        db.session.commit()\n        return make_response({\"goal\": goal.to_dict()}, 200)\n\n\n@goal_bp.route(\"/<goal_id>/tasks\", methods=[\"GET\", \"POST\"])\ndef handle_goal_tasks(goal_id):\n    goal = Goal.query.get(goal_id)\n    if goal is None:\n        return make_response(\"\", 404)\n    if request.method == \"POST\":\n        request_body = request.get_json()\n        goal_tasks = request_body[\"task_ids\"]\n        for task_id in goal_tasks:\n            task = Task.query.get(task_id)\n            task.goal_id = goal_id \n        db.session.commit()\n        return make_response({\"id\": int(goal_id), 
\"task_ids\": goal_tasks}, 200)\n elif request.method == \"GET\":\n return goal.dict_with_tasks(), 200\n\n\ndef initiate_slack_message(task):\n SLACKBOT_KEY = os.environ.get(\"SLACK_BOT\")\n CHANNEL = os.environ.get(\"CHANNEL\")\n url = \"https://slack.com/api/chat.postMessage\"\n message = f\"Someone just completed the task {task.title}\"\n query_params = {\"token\":SLACKBOT_KEY, \"channel\": CHANNEL, \"text\": message}\n requests.post(url, data=query_params)\n\n\n\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"243076246","text":"# -*- coding: utf-8 -*-\n\"\"\"\n livereload.server\n ~~~~~~~~~~~~~~~~~\n\n HTTP and WebSocket server for livereload.\n\n :copyright: (c) 2013 by Hsiaoming Yang\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport time\nimport mimetypes\nimport webbrowser\nimport hashlib\nfrom tornado import ioloop\nfrom tornado import escape\nfrom tornado import websocket\nfrom tornado.websocket import WebSocketHandler\nfrom tornado.web import RequestHandler, Application\nfrom tornado.util import ObjectDict\nfrom livereload.task import Task\nfrom ._compat import to_bytes\n\n\nclass LiveReloadHandler(WebSocketHandler):\n waiters = set()\n _last_reload_time = None\n\n def allow_draft76(self):\n return True\n\n def on_close(self):\n if self in LiveReloadHandler.waiters:\n LiveReloadHandler.waiters.remove(self)\n\n def send_message(self, message):\n if isinstance(message, dict):\n message = escape.json_encode(message)\n\n try:\n self.write_message(message)\n except:\n logging.error('Error sending message', exc_info=True)\n\n def poll_tasks(self):\n changes = Task.watch()\n if not changes:\n return\n self.watch_tasks()\n\n def watch_tasks(self):\n if time.time() - self._last_reload_time < 3:\n # if you changed lot of files in one time\n # it will refresh too many times\n logging.info('ignore this reload action')\n return\n\n logging.info('Reload %s waiters', len(self.waiters))\n\n msg = {\n 'command': 'reload',\n 'path': Task.last_modified or '*',\n 'liveCSS': True\n }\n\n self._last_reload_time = time.time()\n for waiter in LiveReloadHandler.waiters:\n try:\n waiter.write_message(msg)\n except:\n logging.error('Error sending message', exc_info=True)\n LiveReloadHandler.waiters.remove(waiter)\n\n def on_message(self, message):\n \"\"\"Handshake with livereload.js\n\n 1. client send 'hello'\n 2. server reply 'hello'\n 3. 
client send 'info'\n\n        http://help.livereload.com/kb/ecosystem/livereload-protocol\n        \"\"\"\n        message = ObjectDict(escape.json_decode(message))\n        if message.command == 'hello':\n            handshake = {}\n            handshake['command'] = 'hello'\n            protocols = message.protocols\n            protocols.append(\n                'http://livereload.com/protocols/2.x-remote-control'\n            )\n            handshake['protocols'] = protocols\n            handshake['serverName'] = 'livereload-tornado'\n            self.send_message(handshake)\n\n        if message.command == 'info' and 'url' in message:\n            logging.info('Browser Connected: %s' % message.url)\n            LiveReloadHandler.waiters.add(self)\n            if not LiveReloadHandler._last_reload_time:\n                if os.path.exists('Guardfile'):\n                    logging.info('Reading Guardfile')\n                    exec(open('Guardfile').read(), {})\n                elif Task.tasks:\n                    # Tasks have been added through library-use.\n                    logging.debug('Not loading any tasks, library-use.')\n                    pass\n                else:\n                    logging.info('No Guardfile')\n                    Task.add(os.getcwd())\n\n                LiveReloadHandler._last_reload_time = time.time()\n                logging.info('Start watching changes')\n                if not Task.start(self.watch_tasks):\n                    ioloop.PeriodicCallback(self.poll_tasks, 800).start()\n\n\nclass IndexHandler(RequestHandler):\n    def initialize(self, root='.'):\n        self._root = os.path.abspath(root)\n\n    def get(self, path='/'):\n        abspath = os.path.join(self._root, path.lstrip('/'))\n        mime_type, encoding = mimetypes.guess_type(abspath)\n        if not mime_type:\n            mime_type = 'text/html'\n\n        self.mime_type = mime_type\n        self.set_header('Content-Type', mime_type)\n        self.read_path(abspath)\n\n    def inject_livereload(self):\n        if self.mime_type != 'text/html':\n            return\n        ua = self.request.headers.get('User-Agent', 'bot').lower()\n        if 'msie' not in ua:\n            self.write('<script src=\"/livereload.js\"></script>')\n\n    def read_path(self, abspath):\n        filepath = abspath\n        if os.path.isdir(filepath):\n            filepath = os.path.join(abspath, 'index.html')\n            if not os.path.exists(filepath):\n                self.create_index(abspath)\n                return\n        elif not os.path.exists(abspath):\n            filepath = abspath + '.html'\n\n        if not os.path.exists(filepath):\n            return self.send_error(404)\n\n        if self.mime_type == 'text/html':\n            with open(filepath, 'r') as f:\n                data = f.read()\n            before, after = data.split('</head>')\n            self.write(before)\n            self.inject_livereload()\n            self.write('</head>')\n            self.write(after)\n        else:\n            with open(filepath, 'rb') as f:\n                data = f.read()\n            self.write(data)\n\n        hasher = hashlib.sha1()\n        hasher.update(to_bytes(data))\n        self.set_header('Etag', '\"%s\"' % hasher.hexdigest())\n\n    def create_index(self, root):\n        self.inject_livereload()\n        files = os.listdir(root)\n        
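# serve a simple generated directory listing when the folder has no index.html\n        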
self.write('<ul>')\n        for f in files:\n            path = os.path.join(root, f)\n            self.write('<li>')\n            if os.path.isdir(path):\n                self.write('<a href=\"%s/\">%s</a>' % (f, f))\n            else:\n                self.write('<a href=\"%s\">%s</a>' % (f, f))\n            self.write('</li>')\n\n        self.write('</ul>
')\n\n\nclass LiveReloadJSHandler(RequestHandler):\n    def initialize(self, port=35729):\n        self._port = port\n\n    def get(self):\n        js = os.path.join(\n            os.path.abspath(os.path.dirname(__file__)), 'livereload.js',\n        )\n        self.set_header('Content-Type', 'application/javascript')\n        with open(js, 'r') as f:\n            for line in f:\n                if '{{port}}' in line:\n                    line = line.replace('{{port}}', str(self._port))\n                self.write(line)\n\n\ndef create_app(port=35729, root='.'):\n    handlers = [\n        (r'/livereload', LiveReloadHandler),\n        (r'/livereload.js', LiveReloadJSHandler, dict(port=port)),\n        (r'(.*)', IndexHandler, dict(root=root)),\n    ]\n    return Application(handlers=handlers)\n\n\ndef start(port=35729, root='.', autoraise=False):\n    try:\n        from tornado.log import enable_pretty_logging\n    except ImportError:\n        from tornado.options import enable_pretty_logging\n\n    logging.getLogger().setLevel(logging.INFO)\n    enable_pretty_logging()\n\n    app = create_app(port, root)\n    app.listen(port)\n\n    print('Serving path %s on 127.0.0.1:%s' % (root, port))\n\n    if autoraise:\n        webbrowser.open(\n            'http://127.0.0.1:%s' % port, new=2, autoraise=True\n        )\n    try:\n        ioloop.IOLoop.instance().start()\n    except KeyboardInterrupt:\n        print('Shutting down...')\n\n\nif __name__ == '__main__':\n    start(8000)\n","sub_path":"livereload/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"161039795","text":"\"\"\" Instance model\n\"\"\"\nfrom django_mongoengine import fields, Document\nfrom mongoengine import errors as mongoengine_errors\n\nfrom core_main_app.commons import exceptions\nfrom core_main_app.commons.regex import NOT_EMPTY_OR_WHITESPACES\n\n\nclass Instance(Document):\n    \"\"\" Represents an instance of a remote project\n    \"\"\"\n    name = fields.StringField(blank=False, unique=True, regex=NOT_EMPTY_OR_WHITESPACES)\n    endpoint = fields.URLField(blank=False, unique=True)\n    access_token = fields.StringField(blank=False)\n    refresh_token = fields.StringField(blank=False)\n    expires = fields.DateTimeField(blank=False)\n\n    @staticmethod\n    def get_all():\n        \"\"\" Return all instances.\n\n        Returns:\n            instance collection\n\n        \"\"\"\n        return Instance.objects().all()\n\n    @staticmethod\n    def get_by_id(instance_id):\n        \"\"\" Return the object with the given id.\n\n        Args:\n            instance_id:\n\n        Returns:\n            Instance (obj): Instance object with the given id\n\n        \"\"\"\n        try:\n            return Instance.objects.get(pk=str(instance_id))\n        except mongoengine_errors.DoesNotExist as e:\n            raise exceptions.DoesNotExist(str(e))\n        except Exception as ex:\n            raise exceptions.ModelError(str(ex))\n\n    @staticmethod\n    def get_by_name(instance_name):\n        \"\"\" Return the object with the given name.\n\n        Args:\n            instance_name:\n\n        Returns:\n            Instance (obj): Instance object with the given name\n\n        \"\"\"\n        try:\n            return Instance.objects.get(name=str(instance_name))\n        except mongoengine_errors.DoesNotExist as e:\n            raise exceptions.DoesNotExist(str(e))\n        except Exception as ex:\n            raise exceptions.ModelError(str(ex))\n\n    def save_object(self):\n        \"\"\" Custom save.\n\n        Returns:\n\n        \"\"\"\n        try:\n            self.check_instance_name()\n            return self.save()\n        except mongoengine_errors.NotUniqueError as e:\n            raise exceptions.NotUniqueError(\"Unable to create the new repository: Not Unique\")\n        except Exception as ex:\n            raise exceptions.ModelError(str(ex))\n\n    def check_instance_name(self):\n        \"\"\" Test if the name is 'Local'.\n\n        Returns:\n\n        \"\"\"\n        if 
self.name.upper() == \"LOCAL\":\n raise exceptions.ModelError(\"By default, the instance named Local is the instance currently running.\")\n\n def clean(self):\n \"\"\" Clean is called before saving\n\n Returns:\n\n \"\"\"\n self.name = self.name.strip()\n","sub_path":"core_federated_search_app/components/instance/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"407376678","text":"from unittest import result\nfrom test.util.postBuilder import PostBuilder\nfrom typing import Dict, List, Optional\nimport unittest\nfrom unittest.mock import Mock, patch\n\nfrom src.Domain.Post.post import Post\nfrom src.Domain.Post.postApiGateway import PostApiGateway\nfrom src.Domain.Post.postRepository import PostRepository\nfrom src.Domain.Post.postsHandler import PostHandler\nfrom src.Domain.Tag.tagHandler import TagHandler\nfrom src.Domain.Post.sortByValues import SortByValues\nfrom src.Domain.Post.directionsValues import DirectionsValues\nfrom src.ServiceApplications.serviceApplicationsPost import ServiceApplicationPost\n\n\nclass ServiceApplicationPostTest(unittest.TestCase):\n \n def setUp(self) -> None:\n self.SOME_TECH_TAG: str = \"tech\"\n self.SOME_CRYPTO_TAG: str = \"crypto\"\n self.SOME_TRAVEL_TAG: str = \"travel\"\n self.SAMPLE_POSTS_BY_TAG: Dict[str, List[Post]] = self._create_sample_posts_by_tags()\n\n\n def test_when_getting_posts_then_all_tags_saved_is_retrived(self):\n some_tags_saved: List[str] = [self.SOME_TECH_TAG, self.SOME_CRYPTO_TAG]\n some_tags_input: str = \",\".join(some_tags_saved)\n self._create_mocks_and_dependecies(some_tags_saved, [], [], None, None)\n\n self.service_application_post.get_posts(some_tags_input, SortByValues.id, DirectionsValues.asc)\n\n self.post_repository.get_tags_saved.assert_called_once()\n\n def test_when_getting_posts_then_sort_posts_and_remove_duplicate_posts_are_called(self):\n some_tags_saved: List[str] = [self.SOME_TECH_TAG, self.SOME_TRAVEL_TAG]\n some_tags_input: str = \",\".join(some_tags_saved)\n some_posts_from_tags: List[Post] = self._get_posts_by_tag(some_tags_input)\n self._create_mocks_and_dependecies(some_tags_saved, some_posts_from_tags, [], some_tags_input, None)\n\n self.service_application_post.get_posts(some_tags_input, SortByValues.id, DirectionsValues.asc)\n\n self.post_handler.remove_duplicate_posts.assert_called_once_with(some_posts_from_tags)\n self.post_handler.sort_posts.assert_called_once_with(some_posts_from_tags, SortByValues.id, DirectionsValues.asc)\n \n def test_when_getting_posts_then_filter_new_tags_and_common_new_are_called(self):\n some_tags_saved: List[str] = [self.SOME_CRYPTO_TAG]\n some_tags_input: str = \",\".join(some_tags_saved)\n some_posts_from_tags: List[Post] = self._get_posts_by_tag(some_tags_input)\n self._create_mocks_and_dependecies(some_tags_saved, some_posts_from_tags, [], some_tags_input, None)\n \n self.service_application_post.get_posts(some_tags_input, None, None)\n\n self.tag_handler.filter_new_tags.assert_called_once_with(some_tags_input, some_tags_saved)\n self.tag_handler.filter_common_tags.assert_called_once_with(some_tags_input, some_tags_saved)\n \n def test_given_empty_posts_from_repository_when_getting_posts_then_posts_are_retrived_from_external_api_only_and_saved_in_repository(self):\n some_tags_saved: List[str] = []\n some_tags_input: str = \"{0},{1}\".format(self.SOME_CRYPTO_TAG, self.SOME_TECH_TAG)\n some_posts_from_repository: List[Post] = []\n 
some_posts_from_external_api: List[Post] = self._get_posts_by_tag(some_tags_input)\n self._create_mocks_and_dependecies(some_tags_saved, some_posts_from_repository, some_posts_from_external_api, None, some_tags_input)\n\n self.service_application_post.get_posts(some_tags_input, SortByValues.id, DirectionsValues.asc)\n\n self.post_repository.get_posts_by_tag.assert_not_called()\n self.assertEqual(len(some_tags_input.split(\",\")), self.post_repository.save_posts.call_count)\n self.assertEqual(len(some_tags_input.split(\",\")), self.post_api_gateway.get_posts.call_count)\n\n def test_given_string_with_new_tags_when_getting_posts_then_posts_are_retrived_from_api_only_and_saved_in_repository(self):\n some_tags_saved: List[str] = [self.SOME_TRAVEL_TAG]\n some_tags_input: str = \"{0},{1}\".format(self.SOME_CRYPTO_TAG, self.SOME_TECH_TAG)\n some_posts_from_repository: List[Post] = self._get_posts_by_tag(self.SOME_TECH_TAG)\n some_posts_from_external_api: List[Post] = self._get_posts_by_tag(some_tags_input)\n self._create_mocks_and_dependecies(some_tags_saved, some_posts_from_repository, some_posts_from_external_api, None, some_tags_input)\n\n self.service_application_post.get_posts(some_tags_input, SortByValues.id, DirectionsValues.asc)\n\n self.post_repository.get_posts_by_tag.assert_not_called()\n self.assertEqual(len(some_tags_input.split(\",\")), self.post_repository.save_posts.call_count)\n self.assertEqual(len(some_tags_input.split(\",\")), self.post_api_gateway.get_posts.call_count)\n\n\n def test_given_string_with_common_tags_when_getting_posts_then_posts_are_retrived_from_post_repository_only(self):\n some_tags_saved: List[str] = [self.SOME_TRAVEL_TAG, self.SOME_TECH_TAG]\n some_tags_input: str = \",\".join(some_tags_saved)\n some_posts_from_repository: List[Post] = self._get_posts_by_tag(some_tags_input)\n some_posts_from_external_api: List[Post] = []\n self._create_mocks_and_dependecies(some_tags_saved, some_posts_from_repository, some_posts_from_external_api, some_tags_input, None)\n\n self.service_application_post.get_posts(some_tags_input, SortByValues.id, DirectionsValues.asc)\n\n self.post_api_gateway.get_posts.assert_not_called()\n self.post_repository.save_posts.assert_not_called()\n self.assertEqual(len(some_tags_input.split(\",\")), self.post_repository.get_posts_by_tag.call_count)\n\n def test_given_string_with_new_and_common_tags_when_getting_posts_then_posts_are_retrived_from_post_repository_and_from_external_api(self):\n some_tags_saved: List[str] = [self.SOME_TECH_TAG, self.SOME_CRYPTO_TAG]\n some_common_tags: str = \",\".join(some_tags_saved)\n some_new_tags: str = self.SOME_TRAVEL_TAG\n some_posts_from_repository: List[Post] = self._get_posts_by_tag(some_common_tags)\n some_posts_from_external_api: List[Post] = self._get_posts_by_tag(some_new_tags)\n some_tags_input: str = some_common_tags + \",\" + some_new_tags\n self._create_mocks_and_dependecies(some_tags_saved, some_posts_from_repository, some_posts_from_external_api, some_common_tags, some_new_tags)\n\n self.service_application_post.get_posts(some_tags_input, SortByValues.id, DirectionsValues.asc)\n\n self.assertEqual(len(some_common_tags.split(\",\")), self.post_repository.get_posts_by_tag.call_count)\n self.assertEqual(len(some_new_tags.split(\",\")), self.post_api_gateway.get_posts.call_count)\n self.assertEqual(len(some_new_tags.split(\",\")), self.post_repository.save_posts.call_count)\n\n @patch.multiple(PostRepository, __abstractmethods__=set())\n @patch.multiple(PostApiGateway, __abstractmethods__=set())\n 
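# patching __abstractmethods__ to an empty set lets the abstract base classes be treated as concrete while the test builds its doubles\n    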
def _create_mocks_and_dependecies(self, \n some_tags_saved: List[str], \n some_posts_from_repository: List[Post], \n some_posts_from_api: List[Post], \n some_common_tags: Optional[str], \n some_new_tags: Optional[str]\n ) -> None:\n self.post_repository: Mock = Mock(PostRepository)\n self.post_repository.get_tags_saved.return_value = some_tags_saved\n self.post_repository.get_posts_by_tag.side_effect = self._get_posts_by_tag\n \n self.post_api_gateway: Mock = Mock(PostApiGateway)\n self.post_api_gateway.get_posts.side_effect = self._get_posts_by_tag\n\n\n self.tag_handler: Mock = Mock(TagHandler)\n self.tag_handler.filter_common_tags.return_value = [] if some_common_tags == None else some_common_tags.split(\",\")\n self.tag_handler.filter_new_tags.return_value = [] if some_new_tags == None else some_new_tags.split(\",\")\n\n posts_combined: List[Post] = some_posts_from_repository + some_posts_from_api\n\n self.post_handler: Mock = Mock(PostHandler)\n self.post_handler.sort_posts.return_value = posts_combined\n self.post_handler.remove_duplicate_posts.return_value = posts_combined\n\n self.service_application_post: ServiceApplicationPost = ServiceApplicationPost(self.post_api_gateway, self.post_repository, self.tag_handler, self.post_handler)\n \n def _get_posts_by_tag(self, tags: Optional[str]) -> List[Post]:\n results: List[Post] = []\n\n if tags == None:\n return results\n \n for tag in tags.split(\",\"):\n results.extend(self.SAMPLE_POSTS_BY_TAG.get(tag))\n \n return results\n \n def _create_sample_posts_by_tags(self) -> Dict[str, List[Post]]:\n postBuilder: PostBuilder = PostBuilder()\n samples: Dict[str, List[Post]] = {}\n\n SOME_POST_TECH: Post = postBuilder.set_id(1).set_author(\"Alpha\").set_author_id(10).set_tags([\"tech\", \"software\", \"jobs\"]).buildPost()\n SOME_POST_DIGITAL_MARKET: Post = postBuilder.set_id(2).set_author(\"Bravo\").set_author_id(20).set_tags([\"tech\", \"Ads\", \"Web\"]).buildPost()\n SOME_POST_CRYPTO: Post = postBuilder.set_id(3).set_author(\"Charlie\").set_author_id(10).set_tags([\"tech\", \"crypto\", \"NFT\", \"ETH\"]).buildPost()\n\n SOME_TRAVEL_POST: Post = postBuilder.set_id(4).set_author(\"Delta\").set_author_id(30).set_tags([\"food\", \"travel\", \"hotel\"]).buildPost()\n SOME_FOOD_POST: Post = postBuilder.set_id(5).set_author(\"Echo\").set_author_id(30).set_tags([\"food\", \"garden\", \"fruits\", \"vegtables\"]).buildPost()\n\n samples[self.SOME_TECH_TAG] = [SOME_POST_TECH, SOME_POST_DIGITAL_MARKET, SOME_POST_CRYPTO]\n samples[self.SOME_CRYPTO_TAG] = [SOME_POST_CRYPTO]\n samples[self.SOME_TRAVEL_TAG] = [SOME_TRAVEL_POST, SOME_FOOD_POST]\n\n return samples","sub_path":"test/ServiceApplications/serviceApplicationsPost_test.py","file_name":"serviceApplicationsPost_test.py","file_ext":"py","file_size_in_byte":9741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"17184974","text":"\"\"\"\nabc-classroom.github\n====================\n\n\"\"\"\n\nimport os\nimport logging\nimport random\nimport string\nimport subprocess\nimport sys\n\nimport github3 as gh3\n\nfrom .utils import input_editor\n\n\ndef _call_git(*args, directory=None):\n cmd = [\"git\"]\n cmd.extend(args)\n try:\n ret = subprocess.run(\n cmd,\n cwd=directory,\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n except subprocess.CalledProcessError as e:\n err = e.stderr.decode(\"utf-8\")\n if not err:\n err = e.stdout.decode(\"utf-8\")\n raise RuntimeError(err) from e\n\n return ret\n\n\ndef remote_repo_exists(org, 
repository, token=None):\n \"\"\"Check if the remote repository exists for the organization.\"\"\"\n\n try:\n g = gh3.login(token=token)\n g.repository(org, repository)\n\n except Exception:\n return False\n\n return True\n\n\ndef check_student_repo_exists(org, course, student, token=None):\n \"\"\"Check if the student has a repository for the course.\n\n It happens that students delete their repository or do not accept the\n invitation to the course. In either case they will not have a repository\n yet.\n \"\"\"\n # temporarily change log level of github3.py as it prints weird messages\n # XXX could be done more nicely with a context manager maybe\n gh3_log = logging.getLogger(\"github3\")\n old_level = gh3_log.level\n gh3_log.setLevel(\"ERROR\")\n\n try:\n g = gh3.login(token=token)\n repository = \"{}-{}\".format(course, student)\n g.repository(org, repository)\n\n except Exception as e:\n raise e\n\n finally:\n gh3_log.setLevel(old_level)\n\n\ndef clone_repo(organization, repo, dest_dir):\n \"\"\"Clone `repository` from `org` into a sub-directory in `directory`.\n Assumes you have ssh keys setup for github (rather than using GitHub API\n token).\"\"\"\n # If ssh it not setup correctly - or however we want to authenticate,\n # we need a\n # friendly message about that\n # We should add some message about what is being cloned here - the url\n # works\n url = \"git@github.com:{}/{}.git\".format(organization, repo)\n print(\"cloning:\", url)\n _call_git(\"-C\", dest_dir, \"clone\", url)\n\n\ndef create_repo(org, repository, token):\n \"\"\"Create a repository in the provided GitHub organization.\"\"\"\n github_obj = gh3.login(token=token)\n organization = github_obj.organization(org)\n print(\n \"Creating new repository {} at https://github.com/{}\".format(\n repository, org\n )\n )\n try:\n organization.create_repository(repository)\n except gh3.exceptions.UnprocessableEntity:\n print(\n \"Error: organization {} already has a repository named {}\".format(\n org, repository\n )\n )\n\n\ndef add_remote(directory, organization, remote_repo, token):\n remote_url = \"https://{}@github.com/{}/{}\".format(\n token, organization, remote_repo\n )\n _call_git(\"remote\", \"add\", \"origin\", remote_url, directory=directory)\n\n\ndef repo_changed(directory):\n \"\"\"Determine if the Git repository in directory is dirty\"\"\"\n ret = _call_git(\"status\", \"--porcelain\", directory=directory)\n return bool(ret.stdout)\n\n\ndef new_branch(directory, name=None):\n \"\"\"Create a new git branch in directory\"\"\"\n if name is None:\n postfix = \"\".join(\n [random.choice(string.ascii_letters) for n in range(4)]\n )\n name = \"new-material-{}\".format(postfix)\n\n _call_git(\"checkout\", \"-b\", name, directory=directory)\n\n return name\n\n\ndef get_commit_message():\n default_message = \"\"\"\n # Please enter the commit message for your changes. Lines starting\n # with '#' will be ignored, and an empty message aborts the commit.\n # This message will be used as commit and Pull Request message.\"\"\"\n message = input_editor(default_message)\n message = \"\\n\".join(\n [\n line\n for line in message.split(\"\\n\")\n if not line.strip().startswith(\"#\")\n ]\n )\n return message\n\n\ndef commit_all_changes(directory, msg=None):\n \"\"\"Run git add, git commit on a given directory. 
Checks git status\n    first and does nothing if no changes.\n    \"\"\"\n    if msg is None:\n        raise ValueError(\"Commit message can not be empty.\")\n    if repo_changed(directory):\n        _call_git(\"add\", \"*\", directory=directory)\n        _call_git(\"commit\", \"-a\", \"-m\", msg, directory=directory)\n    else:\n        print(\"No changes in repository {}; doing nothing\".format(directory))\n\n\ndef init_and_commit(directory, custom_message=False):\n    \"\"\"Run git init, git add, git commit on given directory. Checks git status\n    first and does nothing if no changes.\n    \"\"\"\n    # local git things - initialize, add, commit\n    # note that running git init on an existing repo is safe, so no need\n    # to check anything first\n    git_init(directory)\n    if repo_changed(directory):\n        message = \"Initial commit\"\n        if custom_message:\n            message = get_commit_message()\n            if not message:\n                print(\"Empty commit message, exiting.\")\n                sys.exit(1)\n        commit_all_changes(directory, message)\n    else:\n        print(\"No changes to local repository.\")\n\n\ndef push_to_github(directory, branch=\"master\"):\n    \"\"\"Push `branch` back to GitHub\"\"\"\n    try:\n        _call_git(\n            \"push\", \"--set-upstream\", \"origin\", branch, directory=directory\n        )\n    except RuntimeError as e:\n        raise e\n\n\ndef pull_from_github(directory, branch=\"master\"):\n    \"\"\"Pull `branch` of local repo in `directory` from GitHub\"\"\"\n    try:\n        _call_git(\"pull\", \"origin\", branch, directory=directory)\n    except RuntimeError as e:\n        raise e\n\n\ndef git_init(directory):\n    \"\"\"Initialize git repository\"\"\"\n    _call_git(\"init\", directory=directory)\n\n\n###################################################\n# Methods below are from before the re-factoring.\n# Retaining for reference, but with no guarantee\n# about correct function.\n\n\ndef close_existing_pullrequests(\n    org, repository, branch_base=\"new-material-\", token=None\n):\n    \"\"\"Close all outstanding course material update Pull Requests\n\n    If there are any PRs open in a student's repository that originate from\n    a branch starting with `branch_base` as name and created by the user\n    we are logged in we close them.\n    \"\"\"\n    g = gh3.login(token=token)\n    me = g.me()\n    repo = g.repository(org, repository)\n    for pr in repo.pull_requests(state=\"open\"):\n        origin = pr.head.label\n        origin_repo, origin_branch = origin.split(\":\")\n        if origin_branch.startswith(branch_base) and pr.user == me:\n            pr.create_comment(\n                \"Closed in favor of a new Pull Request to \"\n                \"bring you up-to-date.\"\n            )\n            pr.close()\n\n\ndef create_pr(org, repository, branch, message, token):\n    \"\"\"Create a Pull Request with changes from branch\"\"\"\n    msg_parts = message.split(\"\\n\\n\")\n    if len(msg_parts) == 1:\n        title = msg = msg_parts[0]\n    else:\n        title = msg_parts[0]\n        msg = \"\\n\\n\".join(msg_parts[1:])\n\n    g = gh3.login(token=token)\n    repo = g.repository(org, repository)\n    repo.create_pull(title, \"master\", branch, msg)\n\n\ndef fetch_student(org, course, student, directory, token=None):\n    \"\"\"Fetch course repository for `student` from `org`\n\n    The repository will be cloned into a sub-directory in `directory`.\n\n    Returns the directory in which to find the students work.\n    \"\"\"\n    # use ssh if there is no token\n    if token is None:\n        fetch_command = [\n            \"git\",\n            \"clone\",\n            \"git@github.com:{}/{}-{}.git\".format(org, course, student),\n        ]\n    else:\n        fetch_command = [\n            \"git\",\n            \"clone\",\n            \"https://{}@github.com/{}/{}-{}.git\".format(\n                token, org, course, student\n            ),\n        ]\n    
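# check=True makes a failed clone raise subprocess.CalledProcessError\n    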
subprocess.run(\n fetch_command,\n cwd=directory,\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n return os.path.join(directory, \"{}-{}\".format(course, student))\n","sub_path":"abcclassroom/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"547000867","text":"\"\"\"\nThis script uses the slack users.list endpoint to pull\nall users.\n\nRefer to https://api.slack.com/methods/users.list\n\"\"\"\n\nimport os\nimport logging\nimport pandas as pd\nfrom slack_sdk import WebClient # Import WebClient from Python SDK (github.com/slackapi/python-slack-sdk)\n\n\n\"\"\"\nFUNCTIONS\n\"\"\"\ndef connect(api_token):\n \"\"\" Executes request via Slack SDK and returns json\"\"\"\n client = WebClient(token=api_token)\n logger = logging.getLogger(__name__)\n response = client.users_list()\n\n return response\n\n\ndef format_users_list(response):\n \"\"\" Formats conversation list and creates dataframe \"\"\"\n\n user_list = []\n\n for i in response[\"members\"]:\n user_list.append({\n \"id\": i.get(\"id\"),\n \"email\": i[\"profile\"].get(\"email\"),\n \"title\": i[\"profile\"].get(\"title\"),\n \"first_name\": i[\"profile\"].get(\"first_name\"),\n \"last_name\": i[\"profile\"].get(\"last_name\"),\n \"real_name\": i[\"profile\"].get(\"real_name\"),\n \"tz\": i[\"profile\"].get(\"tz\"),\n \"display_name\": i[\"profile\"].get(\"display_name\"),\n \"is_email_confirmed\": i[\"profile\"].get(\"is_email_confirmed\"),\n \"updated\": i[\"profile\"].get(\"updated\")\n }\n )\n\n user_list_data = pd.DataFrame(user_list)\n\n return user_list_data\n\n\n\"\"\"\nMAIN\n\"\"\"\ndef main():\n\n C4SF_SLACK_API_TOKEN = os.getenv('SLACK_API_TOKEN')\n\n # Connect to slack and get users_list\n response = connect(C4SF_SLACK_API_TOKEN)\n\n # Make destination directory if it doesn't exist\n if not os.path.exists('data'):\n os.makedirs('data')\n\n # Format Users List and export to csv\n user_list_data = format_users_list(response)\n user_list_data.to_csv('data/user_list_data.csv')\n\n\n\nmain()\n\n\n\n","sub_path":"users_list.py","file_name":"users_list.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"303361748","text":"# b07901016\nfrom vpython import *\nimport math\nsizes = [0.06, 0.04] # size\nms = [0.2, 0.12] # mass\nL, k = 0.5, 15\n\nscene = canvas(width = 400, height = 400, center = vec(0.3, 0, 0), align = 'left', background = vec(0.5,0.5,0))\n\nball1 = sphere(pos = vec(-sizes[0], 0, 0), radius = sizes[0], color = color.red)\nball2 = sphere(pos = vec(sizes[1]+ L * 1.1, 0, 0), radius = sizes[1], color = color.red)\n\nball1.v = vec(0, 0, 0)\nball2.v = vec(0, 0, 0)\n\nspring = helix(pos = vec(0, 0, 0), radius = 0.02, thickness = 0.01)\nspring.axis = ball2.pos - vec(sizes[1], 0, 0)\n\ndt = 0.001\nball1_t, ball2_t = 0, 0\nwhile True:\n\trate(1000)\n\ttmp1v, tmp2v = ball1.v, ball2.v\n\tspring_force = -k * (mag(spring.axis) - L) * spring.axis.norm()\n\tball1.a = -spring_force / ms[0]\n\tball1.v += ball1.a * dt\n\tball1.pos += ball1.v * dt\n\n\tball2.a = spring_force / ms[1]\n\tball2.v += ball2.a * dt\n\tball2.pos += ball2.v * dt\n\tspring.pos = ball1.pos + vec(sizes[0], 0, 0)\n\tspring.axis = ball2.pos - spring.pos - vec(sizes[1], 0, 0)\n\n\tball1_t += dt\n\tball2_t += dt\n\tif ball1.pos.x <= -sizes[0]:\n\t\tprint(ball1_t)\n\t\tball1_t = 0\n\tif ball2.pos.x >= sizes[1]+ L * 
1.1:\n\t\tprint(ball2_t)\n\t\tball2_t = 0\n\t\n\n\n\n","sub_path":"First_semester/HW3/hw3_optional.py","file_name":"hw3_optional.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"147252179","text":"import numpy as np\nfrom PIL import Image, ImageQt\nfrom PyQt5.QtGui import QImage, QPixmap\nfrom colour.models.rgb.deprecated import RGB_to_HSV, HSV_to_RGB\n\n\ndef load_colourised_pixmap(src, hue):\n    \"\"\"Returns a colourised image for use in a Qt widget.\n\n    Args:\n        src (str): Path to a stimulus. Stimuli should all be PNGs with an\n            alpha channel; this function will fail otherwise.\n        hue (int): Colour of the image (0-359).\n\n    Return:\n        QPixmap: A Qt object that can be placed in a QLabel and displayed in\n            a widget.\n\n    Notes:\n        `PIL` documentation is misleading; it can't convert to/from HSV. We\n        therefore use the third-party package `colour-science`. This package\n        seems to have some very nice features that I might use in the future,\n        so I'm making it a requirement.\n\n    \"\"\"\n    orig = Image.open(src)\n    rgb = np.array(orig)[..., : -1] / 255.\n    a = orig.split()[-1]  # save this for later\n    hsv = RGB_to_HSV(rgb)\n    hue = hue / 360.  # colour-science normalises all values\n    hsv[..., [0]] = hue\n    rgb = HSV_to_RGB(hsv)\n    new = Image.fromarray((rgb * 255).astype('uint8'), 'RGB')\n    new.putalpha(a)  # reinstate transparency\n    return QPixmap.fromImage(QImage(ImageQt.ImageQt(new)))  # convert for Qt\n\n\ndef colour_mask(shape=256, tile=32):\n    \"\"\"Create a randomly-coloured image to use as a mask for visual stimuli.\n\n    \"\"\"\n    assert shape % tile == 0, '%i not a divisor of %i' % (tile, shape)\n    reps = int(shape / tile)\n    rgb = np.random.randint(0, 256, (reps, reps, 3))\n    rgb = np.repeat(rgb, tile, axis=0)\n    rgb = np.repeat(rgb, tile, axis=1)\n    new = Image.fromarray(rgb.astype('uint8'), 'RGB')\n    return QPixmap.fromImage(QImage(ImageQt.ImageQt(new)))\n\n\nif __name__ == '__main__':\n\n    colour_mask()\n","sub_path":"loocius/tmp/tools/visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"436708272","text":"import unittest\nfrom Online_Book_Store import Online_Book_Store\nfrom Online_Book_Store import Book\nfrom Online_Book_Store import Author\n\nclass OnlineBookStoreTestCase(unittest.TestCase):\n    def test_class_relationships(self):\n        store = Online_Book_Store()\n\n        mugisha = Author(name=\"Joshua Mugisha\", age = 56, nationality=\"Ugandan\")\n        micheal = Author(name=\"Ssemwezi Micheal\", age = 45, nationality='British')\n\n        Once_upon_a_time = Book(title=\"Things fall apart\", author=mugisha, year_of_publication = 1994, genre = \"Adventure\")\n        dark = Book(title='Animal farm', author=micheal, year_of_publication = 2000, genre = \"Science fiction\")\n\n        store.add_book(Once_upon_a_time)\n        store.add_book(dark)\n\n        self.assertEqual(store.books, [Once_upon_a_time, dark])\n\n        self.assertEqual(Once_upon_a_time.author, mugisha)\n        self.assertEqual(dark.author, micheal)\n\n\nclass ObjectsComparisonTestCase(unittest.TestCase):\n    def test_compare_books(self):\n        \"\"\"Books with same author and title should be equal\"\"\"\n        mugisha = Author(name=\"Joshua Mugisha\", age = 56, nationality=\"Uganda\")\n\n        b1 = Book(title=\"Things fall apart\", author=mugisha, year_of_publication = 1994, genre = \"Adventure\")\n        
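# two distinct Book objects built from identical field values\n        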
b2 = Book(title=\"Things fall apart\", author=mugisha, year_of_publication = 1994, genre = \"Adventure\")\n        self.assertEqual(b1, b2)\n\n    def test_compare_authors(self):\n        \"\"\"Authors with same name and nationality should be equal\"\"\"\n        a1 = Author(name=\"Joshua Mugisha\", age = 56, nationality=\"Uganda\")\n        a2 = Author(name=\"Joshua Mugisha\", age = 56, nationality=\"Uganda\")\n\n        self.assertEqual(a1, a2)\n\n\nclass BookGeneratorTestCase(unittest.TestCase):\n\n    def test_search_books_by_title_returns_generator(self):\n        store = Online_Book_Store()\n\n        mugisha = Author(name=\"Joshua Mugisha\", age = 56, nationality=\"Ugandan\")\n        micheal = Author(name=\"Ssemwezi Micheal\", age = 45, nationality='British')\n\n        Once_upon_a_time = Book(title=\"Things fall apart\", author=mugisha, year_of_publication = 1994, genre = \"Adventure\")\n        dark = Book(title='Animal farm', author=micheal, year_of_publication = 2000, genre = \"Science fiction\")\n\n        store.add_book(Once_upon_a_time)\n        store.add_book(dark)\n\n        results_generator = store.search_book(title='Things fall apart')\n        self.assertEqual(next(results_generator), Once_upon_a_time)\n        with self.assertRaises(StopIteration):\n            next(results_generator)\n\n    def test_search_books_by_authors_name(self):\n        store = Online_Book_Store()\n\n        mugisha = Author(name=\"Joshua Mugisha\", age = 56, nationality=\"Ugandan\")\n        micheal = Author(name=\"Ssemwezi Micheal\", age = 45, nationality='British')\n\n        Once_upon_a_time = Book(title=\"Things fall apart\", author=mugisha, year_of_publication = 1994, genre = \"Adventure\")\n        dark = Book(title='Animal farm', author=micheal, year_of_publication = 2000, genre = \"Science fiction\")\n\n        store.add_book(Once_upon_a_time)\n        store.add_book(dark)\n\n        results_generator = store.search_book(author='micheal')\n\n        self.assertEqual(next(results_generator), dark)\n        with self.assertRaises(StopIteration):\n            next(results_generator)\n\n    def test_search_without_title_or_author_raises_error(self):\n        store = Online_Book_Store()\n        with self.assertRaises(AttributeError):\n            next(store.search_book())\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"609289595","text":"import sys\r\n\r\n\r\ndef read_file(filename: str) -> list:\r\n    lines = []\r\n    with open(filename, \"r\") as file:\r\n        for line in file:\r\n            lines.append(line.split('\\t')[0])\r\n\r\n    return lines\r\n\r\n\r\ndef main():\r\n    cal1_list = set(read_file(sys.argv[1]))  # set(): turn the list into a set to remove duplicates\r\n    print('\\n'.join(sorted(cal1_list)))\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n# https://eng-entrance.com/linux-command-split\r\n# A : sort -u cal1.txt\r\n","sub_path":"hwichan/chapter02/knock17.py","file_name":"knock17.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"155793696","text":"import numpy as np\nimport torch\n\n\ndef mixIdx_withEpoch(epoch, len_in_seq, pad_len, mode='pre', givenFW=2, givenBW=2, verbose=False, **kwarg):\n    N = len(len_in_seq)\n    list_out_2_input = []\n    len_in_seq = len_in_seq.cpu().numpy()\n\n    for inst in range(N):\n        one_out_2_input = []\n        mixrate = sampling(epoch, **kwarg)\n        if np.random.random_sample() < mixrate:\n            if mode == 'pre':\n                for i in np.arange(pad_len):\n                    if pad_len-len_in_seq[inst]+givenFW <= i < pad_len-givenBW:\n                        one_out_2_input.append( 1 )\n                    else:\n                        one_out_2_input.append( 0 )\n            elif mode == 'post':\n                for i 
in np.arange(pad_len):\n                    if givenFW <= i < pad_len-len_in_seq[inst]-givenBW:\n                        one_out_2_input.append( 1 )\n                    else:\n                        one_out_2_input.append( 0 )\n            else:\n                raise ValueError\n        else:\n            for i in np.arange(pad_len):\n                one_out_2_input.append( 0 )\n        list_out_2_input.append( one_out_2_input )\n\n    #print( 'Number of seq:', len(list_out_2_input))\n    #print( 'lens: ', [len(list_out_2_input[i]) for i in range(N)] )\n    #print( 'weird case: ', [list_out_2_input[i] for i in range(N) if len(list_out_2_input[i])==1] )\n    if verbose:\n        print( 'Epoch: ', epoch, ', Mix Rate: ', mixrate)\n    return torch.from_numpy( np.array(list_out_2_input) ), mixrate\n\ndef buildMask(len_in_seq, pad_len, givenSet, mode='pre', givenFW=2, givenBW=2, **kwarg):\n    #givenSets = [('ff',5,2),('ff',10,2),('ff',15,2),('ff',20,2),('ff',25,2),\n    #             ('md',5,0),('md',10,0),('md',15,0),('md',20,0),('md',25,0),\n    #             ('bw',2,5),('bw',2,10),('bw',2,15),('bw',2,20),('bw',2,25)]\n    assert isinstance(givenSet, tuple), 'Not proper setting input'\n    t_type, outGivenFW, outGivenBW = givenSet  # ('ff',15,2)\n    N = len(len_in_seq)\n    list_out_2_input = []\n    len_in_seq = len_in_seq.cpu().numpy()\n\n    for inst in range(N):\n        one_out_2_input = []\n        one_len = len_in_seq[inst]\n        givenFW, givenBW = getBorderMargin(one_len, t_type, outGivenFW, outGivenBW, givenFW=givenFW, givenBW=givenBW )\n        if mode == 'pre':\n            for i in np.arange(pad_len):\n                if pad_len-len_in_seq[inst]+givenFW <= i < pad_len-givenBW:\n                    one_out_2_input.append( 1 )\n                else:\n                    one_out_2_input.append( 0 )\n        elif mode == 'post':\n            for i in np.arange(pad_len):\n                if givenFW <= i < pad_len-len_in_seq[inst]-givenBW:\n                    one_out_2_input.append( 1 )\n                else:\n                    one_out_2_input.append( 0 )\n        else:\n            raise ValueError\n\n        list_out_2_input.append( one_out_2_input )\n\n    return torch.from_numpy( np.array(list_out_2_input) )\n\n\ndef getBorderMargin( L, t_type, outGivenFW, outGivenBW, givenFW=2, givenBW=2 ):\n    if outGivenFW < L-2 and outGivenBW < L-2:\n        if t_type=='ff':\n            givenFW=L-outGivenFW\n            givenBW=outGivenBW\n        elif t_type=='bw':\n            givenFW=outGivenFW\n            givenBW=L-outGivenBW\n        else:\n            outGivenSum=outGivenFW\n            if (L+outGivenSum) % 2 == 1:\n                givenFW=L-(L+outGivenSum)/2-1  #L-outGivenFW\n                givenBW=L-(L+outGivenSum)/2\n            else:\n                givenFW=L-(L+outGivenSum)/2  #L-outGivenFW\n                givenBW=L-(L+outGivenSum)/2\n\n    return givenFW, givenBW\n\n'''\n\ndef mixValue_withEpoch(X, pred, i, **kwarg):\n    #print('(getCombined) X.shape:',X.shape )  # (96, 1, 52)\n    #print('(getCombined) np.array(pred).shape:',np.array(pred).shape )  # (96, 1, 52)\n    pred_in = np.squeeze( np.array(pred), axis=(1,) )  # (Y) 2 to t=T-1\n    #print('(getCombined) pred_in.shape:',pred_in.shape )\n    #print('(getCombined) X.shape:',X.shape )\n\n    if np.random.random_sample() < sampling(i, **kwarg):\n        newInp = X  # (FF) 1 to t=T-2 , (BW) t=T to t=3\n    else:\n        newInp = np.vstack( (X[0,:], pred_in[:-1,:]) )  # (pred_FF) 2 to t=T-1, (pred_BW) T-1 to t=2\n\n    #print('(getCombined) newInp.shape:',newInp.shape )\n    return np.expand_dims(newInp, axis=0)\n'''\n\ndef sampling(i, epsilon=0.01, k=0.99, c=0.001, kk=300.):\n    #epsilon=0.01  # minimum amount of truth\n    #k=0.99  # initial value\n    #c=0.001  # slope of the decay. (if c=0.001, it will be 0 at 500 epoch)\n\n    #kk=300.\n    ## sum\n    #newInp = X + pred_in\n\n    # scheduled sampling\n    # S. Bengio et al., Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks, NIPS 2015.
\n # https://arxiv.org/abs/1506.03099\n\n ## linear decay (eps_i = max(epsilon,k-ci)), 0 <= epsilon < 1\n #eps_i=max(epsilon, k - c * i)\n # inverse sigmoid decay (eps_i = k / (k + exp(i/k))), k >= 1 (time index prob 0.5)\n eps_i=kk/(kk+np.exp(i/kk))\n return eps_i ","sub_path":"scheduled_sampling.py","file_name":"scheduled_sampling.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"165798736","text":"# @Author: JeeY\r\n# @Date: 2019-02-01T00:34:28+09:00\r\n# @Last modified by: JY\r\n# @Last modified time: 2019-02-11T09:39:13+09:00\r\n# @License: JY\r\n# @Copyright: JY\r\n\r\n\r\n# @Author: JY\r\n# @Date: 2019-01-24T10:29:08+09:00\r\n# @Filename: generate_raw_data_for_ETRI_parsing_02.py\r\n# @Last modified by: JY\r\n# @Last modified time: 2019-02-11T09:39:13+09:00\r\n# @Copyright: JeeY\r\n\r\nimport time\r\n\r\nsleeptime = 360000\r\n\r\nfile_path_00 = 'd:/Programming/Exercise_RNN/'\r\nfile_path_01 = 'd:/Programming/Corpus/'\r\n\r\nread_file = '구문구조부착문장.sentence'\r\nresult_file = 'result_03.result'\r\n\r\nskip_sent_num = ['21742', '2110', '12032', '13132', '16565', '18086',\\\r\n '22414', '39779', '46049', '124595']\r\n\r\nfull_up_sentence_list = list()\r\nfull_down_sentence_list = list()\r\nfull_result = list()\r\nfull_result_up = list()\r\n\r\ndef handle_one_sentence (f_r):\r\n up_sent_dict = dict()\r\n down_sent_dict = dict()\r\n sentence_0 = list()\r\n sentence_1 = list()\r\n w_switch = 1\r\n\r\n while True:\r\n line = f_r.readline()\r\n if 'Sentence No.' in line:\r\n for num in skip_sent_num:\r\n if num in line:\r\n return None, None, None, None, None\r\n temp_sent_num = line\r\n if '-------' in line:\r\n break\r\n\r\n raw_sentence = f_r.readline()\r\n sentence_0 = raw_sentence.split()\r\n len_0 = len(sentence_0)\r\n f_r.readline()\r\n\r\n line_num_0 = 0\r\n while True:\r\n line = f_r.readline()\r\n if '------' in line:\r\n break\r\n line = line.split()\r\n # print('++ : ', line)\r\n # print('== : ', line[0])\r\n # print(up_sent_dict, '\\n\\n')\r\n up_sent_dict[line_num_0+1] = list()\r\n up_sent_dict[line_num_0+1].append(line[0])\r\n # print(up_sent_dict)\r\n if len(line) == 1:\r\n while True:\r\n line = f_r.readline()\r\n if '======' in line:\r\n return None, None, None, None, None\r\n if ']+' in line[1]:\r\n # print(line[1])\r\n tmp_list = line[1].split(']+')\r\n # print(tmp_list)\r\n for j in tmp_list:\r\n # print(line)\r\n # time.sleep(sleeptime)\r\n if j[-1] == ']':\r\n if '[[' in j:\r\n up_sent_dict[line_num_0+1].append([j[0], j[2:-1]])\r\n # print(up_sent_dict[line[0]])\r\n time.sleep(sleeptime)\r\n else:\r\n temp = j[:-1].split('[')\r\n # print(temp)\r\n up_sent_dict[line_num_0+1].append([temp[0], temp[1]])\r\n # print('******* : ', line[0])\r\n else:\r\n if '[[' in j:\r\n up_sent_dict[line_num_0+1].append([j[0], j[2:]])\r\n else:\r\n temp = j.split('[')\r\n # print(temp)\r\n up_sent_dict[line_num_0+1].append([temp[0], temp[1]])\r\n\r\n # print('\\n')\r\n # print(up_sent_dict)\r\n # print('\\n')\r\n # time.sleep(sleeptime)\r\n\r\n else:\r\n # print(line)\r\n # time.sleep(sleeptime)\r\n j = line[1]\r\n if j[-1] == ']':\r\n if '[[' in j:\r\n up_sent_dict[line_num_0+1].append([j[0], j[2:-1]])\r\n print(up_sent_dict[line[0]])\r\n time.sleep(sleeptime)\r\n else:\r\n temp = j[:-1].split('[')\r\n # print(temp)\r\n up_sent_dict[line_num_0+1].append([temp[0], temp[1]])\r\n else:\r\n if '[[' in j:\r\n up_sent_dict[line_num_0+1].append([j[0], j[2:]])\r\n else:\r\n temp = j.split('[')\r\n 
print(temp, line)\r\n up_sent_dict[line_num_0+1].append([temp[0], temp[1]])\r\n # print('* * * * *\\n')\r\n # print(up_sent_dict)\r\n # print('\\n')\r\n line_num_0 += 1\r\n\r\n if len_0 != line_num_0:\r\n # print(sentence_0)\r\n return None, None, None, None, None\r\n\r\n sentence_1 = f_r.readline().split()\r\n len_1 = len(sentence_1)\r\n f_r.readline()\r\n\r\n line_num_1 = 0\r\n while True:\r\n line = f_r.readline()\r\n if '======' in line:\r\n break\r\n line = line.split()\r\n down_sent_dict[line[0]] = list()\r\n down_sent_dict[line[0]].append(line[1])\r\n # print(line)\r\n # time.sleep(sleeptime)\r\n\r\n if ']+' in line[2]:\r\n tmp_list = line[2].split(']+')\r\n # print(tmp_list)\r\n for j in tmp_list:\r\n if j[-1] == ']':\r\n if '[[' in j:\r\n down_sent_dict[line[0]].append([j[0], j[2:-1]])\r\n print(down_sent_dict[line[0]])\r\n time.sleep(sleeptime)\r\n else:\r\n # print(type(j))\r\n # if j.count('[') > 1:\r\n # print(j)\r\n temp = j[:-1].rsplit('[', 1)\r\n if len(temp) > 2:\r\n print(temp)\r\n # print(temp)\r\n down_sent_dict[line[0]].append([temp[0], temp[1]])\r\n else:\r\n if '[[' in j:\r\n down_sent_dict[line[0]].append([j[0], j[2:]])\r\n else:\r\n # print(type(j))\r\n # if j.count('[') > 1:\r\n # print(j)\r\n temp = j.rsplit('[', 1)\r\n if len(temp) > 2:\r\n print(temp)\r\n # print(temp)\r\n down_sent_dict[line[0]].append([temp[0], temp[1]])\r\n\r\n else:\r\n j = line[2]\r\n if j[-1] == ']':\r\n if '[[' in j:\r\n down_sent_dict[line[0]].append([j[0], j[2:-1]])\r\n print(down_sent_dict[line[0]])\r\n time.sleep(sleeptime)\r\n else:\r\n # print(type(j))\r\n # if j.count('[') > 1:\r\n # print(j)\r\n temp = j[:-1].rsplit('[', 1)\r\n if len(temp) > 2:\r\n print(temp)\r\n # print(temp)\r\n down_sent_dict[line[0]].append([temp[0], temp[1]])\r\n else:\r\n if '[[' in j:\r\n down_sent_dict[line[0]].append([j[0], j[2:]])\r\n else:\r\n # print(type(j))\r\n # if j.count('[') > 1:\r\n # print(j)\r\n temp = j.rsplit('[', 1)\r\n if len(temp) > 2:\r\n print(temp)\r\n # print(temp)\r\n down_sent_dict[line[0]].append([temp[0], temp[1]])\r\n\r\n line_num_1 += 1\r\n\r\n if len_1 != line_num_1:\r\n return None, None, None, None, None\r\n\r\n return up_sent_dict, down_sent_dict, sentence_0, sentence_1, raw_sentence\r\n\r\nfilename_00 = './' + read_file\r\nwith open(filename_00, 'r', encoding='utf-8') as f1:\r\n num = 0\r\n while True:\r\n line = f1.readline()\r\n if not line: break\r\n if '=====' in line:\r\n up_dict, down_dict, up_sent, down_sent, raw_sentence = handle_one_sentence(f1)\r\n # print(up_sent)\r\n # print(down_sent)\r\n # print(up_dict)\r\n # print(down_dict)\r\n\r\n if up_dict != None and down_dict != None:\r\n temp = list()\r\n for i, j in enumerate(up_dict):\r\n a = list()\r\n a.append(int(i+1))\r\n for k, l in enumerate(up_dict[j]):\r\n if k == 0:\r\n a.append(l)\r\n else:\r\n b = list()\r\n for m in l:\r\n b.append(m)\r\n a.append(b)\r\n temp.append(a)\r\n # print(temp)\r\n\r\n full_up_sentence_list.append(temp)\r\n\r\n temp = list()\r\n for i in down_dict:\r\n b = list()\r\n b.append(int(i))\r\n for j, k in enumerate(down_dict[i]):\r\n if j == 0:\r\n b.append(int(k))\r\n else:\r\n c = list()\r\n for l, m in enumerate(k):\r\n if l == 0:\r\n for n in m.split('_'):\r\n c.append(n)\r\n else:\r\n c.append(m)\r\n b.append(c)\r\n temp.append(b)\r\n\r\n full_down_sentence_list.append(temp)\r\n num += 1\r\n\r\nprint(num)\r\nunequal_num = 0\r\nfor n in range(num-1):\r\n up_cur = list(full_up_sentence_list[n])\r\n down_cur = list(full_down_sentence_list[n])\r\n # print(up_cur)\r\n up_words = 
list()\r\n up_s = list()\r\n\r\n for i in up_cur:\r\n up_s.append(i[1])\r\n for k, l in enumerate(i[2:]):\r\n up_words.append(l)\r\n\r\n # print(up_s)\r\n # print('\\n')\r\n####################################################\r\n # for i in up_cur:\r\n # print('$ : ', i)\r\n # print('\\n')\r\n####################################################\r\n #\r\n # for i in up_words:\r\n # up_s.append(i[0])\r\n\r\n for x in up_cur:\r\n if '6.29선언' in x[1] and x[2][0] != '6.29선언':\r\n for y in range(4):\r\n del x[2]\r\n temp = list()\r\n temp.append('6.29선언')\r\n temp.append('사건고유명사')\r\n x.insert(2, temp)\r\n print(x)\r\n # time.sleep(sleeptime)\r\n\r\n for x in up_cur:\r\n test = ['12·12', '숫자수사']\r\n replace = ['12·12', '사건고유명사']\r\n if test in x:\r\n a = x.index(test)\r\n del x[a]\r\n x.insert(a, replace)\r\n if '12·12' in x[1] and x[2][0] == '12':\r\n for y in range(3):\r\n del x[2]\r\n temp = list()\r\n temp.append('12·12')\r\n temp.append('사건고유명사')\r\n x.insert(2, temp)\r\n print(x)\r\n\r\n for x in up_cur:\r\n test = ['5·18', '숫자수사']\r\n replace = ['5·18', '사건고유명사']\r\n if test in x:\r\n a = x.index(test)\r\n del x[a]\r\n x.insert(a, replace)\r\n if '5·18' in x[1] and x[2][0] == '5':\r\n for y in range(3):\r\n del x[2]\r\n temp = list()\r\n temp.append('5·18')\r\n temp.append('사건고유명사')\r\n x.insert(2, temp)\r\n print(x)\r\n\r\n for m,i in enumerate(down_cur):\r\n if i[2] == ['']:\r\n for x in down_cur[:m]:\r\n if down_cur[m][0] < x[1]:\r\n x[1] -= 1\r\n for x in down_cur[m:]:\r\n x[0] -= 1\r\n x[1] -= 1\r\n print('\\n')\r\n for y in down_cur:\r\n print(y)\r\n time.sleep(sleeptime)\r\n if i[2][1] == '긍정지정사':\r\n b = i[0]\r\n for j in down_cur:\r\n if j[0] == (b-1):\r\n continue\r\n else:\r\n if b == j[1]:\r\n j[1] = j[1]-1\r\n\r\n new_list = list()\r\n # print('\\n')\r\n\r\n for j, i in enumerate(down_cur):\r\n adding_num = 0\r\n list_a = list()\r\n\r\n first = int(i[0])\r\n last = int(i[1])\r\n # print('first : ', first)\r\n for q, r in enumerate(i[2:]):\r\n new = list()\r\n if len(r) > 2:\r\n for s in r[:-1]:\r\n new = list()\r\n new.append(first+adding_num)\r\n new.append(first+1+adding_num)\r\n temp3 = list()\r\n temp3.append(s)\r\n new.append(temp3)\r\n new_list.append(new)\r\n adding_num += 1\r\n else:\r\n new = list()\r\n new.append(first+adding_num)\r\n new.append(first+1+adding_num)\r\n temp3 = list()\r\n temp3.append(r[0])\r\n if r[1] == '긍정지정사':\r\n temp3.append(r[1])\r\n new.append(temp3)\r\n new_list.append(new)\r\n adding_num +=1\r\n adding_num -= 1\r\n new_list[-1][1] = last+adding_num\r\n\r\n # print('adding_num : ', adding_num)\r\n for w,m in enumerate(new_list[:-(adding_num+1)]):\r\n # print('^^value : ', new_list[-1][0], m[1], first)\r\n if first < m[1]:\r\n m[1] += adding_num\r\n for w,m in enumerate(down_cur[j:]):\r\n m[0] += adding_num\r\n m[1] += adding_num\r\n\r\n new_list[-1][1] = 0\r\n\r\n####################################################\r\n # print('\\n\\n')\r\n # for z in new_list:\r\n # print(z)\r\n # print('\\n')\r\n####################################################\r\n\r\n temp_list = list(new_list)\r\n new_list = list()\r\n for i,j in enumerate(temp_list):\r\n if '긍정지정사' in j[2]: ## I have to use i+1\r\n # print('** j : ', j, i)\r\n new_list[-1][1] = j[1]\r\n if new_list[-1][-1][0] == j[2][0]:\r\n del new_list[-1][-1]\r\n for z in j[2:]:\r\n new_list[-1].append(z)\r\n for y in temp_list[i:]:\r\n y[0] -= 1\r\n y[1] -= 1\r\n for x in new_list:\r\n if x[1] > new_list[-1][0]:\r\n # print(new_list, i)\r\n x[1] -= 1\r\n else:\r\n new_list.append(j)\r\n del 
temp_list\r\n\r\n####################################################\r\n # print('\\n\\n')\r\n # for z in new_list:\r\n # print(z)\r\n # print('\\n')\r\n####################################################\r\n\r\n del up_words, down_cur\r\n new_list_2 = list()\r\n # abs_num = 0\r\n\r\n a = int()\r\n b = int()\r\n for i in up_cur:\r\n a += len(i[2:])\r\n for i in new_list:\r\n b += len(i[2:])\r\n # print('\\n++ : ', a, b, '\\n\\n')\r\n if a != b:\r\n unequal_num += 1\r\n print('\\n', 'skip this sentence__!!!')\r\n print(up_s, '\\n')\r\n continue\r\n # time.sleep(sleeptime)\r\n\r\n\r\n for i, j in enumerate(up_cur):\r\n temp = list()\r\n if len(j[2:]) == len(new_list[0][2:]):\r\n temp.append(new_list[0][0])\r\n temp.append(new_list[0][1])\r\n for x in j[2:]:\r\n temp.append(x)\r\n new_list_2.append(temp)\r\n del new_list[0]\r\n\r\n else:\r\n anum = len(j[2:])\r\n head = new_list[0][0]\r\n while (anum-1):\r\n anum -= len(new_list[0][2:])\r\n for y in new_list:\r\n y[0] -= 1\r\n y[1] -= 1\r\n tail = new_list[1][1]\r\n for z in new_list_2:\r\n if int(new_list[1][0]) < int(z[1]):\r\n z[1] -= 1\r\n del new_list[0]\r\n\r\n del new_list[0]\r\n temp.append(head)\r\n temp.append(tail)\r\n for x in j[2:]:\r\n temp.append(x)\r\n new_list_2.append(temp)\r\n\r\n new_list_2[-1][1] = 0\r\n\r\n####################################################\r\n # print(up_s)\r\n # for z in new_list_2:\r\n # print(z)\r\n # print('\\n')\r\n####################################################\r\n\r\n full_result_up.append(up_s)\r\n full_result.append(new_list_2)\r\n####################################################\r\n # print('\\n')\r\n # print('hello, world~!~! ', n, ' th line complete!!!')\r\n # print('\\n\\n\\n')\r\n####################################################\r\n # if '생산관계를' in up_s:\r\n # time.sleep(sleeptime)\r\n # if n == 5:\r\n # time.sleep(sleeptime)\r\n\r\nprint('\\n\\n\\n')\r\nprint('unequal # : ', unequal_num)\r\nprint('up sent length : ', len(full_result_up))\r\nprint('word list lendgth : ', len(full_result))\r\nprint('\\n\\n\\n')\r\n# for x,y in enumerate(full_result):\r\n# print(full_result_up[x])\r\n# print(y)\r\n# print('\\n')\r\n\r\nwith open(result_file, 'w', encoding='utf-8') as f:\r\n for i,j in enumerate(full_result):\r\n a = ' '.join(full_result_up[i])\r\n f.write(a + '\\n')\r\n for k in j:\r\n f.write(str(k[0]) +'\\t'+ str(k[1]) +'\\t')\r\n for l in k[2:]:\r\n b = ' '.join(l)\r\n f.write(b +'\\t')\r\n f.write('\\n')\r\n f.write('\\n')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n## endl\r\n","sub_path":"sourcefiles/parser/generate_raw_data_for_ETRI_parsing_03.py","file_name":"generate_raw_data_for_ETRI_parsing_03.py","file_ext":"py","file_size_in_byte":17016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"483543194","text":"\n\nfrom xai.brain.wordbase.nouns._mouthpiece import _MOUTHPIECE\n\n#calss header\nclass _MOUTHPIECES(_MOUTHPIECE, ):\n\tdef __init__(self,): \n\t\t_MOUTHPIECE.__init__(self)\n\t\tself.name = \"MOUTHPIECES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"mouthpiece\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_mouthpieces.py","file_name":"_mouthpieces.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"522969745","text":"'''\nCreated on 2016-05-03\n\n@author: jasonszang\n'''\n\nUNKNOWN_IDX = 0\nUNKNOWN_IDX_TOKEN = \"\"\nSECTION_ONEHOT_DICT = 
'[ONEHOT_DICT]\n'\nSECTION_VOCABULARY_WORD_FREQ = '[VOCABULARY_WORD_FREQ]\n'\nSECTION_PREPEND_APPEND = '[PREPEND_APPEND]\n'\n\nclass OnehotDict(object):\n    '''\n    Dict-like object for encoding lists containing elements of arbitrary types into one-hot\n    index. Intended for text-preprocessing purposes.\n    '''\n\n    def __init__(self, mapping=None):\n        '''\n        Constructor\n        '''\n        if (mapping is None):\n            self.mapping = dict()\n            self.rev_mapping = dict()\n            self.max_index = UNKNOWN_IDX\n            self.mapping[UNKNOWN_IDX_TOKEN] = UNKNOWN_IDX\n            self.rev_mapping[UNKNOWN_IDX] = UNKNOWN_IDX_TOKEN\n\n        else:\n            self.set_mapping(mapping)\n\n    def set_mapping(self, mapping):\n        '''\n        Set this one-hot dict to use a provided tok-to-idx mapping.\n        Index must be continuous positive integers from 1.\n        '''\n        # Sanity check for provided mapping\n        for key in mapping.keys():\n            idx = mapping[key]\n            if (type(idx) is not int) or (idx <= 0):\n                raise ValueError(\"Invalid mapping: one-hot index must be positive integers\")\n        if (max(mapping.values()) != len(mapping.keys())):\n            raise ValueError(\"Invalid mapping: discontinuous indexes\")\n\n        self.mapping = mapping\n        self.rev_mapping = dict()\n        for i in self.mapping.keys():\n            self.rev_mapping[self.mapping[i]] = i\n        self.max_index = max(self.rev_mapping.keys())\n        self.mapping[UNKNOWN_IDX_TOKEN] = UNKNOWN_IDX\n        self.rev_mapping[UNKNOWN_IDX] = UNKNOWN_IDX_TOKEN\n\n    def encode_token(self, tok, update_mapping=False):\n        '''\n        Encode a single token.\n\n        update_mapping: whether to update vocabulary mapping when encountering a new token\n        '''\n        newindex = UNKNOWN_IDX\n        if tok in self.mapping:\n            newindex = self.mapping[tok]\n        elif update_mapping:\n            self.max_index += 1\n            self.mapping[tok] = self.max_index\n            self.rev_mapping[self.max_index] = tok\n            newindex = self.max_index\n        return newindex\n\n    def encode_sentence(self, toklist, update_mapping=False):\n        '''\n        Encode a list, or sentence, to a list of one-hot indexes.\n\n        update_mapping: whether to update vocabulary mapping when encountering a new token\n        '''\n        idxlist = list()\n        for tok in toklist:\n            idxlist.append(self.encode_token(tok, update_mapping))\n        return idxlist\n\n    def encode_text(self, text, update_mapping=False):\n        '''\n        Convert a text, which is a list of lists of tokens, into a list of lists of one-hot indexes.\n\n        update_mapping: whether to update vocabulary mapping when encountering a new token\n        '''\n        idxtext = list()\n        for line in text:\n            idxtext.append(self.encode_sentence(line, update_mapping))\n        return (idxtext)\n\n    def decode_token(self, idx):\n        '''\n        Decode a single index. OOV tokens (encoded as UNKNOWN_IDX) are lost\n        and decoded to UNKNOWN_IDX_TOKEN.\n        '''\n        return self.rev_mapping[idx]  # let KeyError get out\n\n    def decode_sentence(self, idxlist):\n        '''\n        Decode a list of indexes. OOV tokens (encoded as UNKNOWN_IDX) are lost\n        and decoded to UNKNOWN_IDX_TOKEN.\n        '''\n        toklist = list()\n        for idx in idxlist:\n            toklist.append(self.rev_mapping[idx])  # let KeyError get out\n        return toklist\n\n    
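# example: a fresh OnehotDict encodes ['a', 'b', 'a'] (with update_mapping=True) as [1, 2, 1] and decodes it back; with update_mapping=False unseen tokens map to UNKNOWN_IDX (0)\n    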
def decode_text(self, idxtext):\n        '''\n        Decode an indexed text. OOV tokens (encoded as UNKNOWN_IDX) are lost\n        and decoded to UNKNOWN_IDX_TOKEN.\n        '''\n        text = list()\n        for idxlist in idxtext:\n            text.append(self.decode_sentence(idxlist))\n        return text\n\n    def to_string(self):\n        '''\n        Save a one-hot dict to string which can be saved to files and loaded later.\n        '''\n        buf = list()\n        for i in range(1, self.max_index + 1):\n            buf.append(\"%d\\t%s\\n\" % (i, self.rev_mapping[i]))\n        return ''.join(buf)\n\n    def from_string(self, s):\n        '''\n        Load from a previously saved string.\n        '''\n        lines = s.rstrip().split('\\n')\n        mapping = dict()\n        for line in lines:\n            if len(line.strip()) == 0:\n                continue\n            idxstr, tok = line.split('\\t')\n            mapping[tok] = int(idxstr)\n        self.set_mapping(mapping)\n\nclass VocabularyDict(object):\n    '''\n    Utility class for preprocessing whole tokenized texts into one-hot representations\n    while recording its vocabulary, or with an already prepared vocabulary record.\n    '''\n\n    def __init__(self, mapping=None, wordfreq=None, threshold=0, mask=UNKNOWN_IDX_TOKEN):\n        '''\n        Constructor\n        '''\n        self.onehot_dict = OnehotDict(mapping)\n        if wordfreq is None:\n            self.wordfreq = dict()\n        else:\n            self.wordfreq = wordfreq\n        self.threshold = threshold\n        self.mask = mask\n        self.prepend_toks = []\n        self.append_toks = []\n        self.trained = False\n\n        self.onehot_dict.encode_token(mask, update_mapping=True)\n\n    def set_prepend(self, toks):\n        '''\n        Set the list of tokens that should be prepended to every line of the text.\n        '''\n        self.prepend_toks = toks\n        # To make prepend toks appear in front of new vocabulary mappings for some\n        # eye pleasing\n        self.onehot_dict.encode_sentence(toks, update_mapping=True)\n\n    def set_append(self, toks):\n        '''\n        Set the list of tokens that should be appended after every line of the text.\n        '''\n        self.append_toks = toks\n        # To make append toks appear in front of new vocabulary mappings for some\n        # eye pleasing\n        self.onehot_dict.encode_sentence(toks, update_mapping=True)\n\n    def train_vocabulary(self, strtext):\n        if self.trained:\n            raise ValueError('VocabularyDict already trained')\n        lines = strtext.replace('\\r\\n', '\\n').split('\\n')\n        for line in lines:\n            toks = line.split()\n            for tok in toks:\n                if tok in self.wordfreq:\n                    self.wordfreq[tok] += 1\n                else:\n                    self.wordfreq[tok] = 1\n        toktext = self.__get_masked_toktext(lines)\n        self.onehot_dict.encode_text(toktext, update_mapping=True)\n        self.trained = True\n\n    def encode_text(self, strtext, mask_low_freq=False):\n        if not self.trained:\n            raise ValueError('VocabularyDict not trained')\n        lines = strtext.replace('\\r\\n', '\\n').split('\\n')\n        toktext = list()\n        if mask_low_freq:\n            toktext = self.__get_masked_toktext(lines)\n        else:\n            for line in lines:\n                toks = line.split()\n                if (len(toks) != 0):\n                    toktext.append(self.prepend_toks + toks + self.append_toks)\n        idxtext = self.onehot_dict.encode_text(toktext, update_mapping=False)\n        return idxtext\n\n    def decode_text(self, idxtext):\n        if not self.trained:\n            raise ValueError('VocabularyDict not trained')\n        return self.onehot_dict.decode_text(idxtext)\n\n    
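# tokens whose corpus frequency is below self.threshold get replaced by the mask token\n    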
toks_filtered + self.append_toks)\n        return toktext\n    \n    def to_string(self):\n        if not self.trained:\n            raise ValueError('VocabularyDict not trained')\n        buf = list()\n        buf.append(SECTION_ONEHOT_DICT)\n        buf.append(self.onehot_dict.to_string())\n        buf.append(SECTION_VOCABULARY_WORD_FREQ)\n        for tok in self.wordfreq.keys():\n            buf.append('%s\\t%d\\n' % (tok, self.wordfreq[tok]))\n        buf.append(SECTION_PREPEND_APPEND)\n        buf.append('PREPEND:\\t')\n        buf.append('\\t'.join(self.prepend_toks))\n        buf.append('\\n')\n        buf.append('APPEND:\\t')\n        buf.append('\\t'.join(self.append_toks))\n        buf.append('\\n')\n        return ''.join(buf)\n    \n    def from_string(self, s):\n        if self.trained:\n            raise ValueError('VocabularyDict already trained')\n        self.wordfreq = dict()\n        s = s.split(SECTION_ONEHOT_DICT)[1]\n        s_onehot, s = s.split(SECTION_VOCABULARY_WORD_FREQ)\n        s_vocab, s_prepend_append = s.split(SECTION_PREPEND_APPEND)\n        self.onehot_dict.from_string(s_onehot)\n        lines = s_vocab.rstrip().split('\\n')\n        for line in lines:\n            if len(line.strip()) == 0:\n                continue\n            tok, freqstr = line.split('\\t')\n            self.wordfreq[tok] = int(freqstr)\n        line_prepend, line_append = s_prepend_append.rstrip().split('\\n')\n        self.prepend_toks = line_prepend.split('\\t')[1:]\n        self.append_toks = line_append.split('\\t')[1:]\n        \n        self.trained = True\n        \n# TODO: Doc of VocabularyDict\n","sub_path":"deeplearning/util/onehot.py","file_name":"onehot.py","file_ext":"py","file_size_in_byte":9784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"287854536","text":"def query_info(node_list: list, influx: object, start: str, end: str, interval: str, value: str) -> dict:\n    json_data = {}\n\n    node_data = query_node_info(node_list, influx, start, end, interval, value)\n\n    job_list = query_job_list(influx, start, end)\n    job_data = query_job_info(influx, job_list)\n\n    json_data.update({\n        \"node_data\": node_data,\n        \"job_data\": job_data\n    })\n\n    return json_data\n\ndef query_node_info(node_list: list, influx: object, start: str, end: str, interval: str, value: str) -> dict:\n    \"\"\"\n    Query node information\n    \"\"\"\n    # should be configurable\n    json_data = {}\n\n    try:\n        measurement = \"cluster_unified_metrics\"\n        fields = [\"CPU1_temp\", \"CPU2_temp\", \"cpuusage\", \"fan1_speed\", \"fan2_speed\", \"fan3_speed\", \"fan4_speed\", \"inlet_temp\", \"jobID\", \"memoryusage\", \"powerusage_watts\"]\n        \n        for node in node_list:\n            json_data[node] = {}\n            for field in fields:\n                node_sql = node_sql_gen(field, measurement, node, start, end, interval, value)\n                node_data = influx.get(node_sql)\n                # jobID is stored as a string in influxdb\n                if field == \"jobID\":\n                    for item in node_data:\n                        job_list_str = item['distinct']\n                        job_list = job_list_str.split(',')\n                        item['distinct'] = job_list\n                json_data[node][field] = node_data\n        \n    except Exception as err:\n        print(err)\n\n    return json_data\n\ndef query_job_list(influx: object, start: str, end: str) -> list:\n    # Get all jobs running during the time range; should be configurable\n    job_set = set()\n    \n    try: \n        measurement = \"Current_Jobs_ID\"\n        field = \"jobs_list\"\n\n        job_list_sql = list_sql_gen(field, measurement, start, end)\n        job_list_data = influx.get(job_list_sql)\n        for item in job_list_data:\n            job_list_str = item['distinct']\n            id_list = job_list_str.split(',')\n            for job_id in id_list:\n                if job_id not in job_set:\n                    job_set.add(job_id)\n        \n    except Exception as err:\n        print(err)\n    \n    job_list = list(job_set)\n\n    return job_list\n\ndef query_job_info(influx: object, job_list: list) -> 
dict:\n    \"\"\"\n    Query job information\n    \"\"\"\n    json_data = {}\n    try:\n        fields = [\"startTime\", \"submitTime\", \"user\"]\n        # fields = [\"start_time\", \"submit_time\", \"user_name\", \"finish_time\"]\n\n        for job_id in job_list:\n            json_data[job_id] = {}\n            job_info_sql = job_sql_gen(job_id)\n            job_info_data = influx.get(job_info_sql)\n            for field in fields:\n                if field == \"startTime\":\n                    re_field = \"start_time\"\n                elif field == \"submitTime\":\n                    re_field = \"submit_time\"\n                else:\n                    re_field = \"user_name\"\n                json_data[job_id][re_field] = job_info_data[0][field]\n\n    except Exception as err:\n        print(err)\n\n    return json_data\n\ndef node_sql_gen(field: str, measurement: str, host: str, start: str, end: str, interval: str, value: str) -> str:\n    \"\"\"\n    Generate influxdb SQL for retrieving metrics from 'cluster_unified_metrics'\n    \"\"\"\n    if field == \"jobID\":\n        return(\"SELECT DISTINCT(jobID) FROM \" + measurement + \" WHERE host = '\" + host + \"' AND time >= '\" + start + \"' AND time < '\" + end + \"' GROUP BY time(\" + interval + \") fill(null)\")\n    else:\n        return (\"SELECT \" + value + \"(\" + field + \") FROM \" + measurement + \" WHERE host = '\" + host + \"' AND time >= '\" + start + \"' AND time < '\" + end + \"' GROUP BY time(\" + interval + \")\")\n\ndef list_sql_gen(field: str, measurement: str, start: str, end: str) -> str:\n    \"\"\"\n    Generate influxdb SQL for retrieving jobs running during the time range\n    \"\"\"\n    return(\"SELECT DISTINCT(\" + field + \") FROM \" + measurement + \" WHERE time >= '\" + start + \"' AND time < '\" + end + \"'\")\n\ndef job_sql_gen(measurement: str) -> str:\n    return (\"SELECT * FROM \" + measurement + \" ORDER BY time DESC LIMIT 1\")","sub_path":"tools/tests/MBapi/query_db.py","file_name":"query_db.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"458505714","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport gc\nimport json\nimport logging\nimport os\nfrom glob import glob\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import mean_squared_error, accuracy_score\nfrom sklearn.model_selection import (KFold, GroupKFold, StratifiedKFold,\n                                     TimeSeriesSplit)\n\n\ndef rmse(x, y):\n    \"\"\"\n    Root Mean Squared Error\n    \"\"\"\n\n    return np.sqrt(mean_squared_error(x, y))\n\n\nclass CVTrainer(object):\n    \"\"\"\n    Base class for ML model training, especially for GBDT (Gradient Boosting\n    Decision Tree).\n\n    This class includes the following features.\n    * CV training: cross validation with basic strategy.\n    * Out-of-fold: create oof predictions for validation.\n    * Test: predict test data for submission.\n\n    Parameters\n    ----------\n    model: BaseModel\n        Custom model for datascience\n\n    results_path: str\n        Path to results data directory\n\n    hyperparameter: dict\n        Model parameters from json file\n\n    train_config: dict\n        Training parameters from json file\n\n    n_splits: int\n        Number of cv splits\n\n    split_seed: int\n        Random seed for k-fold\n\n    cv_policy: str\n        Cross Validation splitting policy, \n        ex) 'stratified', 'groupcv', or 'time'\n    \"\"\"\n\n    def __init__(self, model, results_path=None, hyperparameter={},\n                 train_config={}, n_splits=10, split_seed=0,\n                 cv_policy='groupcv', save_file_name='', **kwargs):\n\n        self.logger = logging.getLogger('.' 
+ __name__)\n\n # Model\n self.model = model\n\n # Parameters\n self.results_path = results_path\n self.n_splits = n_splits\n self.split_seed = split_seed\n self.cv_policy = cv_policy\n self.hyperparameter = hyperparameter\n self.train_config = train_config\n self.save_file_name = save_file_name\n\n # Path\n self.output_path = ''\n self.feature_file_base = ''\n self.target_file_base = ''\n self.suffix_train = ''\n self.suffix_test = ''\n\n # Data\n self.feature_train = None\n self.target_train = None\n self.feature_test = None\n self.target_test = None\n\n # metric function\n self.metric_func = accuracy_score\n\n # CV result list\n self.cv_list = []\n\n # Out-of-fold prediction for validation\n self.target_train_oof = np.array([])\n self.oof_score = 0\n\n # Saving flag\n self.do_save_model = False\n\n # Set other keyword args\n self.__dict__.update(kwargs)\n\n def read_data(self, mode='train'):\n \"\"\"\n Read from feather file\n\n Parameters\n ----------\n mode: str\n Selection mode ('train' or 'test')\n \"\"\"\n\n # Path configuration\n if mode == 'train':\n feature_file = self.feature_file_base.format(self.suffix_train)\n target_file = self.target_file_base.format(self.suffix_train)\n elif mode == 'test':\n feature_file = self.feature_file_base.format(self.suffix_test)\n target_file = self.target_file_base.format(self.suffix_test)\n\n feature_path = os.path.join(self.output_path, feature_file)\n target_path = os.path.join(self.output_path, target_file)\n\n # Read from file\n if mode == 'train':\n self.feature_train = pd.read_feather(feature_path)\n self.target_train = pd.read_csv(target_path)\n elif mode == 'test':\n self.feature_test = pd.read_feather(feature_path)\n\n # You may not have target for test data\n try:\n self.target_test = pd.read_csv(target_path)\n except FileNotFoundError:\n self.logger.info('Target for test data is not found.')\n self.target_test = None\n else:\n raise ValueError('Select appropriate mode')\n\n def train_cv(self, do_save_model=True, cv_policy=None):\n \"\"\"\n Train ML model with cross validation.\n\n You should choose the appropriate cross validation policy,\n ex: Stratified k-fold, Group CV, Time Series CV\n\n Parameters\n ----------\n do_save_model: bool, optional\n Whether save ML model\n\n cv_policy: str, cv_policy\n Select cross validation policy\n \"\"\"\n\n self.do_save_model = do_save_model\n\n if cv_policy is not None:\n self.cv_policy = cv_policy\n\n # Initialize settings\n self.cv_list = []\n self.target_train_oof = np.zeros(len(self.target_train))\n\n # Select cv split policy, and train\n if self.cv_policy == 'normal':\n self._normal_cv()\n elif self.cv_policy == 'stratified':\n self._stratified_cv()\n elif self.cv_policy == 'groupcv':\n self._group_cv()\n elif self.cv_policy == 'time':\n self._time_series_cv()\n else:\n raise ValueError('{} is not implemented'.format(self.cv_policy))\n\n # Out-of-fold validation\n self.oof_score = self.metric_func(\n self.target_train, self.target_train_oof)\n self.logger.info(\n 'Out-of-fold validation: {:.4f}'.format(self.oof_score))\n\n def _normal_cv(self):\n \"\"\"\n Body function for normal k-fold\n \"\"\"\n\n kfold = KFold(n_splits=self.n_splits, shuffle=True,\n random_state=self.split_seed)\n for cv, (trn_idx, val_idx) in enumerate(kfold.split(\n self.feature_train, self.target_train)):\n self._cv_loop(cv, trn_idx, val_idx)\n\n def _stratified_cv(self):\n \"\"\"\n Body function for stratified k-fold\n \"\"\"\n\n kfold = StratifiedKFold(n_splits=self.n_splits, shuffle=True,\n 
random_state=self.split_seed)\n for cv, (trn_idx, val_idx) in enumerate(kfold.split(\n self.feature_train, self.target_train)):\n self._cv_loop(cv, trn_idx, val_idx)\n\n def _group_cv(self):\n \"\"\"\n Body function for Group CV\n \"\"\"\n\n kfold = GroupKFold(n_splits=self.n_splits)\n group_list = self._make_group()\n for cv, (trn_idx, val_idx) in enumerate(kfold.split(\n self.feature_train, self.target_train, group_list)):\n self._cv_loop(cv, trn_idx, val_idx)\n\n def _time_series_cv(self):\n \"\"\"\n Body function for Time Series CV\n \"\"\"\n\n kfold = TimeSeriesSplit(n_splits=self.n_splits)\n for cv, (trn_idx, val_idx) in enumerate(kfold.split(\n self.feature_train)):\n self._cv_loop(cv, trn_idx, val_idx)\n\n def _cv_loop(self, cv, trn_idx, val_idx):\n \"\"\"\n Main function executed in the CV loop.\n\n Parameters\n ----------\n cv: int\n Index for the current cv loop\n\n trn_idx: array_like\n Index for training data\n\n val_idx: array_like\n Index for validation data\n \"\"\"\n\n self.logger.info(f'--- CV session {cv} ---')\n\n # Data split\n X_trn = self.feature_train.iloc[trn_idx]\n y_trn = self.target_train.iloc[trn_idx]\n X_val = self.feature_train.iloc[val_idx]\n y_val = self.target_train.iloc[val_idx]\n\n # Training\n self.logger.info('Start training')\n self.model.fit(X_trn, y_trn, X_val, y_val)\n self.logger.info('Finish training')\n\n # Validation\n y_val_pred = self.model.predict(X_val)\n score = self.metric_func(y_val, y_val_pred)\n self.cv_list.append(score)\n self.logger.info('RMSE validation: {}'.format(score))\n\n # Save oof prediction\n self.target_train_oof[val_idx] = y_val_pred\n\n # Save results\n if self.do_save_model:\n self.model.save(suffix=cv)\n\n def _make_group(self):\n \"\"\"\n Supplemental function for Group CV.\n This returns index for grouping\n\n Returns\n -------\n groups: list\n Index of grouping, ex. 
[0, 0, 1, 1, 2, 2]\n        \"\"\"\n\n        groups = []\n        for i in range(self.n_splits):\n            groups += [i] * (len(self.feature_train) // self.n_splits)\n\n        groups += [self.n_splits - 1] * (len(self.feature_train) - len(groups))\n\n        assert len(groups) == len(self.feature_train)\n\n        return groups\n\n    def test(self):\n        \"\"\"\n        Predict the test dataset, and save results to a csv file.\n        \"\"\"\n\n        if self.feature_test is None:\n            raise RuntimeError('You should load test data before calling test()')\n\n        self.logger.info('Predict test data')\n\n        # Prepare results data frame\n        df_tst = pd.DataFrame()\n        df_tst['pred'] = np.zeros(len(self.feature_test))\n\n        # Load all models saved in the training process.\n        # All predictions are averaged.\n        model_list = glob(os.path.join(self.results_path, '*.pkl'))\n        model_num = len(model_list)\n        for i, path in enumerate(model_list):\n            self.logger.info(f'Load and predict, step: {i} / {model_num}')\n            self.model.load(path)\n            df_tst['pred'] += self.model.predict(self.feature_test) / model_num\n\n        # Validate the test prediction score if test target is given\n        if self.target_test is not None:\n            df_tst['true'] = self.target_test\n            score = self.metric_func(df_tst['true'], df_tst['pred'])\n            self.logger.info('Test score: {:.4f}'.format(score))\n\n        self.logger.info('Save prediction')\n        df_tst.to_csv(os.path.join(self.results_path, 'test.csv'),\n                      index=False)\n\n        del df_tst\n        gc.collect()\n\n    def report(self, results_path=None):\n        \"\"\"\n        Report training results, and save them to a json file.\n        \"\"\"\n\n        self.logger.info('--- Training reports ---')\n        self.logger.info('CV n_splits = {}'.format(self.n_splits))\n        self.logger.info('CV results = {}'.format(self.cv_list))\n        self.logger.info('CV score = {:.4f}'.format(np.mean(self.cv_list)))\n        self.logger.info('OOF score = {:.4f}'.format(self.oof_score))\n\n        reports = {\n            'cv_results': self.cv_list,\n            'cv_score': np.mean(self.cv_list),\n            'oof_score': self.oof_score\n        }\n\n        with open(os.path.join(self.results_path, 'report.json'), 'w') as f:\n            json.dump(reports, f, indent=4)\n\n    def oof_to_csv(self, file_name='train_oof.csv'):\n        \"\"\"\n        Save Out-of-fold predictions to a csv file.\n\n        Parameters\n        ----------\n        file_name: str\n            File name of csv file.\n        \"\"\"\n\n        df_ = pd.DataFrame()\n        df_['true'] = self.target_train.values.reshape(1, -1)[0]\n        df_['pred'] = self.target_train_oof\n        \n        path = os.path.join(self.results_path, file_name)\n        df_.to_csv(path, index=False)\n        self.logger.info(f'Saved oof prediction to csv: {path}')\n\n        del df_\n        gc.collect()\n\n    def release(self):\n        \"\"\"\n        Release the data held by this object.\n        \"\"\"\n\n        del self.feature_train, self.target_train\n        del self.feature_test, self.target_test\n        gc.collect()\n\n        self.feature_train = None\n        self.target_train = None\n        self.feature_test = None\n        self.target_test = None\n\n    def load_data(self, data, target=None, mode='train'):\n        \"\"\"\n        Load train data from arguments.\n\n        Parameters\n        ----------\n        data: pandas.DataFrame, shape (length, n_features)\n            Feature matrix\n\n        target: pandas.DataFrame, optional\n            Target for supervised learning\n\n        mode: str\n            Select train or test.\n        \"\"\"\n\n        if mode == 'train':\n            self.feature_train = data\n            self.target_train = target\n        elif mode == 'test':\n            self.feature_test = data\n            self.target_test = target\n        else:\n            raise ValueError('Select appropriate mode')\n","sub_path":"datascience/train/cv_train.py","file_name":"cv_train.py","file_ext":"py","file_size_in_byte":12006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"458670574","text":"import re \nvalid_list = ['1 shiny gold']\nbags = []\nbag_dict = {}\n\n\ndef open_file():\n #going to open file and clean up data\n with open('./inputs/day7.txt', 'r') as f:\n scrubbed = []\n for line in f:\n scrubbed.append(line.strip())\n #print(scrubbed)\n return scrubbed\n \ndef generate_list(data):\n global bag_dict\n outer = []\n inner = []\n\n for bag in data:\n outer1 = bag.split('contain')[0]\n inner1 = bag.split('contain')[1]\n \n outer1 = re.sub(r'bags', '', outer1).strip()\n inner1 = re.sub(r'(\\sbag[s]?|\\.$)', '', inner1).strip()\n\n outer.append(outer1)\n inner.append(inner1)\n \n bag_dict = dict(zip(outer, inner))\n\n \n\ndef find_outers(colors):\n global bag_dict\n global valid_list\n #print(colors)\n done_check = len(valid_list)\n \n for k, v in bag_dict.items():\n for color in colors:\n if color in v:\n valid_list.append(k)\n #can't delete entry during recursion; set value to null\n bag_dict[k] = ''\n\n \n if done_check != len(valid_list):\n #sorted_list = sorted(\n #print(sorted(list(set(valid_list))))\n #hold_up = input()\n find_outers(valid_list)\n else:\n print(f'hopefully the total count is {len(set(valid_list)) -1}')\n\ngenerate_list(open_file())\n\ndef part2(colors):\n #this forms and loops through a queue, but recursion is hard and this doesn't work. \n global valid_list\n global bag_dict\n calculation = ''\n \n while valid_list:\n temp = valid_list.pop(0)\n qty, color = temp.split(' ', 1)\n \n #print(qty, color)\n for k, v in bag_dict.items():\n if color in k:\n if 'no other' in v:\n calculation += ')'\n continue\n else:\n calculation += f'{qty}(' \n for x in v.split(', '):\n valid_list.append(x)\n \n \n \n print(calculation)\n \npart2(valid_list)\n\n\n","sub_path":"day7_bags.py","file_name":"day7_bags.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"120674309","text":"#https://compuzzle.wordpress.com/2015/05/04/python-turtle-cheat-sheet-and-geometric-shapes/\nimport turtle\n\nturtle.hideturtle() \nturtle.speed('fastest') \nturtle.tracer(False) \n\nsize=1\nnum_squares = 300\n\nfor i in xrange(num_squares): \n turtle.forward(size)\n turtle.right(91)\n size = size + 1\n\nturtle.tracer(True)\nturtle.done()\n","sub_path":"answers/turtle-spiro-square.py","file_name":"turtle-spiro-square.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"153734880","text":"\"\"\"Helpers to the pair creator.\"\"\"\nimport json\n\n\ndef remove_all_traces(name, dictionary, remove_key=False):\n \"\"\"Remove all traces of name from dictionary with iterable values.\"\"\"\n if remove_key:\n del dictionary[name]\n for iterable in dictionary.values():\n try:\n iterable.remove(name)\n except:\n pass\n\n\ndef remove_from_cache(name, path):\n \"\"\"Remove a student from the cache.\"\"\"\n with open(path) as f:\n past_pairs = json.load(f)\n remove_all_traces(name, past_pairs, remove_key=True)\n with open(path, 'w') as f:\n json.dump(past_pairs, f)\n\n\ndef create_past_pairs(pairs):\n \"\"\"Create past pairs dict using pairs(list of tuples).\"\"\"\n past_pairs = {}\n for pair in pairs:\n for s in pair:\n past_pairs[s] = [x for x in pair if x != s]\n return past_pairs\n","sub_path":"src/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"642603771","text":"import boto3\nimport sys\nfrom random import randint\n\ndef vpc(region='us-west-2'):\n print('Processing VPCs')\n client = boto3.client('ec2',region_name=region)\n ec2 = boto3.resource('ec2',region_name=region)\n vpcs = client.describe_vpcs()\n for vpc in vpcs['Vpcs']:\n ID = vpc['VpcId']\n nacl_filter = [{'Name':'vpc-id', 'Values': [ID] }]\n network_acls = client.describe_network_acls(Filters=nacl_filter)\n nacls = network_acls['NetworkAcls']\n if nacls:\n for nacl in nacls:\n print('{}'.format(nacl['NetworkAclId']))\n # Block all Inbound traffic\n client.create_network_acl_entry(\n DryRun=True,\n CidrBlock='0.0.0.0/0',\n Egress=False,\n Protocol='-1',\n RuleAction='deny',\n RuleNumber=1,\n NetworkAclId=nacl['NetworkAclId'] \n )\n client.create_network_acl_entry(\n DryRun=True,\n Ipv6CidrBlock='::0/0',\n Egress=False,\n Protocol='-1',\n RuleAction='deny',\n RuleNumber=2,\n NetworkAclId=nacl['NetworkAclId'] \n )\n # Block all Outbound traffic\n client.create_network_acl_entry(\n DryRun=True,\n CidrBlock='0.0.0.0/0',\n Egress=True,\n Protocol='-1',\n RuleAction='deny',\n RuleNumber=1,\n NetworkAclId=nacl['NetworkAclId'] \n )\n client.create_network_acl_entry(\n DryRun=True,\n Ipv6CidrBlock='::0/0',\n Egress=True,\n Protocol='-1',\n RuleAction='deny',\n RuleNumber=2,\n NetworkAclId=nacl['NetworkAclId'] \n )\n\n\n\n#make them verify a random number because it will block traffic for all VPCs in all regions for that account.\nrannum=randint(1000, 9999)\nprint(\"Please enter the following number to continue {}: \".format(rannum))\ndata = input()\nif int(data) != rannum:\n print('Error: verification number does not match')\n sys.exit(1)\n\nclient = boto3.client('ec2')\nregions = [region['RegionName'] for region in client.describe_regions()['Regions']]\nfor region in regions:\n print('Starting Region = {}'.format(region))\n vpc(region)","sub_path":"all_region_kill_switch.py","file_name":"all_region_kill_switch.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"9512311","text":"\"\"\" Functions adapted from:\r\n https://bic-berkeley.github.io/psych-214-fall-2016/convolution_background.html\r\n Use these to convolve a Hemodynamic Response Function (HRF) to\r\n movie event data. 
\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.stats import gamma\r\n\r\n\r\ndef hrf(times):\r\n \"\"\" Return values for HRF at given times \"\"\"\r\n\r\n peak_values = gamma.pdf(times, 6)\r\n undershoot_values = gamma.pdf(times, 12)\r\n values = peak_values - 0.35 * undershoot_values\r\n hrf_at_trs = values / np.max(values) * 0.6\r\n\r\n return hrf_at_trs\r\n\r\ndef event_to_hrf(event_series, hrf_at_trs):\r\n \"\"\" Convolve an HRF to movie event data \"\"\"\r\n\r\n convolved = np.convolve(event_series, hrf_at_trs)\r\n n_to_remove = len(hrf_at_trs) - 1\r\n convolved = convolved[:-n_to_remove]\r\n\r\n return convolved\r\n","sub_path":"convolve.py","file_name":"convolve.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"381162123","text":"from complex_operations import Electrical\n\np = 10000\nvl = 440\nfp = 0.9\nfpn = 0.97\nc = 3**0.5\nil = p/(c*vl*fp)\nprint(il)\nz = Electrical.pfc\nq = z(p=p,fp=fp,fpn=fpn,il=il,vl=vl,is_rad=True)\nprint(q)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"182768524","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport threading\n\n\nclass myThread(threading.Thread):\n def __init__(self, thread_num, ticker_list_address):\n threading.Thread.__init__(self)\n self.thread_num = thread_num\n self.ticker_list_address = ticker_list_address\n\n def run(self):\n print(\"start:thread\" + str(self.thread_num))\n multi_threads_crawl_and_save(self.thread_num, self.ticker_list_address)\n print(\"end:thread\" + str(self.thread_num))\n\n\ndef multi_threads_crawl_and_save(thread_num, ticker_list_address):\n output = open('dividendData/dividend' + str(thread_num) + '.csv', 'w')\n f = open(ticker_list_address)\n try:\n reader = csv.reader(f)\n for row in reader:\n crawl_and_save(row[0], output)\n print(row[0])\n finally:\n f.close()\n output.close()\n\n\ndef crawl_and_save(symbol, out):\n count = 0\n url = 'http://www.nasdaq.com/symbol/%s/dividend-history' % symbol\n res = requests.get(url)\n soup = BeautifulSoup(res.text, 'html.parser')\n if soup.find(id='quotes_content_left_dividendhistoryGrid') is not None:\n entries = soup.find(id='quotes_content_left_dividendhistoryGrid').find_all('tr')\n for entry in entries:\n for item in entry.find_all('td'):\n out.write(item.get_text().strip() + ',')\n if count != 0:\n out.write(',' + symbol)\n out.write('\\n')\n count += 1\n\n\ndef main():\n thread_list = []\n for i in range(1, 3):\n thread = myThread(i, str(i) + '.csv')\n thread_list.append(thread)\n for i in range(1, 3):\n thread_list[i - 1].start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"363203429","text":"from django.contrib.sessions.middleware import SessionMiddleware\nfrom django.test import RequestFactory, TestCase\n\nfrom catalog.models import Mineral\nfrom catalog import views\n\n\nclass ImportViewsTests(TestCase):\n \"\"\"Test the views.\"\"\"\n\n def setUp(self):\n # Every test needs access to the request factory.\n self.factory = RequestFactory()\n\n def test_check_data_view_no_data(self):\n \"\"\"Check the index page is redirecting if DB contains NO data\"\"\"\n request = self.factory.get('/')\n response = 
views.check_data(request)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'The database is empty.')\n self.assertNotContains(response, 'items found.')\n\n def test_import_minerals_view(self):\n request = self.factory.get('import/')\n response = views.check_data(request)\n self.assertEqual(response.status_code, 200)\n\n\nclass MineralViewsTests(TestCase):\n \"\"\"Test the views.\"\"\"\n\n def setUp(self):\n # Every test needs access to the request factory.\n self.factory = RequestFactory()\n self.axinite = Mineral.objects.create(\n name=\"Axinite\",\n image_filename=\"Axinite.jpg\",\n streak=\"White to greyish white\",\n group='Silicates'\n )\n self.barstowite = Mineral.objects.create(\n name=\"Barstowite\",\n image_filename=\"Barstowite.jpg\",\n streak=\"White to brownish\",\n group='Organic Minerals',\n )\n\n def test_check_data_view(self):\n \"\"\"Check the index page is redirecting if DB contains data\"\"\"\n request = self.factory.get('/')\n response = views.check_data(request)\n self.assertEqual(response.status_code, 302)\n\n def test_letter_filter_view(self):\n \"\"\"\n Letter filter must show minerals that start with the selected letter only.\n letter 'b' used as filter\n \"\"\"\n request = self.factory.get('list/letter/')\n\n middleware = SessionMiddleware()\n middleware.process_request(request)\n request.session.save()\n\n response = views.mineral_list(request, **{'name_filter': 'b'})\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(response, self.axinite.name)\n self.assertContains(response, self.barstowite.name)\n\n def test_groups_filter_view(self):\n \"\"\"\n Group filter must show minerals by selected group only.\n 'organic-minerals' used as slugified filter\n \"\"\"\n\n request = self.factory.get('list/group/')\n\n middleware = SessionMiddleware()\n middleware.process_request(request)\n request.session.save()\n\n response = views.mineral_group(request, **{'group_filter': 'organic-minerals'})\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(response, self.axinite.name)\n self.assertContains(response, self.barstowite.name)\n\n def test_streak_filter_view(self):\n \"\"\"\n Streak filter must show minerals with selected streak only.\n 'white-to-greyish-white' used as slugified filter\n \"\"\"\n request = self.factory.get('list/group/')\n\n middleware = SessionMiddleware()\n middleware.process_request(request)\n request.session.save()\n\n response = views.mineral_streak(request, **{'streak_filter': 'white-to-greyish-white'})\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.axinite.name)\n self.assertNotContains(response, self.barstowite.name)\n\n def test_search_view(self):\n \"\"\"\n Keyword Search across all fields\n 'Greyish' used as serach term\n \"\"\"\n request = self.factory.get('search/', {'q': 'Greyish'})\n\n middleware = SessionMiddleware()\n middleware.process_request(request)\n request.session.save()\n\n response = views.search(request)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.axinite.name)\n self.assertNotContains(response, self.barstowite.name)\n\n def test_mineral_detail_view(self):\n \"\"\"\n Test the detail view\n \"\"\"\n request = self.factory.get('detail/')\n\n middleware = SessionMiddleware()\n middleware.process_request(request)\n request.session.save()\n\n response = views.mineral_detail(request, **{'pk': self.barstowite.pk})\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 
self.barstowite.name)\n self.assertContains(response, self.barstowite.image_filename)\n self.assertContains(response, self.barstowite.streak)\n self.assertContains(response, self.barstowite.group)","sub_path":"catalog/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"68825489","text":"'''\n'''\n\nimport os\nimport os.path as osp\nfrom torch.utils import data\nimport torch\nimport torch.nn as nn\nfrom models.CompModels import _FPN\nimport numpy as np\nfrom torchvision import transforms\nimport pickle\nimport tqdm\nimport random\nimport sys\nimport torch.distributed as dist\nfrom utils.util_main import featuresNorm\nfrom utils.util_benchmarks import cal_PMAE, cal_TS_score, cal_MAE, filePathGenerate, dataGenerate, deepModels_dataLoader, data_interplote_nor_gt, floorHalf, ycluo1_dataLoader\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass FPN(object):\n def __init__(self,\n args,\n dataset_type,\n dataset_util,\n startDate,\n endDate,\n split_ratio,\n crop_scale,\n #### \n EC_size,\n in_feature,\n hidden_feature,\n ts_seq,\n train_batch_size,\n test_batch_size,\n training_inv,\n learning_rate,\n weight_decay, \n max_epoch,\n benchmark_name,\n **kwargs\n ):\n ## \n self.args = args\n self.dataset_name = args.dataset_name\n self.dataset_type = dataset_type\n# self.dataset_path = osp.join(args.basePath, args.dataset_name, dataset_type)\n ##△ ycluo\n self.dataset_path = '/mnt/pami14/yqliu/dataset_meteo/cropGrib'\n self.dataset_util = dataset_util\n self.startDate = startDate\n self.endDate = endDate\n self.ECSize = EC_size\n self.split_ratio = split_ratio \n self.crop_scale = crop_scale\n #\n self.in_feature = in_feature\n self.hidden_feature = hidden_feature\n self.train_batch_size = train_batch_size\n self.test_batch_size = test_batch_size\n self.training_inv = training_inv\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n # luo\n self.max_epoch = 10\n \n self.num_workers = args.num_workers \n #\n self.EC_center = floorHalf(self.ECSize)\n #\n self.writer = SummaryWriter(osp.join(self.args.save_root, 'tensorboard'))\n self.models_save_dir = osp.join(self.args.save_root, 'save_models')\n os.makedirs(self.models_save_dir, exist_ok=True)\n self.benchmark_name = benchmark_name\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n def __repr__(self):\n return self.__class__.__name__\n\n def init_module(self):\n# featuresMean_st, featuresStd_st = featuresNorm(self.args, self.dataset_name, self.dataset_util, 'modal_S')\n ##△ ycluo\n featuresMean_st, featuresStd_st = featuresNorm(self.args, self.dataset_name, self.dataset_util, 'ycluo')\n # 3DTransformer\n# transformer_4D = transforms.Compose([\n# transforms.Normalize(mean=featuresMean_st, std=featuresStd_st)\n# ])\n ##△ ycluo\n transformer_luo = transforms.Compose([\n transforms.Normalize(mean=featuresMean_st, std=featuresStd_st)\n ]) \n \n # dataLoader\n totalGribFileList = filePathGenerate(self.dataset_path, self.startDate, self.endDate)\n # Dataset split\n trainCropFileList, testCropFileList = dataGenerate(totalGribFileList, self.split_ratio) \n# DatasetFPN_train = deepModels_dataLoader(self.args,\n# trainCropFileList,\n# transformer_4D\n# )\n \n ##△ ycluo\n DatasetFPN_train = ycluo1_dataLoader(\n trainCropFileList,\n transformer_luo\n ) \n\n # Trainset N * C * D * H * W\n self.TrainDataLoader = data.DataLoader(\n 
DatasetFPN_train, \n batch_size=self.train_batch_size,\n drop_last=True,\n shuffle=True,\n pin_memory=True,\n num_workers=self.num_workers,\n ) \n\n DatasetFPN_test = ycluo1_dataLoader(\n testCropFileList,\n transformer_luo\n )\n \n # Testset N * C * D * H * W\n self.TestDataLoader = data.DataLoader(\n DatasetFPN_test, \n batch_size=self.test_batch_size,\n drop_last=True,\n shuffle=False,\n pin_memory=True,\n num_workers=self.num_workers,\n ) \n \n def init_model(self):\n # init\n torch.cuda.set_device(self.args.device_ids[0])\n# fpn = _FPN(\n# self.in_feature,\n# self.hidden_feature\n# )\n\n ##△ ycluo\n fpn = _FPN(\n 37,\n self.hidden_feature\n ) \n if len(self.args.device_ids) > 1:\n self.fpn = nn.DataParallel(fpn, device_ids=self.args.device_ids).cuda() \n else:\n self.fpn = fpn.cuda()\n print(self.args) \n self.optimizer = torch.optim.Adam(self.fpn.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) \n \n def fit(self):\n self.init_model()\n self.epoch_iterations = 0\n #\n self.mae_best = []\n self.mape_best = []\n self.ts0 = []\n self.ts1 = []\n self.ts10 = []\n self.maeBest = 100000\n \n # if max_epoch\n for epoch in range(self.max_epoch):\n self.epoch = epoch\n self.train()\n if self.max_epoch % self.training_inv == 0:\n self.inference()\n self.epoch_iterations = self.epoch_iterations + 1 \n \n #\n MAE_FPN = np.mean(self.mae_best)\n MAPE_FPN = np.mean(self.mape_best)\n TS0_FPN = np.mean(self.ts0)\n TS1_FPN = np.mean(self.ts1)\n TS10_FPN = np.mean(self.ts10)\n\n MAE_FPN_STD = np.std(self.mae_best)\n MAPE_FPN_STD = np.std(self.mape_best)\n TS0_FPN_STD = np.std(self.ts0)\n TS1_FPN_STD = np.std(self.ts1)\n TS10_FPN_STD = np.std(self.ts10) \n \n info_avg = {\"MAE\": MAE_FPN,\n \"MAE_FPN_STD\": MAE_FPN_STD, \n \"MPAE\": MAPE_FPN,\n \"MAPE_FPN_STD\": MAPE_FPN_STD, \n \"Ts0.1\": TS0_FPN,\n \"Ts0.1_std\": TS0_FPN_STD,\n \"Ts1:\": TS1_FPN,\n \"Ts1_std\": TS1_FPN_STD, \n \"Ts10:\": TS10_FPN,\n \"Ts10_std\": TS10_FPN_STD, \n }\n print(\"======= FPN Final Results Show =======\")\n \n print(\"info_AVG:\", info_avg) \n \n \n ########Train FPN########### \n def train(self):\n self.fpn.train()\n for batch_idx, (data, target) in tqdm.tqdm(\n enumerate(self.TrainDataLoader), \n total=len(self.TrainDataLoader), \n desc='Train FPN epoch=%d' % self.epoch, \n ncols=100, \n leave=False):\n ## Data\n# data = data[:,:,-1,:,:].to(self.device)\n# data = data[:,:,-1,:,:].to(self.device)\n target = target.to(self.device)\n self.optimizer.zero_grad()\n preds = self.fpn(data)\n ## Loss \n Loss = F.mse_loss(preds, target)\n #\n Loss.backward()\n self.optimizer.step() \n #\n self.writer.add_scalars('Loss_{}'.format(\"FPN\"), {\n 'fpn_loss': Loss.item()},\n self.epoch_iterations) \n print ('epoch-{}-last_miniBatch-MSELoss:'.format(self.epoch), Loss.item())\n \n \n ########Evaluation FPN########### \n def inference(self):\n self.fpn.eval() \n batch_preds = []\n batch_gts = [] \n for batch_idx, (data, target) in tqdm.tqdm(\n enumerate(self.TestDataLoader), total = len(self.TestDataLoader),\n desc = 'Test FPN epoch=%d' % self.epoch, ncols = 80,\n leave=False): \n \n ## Data\n# data = data[:,:,-1,:,:].to(device=self.device)\n \n target = target.to(device=self.device)\n \n # Inference\n with torch.no_grad():\n preds = self.fpn(data)\n Loss = F.mse_loss(preds, target).item()\n batch_preds.append(preds.cpu().numpy().squeeze())\n batch_gts.append(target.cpu().numpy().squeeze()) \n \n \n # list → numpy\n preds = np.concatenate(batch_preds)\n labelsVec = np.concatenate(batch_gts) \n #\n mae = cal_MAE(preds, 
labelsVec)\n pMae = cal_PMAE(preds, labelsVec) \n #\n ts_01 = cal_TS_score(preds, labelsVec, 0.1)\n ts_1 = cal_TS_score(preds, labelsVec, 1)\n ts_10 = cal_TS_score(preds, labelsVec, 10)\n\n # \n \n info = {\"MAE\": round(mae,2),\n \"MPAE\": round(pMae,2),\n \"Ts0.1\":round(ts_01,2),\n \"Ts1:\": round(ts_1,2),\n \"Ts10:\": round(ts_10,2),\n } \n \n print(\"======= FPN Results Show =======\".format(self.epoch_iterations + 1))\n print(info) \n\n # visual \n self.writer.add_scalars(\n 'Accuracy_{}'.format(self.benchmark_name), \n {\n \"MAE\": mae,\n \"MPAE\": pMae,\n \"Ts0.1\": ts_01,\n \"Ts1:\": ts_1,\n \"Ts10:\": ts_10}, \n self.epoch_iterations + 1) \n\n if self.epoch_iterations>=3 and mae < self.maeBest:\n # return dictionary\n self.maeBest = mae\n dict_params_fpn = self.fpn.state_dict()\n torch.save(dict_params_fpn, osp.join(self.models_save_dir, 'fpn-{}.pt'.format(self.epoch_iterations+1)))\n \n self.mae_best.append(mae)\n self.mape_best.append(pMae)\n self.ts0.append(ts_01)\n self.ts1.append(ts_1)\n self.ts10.append(ts_10)\n","sub_path":"experiments/bayes_naive.py","file_name":"bayes_naive.py","file_ext":"py","file_size_in_byte":10243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"110661796","text":"#!/usr/bin/env python3\nimport sys\nimport mrutil\n\nfor line in sys.stdin:\n if line:\n tokens = line.split('\\t',1)\n doc_id = mrutil.get_doc_title_hash_id(tokens[0])\n title = mrutil.format_title_spacing(tokens[0])\n #doc_id (title, body)\n print('%s\\t%s:%s' % (doc_id, title, tokens[1]))","sub_path":"assignment4/mr_apps/docs_mapper.py","file_name":"docs_mapper.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"246196166","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('events', '0014_auto_20170102_1919'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post_test_03',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200)),\n ('text', models.TextField()),\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\n ('published_date', models.DateTimeField(blank=True)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='post_test',\n name='action_time_TT',\n field=models.DateTimeField(default=datetime.datetime(2017, 1, 2, 19, 33, 39, 475000, tzinfo=utc), verbose_name=b'action time', auto_now=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='post_test',\n name='is_staff_01',\n field=models.BooleanField(default=True, help_text=b'Designates whether the user can log into this admin site.', verbose_name=b'staff status'),\n ),\n migrations.AddField(\n model_name='post_test',\n name='is_staff_xx',\n field=models.BooleanField(default=False, help_text=b'Designates whether the user can log into this admin site.', verbose_name=b'staff status'),\n ),\n migrations.AlterField(\n model_name='planobjekt',\n name='pub_date',\n field=models.DateTimeField(verbose_name=datetime.datetime(2017, 1, 2, 19, 33, 10, 258000, tzinfo=utc)),\n 
),\n ]\n","sub_path":"events/migrations/0015_auto_20170102_2033.py","file_name":"0015_auto_20170102_2033.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"230173218","text":"import FWCore.ParameterSet.Config as cms\nimport os\n\nprocess = cms.Process(\"Ana\")\n\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.load(\"Configuration.StandardSequences.Services_cff\")\n\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 10000\n\nprocess.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring())\nprocess.source.fileNames.append(\"/store/cmst3/user/cmgtools/CMG/TTJets_MassiveBinDECAY_TuneZ2star_8TeV-madgraph-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_10_0/patTuple_0.root\")\n#process.source.fileNames.append(\"file:///tmp/jhgoh/patTuple_0.root\")\n\nprocess.load(\"KrAFT.GeneratorTools.genJetAssociation_cff\")\nprocess.load(\"KrAFT.GeneratorTools.lumiWeight_cff\")\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\", \n fileName = cms.untracked.string(\"out.root\"),\n outputCommands = cms.untracked.vstring(\n \"drop *\",\n \"keep *_*_*_Ana\",\n )\n)\n\nprocess.outPath = cms.EndPath(process.out)\n\nprocess.p = cms.Path(\n process.recoToGenJet\n + process.genJetToPartons\n + process.lumiWeight\n)\n\n","sub_path":"GeneratorTools/test/test_cfg.py","file_name":"test_cfg.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"154576983","text":"from django.db import models\nfrom django.utils.translation import gettext as _\nfrom common.consts import category_type\nfrom feedback.models import Feedback\nimport uuid\n\n\ndef scrumble_uploaded_images(instance, filename):\n extension = filename.split('.')[-1]\n new_filename = '{}.{}'.format(uuid.uuid4(), extension)\n return new_filename\n\ndef scrumble_user_id():\n long = str(uuid.uuid4())\n print('--long:{}'.format(long))\n print('--short:{}'.format(long[-4:]))\n return long[-4:]\n\n\nclass Product(models.Model):\n image = models.ImageField(\n verbose_name=_('Зоображення'),\n upload_to=scrumble_uploaded_images,\n default='no-product-image.png'\n )\n name = models.CharField(\n verbose_name=_('Назва'),\n max_length=80,\n default='product'\n )\n rating = models.IntegerField(\n verbose_name=_('Рейтинг'),\n blank=True,\n null=True\n )\n vendor = models.CharField(\n verbose_name=_('Артикул'),\n max_length=64\n )\n category = models.IntegerField(\n verbose_name=_('Категорія'),\n choices=category_type.CATEGORY_TYPE_CHOISES,\n validators=[category_type.validate_category_type],\n blank=True,\n null=True\n )\n price = models.IntegerField(\n verbose_name=_('Ціна'),\n blank=True,\n null=True\n )\n discount = models.IntegerField(\n verbose_name=_('Знижка'),\n blank=True,\n null=True\n )\n bestseller = models.BooleanField(\n verbose_name=_('Хіт продаж'),\n default=False\n )\n in_stock = models.BooleanField(\n verbose_name=_('В наявності'),\n default=False\n )\n consist = models.TextField(\n verbose_name=_('Склад'),\n blank=True,\n null=True\n )\n short_description = models.TextField(\n verbose_name=_('Короткий опис'),\n blank=True,\n null=True\n )\n description = models.TextField(\n verbose_name=_('Повний опис'),\n blank=True,\n null=True\n )\n feedback = 
models.ForeignKey(\n Feedback,\n verbose_name=_('Відгук'),\n blank=True,\n null=True,\n on_delete=models.CASCADE\n )\n test_id = models.UUIDField(\n default=uuid.uuid4,\n editable=False,\n verbose_name=_('Тест ID')\n )\n\n def __str__(self):\n return self.name\n\n\n# import uuid\n# from django.db import models\n#\n# class Person(models.Model):\n# ...\n# id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n","sub_path":"product/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"574881739","text":"import math\n#sudo apt-get install python-pyaudio\nfrom pyaudio import PyAudio\nimport pyaudio\nimport wave\nimport array\n\nFs = 44000\nT = 3\nn = Fs*T\nf = 1000\ny = []\nfor x in range(n):\n\n y.append(int(math.sin(2*math.pi*f/Fs*x)*127 + 128 ))\n\nb = array.array('B', y).tobytes()\n\np = PyAudio()\nstream = p.open(\n format=p.get_format_from_width(1),\n channels=1,\n rate=44000,\n output=True,\n )\nstream.write(b)\nstream.stop_stream()\nstream.close()\np.terminate()\n\n# wf = wave.open(r'C:\\Users\\Administrator\\Desktop\\毕设\\论文下载\\test5.wav', 'wb')\n# wf.setnchannels(1)\n# wf.setsampwidth(p.get_sample_size(pyaudio.paInt8))\n# wf.setframerate(16000)\n# wf.writeframes(b)\n# wf.close()","sub_path":"raspy_python/audiofinal.py","file_name":"audiofinal.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"576978853","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 5 21:50:19 2017\n\n@author: HSIN\n\"\"\"\n\nimport csv\nimport pandas\nimport numpy as np\n\nimport keras.backend as K\nimport keras\nfrom keras.layers import Input, Embedding, Flatten, Add, Dot\nfrom keras.models import load_model\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\n\nratings = pandas.read_csv('data/train.csv', sep=',', engine='python', header = 0,\n names=['trainid', 'userid', 'movieid', 'rating']).set_index('trainid')\n\nratings.movieid = ratings.movieid.astype('category')\nratings.userid = ratings.userid.astype('category')\n\n\n\nmovieid = np.asarray(ratings.movieid.values, dtype='int')\nuserid = np.asarray(ratings.userid.values, dtype='int')\n\n\n\n\nn_movies = np.max(movieid)+1\nn_items = n_movies\n\nn_users = np.max(userid)+1\n\n\n\n \n\ntests = pandas.read_csv('data/test.csv', sep=',', engine='python', header = 0,\n names=['testid', 'userid', 'movieid']).set_index('testid')\n \n \ntests.movieid = tests.movieid.astype('category')\ntests.userid = tests.userid.astype('category')\n\ntest_id = tests.index.values\ntests_movieid = np.asarray(tests.movieid.values, dtype='int')\ntests_userid = np.asarray(tests.userid.values, dtype='int')\n\n\n\n\ny = np.zeros((ratings.shape[0], 5))\ny[np.arange(ratings.shape[0]), ratings.rating - 1] = 1\n\n\ny = np.dot(y, [1,2,3,4,5])\n\n\n\n\ndef rmse_score(y_true,y_pred):\n \n return K.mean((y_true - y_pred) ** 2) ** 0.5\n \n\n\ndef create_model(n_users, n_items, latent_dim=20):\n user_input = Input(shape=[1])\n item_input = Input(shape=[1])\n user_vec = Embedding(n_users, latent_dim, embeddings_initializer='random_normal')(user_input)\n user_vec = Flatten()(user_vec)\n item_vec = Embedding(n_items, latent_dim, embeddings_initializer='random_normal')(item_input)\n item_vec = Flatten()(item_vec)\n user_bias = Embedding(n_users, 1, embeddings_initializer='zeros')(user_input)\n user_bias = Flatten()(user_bias)\n item_bias = 
Embedding(n_items, 1, embeddings_initializer='zeros')(item_input)\n item_bias = Flatten()(item_bias)\n r_hat = Dot(axes=1)([user_vec, item_vec])\n r_hat = Add()([r_hat, user_bias, item_bias])\n model = keras.models.Model([user_input, item_input], r_hat)\n \n \n return model\n\n\n\n\n\nmodel = create_model(n_users, n_items)\nmodel.compile(loss='mse', optimizer='adam', metrics=[rmse_score])\nmodel.summary()\n\n\n\nnp.random.seed(seed = 1746)\n\n\ntrain_valid_ratio = 0.9\nindices = np.random.permutation(y.shape[0])\ntrain_idx, valid_idx = indices[:int(y.shape[0] * train_valid_ratio)], indices[int(y.shape[0] * train_valid_ratio):]\n\nmovieid_train, userid_train, y_train = movieid[train_idx], userid[train_idx], y[train_idx]\n\nmovieid_valid, userid_valid, y_valid = movieid[valid_idx], userid[valid_idx], y[valid_idx]\n\n\npatience = 10\nepochs = 1000\nbatch_size = 512\n\nearlystopping = EarlyStopping(monitor='val_rmse_score', patience = patience, verbose=1, mode='auto')\ncheckpoint = ModelCheckpoint('mf_model.h5',\n verbose=1,\n save_best_only=True,\n save_weights_only=True,\n monitor='val_rmse_score',\n mode='min')\n\nhistory = model.fit([userid_train, movieid_train], y_train, \n epochs=epochs, \n batch_size = batch_size,\n validation_data=([userid_valid, movieid_valid], y_valid),\n \t\t callbacks=[earlystopping,checkpoint])\n\n\ndel model\n\nmodel = create_model(n_users, n_items)\nmodel.load_weights('mf_model.h5')\n\nmodel.save('mf_complete_model.h5')\ndel model\n\n\nmodel = load_model('mf_complete_model.h5')\n\n\nvalid_res = model.predict([userid_valid, movieid_valid])\n\n\nvalid_pred = valid_res.flatten()\nvalid_true = y_valid\n\nvalid_error = np.mean((valid_true - valid_pred) ** 2) ** 0.5\nprint('valid error: ', valid_error)\n\nres = model.predict([tests_userid,tests_movieid])\n\n\npred = res.flatten()\n\n\nresult = [['TestDataID','Rating']]\nfor i, j in zip(test_id, pred):\n line = []\n line.append(int(i))\n line.append(float(j))\n result.append(line)\n\nf = open('mf_prediction.csv', 'w', encoding = 'big5', newline='')\nw = csv.writer(f)\nw.writerows(result)\nf.close()\n","sub_path":"hw6/hw6_train.py","file_name":"hw6_train.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"275856510","text":"import cv2\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nCATEGORIES = [\"Paper\", \"Rock\", \"Scissors\"]\r\n\r\ndef prepare(filepath):\r\n\tIMG_SIZE = 50\r\n\timg_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)\r\n\timg_array = img_array/255.0\r\n\tnew_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\r\n\tplt.imshow(img_array, cmap=\"gray\")\r\n\tplt.show()\r\n\treturn new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\r\n\r\n\r\nmodel = tf.keras.models.load_model(\"65x3-CNN.model\")\r\n\r\np = prepare('C:/Users/ML/rps-test-set/scissors/testscissors03-07.png')\t# pfad Bild angeben\r\nprediction = model.predict([p])\r\nclass_name=model.predict_classes([p])\r\nprint(class_name)\t\t\t\t\t\t# gibt entweder [0] [1] [2] aus\r\nprint(CATEGORIES[int(class_name)])\t\t# gibt Name zu [0] [1] [2] aus\r\nprint(\"Done\")\r\n\r\n\r\n#END","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"492865729","text":"from setuptools import setup, find_packages\nimport glob\nimport os\n\nversion = '0.61'\nname = 
'slapos.toolbox'\nlong_description = open(\"README.rst\").read() + \"\\n\"\n\nfor f in sorted(glob.glob(os.path.join('slapos', 'README.*.rst'))):\n long_description += '\\n' + open(f).read() + '\\n'\n\nlong_description += open(\"CHANGES.txt\").read() + \"\\n\"\n\n# Provide a way to install additional requirements\nadditional_install_requires = []\ntry:\n import argparse\nexcept ImportError:\n additional_install_requires.append('argparse')\n\nsetup(name=name,\n version=version,\n description=\"SlapOS toolbox.\",\n long_description=long_description,\n classifiers=[\n \"Programming Language :: Python\",\n ],\n keywords='slapos toolbox',\n license='GPLv3',\n namespace_packages=['slapos'],\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Flask', # needed by servers\n 'atomize', # needed by pubsub\n 'feedparser', # needed by pubsub\n 'apache_libcloud>=0.4.0', # needed by cloudmgr\n 'lockfile', # used by equeue\n 'lxml', # needed for xml parsing\n 'paramiko', # needed by cloudmgr\n 'psutil', # needed for playing with processes in portable way\n 'setuptools', # namespaces\n 'slapos.core', # as it provides library for slap\n 'xml_marshaller', # needed to dump information\n 'GitPython', #needed for git manipulation into slaprunner\n 'passlib',\n 'netifaces',\n 'erp5.util',\n 'PyRSS2Gen',\n 'dnspython',\n ] + additional_install_requires,\n extras_require = {\n 'lampconfigure': [\"mysqlclient\"], #needed for MySQL Database access\n 'zodbpack': ['ZODB3'], # needed to play with ZODB\n 'flask_auth' : [\"Flask-Auth\"],\n 'networkbench' : ['pycurl'], \n 'check_web_page_http_cache_hit' : ['pycurl'], # needed for check_web_page_http_cache_hit module\n },\n tests_require = [\n 'mock',\n ],\n zip_safe=False, # proxy depends on Flask, which has issues with\n # accessing templates\n entry_points={\n 'console_scripts': [\n 'agent = slapos.agent.agent:main',\n 'check-web-page-http-cache-hit = slapos.promise.check_web_page_http_cache_hit:main',\n 'check-feed-as-promise = slapos.checkfeedaspromise:main',\n 'clouddestroy = slapos.cloudmgr.destroy:main',\n 'cloudgetprivatekey = slapos.cloudmgr.getprivatekey:main',\n 'cloudgetpubliciplist = slapos.cloudmgr.getpubliciplist:main',\n 'cloudlist = slapos.cloudmgr.list:main',\n 'cloudmgr = slapos.cloudmgr.cloudmgr:main',\n 'cloudstart = slapos.cloudmgr.start:main',\n 'cloudstop = slapos.cloudmgr.stop:main',\n 'equeue = slapos.equeue:main',\n 'generatefeed = slapos.generatefeed:main',\n 'htpasswd = slapos.htpasswd:main',\n 'is-local-tcp-port-opened = slapos.promise.is_local_tcp_port_opened:main',\n 'is-process-older-than-dependency-set = slapos.promise.is_process_older_than_dependency_set:main',\n 'killpidfromfile = slapos.systool:killpidfromfile', # BBB\n 'monitor.bootstrap = slapos.monitor.monitor:main',\n 'monitor.collect = slapos.monitor.collect:main',\n 'monitor.runpromise = slapos.monitor.runpromise:main',\n 'monitor.genstatus = slapos.monitor.globalstate:main',\n 'monitor.genrss = slapos.monitor.status2rss:main',\n 'monitor.configwrite = slapos.monitor.monitor_config_write:main',\n 'runResiliencyUnitTestTestNode = slapos.resiliencytest:runUnitTest',\n 'runResiliencyScalabilityTestNode = slapos.resiliencytest:runResiliencyTest',\n 'runStandaloneResiliencyTest = slapos.resiliencytest:runStandaloneResiliencyTest',\n 'lampconfigure = slapos.lamp:run [lampconfigure]',\n 'onetimedownload = slapos.onetimedownload:main',\n 'onetimeupload = slapos.onetimeupload:main',\n 'pubsubnotifier = slapos.pubsub.notifier:main',\n 'pubsubserver = 
slapos.pubsub:main',\n 'qemu-qmp-client = slapos.qemuqmpclient:main',\n 'rdiffbackup.genstatrss = slapos.resilient.rdiffBackupStat2RSS:main',\n 'slapos-kill = slapos.systool:kill',\n 'slaprunnertest = slapos.runner.runnertest:main',\n 'slaprunnerteststandalone = slapos.runner.runnertest:runStandaloneUnitTest',\n 'zodbpack = slapos.zodbpack:run [zodbpack]',\n 'networkbench = slapos.networkbench:main',\n 'cachechecker = slapos.cachechecker:web_checker_utility'\n ]\n },\n test_suite='slapos.test',\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"439871901","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/ddalg/itree/test__tree.py\n# Compiled at: 2020-03-30 11:18:59\n# Size of source mod 2**32: 4767 bytes\nimport unittest\nfrom ddalg.model.test__interval import make_intervals\nfrom ._tree import IntervalTree, SimpleInterval\n\nclass TestIntervalTree(unittest.TestCase):\n\n def setUp(self) -> None:\n intervals = make_intervals(0, 3, 9)\n self.tree = IntervalTree(intervals)\n\n def test_search(self):\n self.assertEqual(0, len(self.tree.search(0)))\n result = self.tree.search(1)\n self.assertEqual(1, len(result))\n self.assertListEqual([SimpleInterval(0, 3)], result)\n result = self.tree.search(6)\n self.assertEqual(3, len(result))\n self.assertListEqual([SimpleInterval(3, 6), SimpleInterval(4, 7), SimpleInterval(5, 8)], result)\n result = self.tree.search(11)\n self.assertEqual(1, len(result))\n self.assertEqual([SimpleInterval(8, 11)], result)\n self.assertEqual(0, len(self.tree.search(12)))\n self.assertRaises(ValueError, self.tree.search, 'BlaBla')\n\n def test_get_overlaps(self):\n self.assertEqual(0, len(self.tree.get_overlaps(-1, 0)))\n result = self.tree.get_overlaps(0, 1)\n self.assertEqual(1, len(result))\n self.assertListEqual([SimpleInterval(0, 3)], result)\n result = self.tree.get_overlaps(4, 6)\n self.assertEqual(4, len(result))\n self.assertListEqual([\n SimpleInterval.of(3, 6), SimpleInterval.of(4, 7), SimpleInterval.of(5, 8), SimpleInterval.of(2, 5)], result)\n result = self.tree.get_overlaps(10, 11)\n self.assertEqual(1, len(list(result)))\n self.assertListEqual([SimpleInterval(8, 11)], result)\n self.assertEqual(0, len(self.tree.get_overlaps(11, 12)))\n\n def test_len(self):\n self.assertEqual(0, len(IntervalTree([])))\n self.assertEqual(9, len(self.tree))\n\n def test_insert(self):\n self.assertEqual(0, len(self.tree.search(12)))\n self.tree.insert(SimpleInterval(9, 12))\n results = self.tree.search(12)\n self.assertEqual(1, len(results))\n self.assertListEqual([SimpleInterval(9, 12)], results)\n\n def test_fuzzy_query(self):\n intervals = make_intervals(-5, 95, 11)\n tree = IntervalTree(intervals)\n self.assertListEqual([SimpleInterval(0, 100)], tree.fuzzy_query(0, 100))\n self.assertListEqual([SimpleInterval(-1, 99),\n SimpleInterval(0, 100),\n SimpleInterval(1, 101)], tree.fuzzy_query(0, 100, coverage=0.98))\n self.assertListEqual([SimpleInterval(-2, 98),\n SimpleInterval(-1, 99),\n SimpleInterval(0, 100),\n SimpleInterval(1, 101),\n SimpleInterval(2, 102)], tree.fuzzy_query(0, 100, coverage=0.95))\n self.assertRaises(ValueError, tree.fuzzy_query, 0, 100, 1.5)\n\n def test_fuzzy_query_other(self):\n tree = IntervalTree(make_intervals(38, 62, 1))\n results = tree.fuzzy_query(40, 
60, coverage=0.8)\n self.assertListEqual([SimpleInterval(38, 62)], results)\n\n def test_jaccard_query(self):\n tree = IntervalTree(make_intervals((-20), 80, 11, step=5))\n self.assertListEqual([SimpleInterval(-5, 95), SimpleInterval(0, 100)], tree.jaccard_query(-5, 100, 0.9))\n\n def test_bool(self):\n self.assertTrue(self.tree)\n self.assertFalse(IntervalTree([]))\n\n def test_iteration(self):\n self.tree.insert(SimpleInterval(4, 7))\n items = list(self.tree)\n self.assertListEqual([SimpleInterval(0, 3),\n SimpleInterval(1, 4),\n SimpleInterval(2, 5),\n SimpleInterval(3, 6),\n SimpleInterval(4, 7),\n SimpleInterval(4, 7),\n SimpleInterval(5, 8),\n SimpleInterval(6, 9),\n SimpleInterval(7, 10),\n SimpleInterval(8, 11)], items)\n\n def test_iteration_through_empty_tree(self):\n tree = IntervalTree([])\n items = list(tree)\n self.assertListEqual([], items)","sub_path":"pycfiles/ddalg-0.0.3.post0-py3.6/test__tree.cpython-36.py","file_name":"test__tree.cpython-36.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"431383319","text":"import matplotlib.pyplot as plot\nimport numpy as np\nimport sys\nfrom scipy import signal\n\n# m = 0.2\n# freq = 10\n# freqs = 2\nFs = 150.0; # sampling rate\nTs = 1.0/Fs; # sampling interval\n\n\n\"\"\" for frequency spectrum \"\"\"\ndef calc_fft(y):\n\tn = len(y) # length of the signal\n\tk = np.arange(n)\n\tT = n/Fs\n\tfrq = k/T # two sides frequency range\n\tfrq = frq[range(n//2)] # one side frequency range\n\tY = np.fft.fft(y)/n # fft computing and normalization\n\tY = Y[range(n//2)]\n\treturn(Y, frq)\n\n\ndef get_y(arr, name, f):\n\tfreq = int(f)\n\tbit_arr = np.array(arr)\n\tsamples_per_bit = 2*Fs/bit_arr.size \n\tdd = np.repeat(bit_arr, samples_per_bit)\n\n\tif name == \"fsk\":\n\t\treturn np.sin(2 * np.pi * (freq + dd) * t)\n\telif name == \"psk\":\n\t\treturn np.sin(2 * np.pi * (freq) * t+(np.pi*dd/180))\n\telse:\n\t\treturn dd*np.sin(2 * np.pi * freq * t)\n\n\nt = np.arange(0,2,Ts)\n\n###fsk = [5,5,-5,5,-5]\n###psk = [180,180,0,180,0]\n###ask = [1, 0, 1, 1, 0]\n\n\"\"\" START OF GRAPH \"\"\"\nfig,myplot = plot.subplots(3, 1)\n\ndef get_input():\n\tfrequency = input(\"Enter frequency: \")\n\treturn frequency\n\n\ndef plot_():\n\t# binary = 11010\n\tf = get_input()\n\tfsk = [5,5,-5,5,-5]\n\tpsk = [180,180,0,180,0]\n\task = [1, 1, 0, 1, 0]\n\tys = []\n\tys.append(get_y(fsk, \"fsk\", f))\n\tys.append(get_y(psk, \"psk\", f))\n\tys.append(get_y(ask, \"ask\", f))\n\n\t# Y, frq = calc_fft(y)\n\tfor i in range(0, 3):\n\t\tif (i == 0):\n\t\t\tlbl = \" (FSK)\"\n\t\telif (i == 1):\n\t\t\tlbl = \" (PSK)\"\n\t\telse:\n\t\t\tlbl = \" (ASK)\"\n\n\t\tmyplot[i].plot(t,ys[i])\n\t\tmyplot[i].set_xlabel('Time')\n\t\tmyplot[i].set_ylabel('Amplitude' + lbl)\n\n\nif __name__ == \"__main__\":\n\tplot_()\n\tplot.show()\n","sub_path":"modulation.py","file_name":"modulation.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"330102417","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass CloudError(Model):\n \"\"\"CloudError.\n \"\"\"\n\n _attribute_map = {\n }\n\n\nclass ManagementLockObject(Model):\n \"\"\"The lock information.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :param level: Required. The level of the lock. Possible values are:\n NotSpecified, CanNotDelete, ReadOnly. CanNotDelete means authorized users\n are able to read and modify the resources, but not delete. ReadOnly means\n authorized users can only read from a resource, but they can't modify or\n delete it. Possible values include: 'NotSpecified', 'CanNotDelete',\n 'ReadOnly'\n :type level: str or\n ~azure.mgmt.resource.locks.v2016_09_01.models.LockLevel\n :param notes: Notes about the lock. Maximum of 512 characters.\n :type notes: str\n :param owners: The owners of the lock.\n :type owners:\n list[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockOwner]\n :ivar id: The resource ID of the lock.\n :vartype id: str\n :ivar type: The resource type of the lock - Microsoft.Authorization/locks.\n :vartype type: str\n :ivar name: The name of the lock.\n :vartype name: str\n \"\"\"\n\n _validation = {\n 'level': {'required': True},\n 'id': {'readonly': True},\n 'type': {'readonly': True},\n 'name': {'readonly': True},\n }\n\n _attribute_map = {\n 'level': {'key': 'properties.level', 'type': 'str'},\n 'notes': {'key': 'properties.notes', 'type': 'str'},\n 'owners': {'key': 'properties.owners', 'type': '[ManagementLockOwner]'},\n 'id': {'key': 'id', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n }\n\n def __init__(self, *, level, notes: str=None, owners=None, **kwargs) -> None:\n super(ManagementLockObject, self).__init__(**kwargs)\n self.level = level\n self.notes = notes\n self.owners = owners\n self.id = None\n self.type = None\n self.name = None\n\n\nclass ManagementLockOwner(Model):\n \"\"\"Lock owner properties.\n\n :param application_id: The application ID of the lock owner.\n :type application_id: str\n \"\"\"\n\n _attribute_map = {\n 'application_id': {'key': 'applicationId', 'type': 'str'},\n }\n\n def __init__(self, *, application_id: str=None, **kwargs) -> None:\n super(ManagementLockOwner, self).__init__(**kwargs)\n self.application_id = application_id\n\n\nclass Operation(Model):\n \"\"\"Microsoft.Authorization operation.\n\n :param name: Operation name: {provider}/{resource}/{operation}\n :type name: str\n :param display: The object that represents the operation.\n :type display:\n ~azure.mgmt.resource.locks.v2016_09_01.models.OperationDisplay\n \"\"\"\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'display': {'key': 'display', 'type': 'OperationDisplay'},\n }\n\n def __init__(self, *, name: str=None, display=None, **kwargs) -> None:\n super(Operation, self).__init__(**kwargs)\n self.name = name\n self.display = display\n\n\nclass OperationDisplay(Model):\n \"\"\"The object that represents the operation.\n\n :param provider: Service provider: Microsoft.Authorization\n :type provider: str\n :param resource: Resource on which the operation is performed: 
Profile,\n     endpoint, etc.\n    :type resource: str\n    :param operation: Operation type: Read, write, delete, etc.\n    :type operation: str\n    \"\"\"\n\n    _attribute_map = {\n        'provider': {'key': 'provider', 'type': 'str'},\n        'resource': {'key': 'resource', 'type': 'str'},\n        'operation': {'key': 'operation', 'type': 'str'},\n    }\n\n    def __init__(self, *, provider: str=None, resource: str=None, operation: str=None, **kwargs) -> None:\n        super(OperationDisplay, self).__init__(**kwargs)\n        self.provider = provider\n        self.resource = resource\n        self.operation = operation\n","sub_path":"sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/models/_models_py3.py","file_name":"_models_py3.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"577924155","text":"from random import randint\n\n\nclass Solution:\n    @staticmethod\n    def isPalindrome_1(x: int) -> bool:\n\n        s = str(x)\n        length = len(s)\n        for i in range(length // 2):\n            if s[i] != s[length - 1 - i]:\n                return False\n        return True\n\n    @staticmethod\n    def isPalindrome_2(x: int) -> bool:\n        if x < 0 or (x % 10 == 0 and x != 0):\n            return False\n\n        reverse = 0\n        while reverse < x:\n            reverse = reverse * 10 + x % 10\n            x //= 10\n\n        return reverse == x or x == reverse // 10\n\n    @staticmethod\n    def isPalindrome_3(x: int) -> bool:\n        if x < 0: return False\n        s = str(x)\n        return s == s[::-1]\n        # reverse the str by the index\n\n\nif __name__ == '__main__':\n\n    x = [0, 101, 111, -101, 100, -100]\n    s = ['t','s']\n    print(s, s[::-1])\n    print(x, x[::-1])\n    # for val in x:\n    #     print(val, Solution.isPalindrome_3(val))\n    # val = 111\n    # print(val, Solution.isPalindrome_2(val))","sub_path":"src/9_palindrome_number.py","file_name":"9_palindrome_number.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"312195680","text":"import sys\r\nfrom Functions import *\r\n\r\nIntervalInput = []\r\n\r\nwhile True:\r\n    if not IntervalInput:\r\n        IntervalList = raw_input(\"List of Intervals?\")\r\n        if IntervalList == \"quit\":\r\n            sys.exit()\r\n        IntervalSplit = IntervalList.split(\",\")\r\n        try:\r\n            for x in IntervalSplit:\r\n                temp_interval = interval(x)\r\n                IntervalInput.append(temp_interval)\r\n        except:\r\n            raise ValueError(\"Invalid list of intervals\")\r\n\r\n    IntervalInput = mergeOverlapping(IntervalInput)\r\n\r\nwhile True:\r\n    InsertInterval = raw_input(\"Intervals\")\r\n    if InsertInterval == \"quit\":\r\n        sys.exit()\r\n    try:\r\n        NewInterval = interval(InsertInterval)\r\n    except:\r\n        raise ValueError(\"Invalid interval\")\r\n\r\n    IntervalInput = insert(IntervalInput, NewInterval)\r\n\r\n    print(IntervalInput)\r\n\r\n\r\n\r\n","sub_path":"zz880/assignment6.py","file_name":"assignment6.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"526507789","text":"from Logger import Logger\nfrom SystemMemory import SystemMemory\nfrom Parser import Parser\nfrom Instruction import Instruction, UnrecognizedInstructionError\nfrom Fetch import Fetch\nfrom Decode import Decode\nfrom Execute import Execute\nfrom Memory import Memory\nfrom Write import Write\n\nclass PAMS():\n\n    def __init__(self):\n        self.insnSet = []\n        self.mem = SystemMemory()\n        self.regs = SystemMemory()\n        self.fetch = Fetch()\n        self.decode = Decode()\n        self.execute = Execute()\n        self.memory = Memory()\n        self.writeback = Write()\n    \n    def load(self, name):\n        parser = Parser(name)\n        lines = parser.readEntireFileToMemory(self.mem)\n        for offset in range(lines):\n            addr = hex(int(0x00001000) + 4 * offset)\n            ins = Instruction(self.mem.readAddr(addr))\n            self.insnSet.append(ins)\n        Logger.inform( \"added \" + str(lines) + \" to insnSet\")\n    \n    def step(self):\n        Logger.inform( \"taking a step.\")\n        empty = False\n        \"\"\"move everybody that already exists up one in the pipeline\"\"\"\n        \n        self.writeback.instruction = self.memory.instruction\n        self.writeback.pc = self.memory.pc\r\n        self.memory.instruction = self.execute.instruction\n        self.memory.pc = self.execute.pc\n        self.execute.instruction = self.decode.instruction\n        self.execute.pc = self.decode.pc\n        self.decode.instruction = self.fetch.instruction\n        self.decode.pc = self.fetch.pc\n        \n        \"\"\"run their steps\"\"\"\n        self.fetch.cycleStep(self.decode, self.insnSet)\r\n        self.decode.cycleStep(self.fetch, self.execute, self.writeback, self.memory, self.insnSet, self.mem)\r\n        self.execute.cycleStep(self.fetch, self.decode, self.insnSet)\n        self.memory.cycleStep(self.mem)\n        empty = (self.fetch.instruction.op == 0 and self.decode.instruction.op == 0 and self.execute.instruction.op == 0 and self.memory.instruction.op == 0)\n        self.writeback.cycleStep(self.mem, self.insnSet, empty)\n        \n        return self.writeback.allInstructionsFinished","sub_path":"PAMS/PAMS.py","file_name":"PAMS.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"420616742","text":"\nfrom Exceptions.Exceptions import ServiceException\nimport time\n\n\nclass Application:\n    def __init__(self, service):\n        self.__service = service\n        self.__commands = {\n            '1': self.__add_new_edge,\n            '2': self.__remove_edge,\n            '3': self.__add_new_vertex,\n            '4': self.__remove_vertex,\n            '5': self.__read_from_file,\n            '6': self.__write_to_file,\n            '7': self.__create_random_graph,\n            '8': self.__laboratory2Ex4\n        }\n\n    @staticmethod\n    def print_options():\n        print(\"The options are:\\n\"\n              \"1. Add a new edge\\n\"\n              \"2. Remove an edge\\n\"\n              \"3. Add a vertex\\n\"\n              \"4. Remove a vertex\\n\"\n              \"5. Read from a file\\n\"\n              \"6. Write to a file\\n\"\n              \"7. Create random graph\\n\"\n              \"8. Find the connected components of an undirected graph using a breadth-first traversal of the graph\\n\")\n\n    def __add_new_edge(self):\n        firstVertex = int(input(\"The first vertex is: \"))\n        secondVertex = int(input(\"The second vertex is: \"))\n        cost = int(input(\"The cost is: \"))\n        self.__service.add_new_edge(firstVertex, secondVertex, cost)\n\n    def __remove_edge(self):\n        firstVertex = int(input(\"The first vertex is: \"))\n        secondVertex = int(input(\"The second vertex is: \"))\n        self.__service.remove_edge(firstVertex, secondVertex)\n\n    def __add_new_vertex(self):\n        newVertex = int(input(\"The new vertex is: \"))\n        self.__service.add_new_vertex(newVertex)\n\n    def __remove_vertex(self):\n        vertex = int(input(\"The vertex to be deleted is: \"))\n        self.__service.remove_vertex(vertex)\n\n    def __read_from_file(self):\n        fileName = input(\"The file name is: \")\n        self.__service.read_from_file(fileName)\n\n    def __write_to_file(self):\n        fileName = input(\"The file name is: \")\n        self.__service.write_to_file(fileName)\n\n    def __create_random_graph(self):\n        numberOfVertices = int(input(\"The number of vertices is: \"))\n        numberOfEdges = int(input(\"The number of edges is: \"))\n        self.__service.create_random_graph(numberOfVertices, numberOfEdges)\n\n    def __laboratory2Ex4(self):\n        numberOfConnectedComponents, stringToBePrinted = self.__service.laboratory2_exercise4()\n        print(stringToBePrinted)\n        print(\"The number of connected components is \", numberOfConnectedComponents)\n\n    def start(self):\n        while True:\n            self.print_options()\n            option = input(\"Your options is: \")\n            if option == 'exit':\n                print(\"Goodbye!\\n\")\n                break\n            if option in self.__commands:\n                try:\n                    self.__commands[option]()\n                except ValueError as valueError:\n                    print(\"Invalid input!\")\n                except ServiceException as serviceException:\n                    print(serviceException)\n            else:\n                print(\"Invalid command!\")\n","sub_path":"Graph algorithms/Connected components of an undirected graph using BFS/UI/Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"132662328","text":"from getgauge.python import step, data_store\r\nfrom api.mt.mt import Mt\r\nimport urllib\r\nfrom api.mt.search.applet import Applet\r\nfrom api.mt.search.category_nav import Category_nav\r\nfrom api.mt.search.hint import Hint\r\nfrom api.mt.search.hot import Hot\r\nfrom api.mt.search.mall_middle_page import Mall_middle_page\r\nfrom api.mt.search.match import Match\r\nfrom api.mt.search.order import Order\r\nfrom api.mt.search.result_page import Result_page\r\nfrom api.mt.search.rank import Rank\r\nfrom api.mt.search.scene_page import Scene_page\r\nfrom api.mt.search.stage import Stage\r\nfrom api.mt.search.words import Words\r\n\r\n\r\n\r\n@step(\"搜索关键词绑定的小应用, search_keyword=\")\r\ndef applet(search_keyword):\r\n    search_keyword = urllib.parse.quote(search_keyword)\r\n    applet = Applet()\r\n    applet.api = applet.api.replace('$search_keyword', search_keyword)\r\n    applet.request()\r\n    print(applet.resp.json())\r\n\r\n\r\n@step(\"搜索结果页 配置组合关键字, search_keyword=\")\r\ndef category_nav(search_keyword):\r\n    search_keyword = urllib.parse.quote(search_keyword)\r\n    category_nav = Category_nav()\r\n    category_nav.api = category_nav.api.replace('$search_keyword', search_keyword)\r\n    category_nav.request()\r\n    print(category_nav.resp.json())\r\n\r\n\r\n@step(\"获取搜索提示, search_keyword=\")\r\ndef hint(search_keyword):\r\n    search_keyword = 
urllib.parse.quote(search_keyword)\r\n hint = Hint()\r\n hint.api = hint.api.replace('$search_keyword', search_keyword)\r\n hint.request()\r\n print(hint.resp.json())\r\n\r\n\r\n@step(\"获取热门搜索\")\r\ndef hot():\r\n hot = Hot()\r\n hot.request()\r\n print(hot.resp.json())\r\n\r\n\r\n@step(\"搜索接口, search_keyword=\")\r\ndef match(search_keyword):\r\n search_keyword = urllib.parse.quote(search_keyword)\r\n match = Match()\r\n match.api = match.api.replace('$search_keyword', search_keyword)\r\n match.request()\r\n print(match.resp.json())\r\n\r\n\r\n@step(\"搜索订单, search_keyword=\")\r\ndef order(search_keyword):\r\n search_keyword = urllib.parse.quote(search_keyword)\r\n order = Order()\r\n order.api = order.api.replace('$search_keyword', search_keyword)\r\n order.request()\r\n print(order.resp.json())\r\n\r\n\r\n@step(\"搜索结果页接口, search_keyword=,sort=,min_price=,max_price=,expand_filters=,cat=\")\r\ndef page_result(search_keyword,sort,min_price,max_price,expand_filters,cat):\r\n search_keyword = urllib.parse.quote(search_keyword)\r\n result_page = Result_page()\r\n result_page.api = result_page.api.replace('$search_keyword', search_keyword)\r\n result_page.api =result_page.api.replace('$sort', sort)\r\n result_page.api=result_page.api.replace(\"$min_price\",min_price)\r\n result_page.api = result_page.api.replace(\"$max_price\", max_price)\r\n result_page.api = result_page.api.replace(\"$expand_filters\", expand_filters)\r\n result_page.api = result_page.api.replace(\"$cat\", cat)\r\n resp =result_page.request()\r\n assert resp['code'] == result_page.success_resp['code']\r\n result_item=resp['data']['items'][:-1]\r\n data_store.suite['result_item'+str(sort)]=result_item\r\n\r\n\r\n\r\n@step(\"判断搜索结果页的数据与搜索词一致,flag=\")\r\ndef verificat_page_result(serach_words,flag):\r\n result_page = Result_page()\r\n if flag==1 and result_page.host == 'http://sx.api.mengtuiapp.com':\r\n result_item=data_store.suite['result_item_default']\r\n for goods_name in [item['goods']['goods_name'] for item in result_item]:\r\n assert goods_name.find(serach_words) >= 0\r\n\r\n\r\n@step(\"判断根据排序方式搜索结果不同\")\r\ndef verificat_sort_result():\r\n result_default = data_store.suite['result_item_default']\r\n result_sales = data_store.suite['result_item_sales']\r\n if len(result_default)>0 and len(result_sales)>0:\r\n assert [item['goods']['goods_id'] for item in result_default]!=[item['goods']['sales'] for item in result_sales]\r\n\r\n@step(\"判断查询结果在同一个类目中\")\r\ndef verificat_goods_type():\r\n result_sales= data_store.suite['result_item_sales']\r\n for goods_type in [item['search_goods_type'] for item in result_sales]:\r\n assert goods_type==0\r\n\r\n#测试环境的排序是错的,所以此条case通不过\r\n@step(\"判断按照销量进行排序\")\r\ndef verificat_sales_sort():\r\n result_page = Result_page()\r\n if result_page.host != 'http://sx.api.mengtuiapp.com':\r\n result_sales = data_store.suite['result_item_sales']\r\n result_sales=[item['goods']['sales'] for item in result_sales]\r\n sales_sort=sorted(result_sales,reverse=True)\r\n assert result_sales==sales_sort\r\n\r\n#测试环境的排序是错的,所以此条case通不过\r\n@step(\"判断搜索结果按照价格顺序排列,sort=\")\r\ndef verificat_price_sort(sort):\r\n result_page=Result_page()\r\n if result_page.host!='http://sx.api.mengtuiapp.com':\r\n if sort==\"price\":\r\n ascending_price = [item['goods']['show_price'] for item in data_store.suite['result_itemprice']]\r\n assert ascending_price==sorted(ascending_price)\r\n else:\r\n descending_price = [item['goods']['show_price'] for item in data_store.suite['result_item_price']]\r\n assert descending_price == 
sorted(descending_price,reverse=True)\r\n\r\n@step(\"判断商品的价格在价格区间内\")\r\ndef verificat_price():\r\n result_page = Result_page()\r\n if result_page.host != 'http://sx.api.mengtuiapp.com':\r\n price_result = [item['goods']['show_price'] for item in data_store.suite['result_item_default']]\r\n assert min(price_result)>=10\r\n assert max(price_result)<=200\r\n\r\n\r\n@step(\"热搜榜接口\")\r\ndef rank():\r\n rank = Rank()\r\n rank.request()\r\n print(rank.resp.json())\r\n\r\n\r\n@step(\"搜索接口scene_page, search_keyword=\")\r\ndef scene_page(search_keyword):\r\n search_keyword = urllib.parse.quote(search_keyword)\r\n scene_page = Scene_page()\r\n scene_page.api = scene_page.api.replace('$search_keyword', search_keyword)\r\n scene_page.request()\r\n print(scene_page.resp.json())\r\n\r\n\r\n@step(\"搜索接口stage, search_keyword=\")\r\ndef stage(search_keyword):\r\n search_keyword = urllib.parse.quote(search_keyword)\r\n stage = Stage()\r\n stage.api = stage.api.replace('$search_keyword', search_keyword)\r\n stage.request()\r\n print(stage.resp.json())\r\n\r\n\r\n@step(\"获得搜索中间页的数据\")\r\ndef words():\r\n words = Words()\r\n resp = words.request()\r\n assert resp['code'] == words.success_resp['code']\r\n hot_words = resp['data']['hot_words']\r\n activity_words = resp['data']['activity_words']\r\n assert hot_words != ''\r\n assert activity_words != ''\r\n data_store.suite['hot_words'] = hot_words\r\n data_store.suite['activity_words'] = activity_words\r\n\r\n\r\n@step(\"获取搜索中间页的“搜索发现”数据并判断\")\r\ndef check_search_words():\r\n words = Words()\r\n hot_words = data_store.suite['hot_words']\r\n activity_words = data_store.suite['activity_words']\r\n ##判断对应词的style,以项目中的配置为准\r\n for item in hot_words:\r\n if item[\"word\"] in words.words.keys():\r\n assert str(item[\"style\"])==str(words.words[item[\"word\"]])\r\n # ##判断对应词的link,以项目的配置为准\r\n for item in activity_words:\r\n if item[\"word\"] in words.links.keys():\r\n assert str(item[\"link\"]) == str(words.links[item[\"word\"]])\r\n\r\n@step(\"判断搜索结果按照排序为,服务为萌推好店,类别为付费会员的结果\")\r\ndef verificat_filter(sort):\r\n #会员是搜索部门给的结果,无法通过字段判断\r\n filter_result=data_store.suite['result_item'+str(sort)]\r\n for goods in [item['goods']['mall']['labels'][0] for item in filter_result]:\r\n assert goods[\"type\"]==\"nice_mall\"\r\n if sort==\"_price\":\r\n descending_price = [item['goods']['show_price'] for item in filter_result]\r\n assert descending_price == sorted(descending_price, reverse=True)\r\n if sort==\"_sales\":\r\n result_sales = [item['goods']['sales'] for item in filter_result]\r\n assert result_sales==sorted(result_sales,reverse=False)\r\n\r\n\r\n@step(\"获得搜索店铺中间页的数据,mall_id=\")\r\ndef middle_page_result(mall_id):\r\n mall_middle_page=Mall_middle_page()\r\n mall_middle_page.api.replace(\"mallId\",mall_id)\r\n resp=mall_middle_page.request()\r\n #print(resp)\r\n assert resp['code']==mall_middle_page.success_resp['code']\r\n data_store.suite['mall_search_data']=resp['data']\r\n\r\n@step(\"判断搜索发现中的数据是否完整\")\r\ndef verificat_search_data():\r\n mall_middle_page = Mall_middle_page()\r\n assert mall_middle_page.word.sort()==data_store.suite['mall_search_data'].sort()\r\n\r\n@step(\"判断搜索结果页的商品只是本店的商品\")\r\ndef verificat_same_shop(mall_id_1):\r\n result_default = data_store.suite['result_item_default']\r\n for mall_id in [item['goods']['mall_id'] for item in result_default]:\r\n assert 
str(mall_id)==str(mall_id_1)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"banshee-master/step_impl/mt/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":9165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"294198370","text":"from lxml import html\nimport requests\nfrom requests_html import HTMLSession\nimport json\nimport time\nimport logging\nimport os\n\n# Appends flights ICAO present in coordinate space to list\ndef flights_in_zone():\n\n\ttop_left = (47.541779, -122.346872)\n\tbottom_left = (47.538762, -122.346872)\n\ttop_right = (47.541779, -122.278043)\n\tbottom_right = (47.538762,-122.278043)\n\n\tflights_in_zone = []\n\n\tr = requests.get('http://10.0.0.199/dump1090-fa/data/aircraft.json')\n\tflights_json = json.loads(r.content)\n\tfor flight in flights_json['aircraft']:\n\t\tif 'lat' in flight and 'lon' in flight:\n\t\t\tif flight['lon'] > top_left[1] and flight['lon'] < top_right[1]:\n\t\t\t\tif flight['lat'] < top_left[0] and flight['lat'] > bottom_right[0]:\n\t\t\t\t\tflights_in_zone.append(flight['hex'])\n\n\treturn flights_in_zone\n\n\n# Scrapes flight info from FlightAware using ICAO\ndef get_flight_info(icao):\n\n\tsession = HTMLSession()\n\tr = session.get('https://flightaware.com/live/modes/'+icao+'/redirect')\n\tprint(r)\n\n\tr.html.render()\n\n\t#file = open(\"output.html\", \"w\")\n\t#file.write(r.html.html)\n\t#file.close()\n\t\n\ttree = html.fromstring(r.html.html)\n\n\tsource = tree.xpath('//*[@id=\"flightPageTourStep1\"]/div[1]/div[1]/span[2]/text()')\n\tdestination = tree.xpath('//*[@id=\"flightPageTourStep1\"]/div[1]/div[2]/span[2]/span/text()')\n\taircraft = tree.xpath('//*[@id=\"slideOutPanel\"]/div[1]/div[2]/div[4]/div[8]/div[1]/div/div[1]/div[2]/text()')\n\tflight_ident = tree.xpath('//*[@id=\"slideOutPanel\"]/div/div/div[3]/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]/text()')\n\n\tcleaned_aircraft = clean_text(aircraft[0])\n\tcleaned_source = clean_text(source[0])\n\tcleaned_destination = clean_text(destination[0])\n\tcleaned_ident = clean_text(flight_ident[0])\n\n\tlogging.info('%s %s %s %s',cleaned_aircraft,cleaned_source,cleaned_destination,cleaned_ident)\n\n\tprint(cleaned_ident)\n\tprint(cleaned_source)\n\tprint(cleaned_destination)\n\tprint(cleaned_aircraft)\n\tprint('\\n')\n\ttime.sleep(1)\n\n\n# Cleans flight information strings\ndef clean_text(text):\n\n\tcleaned_text = text.replace('\\n', '')\n\tcleaned_text = cleaned_text.replace('\\t', '')\n\n\topen_paren = cleaned_text.find('(')\n\tclose_paren = cleaned_text.find(')')\n\tif open_paren>0 and close_paren>0:\n\t\tcleaned_text = cleaned_text[0:open_paren]+cleaned_text[close_paren+1:len(cleaned_text)-1]\n\treturn cleaned_text\n\n\ndef main():\n\n\n\t# logging setup\n\tdirpath = os.path.dirname(os.path.realpath(__file__))\n\tlogname = 'flights.log'\n\tlogfile = os.path.join(dirpath, logname)\n\tlogging.basicConfig(filename=logfile, filemode='w',level=logging.INFO, format='%(asctime)s - %(message)s')\n\t\n\t#logging.info('accd69')\n\t#get_flight_info('accd69')\n\n\t\n\twhile(True):\n\t\tflights = flights_in_zone()\n\t\n\t\tif flights:\n\t\t\tfor flight in flights:\n\t\t\t\tlogging.info(flight)\n\t\t\t\tget_flight_info(flight)\n\t\ttime.sleep(1)\n\nif __name__=='__main__':\n\tmain()","sub_path":"get_flight_info.py","file_name":"get_flight_info.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"644250396","text":"from chalice import 
Chalice\nfrom chalicelib.todo import *\n\napp = Chalice(app_name=\"recurrist\")\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return {'app': 'Recurrist v1'}\n\n\n@app.route('/', methods=['POST'])\ndef create_task():\n request = app.current_request\n body = request.json_body\n\n add_task(body['name'], body['project'], body['labels'], body.get('due', None))\n\n return {'done': 'ok'}\n\n","sub_path":"python-recurrist/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"379116088","text":"common_words = [\n\"de\",\n\"que\",\n\"no\",\n\"a\",\n\"la\",\n\"el\",\n\"es\",\n\"y\",\n\"en\",\n\"lo\",\n\"un\",\n\"por\",\n\"me\",\n\"qué\",\n\"una\",\n\"te\",\n\"los\",\n\"se\",\n\"con\",\n\"para\",\n\"mi\",\n\"está\",\n\"si\",\n\"pero\",\n\"las\",\n\"su\",\n\"yo\",\n\"tu\",\n\"del\",\n\"al\",\n\"como\",\n\"le\",\n\"eso\",\n\"sí\",\n\"esta\",\n\"ya\",\n\"más\",\n\"muy\",\n\"hay\",\n\"bien\",\n\"estoy\",\n\"todo\",\n\"nos\",\n\"tengo\",\n\"ha\",\n\"este\",\n\"cuando\",\n\"sólo\",\n\"vamos\",\n\"cómo\",\n\"estás\",\n\"o\",\n\"soy\",\n\"puedo\",\n\"esto\",\n\"quiero\",\n\"aquí\",\n\"tiene\",\n\"tú\",\n\"ahora\",\n\"algo\",\n\"fue\",\n\"son\",\n\"ser\",\n\"he\",\n\"era\",\n\"eres\",\n\"así\",\n\"sé\",\n\"tienes\",\n\"ese\",\n\"bueno\",\n\"creo\",\n\"todos\",\n\"sus\",\n\"puede\",\n\"voy\",\n\"tan\",\n\"esa\",\n\"porque\",\n\"dónde\",\n\"hacer\",\n\"quién\",\n\"nunca\",\n\"nada\",\n\"él\",\n\"estaba\",\n\"están\",\n\"quieres\",\n\"va\",\n\"sabes\",\n\"vez\",\n\"hace\",\n\"ella\",\n\"dos\",\n\"tenemos\",\n\"puedes\",\n\"sin\",\n\"hasta\",\n\"sr\",\n\"oh\",\n\"entonces\",\n\"dios\",\n\"sobre\",\n\"usted\",\n\"ni\",\n\"has\",\n\"mis\",\n\"mejor\",\n\"mucho\",\n\"gracias\",\n\"ver\",\n\"hola\",\n\"solo\",\n\"estar\",\n\"les\",\n\"estamos\",\n\"siento\",\n\"alguien\",\n\"otra\",\n\"siempre\",\n\"hombre\",\n\"uno\",\n\"donde\",\n\"quiere\",\n\"parece\",\n\"antes\",\n\"ir\",\n\"mira\",\n\"vas\",\n\"tal\",\n\"tus\",\n\"decir\",\n\"han\",\n\"ahí\",\n\"poco\",\n\"estas\",\n\"nadie\",\n\"desde\",\n\"sea\",\n\"también\",\n\"tiempo\",\n\"gran\",\n\"dijo\",\n\"favor\",\n\"podemos\",\n\"casa\",\n\"gente\",\n\"cosas\",\n\"otro\",\n\"día\",\n\"buen\",\n\"podría\",\n\"debe\",\n\"necesito\",\n\"había\",\n\"después\",\n\"será\",\n\"hecho\",\n\"señor\",\n\"nuestro\",\n\"vida\",\n\"tener\",\n\"sabe\",\n\"quien\",\n\"tenía\",\n\"hablar\",\n\"buena\",\n\"ellos\",\n\"dije\",\n\"toda\",\n\"tipo\",\n\"fuera\",\n\"estado\",\n\"crees\",\n\"ven\",\n\"sido\",\n\"todas\",\n\"tienen\",\n\"menos\",\n\"dice\",\n\"haber\",\n\"tres\",\n\"cada\",\n\"padre\",\n\"años\",\n\"hijo\",\n\"nosotros\",\n\"claro\",\n\"aún\",\n\"seguro\",\n\"nuestra\",\n\"gusta\",\n\"espera\",\n\"ve\",\n\"trabajo\",\n\"lugar\",\n\"verdad\",\n\"pasa\",\n\"debo\",\n\"alguna\",\n\"quería\",\n\"unos\",\n\"esos\",\n\"luego\",\n\"mas\",\n\"oye\",\n\"quizá\",\n\"van\",\n\"mí\",\n\"hemos\",\n\"ti\",\n\"estos\",\n\"mismo\",\n\"algún\",\n\"pueden\",\n\"noche\",\n\"mundo\",\n\"visto\",\n\"cosa\",\n\"nombre\",\n\"realmente\",\n\"entre\",\n\"buenas\",\n\"ah\",\n\"somos\",\n\"veo\",\n\"hizo\",\n\"esas\",\n\"parte\",\n\"pues\",\n\"saber\",\n\"haciendo\",\n\"debes\",\n\"muchas\",\n\"casi\",\n\"amigo\",\n\"hora\",\n\"quizás\",\n\"dinero\",\n\"hacia\",\n\"haces\",\n\"demasiado\",\n\"ud\",\n\"pensé\",\n\"madre\",\n\"deja\",\n\"hoy\",\n\"veces\",\n\"debería\",\n\"contra\",\n\"primera\",\n\"mientras\",\n\"cualquier\",\n\"nuevo\",\n\"todavía\",\n\"debemos\",\n\"ustedes\
",\n\"mal\",\n\"hombres\",\n\"tanto\",\n\"salir\",\n\"digo\",\n\"mujer\",\n\"nueva\",\n\"momento\",\n\"sería\",\n\"papá\",\n\"ves\",\n\"da\",\n\"hey\",\n\"importa\",\n\"cuánto\",\n\"días\",\n\"cuál\",\n\"espero\",\n\"mañana\",\n\"dame\",\n\"vaya\",\n\"iba\",\n\"mamá\",\n\"vi\",\n\"déjame\",\n\"sabía\",\n\"ningún\",\n\"buenos\",\n\"dr\",\n\"durante\",\n\"mierda\",\n\"forma\",\n\"allí\",\n\"hice\",\n\"único\",\n\"sigue\",\n\"bajo\",\n\"poder\",\n\"acuerdo\",\n\"volver\",\n\"nuestros\",\n\"haya\",\n\"supongo\",\n\"toma\",\n\"siquiera\",\n\"quieren\",\n\"chica\",\n\"dejar\",\n\"aunque\",\n\"unas\",\n\"idea\",\n\"ay\",\n\"dicho\",\n\"necesitamos\",\n\"viene\",\n\"allá\",\n\"ninguna\",\n\"significa\",\n\"hubiera\",\n\"algunos\",\n\"tomar\",\n\"sra\",\n\"dentro\",\n\"primer\",\n\"llama\",\n\"gustaría\",\n\"e\",\n\"dijiste\",\n\"diablos\",\n\"fin\",\n\"pasó\",\n\"maldita\",\n\"deberías\",\n\"muchos\",\n\"cree\",\n\"habla\",\n\"fui\",\n\"necesita\",\n\"policía\",\n\"última\",\n\"vete\",\n\"lado\",\n\"maldito\",\n\"otros\",\n\"razón\",\n\"primero\",\n\"cuenta\",\n\"amor\",\n\"justo\",\n\"chico\",\n\"llegar\",\n\"cuándo\",\n\"dime\",\n\"ok\",\n\"cinco\",\n\"dar\",\n\"algunas\",\n\"vale\",\n\"haré\",\n\"problema\",\n\"tuve\",\n\"dicen\",\n\"pasar\",\n\"mío\",\n\"familia\",\n\"viejo\",\n\"hago\",\n\"escucha\",\n\"pueda\",\n\"esté\",\n\"año\",\n\"pasado\",\n\"cuidado\",\n\"podía\",\n\"hacerlo\",\n\"eran\",\n\"personas\",\n\"tarde\",\n\"saben\",\n\"caso\",\n\"adónde\",\n\"venga\",\n\"fueron\",\n\"cuanto\",\n\"cerca\",\n\"pequeño\",\n\"estuvo\",\n\"necesitas\",\n\"hiciste\",\n\"manos\",\n\"estabas\",\n\"encontrar\",\n\"queda\",\n\"chicos\",\n\"estará\",\n\"historia\",\n\"número\",\n\"suerte\",\n\"cual\",\n\"feliz\",\n\"pronto\",\n\"amigos\",\n\"eh\",\n\"par\",\n\"igual\",\n\"venir\",\n\"cuatro\",\n\"clase\",\n\"trata\",\n\"iré\",\n\"capitán\",\n\"cierto\",\n\"hablando\",\n\"srta\",\n\"hagas\",\n\"hermano\",\n\"dices\",\n\"éste\",\n\"manera\",\n\"hacen\",\n\"puerta\",\n\"os\",\n\"bastante\",\n\"ciudad\",\n\"matar\",\n\"esposa\",\n\"nuestras\",\n\"única\",\n\"aqui\",\n\"camino\",\n\"cabeza\",\n\"tenido\",\n\"podrías\",\n\"dio\",\n\"tenga\",\n\"conozco\",\n\"niños\",\n\"mucha\",\n\"di\",\n\"creer\",\n\"niño\",\n\"cariño\",\n\"vino\",\n\"jefe\",\n\"miedo\",\n\"medio\",\n\"equipo\",\n\"habrá\",\n\"entrar\",\n\"deben\",\n\"muerte\",\n\"acerca\",\n\"último\",\n\"punto\",\n\"vivir\",\n\"haga\",\n\"misma\",\n\"realidad\",\n\"lleva\",\n\"guerra\",\n\"persona\",\n\"pienso\",\n\"sangre\",\n\"conmigo\",\n\"dile\",\n\"rápido\",\n\"seguir\",\n\"pensar\",\n\"seis\",\n\"además\",\n\"listo\",\n\"adiós\",\n\"entiendo\",\n\"ojos\",\n\"minutos\",\n\"mayor\",\n\"semana\",\n\"agua\",\n\"sabemos\",\n\"mano\",\n\"hija\",\n\"habría\",\n\"haz\",\n\"ayuda\",\n\"tío\",\n\"doctor\",\n\"auto\",\n\"diré\",\n\"asi\",\n\"ésta\",\n\"jamás\",\n\"incluso\",\n\"difícil\",\n\"noches\",\n\"contigo\",\n\"estaban\",\n\"paso\",\n\"tuvo\",\n\"estaré\",\n\"horas\",\n\"juego\",\n\"problemas\",\n\"piensa\",\n\"cuerpo\",\n\"quisiera\",\n\"queremos\",\n\"llamo\",\n\"demonios\",\n\"señora\",\n\"agente\",\n\"grande\",\n\"comida\",\n\"buscar\",\n\"digas\",\n\"muerto\",\n\"mil\",\n\"cara\",\n\"falta\",\n\"estuve\",\n\"acaba\",\n\"otras\",\n\"probablemente\",\n\"detrás\",\n\"vuelve\",\n\"tampoco\",\n\"pequeña\",\n\"sino\",\n\"siendo\",\n\"comer\",\n\"deberíamos\",\n\"recuerdo\",\n\"mala\",\n\"seas\",\n\"trabajar\",\n\"segundo\",\n\"final\",\n\"vemos\",\n\"mujeres\",\n\"jugar\",\n\"quédate\",\n\"poner\",\n\"amo\",\n\"importante\",\n\"dado\",\n\"odio\",\n\"creí
\",\n\"suena\",\n\"buscando\",\n\"suficiente\",\n\"usar\",\n\"supuesto\",\n\"serio\",\n\"miren\",\n\"luz\",\n\"murió\",\n\"loco\",\n\"corazón\",\n\"piensas\",\n\"llamar\",\n\"acabo\",\n\"viste\",\n\"tierra\",\n\"diez\",\n\"resto\",\n\"vuelta\",\n\"tras\",\n\"hará\",\n\"alto\",\n\"culpa\",\n\"lamento\",\n\"preocupes\",\n\"arriba\",\n\"conoces\",\n\"fiesta\",\n\"morir\",\n\"joven\",\n\"ése\",\n\"esperar\",\n\"cuarto\",\n\"eras\",\n\"mire\",\n\"fácil\",\n\"tratando\",\n\"vine\",\n\"llamado\",\n\"perder\",\n\"sal\",\n\"chicas\",\n\"basta\",\n\"pregunta\",\n\"llegó\",\n\"haría\",\n\"supone\",\n\"ten\",\n\"trato\",\n\"recuerda\",\n\"padres\",\n\"podríamos\",\n\"oído\",\n\"rey\",\n\"grandes\",\n\"pensando\",\n\"hacemos\",\n\"hijos\",\n\"esperando\",\n\"tenías\",\n\"exactamente\",\n\"das\",\n\"armas\",\n\"pudo\",\n\"perro\",\n\"propia\",\n\"temo\",\n\"adelante\",\n\"lista\",\n\"próxima\",\n\"llevar\",\n\"abre\",\n\"fuerte\",\n\"lejos\",\n\"puta\",\n\"peor\",\n\"general\",\n\"anda\",\n\"darle\",\n\"frente\",\n\"meses\",\n\"fuiste\",\n\"gusto\",\n\"uds\",\n\"hermana\",\n\"haremos\",\n\"recuerdas\",\n\"millones\",\n\"diga\",\n\"dejó\",\n\"pobre\",\n\"ido\",\n\"vio\",\n\"estaría\",\n\"arma\",\n\"hubo\",\n\"llevo\",\n\"palabra\",\n\"tendrá\",\n\"escuela\",\n\"seguridad\",\n\"sistema\",\n\"tendrás\",\n\"disculpe\",\n\"asunto\",\n\"diciendo\",\n\"nave\",\n\"aun\",\n\"plan\",\n\"estan\",\n\"traje\",\n\"sentido\",\n\"control\",\n\"siente\",\n\"sientes\",\n\"vive\",\n\"cambiar\",\n\"vivo\",\n\"trae\",\n\"pude\",\n\"decirte\",\n\"presidente\",\n\"grupo\",\n\"genial\",\n\"simplemente\",\n\"encontré\",\n\"malo\",\n\"sean\",\n\"venido\",\n\"tengas\",\n\"dale\",\n\"prueba\",\n\"propio\",\n\"tendré\",\n\"mató\",\n\"sola\",\n\"pasando\",\n\"doy\",\n\"juntos\",\n\"sentir\",\n\"perdido\",\n\"oportunidad\",\n\"gustan\",\n\"conseguir\",\n\"pueblo\",\n\"daré\",\n\"verte\",\n\"dijeron\",\n\"sale\",\n\"veras\",\n\"conoce\",\n\"puesto\",\n\"deje\",\n\"orden\",\n\"cambio\",\n\"perdón\",\n\"tendremos\",\n\"atrás\",\n\"través\",\n\"mitad\",\n\"fuerza\",\n\"dan\",\n\"ante\",\n\"carajo\",\n\"hablo\",\n\"espere\",\n\"libro\",\n\"ropa\",\n\"veré\",\n\"querido\",\n\"abajo\",\n\"campo\",\n\"decirle\",\n\"segura\",\n\"busca\",\n\"según\",\n\"derecho\",\n\"calle\",\n\"salió\",\n\"hablas\",\n\"cuántos\",\n\"cállate\",\n\"encanta\",\n\"ayudar\",\n\"junto\",\n\"asesino\",\n\"centro\",\n\"creía\",\n\"pagar\",\n\"vienen\",\n\"quise\",\n\"viaje\",\n\"estés\",\n\"porqué\",\n\"oficial\",\n\"posible\",\n\"oí\",\n\"acá\",\n\"baja\",\n\"estábamos\",\n\"mejores\",\n\"oficina\",\n\"hicieron\",\n\"tipos\",\n\"pase\",\n\"hacía\",\n\"pensaba\",\n\"creen\",\n\"modo\",\n\"entra\",\n\"dormir\",\n\"media\",\n\"pudiera\",\n\"seré\",\n\"salvar\",\n\"profesor\",\n\"excepto\",\n\"fuego\",\n\"ojalá\",\n\"largo\",\n\"línea\",\n\"verdadero\",\n\"siete\",\n\"ganar\",\n\"james\",\n\"sitio\",\n\"comprar\",\n\"ejército\",\n\"encuentra\",\n\"país\",\n\"extraño\",\n\"niña\",\n\"llega\",\n\"dólares\",\n\"placer\",\n\"decirme\",\n\"san\",\n\"dolor\",\n\"sala\",\n\"caballeros\",\n\"segunda\",\n\"ésa\",\n\"usa\",\n\"sueño\",\n\"iremos\",\n\"completamente\",\n\"totalmente\",\n\"coche\",\n\"afuera\",\n\"encima\",\n\"diría\",\n\"vuelto\",\n\"llamada\",\n\"estarás\",\n\"querida\",\n\"volveré\",\n\"señorita\",\n\"llegado\",\n\"querías\",\n\"bebé\",\n\"hazlo\",\n\"regresar\",\n\"bienvenido\",\n\"empezar\",\n\"terminar\",\n\"teléfono\",\n\"estaremos\",\n\"compañía\",\n\"necesitan\",\n\"pena\",\n\"puso\",\n\"serán\",\n\"sacar\",\n\"ataque\",\n\"apenas\",\n\"caja\",\n\"tome\",\n
\"sucede\",\n\"pone\",\n\"serás\",\n\"cielo\",\n\"podrían\",\n\"dejes\",\n\"hagan\",\n\"información\",\n\"programa\",\n\"duro\",\n\"acabó\",\n\"trabajando\",\n\"palabras\",\n\"negocio\",\n\"ambos\",\n\"trabaja\",\n\"noticias\",\n\"foto\",\n\"veamos\",\n\"decía\",\n\"cierra\",\n\"lindo\",\n\"oír\",\n\"quedan\",\n\"paz\",\n\"vayan\",\n\"quiera\",\n\"hospital\",\n\"acaso\",\n\"deseo\",\n\"pregunto\",\n\"servicio\",\n\"aire\",\n\"tranquilo\",\n\"semanas\",\n\"avión\",\n\"estados\",\n\"mente\",\n\"alrededor\",\n\"voz\",\n\"libre\",\n\"muchachos\",\n\"irme\",\n\"apuesto\",\n\"sigues\",\n\"especial\",\n\"dieron\",\n\"café\",\n\"mensaje\",\n\"anoche\",\n\"juro\",\n\"dejé\",\n\"debajo\",\n\"encontramos\",\n\"tenia\",\n\"santo\",\n\"maestro\",\n\"minuto\",\n\"fotos\",\n\"hayas\",\n\"estuviste\",\n\"habitación\",\n\"pareces\",\n\"alegro\",\n\"dejado\",\n\"darme\",\n\"respuesta\",\n\"hicimos\",\n\"música\",\n\"intento\",\n\"parecía\",\n\"dejaré\",\n\"sexo\",\n\"marido\",\n\"vengan\",\n\"darte\",\n\"ocurre\",\n\"listos\",\n\"habían\",\n\"esperen\",\n\"señal\",\n\"futuro\",\n\"permiso\",\n\"situación\",\n\"edad\",\n\"atención\",\n\"aquel\",\n\"debí\",\n\"cama\",\n\"vuelva\",\n\"amiga\",\n\"demás\",\n\"damas\",\n\"debía\",\n\"hacerte\",\n\"puedas\",\n\"club\",\n\"vámonos\",\n\"tendría\",\n\"esperaba\",\n\"llaman\",\n\"ninguno\",\n\"alegra\",\n\"nivel\",\n\"blanco\",\n\"negro\",\n\"tonto\",\n\"mantener\",\n\"estén\",\n\"causa\",\n\"vieja\",\n\"viendo\",\n\"linda\",\n\"déjeme\",\n\"tuviste\",\n\"raro\",\n\"boca\",\n\"ahi\",\n\"finalmente\",\n\"película\",\n\"salga\",\n\"preguntas\",\n\"llamó\",\n\"base\",\n\"tuvimos\",\n\"luna\",\n\"habías\",\n\"ocho\",\n\"mayoría\",\n\"vienes\",\n\"pido\",\n\"tenían\",\n\"llevó\",\n\"verás\",\n\"volvió\",\n\"solía\",\n\"daño\",\n\"tengan\",\n\"pon\",\n\"perdí\",\n\"llamaré\",\n\"empieza\",\n\"conocer\",\n\"estuviera\",\n\"ley\",\n\"quieras\",\n\"acabar\",\n\"podrá\",\n\"dra.\",\n\"dejo\",\n\"pelo\",\n\"gobierno\",\n\"pie\",\n\"corre\",\n\"últimos\",\n\"abrir\",\n\"siguiente\",\n\"misión\",\n\"llevas\",\n\"regreso\",\n\"existe\",\n\"quienes\",\n\"miles\",\n\"tanta\",\n\"encontrado\",\n\"secreto\",\n\"bonito\",\n\"montón\",\n\"hacerle\",\n\"barco\",\n\"entiendes\",\n\"maldición\",\n\"teniente\",\n\"carne\",\n\"irá\",\n\"real\",\n\"don\",\n\"regalo\",\n\"fuimos\"\n]","sub_path":"WhatsApp-Analyzer-master/es_cw.py","file_name":"es_cw.py","file_ext":"py","file_size_in_byte":9551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"37641085","text":"#!D:\\pythonWorkspaces\\venv\\Scripts\\python.exe\n# EASY-INSTALL-ENTRY-SCRIPT: 'sqlcmd==0.7.1','console_scripts','sqlcmd'\n__requires__ = 'sqlcmd==0.7.1'\nimport sys\nfrom pkg_resources import load_entry_point\n\nif __name__ == '__main__':\n sys.exit(\n load_entry_point('sqlcmd==0.7.1', 'console_scripts', 'sqlcmd')()\n )\n","sub_path":"venv/Scripts/sqlcmd-script.py","file_name":"sqlcmd-script.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"285604177","text":"class Rule:\r\n\r\n def __init__(self):\r\n self.conds = []\r\n self.cons = []\r\n\r\n def addCondition(self, cond):\r\n self.conds.append(cond)\r\n\r\n def addConsequence(self, cons):\r\n self.cons.append(cons)\r\n\r\n def satisfy(self, conds):\r\n if not conds:\r\n return False\r\n temprules = self.conds[:]\r\n instance = conds[0].split('(')[1].split(')')[0]\r\n for i in range(len(temprules)):\r\n splitted = 
temprules[i].split('(')\r\n            temprules[i] = splitted[0] + '(' + instance + ')'\r\n        for c in temprules:\r\n            if c not in conds:\r\n                return False\r\n        return True\r\n\r\n    def getConsequenceInstance(self, facts):\r\n        if not facts:\r\n            return []\r\n\r\n        tempcons = self.cons[:]\r\n        instance = facts[0].split('(')[1].split(')')[0]\r\n        for i in range(len(tempcons)):\r\n            tempcons[i] = tempcons[i].split('(')[0] + '(' + instance + ')'\r\n        return tempcons\r\n\r\n\r\ndef loadRules():\r\n    rules = []\r\n    rulein = input()\r\n    while rulein:\r\n        rulein = rulein.replace(\"if\", \"\")\r\n        condcons = rulein.split('then')\r\n        conds = condcons[0].split('and')\r\n        rule = Rule()\r\n        for c in conds:\r\n            c = c.strip()\r\n            rule.addCondition(c)\r\n        cons = condcons[1].split('and')\r\n        for c in cons:\r\n            c = c.strip()\r\n            rule.addConsequence(c)\r\n        rules.append(rule)\r\n        rulein = input()\r\n    return rules\r\n\r\ndef loadGeneric():\r\n    goals = []\r\n    goal = input()\r\n    while goal:\r\n        goals.append(goal)\r\n        goal = input()\r\n    return goals\r\n\r\ndef consNotInFacts(cons, facts):\r\n    for c in cons:\r\n        if c not in facts:\r\n            return True\r\n    return False\r\n\r\n\r\ndef tryHard(rules, goals, facts):\r\n    while True:\r\n        change = False\r\n        for r in rules:\r\n            if consNotInFacts(r.getConsequenceInstance(facts), facts) and r.satisfy(facts):\r\n                newfacts = r.getConsequenceInstance(facts)\r\n                facts += newfacts\r\n                print('We now know new facts:', *newfacts)\r\n                change = True\r\n        if not change:\r\n            break\r\n    for g in goals:\r\n        if g in facts:\r\n            print(\"Goal\", g, \": OK\")\r\n        else:\r\n            print(\"Goal\", g, \": FAIL\")\r\n\r\n\r\n\r\n\r\n\r\nprint(\"Please enter the rules (possible formats are described in the README). When you are finished, press ENTER two times:\")\r\nrules = loadRules()\r\nprint(\"Please enter the facts:\")\r\nfacts = loadGeneric()\r\nprint(\"Now enter the goals:\")\r\ngoals = loadGeneric()\r\ntryHard(rules, goals, facts)\r\n","sub_path":"Forward/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"419105066","text":"from ui.gongjiancanshu_ui import GongJianCanShu_UI\nfrom PyQt5.QtWidgets import QDialog, QAbstractItemView,QHeaderView,QMessageBox\nfrom PyQt5.QtGui import QDoubleValidator, QStandardItemModel, QStandardItem, QFont\nfrom util.database_util import getAllGongJianCanShu, deleteGongJianCanShu\nfrom PyQt5.QtCore import Qt\nfrom controller.AddGongJianCanShu import AddGongJianCanShu\n\n\nclass GongJianCanShu(QDialog, GongJianCanShu_UI):\n\n    def __init__(self, parent=None):\n        super(GongJianCanShu, self).__init__(parent)\n        self.setupUi(self)\n        self.table_click_flag = False\n        self.param_list_model = None\n        self.table_header = ['工件型号', '压力', '角度', 'Z起钻', 'Z止钻', '钻头转速', '钻速', '相机X', '相机Y', '相机Z', '游丝轴位', '游丝角度', '视觉代码']\n        self.initUI()\n        self.updateUI()\n\n    def initUI(self):\n        self.add_btn.clicked.connect(self.add)\n        self.del_btn.clicked.connect(self.delete)\n        self.refresh_btn.clicked.connect(self.refresh)\n\n    def updateUI(self):\n        canshu_list = getAllGongJianCanShu()\n        self.param_list_model = QStandardItemModel(len(canshu_list), 13)\n        self.param_list_model.setHorizontalHeaderLabels(self.table_header)\n\n        # 填充表格\n        for row in range(len(canshu_list)):\n            for col in range(13):\n                canshu = canshu_list[row].parser()\n                item = QStandardItem(canshu[col])\n                item.setTextAlignment(Qt.AlignCenter)\n                item.setFont(QFont('Times', 10, QFont.Black))\n                #print(str(sensorList[row][col]))\n                self.param_list_model.setItem(row, col, item)\n\n        self.gongjian_list_view.setModel(self.param_list_model)\n        self.gongjian_list_view.setEditTriggers(QAbstractItemView.NoEditTriggers)\n        self.gongjian_list_view.setSelectionMode(QAbstractItemView.SingleSelection)\n        self.gongjian_list_view.horizontalHeader().setSectionsClickable(False)\n        self.gongjian_list_view.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n        self.gongjian_list_view.setSelectionBehavior(QAbstractItemView.SelectRows)\n        self.gongjian_list_view.clicked.connect(self.tableClicked)\n\n        self.table_click_flag = False\n\n    def add(self):\n        self.addGongJian = AddGongJianCanShu()\n        self.addGongJian.show()\n\n\n    def delete(self):\n        if self.table_click_flag == True:\n            index = self.gongjian_list_view.currentIndex()\n            param = self.param_list_model.item(index.row(), 0)\n            param_name = param.text()\n            status = deleteGongJianCanShu(param_name)\n            if status == 0:\n                QMessageBox.information(self, '删除状态', '删除成功', QMessageBox.Yes)\n                self.param_list_model.removeRow(index.row())\n            elif status == 1:\n                QMessageBox.information(self, '删除状态', '删除失败', QMessageBox.Yes)\n        self.table_click_flag = False\n\n    def tableClicked(self):\n        self.table_click_flag = True\n\n    def refresh(self):\n        self.updateUI()\n","sub_path":"controller/GongJianCanShu.py","file_name":"GongJianCanShu.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"55868845","text":"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport re\nimport xlwt\nimport xlrd\nimport os\nimport DateGUI\nimport wx\n\nurl_news = DateGUI.DateGUi.ThisWeekOrOther() #預設網站Yahoo電影上映中電影\n#GET request from url and parse via BeautifulSoup\nresp = requests.get(url_news,timeout=3) #跟URL要求反爬\n#print(resp.text)\nresp.encoding = 'utf-8' #預設編碼UTF-8\nsoup = BeautifulSoup(resp.text,'lxml') #爬網頁code\n#print(soup.text)\nnum_page = 1 #上映中電影第一頁\ntimes_url_links = [] #時刻表URL陣列\nfilename = 'MovieData.xls' #預設Excel檔案名稱\nbook = xlwt.Workbook(encoding='utf-8') #創建並預設Excel編碼\nsheet1 = book.add_sheet(\"Sheet1\", cell_overwrite_ok=True) #表格頁設置名稱Sheet1\nName_DB = [] #電影名稱陣列Databsae\nDate_DB = [] #電影上映日期陣列Database\nTheater_DB = [] #電影院陣列Database\nTaps_DB = [] #模式陣列Database\nTimes_DB = [] #時刻表陣列Database\ndef Read_Excel(row,col):\n    try:\n        OpenExcel = xlrd.open_workbook(filename=filename) #Open 該Excel檔案\n        Find_sheet = OpenExcel.sheet_by_name(\"Sheet1\") #讀表\"Sheet1\"\n        Find_rows = Find_sheet.row_values(row) #讀列\n        Find_cols = Find_sheet.col_values(col) #讀行\n        Find_form = Find_sheet.cell_value(row,col) #讀表格\n    except Exception as e: #發生任何例外狀況 回傳空白\n        Find_form = \"\"\n    return Find_form\n\ndef Excel_Output(filename,row,col,MovieArray):\n    name_col = sheet1.col(0) #設定名稱行\n    burn_col = sheet1.col(1) #設定上映日期行\n    taps_col = sheet1.col(2) #設定模式行\n    theater_col = sheet1.col(3) #設定電影院行\n    times_col = sheet1.col(4) #設定時刻表行\n    Style = xlwt.XFStyle() #隨字串長度改行寬\n    first_rowW = ['電影名稱','上映時間','數位/IMAX','電影院','時刻表'] #設定第一列說明列陣列\n    for i in range(0,len(first_rowW),1): #說明列陣列寫入第一列\n        sheet1.write(0,i,first_rowW[i])\n    name_col.width = 256 * 40 #設定名稱行初始寬度\n    burn_col.width = 256 * 25 #設定上映日期行初始寬度\n    taps_col.width = 256 * 15 #設定模式行初始寬度\n    theater_col.width = 256 * 40 #設定電影院行初始寬度\n    times_col.width = 256 * 100 #設定時刻表行初始寬度\n    if(Read_Excel(row,col) == \"\"): #如讀取表格值=空白\n        sheet1.write(row,col,MovieArray) #寫入電影相關事項\n        book.save(filename) #儲存Excel檔案\ndef Times_Display(Times_URL):\n    Times_url_link = [] #各電影時刻表\n    Times_res = 
requests.get(Times_URL)\n Times_res.encoding = 'utf-8'\n TimesSoup = BeautifulSoup(Times_res.text,'lxml')\n Times_gabtn = TimesSoup.find_all('a',class_='btn_s_time gabtn') #取時刻表html\n for Times_url in Times_gabtn: #將時刻表網址輸入至電影時刻表陣列\n Times_url_link.append(Times_url.get('href')) #取時刻表url至陣列\n return Times_url_link\n\ndef Times_DisplayTaichung():\n Times_url_link = [] #各電影時刻表\n Times_urlList = list(Times_Display(url_news)) #將時刻表轉成List\n for i in range(0,len(Times_urlList),1): #將List[i]拆成i個字\n Times_urlString = list(Times_urlList[i])\n Times_urlString[49] = '?' #將/換成?\n Times_urlTaichung = \"\".join(Times_urlString) + \"&area_id=2\" #將網址換成只顯示台中地區之網址\n Times_url_link.append(Times_urlTaichung) #將轉換後網址塞入陣列\n return Times_url_link\n\ndef Times_DisplayTheater(i):\n TimesFuture = [] #未來時刻表陣列\n Times_res = requests.get(Times_DisplayTaichung()[i])\n Times_res.encoding = 'utf-8'\n TimesSoup = BeautifulSoup(Times_res.text,'lxml')\n Times_Theater = TimesSoup.find_all('li',class_='adds') #取電影院html\n Times_Taps = TimesSoup.find_all('li',class_='taps') #取模式html\n Times_Times = TimesSoup.find_all('li',class_='time _c') #取時刻表html\n regex = re.compile(\"\\d{2}:\\d{2}\") #正則表達式=TT:MM\n if(Times_Theater == []): #判斷台中的電影院是否都沒有時刻表\n Theater_DB.append(\"\") #塞入空白\n Taps_DB.append(\"台中的電影院沒有撥~\") #塞入字串並寫入Excel\n Times_DB.append(\"\") #塞入空白\n #print(\"台中的電影院沒有撥~\")\n else:\n for Times_select in Times_Times: #取時刻表內還沒上演的時間\n Times_Future = Times_select.ul('li',class_='select') #取時刻表內還沒上演的時間\n Times_Future_Re = regex.findall(str(Times_Future)) #將html轉成時間形式TT:MM\n TimesFuture.append(Times_Future_Re) #TT:MM塞入未來時刻表陣列\n for i in range(0,len(Times_Theater),1): \n Times_TheaterString = list(Times_Theater.pop(0).stripped_strings)#電影院list輸出\n Times_TapsString = list(Times_Taps.pop(0).stripped_strings) #模式list輸出\n Theater_DB.append(Times_TheaterString[0]) #塞入電影院名稱\n Taps_DB.append(Times_TapsString[0]) #塞入模式名稱\n Times_DB.append(TimesFuture[i]) #塞入時刻表\n #print(Times_TheaterString[0] + '\\t' + Times_TapsString[0] + '\\t'\n #+str(TimesFuture[i]))#印出電影院+模式+時刻表\napp = wx.App()\nLoadingCount = 0\nLoad_keepGoing = True\nif(Read_Excel(0,1) != \"\"): #檢查是否已有檔案存在\n os.remove(filename) #有則刪除檔案\nprint('Please wait a few minute.....')\nwhile(num_page > 0):\n resp = requests.get(url_news)\n resp.encoding = 'utf-8'\n soup = BeautifulSoup(resp.text,'lxml')\n rows_name = soup.find_all('div',class_='release_movie_name') #取名字html\n rows_date = soup.find_all('div',class_='release_movie_time') #取上映時間html\n progressMax = len(rows_name)\n Loading = wx.ProgressDialog(\"Wait\", \"Please wait a few minute.....\", progressMax,style=wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_AUTO_HIDE)\n for i in range(0,len(rows_name),1): #取每個電影名稱、上映時間、時刻表URL\n col_name = list(rows_name.pop(0).stripped_strings) #名字list輸出\n col_date = list(rows_date.pop(0).stripped_strings) #上映時間list輸出\n Name_DB.append(col_name[0])\n #print(col_name[0] + '\\t' + col_date[0]) #印出名字+上映時間\n print('.')\n LoadingCount = LoadingCount + 1\n wx.Sleep(0.00001)\n Load_keepGoing = Loading.Update(LoadingCount)\n for j in range(0,1,1):\n if(Read_Excel(0,1) == \"\"): #判斷(0,1)是否為空 空則寫入(0,1) 有則寫入電影院全部後一列\n Excel_Output(filename,j + 1,0,col_name[0]) #電影名稱寫入Excel\n Excel_Output(filename,j + 1,1,col_date[0]) #上映日期寫入Excel\n else:\n Excel_Output(filename, len(Theater_DB) + 1,0,col_name[0])#電影名稱寫入Excel\n Excel_Output(filename, len(Theater_DB) + 1,1,col_date[0])#上映日期寫入Excel\n if(len(Times_Display(url_news)) > i): #判斷是否有時刻表\n #print('時刻表 ' + str(Times_DisplayTaichung()[i])) #印出台中時刻表URL\n 
Times_DisplayTheater(i)\n #print('----------------------------------------------')\n else:\n #print('還沒有時刻表~')\n #print('----------------------------------------------')\n print('.')\n for j in range(0,len(Theater_DB),1):\n Excel_Output(filename,j + 1,2,Taps_DB[j]) #模式寫入Excel\n Excel_Output(filename,j + 1,3,Theater_DB[j]) #電影院寫入Excel\n Excel_Output(filename,j + 1,4,str(Times_DB[j])) #時刻表寫入Excel\n num_page = num_page + 1 #頁碼+1\n if(num_page == 2): #判斷限制頁數(預設限制爬到第1頁)\n num_page = 0 #超過則跳出迴圈\n break\n else:\n if(soup.find_all('li',class_='nexttxt disabled') == []): #判斷是否有下一頁、有則往下、else頁碼=0\n for next_text in soup.find_all('li',class_ = 'nexttxt'): #找li裡面的nexttxt\n for next_url in next_text.find_all('a'): #找li裡面的a\n url_news = next_url.get('href') #找a裡面的href中URL\n times_url_links.clear() #時刻表URL清除\n else:\n num_page = 0\nprint(\"Success!! Please view the Excel(MovieData)!!\")\n","sub_path":"MovieSearch/YahooMovie.py","file_name":"YahooMovie.py","file_ext":"py","file_size_in_byte":11108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"559198504","text":"class Tokenizer():\n def __init__(self):\n self.__romanComparisons = {\n \"M\":7,\n \"D\":6,\n \"C\":5,\n \"L\":4,\n \"X\":3,\n \"V\":2,\n \"I\":1\n }\n\n def generateTokens(self, numeral):\n if numeral.getNumeralType() == \"Arabic\":\n return str(numeral.getRawData())\n\n romanTokens = []\n prefixCount = 0\n data = numeral.getRawData()\n i = 0\n while i < len(data):\n if prefixCount > 1:\n print(\"Double prefixes not allowed\")\n return\n # keep us in range\n if len(data) != i+1:\n if self.__isRomanTokenSmallerThan(data[i], data[i+1]):\n prefixCount = prefixCount+1\n romanTokens.append(data[i]+data[i+1])\n i = i+2\n continue\n\n romanTokens.append(data[i])\n i = i + 1\n \n return romanTokens\n\n def __isRomanTokenSmallerThan(self, r1, r2):\n if self.__romanComparisons[r1] < self.__romanComparisons[r2]:\n return True\n return False\n","sub_path":"py/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"227924888","text":"\nimport pygtk\npygtk.require('2.0')\nimport gtk\nimport numpy as np\n\nfrom channel import Channel\n\nclass registers:\n SPG = 16 # input charge polarity\n SDP = 17 # disable at peak\n SBMX = 18 # route analog monitor to pdo output\n SBFT = 19 # analog output buffers enable tdo\n SBFP = 20 # analog output buffers enable pdo\n SBFM = 21 # analog output buffers enable mo\n SLG = 22 # leakage current disable\n SM = 23 # monitor multiplexing\n SCMX = 29 # monitor multiplexing enable\n SFA = 30 # ART enable\n SFAM = 31 # ART mode\n ST = 32 # peaking time\n SFM = 34 # UNKNOWN\n SG = 35 # gain\n SNG = 38 # neighbor triggering enable\n STOT = 39 # timing outputs control\n STTT = 40 # timing outputs enable\n SSH = 41 # sub-hysteresis discrimination enable\n STC = 42 # TAC slope adjustment\n SDT = 44 # course threshold DAC\n SDP = 54 # test pulse DAC\n SC10b = 65 # 10-bit ADC conversion time\n SC8b = 67 # 8-bit ADC conversion time\n SC6b = 70 # 6-bit ADC conversion time\n S8b = 71 # 8-bit ADC conversion mode\n S6b = 72 # 6-bit ADC conversion enable\n SPDC = 73 # ADCs enable\n SDCKS = 74 # dual clock edge serialized data enable\n SDCKA = 75 # dual clock edge serialized ART enable\n SDCK6b = 76 # dual clock edge serialized 6-bit enable\n SDRV = 77 # tristates analog outputs with token, used in analog mode\n STPP = 78 # timing outputs control 2\n\nclass 
VMM:\n\n def get_channel_val(self):\n for ch_num in range(64):\n chan_val = self.chan_list[ch_num].get_chan_val()\n for i in range(24):\n self.reg[ch_num][i] = chan_val[i]\n return self.reg\n\n # quick set functions\n def SP_qs_callback(self, widget):\n widget.set_label(\"p\" if widget.get_active() else \"n\")\n\n def quick_set(self, widget):\n\n for chan in self.chan_list:\n\n if self.check_button_SP_qs.get_active():\n chan.button_SP.set_active(True if self.toggle_button_SP.get_active() else False)\n chan.button_SP.set_label(\"p\" if self.toggle_button_SP.get_active() else \"n\")\n\n if self.check_button_SC_qs.get_active(): chan.button_SC.set_active(self.check_button_SC.get_active())\n if self.check_button_SL_qs.get_active(): chan.button_SL.set_active(self.check_button_SL.get_active())\n if self.check_button_ST_qs.get_active(): chan.button_ST.set_active(self.check_button_ST.get_active())\n if self.check_button_SM_qs.get_active(): chan.button_SM.set_active(self.check_button_SM.get_active())\n if self.check_button_SD_qs.get_active(): chan.combo_SD.set_active(self.combo_SD_qs.get_active())\n if self.check_button_SMX_qs.get_active(): chan.button_SMX.set_active(self.check_button_SMX.get_active())\n if self.check_button_SZ10b_qs.get_active(): chan.combo_SZ10b.set_active(self.combo_SZ10b_qs.get_active())\n if self.check_button_SZ8b_qs.get_active(): chan.combo_SZ8b.set_active(self.combo_SZ8b_qs.get_active())\n if self.check_button_SZ6b_qs.get_active(): chan.combo_SZ6b.set_active(self.combo_SZ6b_qs.get_active())\n \n def glob_callback(self, widget, register):\n self.globalreg[register] = 1 if widget.get_active() else 0\n\n def glob_SM_value(self, widget):\n word = '{0:06b}'.format(widget.get_active())\n for bit in xrange(len(word)):\n self.globalreg[registers.SM + bit] = int(word[bit])\n\n def glob_ST_value(self, widget):\n word = '{0:02b}'.format(widget.get_active())\n for bit in xrange(len(word)):\n self.globalreg[registers.ST + bit] = int(word[bit])\n\n def glob_SG_value(self, widget):\n word = '{0:03b}'.format(widget.get_active())\n for bit in xrange(len(word)):\n self.globalreg[registers.SG + bit] = int(word[bit])\n\n def glob_STC_value(self, widget):\n word = '{0:02b}'.format(widget.get_active())\n for bit in xrange(len(word)):\n self.globalreg[registers.STC + bit] = int(word[bit])\n\n def glob_SDT_entry(self, widget, entry):\n value = int(widget.get_text())\n if value < 0 or value > 1023:\n sys.exit(\"SDT value out of range\")\n word = '{0:010b}'.format(value)\n for bit in xrange(len(word)):\n self.globalreg[registers.SDT + bit] = int(word[bit])\n\n def glob_SDP_entry(self, widget, entry):\n value = int(widget.get_text())\n if value < 0 or value > 1023:\n sys.exit(\"SDP value out of range\")\n word = '{0:010b}'.format(value)\n for bit in xrange(len(word)):\n self.globalreg[registers.SDP + bit] = int(word[bit])\n\n def glob_SC10b_value(self, widget):\n word = '{0:02b}'.format(widget.get_active())\n for bit in xrange(len(word)):\n self.globalreg[registers.SC10b - bit] = int(word[bit]) # reversed!\n\n def glob_SC8b_value(self, widget):\n word = '{0:02b}'.format(widget.get_active())\n for bit in xrange(len(word)):\n self.globalreg[registers.SC8b - bit] = int(word[bit]) # reversed!\n\n def glob_SC6b_value(self, widget):\n word = '{0:03b}'.format(widget.get_active())\n for bit in xrange(len(word)):\n self.globalreg[registers.SC6b - bit] = int(word[bit]) # reversed!\n\n def __init__(self):\n self.channel_settings = np.zeros((64, 24), dtype=int)\n self.global_settings = np.zeros((96), dtype=int)\n 
self.chan_list = []\n self.reg = np.zeros((64, 24), dtype=int)\n self.msg = np.zeros((67), dtype=np.uint32)\n self.globalreg = np.zeros((96), dtype=int)\n \n #%%%%%%%%%%%%%%%%%% VMM WIDGETS %%%%%%%%%%%%%%%%%%%%%\n # 64 CHANNELS \n # CHANNEL LABELS \n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n self.label_channels = gtk.Label(\"Channel Configuration\")\n self.label_channels.set_markup('Channel Configuration')\n self.box_channels = gtk.HBox()\n self.box_channels.pack_start(self.label_channels, expand=False)\n\n self.label_Chan_num_a = gtk.Label(\" \\n \")\n self.label_Chan_SP_a = gtk.Label(\" S \\n P \")\n self.label_Chan_SP_a.set_markup(' S \\n P ')\n self.label_Chan_SC_a = gtk.Label(\"S\\nC\")\n self.label_Chan_SC_a.set_markup(' S \\n C ')\n self.label_Chan_ST_a = gtk.Label(\"S\\nL\")\n self.label_Chan_ST_a.set_markup(' S \\n L')\n self.label_Chan_SL_a = gtk.Label(\"S\\nT\")\n self.label_Chan_SL_a.set_markup(' S \\n T ')\n self.label_Chan_SM_a = gtk.Label(\"S\\nM\")\n self.label_Chan_SM_a.set_markup(' S \\n M ')\n self.label_Chan_SD_a = gtk.Label(\"SD\")\n self.label_Chan_SD_a.set_markup(' SD ')\n self.label_Chan_SMX_a = gtk.Label(\"S\\nM\\nX\")\n self.label_Chan_SMX_a.set_markup(' S \\n M \\n X ')\n self.label_Chan_SZ10b_a = gtk.Label(\"SZ10b\")\n self.label_Chan_SZ10b_a.set_markup(' SZ10b ')\n self.label_Chan_SZ8b_a = gtk.Label(\"SZ8b\")\n self.label_Chan_SZ8b_a.set_markup(' SZ8b ')\n self.label_Chan_SZ6b_a = gtk.Label(\"SZ6b\") \n self.label_Chan_SZ6b_a.set_markup(' SZ6b ')\n\n self.box_chan_labels_a = gtk.HBox()\n self.box_chan_labels_a.pack_start(self.label_Chan_num_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_SP_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_SC_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_ST_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_SL_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_SM_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_SD_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_SMX_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_SZ10b_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_SZ8b_a)\n self.box_chan_labels_a.pack_start(self.label_Chan_SZ6b_a)\n\n self.label_Chan_num_b = gtk.Label(\" \\n \")\n self.label_Chan_SP_b = gtk.Label(\" S \\n P \")\n self.label_Chan_SP_b.set_markup(' S \\n P ')\n self.label_Chan_SC_b = gtk.Label(\" S \\n C \")\n self.label_Chan_SC_b.set_markup(' S \\n C ')\n self.label_Chan_ST_b = gtk.Label(\" S \\n L \")\n self.label_Chan_ST_b.set_markup(' S \\n L ')\n self.label_Chan_SL_b = gtk.Label(\" S \\n T \")\n self.label_Chan_SL_b.set_markup(' S \\n T ')\n self.label_Chan_SM_b = gtk.Label(\"S \\nM \")\n self.label_Chan_SM_b.set_markup(' S \\n M ')\n self.label_Chan_SD_b = gtk.Label(\" SD \")\n self.label_Chan_SD_b.set_markup(' SD ')\n self.label_Chan_SMX_b = gtk.Label(\" S \\n M \\n X \")\n self.label_Chan_SMX_b.set_markup(' S \\n M \\n X ')\n self.label_Chan_SZ10b_b = gtk.Label(\" SZ10b \")\n self.label_Chan_SZ10b_b.set_markup(' SZ10b ')\n self.label_Chan_SZ8b_b = gtk.Label(\" SZ8b \")\n self.label_Chan_SZ8b_b.set_markup(' SZ8b ')\n self.label_Chan_SZ6b_b = gtk.Label(\" SZ6b \") \n self.label_Chan_SZ6b_b.set_markup(' SZ6b ')\n\n self.box_chan_labels_b = gtk.HBox()\n self.box_chan_labels_b.pack_start(self.label_Chan_num_b)\n self.box_chan_labels_b.pack_start(self.label_Chan_SP_b)\n self.box_chan_labels_b.pack_start(self.label_Chan_SC_b)\n self.box_chan_labels_b.pack_start(self.label_Chan_ST_b)\n 
self.box_chan_labels_b.pack_start(self.label_Chan_SL_b)\n self.box_chan_labels_b.pack_start(self.label_Chan_SM_b)\n self.box_chan_labels_b.pack_start(self.label_Chan_SD_b)\n self.box_chan_labels_b.pack_start(self.label_Chan_SMX_b)\n self.box_chan_labels_b.pack_start(self.label_Chan_SZ10b_b)\n self.box_chan_labels_b.pack_start(self.label_Chan_SZ8b_b)\n self.box_chan_labels_b.pack_start(self.label_Chan_SZ6b_b)\n\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # CHANNEL WIDGETS \n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n for chan_num in range(64):\n self.chan_list.append(Channel(chan_num))\n\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # QUICK SET WIDGETS \n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n self.button_quick_set = gtk.Button(\"QUICK SET\")\n self.button_quick_set.connect(\"clicked\",self.quick_set)\n self.button_quick_set.set_sensitive(True)\n\n self.check_button_SP_qs = gtk.CheckButton()\n self.check_button_SC_qs = gtk.CheckButton()\n self.check_button_SL_qs = gtk.CheckButton()\n self.check_button_ST_qs = gtk.CheckButton()\n self.check_button_SM_qs = gtk.CheckButton()\n self.check_button_SD_qs = gtk.CheckButton()\n self.check_button_SMX_qs = gtk.CheckButton()\n self.check_button_SZ10b_qs = gtk.CheckButton()\n self.check_button_SZ8b_qs = gtk.CheckButton()\n self.check_button_SZ6b_qs = gtk.CheckButton()\n\n self.toggle_button_SP = gtk.ToggleButton(label=\"n\")\n self.toggle_button_SP.connect(\"toggled\",self.SP_qs_callback)\n self.check_button_SC = gtk.CheckButton()\n self.check_button_SL = gtk.CheckButton()\n self.check_button_ST = gtk.CheckButton()\n self.check_button_SM = gtk.CheckButton()\n self.combo_SD_qs = gtk.combo_box_new_text()\n self.check_button_SMX = gtk.CheckButton()\n self.combo_SZ10b_qs = gtk.combo_box_new_text()\n self.combo_SZ8b_qs = gtk.combo_box_new_text()\n self.combo_SZ6b_qs = gtk.combo_box_new_text()\n\n #self.label_Chan_num_qs = gtk.Label(\" \\n \")\n self.label_Chan_SP_qs = gtk.Label(\"SP\")\n self.label_Chan_SC_qs = gtk.Label(\"SC\")\n self.label_Chan_SL_qs = gtk.Label(\"SL\")\n self.label_Chan_ST_qs = gtk.Label(\"ST\")\n self.label_Chan_SM_qs = gtk.Label(\"SM\")\n self.label_Chan_SD_qs = gtk.Label(\"SD\")\n self.label_Chan_SMX_qs = gtk.Label(\"SMX\")\n self.label_Chan_SZ10b_qs = gtk.Label(\"SZ10b\")\n self.label_Chan_SZ8b_qs = gtk.Label(\"SZ8b\")\n self.label_Chan_SZ6b_qs = gtk.Label(\"SZ6b\")\n\n for i in range(16):\n self.combo_SD_qs.append_text(str(i) + \" mv\")\n self.combo_SD_qs.set_active(0)\n for i in range(32):\n self.combo_SZ10b_qs.append_text(str(i) + \" ns\")\n self.combo_SZ10b_qs.set_active(0)\n for i in range(16):\n self.combo_SZ8b_qs.append_text(str(i) + \" ns\")\n self.combo_SZ8b_qs.set_active(0)\n for i in range(8):\n self.combo_SZ6b_qs.append_text(str(i) + \" ns\")\n self.combo_SZ6b_qs.set_active(0)\n\n self.check_button_SP_qs.set_sensitive(True)\n self.check_button_SC_qs.set_sensitive(True)\n self.check_button_SL_qs.set_sensitive(True)\n self.check_button_ST_qs.set_sensitive(True)\n self.check_button_SM_qs.set_sensitive(True)\n self.check_button_SD_qs.set_sensitive(True)\n self.check_button_SMX_qs.set_sensitive(True)\n self.check_button_SZ10b_qs.set_sensitive(True)\n self.check_button_SZ8b_qs.set_sensitive(True)\n self.check_button_SZ6b_qs.set_sensitive(True)\n\n self.toggle_button_SP.set_sensitive(True)\n self.check_button_SC.set_sensitive(True)\n self.check_button_SL.set_sensitive(True)\n self.check_button_ST.set_sensitive(True)\n 
self.check_button_SM.set_sensitive(True)\n self.combo_SD_qs.set_sensitive(True)\n self.check_button_SMX.set_sensitive(True)\n self.combo_SZ10b_qs.set_sensitive(True)\n self.combo_SZ8b_qs.set_sensitive(True)\n self.combo_SZ6b_qs.set_sensitive(True)\n\n self.qs_table = gtk.Table(rows=5, columns=10, homogeneous=False)\n self.qs_table.attach(self.label_Chan_SP_qs, left_attach=0, right_attach=1, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SP_qs, left_attach=0, right_attach=1, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.toggle_button_SP, left_attach=0, right_attach=1, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.label_Chan_SC_qs, left_attach=1, right_attach=2, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SC_qs, left_attach=1, right_attach=2, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SC, left_attach=1, right_attach=2, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.label_Chan_SL_qs, left_attach=2, right_attach=3, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SL_qs, left_attach=2, right_attach=3, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SL, left_attach=2, right_attach=3, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.label_Chan_ST_qs, left_attach=3, right_attach=4, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_ST_qs, left_attach=3, right_attach=4, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_ST, left_attach=3, right_attach=4, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.label_Chan_SM_qs, left_attach=4, right_attach=5, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SM_qs, left_attach=4, right_attach=5, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SM, left_attach=4, right_attach=5, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.label_Chan_SD_qs, left_attach=5, right_attach=6, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SD_qs, left_attach=5, right_attach=6, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.combo_SD_qs, left_attach=5, right_attach=6, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.label_Chan_SMX_qs, left_attach=6, right_attach=7, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SMX_qs, left_attach=6, right_attach=7, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SMX, left_attach=6, right_attach=7, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n 
self.qs_table.attach(self.label_Chan_SZ10b_qs, left_attach=7, right_attach=8, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SZ10b_qs, left_attach=7, right_attach=8, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.combo_SZ10b_qs, left_attach=7, right_attach=8, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.label_Chan_SZ8b_qs, left_attach=8, right_attach=9, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SZ8b_qs, left_attach=8, right_attach=9, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.combo_SZ8b_qs, left_attach=8, right_attach=9, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.label_Chan_SZ6b_qs, left_attach=9, right_attach=10, top_attach=0, bottom_attach=1, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.check_button_SZ6b_qs, left_attach=9, right_attach=10, top_attach=1, bottom_attach=2, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.combo_SZ6b_qs, left_attach=9, right_attach=10, top_attach=2, bottom_attach=3, xpadding=0, xoptions=gtk.SHRINK, ypadding=0)\n self.qs_table.attach(self.button_quick_set, left_attach=0, right_attach=10, top_attach=4, bottom_attach=5, xpadding=0, ypadding=0)\n\n ##################### vmm Global ###################\n ##################### variables ###################\n\n self.label_Global = gtk.Label(\"VMM Configuration\")\n self.label_Global.set_markup('VMM Configuration')\n self.label_Global.set_justify(gtk.JUSTIFY_CENTER)\n self.box_Global = gtk.HBox()\n self.box_Global.pack_start(self.label_Global, expand=False)\n\n self.label_vmm_number = gtk.Label(\"VMM #\")\n self.label_vmm_number.set_markup('VMM #')\n self.label_vmm_number.set_justify(gtk.JUSTIFY_CENTER)\n self.combo_vmm_number = gtk.combo_box_new_text()\n for ivmm in xrange(8):\n self.combo_vmm_number.append_text(str(ivmm))\n self.combo_vmm_number.append_text(\"all\")\n self.box_vmm_number = gtk.HBox()\n self.box_vmm_number.pack_start(self.label_vmm_number, expand=False)\n self.box_vmm_number.pack_start(self.combo_vmm_number, expand=False)\n\n self.check_button_SPG = gtk.CheckButton() \n self.check_button_SPG.connect(\"toggled\", self.glob_callback, registers.SPG)\n self.label_SPG = gtk.Label(\"Input Charge Polarity\")\n self.label_SPG.set_markup('Input Charge Polarity ')\n self.label_SPGa = gtk.Label(\" spg\") \n self.box_SPG = gtk.HBox()\n self.box_SPG.pack_start(self.label_SPG, expand=False) \n self.box_SPG.pack_start(self.check_button_SPG, expand=False)\n self.box_SPG.pack_start(self.label_SPGa, expand=False)\n \n self.check_button_SBMX = gtk.CheckButton(\"\")\n self.check_button_SBMX.connect(\"toggled\", self.glob_callback, registers.SBMX)\n self.check_button_SBMX.set_active(0)\n self.label_SBMX = gtk.Label(\"Route Analog Monitor to PDO Output\")\n self.label_SBMX.set_markup('Route Analog Monitor to PDO Output ')\n self.label_SBMXa = gtk.Label(\" sbmx\")\n self.box_SBMX = gtk.HBox()\n self.box_SBMX.pack_start(self.label_SBMX, expand=False)\n self.box_SBMX.pack_start(self.check_button_SBMX, expand=False)\n self.box_SBMX.pack_start(self.label_SBMXa, expand=False)\n\n self.check_button_SDP = gtk.CheckButton()\n self.check_button_SDP.connect(\"toggled\", self.glob_callback, registers.SDP)\n 
self.label_SDP = gtk.Label(\"Disable-at-Peak\")\n self.label_SDP.set_markup('Disable-at-Peak ')\n self.label_SDPa = gtk.Label(\" sdp\")\n self.box_SDP = gtk.HBox()\n self.box_SDP.pack_start(self.label_SDP, expand=False)\n self.box_SDP.pack_start(self.check_button_SDP, expand=False)\n self.box_SDP.pack_start(self.label_SDPa, expand=False)\n\n self.check_button_SBFT = gtk.CheckButton(\"TDO\")\n self.check_button_SBFT.connect(\"toggled\", self.glob_callback, registers.SBFT)\n self.check_button_SBFT.set_active(1)\n self.check_button_SBFP = gtk.CheckButton(\"PDO\")\n self.check_button_SBFP.connect(\"toggled\", self.glob_callback, registers.SBFP)\n self.check_button_SBFP.set_active(1)\n self.check_button_SBFM = gtk.CheckButton(\"MO\")\n self.check_button_SBFM.connect(\"toggled\", self.glob_callback, registers.SBFM)\n self.check_button_SBFM.set_active(1)\n self.label_SBXX = gtk.Label(\"Analog Output Buffers:\")\n self.label_SBXX.set_markup('Analog Output Buffers ')\n self.box_SBXX = gtk.HBox()\n self.box_SBXX.pack_start(self.label_SBXX, expand=False)\n self.box_SBXX.pack_start(self.check_button_SBFT, expand=False)\n self.box_SBXX.pack_start(self.check_button_SBFP, expand=False)\n self.box_SBXX.pack_start(self.check_button_SBFM, expand=False)\n \n self.check_button_SLG = gtk.CheckButton() \n self.check_button_SLG.connect(\"toggled\", self.glob_callback, registers.SLG)\n self.label_SLG = gtk.Label(\"Leakage Current Disable\")\n self.label_SLG.set_markup('Leakage Current Disable ') \n self.label_SLGa = gtk.Label(\" slg\")\n self.box_SLG = gtk.HBox()\n self.box_SLG.pack_start(self.label_SLG, expand=False) \n self.box_SLG.pack_start(self.check_button_SLG, expand=False)\n self.box_SLG.pack_start(self.label_SLGa, expand=False)\n\n self.label_SM = gtk.Label(\" Monitor\")\n self.label_SM.set_markup(' Monitor ')\n self.combo_SM = gtk.combo_box_new_text()\n self.combo_SM.connect(\"changed\", self.glob_SM_value)\n self.combo_SM.append_text(\"CHN 1\")\n self.combo_SM.append_text(\"CHN 2 | pulser DAC\")\n self.combo_SM.append_text(\"CHN 3 | threshold DAC\")\n self.combo_SM.append_text(\"CHN 4 | band-gap ref\")\n self.combo_SM.append_text(\"CHN 5 | temp\")\n for i in range(5, 64):\n self.combo_SM.append_text(\"CHN \" + str(i+1))\n self.combo_SM.set_active(8)\n\n self.label_SCMX = gtk.Label(\" scmx\")\n self.label_SCMX.set_markup('SCMX ')\n self.check_button_SCMX = gtk.CheckButton() \n self.check_button_SCMX.connect(\"toggled\", self.glob_callback, registers.SCMX)\n self.check_button_SCMX.set_active(1)\n self.box_SCMX = gtk.HBox()\n self.box_SCMX.pack_start(self.label_SCMX, expand=False)\n self.box_SCMX.pack_start(self.check_button_SCMX, expand=False)\n self.box_SCMX.pack_start(self.label_SM, expand=False) \n self.box_SCMX.pack_start(self.combo_SM, expand=False)\n\n self.label_SFA = gtk.Label(\"ART Enable\")\n self.label_SFA.set_markup('ART Enable ') \n self.check_button_SFA = gtk.CheckButton()\n self.check_button_SFA.connect(\"toggled\", self.glob_callback, registers.SFA)\n self.check_button_SFA.set_active(True)\n self.label_SFAa = gtk.Label(\" sfa\")\n self.label_mode_SFAM = gtk.Label(\" Mode \")\n self.label_mode_SFAM.set_markup(' Mode ')\n self.combo_SFAM = gtk.combo_box_new_text()\n self.combo_SFAM.connect(\"changed\", self.glob_callback, registers.SFAM)\n self.combo_SFAM.append_text(\"timing-at-threshold\") \n self.combo_SFAM.append_text(\"timing-at-peak\")\n self.combo_SFAM.set_active(0)\n self.label_SFAM = gtk.Label(\" sfam\")\n self.box_SFAM = gtk.HBox()\n self.box_SFAM.pack_start(self.label_SFA, 
expand=False)\n self.box_SFAM.pack_start(self.check_button_SFA, expand=False)\n self.box_SFAM.pack_start(self.label_SFAa, expand=False)\n self.box_SFAM.pack_start(self.label_mode_SFAM, expand=False)\n self.box_SFAM.pack_start(self.combo_SFAM, expand=False)\n self.box_SFAM.pack_start(self.label_SFAM, expand=False)\n\n self.label_Var_ST = gtk.Label(\"Peaking Time\")\n self.label_Var_ST.set_markup('Peaking Time ')\n self.combo_ST = gtk.combo_box_new_text()\n self.combo_ST.connect(\"changed\",self.glob_ST_value)\n self.combo_ST.append_text(\"200 ns\")\n self.combo_ST.append_text(\"100 ns\")\n self.combo_ST.append_text(\"50 ns\")\n self.combo_ST.append_text(\"25 ns\")\n self.combo_ST.set_active(0)\n self.label_ST = gtk.Label(\" st\")\n self.box_ST = gtk.HBox()\n self.box_ST.pack_start(self.label_Var_ST, expand=False)\n self.box_ST.pack_start(self.combo_ST, expand=False)\n self.box_ST.pack_start(self.label_ST, expand=False)\n\n self.check_button_SFM = gtk.CheckButton()\n self.label_SFM = gtk.Label(\"SFM\")\n self.label_SFM.set_markup('SFM ') \n self.check_button_SFM.connect(\"toggled\", self.glob_callback, registers.SFM)\n self.check_button_SFM.set_active(1)\n self.label_SFMb = gtk.Label(\" Doubles the Leakage Current\")\n self.label_SFMb.set_markup(' (Doubles the Leakage Current)') \n self.box_SFM = gtk.HBox()\n self.box_SFM.pack_start(self.label_SFM, expand=False) \n self.box_SFM.pack_start(self.check_button_SFM, expand=False)\n self.box_SFM.pack_start(self.label_SFMb, expand=False)\n\n self.label_Var_SG = gtk.Label(\"Gain\")\n self.label_Var_SG.set_markup('Gain ')\n self.combo_SG = gtk.combo_box_new_text()\n self.combo_SG.connect(\"changed\",self.glob_SG_value)\n self.combo_SG.append_text(\"0.5 (000)\") \n self.combo_SG.append_text(\"1 (001)\")\n self.combo_SG.append_text(\"3 (010)\")\n self.combo_SG.append_text(\"4.5 (011)\")\n self.combo_SG.append_text(\"6 (100)\")\n self.combo_SG.append_text(\"9 (101)\")\n self.combo_SG.append_text(\"12 (110)\")\n self.combo_SG.append_text(\"16 (111)\")\n self.combo_SG.set_active(5)\n self.label_SG = gtk.Label(\" (mV/fC) sg\")\n self.box_SG = gtk.HBox()\n self.box_SG.pack_start(self.label_Var_SG, expand=False)\n self.box_SG.pack_start(self.combo_SG, expand=False)\n self.box_SG.pack_start(self.label_SG, expand=False)\n\n self.check_button_SNG = gtk.CheckButton() \n self.label_SNG = gtk.Label(\"Neighbor Triggering\")\n self.label_SNG.set_markup('Neighbor Triggering ') \n self.check_button_SNG.connect(\"toggled\", self.glob_callback, registers.SNG)\n self.label_SNGa = gtk.Label(\" sng\")\n self.box_SNG = gtk.HBox()\n self.box_SNG.pack_start(self.label_SNG, expand=False) \n self.box_SNG.pack_start(self.check_button_SNG,expand=False)\n self.box_SNG.pack_start(self.label_SNGa, expand=False) \n\n self.label_STTT = gtk.Label(\"Timing Outputs\")\n self.label_STTT.set_markup('Timing Outputs ')\n self.check_button_STTT = gtk.CheckButton()\n self.check_button_STTT.connect(\"toggled\", self.glob_callback, registers.STTT)\n self.label_STTTa = gtk.Label(\" sttt\")\n self.label_mode_STOT = gtk.Label(\" Mode \")\n self.label_mode_STOT.set_markup(' Mode ')\n self.combo_STOT = gtk.combo_box_new_text()\n self.combo_STOT.connect(\"changed\", self.glob_callback, registers.STOT)\n self.combo_STOT.append_text(\"threshold-to-peak\")\n self.combo_STOT.append_text(\"time-over-threshold\")\n self.combo_STOT.set_active(0) \n self.label_STOT = gtk.Label(\" stot\")\n self.box_STXX = gtk.HBox()\n self.box_STXX.pack_start(self.label_STTT, expand=False) \n 
self.box_STXX.pack_start(self.check_button_STTT, expand=False)\n self.box_STXX.pack_start(self.label_STTTa, expand=False)\n self.box_STXX.pack_start(self.label_mode_STOT, expand=False)\n self.box_STXX.pack_start(self.combo_STOT, expand=False)\n self.box_STXX.pack_start(self.label_STOT, expand=False)\n\n self.label_SSH = gtk.Label(\"Sub-Hysteresis\\nDiscrimination\") \n self.label_SSH.set_markup('Sub-Hysteresis \\nDiscrimination')\n self.check_button_SSH = gtk.CheckButton()\n self.check_button_SSH.connect(\"toggled\", self.glob_callback, registers.SSH)\n self.label_SSHa = gtk.Label(\" ssh\")\n self.box_SSH = gtk.HBox()\n self.box_SSH.pack_start(self.label_SSH, expand=False) \n self.box_SSH.pack_start(self.check_button_SSH, expand=False)\n self.box_SSH.pack_start(self.label_SSHa, expand=False)\n\n self.label_STPP = gtk.Label(\"Timing Outputs Control 2\") \n self.label_STPP.set_markup('Timing Outputs Control 2 ')\n self.check_button_STPP = gtk.CheckButton()\n self.check_button_STPP.connect(\"toggled\", self.glob_callback, registers.STPP)\n self.label_STPPa = gtk.Label(\" stpp\")\n self.box_STPP = gtk.HBox()\n self.box_STPP.pack_start(self.label_STPP, expand=False) \n self.box_STPP.pack_start(self.check_button_STPP, expand=False)\n self.box_STPP.pack_start(self.label_STPPa, expand=False)\n\n self.label_Var_STC = gtk.Label(\"TAC Slope\")\n self.label_Var_STC.set_markup('TAC Slope ') \n self.combo_STC = gtk.combo_box_new_text()\n self.combo_STC.connect(\"changed\", self.glob_STC_value)\n self.combo_STC.append_text(\"125 ns (00)\") \n self.combo_STC.append_text(\"250 ns (01)\")\n self.combo_STC.append_text(\"500 ns (10)\")\n self.combo_STC.append_text(\"1000 ns (11)\")\n self.combo_STC.set_active(2)\n self.label_STC = gtk.Label(\" stc\")\n self.box_STC = gtk.HBox()\n self.box_STC.pack_start(self.label_Var_STC, expand=False)\n self.box_STC.pack_start(self.combo_STC, expand=False)\n self.box_STC.pack_start(self.label_STC, expand=False)\n\n self.label_Var_SDT = gtk.Label(\"Threshold DAC\")\n self.label_Var_SDT.set_markup('Threshold DAC ')\n self.entry_SDT = gtk.Entry(max=4)\n self.entry_SDT.set_text(\"300\")\n self.entry_SDT.connect(\"focus-out-event\", self.glob_SDT_entry)\n self.entry_SDT.connect(\"activate\", self.glob_SDT_entry, self.entry_SDT)\n\n self.label_SDT = gtk.Label()\n self.box_SDT = gtk.HBox()\n self.box_SDT.pack_start(self.label_Var_SDT, expand=False)\n self.box_SDT.pack_start(self.entry_SDT, expand=False)\n #self.box_SDT.pack_start(self.combo_SDT)\n self.box_SDT.pack_start(self.label_SDT, expand=False)\n #self.box_SDT.pack_start(self.label_Var_SDTb, expand=False)\n\n self.label_Var_SDP_ = gtk.Label(\"Test Pulse DAC\")\n self.label_Var_SDP_.set_markup('Test Pulse DAC ')\n self.entry_SDP_ = gtk.Entry(max=4)\n self.entry_SDP_.set_text(\"300\")\n self.entry_SDP_.connect(\"focus-out-event\", self.glob_SDP_entry ) #,self.entry_SDP_\n self.entry_SDP_.connect(\"activate\", self.glob_SDP_entry, self.entry_SDP_)\n\n self.label_SDP_ = gtk.Label()\n self.box_SDP_ = gtk.HBox()\n self.box_SDP_.pack_start(self.label_Var_SDP_,expand=False)\n self.box_SDP_.pack_start(self.entry_SDP_,expand=False)\n self.box_SDP_.pack_start(self.label_SDP_,expand=False)\n\n self.label_variable1 = gtk.Label(\" \\n \")\n self.label_variable2 = gtk.Label(\" \\n \")\n self.label_variable3 = gtk.Label(\" \\n \")\n self.label_variable4 = gtk.Label(\" \\n \") \n self.label_variable5 = gtk.Label(\" \")\n self.label_variable6 = gtk.Label(\"Values for Threshold and Test Pulse :\") \n self.label_variable6.set_markup('Values for 
Threshold and Test Pulse :') \n self.label_variable7 = gtk.Label(\" \\n \")\n self.label_variable9 = gtk.Label(\" \\n \") \n self.label_variable10 = gtk.Label(\" 0 <= x <= 1023\")\n self.label_variable11 = gtk.Label(\" \")\n self.label_variable12 = gtk.Label(\"to Set the Values for SDT and SDP_\")\n self.label_variable12.set_markup('to Set the Values for SDT and SDP_')\n self.box_SDP_SDT = gtk.HBox()\n self.box_SDP_SDT.pack_start(self.label_variable6,expand=False)\n self.box_SDP_SDT.pack_start(self.label_variable10,expand=False)\n\n self.label_Var_SC10b = gtk.Label(\"10-bit Conversion Time\")\n self.label_Var_SC10b.set_markup('10-bit Conversion Time ')\n self.combo_SC10b = gtk.combo_box_new_text()\n self.combo_SC10b.connect(\"changed\", self.glob_SC10b_value)\n self.combo_SC10b.append_text(\"0 ns (00)\")\n self.combo_SC10b.append_text(\"1 ns (10)\")\n self.combo_SC10b.append_text(\"2 ns (01)\")\n self.combo_SC10b.append_text(\"3 ns (11)\")\n self.combo_SC10b.set_active(0)\n self.label_SC10b = gtk.Label(\" sc10b\")\n self.box_SC10b = gtk.HBox()\n self.box_SC10b.pack_start(self.label_Var_SC10b, expand=False)\n self.box_SC10b.pack_start(self.combo_SC10b, expand=False)\n self.box_SC10b.pack_start(self.label_SC10b, expand=False)\n\n self.label_Var_SC8b = gtk.Label(\"8-bit Conversion Time\")\n self.label_Var_SC8b.set_markup('8-bit Conversion Time ')\n self.combo_SC8b = gtk.combo_box_new_text()\n self.combo_SC8b.connect(\"changed\", self.glob_SC8b_value)\n self.combo_SC8b.append_text(\"0 ns (00)\")\n self.combo_SC8b.append_text(\"1 ns (10)\")\n self.combo_SC8b.append_text(\"2 ns (01)\")\n self.combo_SC8b.append_text(\"3 ns (11)\")\n self.combo_SC8b.set_active(0)\n self.label_SC8b = gtk.Label(\" sc8b\")\n self.box_SC8b = gtk.HBox()\n self.box_SC8b.pack_start(self.label_Var_SC8b, expand=False)\n self.box_SC8b.pack_start(self.combo_SC8b, expand=False)\n self.box_SC8b.pack_start(self.label_SC8b, expand=False)\n\n self.label_Var_SC6b = gtk.Label(\"6-bit Conversion Time\")\n self.label_Var_SC6b.set_markup('6-bit Conversion Time ')\n self.combo_SC6b = gtk.combo_box_new_text()\n self.combo_SC6b.connect(\"changed\", self.glob_SC6b_value)\n self.combo_SC6b.append_text(\"0 ns (000)\")\n self.combo_SC6b.append_text(\"1 ns (100)\")\n self.combo_SC6b.append_text(\"2 ns (010)\")\n self.combo_SC6b.append_text(\"3 ns (110)\")\n self.combo_SC6b.append_text(\"4 ns (001)\")\n self.combo_SC6b.append_text(\"5 ns (101)\")\n self.combo_SC6b.append_text(\"6 ns (011)\")\n self.combo_SC6b.append_text(\"7 ns (111)\")\n self.combo_SC6b.set_active(0)\n self.label_Var_SC6ba = gtk.Label(\" sc6b\")\n self.box_SC6b = gtk.HBox()\n self.box_SC6b.pack_start(self.label_Var_SC6b, expand=False)\n self.box_SC6b.pack_start(self.combo_SC6b, expand=False)\n self.box_SC6b.pack_start(self.label_Var_SC6ba, expand=False)\n\n self.label_S6b = gtk.Label(\"6-bit ADC Enable\")\n self.label_S6b.set_markup('6-bit ADC Enable ')\n self.check_button_S6b = gtk.CheckButton() \n self.check_button_S6b.connect(\"toggled\", self.glob_callback, registers.S6b)\n self.check_button_S6b.set_active(False)\n self.label_S6ba = gtk.Label(\"Disables 8 & 10 bit ADC\")\n self.label_S6ba.set_markup(' (Disables 8 & 10 bit ADC)')\n self.label_S6bb = gtk.Label(\" s6b\")\n self.box_S6b = gtk.HBox()\n self.box_S6b.pack_start(self.label_S6b, expand=False)\n self.box_S6b.pack_start(self.check_button_S6b, expand=False)\n self.box_S6b.pack_start(self.label_S6ba, expand=False)\n self.box_S6b.pack_start(self.label_S6bb, expand=False)\n\n self.label_Var_S8b = gtk.Label(\"8-bit ADC 
Mode\")\n self.label_Var_S8b.set_markup('8-bit ADC Mode ')\n self.combo_S8b = gtk.CheckButton()\n self.combo_S8b.connect(\"toggled\", self.glob_callback, registers.S8b)\n self.combo_S8b.set_active(1)\n self.label_Var_S8ba = gtk.Label(\" s8b\")\n self.box_S8b = gtk.HBox()\n self.box_S8b.pack_start(self.label_Var_S8b, expand=False)\n self.box_S8b.pack_start(self.combo_S8b, expand=False)\n self.box_S8b.pack_start(self.label_Var_S8ba, expand=False)\n\n self.label_Var_SPDC = gtk.Label(\"ADCs Enable\")\n self.label_Var_SPDC.set_markup('ADCs Enable ')\n self.button_SPDC = gtk.CheckButton()\n self.button_SPDC.connect(\"toggled\", self.glob_callback, registers.SPDC)\n self.button_SPDC.set_active(1)\n self.label_Var_SPDCa = gtk.Label(\" spdc\")\n self.box_SPDC = gtk.HBox()\n self.box_SPDC.pack_start(self.label_Var_SPDC, expand=False)\n self.box_SPDC.pack_start(self.button_SPDC, expand=False)\n self.box_SPDC.pack_start(self.label_Var_SPDCa, expand=False)\n\n self.label_SDCKS = gtk.Label(\"Dual Clock Edge\\nSerialized Data Enable\\n\") \n self.label_SDCKS.set_markup('Dual Clock Edge\\nSerialized Data Enable\\n ')\n self.check_button_SDCKS = gtk.CheckButton()\n self.check_button_SDCKS.connect(\"toggled\", self.glob_callback, registers.SDCKS)\n self.label_SDCKSa = gtk.Label(\" sdcks\")\n self.box_SDCKS = gtk.HBox()\n self.box_SDCKS.pack_start(self.label_SDCKS, expand=False) \n self.box_SDCKS.pack_start(self.check_button_SDCKS, expand=False)\n self.box_SDCKS.pack_start(self.label_SDCKSa, expand=False)\n\n self.label_SDCKA = gtk.Label(\"Dual Clock Edge\\nSerialized ART Enable\\n\") \n self.label_SDCKA.set_markup('Dual Clock Edge\\nSerialized ART Enable\\n ')\n self.check_button_SDCKA = gtk.CheckButton()\n self.check_button_SDCKA.connect(\"toggled\", self.glob_callback, registers.SDCKA)\n self.label_SDCKAa = gtk.Label(\" sdcka\")\n self.box_SDCKA = gtk.HBox()\n self.box_SDCKA.pack_start(self.label_SDCKA, expand=False) \n self.box_SDCKA.pack_start(self.check_button_SDCKA, expand=False)\n self.box_SDCKA.pack_start(self.label_SDCKAa, expand=False)\n\n self.label_SDCK6b = gtk.Label(\"Dual Clock Edge\\nSerialized 6-bit Enable\\n\") \n self.label_SDCK6b.set_markup('Dual Clock Edge\\nSerialized 6-bit Enable\\n ')\n self.check_button_SDCK6b = gtk.CheckButton()\n self.check_button_SDCK6b.connect(\"toggled\", self.glob_callback, registers.SDCK6b)\n self.label_SDCK6ba = gtk.Label(\" sdck6b\")\n self.box_SDCK6b = gtk.HBox()\n self.box_SDCK6b.pack_start(self.label_SDCK6b, expand=False) \n self.box_SDCK6b.pack_start(self.check_button_SDCK6b, expand=False)\n self.box_SDCK6b.pack_start(self.label_SDCK6ba, expand=False)\n\n self.label_SDRV = gtk.Label(\"Tristates Analog Outputs\") \n self.label_SDRV.set_markup('Tristates Analog Outputs ')\n self.check_button_SDRV = gtk.CheckButton()\n self.check_button_SDRV.connect(\"toggled\", self.glob_callback, registers.SDRV)\n self.check_button_SDRV.set_active(0)\n self.label_SDRVa = gtk.Label(\" sdrv\")\n self.box_SDRV = gtk.HBox()\n self.box_SDRV.pack_start(self.label_SDRV, expand=False) \n self.box_SDRV.pack_start(self.check_button_SDRV, expand=False)\n self.box_SDRV.pack_start(self.label_SDRVa, expand=False)\n\n self.box_var_labels = gtk.VBox()\n self.box_var_labels.set_border_width(10)\n self.box_var_labels.pack_start(self.label_variable1)\n self.box_var_labels.pack_start(self.label_variable2)\n self.box_var_labels.pack_start(self.label_variable3)\n self.frame_qs = gtk.Frame()\n self.frame_qs.set_shadow_type(gtk.SHADOW_OUT)\n self.frame_qs.set_label(\"QUICK SET\")\n 
self.frame_qs.set_label_align(0.5,0.0)\n self.box_quick_set = gtk.VBox(homogeneous=False,spacing=0)\n self.box_quick_set.set_border_width(20)\n self.qs_label = gtk.Label(\"QUICK SET\")\n self.qs_label.set_markup('Tristates Analog Outputs ')\n self.box_quick_set.pack_start(self.qs_table)\n #self.box_quick_set.pack_end(self.button_quick_set) \n self.frame_qs.add(self.box_quick_set)\n\n self.label_But_Space6 = gtk.Label(\" \")\n self.label_But_Space7 = gtk.Label(\" \")\n\n self.box_variables = gtk.VBox()\n self.box_variables.set_border_width(5)\n\n self.box_variables.pack_start(self.box_Global, expand=False)\n self.box_variables.pack_start(self.box_vmm_number, expand=False)\n self.box_variables.pack_start(self.label_But_Space6, expand=False)\n self.box_variables.pack_start(self.label_But_Space7, expand=False)\n self.box_variables.pack_start(self.box_SPG, expand=False)\n self.box_variables.pack_start(self.box_SDP, expand=False)\n self.box_variables.pack_start(self.box_SBMX, expand=False)\n self.box_variables.pack_start(self.box_SBXX, expand=False)\n self.box_variables.pack_start(self.box_SLG, expand=False)\n self.box_variables.pack_start(self.box_SCMX, expand=False)\n self.box_variables.pack_start(self.box_SFAM, expand=False)\n self.box_variables.pack_start(self.box_ST, expand=False)\n self.box_variables.pack_start(self.box_SFM, expand=False)\n\n self.box_variables.pack_start(self.box_SG, expand=False)\n self.box_variables.pack_start(self.box_SNG, expand=False)\n self.box_variables.pack_start(self.box_STXX, expand=False)\n\n self.box_variables.pack_start(self.box_SSH, expand=False)\n self.box_variables.pack_start(self.box_STC, expand=False)\n\n self.box_variables.pack_start(self.box_SC10b, expand=False)\n self.box_variables.pack_start(self.box_S8b, expand=False)\n self.box_variables.pack_start(self.box_SC8b, expand=False)\n self.box_variables.pack_start(self.box_S6b, expand=False)\n self.box_variables.pack_start(self.box_SC6b, expand=False)\n self.box_variables.pack_start(self.box_SPDC, expand=False)\n \n self.box_variables.pack_start(self.box_SDCKS, expand=False)\n self.box_variables.pack_start(self.box_SDCKA, expand=False)\n self.box_variables.pack_start(self.box_SDCK6b, expand=False)\n self.box_variables.pack_start(self.box_SDRV, expand=False)\n self.box_variables.pack_start(self.box_STPP, expand=False)\n\n self.box_variables.pack_start(self.label_variable5,expand=False)\n self.box_variables.pack_start(self.label_variable11,expand=False)\n self.box_variables.pack_start(self.box_SDT,expand=False)\n self.box_variables.pack_start(self.box_SDP_,expand=False)\n self.box_variables.pack_start(self.box_SDP_SDT,expand=False)\n #self.box_variables.pack_start(self.label_variable12,expand=False)\n self.box_variables.pack_start(self.label_variable9)\n self.box_variables.pack_end(self.frame_qs,expand=False) \n\n self.frame_variables = gtk.Frame()\n self.frame_variables.set_border_width(4)\n self.frame_variables.set_shadow_type(gtk.SHADOW_IN)\n self.frame_variables.add(self.box_variables)\n # self.frame_variables.set_size_request(300, -1)\n\n self.box_all_variables = gtk.HBox()\n self.box_all_variables.pack_start(self.frame_variables)\n\n ################### CHANNELS FRAMES #############\n \n self.frame_channels = gtk.Frame()\n self.frame_channels.set_border_width(4)\n self.frame_channels.set_shadow_type(gtk.SHADOW_IN)\n\n self.box_channels_steer = gtk.VBox(homogeneous=False, spacing=0)\n self.box_channels_rows = gtk.VBox(homogeneous=True, spacing=0)\n\n for ch_num in range(64):\n 
self.box_channels_rows.pack_start(self.chan_list[ch_num].channel_box)\n\n self.box_channels_steer.pack_start(self.box_channels, expand=False)\n self.box_channels_steer.pack_start(self.box_chan_labels_a, expand=False)\n self.box_channels_steer.pack_start(self.box_channels_rows, expand=False)\n self.frame_channels.add(self.box_channels_steer)\n\n self.box_all_channels = gtk.HBox()\n self.box_all_channels.pack_start(self.frame_channels)\n\n\n","sub_path":"python/dev/tuna/vmm.py","file_name":"vmm.py","file_ext":"py","file_size_in_byte":47180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"90533466","text":"from copy import deepcopy as dcopy\nfrom math import sqrt, acos, pi\nfrom GameBoard.game_board import Screen\nfrom src.utils import flatten\nimport numpy as np\nimport time\n\nScoreBoardID = 0\nAgentID = 1\nConquerID = 2\nTreasureID = 3\nWallID = 4\ndx = [1, -1, 0, 0]\ndy = [0, 0, -1, 1]\n\nclass Player(object):\n \n def __init__(self, ID):\n self.ID = ID\n self.title_score = 0\n self.area_score = 0\n self.treasure_score = 0\n self.old_score = 0\n \n @property\n def total_score(self):\n \"\"\"\n Returns the total scores consits of title, area and treasure scores\n \"\"\"\n return self.title_score + self.area_score + self.treasure_score\n \n def reset(self):\n self.title_score = 0\n self.area_score = 0\n self.treasure_score = 0\n self.old_score = 0\n \n def show_scores(self):\n print(\"Player \" + str(self.ID) + \":\")\n print(\"\\tTitle Score: {}\".format(self.title_score))\n print(\"\\tTreasure Score: {}\".format(self.treasure_score))\n print(\"\\tArea Score {}\".format(self.area_score))\n print()\n \nclass Environment(object):\n\n def __init__(self, input_data = None, show_screen = False, MAX_SIZE = 20):\n self.MAX_SIZE = MAX_SIZE\n self.show_screen = show_screen\n self.data = dcopy(input_data)\n self.n_actions = 8\n self.punish = 0\n self.n_inputs = 7\n self.max_n_agents = 8\n self.max_n_turns = 100\n self.num_players = 2\n self.agent_step_dim = (1 + 2 * self.max_n_agents) * (self.MAX_SIZE ** 2) + self.max_n_turns\n self.action_dim = self.n_actions\n self.players = [Player(i) for i in range(self.num_players)]\n self.screen = Screen(self)\n self.reset()\n \n '''\n print(\"Infor map: \")\n print(\"\\tHeight - Width: {}-{}\".format(self.height, self.width))\n print(\"\\tNum agents: {}\".format(self.n_agents))\n print()\n '''\n \n def reset(self):\n \"\"\"\n height: height of table\n width: width of table\n score_board: title score in table\n agent_pos: location of agents in table (coord)\n treasure_board: treasures in table\n wall_board: walls in table\n conquer_board: conquered locations of players\n n_turns: number of turns in each game\n n_agents: number of agents\n\n \"\"\"\n height, width, score_board, agent_pos, treasure_board, wall_board, \\\n conquer_board, n_turns, n_agents = [dcopy(_data) for _data in self.data]\n \n self.score_board = []\n self.agent_board = [[], []]\n self.treasure_board = []\n self.wall_board = []\n self.conquer_board = [[], []]\n self.width = width\n self.height = height\n self.agent_pos = agent_pos\n self.n_turns = n_turns\n self.remaining_turns = n_turns\n self.n_agents = n_agents\n \n for player_ID in range(self.num_players):\n self.players[player_ID].reset()\n \n for _ in range(self.MAX_SIZE):\n self.score_board.append([0] * self.MAX_SIZE)\n self.treasure_board.append([0] * self.MAX_SIZE)\n self.wall_board.append([0] * self.MAX_SIZE)\n for player_ID in range(self.num_players):\n 
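Editor's note on the vmm.py record above (a hedged refactoring sketch, not part of the original file): the glob_SM_value / glob_ST_value / glob_SG_value / glob_STC_value / glob_SC10b_value family all repeat one pattern — format an integer as a fixed-width binary word and copy its bits into self.globalreg, sometimes in reversed bit order (the "# reversed!" branches). The helper below, with illustrative names (pack_bits, reg, offset, reverse), shows how that could be factored out; note also that the file targets Python 2 (xrange, PyGTK), and that glob_SDT_entry / glob_SDP_entry call sys.exit on an out-of-range value, which would terminate the whole GUI instead of rejecting the entry.

def pack_bits(reg, offset, value, width, reverse=False):
    """Write `value` into `reg` as a `width`-bit binary word, MSB first.

    Bits land at reg[offset], reg[offset+1], ... or, when reverse=True,
    at reg[offset], reg[offset-1], ... -- matching the "# reversed!"
    callbacks such as glob_SC10b_value above.
    """
    word = '{0:0{1}b}'.format(value, width)
    for bit, ch in enumerate(word):
        reg[offset - bit if reverse else offset + bit] = int(ch)

# Hypothetical usage against the same register map as the original:
#   pack_bits(self.globalreg, registers.SM, widget.get_active(), 6)
#   pack_bits(self.globalreg, registers.SC10b, widget.get_active(), 2, reverse=True)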
self.agent_board[player_ID].append([0] * self.MAX_SIZE)\n self.conquer_board[player_ID].append([0] * self.MAX_SIZE)\n\n for i in range(self.height):\n for j in range(self.width):\n self.score_board[i][j] = score_board[i][j]\n \n \n for i in range(self.n_agents): \n for j in range(self.num_players):\n x, y = self.agent_pos[j][i]\n self.agent_board[j][x][y] = 1\n self.conquer_board[j][x][y] = 1\n \n for x, y in wall_board:\n self.wall_board[x][y] = 1\n \n for i in range(self.MAX_SIZE):\n for j in range(self.MAX_SIZE):\n if i >= self.height or j >= self.width:\n self.wall_board[i][j] = 1\n \n for x, y, value in treasure_board:\n self.treasure_board[x][y] = value\n \n self.upper_bound_score = np.max(score_board)\n self.lower_bound_score = np.min(score_board)\n self.norm_score_board = dcopy(self.score_board)\n self.norm_treasure_board = dcopy(self.treasure_board)\n self.range_bound = (self.upper_bound_score - self.lower_bound_score)\n self.score_board = (self.score_board - self.lower_bound_score) \\\n / self.range_bound\n self.treasure_board /= self.range_bound\n \n self.observation = self.get_observation(0)\n \n title_scores, treasure_scores, area_scores = \\\n self.compute_score(self.observation, self.observation)\n \n for player_ID in range(self.num_players):\n self.players[player_ID].title_score = title_scores[player_ID]\n self.players[player_ID].treasure_score = treasure_scores[player_ID]\n self.players[player_ID].area_score = area_scores[player_ID]\n self.players[player_ID].old_score = self.players[player_ID].total_score\n \n self.old_observation = dcopy(self.observation)\n \n if self.show_screen:\n self.screen.setup(self)\n \n def soft_reset(self):\n \n for player_ID in range(self.num_players):\n self.players[player_ID].reset()\n for x in range(self.height):\n for y in range(self.width): \n self.agent_board[player_ID][x][y] = 0\n self.conquer_board[player_ID][x][y] = 0\n \n for i in range(self.n_agents): \n for j in range(2):\n x, y = self.agent_pos[j][i]\n if self.show_screen:\n self.screen.reset_square([x, y], -1, 0)\n \n height, width, _, agent_pos, _, _, \\\n conquer_board, n_turns, n_agents = [dcopy(_data) for _data in self.data]\n \n self.agent_pos = agent_pos\n self.remaining_turns = self.n_turns\n for player_ID in range(self.num_players):\n for agent_ID in range(self.n_agents):\n x, y = self.agent_pos[player_ID][agent_ID]\n self.agent_board[player_ID][x][y] = 1\n self.conquer_board[player_ID][x][y] = 1\n \n self.observation = self.get_observation(0)\n title_scores, treasure_scores, area_scores = \\\n self.compute_score(self.observation, self.observation)\n \n for player_ID in range(self.num_players):\n self.players[player_ID].title_score = title_scores[player_ID]\n self.players[player_ID].treasure_score = treasure_scores[player_ID]\n self.players[player_ID].area_score = area_scores[player_ID]\n self.players[player_ID].old_score = self.players[player_ID].total_score\n \n self.old_observation = dcopy(self.observation)\n \n if self.show_screen:\n self.screen.reset()\n \n \n def render(self):\n \"\"\"\n display game screen\n \"\"\"\n self.screen.render()\n \n def get_ub_board_size(self):\n \"\"\"\n Returns upper bound of board size\n \"\"\"\n return [self.MAX_SIZE, self.MAX_SIZE]\n \n def get_state(self, player):\n state = self.get_observation(player)\n return state\n \n def get_observation(self, player_ID):\n \"\"\"\n Returns current observation\n \"\"\"\n \n state = dcopy([self.score_board, \n self.agent_board, \n self.conquer_board, \n self.treasure_board, \n self.wall_board])\n 
\n if player_ID == 1:\n temp = dcopy(state[1][0])\n state[1][0] = dcopy(state[1][1])\n state[1][1] = temp\n temp = dcopy(state[2][0])\n state[2][0] = dcopy(state[2][1])\n state[2][1] = temp\n return state\n \n def convert_to_opn_obs(self, state, agent_pos):\n \"\"\"\n Returns opponent observation\n \"\"\"\n temp = state[1][0]\n state[1][0] = state[1][1]\n state[1][1] = temp\n temp = state[2][0]\n state[2][0] = state[2][1]\n state[2][1] = temp\n \n temp = agent_pos[1]\n agent_pos[1] = agent_pos[0]\n agent_pos[0] = temp\n return state, agent_pos\n \n def log_state(self, state):\n print(\"Score Board: \")\n for i in range(self.height):\n print(self.norm_score_board[i][:self.width])\n print(\"Agent Board 1: \")\n for i in range(self.height):\n print(state[AgentID][0][i][:self.width])\n print(\"Agent Board 2: \")\n for i in range(self.height):\n print(state[AgentID][1][i][:self.width])\n print('-----------')\n \n def get_states_for_step(self, states):\n states = np.array(flatten(states), dtype = np.float32)\\\n .reshape(-1, self.n_inputs, self.MAX_SIZE, self.MAX_SIZE)\n return states\n \n def get_agent_pos(self, player):\n return dcopy(self.agent_pos[player])\n \n def get_agent_for_step(self, agent_ID, agent_coord):\n agent_state = [[], []]\n for ag_id in range(self.max_n_agents):\n for player_ID in range(self.num_players):\n empty_board = []\n for _ in range(self.MAX_SIZE):\n empty_board.append([0] * self.MAX_SIZE)\n agent_state[player_ID].append(empty_board)\n if ag_id >= self.n_agents: continue\n x, y = agent_coord[player_ID][ag_id]\n agent_state[player_ID][ag_id][x][y] = 1\n # for i in range(self.height):\n # print(agent_state[player_ID][ag_id][i][:self.width])\n # print()\n \n index = agent_state[0][agent_ID]\n onehot_nturns = [0] * self.max_n_turns\n onehot_nturns[self.remaining_turns] = 1\n agent_state = flatten([agent_state, index, onehot_nturns])\n # print(len(agent_state))\n return np.array(agent_state, dtype = np.float32).reshape(-1, self.agent_step_dim)\n \n def get_agents_for_step(self, agents_ID):\n agents_step = [self.get_agent_for_step(agent_ID) for agent_ID in agents_ID]\n agents_step = np.array(agents_step, dtype = np.float32)\\\n .reshape(-1, )\n return agents_step\n \n def get_act(act):\n switcher = {\n (1, 0): 0,\n (1, 1): 1,\n (0, 1): 2,\n (-1, 1): 3,\n (-1, 0): 4,\n (-1, -1): 5,\n (0, -1): 6,\n (1, -1): 7,\n }\n return switcher.get(act, 0)\n \n def compute_score_area(self, state, player_ID):\n def is_border(x, y):\n return x <= 0 or x >= self.height - 1 or y <= 0 or y >= self.width - 1\n \n def can_move(x, y):\n return x >= 0 and x < self.height and y >= 0 and y < self.width\n \n def dfs(x, y, visited):\n visited[x][y] = True\n is_closed = True\n if is_border(x, y):\n is_closed = False\n temp_score = abs(score_board[x][y])\n if wall_board[x][y] == 1:\n temp_score = 0\n for i in range(4):\n _x = x + dx[i]\n _y = y + dy[i]\n if can_move(_x, _y) and not visited[_x][_y]:\n _score = dfs(_x, _y, visited)\n if _score < 0:\n is_closed = False\n else:\n temp_score += _score\n if not is_closed:\n return -1\n return temp_score\n \n visited = []\n score_board = state[ScoreBoardID]\n conquer_board = state[ConquerID]\n wall_board = state[WallID]\n score = 0\n for i in range(self.height):\n visited.append([False] * self.width)\n for j in range(self.width):\n if conquer_board[player_ID][i][j] == 1:\n visited[i][j] = True\n\n for i in range(self.height):\n for j in range(self.width):\n if not visited[i][j]:\n temp = dfs(i, j, visited)\n score += max(0, temp)\n # if score > 0:\n # 
print(score)\n # print(visited)\n return score\n \n def compute_score(self, state, old_state):\n \"\"\"\n \n Parameters\n ----------\n state : object\n state of game.\n old_state : object\n prestate of game.\n\n Returns\n -------\n title_scores : array\n title scores of players.\n treasure_score : TYPE\n treasure scores of players.\n area_scores : TYPE\n area scores of players.\n\n \"\"\"\n treasure_board = state[TreasureID]\n title_scores = [0, 0]\n treasure_score = [0, 0]\n area_scores = [0, 0]\n for i in range(self.height):\n for j in range(self.width):\n if state[ConquerID][0][i][j] == 1:\n title_scores[0] += state[ScoreBoardID][i][j]\n if state[ConquerID][1][i][j] == 1:\n title_scores[1] += state[ScoreBoardID][i][j]\n if state[TreasureID][i][j] > 0 and old_state[ConquerID][0][i][j] == 0 \\\n and old_state[ConquerID][1][i][j] == 0:\n if state[ConquerID][0][i][j] == 1:\n state[TreasureID][0] += treasure_board[i][j]\n if state[ConquerID][1][i][j] == 1:\n state[TreasureID][1] += treasure_board[i][j]\n \n for player_ID in range(self.num_players):\n area_scores[player_ID] = self.compute_score_area(state, player_ID)\n \n return title_scores, treasure_score, area_scores\n \n def get_score(self, state, old_state, player_ID):\n state = dcopy(state)\n title_scores, treasure_scores, area_scores = self.compute_score(state, old_state)\n result = title_scores[0] + treasure_scores[0] + area_scores[0] \\\n - title_scores[1] - treasure_scores[1] - area_scores[1]\n return result\n \n def check_next_action(self, _act, id_agent, agent_pos):\n x, y = agent_pos[id_agent][0], agent_pos[id_agent][1]\n x, y = self.next_action(x, y, _act)\n if not (x >= 0 and x < self.height and y >= 0 and y < self.width):\n return False\n \n return self.wall_board[x][y] == 0\n \n def next_action(self, x, y, act):\n def action(x):\n switcher = {\n 0: [1, 0], 1: [1, 1], 2: [0, 1], 3: [-1, 1], \n 4: [-1, 0], 5: [-1, -1], 6: [0, -1], 7: [1, -1]\n }\n return switcher.get(x, [1, 0])\n _action = action(act)\n return [x + _action[0], y + _action[1]]\n \n def angle(self, a1, b1, a2, b2):\n fi = acos((a1 * a2 + b1 * b2) / (sqrt(a1*a1 + b1*b1) * (sqrt(a2*a2 + b2*b2))))\n return fi\n \n def check(self, x0, y0, x, y, act):\n \n def action(x):\n switcher = {\n 0: [1, 0], 1: [1, 1], 2: [0, 1], 3: [-1, 1], \n 4: [-1, 0], 5: [-1, -1], 6: [0, -1], 7: [1, -1]\n }\n return switcher.get(x, [1, 0])\n \n a1, b1 = action(act)\n a2, b2 = x - x0, y - y0\n if abs(self.angle(a1, b1, a2, b2)) - 0.0001 <= pi / 3:\n return True\n return False\n \n def predict_spread_scores(self, x, y, state, act):\n score_board, agent_board, conquer_board, treasure_board, wall_board = state\n score = 0\n discount = 0.02\n reduce_negative = 0.02\n p_1 = 1.3\n p_2 = 1\n aux_score = 0\n for i in range(1, min(8, self.remaining_turns)):\n for j in range(max(0, x - i), min(self.height, x + i + 1)):\n new_x = j\n new_y = y - i\n if new_y >= 0:\n if wall_board[new_x][new_y] == 0: \n _sc = treasure_board[new_x][new_y] ** p_1\n if conquer_board[0][new_x][new_y] != 1:\n _sc += (max(reduce_negative * score_board[new_x][new_y], score_board[new_x][new_y]) ** p_2)\n if act == 0 or self.check(x, y, new_x, new_y, act):\n score += _sc * discount\n new_x = j\n new_y = y + i\n if new_y < self.width:\n if wall_board[new_x][new_y] == 0: \n _sc = treasure_board[new_x][new_y] ** p_1\n if conquer_board[0][new_x][new_y] != 1:\n _sc += (max(reduce_negative * score_board[new_x][new_y], score_board[new_x][new_y]) ** p_2)\n if act == 0 or self.check(x, y, new_x, new_y, act):\n score += _sc * discount\n 
for k in range(max(0, y - i), min(self.height, y + i + 1)):\n new_x = x - i\n new_y = k\n if new_x >= 0:\n if wall_board[new_x][new_y] == 0: \n _sc = treasure_board[new_x][new_y] ** p_1\n if conquer_board[0][new_x][new_y] != 1:\n _sc += (max(reduce_negative * score_board[new_x][new_y], score_board[new_x][new_y]) ** p_2)\n if act == 0 or self.check(x, y, new_x, new_y, act):\n score += _sc * discount\n new_x = x + i\n new_y = k\n if new_x < self.height:\n if wall_board[new_x][new_y] == 0: \n _sc = treasure_board[new_x][new_y] ** p_1\n if conquer_board[0][new_x][new_y] != 1:\n _sc += (max(reduce_negative * score_board[new_x][new_y], score_board[new_x][new_y]) ** p_2)\n if act == 0 or self.check(x, y, new_x, new_y, act):\n score += _sc * discount\n discount *= 0.7\n return score\n \n def soft_step(self, agent_id, state, act, agent_pos, exp = False):\n old_state = dcopy(state)\n old_scores, old_treasures_scores, area_scores = self.compute_score(old_state, old_state)\n old_score = old_scores[0] + area_scores[0] - old_scores[1] - area_scores[1]\n score_board, agent_board, conquer_board, treasure_board, wall_board = state\n x, y = agent_pos[agent_id][0], agent_pos[agent_id][1] \n _x, _y = self.next_action(x, y, act)\n valid = True\n aux_score = 0\n reward = 0\n if _x >= 0 and _x < self.height and _y >= 0 and _y < self.width and wall_board[_x][_y] == 0:\n if agent_board[0][_x][_y] == 0 and agent_board[1][_x][_y] == 0:\n if conquer_board[1][_x][_y] == 0:\n agent_board[0][_x][_y] = 1\n agent_board[0][x][y] = 0\n conquer_board[0][_x][_y] = 1\n agent_pos[agent_id][0] = _x\n agent_pos[agent_id][1] = _y\n else:\n conquer_board[1][_x][_y] = 0\n else:\n valid = False\n \n title_scores, treasures_scores, area_scores = self.compute_score(state, old_state)\n if valid:\n if exp:\n aux_score = self.predict_spread_scores(_x, _y, state, act)\n else:\n aux_score = 0\n reward = (title_scores[0] + treasures_scores[0] + area_scores [0] + aux_score\\\n - title_scores[1] - treasures_scores[1] - area_scores[1] - old_score)\n else:\n reward = - 0.5\n \n return valid, state, reward\n \n def soft_step_2(self, agent_id, state, acts, agent_pos, exp = False):\n ''' storage old state to compute changed scored '''\n old_state = dcopy(state)\n old_scores, old_treasures_scores, area_scores = self.compute_score(old_state, old_state)\n old_score = old_scores[0] + area_scores[0] - old_scores[1] - area_scores[1]\n score_board, agent_board, conquer_board, treasure_board, wall_board = state\n \n ''' get next action '''\n x0, y0 = agent_pos[0][agent_id][0], agent_pos[0][agent_id][1] \n x1, y1 = self.next_action(x0, y0, acts[0])\n x2, y2 = agent_pos[1][agent_id][0], agent_pos[1][agent_id][1] \n x3, y3 = self.next_action(x2, y2, acts[1])\n valids = [True, True]\n \n ''' invalid checking '''\n if x1 < 0 or x1 >= self.height or y1 < 0 or y1 >= self.width:\n x1, y1 = x0, y0\n valids[0] = False\n elif wall_board[x1][y1] == 1:\n x1, y1 = x0, y0\n valids[0] = False\n \n if x3 < 0 or x3 >= self.height or y3 < 0 or y3 >= self.width:\n x3, y3 = x2, y2\n valids[1] = False\n elif wall_board[x3][y3] == 1:\n x3, y3 = x2, y2\n valids[1] = False\n \n ''' two actions is invalid'''\n if x1 == x2 and y1 == y2 and x3 == x0 and y3 == y0:\n return state, [0, 0]\n \n ''' conflict to unique square '''\n if x1 == y1 and x3 == y3:\n return state, [0, 0]\n \n ''' go to conquered square '''\n if agent_board[0][x1][y1] == 1 or agent_board[1][x1][y1] == 1:\n x1, y1 = x0, y0\n valids[0] = False\n \n if agent_board[0][x3][y3] == 1 or agent_board[1][x3][y3] == 1:\n x3, 
y3 = x2, y2\n valids[1] = False\n \n ''' fit actions '''\n if conquer_board[1][x1][y1] == 0:\n agent_board[0][x1][y1] = 1\n agent_board[0][x0][y0] = 0\n conquer_board[0][x1][y1] = 1\n agent_pos[0][agent_id][0] = x1\n agent_pos[0][agent_id][1] = y1\n else:\n conquer_board[1][x1][y1] = 0\n \n if conquer_board[0][x3][y3] == 0:\n agent_board[1][x3][y3] = 1\n agent_board[1][x2][y2] = 0\n conquer_board[1][x3][y3] = 1\n agent_pos[1][agent_id][0] = x3\n agent_pos[1][agent_id][1] = y3\n else:\n conquer_board[1][x1][y1] = 0\n \n title_scores, treasures_scores, area_scores = self.compute_score(state, old_state)\n \n reward = (title_scores[0] + treasures_scores[0] + area_scores[0]\\\n - title_scores[1] - treasures_scores[1] - area_scores[1] - old_score)\n \n rewards = [reward, - reward]\n if not valids[0]:\n rewards[0] -= 1\n if not valids[1]:\n rewards[1] -= 1\n \n return state, rewards\n \n def soft_step_(self, agent_id, state, act, agent_pos):\n score_board, agent_board, conquer_board, treasure_board, wall_board = state\n x, y = agent_pos[agent_id][0], agent_pos[agent_id][1] \n _x, _y = self.next_action(x, y, act)\n if _x >= 0 and _x < self.height and _y >= 0 and _y < self.width and wall_board[_x][_y] == 0:\n if agent_board[0][_x][_y] == 0 and agent_board[1][_x][_y] == 0:\n if conquer_board[1][_x][_y] == 0:\n agent_board[0][_x][_y] = 1\n agent_board[0][x][y] = 0\n conquer_board[0][_x][_y] = 1\n agent_pos[agent_id][0] = _x\n agent_pos[agent_id][1] = _y\n else:\n conquer_board[1][_x][_y] = 0\n \n return state\n \n def get_next_action_pos(self, action_1, action_2):\n new_pos = [[], []]\n is_valid_action = [[True] * self.n_agents, [True] * self.n_agents]\n \n for i in range(self.n_agents):\n x, y = self.agent_pos[0][i][0], self.agent_pos[0][i][1]\n new_pos[0].append(self.next_action(x, y, action_1[i]))\n x, y = self.agent_pos[1][i][0], self.agent_pos[1][i][1]\n new_pos[1].append(self.next_action(x, y, action_2[i]))\n \n for i in range(self.n_agents):\n x, y = new_pos[0][i]\n if (x < 0 or x >= self.height or y < 0 or y >= self.width):\n is_valid_action[0][i] = False\n new_pos[0][i] = dcopy(self.agent_pos[0][i])\n elif self.wall_board[x][y] == 1:\n is_valid_action[0][i] = False\n new_pos[0][i] = dcopy(self.agent_pos[0][i])\n \n for i in range(self.n_agents):\n x, y = new_pos[1][i]\n if (x < 0 or x >= self.height or y < 0 or y >= self.width):\n is_valid_action[1][i] = False\n new_pos[1][i] = dcopy(self.agent_pos[1][i])\n elif self.wall_board[x][y] == 1:\n is_valid_action[1][i] = False\n new_pos[1][i] = dcopy(self.agent_pos[1][i])\n \n \"\"\" create connect matrix \"\"\"\n connected_matrix = []\n for j in range(2 * self.n_agents):\n connected_matrix.append([0] * (2 * self.n_agents))\n \n for i in range(2 * self.n_agents):\n X = new_pos[0][i] if i < self.n_agents else new_pos[1][i - self.n_agents]\n for j in range(2 * self.n_agents):\n if i == j: continue\n Y = self.agent_pos[0][j] if j < self.n_agents \\\n else self.agent_pos[1][j - self.n_agents]\n if X[0] == Y[0] and X[1] == Y[1]:\n connected_matrix[i][j] = 1\n \n \"\"\" handle conflict action, 1 square together\"\"\"\n for i in range(self.n_agents):\n for j in range(self.n_agents):\n if new_pos[0][i][0] == new_pos[1][j][0] and\\\n new_pos[0][i][1] == new_pos[1][j][1]:\n is_valid_action[0][i] = False\n is_valid_action[1][j] = False\n new_pos[0][i] = dcopy(self.agent_pos[0][i])\n new_pos[1][j] = dcopy(self.agent_pos[1][j])\n if i < j and new_pos[0][i][0] == new_pos[0][j][0] and\\\n new_pos[0][i][1] == new_pos[0][j][1]:\n is_valid_action[0][i] = 
is_valid_action[0][j] = False\n new_pos[0][i] = dcopy(self.agent_pos[0][i])\n new_pos[0][j] = dcopy(self.agent_pos[0][j])\n if i < j and new_pos[1][i][0] == new_pos[1][j][0] and\\\n new_pos[1][i][1] == new_pos[1][j][1]:\n is_valid_action[1][i] = is_valid_action[1][j] = False\n new_pos[1][i] = dcopy(self.agent_pos[1][i])\n new_pos[1][j] = dcopy(self.agent_pos[1][j])\n \n \"\"\" handle the clique \"\"\"\n for i in range(2 * self.n_agents):\n if i < self.n_agents:\n if not is_valid_action[0][i]:\n continue\n elif not is_valid_action[1][i - self.n_agents]:\n continue\n u = i\n stk = [u]\n visited = [False] * (2 * self.n_agents)\n visited[u] = True\n \n for _ in range(2 * self.n_agents):\n for j in range(2 * self.n_agents):\n if connected_matrix[u][j] == 1:\n stk.append(j)\n is_clique = False\n if j < self.n_agents:\n if not is_valid_action[0][j]: is_clique = True\n elif not is_valid_action[1][j - self.n_agents]:\n is_clique = True\n \n if visited[j]:\n is_clique = True\n \n if is_clique:\n for id in stk:\n if id < self.n_agents:\n is_valid_action[0][id] = False\n new_pos[0][id] = dcopy(self.agent_pos[0][id])\n else:\n is_valid_action[1][id - self.n_agents] = False\n new_pos[1][id - self.n_agents] = \\\n dcopy(self.agent_pos[1][id - self.n_agents])\n stk = []\n break\n u = j\n visited[j] = True\n \n \"\"\" handle the conflict remove action \"\"\"\n for i in range(2 * self.n_agents):\n u = i\n stk = []\n visited = [False] * (2 * self.n_agents)\n visited[u] = True\n if i < self.n_agents:\n if not is_valid_action[0][i]:\n continue\n elif not is_valid_action[1][i - self.n_agents]:\n continue\n \n for _ in range(2 * self.n_agents):\n for j in range(2 * self.n_agents):\n if connected_matrix[u][j] == 1:\n congested = False\n if j < self.n_agents:\n x, y = new_pos[0][j]\n if self.conquer_board[1][x][y] == 1 or\\\n not is_valid_action[0][j]:\n congested = True\n else:\n x, y = new_pos[1][j - self.n_agents]\n if self.conquer_board[0][x][y] == 1 or\\\n not is_valid_action[1][j - self.n_agents]:\n congested = True\n \n if visited[j]:\n congested = True\n \n visited[j] = True\n \n if congested:\n for id in stk:\n if id < self.n_agents:\n is_valid_action[0][id] = False\n new_pos[0][id] = dcopy(self.agent_pos[0][id])\n else:\n is_valid_action[1][id - self.n_agents] = False\n new_pos[1][id - self.n_agents] = \\\n dcopy(self.agent_pos[1][id - self.n_agents])\n stk = []\n break\n stk.append(j)\n u = j\n if len(stk) == 0:\n break\n \n return new_pos, is_valid_action\n \n def step(self, action_1, action_2, render = False):\n new_pos, is_valid_action = self.get_next_action_pos(action_1, action_2)\n \n # render before action\n for i in range(self.n_agents):\n if is_valid_action[0][i]:\n x, y = new_pos[0][i]\n if self.conquer_board[1][x][y] == 0:\n if self.agent_pos[0][i][0] != new_pos[0][i][0] \\\n or self.agent_pos[0][i][1] != new_pos[0][i][1]:\n self.agent_board[0][self.agent_pos[0][i][0]][self.agent_pos[0][i][1]] = 0\n self.agent_board[0][x][y] = 0\n if render:\n self.screen.redraw_squares(\n self.agent_pos[0][i][0], self.agent_pos[0][i][1], 0)\n \n if is_valid_action[1][i]:\n x, y = new_pos[1][i]\n if self.conquer_board[0][x][y] == 0 :\n if self.agent_pos[1][i][0] != new_pos[1][i][0] \\\n or self.agent_pos[1][i][1] != new_pos[1][i][1]:\n self.agent_board[1][x][y] = 0\n self.agent_board[1][self.agent_pos[1][i][0]][self.agent_pos[1][i][1]] = 0\n if render:\n self.screen.redraw_squares(\n self.agent_pos[1][i][0], self.agent_pos[1][i][1], 1)\n \n # render after action\n for i in range(self.n_agents):\n for j in 
range(2):\n if is_valid_action[j][i]:\n x, y = new_pos[j][i]\n if self.conquer_board[1 - j][x][y] == 1:\n self.conquer_board[1 - j][x][y] = 0\n if render:\n self.screen.reset_square([x, y], -1)\n new_pos[j][i] = dcopy(self.agent_pos[j][i])\n else:\n self.conquer_board[j][x][y] = 1\n self.agent_board[j][x][y] = 1\n \n self.compute_score(self.observation, self.old_observation)\n \n if render: self.render() \n for i in range(self.n_agents):\n self.agent_pos[0][i] = [new_pos[0][i][0], new_pos[0][i][1]]\n self.agent_pos[1][i] = [new_pos[1][i][0], new_pos[1][i][1]]\n \n self.observation = self.get_observation(0)\n \n title_scores, treasure_scores, area_scores = \\\n self.compute_score(self.observation, self.old_observation)\n \n if render: self.render()\n for player_ID in range(self.num_players):\n self.players[player_ID].title_score = title_scores[player_ID]\n self.players[player_ID].treasure_score += treasure_scores[player_ID]\n self.players[player_ID].area_score = area_scores[player_ID]\n \n self.old_observation = dcopy(self.observation)\n if render:\n for player_id in range(self.num_players):\n for agent_ID in range(self.n_agents):\n coord = self.agent_pos[player_id][agent_ID]\n self.screen.reset_square(coord, player_id, agent_ID)\n self.screen.show_score()\n \n if render: self.render()\n \n reward = (self.players[0].total_score - self.players[1].total_score - \\\n self.players[0].old_score + self.players[1].old_score)\n for player_ID in range(self.num_players):\n self.players[player_ID].old_score = self.players[player_ID].total_score\n # self.players[player_ID].show_scores()\n \n self.remaining_turns -= 1\n terminate = (self.remaining_turns == 0)\n # if terminate:\n # reward = 1 if self.players[0].total_score > self.players[1].total_score else -1\n # else:\n # reward = 0.8 if reward > 0 else -0.8\n \n return [self.observation, reward, terminate, self.remaining_turns]\n\n def next_state(self, state, action, agent_pos, player_ID, agent_ID):\n state = dcopy(state)\n if agent_ID == self.n_agents - 1:\n player_ID = 1 - player_ID\n agent_ID = 0\n else:\n agent_ID += 1\n \n if player_ID == 1:\n temp = dcopy(state[1][0])\n state[1][0] = dcopy(state[1][1])\n state[1][1] = temp\n temp = dcopy(state[2][0])\n state[2][0] = dcopy(state[2][1])\n state[2][1] = temp\n \n score_board, agent_board, conquer_board, treasure_board, wall_board = state\n x, y = agent_pos[player_ID][agent_ID]\n new_pos = (self.next_action(x, y, action))\n _x, _y = new_pos\n if _x >= 0 and _x < self.height and _y >= 0 and _y < self.width and wall_board[_x][_y] == 0:\n if agent_board[0][_x][_y] == 0 and agent_board[1][_x][_y] == 0:\n if conquer_board[1][_x][_y] == 0:\n agent_board[0][_x][_y] = 1\n agent_board[0][x][y] = 0\n conquer_board[0][_x][_y] = 1\n treasure_board[_x][_y] = 0\n agent_pos[player_ID][agent_ID] = [_x, _y]\n else:\n conquer_board[1][_x][_y] = 0\n state = [score_board, agent_board, conquer_board, treasure_board, wall_board]\n return state, agent_pos, player_ID, agent_ID\n \n def get_return(self, state, old_state, player_ID):\n return 1 if self.get_score(state, old_state, player_ID) >= 0 else -1\n \n def is_done_state(self, state, depth):\n return depth >= 2 * (1 + self.n_turns) * self.n_agents\n ","sub_path":"src/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":36667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"155781833","text":"import random\r\nfrom tkinter import *\r\nfrom math import *\r\nclass Hexagon:\r\n def 
__init__(self, row, col, canvas):\r\n self.row = row\r\n self.col = col\r\n self.points = [None, None, None, None, None, None] #NE, E, SE, SW, W, NW\r\n self.type = random.choice(Hexagon.list_of_areas)\r\n Hexagon.list_of_areas.remove(self.type)\r\n if self.type != 'desert':\r\n self.dice_throw = random.choice(Hexagon.list_of_dice_throw)\r\n Hexagon.list_of_dice_throw.remove(self.dice_throw)\r\n else:\r\n self.dice_throw = None\r\n Hexagon.thieves = self\r\n Hexagon.canvas = canvas\r\n ##############################################\r\n h = 2 * a * sin(radians(60))\r\n y = 2 * a + self.row * 0.5 * h\r\n x = 2 * a + self.col * 1.5 * a\r\n self.dice_throw_text = canvas.create_text(x - 0.5 * a, y - 0.5 * h, text=self.dice_throw, fill='black')\r\n self.canvas_hexagon = canvas.create_polygon(x - a, y, x, y, x + 0.5 * a, y - 0.5 * h, x, y - h, x - a, y - h, x - 1.5 * a,\r\n y - 0.5 * h, fill=Hexagon.dic_of_type_to_color[self.type], outline='black')\r\n canvas.tag_lower(self.canvas_hexagon)\r\n canvas.tag_bind(self.canvas_hexagon, '<Button-1>', self.click_on_hexagon)\r\n ##############################################\r\n thieves = None\r\n dic_of_type_to_color = {'tree':'green', 'wheat':'wheat1', 'sheep':'white', 'clay':'IndianRed3', 'stone':'grey', 'desert':'black'}\r\n list_of_dice_throw = [6,5,9,4,3,8,10,6,5,9,12,3,2,10,11,11,4,8]\r\n list_of_areas = ['tree', 'tree', 'tree', 'tree', 'wheat', 'wheat', 'wheat', 'wheat', 'sheep', 'sheep', 'sheep', 'sheep', 'clay', 'clay', 'clay', 'stone', 'stone', 'stone', 'desert']\r\n board = []\r\n\r\n def click_on_hexagon(self, event):\r\n if Player.move_thieves == True:\r\n Player.move_thieves = False\r\n if Hexagon.thieves.type == 'desert':\r\n pass\r\n else:\r\n Hexagon.canvas.itemconfig(Hexagon.thieves.dice_throw_text, fill='black')\r\n Hexagon.thieves = self\r\n Hexagon.canvas.itemconfig(Hexagon.thieves.dice_throw_text, fill='red')\r\n\r\n def create_board_of_hexagon(self, hexagon_canvas):\r\n col_to_row = {0: 3, 1: 4, 2: 5, 3: 4, 4: 3}\r\n col_to_start_index = {0: 2, 1: 1, 2: 0, 3: 1, 4: 2}\r\n for col in range(0, 5):\r\n temp_row = []\r\n index = col_to_start_index[col]\r\n for row in range(0, col_to_row[col]):\r\n temp_row.append(Hexagon(2*row + col_to_start_index[col], col, hexagon_canvas))\r\n index += 2\r\n Hexagon.board.append(temp_row)\r\n\r\n def print_hexagon_board(self):\r\n for row in Hexagon.board:\r\n for hexagon in row:\r\n print (hexagon.dice_throw, end=' ')\r\n print ('\\n')\r\n\r\n def check_and_append (self, point, hexagon):\r\n if hexagon not in point.hexagons:\r\n point.hexagons.append(hexagon)\r\n\r\n def check_close_hexagons_and_add_points(self):\r\n #DOWN\r\n for row in Hexagon.board:\r\n for hexagon in row:\r\n if self.col == hexagon.col and hexagon.row - self.row == 2:\r\n hexagon.points[5] = self.points[3]\r\n hexagon.points[0] = self.points[2]\r\n Hexagon.check_and_append(self, self.points[2], hexagon)\r\n Hexagon.check_and_append(self, self.points[3], hexagon)\r\n elif hexagon.col - self.col == 1 and hexagon.row - self.row == 1:\r\n hexagon.points[5] = self.points[1]\r\n hexagon.points[4] = self.points[2]\r\n Hexagon.check_and_append(self, self.points[1], hexagon)\r\n Hexagon.check_and_append(self, self.points[2], hexagon)\r\n elif hexagon.col - self.col == 1 and self.row - hexagon.row == 1:\r\n hexagon.points[4] = self.points[0]\r\n hexagon.points[3] = self.points[1]\r\n Hexagon.check_and_append(self, self.points[0], hexagon)\r\n Hexagon.check_and_append(self, self.points[1], hexagon)\r\n\r\n def set_points_to_hexagons(self, 
canvas):\r\n for row in Hexagon.board:\r\n for hexagon in row:\r\n for index in range (0,6):\r\n if hexagon.points[index] == None:\r\n hexagon.points[index] = Point(canvas, hexagon.row, hexagon.col, index)\r\n hexagon.points[index].hexagons.append(hexagon)\r\n hexagon.check_close_hexagons_and_add_points()\r\n\r\n def set_all_roads(self, canvas):\r\n for row in Hexagon.board:\r\n for hexagon in row:\r\n for point_index in range(0, 6):\r\n if point_index == 5:\r\n Point.set_road_between_two_points(Point, canvas, hexagon.points[5], hexagon.points[0], point_index)\r\n else:\r\n Point.set_road_between_two_points(Point, canvas, hexagon.points[point_index], hexagon.points[point_index + 1], point_index)\r\n\r\nclass Point:\r\n radius = 5\r\n h = 0\r\n canvas = None\r\n def __init__(self, canvas, row, col, index):\r\n self.hexagons = []\r\n self.roads = []\r\n self.player = None\r\n self.building = None\r\n self.can_build_on = True\r\n Point.point_list.append(self)\r\n self.id = Point.point_id\r\n Point.point_id += 1\r\n self.upgraded = False\r\n Point.canvas = canvas\r\n ##############################################\r\n h = 2 * a * sin(radians(60))\r\n Point.h = h\r\n self.x = 2 * a + col * 1.5 * a\r\n self.y = 2 * a + row * 0.5 * h\r\n if index == 0:\r\n self.x = self.x\r\n self.y = self.y\r\n self.x1 = self.x - Point.radius\r\n self.y1 = self.y - h - Point.radius\r\n self.x2 = self.x + Point.radius\r\n self.y2 = self.y - h + Point.radius\r\n self.point_on_canvas = canvas.create_oval(self.x1, self.y1, self.x2, self.y2, fill='white', outline = 'black', width = 1)\r\n elif index == 1:\r\n self.x = self.x + 0.5*a\r\n self.y = self.y +0.5*h\r\n self.x1 = self.x - Point.radius\r\n self.y1 = self.y - h - Point.radius\r\n self.x2 = self.x + Point.radius\r\n self.y2 = self.y - h + Point.radius\r\n self.point_on_canvas = canvas.create_oval(self.x1 , self.y1 , self.x2 , self.y2 , fill='white', outline = 'black', width = 1)\r\n elif index == 2:\r\n self.x = self.x\r\n self.y = self.y + h\r\n self.x1 = self.x - Point.radius\r\n self.y1 = self.y - h - Point.radius\r\n self.x2 = self.x + Point.radius\r\n self.y2 = self.y - h + Point.radius\r\n self.point_on_canvas = canvas.create_oval(self.x1, self.y1, self.x2, self.y2, fill='white', outline = 'black', width = 1)\r\n elif index == 3:\r\n self.x = self.x - a\r\n self.y = self.y + h\r\n self.x1 = self.x - Point.radius\r\n self.y1 = self.y - h - Point.radius\r\n self.x2 = self.x + Point.radius\r\n self.y2 = self.y - h + Point.radius\r\n self.point_on_canvas = canvas.create_oval(self.x1 , self.y1 , self.x2 , self.y2 , fill='white', outline = 'black', width = 1)\r\n elif index == 4:\r\n self.x = self.x - 1.5*a\r\n self.y = self.y + 0.5*h\r\n self.x1 = self.x - Point.radius\r\n self.y1 = self.y - h - Point.radius\r\n self.x2 = self.x + Point.radius\r\n self.y2 = self.y - h + Point.radius\r\n self.point_on_canvas = canvas.create_oval(self.x1, self.y1, self.x2 , self.y2 , fill='white', outline = 'black', width = 1)\r\n elif index == 5:\r\n self.x = self.x - a\r\n self.y = self.y\r\n self.x1 = self.x - Point.radius\r\n self.y1 = self.y - h - Point.radius\r\n self.x2 = self.x + Point.radius\r\n self.y2 = self.y - h + Point.radius\r\n self.point_on_canvas = canvas.create_oval(self.x1, self.y1, self.x2, self.y2, fill='white', outline = 'black' , width = 1)\r\n canvas.tag_bind(self.point_on_canvas, '<Button-1>', self.click_point_function)\r\n #############################################\r\n def click_point_function(self, event):\r\n Player.alter_chosen_item(Player, self)\r\n 
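# alter_chosen_item() has just moved the selection highlight to this point;\r\n # now retitle the shared build button (Build City / Upgrade City) to match it\r\n 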
build_button_change_according_to_chosen_item()\r\n\r\n\r\n point_id = 0\r\n point_list = []\r\n\r\n def set_road_between_two_points (self,canvas, point_a, point_b, point_index):\r\n if [point_a, point_b] not in Road.connection_list and [point_b, point_a] not in Road.connection_list:\r\n current_road = Road(canvas, point_a, point_b, point_index)\r\n point_a.roads.append(current_road)\r\n point_b.roads.append(current_road)\r\n Road.roads_list.append(current_road)\r\n Road.connection_list.append([point_a, point_b])\r\n\r\n def raise_tag_for_all_poins(self, canvas):\r\n for point in Point.point_list:\r\n canvas.tag_raise(point.point_on_canvas)\r\n\r\n def disable_close_points(self):\r\n for road in self.roads:\r\n if road.point_a == self:\r\n road.point_b.can_build_on = False\r\n else:\r\n road.point_a.can_build_on = False\r\n\r\n def check_if_continued_road(self):\r\n if self.player in (Player.current_player, None):\r\n for road in self.roads:\r\n if road in Player.current_player.roads_list:\r\n return True\r\n return False\r\n\r\nclass Road:\r\n roads_list = []\r\n connection_list = []\r\n player = None\r\n canvas = None\r\n def __init__(self, canvas, point_a, point_b, point_index):\r\n self.point_a = point_a\r\n self.point_b = point_b\r\n self.player = None\r\n self.road_active = False\r\n self.canvas_line = canvas.create_line(self.point_a.x, self.point_a.y - Point.h, self.point_b.x, self.point_b.y - Point.h, fill='black', width=2)\r\n canvas.tag_bind(self.canvas_line, '<Button-1>', self.click_road_function)\r\n Road.canvas = canvas\r\n\r\n def click_road_function(self, event):\r\n Player.alter_chosen_item(Player, self)\r\n build_button_change_according_to_chosen_item()\r\n\r\nclass Player:\r\n master = None\r\n chosen_item = None\r\n current_player = None\r\n player_with_most_roads = None\r\n player_with_most_knights = None\r\n player_list = []\r\n id = 0\r\n colors = {0:'blue', 1:'yellow3', 2:'red', 3:'purple'}\r\n sum_dice_throw = None\r\n move_thieves = False\r\n is_first_turn = False###############################################################################################\r\n is_second_turn = False\r\n def __init__(self, root):\r\n self.id = Player.id\r\n Player.id += 1\r\n self.color = Player.colors[self.id]\r\n self.points = 0\r\n self.wheat = 99\r\n self.sheep = 99\r\n self.stone = 99\r\n self.clay = 99\r\n self.tree = 99\r\n self.knights = 0\r\n self.roads = 0\r\n self.roads_list = []\r\n self.cards = []\r\n self.did_throw_dice_yet = False\r\n Player.player_list.append(self)\r\n Player.master = root\r\n\r\n def check_if_winner(self):\r\n if self.points >= 10:\r\n Player.master.quit()\r\n print ('Player', self.id, 'Won')\r\n\r\n def alter_chosen_item(self, item):\r\n if Player.chosen_item == None:\r\n pass\r\n elif Player.chosen_item.__class__.__name__ == 'Point':\r\n Point.canvas.itemconfig(Player.chosen_item.point_on_canvas, outline='black', width=1)\r\n elif Player.chosen_item.__class__.__name__ == 'Road':\r\n Road.canvas.itemconfig(Player.chosen_item.canvas_line, width=2)\r\n Player.chosen_item = item\r\n if Player.chosen_item == None:\r\n pass\r\n elif Player.chosen_item.__class__.__name__ == 'Point':\r\n Point.canvas.itemconfig(Player.chosen_item.point_on_canvas, outline='black', width=4)\r\n elif Player.chosen_item.__class__.__name__ == 'Road':\r\n Road.canvas.itemconfig(Player.chosen_item.canvas_line, width=4)\r\n\r\n def update_player_stats(self):\r\n player_label_text.set(self.id)\r\n points_label_text.set(self.points)\r\n stone_label_text.set(self.stone)\r\n 
tree_label_text.set(self.tree)\r\n clay_label_text.set(self.clay)\r\n wheat_label_text.set(self.wheat)\r\n sheep_label_text.set(self.sheep)\r\n knight_label_text.set(self.knights)\r\n road_label_text.set(self.roads)\r\n Player.current_player.check_if_winner()\r\n\r\n def check_if_player_can_build_settelment(self):\r\n if self.clay >= 1 and self.wheat >= 1 and self.sheep >= 1 and self.tree >= 1:\r\n self.clay -= 1\r\n self.wheat -= 1\r\n self.sheep -= 1\r\n self.tree -= 1\r\n return True\r\n\r\n def check_if_player_can_build_road(self):\r\n if self.clay >= 1 and self.tree >= 1:\r\n self.clay -= 1\r\n self.tree -= 1\r\n return True\r\n\r\n def check_if_player_can_upgrade(self):\r\n if self.stone >= 3 and self.wheat >= 2:\r\n self.stone -= 3\r\n self.wheat -= 2\r\n return True\r\n\r\n def check_who_has_longest_road(self):\r\n for player in Player.player_list:\r\n if player.roads < 5 and Player.player_with_most_roads == None:\r\n pass\r\n\r\n elif player.roads >= 5 and Player.player_with_most_roads == None:\r\n Player.player_with_most_roads = player\r\n player.points += 2\r\n Player.player_with_most_roads.update_player_stats()\r\n\r\n elif player.roads > Player.player_with_most_roads.roads:\r\n Player.player_with_most_roads.points -= 2\r\n Player.player_with_most_roads.update_player_stats()\r\n Player.player_with_most_roads = player\r\n player.points += 2\r\n Player.player_with_most_roads.update_player_stats()\r\n\r\nclass OpeningScreen:\r\n OS = None\r\n number_of_players = 4\r\n def __init__(self, open_screen):\r\n open_screen.title('Catan')\r\n self.open_screen_label = Label(open_screen, text='Number of Players(3 or 4): ')\r\n self.open_screen_entry = Entry(open_screen)\r\n self.open_screen_button = Button(open_screen, text='Enter')\r\n OpeningScreen.OS = open_screen\r\n self.open_screen_button.bind('<Button-1>', self.open_screen_get_number_of_players)\r\n self.open_screen_label.pack(side='left')\r\n self.open_screen_entry.pack(side='left')\r\n self.open_screen_button.pack(side='left')\r\n\r\n def open_screen_get_number_of_players(self, event):\r\n input = self.open_screen_entry.get()\r\n print (input)\r\n if input == '3':\r\n OpeningScreen.number_of_players = 3\r\n OpeningScreen.OS.quit()\r\n elif input == '4':\r\n OpeningScreen.number_of_players = 4\r\n OpeningScreen.OS.quit()\r\n\r\ndef build_button_change_according_to_chosen_item():\r\n type = Player.chosen_item.__class__.__name__\r\n if type == 'Road':\r\n build_button_text.set('Build Road')\r\n elif type == 'Point':\r\n if Player.chosen_item.player == None:\r\n build_button_text.set('Build City')\r\n elif Player.chosen_item.upgraded == False:\r\n build_button_text.set('Upgrade City')\r\n else:\r\n build_button_text.set('City Upgraded')\r\n else:\r\n build_button_text.set('No Item Chosen')\r\n\r\ndef throw_dice(event):\r\n if Player.is_first_turn == False and Player.is_second_turn == False:\r\n if Player.current_player.did_throw_dice_yet == False:\r\n possible_dice = [1,2,3,4,5,6]\r\n first_dice = random.choice(possible_dice)\r\n second_dice = random.choice(possible_dice)\r\n dice1_string.set(first_dice)\r\n dice2_string.set(second_dice)\r\n Player.sum_dice_throw = first_dice + second_dice\r\n if Player.sum_dice_throw == 7:\r\n Player.move_thieves = True\r\n for row in Hexagon.board:\r\n for hexagon in row:\r\n if hexagon.dice_throw == Player.sum_dice_throw:\r\n for point in hexagon.points:\r\n if point.player != None:\r\n if hexagon.type == 'wheat' and Hexagon.thieves != hexagon:\r\n point.player.wheat += 1\r\n elif hexagon.type == 'clay' and 
Hexagon.thieves != hexagon:\r\n point.player.clay += 1\r\n elif hexagon.type == 'stone' and Hexagon.thieves != hexagon:\r\n point.player.stone += 1\r\n elif hexagon.type == 'sheep' and Hexagon.thieves != hexagon:\r\n point.player.sheep += 1\r\n elif hexagon.type == 'tree' and Hexagon.thieves != hexagon:\r\n point.player.tree += 1\r\n Player.current_player.update_player_stats()\r\n Player.current_player.did_throw_dice_yet = True\r\n\r\ndef check_if_player_can_build(event):\r\n if Player.chosen_item.__class__.__name__ == 'Point':\r\n if Player.chosen_item.player == None and Player.chosen_item.can_build_on == True:\r\n if Player.is_first_turn == True or Player.is_second_turn == True:\r\n Player.chosen_item.player = Player.current_player\r\n Player.chosen_item.canvas.itemconfig(Player.chosen_item.point_on_canvas, fill = Player.colors[Player.current_player.id])\r\n Player.chosen_item.disable_close_points()\r\n Player.current_player.points += 1\r\n Player.current_player.update_player_stats()\r\n\r\n elif Player.current_player.check_if_player_can_build_settelment() == True:\r\n Player.chosen_item.player = Player.current_player\r\n Player.chosen_item.canvas.itemconfig(Player.chosen_item.point_on_canvas, fill=Player.colors[Player.current_player.id])\r\n Player.chosen_item.disable_close_points()\r\n Player.current_player.points += 1\r\n Player.current_player.update_player_stats()\r\n\r\n elif Player.chosen_item.upgraded == False and Player.current_player.check_if_player_can_upgrade() == True:\r\n Player.chosen_item.upgraded = True\r\n Player.current_player.points += 1\r\n Player.current_player.update_player_stats()\r\n\r\n elif Player.chosen_item.__class__.__name__ == 'Road':\r\n if Player.chosen_item.player == None:\r\n\r\n #For the last player to have a double first turn:\r\n if Player.is_first_turn == True and Player.current_player.points == 1 and (Player.chosen_item.point_a.player == Player.current_player or Player.chosen_item.point_b.player == Player.current_player):\r\n Player.chosen_item.player = Player.current_player\r\n Player.chosen_item.canvas.itemconfig(Player.chosen_item.canvas_line, fill=Player.colors[Player.current_player.id])\r\n Player.current_player.roads += 1\r\n Player.current_player.roads_list.append(Player.chosen_item)\r\n Player.check_who_has_longest_road(Player)\r\n Player.current_player.update_player_stats()\r\n if Player.current_player == Player.player_list[-1]:\r\n Player.is_second_turn = True\r\n end_turn_function_without_event()\r\n\r\n elif Player.is_second_turn == True and Player.current_player.points == 2 and (Player.chosen_item.point_a.player == Player.current_player or Player.chosen_item.point_b.player == Player.current_player):\r\n Player.chosen_item.player = Player.current_player\r\n Player.chosen_item.canvas.itemconfig(Player.chosen_item.canvas_line, fill=Player.colors[Player.current_player.id])\r\n Player.current_player.roads += 1\r\n Player.current_player.roads_list.append(Player.chosen_item)\r\n Player.check_who_has_longest_road(Player)\r\n Player.current_player.update_player_stats()\r\n if Player.current_player.id == 0:\r\n Player.is_second_turn = False\r\n end_turn_function_without_event()\r\n\r\n #Normal Turn:\r\n elif Player.is_first_turn == False and Player.is_second_turn == False:\r\n if Player.chosen_item.point_a.player == Player.current_player or Player.chosen_item.point_b.player == Player.current_player:\r\n if Player.current_player.check_if_player_can_build_road() == True:\r\n Player.chosen_item.player = Player.current_player\r\n 
Player.chosen_item.canvas.itemconfig(Player.chosen_item.canvas_line, fill=Player.colors[Player.current_player.id])\r\n Player.current_player.roads += 1\r\n Player.current_player.roads_list.append(Player.chosen_item)\r\n Player.check_who_has_longest_road(Player)\r\n Player.current_player.update_player_stats()\r\n\r\n elif Player.chosen_item.point_a.check_if_continued_road()== True and Player.is_second_turn == False :\r\n if Player.current_player.check_if_player_can_build_road() == True:\r\n Player.chosen_item.player = Player.current_player\r\n Player.chosen_item.canvas.itemconfig(Player.chosen_item.canvas_line,fill=Player.colors[Player.current_player.id])\r\n Player.current_player.roads += 1\r\n Player.current_player.roads_list.append(Player.chosen_item)\r\n Player.check_who_has_longest_road(Player)\r\n Player.current_player.update_player_stats()\r\n\r\n elif Player.chosen_item.point_b.check_if_continued_road() == True and Player.is_second_turn == False:\r\n if Player.current_player.check_if_player_can_build_road() == True:\r\n Player.chosen_item.player = Player.current_player\r\n Player.chosen_item.canvas.itemconfig(Player.chosen_item.canvas_line,fill=Player.colors[Player.current_player.id])\r\n Player.current_player.roads += 1\r\n Player.current_player.roads_list.append(Player.chosen_item)\r\n Player.check_who_has_longest_road(Player)\r\n Player.current_player.update_player_stats()\r\n\r\n\r\ndef end_turn_function(event):\r\n if Player.is_first_turn == False and Player.is_second_turn == False:\r\n Player.current_player.did_throw_dice_yet = False\r\n Player.move_thieves = False\r\n if Player.current_player.id == len(Player.player_list) - 1:\r\n Player.current_player = Player.player_list[0]\r\n else:\r\n Player.current_player = Player.player_list[Player.current_player.id + 1]\r\n Player.current_player.update_player_stats()\r\n dice1_string.set(0)\r\n dice2_string.set(0)\r\n\r\n if Player.chosen_item == None:\r\n pass\r\n elif Player.chosen_item.__class__.__name__ == 'Point':\r\n Point.canvas.itemconfig(Player.chosen_item.point_on_canvas, outline='black', width=1)\r\n elif Player.chosen_item.__class__.__name__ == 'Road':\r\n Road.canvas.itemconfig(Player.chosen_item.canvas_line, width=2)\r\n Player.chosen_item = None\r\n Player.sum_dice_throw = None\r\n Player.current_player.check_if_winner()\r\n\r\ndef end_turn_function_without_event():\r\n Player.current_player.did_throw_dice_yet = False\r\n Player.move_thieves = False\r\n if Player.is_first_turn == True and Player.is_second_turn == False:\r\n if Player.current_player.id == len(Player.player_list) - 1:\r\n Player.current_player = Player.player_list[0]\r\n else:\r\n Player.current_player = Player.player_list[Player.current_player.id + 1]\r\n\r\n elif Player.is_first_turn == True and Player.is_second_turn == True: #Another Round for the last player\r\n Player.is_first_turn = False\r\n\r\n else: #if second turn\r\n if Player.current_player.id != 0:\r\n Player.current_player = Player.player_list[Player.current_player.id - 1]\r\n else:\r\n Player.current_player = Player.player_list[0]\r\n\r\n Player.current_player.update_player_stats()\r\n dice1_string.set(0)\r\n dice2_string.set(0)\r\n\r\n if Player.chosen_item == None:\r\n pass\r\n elif Player.chosen_item.__class__.__name__ == 'Point':\r\n Point.canvas.itemconfig(Player.chosen_item.point_on_canvas, outline='black', width=1)\r\n elif Player.chosen_item.__class__.__name__ == 'Road':\r\n Road.canvas.itemconfig(Player.chosen_item.canvas_line, width=2)\r\n Player.chosen_item = None\r\n 
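# clear the rest of the per-turn state so the next player starts clean\r\n    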
Player.sum_dice_throw = None\r\n Player.current_player.check_if_winner()\r\n##########################################################\r\n##########################################################\r\n\"\"\"open_screen = Tk()\r\nOpeningScreen(open_screen)\r\nopen_screen.mainloop()\r\nopen_screen.destroy()\"\"\"\r\n##########################################################\r\n##########################################################\r\nroot = Tk()\r\nfor i in range (0,OpeningScreen.number_of_players):######################\r\n Player(root)\r\n\r\nPlayer.current_player = Player.player_list[0]\r\na = 50\r\nhexagon_canvas = Canvas(root, width=a*10, height=a*10)\r\n###############################################################\r\nHexagon.create_board_of_hexagon(Hexagon, hexagon_canvas)\r\nHexagon.set_points_to_hexagons(Hexagon, hexagon_canvas)\r\nHexagon.set_all_roads(Hexagon, hexagon_canvas)\r\nPoint.raise_tag_for_all_poins(Point, hexagon_canvas)\r\nbutton_frame = Frame(root)\r\n###############################################################\r\nthrow_dice_button = Button(button_frame, text='Throw Dice')\r\nthrow_dice_button.bind('<Button-1>', throw_dice)\r\nthrow_dice_button.grid(row = 0, column = 0)\r\n\r\ndice1_string = IntVar()\r\ndice2_string = IntVar()\r\ndice1_string.set(0)\r\ndice2_string.set(0)\r\ndice1 = Label(button_frame, textvariable = dice1_string)\r\ndice2 = Label(button_frame, textvariable = dice2_string)\r\ndice1.grid(row = 1, column = 0)\r\ndice2.grid(row = 1, column = 1)\r\n\r\nbuild_button_text = StringVar()\r\nbuild_button_index = 0\r\nbuild_button_text.set('Build City')\r\nbuild_button = Button(button_frame , textvariable=build_button_text)\r\nbuild_button.grid(row = 2, column = 0)\r\nbuild_button.bind('<Button-1>', check_if_player_can_build)\r\n\r\nend_turn_button = Button(button_frame , text = 'End Turn')\r\nend_turn_button.bind('<Button-1>', end_turn_function)\r\nend_turn_button.grid(row = 3, column = 0)\r\n\r\ndraw_card_button = Button(button_frame, text = 'Draw Card')\r\n#draw_card_button.grid(row = 4, column = 0)\r\n\r\nplayer_frame = Frame(root)\r\nplayer_label_text = StringVar()\r\npoints_label_text = StringVar()\r\ntree_label_text = StringVar()\r\nclay_label_text = StringVar()\r\nwheat_label_text = StringVar()\r\nstone_label_text = StringVar()\r\nsheep_label_text = StringVar()\r\nknight_label_text = StringVar()\r\nroad_label_text = StringVar()\r\nplayer_label_text.set(Player.current_player.id)\r\npoints_label_text.set(Player.current_player.points)\r\ntree_label_text.set(Player.current_player.tree)\r\nclay_label_text.set(Player.current_player.clay)\r\nwheat_label_text.set(Player.current_player.wheat)\r\nstone_label_text.set(Player.current_player.stone)\r\nsheep_label_text.set(Player.current_player.sheep)\r\nknight_label_text.set(Player.current_player.knights)\r\nroad_label_text.set(Player.current_player.roads)\r\nvariable_player_label = Label (player_frame, textvariable = player_label_text)\r\nvariable_points_label = Label (player_frame, textvariable = points_label_text)\r\nvariable_tree_label = Label (player_frame, textvariable = tree_label_text)\r\nvariable_clay_label = Label (player_frame, textvariable = clay_label_text)\r\nvariable_wheat_label = Label (player_frame, textvariable = wheat_label_text)\r\nvariable_stone_label = Label (player_frame, textvariable = stone_label_text)\r\nvariable_sheep_label = Label (player_frame, textvariable = sheep_label_text)\r\nvariable_knight_label = Label (player_frame, textvariable = knight_label_text)\r\nvariable_road_label = Label (player_frame, textvariable = 
road_label_text)\r\nplayer_label = Label (player_frame, text='Player: ')\r\npoints_label = Label (player_frame, text='Points: ')\r\ntree_label = Label (player_frame, text='Tree: ')\r\nclay_label = Label (player_frame, text='Clay: ')\r\nwheat_label = Label (player_frame, text='Wheat: ')\r\nstone_label = Label (player_frame, text='Stone: ')\r\nsheep_label = Label (player_frame, text='Sheep: ')\r\nknight_label = Label (player_frame, text='Knights: ')\r\nroad_label = Label (player_frame, text='Roads: ')\r\n\r\nplayer_label.grid(row=0, column=0)\r\npoints_label .grid(row=1, column=0)\r\ntree_label.grid(row=2, column=0)\r\nclay_label.grid(row=3, column=0)\r\nwheat_label.grid(row=4, column=0)\r\nstone_label.grid(row=5, column=0)\r\nsheep_label.grid(row=6, column=0)\r\nknight_label.grid(row=7, column=0)\r\nroad_label.grid(row=8, column=0)\r\nvariable_player_label.grid(row=0, column=1)\r\nvariable_points_label.grid(row=1, column=1)\r\nvariable_tree_label.grid(row=2, column=1)\r\nvariable_clay_label.grid(row=3, column=1)\r\nvariable_wheat_label.grid(row=4, column=1)\r\nvariable_stone_label.grid(row=5, column=1)\r\nvariable_sheep_label.grid(row=6, column=1)\r\nvariable_knight_label.grid(row=7, column=1)\r\nvariable_road_label.grid(row=8, column=1)\r\n\r\nbutton_frame.grid(row = 0, column = 2)\r\nplayer_frame.grid(row = 0, column = 1)\r\nhexagon_canvas.grid(row = 0, column = 0)\r\nroot.mainloop()","sub_path":"Cattan.py","file_name":"Cattan.py","file_ext":"py","file_size_in_byte":29945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"153418967","text":"# -*- coding: utf-8 -*-\nfrom OFS.event import ObjectWillBeRemovedEvent\nfrom plone.app.multilingual.browser.utils import is_language_independent\nfrom plone.app.multilingual.browser.utils import multilingualMoveObject\nfrom plone.app.multilingual.interfaces import ITranslationManager\nfrom plone.app.multilingual.testing import PAM_FUNCTIONAL_TESTING\nfrom plone.dexterity.utils import createContentInContainer\nfrom plone.uuid.interfaces import IUUID\nfrom zope.event import notify\nimport unittest2 as unittest\nfrom plone.app.multilingual.interfaces import IPloneAppMultilingualInstalled\nfrom zope.interface import alsoProvides\n\n\nclass TestLanguageRootFolder(unittest.TestCase):\n\n layer = PAM_FUNCTIONAL_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n alsoProvides(self.layer['request'], IPloneAppMultilingualInstalled)\n\n def test_shared_content(self):\n # Create shared document\n createContentInContainer(\n self.portal.en.media, 'Document', title=u\"Test document\")\n\n # Check shared document is there\n self.assertEqual(self.portal.en.media['test-document'],\n self.portal.ca.media['test-document'])\n self.assertEqual(self.portal.en.media['test-document'],\n self.portal.es.media['test-document'])\n\n # Delete shared document\n notify(ObjectWillBeRemovedEvent(self.portal.en.media['test-document']))\n self.portal.en.media.manage_delObjects('test-document')\n\n # Check that it is not available in LRFs\n self.assertNotIn('test-document', self.portal.ca.media.objectIds())\n self.assertNotIn('test-document', self.portal.es.media.objectIds())\n\n def test_shared_content_indexing(self):\n # Create shared document\n createContentInContainer(\n self.portal.en.media, 'Document', title=u\"Test document\")\n\n # Check that shared document is indexed in all LRFs\n elements = self.portal.portal_catalog.searchResults(id='test-document')\n 
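# the shared item is indexed once per language root folder (en, ca, es)\n        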
self.assertEqual(len(elements), 3)\n\n # Remove shared document\n notify(ObjectWillBeRemovedEvent(self.portal.en.media['test-document']))\n self.portal.en.media.manage_delObjects('test-document')\n\n # Check that shared document is unindexed\n elements = self.portal.portal_catalog.searchResults(id='test-document')\n self.assertEqual(len(elements), 0)\n\n def test_shared_content_uuid(self):\n # Create shared document\n createContentInContainer(\n self.portal, 'LIF', title=u\"Media\", checkConstraints=False)\n createContentInContainer(\n self.portal.media, 'Document', title=u\"Test document\")\n\n root_uuid = IUUID(self.portal.media['test-document'])\n shared_uuid = IUUID(self.portal.ca.media['test-document'])\n\n self.assertEqual('{0:s}-ca'.format(root_uuid), shared_uuid)\n\n def test_moving_shared_content_to_lrf(self):\n # Create shared document\n createContentInContainer(\n self.portal, 'LIF', title=u\"Media\", checkConstraints=False)\n createContentInContainer(\n self.portal.media, 'Document', title=u\"Test document\")\n uuid = IUUID(self.portal.media['test-document'])\n\n # Check that ghost is ghost\n self.assertTrue(\n is_language_independent(self.portal.ca.media['test-document']))\n\n # Check is in the catalog\n brains = self.portal.portal_catalog.searchResults(UID=uuid)\n self.assertEqual(len(brains), 1)\n self.assertEqual(brains[0].getPath(), '/plone/media/test-document')\n\n brains = self.portal.portal_catalog.searchResults(\n UID='{0:s}-ca'.format(uuid))\n self.assertEqual(len(brains), 1)\n self.assertEqual(brains[0].getPath(), '/plone/ca/media/test-document')\n\n brains = self.portal.portal_catalog.searchResults(\n UID='{0:s}-es'.format(uuid))\n self.assertEqual(len(brains), 1)\n self.assertEqual(brains[0].getPath(), '/plone/es/media/test-document')\n\n # MOVE!\n moved = multilingualMoveObject(\n self.portal.ca.media['test-document'], 'ca')\n\n # Check that the old and the new uuid are the same\n moved_uuid = IUUID(self.portal.ca['test-document'])\n\n self.assertEqual(uuid, moved_uuid)\n self.assertFalse(is_language_independent(moved))\n\n # Check portal_catalog is updated after move\n brains = self.portal.portal_catalog.searchResults(UID=uuid)\n self.assertEqual(len(brains), 1)\n self.assertEqual(brains[0].getPath(),\n '/plone/ca/test-document')\n\n brains = self.portal.portal_catalog.searchResults(\n UID='{0:s}-ca'.format(uuid))\n self.assertEqual(len(brains), 0)\n\n brains = self.portal.portal_catalog.searchResults(\n UID='{0:s}-es'.format(uuid))\n self.assertEqual(len(brains), 0)\n\n # Check which translations it has\n self.assertEqual(\n ITranslationManager(moved).get_translations(), {'ca': moved})\n ITranslationManager(moved).add_translation('es')\n self.assertEqual(\n ITranslationManager(moved).get_translations(),\n {'ca': moved, 'es': self.portal.es['test-document']})\n\n # Check that ghost is no longer ghost\n self.assertFalse(\n is_language_independent(self.portal.es['test-document']))\n","sub_path":"buildout-cache/eggs/plone.app.multilingual-3.0.2-py2.7.egg/plone/app/multilingual/tests/test_lrf.py","file_name":"test_lrf.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"540005444","text":"from django.shortcuts import render, redirect\nfrom functools import wraps\nfrom django.http import Http404\nfrom app.models import Task, Category\nfrom django.contrib.auth.models import User\n\ndef user_owns_category(func):\n\t\"\"\"Checks if the logged in user owns the category 
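looked up from the ``id`` kwarg; non-owners are redirected to /.\n\n\tUsage sketch (the view name is illustrative, not part of this app):\n\n\t\t@user_owns_category\n\t\tdef category_detail(request, id):\n\t\t\t... # only runs when request.user owns Category(id=id)\n\t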
\"\"\"\n\t@wraps(func)\n\tdef wrapper(*args, **kwargs):\n\t\ttry:\n\t\t\tcategory = Category.objects.get(id=kwargs['id'])\n\t\texcept Category.DoesNotExist:\n\t\t\traise Http404(\"This category does not exsist\")\n\t\tif category.user_id.id == args[0].user.id:\n\t\t\tprint('successful')\n\t\t\treturn func(*args, **kwargs)\n\t\telse:\n\t\t\treturn redirect('/')\n\treturn wrapper\n\ndef user_owns_task(func):\n\t\"\"\" Checks if the logged in user owns the task\"\"\"\n\t@wraps(func)\n\tdef wrapper(*args, **kwargs):\n\t\ttry:\n\t\t\tcategory = Category.objects.get(id=kwargs['c_id'])\n\t\texcept Category.DoesNotExist:\n\t\t\traise Http404(\"This category does not exsist\")\n\t\ttry:\n\t\t\ttask = Task.objects.get(id=kwargs['t_id'])\n\t\texcept Task.DoesNotExist:\n\t\t\traise Http404(\"This task does not exsist\")\n\t\tif category.user_id == task.user_id == args[0].user:\n\t\t\tif task.category == category:\n\t\t\t\treturn func(*args, **kwargs)\n\t\t\telse:\n\t\t\t\treturn redirect('/')\n\t\telse:\n\t\t\treturn redirect('/')\n\treturn wrapper\n\ndef order_maintainer(request, original_category, original_order):\n\t\"\"\" Maintains the order when a task is deleted or is shifted to different category \"\"\"\n\ttemp_original = Task.objects.filter(category=original_category,\n\t\t\t\t user_id= User.objects.get(username=request.user.username)).order_by('order')\n\tfor task in temp_original:\n\t\tprint(task)\n\t\tif task.order > original_order:\n\t\t\ttask.order -= 1\n\t\t\ttask.save()\n","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"530655540","text":"import math\r\nimport numpy as np\r\nfrom Individual import Individual\r\nfrom matplotlib import pyplot as plt\r\n\r\nclass Agent:\r\n def __init__(self, rangeStart = -2, rangeEnd = 1, n = 1000):\r\n self.rangeStart = rangeStart\r\n self.rangeEnd = rangeEnd\r\n self.n = n\r\n self.parent = int(self.n / 10)\r\n self.population , self.currentPopulation = [], []\r\n\r\n def initPopulation(self):\r\n for i in range(self.n):\r\n node = Individual(np.random.uniform(self.rangeStart, self.rangeEnd))\r\n self.population.append(node)\r\n\r\n def selectMax(self):\r\n self.population.sort(key=lambda x: x.fitness(), reverse=True)\r\n for i in range(self.parent):\r\n self.currentPopulation.append(self.population[i])\r\n\r\n def selectMin(self):\r\n self.population.sort(key=lambda x: x.fitness())\r\n for i in range(self.parent):\r\n self.currentPopulation.append(self.population[i])\r\n\r\n def mutate(self):\r\n for _ in self.currentPopulation:\r\n if np.random.uniform(0, 1) < 0.1:\r\n self.currentPopulation.append(Individual(np.random.uniform(self.rangeStart, self.rangeEnd)))\r\n self.population = self.currentPopulation\r\n self.currentPopulation = []\r\n\r\n def crossover(self):\r\n for i in range(self.parent):\r\n p1 = self.currentPopulation[np.random.randint(0, self.parent - 1)]\r\n p2 = self.currentPopulation[np.random.randint(0, self.parent - 1)]\r\n self.currentPopulation.append(Individual(np.random.uniform(p1.x, p2.x)))\r\n\r\n def plotFuntion(self):\r\n x = np.arange(self.rangeStart, self.rangeEnd, 0.1)\r\n y = (x ** 3) * np.cos(x) + (x ** 2) * np.cos(x) - x * np.sin(x)\r\n plt.plot(x, y)\r\n plt.show()\r\n\r\n def run(self):\r\n self.initPopulation()\r\n for i in range(self.n):\r\n self.selectMax()\r\n self.crossover()\r\n self.mutate()\r\n print(\"Maximum value = \", self.population[0].fitness())\r\n 
self.population = []\r\n self.initPopulation()\r\n for i in range(self.n):\r\n self.selectMin()\r\n self.crossover()\r\n self.mutate()\r\n print(\"Minimum value = \", self.population[0].fitness())","sub_path":"geneticAlgorithm.py","file_name":"geneticAlgorithm.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"517554321","text":"\"\"\"Trying out some terrain generation via noise treated as height maps.\n\"\"\"\n\nfrom multiprocessing import Pool\nimport pygame\nfrom noise2d import perlin\n\n\n\ndef terrain(x):\n \"\"\"Interpolate the noise value and return the terrain type.\n \"\"\"\n if x <-1:\n return {\"name\": \"deep water\", \"color\": (0, 0, 100),}\n elif -1 <= x <= -0.5:\n return {\"name\": \"water\", \"color\": (0, 0, 180)}\n elif -0.5 < x <= -0.3: \n return {\"name\": \"shallow water\", \"color\": (0, 0, 230)}\n elif -0.3 < x <= 0.1:\n return {\"name\": \"beach\", \"color\": (244, 164, 96)}\n elif 0.1 < x <= 0.4:\n return {\"name\": \"grass\", \"color\": (127, 255, 0)}\n elif 0.4 < x <= 1:\n return {\"name\": \"forest\", \"color\": (0, 128, 0)}\n else:\n # x > 1\n return {\"name\": \"deep forest\", \"color\": (0, 50, 0)}\n\n\n\ndef gen_terrain(x, y, quadrants_wide, quadrants_tall, octaves, persistence):\n \"\"\"\n x {int} Horizontal quadrant offset.\n y {int} Vertical quadrant offset.\n quadrants_wide {int} How many quadrant columns.\n quadrants_tall {int} How many quadrant rows.\n octaves and persistence {numbers} noise modifiers.\n \n returns a pygame.Surface with colorized terrain.\n \"\"\"\n # v1\n #surface = pygame.Surface((quadrants_wide*quadrant_size, quadrants_tall*quadrant_size)).convert()\n # Reusable rect for space filling.\n #rect = pygame.Rect(0, 0, quadrant_size, quadrant_size)\n # Generate terrain.\n #for x in xrange(quadrant_x, quadrant_x+quadrants_wide):\n #for y in xrange(quadrant_y, quadrant_y+quadrants_tall):\n #color = terrain(perlin(x, y, octaves, persistence))[\"color\"]\n #rect.left = x*quadrant_size\n #rect.top = y*quadrant_size\n #surface.fill(color, rect=rect)\n # Seems like I can't pickle pygame surfaces, but I can convert them to string\n # buffers and send the string back.\n #return pygame.image.tostring(surface, \"RGB\")\n # v2\n rowiter = xrange(x, x+quadrants_wide)\n coliter = xrange(y, y+quadrants_tall)\n return (x, y, quadrants_wide, quadrants_tall, [[terrain(perlin(row, col, octaves, persistence)) for col in coliter] for row in rowiter])\n\n\n\nclass Game(object):\n \"\"\"Manage the terrain generation and the eventual display.\n \"\"\"\n # How many quadrants wide and tall.\n quadrants_wide = 92\n quadrants_tall = 92\n # How big - wide and tall - is each quadrant.\n quadrant_size = 5\n # Full pixel size of the area (width, height).\n world_dims = (quadrants_wide*quadrant_size, quadrants_tall*quadrant_size)\n # Adjustments to the noise function.\n octaves = 3\n persistence = 1.0 / 100  # was 1/100, which is 0 under Python 2's integer division\n\n def __init__(self):\n # Pygame needs a few things initialized.\n pygame.init()\n self.screen = pygame.display.set_mode(self.world_dims)\n \n self.clock = pygame.time.Clock()\n \n self.background = pygame.Surface(self.world_dims).convert()\n\n # Loading screen.\n font = pygame.font.SysFont(\"arial\", 18)\n text = font.render(\"Generating terrain...\", True, (0, 0, 0))\n self.background.fill((255, 255, 255))\n self.background.blit(text, (5, 5))\n \n def run(self):\n # Start the terrain processing.\n terrain_gen_pool = Pool(processes=2)\n process_args = (0, 0, 
self.quadrants_wide, self.quadrants_tall, \n self.octaves, self.persistence)\n # Prevent beachball of waiting.\n terrain_gen_results = terrain_gen_pool.apply_async(gen_terrain, process_args)\n \n while True:\n if terrain_gen_results.ready():\n # Replace loading screen with terrain.\n # v1\n #self.background = pygame.image.fromstring(terrain_gen_results.get(), self.world_dims, \"RGB\")\n # v2\n offx, offy, qwide, qtall, terrain = terrain_gen_results.get()\n surface = pygame.Surface((qwide*self.quadrant_size, qtall*self.quadrant_size)).convert()\n # Reusable rect for space filling.\n rect = pygame.Rect(0, 0, self.quadrant_size, self.quadrant_size)\n # Generate terrain.\n for x, row in enumerate(terrain):\n for y, cell in enumerate(row):\n rect.left = x*self.quadrant_size\n rect.top = y*self.quadrant_size\n surface.fill(cell[\"color\"], rect=rect)\n self.background.blit(surface, (offx,offy))\n \n self.clock.tick()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n\n self.screen.blit(self.background, (0, 0))\n pygame.display.flip()\n\nif __name__ == \"__main__\":\n game = Game()\n game.run()\n\n","sub_path":"general/noise/procedural.py","file_name":"procedural.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"37627583","text":"\"\"\"Kraken - maths.xfo module.\n\nClasses:\nXfo -- Transform.\n\"\"\"\n\nfrom math_object import MathObject\nfrom kraken.core.kraken_system import ks\nfrom vec3 import Vec3\nfrom quat import Quat\nfrom mat33 import Mat33\nfrom mat44 import Mat44\n\n\nclass Xfo(MathObject):\n \"\"\"Transform object.\"\"\"\n\n def __init__(self, tr=None, ori=None, sc=None):\n \"\"\"Initializes tr, ori and sc values for Xfo object.\"\"\"\n\n super(Xfo, self).__init__()\n if ks.getRTValTypeName(tr) == 'Xfo':\n self._rtval = tr\n else:\n self._rtval = ks.rtVal('Xfo')\n if isinstance(tr, Xfo):\n self.set(tr=tr.tr, ori=tr.ori, sc=tr.sc)\n else:\n if tr is not None:\n self.tr = tr\n if ori is not None:\n self.ori = ori\n if sc is not None:\n self.sc = sc\n\n\n def __str__(self):\n \"\"\"String representation of Transform.\n\n Returns:\n str: String representation of Transform.\n\n \"\"\"\n\n stringRep = \"Xfo(ori=\" + str(self.ori)\n stringRep += \", tr=\" + str(self.tr)\n stringRep += \", sc=\" + str(self.sc) + \")\"\n\n return stringRep\n\n\n @property\n def tr(self):\n \"\"\"Gets translation property of this transform.\n\n Returns:\n float: Translation property of this transform.\n\n \"\"\"\n\n return Vec3(self._rtval.tr)\n\n\n @tr.setter\n def tr(self, value):\n \"\"\"Sets translation of this transform.\n\n Args:\n value (Vec3): Vector to set the translation by.\n\n Returns:\n bool: True if successful.\n\n \"\"\"\n\n self._rtval.tr = ks.rtVal('Vec3', value)\n\n return True\n\n\n @property\n def ori(self):\n \"\"\"Gets orientation property of this transform.\n\n Returns:\n float: Orientation property of this transform.\n\n \"\"\"\n\n return Quat(self._rtval.ori)\n\n\n @ori.setter\n def ori(self, value):\n \"\"\"Sets orientation of this transform.\n\n Args:\n value (Quat): Quaternion to set the orientation by.\n\n Returns:\n bool: True if successful.\n\n \"\"\"\n\n self._rtval.ori = ks.rtVal('Quat', value)\n\n return True\n\n\n @property\n def sc(self):\n \"\"\"Gets scaling property of this transform.\n\n Returns:\n float: Scaling property of this transform.\n\n \"\"\"\n\n return Vec3(self._rtval.sc)\n\n\n @sc.setter\n def sc(self, value):\n \"\"\"Sets scaling of 
this transform.\n\n Args:\n value (Vec3): Quaternion to set the scaling by.\n\n Returns:\n bool: True if successful.\n\n \"\"\"\n\n self._rtval.sc = ks.rtVal('Vec3', value)\n\n return True\n\n\n def __eq__(self, other):\n return self.ori.equal(other.ori) and self.tr.equal(other.tr) and self.sc.equal(other.sc)\n\n def __ne__(self, other):\n return not self.ori.equal(other.ori) or not self.tr.equal(other.tr) or not self.sc.equal(other.sc)\n\n def __mul__(self, other):\n return self.multiply(other)\n\n\n def clone(self):\n \"\"\"Returns a clone of the Xfo.\n\n Returns:\n The cloned Xfo\n\n \"\"\"\n\n xfo = Xfo()\n xfo.tr = self.tr.clone()\n xfo.ori = self.ori.clone()\n xfo.sc = self.sc.clone()\n\n return xfo\n\n\n def set(self, tr, ori, sc):\n \"\"\"Setter from the translation, rotation and scaling.\n\n Args:\n tr (Vec3): Vector to set the translation by.\n ori (Quat): Quaternion to set the orientation by.\n sc (Vec3): Vector to set the scaling by.\n\n Returns:\n bool: True if successful.\n\n \"\"\"\n\n self._rtval.set('', ks.rtVal('Vec3', tr), ks.rtVal('Quat', ori),\n ks.rtVal('Vec3', sc))\n\n return True\n\n\n def setIdentity(self):\n \"\"\"Sets this transform to the identity.\n\n Returns:\n bool: True if successful.\n\n \"\"\"\n\n self._rtval.setIdentity('')\n\n return True\n\n\n def setFromMat44(self, m):\n \"\"\"Sets this transform from the supplied matrix.\n\n Args:\n m (Mat44): 4x4 matrix to set the transform from.\n\n Returns:\n Xfo: New transform set from input Mat44.\n\n \"\"\"\n\n return Xfo(self._rtval.setFromMat44('Xfo', ks.rtVal('Mat44', m)))\n\n\n def toMat44(self):\n \"\"\"Gets a Mat44 from this xfo.\n\n Returns:\n Mat44: Matrix from this transform.\n\n \"\"\"\n\n return Mat44(self._rtval.toMat44('Mat44'))\n\n\n def multiply(self, xfo):\n \"\"\"Overload method for the multiply operator.\n\n Args:\n xfo (Xfo): Other transform to multiply this one by.\n\n Returns:\n Xfo: New Xfo of the product of the two Xfo's.\n\n \"\"\"\n\n return Xfo(self._rtval.multiply('Xfo', ks.rtVal('Xfo', xfo)))\n\n\n def transformVector(self, v):\n \"\"\"Transforms a vector by this transform.\n\n Args:\n v (Vec3): Vector to transform.\n\n Returns:\n Vec3: New vector transformed by this transform.\n\n \"\"\"\n\n return Vec3(self._rtval.transformVector('Vec3', ks.rtVal('Vec3', v)))\n\n\n def transformRay(self, ray):\n \"\"\"Transforms a ray vector by this transform.\n\n Args:\n ray (Vec3): Ray vector to transform.\n\n Returns:\n Ray: New ray vector transformed by this transform.\n\n \"\"\"\n\n # return Ray(self._rtval.transformRay('Ray', ks.rtVal('Ray', ray)))\n raise NotImplementedError('Ray object is not implemented!')\n\n\n def inverse(self):\n \"\"\"Get the inverse transform of this transform.\n\n Returns:\n Xfo: Inverse of this transform.\n\n \"\"\"\n\n return Xfo(self._rtval.inverse('Xfo'))\n\n\n def inverseTransformVector(self, vec):\n \"\"\"Transforms a vector with this xfo inversely\n\n Note: We have 'inverseTransformVector' because Xfos with non-uniform\n scaling cannot be inverted as Xfos.\n\n Args:\n vec (Vec3): Vector to be inversely transformed.\n\n Returns:\n Vec3: Inversely transformed vector.\n\n \"\"\"\n\n return Vec3(self._rtval.inverseTransformVector('Vec3', ks.rtVal('Vec3', vec)))\n\n\n def linearInterpolate(self, other, t):\n \"\"\"Linearly interpolates this transform with another one based on a\n scalar blend value (0.0 to 1.0).\n\n Args:\n other (Xfo): Transform to blend to.\n t (float): Blend value.\n\n Returns:\n Xfo: New transform blended between this and the input 
transform.\n\n \"\"\"\n\n return Xfo(self._rtval.linearInterpolate('Xfo', ks.rtVal('Xfo', other),\n ks.rtVal('Scalar', t)))\n\n\n def setFromVectors(self, inVec1, inVec2, inVec3, translation):\n \"\"\"Set Xfo values from 3 axis vectors and a translation vector.\n\n Args:\n inVec1 (Vec3): X axis vector.\n inVec2 (Vec3): Y axis vector.\n inVec3 (Vec3): Z axis vector.\n translation (Vec3): Translation vector.\n\n Returns:\n bool: True if successful.\n\n \"\"\"\n\n mat33 = Mat33()\n mat33.setRows(inVec1, inVec2, inVec3)\n self.ori.setFromMat33(mat33.transpose())\n self.tr = translation\n\n return True\n\n\n# ===============\n# Helper Methods\n# ===============\ndef xfoFromDirAndUpV(base, target, upV):\n \"\"\"Creates a transform for base object pointing to target with an upvector\n upV.\n\n Args:\n base (Vec3): Base vec3 to use in calculation.\n target (Vec3): Target vec3 to use in calculation.\n upV (Vec3): UpV vec3 to use in calculation.\n\n Returns:\n Xfo: Output xfo.\n\n \"\"\"\n\n rootToTarget = target.subtract(base).unit()\n rootToUpV = upV.subtract(base).unit()\n normal = rootToUpV.cross(rootToTarget).unit()\n zAxis = rootToTarget.cross(normal).unit()\n outXfo = Xfo()\n outXfo.setFromVectors(rootToTarget, normal, zAxis, base)\n\n return outXfo\n\n\ndef aimAt(targetXfo, aimPos=None, aimVector=None, aimAxis=(1, 0, 0), upPos=None, upVector=None, upAxis=(0, 1, 0)):\n \"\"\"\n Point the xfo's aimAxis at the aimPos (or aimVector),\n while attempting to keep the xfo's upAxis pointing at the upPos (or upVector)\n Must provide\n 1. aimPos or aimVector\n 2. upPos or upVector\n\n The aim direction takes precendence over the up direction when the two are not orthoganal as input.\n\n Args:\n aimPos (Vec3): Aim the aimAxis of the Xform at this larget location\n upPos (Vec3): Aim the upAxis of the xform at this location (if upVector not provided)\n upVector (Vec3): Aim the upAxis of the xform in this direction (if upPos not provided)\n aimAxis (List): Use this axis of the xform to point at aimPos (NOTE: want to make this Vec3)\n upAxis (List): Use this axis of the xform to point at upPos (or point in direction of upVector)\n\n Returns:\n None\n\n \"\"\"\n\n if aimPos:\n aimVector = aimPos.subtract(targetXfo.tr).unit()\n elif not aimVector:\n raise ValueError(\"Must provide either aimPos or aimVector argument\")\n\n if upPos:\n upVector = upPos.subtract(targetXfo.tr).unit()\n elif not upVector:\n raise ValueError(\"Must provide either upPos or upVector argument\")\n\n\n aimAxisVector = aimVector # same as input arg always\n normalAxisVector = upVector.cross(aimAxisVector).unit() # perpendiculuar to aim, but could be one of two directions\n upAxisVector = normalAxisVector.cross(aimAxisVector).unit() # perpendicular to aim and normal, but could be one of two directions\n\n # Measure the upAxisVector against the original upVector to see if it is less that 90, if not, we want the opposite side, so negate\n angle = upVector.dot(upAxisVector)\n if angle < 0: # more than 90 degrees from the ideal upvector\n upAxisVector = upAxisVector.negate()\n\n # Simply negate the directions of aimAxis and upAxis if needed depending on sign of arguments\n if -1 in upAxis:\n upAxisVector = upAxisVector.negate()\n\n if -1 in aimAxis:\n aimAxisVector = aimAxisVector.negate()\n\n # Sort out which vectors are which axis\n argVectors = [None, None, None]\n\n for i, x in enumerate(aimAxis):\n if x:\n argVectors[i] = aimAxisVector\n\n for i, x in enumerate(upAxis):\n if x:\n argVectors[i] = upAxisVector\n\n # Now, that we have a 
definite aimAxisVector and upAxisVector,\n # let's find the \"real\" normalAxisVectortor with the guaranteed correct side.\n # Given the arguments, we know what axis the aim and up are supposed to be\n # That leaves us with a third to solve for\n # Based on the right-hand rule universe, we know the order to cross product the two known vectors\n\n if not aimAxis[0] and not upAxis[0]: # X is normal axis, so do Y cross Z (in that order) to get it\n normalAxisVector = argVectors[1].cross(argVectors[2]).unit()\n\n elif not aimAxis[1] and not upAxis[1]: # Y is normal axis, so do Z cross X (in that order) to get it\n normalAxisVector = argVectors[2].cross(argVectors[0]).unit()\n\n elif not aimAxis[2] and not upAxis[2]: # Z is normal axis, so do X cross Y (in that order) to get it\n normalAxisVector = argVectors[0].cross(argVectors[1]).unit()\n\n # add the normalAxisVector to the remaining axis\n for i, x in enumerate(argVectors):\n if x is None:\n argVectors[i] = normalAxisVector\n\n targetXfo.setFromVectors(argVectors[0], argVectors[1], argVectors[2], targetXfo.tr)\n","sub_path":"Python/kraken/core/maths/xfo.py","file_name":"xfo.py","file_ext":"py","file_size_in_byte":11627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"209699451","text":"__author__ = 'rich5127'\n\n# Esri start of added imports\nimport glob, zipfile, arcpy\n\n\ndef kmlToFeatures(kmlFile):\n arcpy.AddMessage(\"kmlToFeatures\")\n arcpy.KMLToLayer_conversion(kmlFile, arcpy.env.scratchFolder, \"outKML\")\n return arcpy.env.scratchFolder + \"/outKML.gdb/Placemarks/Polygons\"\n\ndef kmzToFeatures(inKMZ):\n arcpy.AddMessage(\"kmzToFeatures\")\n arcpy.AddMessage(\"parsed Zip\")\n zf = zipfile.ZipFile(inKMZ)\n zf.namelist()\n for name in zf.namelist():\n if name.endswith(\".kml\"):\n kmlExtract = zf.extract(name)\n arcpy.AddMessage(kmlExtract)\n arcpy.KMLToLayer_conversion(kmlExtract, arcpy.env.scratchFolder, \"outKML\")\n return arcpy.env.scratchFolder + \"/outKML.gdb/Placemarks/Polygons\"\n arcpy.AddError(\"No KML was found in KMZ file\")\n\ndef polygonsToMultiPolygon(inPolygons):\n arcpy.AddMessage(\"polygonsToMultiPolygon\")\n desc = arcpy.Describe(inPolygons)\n shapeFieldName = desc.ShapeFieldName\n polygonRows = arcpy.SearchCursor(inPolygons)\n multiPoly = (arcpy.CreateFeatureclass_management(arcpy.env.scratchGDB, \"multiPolygon\", \"POLYGON\",inPolygons,\"SAME_AS_TEMPLATE\",\"SAME_AS_TEMPLATE\",desc.spatialReference))\n polyArray = arcpy.Array()\n for polygonRow in polygonRows:\n feat = polygonRow.getValue(shapeFieldName)\n i = 0\n while i < feat.partCount:\n polyArray.append(feat.getPart(i))\n i += 1\n del polygonRows\n insertCursor = arcpy.InsertCursor(multiPoly)\n newRow = insertCursor.newRow()\n polygon = arcpy.Polygon(polyArray)\n newRow.shape = polygon\n insertCursor.insertRow(newRow)\n del newRow\n del insertCursor\n return arcpy.env.scratchGDB + \"/multiPolygon\"\n\ndef shpToFeatures(inShpZip):\n arcpy.AddMessage(\"shpToFeatures\")\n #extract the zip\n zf = zipfile.ZipFile(inShpZip)\n polygonFeaturesPath = arcpy.env.scratchFolder + \"/zipSHPExtract\"\n zf.extractall(polygonFeaturesPath)\n #find the shp\n shpFilesFromZip = glob.glob(polygonFeaturesPath + \"/*.shp\")\n arcpy.AddMessage(\"Shapefile count in zip: \" + str(len(shpFilesFromZip)))\n if len(shpFilesFromZip) > 0:\n outFeatures = arcpy.env.scratchGDB + \"/outFeatures\"\n arcpy.CopyFeatures_management(shpFilesFromZip[0], outFeatures)\n return outFeatures\n else:\n arcpy.AddError(\"No shapes were found\")\ndef 
ProcessInput():\r\n sourceKMLFile = arcpy.GetParameterAsText(0)\r\n sourceSHPFile = arcpy.GetParameterAsText(1)\r\n sourceKMZFile = arcpy.GetParameterAsText(2)\r\n if sourceKMLFile:\r\n outputFeatureClass = kmlToFeatures(sourceKMLFile)\r\n elif sourceSHPFile:\r\n outputFeatureClass = shpToFeatures(sourceSHPFile)\r\n elif sourceKMZFile:\r\n outputFeatureClass = kmzToFeatures(sourceKMZFile)\r\n\r\n outputFeatureClass = polygonsToMultiPolygon(outputFeatureClass)\r\n arcpy.SetParameterAsText(3, outputFeatureClass)\r\n\r\nProcessInput()\r\n\r\n\r\n\r\n","sub_path":"data/geoprocessing/source/GeometryUpload/uploadProcessor.py","file_name":"uploadProcessor.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"72997962","text":"'''\nCreated on August 29, 2019 22:05:51 JST\n\n@author: HOU BOWEI\n\nMail: my364007886@gmail.com\n'''\nimport numpy as np\nimport librosa\nfrom scipy.io import wavfile\nimport torch\n\nfrom embedder_net import SpeechEmbedder\n\n\ndef convert(model_path, wav_file_path):\n # data preprocessing\n tisv_frame = 180\n hop = 0.01\n window = 0.025\n nfft = 512\n nmels = 40\n sr, _ = wavfile.read(wav_file_path)\n utter_min_len = (tisv_frame * hop + window) * sr\n utter, sr = librosa.core.load(wav_file_path, sr)\n intervals = librosa.effects.split(utter, top_db=60)\n utterances_spec = []\n for interval in intervals:\n if (interval[1]-interval[0]) > utter_min_len:\n utter_part = utter[interval[0]:interval[1]]\n S = librosa.core.stft(y=utter_part, n_fft=nfft,\n win_length=int(window * sr), hop_length=int(hop * sr))\n S = np.abs(S) ** 2\n mel_basis = librosa.filters.mel(\n sr=sr, n_fft=nfft, n_mels=nmels)\n # log mel spectrogram of utterances\n S = np.log10(np.dot(mel_basis, S) + 1e-6)\n # first 180 frames of partial utterance\n utterances_spec.append(S[:, :tisv_frame])\n # last 180 frames of partial utterance\n utterances_spec.append(S[:, -tisv_frame:])\n utterances_spec = np.array(utterances_spec)\n utterance = utterances_spec[:, :, :160]\n utterance = torch.tensor(np.transpose(utterance, axes=(0, 2, 1)))\n\n # load model\n device = torch.device('cuda')\n checkpoint = torch.load(model_path)\n hidden, num_layers, proj = checkpoint['hyper_parameters']\n embedder_net = SpeechEmbedder(40, hidden, num_layers, proj).to(device)\n embedder_net.load_state_dict(checkpoint['model_state_dict'])\n embedder_net.eval()\n mel_db = utterance.flatten(0, 1)\n mel_db = mel_db.unsqueeze(0).to(device)\n embeddings = embedder_net(mel_db)\n return embeddings\n\n\nif __name__ == '__main__':\n convert('./models/vctk_tf',\n './datasets/jnas_smartcallcenter/F003/F003_BF003A01DT.wav')\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"553397531","text":"from PyQt5.uic import loadUi\nfrom PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QMainWindow, QFileDialog, QDialog, QInputDialog, QTableWidgetItem, QLineEdit\nfrom PyQt5.QtCore import pyqtSignal, Qt, QRegExp\nfrom PyQt5.QtGui import QIntValidator, QDoubleValidator, QFont, QRegExpValidator, QValidator\nfrom PyQt5.QtTest import QTest\nimport sys\nimport os\nimport numpy as np\nimport re\nimport scipy.constants\nsys.path.append(os.path.abspath('.'))\nfrom xraydb import XrayDB\nxdb = XrayDB()\n\n\nclass DoubleValidator(QDoubleValidator):\n def __init__(self, parent, bottom=0):\n QDoubleValidator.__init__(self, parent, bottom=bottom)\n 
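# keep our own copy of the lower bound; validate() below compares against it\n        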
self.bottom=bottom\n\n def validate(self, text, pos):\n try:\n if float(text)>= self.bottom:\n state = QDoubleValidator.Acceptable\n else:\n state = QDoubleValidator.Invalid\n except:\n state = QDoubleValidator.Invalid\n return state, text, pos\n\n\nclass RegExpValidator(QRegExpValidator):\n\n def validate(self, text, pos):\n regex=re.compile('[A-Z][A-Za-z0-9\\.]+|[A-Z]')\n if bool(re.match(regex,text)):\n m=regex.match(text)\n if m.end()0:\n precision='{0:.'+str(self.preSB.value())+'f}'\n string=string+key+str(precision.format(chemfor[key]).rstrip('0').rstrip('.'))\n return string\n\n def checkemptyinput(self):\n if self.bulkconLE.text()=='':\n self.bulkconLE.setText('1')\n if self.solmassdenLE.text()=='':\n self.solmassdenLE.setText('1')\n if self.solchemforLE.text()=='':\n self.solchemforLE.setText('H2O')\n for i in range(self.subphaseTW.rowCount()):\n if self.subphaseTW.cellWidget(i,0).text()=='':\n self.subphaseTW.cellWidget(i,0).setText('Cl')\n if self.subphaseTW.cellWidget(i,1).text()=='':\n self.subphaseTW.cellWidget(i,1).setText('1')\n if self.subphaseTW.cellWidget(i,2).text()=='':\n self.subphaseTW.cellWidget(i,2).setText('1')\n\n\n\n\n def messageBox(self,text,title='Warning'):\n mesgbox=QMessageBox()\n mesgbox.setText(text)\n mesgbox.setWindowTitle(title)\n mesgbox.exec_()\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n w = Den_Calc()\n w.setWindowTitle('Subphase Density Calculator')\n # w.setGeometry(50,50,800,800)\n\n w.show()\n sys.exit(app.exec_())","sub_path":"Tools/Calculators/Den_Calc.py","file_name":"Den_Calc.py","file_ext":"py","file_size_in_byte":10316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"528474446","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nsys.path.append('..')\nimport unittest\n\nfrom easytest import EasyTest\n\nimport tempfile\nimport os\nfrom file import File\nimport numpy as np\n\n\nclass TestData(unittest.TestCase):\n\n def setUp(self):\n self.refdir = tempfile.mkdtemp() + os.sep\n os.makedirs(self.refdir + 'A' + os.sep)\n os.makedirs(self.refdir + 'B' + os.sep)\n self.files = ['a.txt', 'b.dat', 'c.xls', 'd.doc', 'A' + os.sep + 'Aout1.dat', 'A' + os.sep + 'Aout2.dat', 'B' + os.sep + 'Bout.dat']\n for f in self.files:\n #print f\n #print self.refdir + f\n o = open(self.refdir + f, 'w')\n o.write('test')\n o.close()\n\n self._output_directory = tempfile.mkdtemp() + os.sep\n os.system('cp -r ' + self.refdir + '* ' + self._output_directory)\n self._s = 'echo \"Hello world\"'\n self._l = ['a', 'xx', 'b']\n self.T = EasyTest(self._s, self._l, refdirectory=self.refdir, output_directory=self._output_directory)\n\n def test_init(self):\n T = self.T\n s = 'echo \"Hello world\"'\n self.assertEqual(T.exe, s)\n l = ['a', 'xx', 'b']\n for i, v in enumerate(l):\n self.assertEqual(v, T.args[i])\n self.assertEqual(T.refdirectory, self.refdir)\n\n def test_get_reference_file_list(self):\n T = self.T\n\n files = T._get_reference_file_list('all')\n print(self.files)\n for f in files:\n self.assertTrue(f.replace(self.refdir, '') in self.files)\n self.assertTrue(self.refdir in f)\n self.assertEqual(len(files), len(self.files))\n\n ref = ['xx.png', 'yy.txt']\n files = T._get_reference_file_list(ref)\n for f in files:\n self.assertTrue(os.path.basename(f) in ref)\n self.assertTrue(self.refdir in f)\n self.assertEqual(len(files), len(ref))\n\n def test_test_files(self):\n T = self.T\n 
self.assertTrue(T._test_files(T._get_reference_file_list('all')))\n self.assertFalse(T._test_files(['nope.z']))\n\n def test_execute(self):\n T = self.T\n T.run_tests(files='all', checksum_files='all', check_size='all')\n\n def test_basedir(self):\n curdir = os.path.abspath(os.curdir)\n tdir = tempfile.mkdtemp()\n T = EasyTest(self._s, self._l, refdirectory=self.refdir, output_directory=self._output_directory, basedir=tdir, switch_back=False)\n T.run_tests(files='all', checksum_files='all', check_size='all')\n # self.assertEqual(os.path.abspath(os.curdir), tdir)\n os.chdir(curdir)\n\n T = EasyTest(self._s, self._l, refdirectory=self.refdir, output_directory=self._output_directory, basedir=tdir, switch_back=True)\n T.run_tests(files='all', checksum_files='all', check_size='all')\n self.assertEqual(os.path.abspath(os.curdir), curdir)\n\n def test_test_checksum(self):\n T = self.T\n tdir = tempfile.mkdtemp() + os.sep\n T.refdirectory = tdir\n #write some file with different content\n tfile = tdir + 'a.txt'\n o = open(tfile, 'w')\n o.write('test1')\n o.close()\n\n self.assertFalse(T._test_checksum([tfile]))\n self.assertTrue(T._test_checksum([self.refdir + 'a.txt']))\n\n def test_test_filesize(self):\n T = self.T\n tdir = tempfile.mkdtemp() + os.sep\n T.refdirectory = tdir\n #write some file with different content\n tfile = tdir + 'a.txt'\n o = open(tfile, 'w')\n o.write('test1')\n o.close()\n\n self.assertFalse(T._test_filesize([tfile]))\n self.assertTrue(T._test_filesize([self.refdir + 'a.txt']))\n\n def test_test_filesize_gt_0(self):\n T = self.T\n tdir = tempfile.mkdtemp() + os.sep\n T.refdirectory = tdir\n #write some file with different content\n tfile1 = tdir + 'a.txt'\n o = open(tfile1, 'w')\n o.write('test1')\n o.close()\n\n tfile2 = tdir + 'b.txt'\n o = open(tfile2, 'w')\n o.write('test2')\n o.close()\n\n tfile3 = tdir + 'c.txt' # empty file\n o = open(tfile3, 'w')\n o.close()\n\n self.assertTrue(T._test_filesize_gt_0([tfile1, tfile2]))\n self.assertFalse(T._test_filesize_gt_0([tfile3]))\n self.assertFalse(T._test_filesize_gt_0([tfile1, tfile2, tfile3]))\n self.assertFalse(T._test_filesize_gt_0([tfile3, tfile2, tfile1]))\n\n def test_netcdf_compare(self):\n #self.T = EasyTest(s, l, refdirectory=self.refdir, output_directory = output_directory)\n\n nx = 10\n ny = 20\n variables = ['var1', 'var2', 'var3']\n f1 = tempfile.mktemp(suffix='.nc')\n f2 = tempfile.mktemp(suffix='.nc')\n f3 = tempfile.mktemp(suffix='.nc')\n f4 = tempfile.mktemp(suffix='.nc')\n\n F1 = File(f1, 'x', 'y', mode='w')\n F1.create_dimension('x', nx)\n F1.create_dimension('y', ny)\n\n F2 = File(f2, 'x', 'y', mode='w')\n F2.create_dimension('x', nx)\n F2.create_dimension('y', ny)\n\n F3 = File(f3, 'x', 'y', mode='w')\n F3.create_dimension('x', nx)\n F3.create_dimension('y', ny)\n\n F4 = File(f4, 'x', 'y', mode='w')\n F4.create_dimension('x', nx)\n F4.create_dimension('y', ny)\n\n cnt = 1\n for k in variables:\n x = np.random.random((ny, nx))\n x = np.ma.array(x, mask=x != x)\n F1.append_variable(k, x)\n F2.append_variable(k, x) # ... two same files\n y = np.random.random((ny, nx))\n y = np.ma.array(y, mask=y != y)\n F3.append_variable(k, y) # ... 
and one different\n if cnt == 1:\n F4.append_variable(k, x) # one file with different number of variables\n cnt += 1\n\n F1.close()\n F2.close()\n F3.close()\n F4.close()\n\n T = self.T\n self.assertTrue(T._compare_netcdf(f1, f2, compare_variables=True, compare_values=False))\n self.assertTrue(T._compare_netcdf(f1, f2, compare_variables=False, compare_values=True))\n self.assertTrue(T._compare_netcdf(f1, f2, compare_variables=True, compare_values=True))\n\n self.assertTrue(T._compare_netcdf(f1, f3, compare_variables=True, compare_values=False))\n self.assertFalse(T._compare_netcdf(f1, f3, compare_variables=False, compare_values=True))\n self.assertFalse(T._compare_netcdf(f1, f3, compare_variables=True, compare_values=True))\n\n self.assertFalse(T._compare_netcdf(f1, f4, compare_variables=True, compare_values=False))\n\n # subsetting\n self.assertTrue(T._compare_netcdf(f1, f2, compare_variables=False, compare_values=True, allow_subset=False))\n #~ self.assertTrue(T._compare_netcdf(f1, f2, compare_variables=False, compare_values=True, allow_subset=True))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_easy.py","file_name":"test_easy.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"98698796","text":"import argparse\n\nparser = argparse.ArgumentParser(\n description='Extract ion mobility values from SpectroMine/Spectronaut report.'\n)\nparser.add_argument(\n '--in', nargs='+',\n help='input SpectroMine/Spectronaut report'\n)\nparser.add_argument(\n '--out',\n help='output ion mobility file'\n)\nparser.add_argument(\n '--type', choices=['SpectroMine', 'Spectronaut'], \n default='SpectroMine',\n help='input report type (default: %(default)s)'\n)\n\nfilter_group = parser.add_argument_group('entries filters')\nfilter_group.add_argument(\n '--precursor_charge', type=int, nargs='+', default=[2,3],\n help='list of allowed charge states of precursor ions (default: %(default)s)'\n)\nfilter_group.add_argument(\n '--min_peptide_length', type=int, default=7,\n help='lower sequence length limit of peptides (default: %(default)s)'\n)\nfilter_group.add_argument(\n '--max_peptide_length', type=int, default=50,\n help='upper sequence length limit of peptides (default: %(default)s)'\n)\nfilter_group.add_argument(\n '--modification_action', choices=[\n None, 'keep_if_any', 'keep_if_exclusive',\n 'remove_if_any', 'remove_if_exclusive'\n ], default='remove_if_exclusive',\n help='filter precursors according to modifications (default: %(default)s)'\n)\nfilter_group.add_argument(\n '--modification_list', nargs='+',\n default=['Carbamidomethyl'],\n help='selected modifications (default: %(default)s)'\n)\n\n\nargs = parser.parse_args()\nreport_files = getattr(args, 'in')\nout_file = args.out\nreport_type = args.type\n\nfilter_args = vars(args)\nfilter_args.pop('in')\nfilter_args.pop('out')\nfilter_args.pop('type')\n\n# %%\nimport logging\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(filename)s: [%(levelname)s] %(message)s'\n)\n\n# %%\nfrom util import list_files\n\nif globals().get('report_files', None) is None:\n report_files = list_files(\n path='.',\n pattern='PSM( )?Report\\\\.(csv|xls)$'\n )\n\nif len(report_files) == 0:\n raise ValueError('no report files')\n\n# %%\nimport os\nimport re\n\nif globals().get('out_file', None) is None:\n out_file = os.path.splitext(report_files[0])[0]\n out_file = re.sub(\n '[\\\\._]?PSM( )?Report$',\n '', out_file\n )\n if 
len(report_files) > 1:\n out_file += '_' + str(len(report_files))\n out_file += '.ionMobility.csv'\n\n# %%\nimport pandas as pd\n\nlogging.info('loading report(s): ' + '; '.join(report_files))\n\nreport = pd.concat(\n (\n pd.read_csv(f, sep=',' if f.endswith('.csv') else '\\t')\n for f in report_files\n ),\n ignore_index=True\n)\n\nlogging.info('report(s) loaded: {0} rows' \\\n .format(len(report)))\n\n# %%\nfrom formatting.generic import PeptideReportCleaner\n\nif report_type == 'Spectronaut':\n from formatting.spectronaut import \\\n Spectronaut_im_report_columns as im_report_columns\nelse:\n from formatting.spectronaut import \\\n SpectroMine_im_report_columns as im_report_columns\n\ncleaner = PeptideReportCleaner(columns=im_report_columns())\n\nlogging.info('parsing ion mobility report')\n\ndata = cleaner.parse_report(report)\n\nlogging.info('ion mobility report parsed: {0} entries' \\\n .format(len(data)))\n\n# %%\nlogging.info('remove duplicated entries')\n\ndata = cleaner.remove_duplicates(data)\n\nlogging.info('duplicated entries removed: {0} non-redundant entries' \\\n .format(len(data)))\n\n# %%\nlogging.info(\n 'filtering entries using the following parameters: \\n' + \\\n '\\n'.join((\n k + '=' + str(v)\n for k, v in filter_args.items()\n if v is not None\n ))\n)\n\ndata = cleaner.filter_peptides(\n data, **filter_args\n)\n\ndata = cleaner.finalize(data)\n\nlogging.info('entries filtered: {0} entries' \\\n .format(len(data)))\n\n\n# %%\nfor charge in data['charge'].unique():\n data_charge = data.loc[data['charge'] == charge]\n\n out_file_charge = os.path.splitext(out_file)[0]\n if out_file_charge.endswith('.ionMobility'):\n out_file_charge = out_file_charge[:-len('.ionMobility')]\n out_file_charge += '_charge' + str(charge) + '.ionMobility.csv'\n\n logging.info('saving ion mobility report: {0}, charge {1}+' \\\n .format(out_file_charge, charge))\n\n data_charge.to_csv(out_file_charge, index=False)\n\n logging.info('ion mobility report saved: {0}, charge {1}+, {2} entries' \\\n .format(out_file_charge, charge, len(data_charge)))\n\n\n","sub_path":"src/extract_im_from_SpectroMine.py","file_name":"extract_im_from_SpectroMine.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"542603636","text":"from requests import get\nfrom csv import DictReader\nfrom uuid import UUID\nfrom datetime import datetime\nfrom mongo import db\n\nendpoint = 'http://prod2.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/'\n\nPROPERTY_TYPES = {'D': 'Detached',\n 'S': 'Semi-Detached',\n 'T': 'Terraced',\n 'F': 'Flat/Maisonette',\n 'O': 'Other'}\n\nNEW_BUILD = {'Y': 'New build',\n 'N': 'Existing property'}\n\nFREEHOLD = {'F': 'Freehold',\n 'L': 'Leasehold'}\n\nPRICE_PAID = {'A': 'Standard Price Paid',\n 'B': 'Additional Price Paid'}\n\n\ndef current_month():\n return endpoint + 'pp-monthly-update.txt'\n\n\ndef current_year():\n year = datetime.today().year\n return previous_year(year)\n\n\ndef previous_year(year):\n return endpoint + 'pp-{year}.txt'.format(year=year)\n\n\ndef complete():\n return endpoint + 'pp-complete.txt'\n\n\ndef download_file(url):\n \"\"\" https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads \"\"\"\n\n collection = db['house-prices']\n fieldnames = [\n 'uuid',\n 'price',\n 'date',\n 'address.postcode',\n 'details.property_type',\n 'details.new_build',\n 'details.freehold',\n 'address.PAON',\n 'address.SAON',\n 'address.street',\n 
'address.locality',\n 'address.town',\n 'address.district',\n 'address.county',\n 'details.standard_price',\n 'status',\n ]\n\n response = get(url, stream=True, allow_redirects=False)\n print(response.status_code, response.url)\n response.raise_for_status()\n\n reader = DictReader(response.iter_lines(decode_unicode=True), fieldnames=fieldnames)\n for row in reader:\n uuid = UUID(row.pop('uuid'))\n row['date'] = datetime.strptime(row['date'], '%Y-%m-%d %H:%M')\n row['price'] = int(row['price'])\n\n if row.get('status') == 'D':\n collection.delete_one(\n {'_id': uuid}\n )\n\n elif row.get('status') == 'U':\n row.pop('status')\n collection.update_one(\n {'_id': uuid},\n {'$set': row}\n )\n\n else:\n row.pop('status')\n collection.update_one(\n {'_id': uuid},\n {'$setOnInsert': row},\n upsert=True\n )\n\n if reader.line_num % 10000 == 0:\n print('{0:,.0f} rows loaded'.format(reader.line_num))\n\n print('{0:,.0f} rows loaded'.format(reader.line_num))\n\n\ndef load_postcodes(query):\n \"\"\" https://api.postcodes.io/ \"\"\"\n\n collection = db['house-prices']\n url = 'https://api.postcodes.io/postcodes/{0}'\n\n postcodes = collection.distinct('address.postcode', query=query)\n print('{0} postcodes found'.format(len(postcodes)))\n\n for postcode in postcodes:\n response = get(url.format(postcode))\n data = response.json()\n\n if data['status'] == 200:\n locality = {'locality': data['result']}\n else:\n locality = {'locality': data}\n\n result = collection.update_many(\n {'address.postcode': postcode},\n {'$set': locality}\n )\n\n print('{0}: {1} records updated'.format(postcode, result.modified_count))\n\n\ndef load_epc(address=None, postcode=None, local_authority=None, constituency=None):\n \"\"\" https://epc.opendatacommunities.org/docs/api/domestic \"\"\"\n n = 0\n\n params = {\n 'from-year': 1995,\n 'to-year': 2018,\n 'size': 5000,\n }\n if address:\n params['address'] = address\n if postcode:\n params['postcode'] = postcode\n if local_authority:\n params['local-authority'] = local_authority\n if constituency:\n params['constituency'] = constituency\n\n # collection = db['house-prices']\n collection = db['epc-certificates']\n url = 'https://epc.opendatacommunities.org/api/v1/domestic/search'\n\n # key = 'f7708db9ad243efc7bc8f7f2a191c5d5f04babb2'\n key = 'bmlja21haGVyODRAZ21haWwuY29tOmY3NzA4ZGI5YWQyNDNlZmM3YmM4ZjdmMmExOTFjNWQ1ZjA0YmFiYjI='\n\n headers = {\n 'Accept': 'text/csv',\n 'Authorization': 'Basic '+key,\n }\n\n response = get(url, params=params, headers=headers, stream=True)\n print(response.status_code, response.url)\n response.raise_for_status()\n\n reader = DictReader(response.iter_lines(decode_unicode=True))\n for row in reader:\n ref = row['building-reference-number']\n collection.update_one(\n {'_id': ref},\n {'$setOnInsert': row},\n upsert=True\n )\n n += 1\n\n print('{1}: {0} records loaded'.format(n, postcode))\n\n\nif __name__ == '__main__':\n f = current_month()\n download_file(f)\n","sub_path":"uk-house-prices/uk-house-prices.py","file_name":"uk-house-prices.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"641097425","text":"\n\nfrom xai.brain.wordbase.nouns._subterfuge import _SUBTERFUGE\n\n#calss header\nclass _SUBTERFUGES(_SUBTERFUGE, ):\n\tdef __init__(self,): \n\t\t_SUBTERFUGE.__init__(self)\n\t\tself.name = \"SUBTERFUGES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"subterfuge\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_subterfuges.py","file_name":"_subterfuges.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"241833909","text":"import unittest\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\nimport numpy as np\nfrom phonopy import Phonopy\nfrom phonopy.phonon.moment import PhononMoment\nfrom phonopy.interface.vasp import read_vasp\nfrom phonopy.file_IO import parse_FORCE_SETS, parse_BORN\nimport os\n\ndata_dir = os.path.dirname(os.path.abspath(__file__))\n\nresult_full_range = \"\"\"\n1.000000 1.000000 1.000000\n 4.063222 4.236805 3.889623\n17.935854 19.412820 16.458756\n 1.000000 1.000000 1.000000\n 3.530039 3.621065 3.451029\n12.557720 13.205191 11.995720\n\"\"\"\n\n\nclass TestMoment(unittest.TestCase):\n def setUp(self):\n self._cell = read_vasp(os.path.join(data_dir, \"..\", \"POSCAR_NaCl\"))\n\n def tearDown(self):\n pass\n\n def test_moment(self):\n data = np.loadtxt(StringIO(result_full_range), dtype='double')\n\n phonon = self._get_phonon(self._cell)\n moment = phonon.run_mesh([13, 13, 13],\n with_eigenvectors=True,\n is_mesh_symmetry=False)\n num_atom = phonon.get_primitive().get_number_of_atoms()\n q = phonon.mesh.qpoints\n w = phonon.mesh.weights\n f = phonon.mesh.frequencies\n e = phonon.mesh.eigenvectors\n vals = np.zeros((6, num_atom + 1), dtype='double')\n\n moment = PhononMoment(f, w)\n for i in range(3):\n moment.run(order=i)\n vals[i, 0] = moment.get_moment()\n self.assertTrue(np.abs(moment.get_moment() - data[i, 0]) < 1e-5)\n\n moment = PhononMoment(f, w, eigenvectors=e)\n for i in range(3):\n moment.run(order=i)\n moms = moment.get_moment()\n vals[i, 1:] = moms\n self.assertTrue((np.abs(moms - data[i, 1:]) < 1e-5).all())\n\n moment = PhononMoment(f, w)\n moment.set_frequency_range(freq_min=3, freq_max=4)\n for i in range(3):\n moment.run(order=i)\n vals[i + 3, 0] = moment.get_moment()\n self.assertTrue(\n np.abs(moment.get_moment() - data[i + 3, 0]) < 1e-5)\n\n moment = PhononMoment(f, w, eigenvectors=e)\n moment.set_frequency_range(freq_min=3, freq_max=4)\n for i in range(3):\n moment.run(order=i)\n moms = moment.get_moment()\n vals[i + 3, 1:] = moms\n self.assertTrue((np.abs(moms - data[i + 3, 1:]) < 1e-5).all())\n\n # self._show(vals)\n\n def _show(self, vals):\n for v in vals:\n print((\"%9.6f \" * len(v)) % tuple(v))\n\n def _get_phonon(self, cell):\n phonon = Phonopy(cell,\n np.diag([2, 2, 2]),\n primitive_matrix=[[0, 0.5, 0.5],\n [0.5, 0, 0.5],\n [0.5, 0.5, 0]])\n filename = os.path.join(data_dir, \"..\", \"FORCE_SETS_NaCl\")\n force_sets = parse_FORCE_SETS(filename=filename)\n phonon.set_displacement_dataset(force_sets)\n phonon.produce_force_constants()\n filename_born = os.path.join(data_dir, \"..\", \"BORN_NaCl\")\n nac_params = parse_BORN(phonon.get_primitive(), filename=filename_born)\n phonon.set_nac_params(nac_params)\n\n return phonon\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestMoment)\n unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"test/phonon/test_moment.py","file_name":"test_moment.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"4412786","text":"import pytest\n\nfrom money import Money, Bank, Sum\n\n\ndef test_multiplication():\n five = Money.dollar(5)\n assert Money.dollar(10) == five.times(2)\n assert Money.dollar(15) == 
five.times(3)\n\n\ndef test_equality():\n assert Money.dollar(5) == Money.dollar(5)\n assert Money.dollar(5) != Money.dollar(6)\n assert Money.franc(5) != Money.dollar(5)\n\n\ndef test_currency():\n assert \"USD\" == Money.dollar(1).currency()\n assert \"CHF\" == Money.franc(1).currency()\n\n\ndef test_simple_addition():\n five = Money.dollar(5)\n sum = five.plus(five)\n bank = Bank()\n reduced = bank.reduce(sum, \"USD\")\n assert Money.dollar(10) == reduced\n\n\ndef test_plus_return_sum():\n five = Money.dollar(5)\n result = five.plus(five)\n assert five == result.augend\n assert five == result.addend\n\n\ndef test_reduce_sum():\n sum = Sum(Money.dollar(3), Money.dollar(4))\n bank = Bank()\n result = bank.reduce(sum, \"USD\")\n assert Money.dollar(7) == result\n\n\ndef test_reduce_money():\n bank = Bank()\n result = bank.reduce(Money.dollar(1), \"USD\")\n assert Money.dollar(1) == result\n\n\ndef test_reduce_money_different_currency():\n bank = Bank()\n bank.add_rate(\"CHF\", \"USD\", 2)\n result = bank.reduce(Money.franc(2), \"USD\")\n assert Money.dollar(1) == result\n\n\ndef test_identity_rate():\n assert 1 == Bank().rate(\"USD\", \"USD\")\n\n\ndef test_mixed_addition():\n five_bucks = Money.dollar(5)\n ten_francs = Money.franc(10)\n bank = Bank()\n bank.add_rate(\"CHF\", \"USD\", 2)\n result = bank.reduce(five_bucks.plus(ten_francs), \"USD\")\n assert Money.dollar(10) == result\n\n\ndef test_sum_plus_money():\n five_bucks = Money.dollar(5)\n ten_francs = Money.franc(10)\n bank = Bank()\n bank.add_rate(\"CHF\", \"USD\", 2)\n sum = Sum(five_bucks, ten_francs).plus(five_bucks)\n result = bank.reduce(sum, \"USD\")\n assert Money.dollar(15) == result\n\n\ndef test_sum_times():\n five_bucks = Money.dollar(5)\n ten_francs = Money.franc(10)\n bank = Bank()\n bank.add_rate(\"CHF\", \"USD\", 2)\n sum = Sum(five_bucks, ten_francs).times(2)\n result = bank.reduce(sum, \"USD\")\n assert Money.dollar(20) == result\n","sub_path":"chp16/test_money.py","file_name":"test_money.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487033396","text":"#!/usr/bin/env python3\n\nimport re\n\n\ndef make_stat(filename):\n stat = {'M': {}, 'F': {}}\n year = ''\n years = []\n with open(filename, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n for line in lines:\n result = re.findall('href=.*/>(.*)|
    (.*)
    ', line)\n # print(result)\n for occurence in result:\n if occurence[0] == '':\n year = occurence[1]\n years.append(year)\n stat['M'][year] = {}\n stat['F'][year] = {}\n # print(year, years[year])\n else:\n name_parts = occurence[0].split()\n surname = name_parts[0]\n name = name_parts[1]\n sex_letter = 'M'\n if (name[-1] == 'а' or name[-1] == \"я\" or\n name == 'Любовь') and name != 'Илья' \\\n and name != 'Никита' and name != 'Лёва':\n sex_letter = 'F'\n if stat[sex_letter][year].__contains__(name):\n stat[sex_letter][year][name] = \\\n stat[sex_letter][year][name] + 1\n else:\n stat[sex_letter][year][name] = 1\n return stat\n\n\ndef extract_years(stat):\n return sorted(stat['M'].keys())\n\n\ndef extract_general(stat):\n general_list = []\n male = extract_general_male(stat)\n female = extract_general_female(stat)\n for man in male:\n general_list.append(man)\n for man in female:\n general_list.append(man)\n\n general = sorted(general_list, reverse=True, key=lambda kv: kv[1])\n return general\n\n\ndef extract_general_male(stat):\n general_dict = {}\n for year in stat['M'].keys():\n for man in stat['M'][year].keys():\n if general_dict.__contains__(man):\n general_dict[man] = general_dict[man] + stat['M'][year][man]\n else:\n general_dict[man] = stat['M'][year][man]\n general = sorted(general_dict.items(), reverse=True, key=lambda kv: kv[1])\n return general\n\n\ndef extract_general_female(stat):\n general_dict = {}\n for year in stat['F'].keys():\n for man in stat['F'][year].keys():\n if general_dict.__contains__(man):\n general_dict[man] = general_dict[man] + stat['F'][year][man]\n else:\n general_dict[man] = stat['F'][year][man]\n general = sorted(general_dict.items(), reverse=True, key=lambda kv: kv[1])\n return general\n\n\ndef extract_year(stat, year):\n year_stat = []\n for man in stat['M'][year]:\n year_stat.append((man, stat['M'][year][man]))\n\n for man in stat['F'][year]:\n year_stat.append((man, stat['F'][year][man]))\n\n year_stat = sorted(year_stat, reverse=True, key=lambda kv: kv[1])\n return year_stat\n\n\ndef extract_year_male(stat, year):\n year_stat = []\n for man in stat['M'][year]:\n year_stat.append((man, stat['M'][year][man]))\n\n year_stat = sorted(year_stat, reverse=True, key=lambda kv: kv[1])\n return year_stat\n\n\ndef extract_year_female(stat, year):\n year_stat = []\n for man in stat['F'][year]:\n year_stat.append((man, stat['F'][year][man]))\n\n year_stat = sorted(year_stat, reverse=True, key=lambda kv: kv[1])\n return year_stat\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"Python.hw/homestat_cp1251.py","file_name":"homestat_cp1251.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"581613091","text":"from datetime import datetime\nfrom bs4 import BeautifulSoup as bs\nimport re\nimport requests\nimport pandas as pd\n\nclass MusinsaRanking:\n\n def __init__(self, url: str) -> None:\n self.url = url\n\n def _get_request(self) -> requests.Response:\n res = requests.get(self.url)\n if res.status_code != 200:\n res.raise_for_status()\n return res\n\n def get_ranking(self) -> pd.DataFrame:\n res = self._get_request()\n data = res.text\n soup = bs(data, 'html.parser')\n ranking_section = soup.find(class_='tbl_box_sranking')\n\n # 키워드 부분\n item_html = ranking_section.find_all('a')\n # 순위 변동 방향 부분\n status_html = ranking_section.find_all(class_='arrow')\n # 순위 변동 부분\n chng_html = ranking_section.find_all(class_='p_srank_last')\n\n item = []\n status = []\n 
change = []\n\n for i in range(len(status_html)):\n item_name = item_html[i].attrs['title']\n item.append(item_name)\n change_direction = status_html[i].get_text()\n status.append(change_direction)\n status_change = chng_html[i].get_text()\n change_num = re.sub(r'[^0-9]', '', status_change)\n change.append(change_num)\n\n item_mapping = list(zip(item, status, change))\n ranking = pd.DataFrame(item_mapping,\n columns=['Item', 'Status', 'Change'])\n return ranking\n","sub_path":"ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"60374383","text":"\"\"\"\nForces spider created on the top of ATSSpider\n\nscrapy crawl forces -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.forces.ca/en/jobexplorer/browsejobs-70\"\n\nSample URL:\n http://www.forces.ca/en/jobexplorer/browsejobs-70\n\"\"\"\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import md5_hash, RemoveBadElements, HtmlFormatter, Prefix, Replace\n\n\nclass Forces(ATSSpider):\n\n name = 'forces'\n\n def parse(self, response):\n sel = Selector(response)\n for href in sel.xpath(\n '//ul[@id=\"jobresultlist\"]/li//div[@class=\"span-6\"]/a/@href'\n ).extract():\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, href)\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n loader = BrightcorpItemLoader(selector=sel)\n loader.add_xpath(\n 'title',\n '//div/h1[@id=\"wb-cont\"]/text()'\n )\n loader.add_value(\n 'referencenumber',\n response.url,\n md5_hash,\n Prefix('%s-' % self.name)\n )\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'description',\n '//div[@id=\"info\"]/node()',\n RemoveBadElements(['a', 'img', ]),\n Replace('On this page:'),\n HtmlFormatter()\n )\n loader.add_value('apply_url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/forces.py","file_name":"forces.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"430199989","text":"\r\nimport pymysql # 导入 pymysql\r\n\r\n# 打开数据库连接\r\ndb = pymysql.connect(host=\"127.0.0.1\", user=\"root\",\r\n password=\"root\", db=\"controlarm\", port=3306,charset='utf8mb4')\r\n\r\n# 使用cursor()方法获取操作游标\r\ncur = db.cursor()\r\nimport logging\r\nimport time\r\n\r\nfrom selenium.webdriver import ChromeOptions\r\nfrom selenium import webdriver\r\nfrom openpyxl import load_workbook\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n\r\n#输出日志的\r\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')\r\nTIME_OUT = 30\r\nTOTAL_PAGE = 12\r\n#操作浏览器的基础配置\r\noption = ChromeOptions()\r\n#隐藏自动化程序\r\noption.add_experimental_option('excludeSwitches', ['enable-automation'])\r\noption.add_experimental_option('useAutomationExtension', False)\r\n#限制图片加载\r\nprefs={\r\n 'profile.default_content_setting_values': {\r\n 'images': 2}\r\n}\r\noption.add_experimental_option('prefs',prefs)\r\n\r\n#开启无界面模式\r\n# option.add_argument(\"--headless\")\r\n# option.add_argument(\"--disable-gpu\")\r\n# 
wait = WebDriverWait(browser, TIME_OUT)\r\n\r\n\r\n\r\n# 'https://www.ebay.de':77,\r\n# market_dict = {'https://www.ebay.com':1,'https://www.ebay.co.uk':3,'https://www.ebay.com.au':15,}\r\nmarket_dict = {'https://www.ebay.de':77}\r\n# 1.查询操作\r\n# 编写sql 查询语句 user 对应我的表名\r\ndef dataSql(itemid,title,picture,oe,market,judege,sold):\r\n sql = \"insert into baseinfo_copy(itemid,title,picture,oe,market,judege,sold) values(%s,%s,%s,%s,%s,%s,%s)\"\r\n try:\r\n res = cur.execute(sql,(itemid,title,picture,oe,market,judege,sold)) # 执行sql语句\r\n db.commit()\r\n except Exception as e:\r\n print(e)\r\n finally:\r\n pass\r\n # cur.close() # 关闭连接\r\n\r\nclass helpIn:\r\n def __init__(self,searchid=1):\r\n self.searchkey = searchid\r\n self.url = '{}/sch/i.html?_from=R40&_nkw={}&_sacat=0&LH_TitleDesc=0&_ipg=200&_fcid={}&_pgn={}&LH_ItemCondition=1000'\r\n self.name= None\r\n self.searchword = None\r\n def dataRead(self,start,end):\r\n wb = load_workbook('TURBOde.xlsx')\r\n wt = wb[\"Sheet1\"]\r\n for sdf in range(start,end):\r\n self.searchword = wt.cell(row=sdf,column=1).value\r\n self.spiderData()\r\n\r\n def dataWrite(self,itemid,title,picture,price,sold):\r\n #写入数据库\r\n sql = 'insert into turbode(itemid,title,picture,name,searchword,price,sold) values(%s,%s,%s,%s,%s,%s,%s)'\r\n cur.execute(sql,(itemid,title,picture,self.name,self.searchword,price,sold))\r\n db.commit()\r\n def spiderData(self):\r\n ert = open(\"turbode.text\",mode=\"a+\",encoding='utf-8')\r\n print(\"数据自动化程序即将执行,请不要关闭自动打开的浏览器\")\r\n browser = webdriver.Chrome(options=option, executable_path='../chromedriver.exe')\r\n # 爬取数据\r\n browser.delete_all_cookies()\r\n search_name = str(self.searchword).replace(\" \", \"+\")\r\n market = []\r\n # 判断市场\r\n for mark_url, mark_id in market_dict.items():\r\n area = mark_url.split('.')[-1]\r\n if area == \"com\":\r\n area = \"us\"\r\n else:\r\n pass\r\n self.name =area\r\n\r\n for page in range(0, 10):\r\n # 页面内容提取\r\n print(page)\r\n browser.get(self.url.format(mark_url, search_name, mark_id, page + 1))\r\n time.sleep(1)\r\n i = 1\r\n try:\r\n count = browser.find_element_by_xpath(\r\n '//*[@id=\"mainContent\"]/div[1]/div/div[3]/div[1]/div[1]/h1/span[1]').text\r\n count = int(count)\r\n jcount = count\r\n except Exception as yut:\r\n break\r\n ert.write(str(self.searchword)+\"#\" + str(count)+\"\\n\")\r\n # 计数结果判断\r\n if count == 0:\r\n break\r\n elif count > 200 and page > 0:\r\n count = count - 200*page\r\n\r\n for i in range (1,count+1):\r\n # 循环提取每页内容\r\n price = None\r\n look_count_result = \"no data\"\r\n try:\r\n commodity_name = browser.find_element_by_xpath(\r\n '//*[@id=\"srp-river-results\"]/ul/li[{}]/div/div[2]/a/h3'.format(i)).text\r\n commodity_link = browser.find_element_by_xpath(\r\n '//*[@id=\"srp-river-results\"]/ul/li[{}]/div/div[2]/a'.format(i)).get_attribute(\"href\")\r\n image_link = browser.find_element_by_xpath(\r\n '//*[@id=\"srp-river-results\"]/ul/li[{}]/div/div[1]/div/a/div/img'.format(i)).get_attribute(\r\n \"src\")\r\n price = browser.find_element_by_xpath('//*[@id=\"srp-river-results\"]/ul/li[{}]//span[@class=\"s-item__price\"]'.format(i)).text\r\n try:\r\n # 尝试抓取销售数据\r\n look_count = browser.find_elements_by_xpath(\r\n '//*[@id=\"srp-river-results\"]/ul/li[{}]/div/div[2]//span[contains(@class,\"BOLD\")]'.format(\r\n i))\r\n if look_count:\r\n look_count_result = \"\"\r\n for gh in look_count:\r\n look_count_result += gh.text\r\n except Exception as e:\r\n pass\r\n # print(i)\r\n i += 1\r\n # print(name,look_count,look_url)\r\n judege = \"no\"\r\n itemid = 
commodity_link.split(\"/\")[-1].split('?')[0]\r\n self.dataWrite(itemid=itemid,title=commodity_name,picture=image_link,price=price,sold=look_count_result)\r\n except Exception as y:\r\n # browser.find_element_by_xpath('//*[@id=\"gh-ac\"]').send_keys(inner)\r\n # browser.find_element_by_xpath('//*[@id=\"gh-btn\"]').click()\r\n print(y)\r\n print(\"第一页内容提取完毕\")\r\n break\r\n if count < 200:\r\n break\r\n try:\r\n # 页面class值是唯一值\r\n next_page = browser.find_element_by_xpath('//*[contains(@class,\"pagination__next\")]')\r\n # print(next_page)\r\n next_page_judge = next_page.get_attribute('aria-disabled')\r\n print(next_page_judge)\r\n if next_page_judge:\r\n break\r\n else:\r\n continue\r\n except Exception as q:\r\n print(str(self.searchword) + '采集完毕' + '\\t' + '无下一页')\r\n break\r\n # print(\"执行完毕\")\r\n browser.quit()\r\n ert.close()\r\n def run(self,start,end):\r\n self.dataRead(start=start,end=end)\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n helpIn(searchid=1).run(start=2075,end=2076)","sub_path":"dataCapture/cherry/turbo_de.py","file_name":"turbo_de.py","file_ext":"py","file_size_in_byte":7459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"241115529","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\ntry:\n url = 'https://www.mhanational.org/issues/2020/mental-health-america-all-data'\n\n page = requests.get(url)\n\n soup = BeautifulSoup(page.text, 'html.parser')\n\n data = []\n data_rankings = soup.find_all('tr', {'class': 'rankings'})\n\n for data_ranking in range(len(data_rankings)):\n cols = data_rankings[data_ranking].find_all('td')\n cols = [ele.text.strip() for ele in cols]\n data.append([ele for ele in cols if ele])\n\n result = pd.DataFrame(data, columns=['State', 'Rank', 'Percentage', 'Number'])\n result.to_csv('./datasets/mha.csv', index=False)\nexcept IOError:\n print(\"can't create CSV\")\n\n# print(result)\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"17700055","text":"#\n#\n# test.py\n# -------------\n# The main program asks the user to input a search query, from which\n# it will process the aggregated average retail and wholesale price,\n# the price differential, and the percentage difference, the sentiment\n# score based on news relating to that search query, the sentiment text\n# (i.e. 
generally negative, very positive)\n#\n#\n\n\nimport search\nimport processingRetail\nimport processingWholesale\n\n\ndef getPriceDifferentialText(wholesalePrice, retailPrice):\n priceDifferential = retailPrice - wholesalePrice\n\n return priceDifferential\n\n\ndef getPercentDifferentialText(wholesalePrice, retailPrice):\n percentDifferential = (retailPrice / wholesalePrice) * 100\n\n return percentDifferential\n\ndef test():\n query = input('Enter a search query (\"q\" to exit): ')\n\n if query == 'q':\n exit()\n\n print()\n aggregatedWholesalePrice = processingWholesale.aggregatePrices(query).get('averageItemPrice', 0)\n aggregatedRetailPrice = processingRetail.aggregatePrices(query).get('averageItemPrice', 0)\n print('Average Wholesale Price:', '{0:.2f}'.format(aggregatedWholesalePrice))\n print('Average Retail Price:', '{0:.2f}'.format(aggregatedRetailPrice))\n print('Price Differential:', '{0:.2f}'.format(getPriceDifferentialText(aggregatedWholesalePrice,\n aggregatedRetailPrice)))\n print('Percent Differential:', '{0:.2f}'.format(getPercentDifferentialText(aggregatedWholesalePrice,\n aggregatedRetailPrice)), '%')\n search.performSearch(query)\n\ntest()","sub_path":"processing/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"140694351","text":"from typing import List\n\n\n# brutal force\nclass Solution:\n def largestRectangleArea(self, heights: List[int]) -> int:\n result = 0\n for i in range(0, len(heights)):\n for j in range(1, heights[i] + 1):\n # print(j, end=\" \")\n result = max(self.find_max(i, heights, j), result)\n # print()\n\n return result\n\n def find_max(self, pos, heights, tall):\n width = 0\n flag = True\n for i in range(0, len(heights)):\n if i <= pos:\n continue\n # print(i)\n width += 1\n if heights[i] < tall:\n flag = not flag\n # print(\"heights[i]: \", heights[i], \"tall: \", tall, i)\n break\n if flag:\n width += 1\n # print(\"pos\", pos, \" tall: \", tall, \" width: \", width)\n # print(\"width: \", width, \"(i,j) \", pos, \" \", tall)\n return width * tall\n\n\nif __name__ == '__main__':\n sol = Solution()\n inp = [2, 1, 5, 6, 2, 3]\n print(sol.largestRectangleArea(inp))\n","sub_path":"84.py","file_name":"84.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255038901","text":"from detectedClient import DetectedClient\nimport os\nimport sys\nimport time\nimport math\nimport logging\nimport logging.config\nfrom threadsafeLogger import ThreadsafeLogger\n\n#TODO: clean up mess from our trials on the scan normalizing\nclass BtleRegisteredClient:\n #part of interface for Registered Client\n def __init__(self,detectedClient,collectionPointConfig,loggingQueue):\n # Logger\n self.loggingQueue = loggingQueue\n self.logger = ThreadsafeLogger(loggingQueue, __name__)\n \n self.logger = logging.getLogger('btleRegisteredClient.BtleRegisteredClient')\n self.clientEventLogger = logging.getLogger('btleRegisteredClient.BtleEventTesting')\n self.clientEventSendLogger = logging.getLogger('eventSend')\n self.clientInRangeTrigerCount = 2\n self.lastTimeMessageClientInWasSentToController = -1\n self.lastTimeMessageClientOutWasSentToController = -1\n self.__countClientInRange=0\n self.__countClientOutOfRange=0\n self.timeInCollectionPointInMilliseconds = 0\n self.firstRegisteredTime = time.time()\n self.collectionPointConfig = collectionPointConfig\n 
self.__clientOutThresholdMin = int(self.collectionPointConfig['BtleRssiClientInThreshold'] + (self.collectionPointConfig['BtleRssiClientInThreshold'] * self.collectionPointConfig['BtleRssiErrorVariance']))\n self.handleNewDetectedClientEvent(detectedClient) #standard shared methods when we see a detected client\n\n #part of interface for Registered Client\n def updateWithNewDectedClientData(self,detectedClient):\n self.timeInCollectionPointInMilliseconds = time.time() - self.firstRegisteredTime\n self.handleNewDetectedClientEvent(detectedClient) #standard shared methods when we see a detected client\n\n #Common methods are handled here for updateWithNewDectedClientData and init\n def handleNewDetectedClientEvent(self,detectedClient):\n self.lastRegisteredTime = time.time()\n self.detectedClient = detectedClient\n self.txPower = detectedClient.extraData['tx']\n self.beaconId = detectedClient.extraData['udid'] #TODO HACK FIX\n self.incrementInternalClientEventCounts(detectedClient)\n\n def incrementInternalClientEventCounts(self,detectedClient):\n self.clientEventLogger.debug(\"==================================== EVENT COUNTS DATA START ====================================\")\n self.clientEventLogger.debug(\"Counts before inCount %i : outCount %i\" %(self.__countClientInRange,self.__countClientOutOfRange))\n\n #self.clientEventLogger.debug(\"rssi types\")\n #self.clientEventLogger.debug(\"type of self.detectedClient.extraData['rssi'] = %s\" %type(self.detectedClient.extraData['rssi']))\n #self.clientEventLogger.debug(\"type of self.detectedClient.extraData['rssi'] = %s\" %type(self.detectedClient.extraData['rssi']))\n #self.clientEventLogger.debug(\"type of self.collectionPointConfig['btleRssiClientInThreshold'] = %s \" %type(self.collectionPointConfig['btleRssiClientInThreshold']))\n #self.clientEventLogger.debug(\"type of self.__clientOutThresholdMin = %s \" %type(self.__clientOutThresholdMin))\n\n if self.collectionPointConfig['gatewayType'] == 'proximity':\n #check threshold type\n if self.collectionPointConfig['btleRssiClientInThresholdType'] == 'rssi':\n #are they in or are they out of range --- increament internal count. 
we use the count to normalize the events even more\n #self.logger.debug(\"rssi average %i > btleRssi threshold %i: %s\" %(self.getRssiAverage(),self.collectionPointConfig['btleRssiClientInThreshold'],self.getRssiAverage() > self.collectionPointConfig['btleRssiClientInThreshold']))\n self.clientEventLogger.debug(\"Registered Client Event\")\n self.clientEventLogger.debug(\"UDID is %s \" %self.getUdid())\n self.clientEventLogger.debug(\"Beacon ID is %s \" %self.beaconId)\n self.clientEventLogger.debug(\"RSSI %i\" %self.detectedClient.extraData['rssi'])\n self.clientEventLogger.debug(\"BTLE RSSI client in threshold %i\" %self.collectionPointConfig['BtleRssiClientInThreshold'])\n self.clientEventLogger.debug(\"BTLE RSSI client out threshold %i\" %self.__clientOutThresholdMin)\n\n if self.detectedClient.extraData['rssi'] >= self.collectionPointConfig['BtleRssiClientInThreshold']:\n self.__countClientInRange = self.__countClientInRange + 1\n self.__countClientOutOfRange = 0\n self.clientEventLogger.debug(\"CLIENT IN RANGE>>>>>>>>>>>\")\n\n else:\n if self.detectedClient.extraData['rssi'] <= self.__clientOutThresholdMin:\n self.__countClientOutOfRange = self.__countClientOutOfRange + 1\n #self.__countClientInRange = 0\n self.clientEventLogger.debug(\"CLIENT OUT OF RANGE<<<<<<<<<<<\")\n\n else:\n self.clientEventLogger.debug(\"CLIENT IN BUFFER AREA==========\")\n\n self.clientEventLogger.debug(\"Counts after inCount %i : outCount %i\" %(self.__countClientInRange,self.__countClientOutOfRange))\n self.clientEventLogger.debug(\"==================================== EVENT COUNTS DATA END ====================================\")\n self.clientEventLogger.debug(\"\")\n\n #part of interface for Registered Client\n def shouldSendClientInEvent(self):\n if self.collectionPointConfig['gatewayType'] == 'proximity':\n #we compare on seconds so we need to adjust this to seconds\n proximityEventIntervalInSeconds = (self.collectionPointConfig['ProximityEventIntervalInMilliseconds']/1000)\n\n timeDiff = math.trunc(time.time() - self.lastTimeMessageClientInWasSentToController)\n self.logger.debug(\"shouldSendClientInEvent timeDiff %f > %s\" %(timeDiff,proximityEventIntervalInSeconds) )\n\n if timeDiff > proximityEventIntervalInSeconds:\n if self.__countClientInRange > self.clientInRangeTrigerCount:\n self.logClientEventSend(\"SHOULD ClientIN event to controller for\")\n self.zeroEventRangeCounters()\n return True\n\n #TODO add in other types of gateway types\n\n return False\n\n #part of interface for Registered Client\n def shouldSendClientOutEvent(self):\n if self.collectionPointConfig['gatewayType'] == 'proximity':\n #we compare on seconds so we need to adjust this to seconds\n proximityEventIntervalInSeconds = (self.collectionPointConfig['ProximityEventIntervalInMilliseconds']/1000)\n\n #check the time to see if we need to send a message\n #have we ever sent an IN event? if not we dont need to send an out event\n if self.lastTimeMessageClientInWasSentToController > 0:\n #check timing on last event sent\n #self.logger.debug(\"shouldSendClientOutEvent lastTimeMessageClientOutWasSentToController=%f\"%self.lastTimeMessageClientOutWasSentToController)\n timeDiff = time.time() - self.lastTimeMessageClientOutWasSentToController\n\n #have we sent a client out since the last client in? if so we dont need to throw another\n if self.lastTimeMessageClientOutWasSentToController < self.lastTimeMessageClientInWasSentToController:\n #do we have enought qualifying out events. 
we dont want to throw one too soon\n if self.__countClientOutOfRange >= self.collectionPointConfig['BtleClientOutCountThreshold']:\n self.logClientEventSend(\"SHOULD ClientOUT event to controller for\")\n self.zeroEventRangeCounters()\n return True\n\n #lets check to see if we need to clean up the out count --- not sure this is the best idea\n else:\n if self.__countClientOutOfRange > self.collectionPointConfig['BtleClientOutCountThreshold']:\n self.clientEventLogger.debug(\"Client out count %i is past max. Resetting.\" %self.__countClientOutOfRange)\n self.__countClientOutOfRange = 0\n\n else:\n #lets check to see if we need to clean up the out count --- not sure this is the best idea\n if self.__countClientOutOfRange > self.collectionPointConfig['BtleClientOutCountThreshold']:\n self.clientEventLogger.debug(\"Client out count %i is past max. Resetting.\" %self.__countClientOutOfRange)\n self.__countClientOutOfRange = 0\n\n #TODO add in other types of gateway types\n\n return False\n\n #part of interface for Registered Client\n def sweepShouldSendClientOutEvent(self):\n if self.collectionPointConfig['gatewayType'] == 'proximity':\n #has an out event already been sent? if so we dont need to throw another on sweep\n if self.lastTimeMessageClientOutWasSentToController > 0:\n #was there a in event sent after the last out?\n if self.lastTimeMessageClientInWasSentToController > self.lastTimeMessageClientOutWasSentToController:\n self.logClientEventSend(\"Sweep case a is sending ClientOUT on\")\n self.zeroEventRangeCounters()\n return True\n else:\n return False\n else:\n self.logClientEventSend(\"Sweep case b is sending ClientOUT on\")\n self.zeroEventRangeCounters()\n return True\n\n #TODO add in other types of gateway types\n\n return True\n\n #part of interface for Registered Client\n def getUdid(self):\n return self.detectedClient.extraData[\"beaconMac\"]\n\n def getTxPower(self):\n return self.txPower\n\n #zero out the BTLE event counters\n def zeroEventRangeCounters(self):\n self.__countClientOutOfRange = 0\n self.__countClientInRange = 0\n\n def logClientEventSend(self,message):\n self.clientEventSendLogger.debug(\"\")\n self.clientEventSendLogger.debug(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n self.clientEventSendLogger.debug(\"%%%%%%%%%%%%%%%%%% %s %%%%%%%%%%%%%%%%%%\" %message)\n self.clientEventSendLogger.debug(\"UDID is %s \" %self.getUdid())\n self.clientEventSendLogger.debug(\"Beacon ID is %s \" %self.beaconId)\n self.clientEventSendLogger.debug(\"RSSI %i\" %self.detectedClient.extraData['rssi'])\n self.clientEventSendLogger.debug(\"BTLE RSSI client in threshold %i\" %self.collectionPointConfig['BtleRssiClientInThreshold'])\n self.clientEventSendLogger.debug(\"BTLE RSSI client out threshold %i\" %self.__clientOutThresholdMin)\n self.clientEventSendLogger.debug(\"inCount %i : outCount %i\" %(self.__countClientInRange,self.__countClientOutOfRange))\n self.clientEventSendLogger.debug(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n self.clientEventSendLogger.debug(\"\")\n\n\n #part of interface for Registered Client\n def getExtenedDataForEvent(self):\n extraData = {}\n extraData['lastRegisteredTime'] = self.lastRegisteredTime\n extraData['firstRegisteredTime'] = self.firstRegisteredTime\n extraData['lastTimeMessageClientInWasSentToController'] = self.lastTimeMessageClientInWasSentToController\n extraData['lastTimeMessageClientOutWasSentToController'] = self.lastTimeMessageClientOutWasSentToController\n 
extraData['timeInCollectionPointInMilliseconds'] = self.timeInCollectionPointInMilliseconds\n extraData['rssi'] = self.detectedClient.extraData['rssi']\n extraData['averageRssi'] = self.detectedClient.extraData['rssi']\n extraData['txPower'] = self.getTxPower()\n #TODO INSTALL FIX\n extraData['beaconId'] = self.beaconId\n\n return extraData\n\n #part of interface for Registered Client\n def setClientInMessageSentToController(self):\n self.lastTimeMessageClientInWasSentToController = time.time()\n self.__countClientInRange = 0\n\n #part of interface for Registered Client\n def setClientOutMessageSentToController(self):\n self.lastTimeMessageClientOutWasSentToController = time.time()\n self.__countClientOutOfRange = 0\n","sub_path":"collection_modules/btleCollectionPoint/btle/btleRegisteredClient.py","file_name":"btleRegisteredClient.py","file_ext":"py","file_size_in_byte":12426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"462296682","text":"import pymysql\n\n# now 클레스화 -> 재사용 하기위해서\nclass SelectNowDao:\n def select_now(self, connection):\n \"\"\"현재 시점 선언\n\n - 이력 관리를 위한 now 시점 선언\n\n Author:\n 백승찬\n\n Args:\n connection (객체): pymysql 객체\n\n Raises:\n\n Returns:\n 200 : datetime.datetime()\n \"\"\"\n\n query = \"\"\"\n SELECT now()\n \"\"\"\n \n with connection.cursor(pymysql.cursors.DictCursor) as cursor:\n\n cursor.execute(query)\n\n result = cursor.fetchone()\n result = result[\"now()\"]\n\n return result","sub_path":"BackEnd/model/util_dao.py","file_name":"util_dao.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"444088945","text":"# -*- coding: utf-8 -*-\nimport requests\nfrom Queue import Queue\nimport threading\nimport time\nimport telepot\nimport json\ntoken = \"416666512:AAFgiPNP8tqtn9EgkVF_Dwww0CU4uvCTLDw\"\nTelegramBot = telepot.Bot(token)\nanswer = json.dumps(TelegramBot.getUpdates()[-1])\ndata = json.loads(answer)\nold_id = data['update_id']\nLastTradeInfo = {\"ltc_sell\":0, \"ltc_buy\":0, \"ltc_last\":0, \"btc_sell\":0, \"btc_buy\":0, \"btc_last\":0, \"eth_sell\":0, \"eth_buy\":0, \"eth_last\":0, \"dsh_sell\":0, \"dsh_buy\":0, \"dsh_last\":0}\ndef crypto_api():\n\tglobal queue\n\tglobal LastTradeInfo\n\tlist = {'/btc':'btc', '/eth':'eth', '/DASH':'dsh', '/ltc':'ltc'}\n\twhile True:\n\t\ttime.sleep(10)\n\t\t#\"закрывает\" участок кода только для одного потока\n\t\t# в этот момент времени, чтоб не возникало конфликта записи\n\t\t# нам не требется, с этим участком работает только один поток\n\t\t#LOCK.acquire()\n\t\ttrade_data = json.loads(requests.get(\"https://btc-e.com/api/3/ticker/btc_usd-ltc_usd-eth_usd-dsh_usd\").content)\n\t\tfor name in list:\n\t\t\tname = list[name]\n\t\t\tfor key in [\"sell\", \"buy\", \"last\"]:\n\t\t\t\tLastTradeInfo[name+\"_\"+key] = trade_data[name+\"_usd\"][key] \n\t\t#LOCK.release()\ndef chat(old_id, TelegramBot):\n\tglobal queue\n\tglobal LastTradeInfo\n\tlist = {'/btc':'btc', '/eth':'eth', '/DASH':'dsh', '/ltc':'ltc'}\n\twhile True:\n\t\tanswer = json.dumps(TelegramBot.getUpdates()[-1])\n\t\tdata = json.loads(answer)\n\t\tnew_id = data['update_id']\n\t\ttext = data['message']['text']\n\t\tchat_id = data['message']['chat']['id']\n\t\tif new_id != old_id:\n\t\t\ttry:\n\t\t\t\tname = list[text]\n\t\t\t\tbot_say = \"покупаем - \" + str(LastTradeInfo[name+\"_sell\"]) + \", продаем - \" + str(LastTradeInfo[name+\"_buy\"]) + \", последняя - \" + 
str(LastTradeInfo[name+\"_last\"])\n\t\t\t\tTelegramBot.sendMessage(chat_id, bot_say)\n\t\t\texcept:\n\t\t\t\tTelegramBot.sendMessage(chat_id, u\"Это мы не проходили, это нам не задавали\")\n\t\told_id = new_id\nqueue = Queue()\nLOCK = threading.RLock()\nthread_ = threading.Thread(target=crypto_api)\nthread_.start()\nthread_ = threading.Thread(target=chat(old_id, TelegramBot))\nthread_.start()\n","sub_path":"potok.py","file_name":"potok.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"21721791","text":"import os\nimport json\nimport random\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n\n\n# search_pfrase = 'о+войне'\n#\n#\n# search = 'о+войне'\n# param = {'s': search}\n# print(param)\n#\n# r = requests.get('https://zagge.ru/', params=param) #отправляем HTTP запрос и получаем результат\n#\n# soup = BeautifulSoup(r.text, 'html.parser')\n# link = soup.find_all('div', {'class': 'loop-entry-thumbnail'})\n# l = len(link)\n# if l > 0:\n# k = round(random.uniform(0, l-1))\n# url_fackt = link[k].find('a').get('href')\n# print(url_fackt)\n# rf = requests.get(url_fackt)\n# soup_rez = BeautifulSoup(rf.text, 'html.parser')\n#\n# link_rez = soup_rez.find('div', {'class': 'post-content'})\n# print(link_rez.get_text())\n\nurl = \"https://www.googleapis.com/customsearch/v1\"\nsearch = 'рецепти на ужен'\nparam = {\n 'key': 'AIzaSyC9z0aEVdY6gvx9_EP8rmfRhIa6n_QR8FM',\n 'cx': '010349152742613606712:5h9izqqtp1s',\n 'q': search\n}\n# print(param)\n\nr = requests.get(url, params=param) #отправляем HTTP запрос и получаем результат\narr_url = r.json()['items']\nfor i in arr_url:\n print(i['title'])\n print(i['link'])\n\n# soup = BeautifulSoup(r.json(), 'json.parser')\n# print(soup)\n# link = soup.find_all('div', {'class': 'hlcw0c'})\n# print(link)\n# l = len(link)\n# if l > 0:\n# k = round(random.uniform(0, l-1))\n# url_fackt = link[k].find('a').get('href')\n# print(url_fackt)\n# rf = requests.get(url_fackt)\n# soup_rez = BeautifulSoup(rf.text, 'html.parser')\n#\n# link_rez = soup_rez.find('div', {'class': 'post-content'})\n# print(link_rez.get_text())\n","sub_path":"test/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"29965548","text":"#we get inputs as an array and target ,we have to check whether the array has any pair of element whose sum eqauls to target.for eg-a[2,7,9,11] target=9 we return 1 since 2+7= 9\n\n\nimport time\n\nstart=time.time()\nl=input().strip().split()\n#k=[]\n#for i in l:\n#\tk.append(int(i))\nk=[int(i) for i in l]\nt=int(input())\nflag=False\nfor i in k:\n\tif t-i in k:\n\t\tprint(i,t-i)\n\t\tflag=True\n\t\tbreak\nif flag==False:\n\tprint(\"not present\")\nend=time.time()\nprint(end-start)\n","sub_path":"python/CheckforTargetInArray.py","file_name":"CheckforTargetInArray.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"273924940","text":"import os\nimport logging\n\nfrom rcGlobalEnv import rcEnv\nfrom rcUtilities import which\nfrom rcUtilitiesLinux import lv_info\nfrom subprocess import *\nfrom converters import print_duration\nimport rcExceptions as ex\nimport rcStatus\nimport datetime\nimport resSync\n\nclass syncDds(resSync.Sync):\n def pre_action(self, action):\n resources = [r for r in self.rset.resources if \\\n not r.skip and not r.is_disabled() 
and \\\n r.type == self.type]\n\n if len(resources) == 0:\n return\n\n self.pre_sync_check_prd_svc_on_non_prd_node()\n\n for i, r in enumerate(resources):\n if not r.svc_syncable():\n return\n r.get_info()\n if action == 'sync_full':\n r.remove_snap1()\n r.create_snap1()\n elif action in ['sync_update', 'sync_resync', 'sync_drp', 'sync_nodes']:\n if action == 'sync_nodes' and self.target != ['nodes']:\n return\n if action == 'sync_drp' and self.target != ['drpnodes']:\n return\n r.get_info()\n r.get_snap1_uuid()\n nb = 0\n tgts = r.targets.copy()\n for n in tgts:\n try:\n r.check_remote(n)\n nb += 1\n except:\n self.targets -= set([n])\n if nb != len(tgts):\n self.log.error('all destination nodes must be present for dds-based synchronization to proceed')\n raise ex.excError\n r.create_snap2()\n\n def snap_exists(self, dev):\n if not os.path.exists(dev):\n self.log.debug('dev path does not exist')\n return False\n cmd = [rcEnv.syspaths.lvs, '--noheadings', '-o', 'snap_percent', dev]\n (ret, out, err) = self.call(cmd, errlog=False)\n if ret != 0:\n return False\n if len(out.strip()) == 0:\n self.log.debug('dev is not a snapshot')\n return False\n return True\n\n def create_snap(self, dev, lv):\n if self.snap_exists(dev):\n self.log.error('%s should not exist'%dev)\n raise ex.excError\n cmd = ['lvcreate', '-s', '-n', lv,\n '-L', str(self.snap_size)+'M',\n os.path.join(os.sep, 'dev', self.src_vg, self.src_lv)\n ]\n (ret, out, err) = self.vcall(cmd)\n if ret != 0:\n raise ex.excError\n\n def set_statefile(self):\n self.statefile = os.path.join(self.var_d, 'dds_state')\n\n def create_snap1(self):\n if self.snap_exists(self.snap2):\n self.log.error('%s should not exist'%self.snap2)\n raise ex.excError\n self.create_snap(self.snap1, self.snap1_lv)\n self.write_statefile()\n\n def create_snap2(self):\n self.create_snap(self.snap2, self.snap2_lv)\n\n def snap_name(self, snap):\n return os.path.basename(self.src_lv).replace('-', '_')+'_osvc_'+snap\n\n def get_src_info(self):\n (self.src_vg, self.src_lv, self.src_size) = lv_info(self, self.src)\n if self.src_lv is None:\n self.log.error(\"unable to fetch source logical volume information\")\n raise ex.excError\n if self.snap_size == 0:\n self.snap_size = self.src_size//10\n self.snap1_lv = self.snap_name('snap1')\n self.snap2_lv = self.snap_name('snap2')\n self.snap1 = os.path.join(os.sep, 'dev', self.src_vg, self.snap1_lv)\n self.snap2 = os.path.join(os.sep, 'dev', self.src_vg, self.snap2_lv)\n self.snap1_cow = os.path.join(os.sep, 'dev', 'mapper',\n '-'.join([self.src_vg.replace('-', '--'),\n self.snap1_lv,\n 'cow'])\n )\n\n def get_peersenders(self):\n self.peersenders = set()\n if 'nodes' not in self.target:\n self.peersenders |= self.svc.nodes\n self.peersenders -= set([rcEnv.nodename])\n\n def get_targets(self):\n self.targets = set()\n if 'nodes' in self.target:\n self.targets |= self.svc.nodes\n if 'drpnodes' in self.target:\n self.targets |= self.svc.drpnodes\n self.targets -= set([rcEnv.nodename])\n\n def get_info(self):\n self.get_targets()\n self.get_src_info()\n\n def svc_syncable(self):\n try:\n self.pre_sync_check_svc_not_up()\n self.pre_sync_check_flex_primary()\n except ex.excAbortAction:\n return False\n return True\n\n def sync_full(self):\n if not self.svc_syncable():\n return\n for n in self.targets:\n self.do_fullsync(n)\n\n def do_fullsync(self, node):\n dst = self.dsts[node]\n cmd1 = ['dd', 'if='+self.snap1, 'bs=1M']\n cmd2 = rcEnv.rsh.split() + [node, 'dd', 'bs=1M', 'of='+dst]\n self.log.info(' '.join(cmd1 + [\"|\"] + cmd2))\n 
p1 = Popen(cmd1, stdout=PIPE)\n p2 = Popen(cmd2, stdin=p1.stdout, stdout=PIPE)\n buff = p2.communicate()\n if p2.returncode == 0:\n stats_buff = buff[1]\n stats = self.parse_dd(stats_buff)\n self.update_stats(stats, target=node)\n else:\n if buff[1] is not None and len(buff[1]) > 0:\n self.log.error(buff[1])\n self.log.error(\"full sync failed\")\n raise ex.excError\n self.push_statefile(node)\n\n def get_snap1_uuid(self):\n cmd = [rcEnv.syspaths.lvs, '--noheadings', '-o', 'uuid', self.snap1]\n (ret, out, err) = self.call(cmd)\n if ret != 0:\n raise ex.excError\n self.snap1_uuid = out.strip()\n\n def write_statefile(self):\n self.set_statefile()\n self.get_snap1_uuid()\n self.log.info(\"update state file with snap uuid %s\"%self.snap1_uuid)\n with open(self.statefile, 'w') as f:\n f.write(str(datetime.datetime.now())+';'+self.snap1_uuid+'\\n')\n\n def _push_statefile(self, node):\n cmd = rcEnv.rcp.split() + [self.statefile, node+':'+self.statefile]\n (ret, out, err) = self.vcall(cmd)\n if ret != 0:\n raise ex.excError\n\n def push_statefile(self, node):\n self.set_statefile()\n self._push_statefile(node)\n self.get_peersenders()\n for s in self.peersenders:\n self._push_statefile(s)\n\n def apply_delta(self, node):\n if not which('dds'):\n raise ex.excError(\"dds executable not found\")\n dst = self.dsts[node]\n extract_cmd = ['dds', '--extract', '--cow', self.snap1_cow, '--source',\n self.snap2]\n merge_cmd = ['dds', '--merge', '--dest', dst, '-v']\n merge_cmd = rcEnv.rsh.split() + [node] + merge_cmd\n self.log.info(' '.join(extract_cmd + [\"|\"] + merge_cmd))\n p1 = Popen(extract_cmd, stdout=PIPE)\n pi = Popen([\"dd\", \"bs=4096\"], stdin=p1.stdout, stdout=PIPE, stderr=PIPE)\n p2 = Popen(merge_cmd, stdin=pi.stdout, stdout=PIPE)\n buff = p2.communicate()\n if p2.returncode == 0:\n stats_buff = pi.communicate()[1]\n stats = self.parse_dd(stats_buff)\n self.update_stats(stats, target=node)\n else:\n if buff[1] is not None and len(buff[1]) > 0:\n self.log.error(buff[1])\n self.log.error(\"sync update failed\")\n raise ex.excError\n if buff[0] is not None and len(buff[0]) > 0:\n self.log.info(buff[0])\n\n def do_update(self, node):\n self.apply_delta(node)\n\n def remove_snap1(self):\n if not self.snap_exists(self.snap1):\n return\n cmd = ['lvremove', '-f', self.snap1]\n (ret, out, err) = self.vcall(cmd)\n if ret != 0:\n raise ex.excError\n\n def rename_snap2(self):\n if not self.snap_exists(self.snap2):\n self.log.error(\"%s should exist\"%self.snap2)\n raise ex.excError\n if self.snap_exists(self.snap1):\n self.log.error(\"%s should not exist\"%self.snap1)\n raise ex.excError\n cmd = ['lvrename', self.src_vg, self.snap2_lv, self.snap1_lv]\n (ret, out, err) = self.vcall(cmd)\n if ret != 0:\n raise ex.excError\n\n def rotate_snaps(self):\n self.remove_snap1()\n self.rename_snap2()\n\n def check_remote(self, node):\n rs = self.get_remote_state(node)\n if self.snap1_uuid != rs['uuid']:\n self.log.error(\"%s last update uuid doesn't match snap1 uuid\"%(node))\n raise ex.excError\n\n def get_remote_state(self, node):\n self.set_statefile()\n cmd1 = ['env', 'LANG=C', 'cat', self.statefile]\n cmd = rcEnv.rsh.split() + [node] + cmd1\n (ret, out, err) = self.call(cmd)\n if ret != 0:\n self.log.error(\"could not fetch %s last update uuid\"%node)\n raise ex.excError\n return self.parse_statefile(out, node=node)\n\n def get_local_state(self):\n self.set_statefile()\n with open(self.statefile, 'r') as f:\n out = f.read()\n return self.parse_statefile(out)\n\n def parse_statefile(self, out, node=None):\n 
self.set_statefile()\n if node is None:\n node = rcEnv.nodename\n lines = out.strip().split('\\n')\n if len(lines) != 1:\n self.log.error(\"%s:%s is corrupted\"%(node, self.statefile))\n raise ex.excError\n fields = lines[0].split(';')\n if len(fields) != 2:\n self.log.error(\"%s:%s is corrupted\"%(node, self.statefile))\n raise ex.excError\n return dict(date=fields[0], uuid=fields[1])\n\n def sync_nodes(self):\n if self.target != ['nodes']:\n return\n self.sync_update()\n\n def sync_drp(self):\n if self.target != ['drpnodes']:\n return\n self.sync_update()\n\n def sync_update(self):\n if not self.svc_syncable():\n return\n for n in self.targets:\n self.do_update(n)\n self.rotate_snaps()\n self.write_statefile()\n for n in self.targets:\n self.push_statefile(n)\n self.write_stats()\n\n def checksum(self, node, bdev, q=None):\n cmd = ['md5sum', bdev]\n if node != rcEnv.nodename:\n cmd = rcEnv.rsh.split() + [node] + cmd\n (ret, out, err) = self.call(cmd)\n if ret != 0:\n return \"\"\n o = out.split()\n if q is not None:\n q.put(o[0])\n else:\n self.checksums[node] = o[0]\n\n def sync_verify(self):\n if not self.svc_syncable():\n return\n self.get_info()\n from multiprocessing import Process, Queue\n self.checksums = {}\n queues = {}\n ps = []\n self.log.info(\"start checksum threads. please be patient.\")\n for n in self.targets:\n dst = self.dsts[n]\n queues[n] = Queue()\n p = Process(target=self.checksum, args=(n, dst, queues[n]))\n p.start()\n ps.append(p)\n self.checksum(rcEnv.nodename, self.snap1)\n self.log.info(\"md5 %s: %s\"%(rcEnv.nodename, self.checksums[rcEnv.nodename]))\n for p in ps:\n p.join()\n for n in self.targets:\n self.checksums[n] = queues[n].get()\n self.log.info(\"md5 %s: %s\"%(n, self.checksums[n]))\n if len(self.checksums) < 2:\n self.log.error(\"not enough checksums collected\")\n raise ex.excError\n err = False\n for n in self.targets:\n if self.checksums[rcEnv.nodename] != self.checksums[n]:\n self.log.error(\"src/dst checksums differ for %s/%s\"%(rcEnv.nodename, n))\n err = True\n if not err:\n self.log.info(\"src/dst checksums verified\")\n\n def start(self):\n pass\n\n def stop(self):\n pass\n\n def can_sync(self, target=None):\n try:\n ls = self.get_local_state()\n last = datetime.datetime.strptime(ls['date'], \"%Y-%m-%d %H:%M:%S.%f\")\n except IOError:\n return True\n return not self.skip_sync(last)\n\n def sync_status(self, verbose=False):\n try:\n ls = self.get_local_state()\n now = datetime.datetime.now()\n last = datetime.datetime.strptime(ls['date'], \"%Y-%m-%d %H:%M:%S.%f\")\n delay = datetime.timedelta(seconds=self.sync_max_delay)\n except ex.excError:\n self.status_log(\"failed to get status\")\n return rcStatus.WARN\n except IOError:\n self.status_log(\"dds state file not found\")\n return rcStatus.WARN\n except:\n import sys\n import traceback\n e = sys.exc_info()\n print(e[0], e[1], traceback.print_tb(e[2]))\n return rcStatus.WARN\n if last < now - delay:\n self.status_log(\"Last sync on %s older than %s\"%(last, print_duration(self.sync_max_delay)))\n return rcStatus.WARN\n return rcStatus.UP\n\n def _info(self):\n data = [\n [\"src\", self.src],\n [\"target\", \" \".join(self.target) if self.target else \"\"],\n ]\n data += self.stats_keys()\n return data\n\n def __init__(self,\n rid=None,\n target=None,\n src=None,\n dsts={},\n snap_size=0,\n **kwargs):\n resSync.Sync.__init__(self,\n rid=rid,\n type=\"sync.dds\",\n **kwargs)\n\n self.label = \"dds of %s to %s\"%(src, \", \".join(target))\n self.target = target\n self.src = src\n self.dsts = 
dsts\n self.snap_size = snap_size\n\n def __str__(self):\n return \"%s target=%s src=%s\" % (resSync.Sync.__str__(self),\\\n self.target, self.src)\n\n @resSync.notify\n def sync_all(self):\n self.sync_nodes()\n self.sync_drp()\n","sub_path":"lib/resSyncDds.py","file_name":"resSyncDds.py","file_ext":"py","file_size_in_byte":14141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"46455385","text":"import tkinter, time , random\n\nokno = tkinter.Tk()\nokno.geometry('555x555')\n\nholst = tkinter.Canvas(okno, width=555, height=555)\nholst.pack()\n\nzvezda = holst.create_polygon(\n 20,40,\n 33,66,\n 90,40,\n fill='#bbffbb'\n)\n\ndef nikita():\n return random.randrange(1,3)\n\n\n\nx = 1\ny = 2\nwhile 1:\n holst.move(zvezda, x, y )\n okno.update()\n time.sleep(0.01)\n k = holst.coords(zvezda)\n print(k, k[4])\n if k[4]>555:\n x = -x * nikita()\n if k[0] < 0:\n x = -x * nikita()\n if k[5]>555:\n y = -y * nikita()\n if k[1] < 0:\n y = -y * nikita()\n\n\n\nokno.mainloop()","sub_path":"Bonjor!.py","file_name":"Bonjor!.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"564790252","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, 'README.md')) as f:\n README = f.read()\n\nsetup(\n name='aphrodite',\n version='0.1.4',\n description='ENOS API',\n long_description=README,\n author='Kid QU',\n long_description_content_type='text/markdown',\n author_email='kidcrazequ@gmail.com',\n url='https://github.com/EnvisionIot',\n license='GPLv3',\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n platforms=['all'],\n zip_safe=False,\n install_requires=[\n 'pycryptodome>=3.8.2',\n 'simplejson>=3.16.0',\n ],\n)","sub_path":"pypi_install_script/aphrodite-0.1.4.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"539233940","text":"#!/usr/bin/env python\n# coding:utf-8\n\"\"\"\n Purpose: Multiple linear regression\n Created: 14/04/2017\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_absolute_error\n\ndf = pd.read_csv('50_Startups.csv')\nX = df[df.columns[:4]].values\ny = df[[\"Profit\"]].values\n\nlabel_encoder = LabelEncoder()\nX[:, -1] = label_encoder.fit_transform(X[:, -1])\nonehot_encoder = OneHotEncoder(categorical_features=[3])\nX = onehot_encoder.fit_transform(X).toarray()\nX = X[:, 1:] # avoid dummy variable trap\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.6, random_state=0)\n\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\ny_pred = regressor.predict(X_test)\n\nprint(mean_absolute_error(y_test, y_pred))\n\n# Backward elimination, etc.\n","sub_path":"Machine Learning Course/Part 2 - Regression/Section 5 - Multiple Linear 
Regression/multiple_linear_regression.py","file_name":"multiple_linear_regression.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"221838998","text":"import time\nfrom dateutil import parser\n\nfrom pyramid.view import view_config\nfrom pyramid.response import Response\nfrom pyramid.httpexceptions import HTTPNotModified\nfrom speasy.core.inventory.indexes import to_json, to_dict, SpeasyIndex\nfrom speasy.inventories import tree\nfrom speasy import list_providers\nfrom ..inventory_updater import EnsureUpdatedInventory\nimport zstd\nimport logging\nimport uuid\n\nfrom . import pickle_data\n\nlog = logging.getLogger(__name__)\n\n\ndef _get_inventory(provider):\n if provider == \"all\":\n if 'build_date' not in tree.__dict__:\n build_dates = [parser.parse(tree.__dict__[provider].build_date) for provider in tree.__dict__.keys()]\n tree.__dict__[\"build_date\"] = max(build_dates).isoformat()\n return SpeasyIndex(name=\"all\", provider=\"speasy_proxy\", uid=\"\", meta=tree.__dict__)\n return tree.__dict__[provider]\n\n\ndef encode_output(inventory: SpeasyIndex, request):\n output_format = request.params.get(\"format\", \"json\")\n if output_format == \"python_dict\":\n return pickle_data(to_dict(inventory), request), \"application/python-pickle\"\n elif output_format == 'json':\n return to_json(inventory), \"application/json; charset=utf-8\"\n\n\ndef compress_if_asked(data, mime, request):\n if request.params.get(\"zstd_compression\", \"false\") == \"true\":\n if type(data) is str:\n data = data.encode()\n mime = \"application/x-zstd-compressed\"\n data = zstd.compress(data)\n return data, mime\n\n\n@view_config(route_name='get_inventory', openapi=True, decorator=(EnsureUpdatedInventory(),))\ndef get_inventory(request):\n request_start_time = time.time_ns()\n request_id = uuid.uuid4()\n provider = request.params.get(\"provider\", None)\n\n if provider is None:\n log.error(f'Missing parameter: provider')\n return Response(\n content_type=\"text/plain\",\n body=f\"Error: missing provider parameter\",\n headerlist=[('Access-Control-Allow-Origin', '*'), ('Content-Type', \"text/plain\")]\n )\n\n log.debug(f'New inventory request {request_id}: {provider}')\n\n inventory = _get_inventory(provider)\n if \"If-Modified-Since\" in request.headers:\n if parser.parse(request.headers[\"If-Modified-Since\"]) >= parser.parse(inventory.build_date):\n log.debug(f'{request_id}, client inventory is up to date')\n return HTTPNotModified()\n\n result, mime = compress_if_asked(*encode_output(inventory, request), request)\n request_duration = (time.time_ns() - request_start_time) / 1000.\n\n log.debug(f'{request_id}, duration = {request_duration}us')\n\n return Response(content_type=mime, body=result,\n headerlist=[('Access-Control-Allow-Origin', '*'), ('Content-Type', mime)])\n","sub_path":"speasy_proxy/views/get_inventory.py","file_name":"get_inventory.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"615426297","text":"\"\"\"\n1102. Path With Maximum Minimum Value\nGiven a matrix of integers A with R rows and C columns, find the maximum score of a path starting at [0,0] and ending at [R-1,C-1].\n\nThe score of a path is the minimum value in that path. 
For example, the value of the path 8 → 4 → 5 → 9 is 4.\n\nA path moves some number of times from one visited cell to any neighbouring unvisited cell in one of the 4 cardinal directions (north, east, west, south).\n\n \n\nExample 1:\n\n\n\nInput: [[5,4,5],[1,2,6],[7,4,6]]\nOutput: 4\nExplanation: \nThe path with the maximum score is highlighted in yellow. \nExample 2:\n\n\n\nInput: [[2,2,1,2,2,2],[1,2,2,2,1,2]]\nOutput: 2\nExample 3:\nInput: [[3,4,6,3,4],[0,2,1,1,7],[8,8,3,2,7],[3,2,4,9,8],[4,1,2,0,0],[4,6,5,4,3]]\nOutput: 3\n\"\"\"\n\n# greedy again\n# no bfs no dfs no dp\n# should practice more greedy algorithm\n# Runtime: 1304 ms, faster than 61.06% of Python3 online submissions for Path With Maximum Minimum Value.\n# Memory Usage: 15.7 MB, less than 100.00% of Python3 online submissions for Path With Maximum Minimum Value.\nimport heapq\nclass Solution:\n def maximumMinimumPath(self, A: List[List[int]]) -> int:\n # max heap solution\n \n visited = set([(0, 0)])\n DIRECTIONS = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n n_row, n_col = len(A), len(A[0])\n res = min(A[0][0], A[-1][-1])\n max_heap = [[-A[0][0], (0, 0)]]\n while max_heap:\n val, loc = heapq.heappop(max_heap)\n val = -val\n res = min(res, val)\n x, y = loc\n for dx, dy in DIRECTIONS:\n new_x, new_y = x + dx, y + dy\n if new_x<0 or new_x>=n_row or new_y<0 or new_y>=n_col:\n continue\n if (new_x, new_y) in visited:\n continue\n if new_x == n_row-1 and new_y == n_col-1:\n return res\n visited.add((new_x, new_y))\n heapq.heappush(max_heap, [-A[new_x][new_y], (new_x, new_y)])\n return res\n \n \n \n# Runtime: 2588 ms, faster than 8.26% of Python3 online submissions for Path With Maximum Minimum Value.\n# Memory Usage: 17.5 MB, less than 100.00% of Python3 online submissions for Path With Maximum Minimum Value.\nclass Solution:\n def maximumMinimumPath(self, A: List[List[int]]) -> int:\n n_row, n_col = len(A), len(A[0])\n self.union_find_set = collections.defaultdict()\n all_ele = []\n for i in range(n_row):\n for j in range(n_col):\n all_ele.append([A[i][j], (i, j)])\n self.union_find_set[(i, j)] = (i, j)\n res = min(A[0][0], A[-1][-1])\n all_ele = sorted(all_ele, key=lambda x:x[0])\n visited = set([(0, 0), (n_row-1, n_col-1)])\n DIRECTIONS = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n while self.find((0, 0)) != self.find((n_row-1, n_col-1)):\n val, loc = all_ele.pop()\n res = min(res, val)\n x, y = loc\n visited.add((x, y))\n base_parent = self.find((x, y))\n for dx, dy in DIRECTIONS:\n new_x, new_y = x + dx, y + dy\n if new_x<0 or new_x >= n_row or new_y<0 or new_y>=n_col:\n continue\n if (new_x, new_y) not in visited:\n continue\n visited.add((new_x, new_y))\n parent = self.find((new_x, new_y))\n if parent != base_parent:\n self.union_find_set[parent] = base_parent\n return res\n \n def find(self, loc):\n tmp = loc\n while self.union_find_set[tmp] != tmp:\n tmp = self.union_find_set[tmp]\n father = tmp\n while self.union_find_set[loc] != father:\n tmp = self.union_find_set[loc]\n self.union_find_set[loc] = father\n loc = tmp\n return father\n\n\n \n ","sub_path":"Widen/LC1102_Path_With_Maximum_Minimum_Value.py","file_name":"LC1102_Path_With_Maximum_Minimum_Value.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"503625294","text":"#!/usr/bin/python\nfrom workflow.workflow import ICON_HELP as WARNINGICON\n\n# Switches that autually controls the workflow behavior\nclass Options:\n def __init__(self, parser, workflow):\n self._parser=parser\n 
self._workflow=workflow\n        return None\n\n    def search(self, sheetName, keyword):\n        if sheetName is not None and sheetName not in self._parser.availableSheets():\n            Options.warning(\"Cheat sheet not found.\",\"\", self._workflow)\n            return None\n        if sheetName==None:\n            ret=self._parser.searchAcrossAll(keyword, self._workflow)\n        else:\n            ret=self._parser.searchInSheet(keyword, sheetName, self._workflow)\n        if ret==[]:\n            Options.warning(\"Not found\", \"No match found for search {}\".format(keyword), self._workflow)\n            return None\n        for item in ret:\n            self._workflow.add_item(\n                title=item[\"command\"],\n                subtitle=item[\"comment\"],\n                copytext=item.get(\"command\"),\n                valid=True,\n                arg=item.get(\"command\")\n            )\n        return None\n\n    def list(self, sheetName):\n        ret=self._parser.list(sheetName)\n        for item in ret:\n            self._workflow.add_item(\n                title=item.get(\"command\"),\n                subtitle=item.get(\"comment\"),\n                valid=True,\n                copytext=item.get(\"command\"),\n                arg=item.get(\"command\")\n            )\n        return None\n\n    def showAvailable(self, sheetName):\n        names=self._parser.availableSheets()\n        ret=self._workflow.filter(sheetName, names, key=lambda x: x)\n        if ret==[]:\n            Options.warning(\"Cheat sheet not found.\",\"\", self._workflow)\n            return None\n        for sheet in ret:\n            self._workflow.add_item(\n                title=sheet,\n                autocomplete=sheet,\n            )\n        return None\n\n    @staticmethod\n    def warning(msg,subtitle,workflow):\n        workflow.warn_empty(\n            title=msg,\n            subtitle=subtitle,\n            icon=WARNINGICON,\n        )\n        return None\n","sub_path":"lib/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"459762173","text":"#!/usr/bin/env python\n\nimport sys\n\ndef nbspReplace(args):\n    '''Write each line into w, from f, replacing instances of &nbsp; with \" \".\n\n    Takes in a text file name as an argument and will produce an output file\n    that is the same txt file with &nbsp; characters replaced with \" \".\n    \n    '''\n    f = open(args, 'r')\n    w = open(\"formatted.txt\", 'w')\n    for line in f:\n        w.write(line.replace(\"&nbsp;\", \" \"))\n    f.close()\n    w.close()\n\ndef main():\n    '''Calls nbspReplace on the argument from the command-line'''\n    nbspReplace(sys.argv[-1])\n\n\n# Boilerplate to call main() function\nif __name__ == '__main__':\n    main()\n","sub_path":"nbspKiller.py","file_name":"nbspKiller.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"612967049","text":"from typing import List\n\nfrom cloudrail.knowledge.context.azure.resources.constants.azure_resource_type import AzureResourceType\nfrom cloudrail.knowledge.context.azure.resources.databases.azure_cosmos_db_account import AzureCosmosDBAccount, \\\n    CosmosDBAccountConsistencyPolicy, CosmosDBAccountConsistencyLevel, CosmosDBAccountGeoLocation, \\\n    CosmosDBAccountCapabilities, CosmosDBAccountVirtualNetworkRule, CosmosDBAccountMongoServerVersion, \\\n    CosmosDBAccountBackup, CosmosDBAccountCorsRule, ComosDBAccountBackupType\nfrom cloudrail.knowledge.context.azure.resources.managed_identities.azure_managed_identity import AzureManagedIdentity\nfrom cloudrail.knowledge.context.azure.resources_builders.common_resource_builder_functions import create_terraform_system_managed_identity\n\nfrom cloudrail.knowledge.context.azure.resources_builders.terraform.azure_terraform_builder import AzureTerraformBuilder\n\n\nclass CosmosDBAccountBuilder(AzureTerraformBuilder):\n\n    def do_build(self, attributes: dict) -> AzureCosmosDBAccount:\n        
consistency_policy_list = []\n        geo_location_list = []\n        capabilities_list = []\n        virtual_network_rule_list = []\n        cors_rule_list = []\n        backup_list = []\n        for consistency_policy in attributes['consistency_policy']:\n            consistency_policy_list.append(\n                CosmosDBAccountConsistencyPolicy(CosmosDBAccountConsistencyLevel(self._get_known_value(consistency_policy, 'consistency_level')),\n                                                 self._get_known_value(consistency_policy, 'max_interval_in_seconds'),\n                                                 self._get_known_value(consistency_policy, 'max_staleness_prefix')))\n        for geo_location in attributes['geo_location']:\n            geo_location_list.append(CosmosDBAccountGeoLocation(self._get_known_value(geo_location, 'prefix'),\n                                                                self._get_known_value(geo_location, 'location'),\n                                                                self._get_known_value(geo_location, 'failover_priority'),\n                                                                self._get_known_value(geo_location, 'zone_redundant')))\n        if isinstance(attributes['backup'], list):\n            for backup in attributes['backup']:\n                if self._is_known_value(backup, 'type') and self._is_known_value(backup, 'interval_in_minutes') and self._is_known_value(backup, 'retention_in_hours'):\n                    backup_list.append(CosmosDBAccountBackup(ComosDBAccountBackupType(self._get_known_value(backup, 'type')),\n                                                             self._get_known_value(backup, 'interval_in_minutes'),\n                                                             self._get_known_value(backup, 'retention_in_hours')))\n\n        for cors_rule in attributes['cors_rule']:\n            cors_rule_list.append(CosmosDBAccountCorsRule(self._get_known_value(cors_rule, 'allowed_headers'),\n                                                          self._get_known_value(cors_rule, 'allowed_methods'),\n                                                          self._get_known_value(cors_rule, 'allowed_origins'),\n                                                          self._get_known_value(cors_rule, 'exposed_headers'),\n                                                          self._get_known_value(cors_rule, 'max_age_in_seconds')))\n\n        for capabilities in attributes['capabilities']:\n            capabilities_list.append(CosmosDBAccountCapabilities(self._get_known_value(capabilities, 'name')))\n\n        for virtual_network_rule in attributes['virtual_network_rule']:\n            virtual_network_rule_list.append(CosmosDBAccountVirtualNetworkRule(self._get_known_value(virtual_network_rule, 'id'),\n                                                                               self._get_known_value(virtual_network_rule,\n                                                                                                     'ignore_missing_vnet_service_endpoint')))\n        if attributes['mongo_server_version'].isnumeric():\n            mongo_server_version = CosmosDBAccountMongoServerVersion(attributes['mongo_server_version'])\n        else:\n            mongo_server_version = None\n        identity = create_terraform_system_managed_identity(attributes)\n        managed_identities: List[AzureManagedIdentity] = []\n        if identity:\n            managed_identities.append(identity)\n        return AzureCosmosDBAccount(name=attributes['name'],\n                                    offer_type=attributes['offer_type'],\n                                    kind=self._get_known_value(attributes, 'kind'),\n                                    consistency_policy_list=consistency_policy_list,\n                                    geo_location_list=geo_location_list,\n                                    ip_range_filter=self._get_known_value(attributes, 'ip_range_filter'),\n                                    enable_free_tier=self._get_known_value(attributes, 'enable_free_tier'),\n                                    analytical_storage_enabled=self._get_known_value(attributes, 'analytical_storage_enabled'),\n                                    enable_automatic_failover=self._get_known_value(attributes, 'enable_automatic_failover'),\n                                    public_network_access_enabled=self._get_known_value(attributes, 'public_network_access_enabled'),\n                                    capabilities_list=capabilities_list,\n                                    is_virtual_network_filter_enabled=self._get_known_value(attributes,'is_virtual_network_filter_enabled'),\n                                    virtual_network_rule_list=virtual_network_rule_list,\n                                    enable_multiple_write_locations=self._get_known_value(attributes,'enable_multiple_write_locations'),\n                                    access_key_metadata_writes_enabled=self._get_known_value(attributes,'access_key_metadata_writes_enabled'),\n                                    
mongo_server_version=mongo_server_version,\n                                    network_acl_bypass_for_azure_services=self._get_known_value(attributes,'network_acl_bypass_for_azure_services'),\n                                    network_acl_bypass_ids=self._get_known_value(attributes,'network_acl_bypass_ids'),\n                                    local_authentication_disabled=self._get_known_value(attributes,'local_authentication_disabled'),\n                                    backup=backup_list,\n                                    cors_rule_list=cors_rule_list,\n                                    managed_identities=managed_identities,\n                                    tags=self._get_known_value(attributes, 'tags'),\n                                    key_vault_key_id=self._get_known_value(attributes,'key_vault_key_id'))\n\n    def get_service_name(self) -> AzureResourceType:\n        return AzureResourceType.AZURERM_COSMOSDB_ACCOUNT\n","sub_path":"cloudrail/knowledge/context/azure/resources_builders/terraform/cosmos_db_account_builder.py","file_name":"cosmos_db_account_builder.py","file_ext":"py","file_size_in_byte":7069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"517135034","text":"# Imports here\nimport numpy as np\nimport torchvision as tv\nimport torch\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nfrom collections import OrderedDict\nfrom PIL import Image\n\ndata_dir = 'flowers'\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'\n\n# TODO: Define your transforms for the training, validation, and testing sets\nmean=[0.485, 0.456, 0.406]\nstd=[0.229, 0.224, 0.225]\ntrain_transform = transforms.Compose([\n    transforms.RandomRotation(30),\n    transforms.Resize(255),\n    transforms.RandomResizedCrop(224),\n    transforms.RandomHorizontalFlip(),\n    transforms.ToTensor(),\n    transforms.Normalize(mean=mean,std=std)])\nvalid_transform = transforms.Compose([\n    transforms.Resize(255),\n    transforms.CenterCrop(224),\n    transforms.ToTensor(),\n    transforms.Normalize(mean=mean,std=std)])\ntest_transform = transforms.Compose([\n    transforms.Resize(255),\n    transforms.CenterCrop(224),\n    transforms.ToTensor(),\n    transforms.Normalize(mean=mean,std=std)])\n# TODO: Load the datasets with ImageFolder\ntrain_set = tv.datasets.ImageFolder(train_dir, transform=train_transform)\nvalid_set = tv.datasets.ImageFolder(valid_dir, transform=valid_transform)\ntest_set = tv.datasets.ImageFolder(test_dir, transform=test_transform)\n \n# TODO: Using the image datasets and the transforms, define the dataloaders\ntrain_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)\nvalid_loader = torch.utils.data.DataLoader(valid_set, batch_size=64, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=True)\n\n\nimport json\n\nwith open('cat_to_name.json', 'r') as f:\n    cat_to_name = json.load(f)\n\n#print(len(cat_to_name))\n\n# TODO: Build and train your network\n#alexnet = tv.models.alexnet(pretrained=True)\n# Use GPU if it's available\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = tv.models.vgg11(pretrained=True)\nprint(model)\n#Freeze parameters, turn off gradient for the model\nfor param in model.parameters():\n    param.requires_grad = False\n#Define new classifier \nclassifier = nn.Sequential(nn.Linear(25088,4096),\n                           nn.ReLU(),\n                           nn.Dropout(p=0.5),\n                           nn.Linear(4096,4096),\n                           nn.ReLU(),\n                           nn.Dropout(p=0.5),\n                           nn.Linear(4096,len(cat_to_name)),\n                           nn.LogSoftmax(dim=1))\nmodel.classifier = classifier \nprint(model)\n\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.classifier.parameters(), lr=0.001)\nmodel.to(device)\n\nepochs = 4\nfor 
epoch in range(epochs):\n train_loss = 0\n valid_loss = 0\n accuracy = 0\n \n # Training the model\n model.train()\n counter = 0\n for inputs, labels in train_loader:\n # Move to device\n inputs, labels = inputs.to(device), labels.to(device)\n # Clear optimizers\n optimizer.zero_grad()\n # Forward pass\n output = model.forward(inputs)\n # Loss\n loss = criterion(output, labels)\n # Calculate gradients (backpropogation)\n loss.backward()\n # Adjust parameters based on gradients\n optimizer.step()\n # Add the loss to the training set's rnning loss\n train_loss += loss.item()*inputs.size(0)\n \n # Print the progress of our training\n counter += 1\n print(counter, \"/\", len(train_loader))\n \n # Evaluating the model\n model.eval()\n counter = 0\n # Tell torch not to calculate gradients\n with torch.no_grad():\n for inputs, labels in valid_loader:\n # Move to device\n inputs, labels = inputs.to(device), labels.to(device)\n # Forward pass\n output = model.forward(inputs)\n # Calculate Loss\n valloss = criterion(output, labels)\n # Add loss to the validation set's running loss\n valid_loss += valloss.item()*inputs.size(0)\n \n # Since our model outputs a LogSoftmax, find the real \n # percentages by reversing the log function\n output = torch.exp(output)\n # Get the top class of the output\n top_p, top_class = output.topk(1, dim=1)\n # See how many of the classes were correct?\n equals = top_class == labels.view(*top_class.shape)\n # Calculate the mean (get the accuracy for this batch)\n # and add it to the running accuracy for this epoch\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n \n # Print the progress of our evaluation\n counter += 1\n print(counter, \"/\", len(valid_loader))\n \n # Get the average loss for the entire epoch\n train_loss = train_loss/len(train_loader.dataset)\n valid_loss = valid_loss/len(valid_loader.dataset)\n # Print out the information\n print('Accuracy: ', accuracy/len(valid_loader))\n print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(epoch, train_loss, valid_loss))\n \n# TODO: Do validation on the test set\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") \nmodel.eval()\naccuracy = 0\ntest_loss = 0\nwith torch.no_grad(): \n for images, labels in test_loader:\n images, labels = images.to(device), labels.to(device)\n output = model(images)\n test_loss += criterion(output, labels).item()\n \n output = torch.exp(output)\n top_prb, top_class = output.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item() \n \ntest_loss = test_loss/len(test_loader.dataset)\nprint('Accuracy: ', accuracy/len(test_loader))\nprint('Epoch: {} \\tTest Loss: {:.6f}'.format(epoch, test_loss))\nmodel.train();\n\n# TODO: Save the checkpoint \n\ndef save_checkpoint(model, path):\n model.class_to_idx = train_set.class_to_idx\n checkpoint = {'input_size': 25088,\n 'output_size': len(cat_to_name),\n #'hidden_layers': [each.out_features for each in model.hidden_layers], error: vgg object has no such attribute\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'class_to_idx': model.class_to_idx,\n 'classifier': classifier,\n 'classifier_state_dict': model.classifier.state_dict()}\n torch.save(checkpoint, path)\n return checkpoint\ncheckpoint = save_checkpoint(model,'checkpoint.pth')\n#print(checkpoint)\n#print(classifier)\n#print(classifier.state_dict())\n#print(optimizer.state_dict())\nprint(optimizer)\n\n# TODO: Write a function 
that loads a checkpoint and rebuilds the model\ndef load_checkpoint(path):\n    checkpoint = torch.load(path)\n    model = tv.models.vgg11(pretrained=True)\n    #model.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])\n    classifier = checkpoint['classifier']\n    \n    for par in model.parameters():\n        par.requires_grad = False\n    \n    classifier.load_state_dict(checkpoint['classifier_state_dict'])\n    model.classifier = classifier \n    model.load_state_dict(checkpoint['state_dict'])\n    optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)\n    optimizer.load_state_dict(checkpoint['optimizer'])\n    return checkpoint, model, optimizer\n#checkpoint, model, optimizer = load_checkpoint('checkpoint.pth')\n#print(checkpoint,model,optimizer)\n\ndef process_image(image_path):\n    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n        returns a Numpy array\n    '''\n    # TODO: Process a PIL image for use in a PyTorch model\n    # Load Image\n    image = Image.open(image_path)\n    \n    # Get the dimensions of the image\n    width, height = image.size\n    \n    # Resize by keeping the aspect ratio, but changing the dimension\n    # so the shortest side is 255px\n    if width < height:\n        image = image.resize((255, int(255*(height/width))))\n    else:\n        image = image.resize((int(255*(width/height)), 255))\n    \n    # Get the dimensions of the new image size\n    width, height = image.size\n    \n    # Set the coordinates to do a center crop of 224 x 224\n    left = (width - 224)/2\n    top = (height - 224)/2\n    right = (width + 224)/2\n    bottom = (height + 224)/2\n    image = image.crop((left, top, right, bottom))\n    \n    # Turn image into numpy array\n    image = np.array(image)\n    \n    # Make all values between 0 and 1\n    image = image/255\n    \n    # Normalize based on the preset mean and standard deviation\n    image[0] = (image[0] - 0.485)/0.229\n    image[1] = (image[1] - 0.456)/0.224\n    image[2] = (image[2] - 0.406)/0.225\n    \n    # Make the color channel dimension first instead of last\n    image = image.transpose((2, 0, 1))\n    \n    # Add a fourth dimension to the beginning to indicate batch size\n    #img = img[np.newaxis,:]\n    \n    # Turn into a torch tensor\n    final_image = torch.FloatTensor([image])\n\n    return final_image\n\n    '''print(np_image)\n    print(np_image.shape)\n    mean = np.array([0.485, 0.456, 0.406])\n    std = np.array([0.229, 0.224, 0.225])\n    print(mean.shape)\n    print(std.shape)\n    np_image = (np_image - mean) / std\n    np_image[0] = (np_image[0] - 0.485)/0.229\n    np_image[1] = (np_image[1] - 0.456)/0.224\n    np_image[2] = (np_image[2] - 0.406)/0.225\n    \n    # Change to a torch tensor\n    final_image = torch.FloatTensor([np_image])\n\n    return final_image'''\n    \ndef imshow(image, ax=None, title=None):\n    import matplotlib.pyplot as plt\n    \"\"\"Imshow for Tensor.\"\"\"\n    if ax is None:\n        fig, ax = plt.subplots()\n    \n    # PyTorch tensors assume the color channel is the first dimension\n    # but matplotlib assumes it is the third dimension\n    image = image.numpy().transpose((1, 2, 0))\n    \n    # Undo preprocessing\n    mean = np.array([0.485, 0.456, 0.406])\n    std = np.array([0.229, 0.224, 0.225])\n    image = std * image + mean\n    \n    # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n    image = np.clip(image, 0, 1)\n    \n    ax.imshow(image)\n    \n    return ax\ndef predict(image_path, model, topk=5):\n    ''' Predict the class (or classes) of an image using a trained deep learning model.\n    '''\n    # TODO: Implement the code to predict the class from an image file\n    image = process_image(image_path) \n    print(image.shape)\n    #image = image.squeeze(0)\n    image = image.to(device)\n    
print(image.shape)\n    # Send image to get output\n    output = model.forward(image)\n    \n    # Reverse output's log function\n    output = torch.exp(output)\n    \n    # Get the top predicted class, and the output percentage for that class\n    probs, classes = output.topk(topk, dim=1)\n    #print(probs,classes)\n    return probs, classes, image\n\n# TODO: Display an image along with the top 5 classes\nmodel.eval()\nclass_names = []\n# Process Image\nimage_path = 'input2.png'\n\n# Give image to model to predict output\nprobs, classes, image = predict(image_path, model)\nprint('probs: {} and classes: {}'.format(probs, classes))\n\nprint(probs,classes)\nprint(cat_to_name)\nprint(model.class_to_idx)\n#probs=probs.detach.numpy()\n#classes=classes.detach.numpy()\nprint(probs,classes)\nprint(classes[0])\nidx_to_class = {v: k for k, v in model.class_to_idx.items()}\nfor i in classes[0]:\n    class_names.append(idx_to_class[int(i)])\nprint(class_names)\nclass_names = [cat_to_name[c] for c in class_names]\nprint('classnames: {}'.format(class_names))\n# Show the image\nax = imshow(image.cpu().squeeze(0))\nplt.barh(class_names, probs[0].detach().cpu().numpy())\nplt.xlabel('Probability')\nplt.title('Predicted Flower Names')\nplt.show()\n","sub_path":"train01.py","file_name":"train01.py","file_ext":"py","file_size_in_byte":11720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"17912258","text":"import requests\nfrom selenium import webdriver\nimport lxml.html\nfrom threading import Timer\nimport time\n\ns = requests.session()\n\ndef geturl():\n    item_url = 'http://www.kickz.com/de/jordan-sneakers-low-air-jordan-1-mid-gs-white_white_white-141511017'\n    return item_url\n\n\ndef targetsize(avsize):\n    # preferred sizes, in priority order\n    targetsize = ['41','38','42']\n    # compare the preferred sizes against the available sizes\n    for targetsize in targetsize:\n        if targetsize in avsize:\n            return targetsize\n\n\n\ndef getelement(url):\n    r = s.get(url)\n    r.encoding = 'utf-8'\n    el = lxml.html.fromstring(r.content)\n    return el\n\ndef getavliablesize(el):\n    # get the product name\n    name = el.xpath('//div[@class=\"content l-table-cell\"]/span[@class=\"title\"]/text()')\n    # get the product sizes\n    size = el.xpath('//div[@class=\"chooseSizeLinkContainer active\"]//a[contains(@id,\"EU-\")]//@data-size')\n    print('Name: ' + name[0])\n    for s in size:\n        print('size ' + s + ' is available')\n    print('---------------------------------------')\n\n    return(size)\n\ndef addtobasket(el, size, cookies):\n    # get the internal id of the product size\n    innerId = el.xpath('//div[@class=\"chooseSizeLinkContainer active\"]//a[@data-size=' + size + ']//@onclick')[0].strip()\n    innerId = innerId.replace('\\'', '')\n    innerId = innerId.replace(' ', '')\n    innerId = innerId.strip('ProductDetails.changeSizeAffectedLinks(')\n    innerId = innerId.strip(');')\n    innerId = [str(x) for x in innerId.split(',')][0].strip()\n    # get the ttoken\n    ttoken = el.xpath('//div[@class=\"add-wishlist-basket-containter\"]//input[@id=\"ttoken\"]//@value')\n    ttoken = \"\".join(ttoken)\n    data = {\n        'productVariantIdAjax': innerId,\n        'ttoken': ttoken\n    }\n    try:\n        r = s.post('https://www.kickz.com/en/cart/ajaxAdd', data=data, cookies=cookies)\n    except(requests.exceptions.ConnectionError):\n        print('size ' + size + ' could not be added to the basket')\n    else:\n        print('size ' + size + ' was added to the basket')\n\ndef checkout():\n    return None\n\n\ndef main():\n    # set the request headers\n    header = {\n        'Upgrade-Insecure-Requests': '1',\n        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'\n    }\n    # set the login credentials\n    logindata = {\n        'login': '553594151@qq.com',\n        'password': '950625lk'\n    }\n    # set the url\n    url = geturl()\n    # fetch the page content\n    el = getelement(url)\n    # 
get the product info\n    avliablesize = getavliablesize(el)\n    # pick the target size\n    size = targetsize(avliablesize)\n    # simulate login to obtain cookies\n    s.post('https://www.kickz.com/en/user/login', data=logindata, headers=header)\n    cookies = s.cookies\n    # add to the basket\n    addtobasket(el, size, cookies)\n\n\n    # -----------------------------------------------\n    print('---------------------------------------')\n    # -----------------------------------------------\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"kickz.py","file_name":"kickz.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"585268195","text":"import os,sys,re,csv\r\nimport pickle\r\nfrom collections import Counter, defaultdict\r\nimport numpy as np\r\nimport scipy\r\nimport math\r\nimport random\r\nimport nltk\r\nfrom scipy.spatial.distance import cosine\r\nfrom nltk.corpus import stopwords\r\nfrom numba import jit\r\nfrom nltk.tokenize import word_tokenize\r\n\r\n\r\n\r\n#... (1) First load in the data source and tokenize into one-hot vectors.\r\n#... Since one-hot vectors are 0 everywhere except for one index, we only need to know that index.\r\n\r\n\r\n#... (2) Prepare a negative sampling distribution table to draw negative samples from.\r\n#... Consistent with the original word2vec paper, this distribution should be exponentiated.\r\n\r\n\r\n#... (3) Run a training function for a number of epochs to learn the weights of the hidden layer.\r\n#... This training will occur through backpropagation from the context words down to the source word.\r\n\r\n\r\n#... (4) Re-train the algorithm using different context windows. See what effect this has on your results.\r\n\r\n\r\n#... (5) Test your model. Compare cosine similarities between learned word vectors.\r\n\r\n#.................................................................................\r\n#... global variables\r\n#.................................................................................\r\n\r\n\r\nrandom.seed(10)\r\nnp.random.seed(10)\r\nrandcounter = 10\r\nnp_randcounter = 10\r\n\r\nvocab_size = 0\r\nhidden_size = 100\r\nuniqueWords = [\"\"] #... list of all unique tokens\r\nwordcodes = {} #... dictionary mapping of words to indices in uniqueWords\r\nwordcounts = Counter() #... how many times each token occurs\r\nsamplingTable = [] #... table to draw negative samples from\r\n\r\n#.................................................................................\r\n#... load in the data and convert tokens to one-hot indices\r\n#.................................................................................\r\n\r\ndef loadData(filename):\r\n\tglobal uniqueWords, wordcodes, wordcounts\r\n\toverride = False\r\n\tif override:\r\n\t\tfullrec = pickle.load(open(\"w2v_fullrec.p\",\"rb\"))\r\n\t\twordcodes = pickle.load( open(\"w2v_wordcodes.p\",\"rb\"))\r\n\t\tuniqueWords = pickle.load(open(\"w2v_uniqueWords.p\",\"rb\"))\r\n\t\twordcounts = pickle.load(open(\"w2v_wordcounts.p\",\"rb\"))\r\n\t\tfullrec = [int(r) for r in fullrec]\r\n\t\tprint(len(fullrec))\r\n\t\treturn fullrec\r\n\r\n\r\n\t#... load in the unlabeled data file. You can load in a subset for debugging purposes.\r\n\thandle = open(filename, \"r\", encoding=\"utf8\")\r\n\tfullconts = handle.read().split(\"\\n\")\r\n\tfullconts = [entry.split(\"\\t\")[1].replace(\"
    \", \"\") for entry in fullconts[1:(len(fullconts)-1)]]\r\n\r\n\t#... apply simple tokenization (whitespace and lowercase)\r\n\tfullconts = [\" \".join(fullconts).lower()]\r\n\tfullconts = str(fullconts)\r\n\r\n\tprint (\"Generating token stream...\")\r\n\tfullrec = []\r\n\tmin_count = 50\r\n\tstop_words = stopwords.words('english')\r\n\r\n\ttokenize = [token for token in word_tokenize(fullconts.replace(\"\\\\\", \"\")) if token not in stop_words]\r\n\ttokenize_words = [fullrec.append(word) for word in tokenize if word.isalpha()]\r\n\torigcounts = Counter(fullrec)\r\n\r\n\tprint (\"Performing minimum thresholding..\")\r\n\r\n\tfullrec_filtered = []\r\n\tfor token in fullrec:\r\n\t\tif origcounts[token] >= min_count:\r\n\t\t\tfullrec_filtered.append(token)\r\n\t\telse:\r\n\t\t\tfullrec_filtered.append(\"\")\r\n\r\n\twordcounts = {}\r\n\r\n\tinitialize = 1\r\n\tfor token in fullrec_filtered:\r\n\t\tif token in origcounts:\r\n\t\t\twordcounts[token] = origcounts[token]\r\n\t\telse:\r\n\t\t\twordcounts[token] = initialize\r\n\t\t\tinitialize += 1\r\n\r\n\tfullrec = np.array(fullrec_filtered)\r\n\r\n\tprint (\"Producing one-hot indicies\")\r\n\r\n\tuniqueWords = sorted(wordcounts)\r\n\twordcodes = {}\r\n\r\n\tfor i, token in enumerate(uniqueWords):\r\n\t\twordcodes[token] = i\r\n\r\n\tfor i, token in enumerate(wordcodes):\r\n\t\tnp.place(fullrec, fullrec == token, int(i))\r\n\r\n\t#... close input file handle\r\n\thandle.close()\r\n\r\n\tpickle.dump(fullrec, open(\"w2v_fullrec.p\",\"wb+\"))\r\n\tpickle.dump(wordcodes, open(\"w2v_wordcodes.p\",\"wb+\"))\r\n\tpickle.dump(uniqueWords, open(\"w2v_uniqueWords.p\",\"wb+\"))\r\n\tpickle.dump(dict(wordcounts), open(\"w2v_wordcounts.p\",\"wb+\"))\r\n\r\n\treturn fullrec\r\n\r\n#.................................................................................\r\n#... compute sigmoid value\r\n#.................................................................................\r\n@jit(nopython=True)\r\ndef sigmoid(x):\r\n\treturn float(1)/(1+np.exp(-x))\r\n\r\n#.................................................................................\r\n#... generate a table of cumulative distribution of words\r\n#.................................................................................\r\n\r\n\r\ndef negativeSampleTable(train_data, uniqueWords, wordcounts, exp_power=0.75):\r\n\t#global wordcounts\r\n\tmax_exp_count = 0\r\n\r\n\tprint (\"Generating exponentiated count vectors\")\r\n\r\n\texp_count_array = []\r\n\tfor token in uniqueWords:\r\n\t\tvalue = wordcounts[token]\r\n\t\texp_count_array.append(value ** exp_power)\r\n\tmax_exp_count = sum(exp_count_array)\r\n\r\n\tprint (\"Generating distribution\")\r\n\r\n\tprob_dist = [value/max_exp_count for value in exp_count_array]\r\n\r\n # Testing that the sum of prob_dist equals 1:\r\n\t# print(sum(prob_dist))\r\n\r\n\tprint (\"Filling up sampling table\")\r\n\ttable_size = 1e7\r\n\tcumulative_dict = {}\r\n\tct=0\r\n\r\n\tfor idx, val in enumerate(prob_dist):\r\n\t\ti=0\r\n\t\tcheck_val = round(val*table_size)\r\n\t\twhile ct < table_size and i < check_val:\r\n\t\t\tcumulative_dict[ct]=idx\r\n\t\t\tct+=1\r\n\t\t\ti+=1\r\n\tprint(len(cumulative_dict.keys())==table_size)\r\n\r\n\treturn cumulative_dict\r\n\r\n#.................................................................................\r\n#... 
generate a specific number of negative samples\r\n#.................................................................................\r\n\r\ndef generateSamples(context_idx, num_samples):\r\n\tglobal samplingTable, uniqueWords, randcounter\r\n\tresults = []\r\n\tfor n in range(0, num_samples):\r\n\t\twhile len(results) != num_samples:\r\n\t\t\trand = random.randint(0, len(samplingTable)-1)\r\n\t\t\tif context_idx != samplingTable[rand]:\r\n\t\t\t\tresults.append(samplingTable[rand])\r\n\r\n\treturn results\r\n\r\n# @jit(nopython=True)\r\ndef performDescent(num_samples, learning_rate, center_token, context_word_ids,W1,W2,negative_indices):\r\n\tnll_new = 0\r\n\tchunks = [negative_indices[x:x+2] for x in range(0, len(negative_indices), 2)]\r\n\tfor k in range(0, len(context_word_ids)):\r\n\t\tneg_ll_total = 0\r\n\t\tcontext_index = context_word_ids[k]\r\n\t\th = np.array(W1[center_token])\r\n\t\tW2_p = np.array(W2[context_index])\r\n\r\n\t\t# Updating W2 for the positive context sample\r\n\t\ts = sigmoid(np.dot(W2[context_index], h))\r\n\t\tW2[context_index] = W2_p - (learning_rate * ((s - 1) * h))\r\n\r\n\t\ttot_p_neg = 0\r\n\r\n\t\t# iterating over the negative samples for the given context word\r\n\t\tfor neg in chunks[k]:\r\n\t\t\t# Updating W prime for the two negative samples\r\n\t\t\tW2_p_neg = np.array(W2[neg])\r\n\t\t\ts_neg = sigmoid(np.dot(W2[neg], h))\r\n\t\t\tW2[neg] = W2_p_neg - (learning_rate * ((s_neg - 0) * h))\r\n\r\n\t\t\ttot_p_neg += (sigmoid(np.dot(W2_p_neg, h) - 0) * W2_p_neg)\r\n\r\n\t\t\t# Negative LL for both the negative samples\r\n\t\t\tnsig = sigmoid(np.negative(np.dot(W2[neg], h)))\r\n\t\t\tneg_ll_total += np.log(nsig)\r\n\r\n\t\t# Updating W1 for the center token\r\n\t\ts2_pos = sigmoid(np.dot(W2_p, h))\r\n\t\tpos_vj = (s2_pos - 1)* W2_p\r\n\t\ttotal_vj = (pos_vj + tot_p_neg)\r\n\t\tW1[center_token] = h - (learning_rate * total_vj)\r\n\r\n\t\t# calculating the negative LL for the positive context token\r\n\t\tpos = (np.negative(np.log(sigmoid(np.dot(W2[context_index], h)))))\r\n\t\tnll_new += pos - neg_ll_total\r\n\r\n\treturn [nll_new]\r\n\r\n#.................................................................................\r\n#... learn the weights for the input-hidden and hidden-output matrices\r\n#.................................................................................\r\n\r\n\r\ndef trainer(curW1=None, curW2=None):\r\n\tglobal uniqueWords, wordcodes, fullsequence, vocab_size, hidden_size,np_randcounter, randcounter\r\n\tvocab_size = len(uniqueWords) #... unique characters\r\n\thidden_size = 100 #... number of hidden neurons\r\n\tcontext_window = [-2,-1,1,2] #... specifies which context indices are output. Indices relative to target word. Don't include index 0 itself.\r\n\tnll_results = [] #... keep array of negative log-likelihood after every 1000 iterations\r\n\r\n\t#... determine how much of the full sequence we can use while still accommodating the context window\r\n\tstart_point = int(math.fabs(min(context_window)))\r\n\tend_point = len(fullsequence)-(max(max(context_window),0))\r\n\tmapped_sequence = fullsequence\r\n\r\n\t#... initialize the weight matrices. W1 is from input->hidden and W2 is from hidden->output.\r\n\tif curW1==None:\r\n\t\tnp_randcounter += 1\r\n\t\tW1 = np.random.uniform(-.5, .5, size=(vocab_size, hidden_size))\r\n\t\tW2 = np.random.uniform(-.5, .5, size=(vocab_size, hidden_size))\r\n\telse:\r\n\t\t#... initialized from pre-loaded file\r\n\t\tW1 = curW1\r\n\t\tW2 = curW2\r\n\r\n\t#... 
set the training parameters\r\n\tepochs = 5\r\n\tnum_samples = 2\r\n\tlearning_rate = 0.05\r\n\tnll = 0\r\n\titernum = 0\r\n\r\n\t#... Begin actual training\r\n\tfor j in range(0,epochs):\r\n\t\tprint (\"Epoch: \", j)\r\n\t\tprevmark = 0\r\n\r\n\t\t#... For each epoch, redo the whole sequence...\r\n\t\tfor i in range(start_point,end_point):\r\n\r\n\t\t\tif (float(i)/len(mapped_sequence))>=(prevmark+0.1):\r\n\t\t\t\tprint (\"Progress: \", round(prevmark+0.1,1))\r\n\t\t\t\tprevmark += 0.1\r\n\t\t\tif iternum%10000==0:\r\n\t\t\t\tprint (\"Negative likelihood: \", nll)\r\n\t\t\t\tnll_results.append(nll)\r\n\t\t\t\tnll = 0\r\n\r\n\t\t\ttoken_index = mapped_sequence[i]\r\n\t\t\tif token_index == 0:\r\n\t\t\t\tcontinue\r\n\t\t\tcenter_token = token_index\r\n\r\n\t\t\titernum += 1\r\n\t\t\t#... now propagate to each of the context outputs\r\n\t\t\tmapped_context = [mapped_sequence[i+ctx] for ctx in context_window]\r\n\t\t\tnegative_indices = []\r\n\t\t\tfor q in mapped_context:\r\n\t\t\t\tnegative_indices += generateSamples(q, num_samples)\r\n\t\t\t[nll_new] = performDescent(num_samples, learning_rate, center_token, mapped_context, W1,W2, negative_indices)\r\n\t\t\tnll += nll_new\r\n\r\n\tfor nll_res in nll_results:\r\n\t\tprint (nll_res)\r\n\treturn [W1,W2]\r\n\r\n#.................................................................................\r\n#... Load in a previously-saved model. Loaded model's hidden and vocab size must match current model.\r\n#.................................................................................\r\n\r\ndef load_model():\r\n\thandle = open(\"saved_W1.data\",\"rb\")\r\n\tW1 = np.load(handle)\r\n\thandle.close()\r\n\thandle = open(\"saved_W2.data\",\"rb\")\r\n\tW2 = np.load(handle)\r\n\thandle.close()\r\n\treturn [W1,W2]\r\n\r\n#.................................................................................\r\n#... Save the current results to an output file. Useful when computation is taking a long time.\r\n#.................................................................................\r\n\r\ndef save_model(W1,W2):\r\n\thandle = open(\"saved_W1.data\",\"wb+\")\r\n\tnp.save(handle, W1, allow_pickle=False)\r\n\thandle.close()\r\n\r\n\thandle = open(\"saved_W2.data\",\"wb+\")\r\n\tnp.save(handle, W2, allow_pickle=False)\r\n\thandle.close()\r\n\r\n\r\n#... so in the word2vec network, there are actually TWO weight matrices that we are keeping track of. One of them represents the embedding\r\n#... of a one-hot vector to a hidden layer lower-dimensional embedding. The second represents the reversal: the weights that help an embedded\r\n#... vector predict similarity to a context word.\r\n\r\n#.................................................................................\r\n#... code to start up the training function.\r\n#.................................................................................\r\nword_embeddings = []\r\nproj_embeddings = []\r\ndef train_vectors(preload=False):\r\n\tglobal word_embeddings, proj_embeddings\r\n\tif preload:\r\n\t\t[curW1, curW2] = load_model()\r\n\telse:\r\n\t\tcurW1 = None\r\n\t\tcurW2 = None\r\n\t[word_embeddings, proj_embeddings] = trainer(curW1,curW2)\r\n\tsave_model(word_embeddings, proj_embeddings)\r\n\r\n#.................................................................................\r\n#... 
for the averaged morphological vector combo, estimate the new form of the target word\r\n#.................................................................................\r\n\r\n# def morphology(word_seq):\r\n# \tglobal word_embeddings, proj_embeddings, uniqueWords, wordcodes\r\n# \tembeddings = word_embeddings\r\n# \tvectors = [word_seq[0], # suffix averaged\r\n# \tembeddings[wordcodes[word_seq[1]]]]\r\n# \tvector_math = vectors[0]+vectors[1]\r\n# \t#... find whichever vector is closest to vector_math\r\n# \t#... (TASK) Use the same approach you used in function prediction() to construct a list\r\n# \t#... of top 10 most similar words to vector_math. Return this list.\r\n#\r\n#\r\n\r\n#.................................................................................\r\n#... for the triplet (A,B,C) find D such that the analogy A is to B as C is to D is most likely\r\n#.................................................................................\r\n\r\n# def analogy(word_seq):\r\n# \tglobal word_embeddings, proj_embeddings, uniqueWords, wordcodes\r\n# \tembeddings = word_embeddings\r\n# \tvectors = [embeddings[wordcodes[word_seq[0]]],\r\n# \tembeddings[wordcodes[word_seq[1]]],\r\n# \tembeddings[wordcodes[word_seq[2]]]]\r\n# \tvector_math = -vectors[0] + vectors[1] - vectors[2] # + vectors[3] = 0\r\n# \t#... find whichever vector is closest to vector_math\r\n#\r\n# \tvectorized_list = []\r\n# \tfor word in uniqueWords:\r\n# \t\tword_idx = uniqueWords.index(word)\r\n# \t\tfull_vector_math = vector_math + sum(proj_embeddings[word_idx])\r\n# \t\tvectorized_list.append((word, full_vector_math))\r\n#\r\n# \tvectorized_list.sort(key=lambda x: abs(x[1]))\r\n# \tnew_vectorized_list = vectorized_list[:10]\r\n#\r\n# \treturn new_vectorized_list\r\n\r\n\t# ... (TASK) Use the same approach you used in function prediction() to construct a list\r\n\t# ... of top 10 most similar words to vector_math. Return this list.\r\n\r\n#.................................................................................\r\n#... find top 10 most similar words to a target word\r\n#.................................................................................\r\n\r\ndef prediction(target_word):\r\n\tglobal word_embeddings, uniqueWords, wordcodes\r\n\ttargets = [target_word]\r\n\toutputs = []\r\n\ttarget_idx = uniqueWords.index(target_word)\r\n\tfor word in uniqueWords:\r\n\t\tword_idx = uniqueWords.index(word)\r\n\t\tdistance = cosine(word_embeddings[word_idx], word_embeddings[target_idx])\r\n\t\tword_similarity = 1 - distance\r\n\t\toutputs.append((word, word_similarity))\r\n\r\n\t# sorted(outputs, key = lambda tup: tup[1])\r\n\toutputs.sort(key=lambda tup: tup[1], reverse=True)\r\n\tnew_lst = outputs[0:10]\r\n\r\n\treturn new_lst\r\n\r\ndef task_4_prediction(row):\r\n\tglobal word_embeddings, uniqueWords, wordcodes\r\n\ts1_idx = uniqueWords.index(row[1])\r\n\ts2_idx = uniqueWords.index(row[2])\r\n\tdistance = cosine(word_embeddings[s1_idx], word_embeddings[s2_idx])\r\n\tword_similarity = 1 - distance\r\n\treturn [row[0], word_similarity]\r\n\r\nif __name__ == '__main__':\r\n\tif len(sys.argv)==2:\r\n\t\tfilename = sys.argv[1]\r\n\t\t#... load in the file, tokenize it and assign each token an index.\r\n\t\t#... the full sequence of characters is encoded in terms of their one-hot positions\r\n\r\n\t\tfullsequence= loadData(filename)\r\n\t\tprint (\"Full sequence loaded...\")\r\n\r\n\t\t#... 
now generate the negative sampling table\r\n\t\tprint (\"Total unique words: \", len(uniqueWords))\r\n\t\tprint(\"Preparing negative sampling table\")\r\n\t\tsamplingTable = negativeSampleTable(fullsequence, uniqueWords, wordcounts)\r\n\r\n\t\t# ... we've got the word indices and the sampling table. Begin the training.\r\n\t\t# ... NOTE: If you have already trained a model earlier, preload the results (set preload=True) (This would save you a lot of unnecessary time)\r\n\t\t# ... If you just want to load an earlier model and NOT perform further training, comment out the train_vectors() line\r\n\t\t# ... ... and uncomment the load_model() line\r\n\r\n\t\ttrain_vectors(preload=False)\r\n\t\t[word_embeddings, proj_embeddings] = load_model()\r\n\r\n\t\t# ... we've got the trained weight matrices. Now we can do some predictions\r\n\t\ttargets = [\"good\", \"bad\", \"scary\", \"funny\"]\r\n\t\tfor targ in targets:\r\n\t\t\tprint(\"Target: \", targ)\r\n\t\t\tbestpreds= (prediction(targ))\r\n\t\t\tfor pred in bestpreds:\r\n\t\t\t\tprint (pred[0],\":\",pred[1])\r\n\t\t\tprint (\"\\n\")\r\n\r\n\r\n\t\t#... try an analogy task. The array should have three entries, A,B,C of the format: A is to B as C is to ?\r\n\t\t# print (analogy([\"son\", \"daughter\", \"man\"]))\r\n\t\t# print (analogy([\"thousand\", \"thousands\", \"hundred\"]))\r\n\t\t# print (analogy([\"amusing\", \"fun\", \"scary\"]))\r\n\t\t# print (analogy([\"terrible\", \"bad\", \"amazing\"]))\r\n\r\n#.................................................................................\r\n##### The below is to open the new test data and see if the model classifies correctly #####\r\n##### Uncomment to get the intrinsic_predictions csv file created. #####\r\n #\r\n\t\t# rdata = []\r\n\t\t# f = open('intrinsic-test_v2.tsv', 'r', encoding = 'utf-8')\r\n\t\t# for x in f.readlines()[1:]:\r\n\t\t# \trdata.append(re.split('\\t', x.replace('\\n', '')))\r\n #\r\n\t\t# totals = []\r\n\t\t# for row in rdata:\r\n\t\t# \ttotals.append(task_4_prediction(row))\r\n #\r\n\t\t# with open('intrinsic_predictions_1.csv', 'w', newline = '') as results_csv:\r\n\t\t# \tr_csv = csv.writer(results_csv, delimiter = ',')\r\n\t\t# \tr_csv.writerow([\"id\", \"similarity\"])\r\n\t\t# \tfor x in totals:\r\n\t\t# \t\tr_csv.writerow([x[0], x[1]])\r\n\t\t# results_csv.close()\r\n\t\t# f.close\r\n\r\n\t\t#... try morphological task. Input is averages of vector combinations that use some morphological change.\r\n\t\t#... see how well it predicts the expected target word when using word_embeddings vs proj_embeddings in\r\n\t\t#... 
the morphology() function.\r\n\r\n\t\t# s_suffix = [word_embeddings[wordcodes[\"stars\"]] - word_embeddings[wordcodes[\"star\"]]]\r\n\t\t# others = [[\"types\", \"type\"],\r\n\t\t# [\"ships\", \"ship\"],\r\n\t\t# [\"values\", \"value\"],\r\n\t\t# [\"walls\", \"wall\"],\r\n\t\t# [\"spoilers\", \"spoiler\"]]\r\n\t\t# for rec in others:\r\n\t\t# \ts_suffix.append(word_embeddings[wordcodes[rec[0]]] - word_embeddings[wordcodes[rec[1]]])\r\n\t\t# s_suffix = np.mean(s_suffix, axis=0)\r\n\t\t# print (morphology([s_suffix, \"techniques\"]))\r\n\t\t# print (morphology([s_suffix, \"sons\"]))\r\n\t\t# print (morphology([s_suffix, \"secrets\"]))\r\n\r\n\r\n\telse:\r\n\t\tprint (\"Please provide a valid input filename\")\r\n\t\tsys.exit()\r\n","sub_path":"word2vec_v2.py","file_name":"word2vec_v2.py","file_ext":"py","file_size_in_byte":18132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"9185151","text":"#!/usr/bin/env python\n\nimport glob\nimport os\nimport re\nimport sys\n\n\ninfile = '/etc/jailkit/jk_init.ini'\noutfile = infile + '.new'\n\ndirs = ['/lib', '/lib64', '/usr/lib', '/usr/lib64']\ndirs.extend(glob.glob('/lib/*-gnu'))\ndirs.extend(glob.glob('/usr/lib/*-gnu'))\n\nline_re = re.compile(r'^(\\s*paths\\s*=\\s*)(.*)')\nsplit_re = re.compile(r'\\s*,\\s*')\nsuffix_re = re.compile(r'(\\.so)\\..*')\n\nwith open(infile, 'r') as inf, open(outfile, 'w') as outf:\n outf.write('# FIXED\\n')\n for line in inf:\n line_match = line_re.match(line)\n if line_match is None:\n outf.write(line)\n continue\n paths = split_re.split(line_match.group(2))\n matches = []\n for path in paths:\n if '/lib' not in path or '.so' not in path:\n matches.append(path)\n continue\n basename = os.path.basename(path)\n basename = suffix_re.sub(r'\\1.*', basename)\n for d in dirs:\n pattern = os.path.join(d, basename)\n if glob.glob(pattern):\n if pattern not in matches:\n matches.append(pattern)\n outf.write(line_match.group(1) + ', '.join(matches) + '\\n')\n\nif not os.path.exists(infile + '.orig'):\n os.rename(infile, infile + '.orig')\n\nos.rename(outfile, infile)\n","sub_path":"jk_init_fixer.py","file_name":"jk_init_fixer.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"373720878","text":"from django_dynamic_fixture import G, N\nimport random\nfrom django.core.urlresolvers import reverse\nfrom django_webtest import WebTest\nimport django.contrib.auth\nfrom board.models import Post\n\n\nclass BoardTests(WebTest):\n def setUp(self):\n User = django.contrib.auth.get_user_model()\n self.user = User.objects.create_user(username='testUser', password='test')\n self.another_user = User.objects.create_user(username='anotherUser', password='test')\n super(BoardTests, self).setUp()\n\n def add_reply_to(self, p, **kwargs):\n r = N(Post, fill_nullable_fields=False, **kwargs)\n r.in_reply_to = p\n r.save()\n return r\n\n\nclass TestPosting(BoardTests):\n def fill_form(self, form, data):\n for k, i in data.items():\n form[k] = i\n return form\n\n def test_new_post_as_anonymous(self):\n page = reverse('board_post_list')\n response = self.app.get(page, user=self.user)\n data = {'title': 'testing%i' % random.randint(1, 10000), 'body_markup': 'this is a test body',\n 'summary': 'this is a test summary', 'username': 'anonymousUsername'}\n\n form = self.fill_form(response.forms['post-form'], data)\n response = form.submit().follow()\n #Where should we be redirected?\n 
self.assertIn(data['title'], response.content)\n self.assertNotEqual('testUser', response.context['post_list'][0].posted_by)\n self.assertIn(data['username'], response.content)\n\n def test_new_post_as_logged_in(self):\n page = reverse('board_post_list')\n data = {'title': 'testing%i' % random.randint(1, 10000), 'body_markup': 'this is a test body',\n 'summary': 'this is a test summary'}\n response = self.app.get(page, user=self.user)\n form = self.fill_form(response.forms['post-form'], data)\n response = form.submit().follow()\n self.assertIn(data['title'], response.content)\n self.assertIn('testUser', response.content)\n\n def test_a_reply(self):\n p = G(Post, fill_nullable_fields=False)\n page = reverse('board_post_view', kwargs={'pk': p.pk})\n response = self.app.get(page, user=self.user)\n data = {'title': 'testing%i' % random.randint(1, 10000), 'body_markup': 'this is a test body',\n 'summary': 'this is a test summary'}\n form = self.fill_form(response.forms['post-form'], data)\n response = form.submit().follow()\n self.assertEqual(page, response.request.path) # We should remain on the same page\n self.assertIn(data['title'], response.content)\n self.assertIn('testUser', response.content)\n\n def test_editing_as_same_user(self):\n page = reverse('board_post_list')\n response = self.app.get(page, user=self.user)\n data = {'title': 'testing editing %i' % random.randint(1, 10000), 'body_markup': 'edit this'}\n form = self.fill_form(response.forms['post-form'], data)\n response = form.submit().follow()\n post_page = response.click(description=data['title'])\n self.assertEqual(post_page.status_code, 200)\n editing_url = reverse('board_post_save', kwargs={'pk': post_page.context['post'].pk})\n\n # Unauthorized as anonymous & as different user\n response = self.app.get(editing_url, user=self.another_user, expect_errors=True)\n self.assertEqual(response.status_code, 403)\n response = self.app.get(editing_url, expect_errors=True)\n self.assertEqual(response.status_code, 403)\n\n # Form present when accessing as correct user\n response = self.app.get(editing_url, user=self.user)\n self.assertIn('new_post_form', response.context)\n\n # check that editing actually works\n form = response.forms['post-form']\n form['title'] = 'changed title'\n form['body_markup'] = 'this was edited'\n form['next'] = post_page.request.path\n response = form.submit().follow()\n self.assertEqual(response.context['post'].title, 'changed title')\n self.assertEqual(response.context['post'].body_markup, 'this was edited')\n\n\nclass TestPostViews(BoardTests):\n def test_post_and_replies_have_separate_views(self):\n p = G(Post, fill_nullable_fields=False)\n self.add_reply_to(p)\n url = reverse('board_post_view', kwargs={'pk': p.pk})\n response = self.app.get(url)\n self.assertTemplateUsed(response, 'board/replies_list.html')\n url = reverse('board_post_view_single', kwargs={'pk': p.pk})\n response = self.app.get(url)\n self.assertTemplateNotUsed(response, 'board/replies_list.html')\n\n def test_viewing_a_post_sets_last_read_for_user(self):\n p = G(Post, fill_nullable_fields=False)\n url = reverse('board_post_view', kwargs={'pk': p.pk})\n response = self.app.get(url, user='testUser')\n self.assertEqual(self.user.last_read_for(p), 0)\n # if we add a reply, and open the page again, we want the last_read_for value to be that of the reply\n r = self.add_reply_to(p)\n response = self.app.get(url, user='testUser')\n self.assertEqual(self.user.last_read_for(p), 
r.pk)","sub_path":"board/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"210209895","text":"import tensorflow as tf\nimport numpy as np\nimport utils as u\nimport callbacks as cb\nfrom tensorflow.keras.layers import Input, Dense, LSTM, Bidirectional\nfrom tensorflow.keras.optimizers import RMSprop, Adam, SGD\nfrom tensorflow.keras.models import Model, load_model\nfrom tensorflow.keras.callbacks import EarlyStopping\nimport tensorflow.keras.backend as K\nimport matplotlib.pyplot as plt\n\nbatch=3000\nclr=cb.OneCycleLR(\n max_lr=.1,\n end_percentage=0.1,\n scale_percentage=None,\n maximum_momentum=0.95,\n minimum_momentum=0.85,\n verbose=True)\nmin_lr = 1e-7\nmax_lr = 1e-2\n\n\n\nstrategy = tf.distribute.MirroredStrategy()\n\ndef holder(a,b):\n\treturn a, b\n\ntrain_set = u.PreTrainGenerator(\"../EvaluationDataset\", [u.butter_highpass_filter], [u.add_noise], batch_size = batch, scale=False\n )\nval_set = u.PreValGenerator(\"../PreTrainingDataset\", [u.butter_highpass_filter], [holder], batch_size = batch, scale=False)\n\nprint(len(train_set))\nprint(train_set.__getitem__(0)[0].shape)\nprint(len(val_set))\nprint(val_set.__getitem__(0)[0].shape)\n\nwith strategy.scope():\n\tinputs = Input((52,8))\n\tx = inputs\n\tdrop=0.028638817753399493\n\trec_drop = 0.14330185842693177\n\tunits=[450, 150, 400]\n\tseq=[True, True, False]\n\tfor i in range(3):\n\t\tx = LSTM(units[i], activation=\"tanh\", dropout=drop, recurrent_dropout=rec_drop, return_sequences=seq[i], name = \"lstm_{}\".format(i))(x)\n\toutputs = Dense(7, activation='softmax')(x)\n\tlstm = Model(inputs, outputs)\n\n#\tls = (LSTM(450, activation=\"tanh\", dropout=0.5,\n#\t\trecurrent_dropout=0.5, return_sequences=True, return_state=True))\n#\tx, h1, c1 = ls(inputs)\n#\tls2 = (LSTM(450, activation=\"tanh\", dropout=0.5, recurrent_dropout=0.5,\n#\t\t\treturn_sequences=True, return_state=True\n#\t\t\t))\n#\tx, h, c = ls2(x,initial_state = [h1,c1])\n#\tx = (LSTM(450, activation=\"tanh\", dropout=0.5, recurrent_dropout=0.5\n#\t\t\t)(x,initial_state = [h,c]))\n#\toutputs = Dense(7, activation='softmax')(x)\n#\tlstm=Model(inputs, outputs)\n#\t# clip your gradients!!\n\toptim = SGD(momentum=0.9, nesterov=True)\n\tlstm.compile(loss='sparse_categorical_crossentropy', optimizer=optim, metrics = ['accuracy'])\n#\t# https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html\n\nprint(lstm.summary())\nstopper = EarlyStopping(monitor = \"val_loss\", patience=100)\nhistory = lstm.fit(train_set, epochs=100, validation_data=val_set, callbacks=[stopper, clr], workers=16, use_multiprocessing=True, steps_per_epoch=len(train_set)//4)\nlstm.save(\"result/source_net\")\n\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"362930735","text":"#!/usr/bin/env python\n\nimport csv,json\nimport sys\n\n#Opens 'testData.csv' for reading and creates a new file 'jsonFile' for writing\ncsvfile = open('testData.csv', 'r')\njsonfile = open('testData.json', 'w')\n\n#Enter the field names for the data\nfieldnames = (\"Time\",\"Par1\",\"Par2\",\"Par3\",\"Level\",\"Date\")\nreader = csv.DictReader( csvfile, fieldnames)\nfor row in reader:\n json.dump(row,jsonfile)\n 
jsonfile.write('\\n')\n","sub_path":"Trashed/jsonConverter/csvtojson.py","file_name":"csvtojson.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"241669466","text":"from clawpack.pyclaw.solution import Solution\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pylab as pl\nfrom matplotlib import rc\n#rc('text', usetex=True)\nimport numpy as np\nimport os\n\nsolver_list=[\"HLLE\",\"Roe\",\"exact\"]\npath_list=[\"./_output_\"+i for i in [\"HLLE\",\"Roe\",\"exact\"]]\nspacing_list=[(40,80),(13,26),(4,8),(1,2)]\n\ngrid_list=[50*3**i for i in range(4)]\n\ndef get_last_h(path):\n sol=Solution(frame=10,read_aux=False,path=path,file_prefix='fort')\n h=np.copy(sol.state.q[0,:])\n x=sol.state.grid.x.centers\n nx=len(x)\n dx=x[1]-x[0]\n return h,x,dx,nx\n\ndef path_fine(solver):\n return(\"./_output_\"+solver+\"/4050\")\n\ndef coarse_from_fine(fine,spacing,len_coarse):\n #spacing should be a tuple\n a,b=spacing\n coarse=np.array([fine[a+i*(b+1)] for i in range(len_coarse)])\n return coarse\n\ndef L2_grid_function_norm(x,dx):\n return np.sqrt(dx)*np.linalg.norm(x,ord=2)\n\ndef L_infty_grid_function_norm(x):\n return np.linalg.norm(x,ord=np.inf)\n\ndef main():\n print(f\"L2 norm\")\n for idx, grid in enumerate(grid_list):\n print(f\"####Accuracy {grid} points####\")\n for solver in solver_list:\n path=\"./_output_\"+solver+\"/\"+str(grid)\n h,x,dx,nx = get_last_h(path)\n h_fine,x_fine,dx_fine,nx_fine = get_last_h(path_fine(solver))\n exact_coarse = coarse_from_fine(fine=h_fine,spacing=spacing_list[idx],len_coarse=len(h))\n diff = h-exact_coarse \n print(f\"{solver}: {100*round(L2_grid_function_norm(x=diff,dx=dx)/L2_grid_function_norm(x=exact_coarse,dx=dx),4)}%\")\n print(f\"L infty norm\")\n for idx, grid in enumerate(grid_list):\n print(f\"####Accuracy {grid} points####\")\n for solver in solver_list:\n path=\"./_output_\"+solver+\"/\"+str(grid)\n h,x,dx,nx=get_last_h(path)\n h_fine,x_fine,dx_fine,nx_fine=get_last_h(path_fine(solver))\n exact_coarse = coarse_from_fine(fine=h_fine,spacing=spacing_list[idx],len_coarse=len(h))\n diff=h-exact_coarse\n print(f\"{solver}: {100*round(L_infty_grid_function_norm(x=diff)/L_infty_grid_function_norm(x=exact_coarse),4)}%\")\n\nif __name__==\"__main__\":\n main()\n\n\n\n","sub_path":"SWEs/Finite_volumes_simulations/First_order/get_accuracy_FV_SWEs.py","file_name":"get_accuracy_FV_SWEs.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"164940354","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\nfrom tkinter import *\r\nfrom tkinter import ttk, font\r\nfrom classPaciente import Paciente\r\n\r\nclass PacientList(tk.Frame):\r\n def __init__(self, master, **kwargs):\r\n super().__init__(master)\r\n self.lb = tk.Listbox(self, **kwargs)\r\n scroll = tk.Scrollbar(self, command=self.lb.yview)\r\n self.lb.config(yscrollcommand=scroll.set)\r\n scroll.pack(side=tk.RIGHT, fill=tk.Y)\r\n self.lb.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)\r\n def insertar(self, paciente, index=tk.END):\r\n text = \"{}, {}\".format(paciente.getApellido(), paciente.getNombre())\r\n self.lb.insert(index, text)\r\n def borrar(self, index):\r\n self.lb.delete(index, index)\r\n def modificar(self, paciente, index):\r\n self.borrar(index)\r\n self.insertar(paciente, index)\r\n def bind_doble_click(self, callback):\r\n handler = lambda _: callback(self.lb.curselection()[0])\r\n 
self.lb.bind(\"<Double-Button-1>\", handler)\r\n\r\nclass PacientForm(tk.LabelFrame):\r\n    fields = (\"Apellido\", \"Nombre\", \"Teléfono\", \"Altura\", \"Peso\")\r\n    def __init__(self, master, **kwargs):\r\n        super().__init__(master, text=\"Paciente\", padx=10, pady=10, **kwargs)\r\n        self.frame = tk.Frame(self)\r\n        self.entries = list(map(self.crearCampo, enumerate(self.fields)))\r\n        self.frame.pack()\r\n    def crearCampo(self, field):\r\n        position, text = field\r\n        label = tk.Label(self.frame, text=text)\r\n        entry = tk.Entry(self.frame, width=25)\r\n        label.grid(row=position, column=0, pady=5)\r\n        entry.grid(row=position, column=1, pady=5)\r\n        return entry\r\n    def mostrarEstadoPacienteEnFormulario(self, paciente):\r\n        # a partir de un paciente, obtiene el estado\r\n        # y establece en los valores en el formulario de entrada\r\n        values = (paciente.getApellido(), paciente.getNombre(), \r\n            paciente.getTelefono(),paciente.getAltura(),paciente.getPeso())\r\n        for entry, value in zip(self.entries, values):\r\n            entry.delete(0, tk.END)\r\n            entry.insert(0, value)\r\n    def crearPacienteDesdeFormulario(self):\r\n        #obtiene los valores de los campos del formulario\r\n        #para crear un nuevo paciente\r\n        values = [e.get() for e in self.entries]\r\n        paciente=None\r\n        try:\r\n            paciente = Paciente(*values)\r\n        except ValueError as e:\r\n            messagebox.showerror(\"Error de Validación\", str(e), parent=self)\r\n        return paciente\r\n    def limpiar(self):\r\n        for entry in self.entries:\r\n            entry.delete(0, tk.END)\r\n\r\nclass NewPacient(tk.Toplevel):\r\n    def __init__(self, parent):\r\n        super().__init__(parent)\r\n        self.paciente = None\r\n        self.form = PacientForm(self)\r\n        self.btn_add = tk.Button(self, text=\"Confirmar\", command=self.confirmar)\r\n        self.form.pack(padx=10, pady=10)\r\n        self.btn_add.pack(pady=10)\r\n    def confirmar(self):\r\n        self.paciente = self.form.crearPacienteDesdeFormulario()\r\n        if self.paciente:\r\n            self.destroy()\r\n    def show(self):\r\n        self.grab_set()\r\n        self.wait_window()\r\n        return self.paciente\r\n\r\nclass IMC(tk.Toplevel):\r\n    __imc=None\r\n    __composicion=None\r\n    def __init__(self, parent,paciente):\r\n        super().__init__(parent)\r\n        self.geometry('325x150')\r\n        self.paciente=paciente\r\n        self.resizable(0,0)\r\n        self.frame = tk.Frame(self)\r\n        self.frame.place(x=0,y=0,relheight=1,relwidth=1)\r\n        tk.Label(self.frame,text='IMC').place(relx=0.27,y=30,anchor=tk.N)\r\n        tk.Label(self.frame,text='Composicion corporal').place(relx=0.27,y=60,anchor=tk.N)\r\n        self.__imc=StringVar()\r\n        self.__composicion=StringVar()\r\n        self.entry1=tk.Entry(self.frame,textvariable=self.__imc,width=25,state='disabled')\r\n        self.entry2=tk.Entry(self.frame,textvariable=self.__composicion,width=25,state='disabled')\r\n        self.entry1.place(relx=0.70,y=30,anchor=tk.N)\r\n        self.entry2.place(relx=0.70,y=60,anchor=tk.N)\r\n        self.btn_close = tk.Button(self.frame, text=\"Volver\", command=self.volver)\r\n        self.btn_close.place(relx=0.5,rely=0.92,anchor=tk.S)\r\n        self.__imc.set('{:.2f} kg/m2'.format(paciente.getIMC()))\r\n        self.__composicion.set(self.paciente.getComposicion())\r\n    def volver(self):\r\n        self.destroy()\r\n    def show(self):\r\n        self.grab_set()\r\n        self.wait_window()\r\n\r\nclass UpdatePacientForm(PacientForm):\r\n    def __init__(self, master, **kwargs):\r\n        super().__init__(master, **kwargs)\r\n        self.btn_save = tk.Button(self, text=\"Guardar\")\r\n        self.btn_delete = tk.Button(self, text=\"Borrar\")\r\n        self.btn_imc =tk.Button(self,text='Ver IMC')\r\n        self.btn_save.pack(side=tk.RIGHT, ipadx=5, padx=5, pady=5)\r\n        
self.btn_delete.pack(side=tk.RIGHT, ipadx=5, padx=5, pady=5)\r\n self.btn_imc.pack(side=tk.RIGHT,ipadx=5, padx=5, pady=5)\r\n def bind_save(self, callback):\r\n self.btn_save.config(command=callback)\r\n def bind_delete(self, callback):\r\n self.btn_delete.config(command=callback)\r\n def bind_imc(self,callback):\r\n self.btn_imc.config(command=callback)\r\n\r\nclass PacientsView(tk.Tk):\r\n def __init__(self):\r\n super().__init__()\r\n self.title(\"Lista de Pacientes\")\r\n self.list = PacientList(self, height=15)\r\n self.form = UpdatePacientForm(self)\r\n self.btn_new = tk.Button(self, text=\"Agregar Paciente\")\r\n self.list.pack(side=tk.LEFT, padx=10, pady=10)\r\n self.form.pack(padx=10, pady=10)\r\n self.btn_new.pack(side=tk.BOTTOM, pady=5)\r\n def setControlador(self, ctrl):\r\n #vincula la vista con el controlador\r\n self.btn_new.config(command=ctrl.crearPaciente)\r\n self.list.bind_doble_click(ctrl.seleccionarPaciente)\r\n self.form.bind_save(ctrl.modificarPaciente)\r\n self.form.bind_delete(ctrl.borrarPaciente)\r\n self.form.bind_imc(ctrl.calcularIMC)\r\n def agregarPaciente(self, paciente):\r\n self.list.insertar(paciente)\r\n def modificarPaciente(self, paciente, index):\r\n self.list.modificar(paciente, index)\r\n def borrarPaciente(self, index):\r\n self.form.limpiar()\r\n self.list.borrar(index)\r\n #obtiene los valores del formulario y crea un nuevo paciente\r\n def obtenerDetalles(self):\r\n return self.form.crearPacienteDesdeFormulario()\r\n #Ver estado de Paciente en formulario de pacientes\r\n def verPacienteEnForm(self, paciente):\r\n self.form.mostrarEstadoPacienteEnFormulario(paciente)\r\n","sub_path":"classVistas.py","file_name":"classVistas.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"400262212","text":"import json\n\njson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])\nprint(json.dumps(\"\\\"foo\\bar\"))\nprint(json.dumps('\\u1234'))\nprint(json.dumps('\\\\'))\nprint(json.dumps({\"c\": 0, \"b\": 0, \"a\": 0}, sort_keys=True))\n\nimport xml.etree.ElementTree as ET\ntree = ET.parse('country_data.xml')\nroot = tree.getroot()\nroot = ET.fromstring(country_data_as_string)\n\nfrom html.parser import HTMLParser\n\nclass MyHTMLParser(HTMLParser):\n def handle_starttag(self, tag, attrs):\n print(\"Encountered a start tag:\", tag)\n\n def handle_endtag(self, tag):\n print(\"Encountered an end tag :\", tag)\n\n def handle_data(self, data):\n print(\"Encountered some data :\", data)\n\nparser = MyHTMLParser()\nparser.feed('Test'\n '
<body><h1>Parse me!</h1></body></html>
    ')","sub_path":"docs/test_modules/json_test.py","file_name":"json_test.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"76585628","text":"\"\"\"The tests for the Modbus sensor component.\"\"\"\nimport logging\n\nfrom homeassistant.components.binary_sensor import DOMAIN as SENSOR_DOMAIN\nfrom homeassistant.components.modbus.const import (\n CALL_TYPE_COIL,\n CALL_TYPE_DISCRETE,\n CONF_ADDRESS,\n CONF_INPUT_TYPE,\n CONF_INPUTS,\n)\nfrom homeassistant.const import CONF_NAME, STATE_OFF, STATE_ON\n\nfrom .conftest import run_base_test\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def run_sensor_test(hass, use_mock_hub, register_config, value, expected):\n \"\"\"Run test for given config.\"\"\"\n sensor_name = \"modbus_test_binary_sensor\"\n entity_domain = SENSOR_DOMAIN\n data_array = {\n CONF_INPUTS: [\n dict(**{CONF_NAME: sensor_name, CONF_ADDRESS: 1234}, **register_config)\n ]\n }\n await run_base_test(\n sensor_name,\n hass,\n use_mock_hub,\n data_array,\n register_config.get(CONF_INPUT_TYPE),\n entity_domain,\n value,\n expected,\n )\n\n # Check state\n entity_id = f\"{entity_domain}.{sensor_name}\"\n state = hass.states.get(entity_id).state\n assert state == expected\n\n\nasync def test_coil_true(hass, mock_hub):\n \"\"\"Test conversion of single word register.\"\"\"\n register_config = {\n CONF_INPUT_TYPE: CALL_TYPE_COIL,\n }\n await run_sensor_test(\n hass,\n mock_hub,\n register_config,\n [0xFF],\n STATE_ON,\n )\n\n\nasync def test_coil_false(hass, mock_hub):\n \"\"\"Test conversion of single word register.\"\"\"\n register_config = {\n CONF_INPUT_TYPE: CALL_TYPE_COIL,\n }\n await run_sensor_test(\n hass,\n mock_hub,\n register_config,\n [0x00],\n STATE_OFF,\n )\n\n\nasync def test_discrete_true(hass, mock_hub):\n \"\"\"Test conversion of single word register.\"\"\"\n register_config = {\n CONF_INPUT_TYPE: CALL_TYPE_DISCRETE,\n }\n await run_sensor_test(\n hass,\n mock_hub,\n register_config,\n [0xFF],\n expected=\"on\",\n )\n\n\nasync def test_discrete_false(hass, mock_hub):\n \"\"\"Test conversion of single word register.\"\"\"\n register_config = {\n CONF_INPUT_TYPE: CALL_TYPE_DISCRETE,\n }\n await run_sensor_test(\n hass,\n mock_hub,\n register_config,\n [0x00],\n expected=\"off\",\n )\n","sub_path":"tests/components/modbus/test_modbus_binary_sensor.py","file_name":"test_modbus_binary_sensor.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"151329912","text":"# -*- coding: utf-8 -*-\n\nfrom trac.core import *\n\nfrom webadmin.web_ui import IAdminPageProvider\n\nfrom tracdownloads.api import *\n\nclass DownloadsWebAdmin(Component):\n \"\"\"\n The webadmin module implements downloads plugin administration\n via WebAdminPlugin.\n \"\"\"\n implements(IAdminPageProvider)\n\n # IAdminPageProvider\n\n def get_admin_pages(self, req):\n if req.perm.has_permission('DOWNLOADS_ADMIN'):\n yield ('downloads', 'Downloads System', 'downloads', 'Downloads')\n yield ('downloads', 'Downloads System', 'architectures',\n 'Architectures')\n yield ('downloads', 'Downloads System', 'platforms', 'Platforms')\n yield ('downloads', 'Downloads System', 'types', 'Types')\n\n def process_admin_request(self, req, category, page, path_info):\n # Get cursor.\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n # Prepare arguments and HDF structure.\n req.args['context'] = 'admin'\n req.args['page'] = 
page\n if page == 'architectures':\n req.args['architecture'] = path_info\n elif page == 'platforms':\n req.args['platform'] = path_info\n elif page == 'types':\n req.args['type'] = path_info\n elif page == 'downloads':\n req.args['download'] = path_info\n\n # Return page content.\n api = self.env[DownloadsApi]\n content = api.process_downloads(req, cursor)\n db.commit()\n return content\n","sub_path":"downloadsplugin/branches/0.10/tracdownloads/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"638684114","text":"import tkinter as tk\nimport random as rn\nimport time as tm\n\nclass Obj():\n\n def __init__(this, color):\n this.x = this.rng(X)\n this.y = this.rng(Y)\n this.color = color\n this.draw()\n\n def rng(this, limit):\n return rn.randint(1, limit - 1) * step\n\n def draw(this):\n this.body = canvas.create_rectangle((this.x, this.y),\n (this.x+step, this.y + step),\n fill = this.color)\n\n def move(enemy):\n x, y = coord_get(enemy.body)\n print(x,y)\n if y <= Y*step:\n canvas.move(enemy.body, 0, step)\n else:\n Hostile.enemies.remove(enemy)\n\nclass Player(Obj):\n hp = 4\n\n def __init__(this, color = 'black'):\n this.x = X*step / 2\n this.y = Y*step - step\n this.color = color\n super().draw()\n\n def comparePoz(this, ot_x, ot_y):\n return this.x == ot_x and this.y == ot_y\n\n def reDraw(this, x, y):\n old_x, old_y = this.x, this.y\n this.x = (this.x + x) % (step*X)\n this.y = (this.y + y) % (step*Y)\n canvas.move(this.body, (this.x - old_x), (this.y - old_y))\n \nclass Hostile(Obj):\n hazards = {-1}\n enemies = []\n\n def __init__(this, color = 'red'):\n this.x = -1\n this.y = 0\n while this.x in Hostile.hazards:\n this.x = super().rng(X)\n Hostile.hazards.add(this.x)\n this.color = color\n this.draw()\n\n def draw(this):\n this.body = canvas.create_rectangle((this.x, this.y),\n (this.x+step, this.y + step),\n fill = this.color)\n this.enemies.append(this)\n\nclass Meteor(Hostile):\n def __init__(this):\n super().__init__('brown')\n\ndef enemy_gen(Q):\n for i in range(Q):\n p=20\n \n r=rn.randint(1,100)\n if (r', k_prss)\nwhile True:\n if delay >= 20:\n moving()\n delay = 0\n else:\n delay += 1\n damage()\n gameOver()\n tm.sleep(0.05)\n gui.update()\n\n\n\n\n\n\n\n\n","sub_path":"fix.py","file_name":"fix.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"648913739","text":"# coding=utf-8\n# environment set: \n# http://www.tuicool.com/articles/eiM3Er3\n\nimport glob\nimport os\nimport sys\nimport logging\nimport re\nimport errno\n\nimport time\n\n\nprint('my_test.py started...')\nlogger = logging.getLogger('piaoyimq') # piaoyimq is the user who print the log,the default user is root\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(filename)s[line:%(lineno)d] %(levelname)-10s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='my_test.log',\n filemode='w')\n\n\n# ################################################################################################\n# Define a StreamHandler,put the info log and above level print to the strand error,and write it to the log file\n# ####loglevel : crital>error>warning>info>debug>noset\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nformatter = logging.Formatter('%(name)-12s: %(levelname)-10s 
%(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger('').addHandler(console)\n# ################################################################################################\n#\n# logger.debug('This is debug message')\n# logger.critical('This is critical message')\n#\n# logging.info('This is info message')\n# logging.warning('This is warning message')\n\n\n\n\n# ##############_____________1_____________######################\ndef testm():\n ''' a practise test'''\n logger.info('Type is: ' + os.name)\n logger.info('OS is: ' + sys.platform)\n\n\n# testm()\n\n\n\n\n\n# ##############_____________2_____________######################\ndef generate_mapping():\n ''' Generate the mapping between the testcase and the crash log'''\n logger.info('generate_mapping ...')\n crash_logs = set(\n [\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972000/post_before_first_tc.log',\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972001/post_before_fourth_tc.log',\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972002/post_before_fourth_tc.log',\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972003/post_before_fourth_tc.log',\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972004/post_before_fifth_tc_1.log',\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972005/post_before_fourth_tc.log',\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972006/post_before_third_tc.log',\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972007/post_before_second_tc_1.log',\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972008/post_before_second_tc.log',\n '/proj/tecsas-scratch/arrakis/application/sf_app/lib/fast_testrunner-1/priv/configs/972009/post_before_second_tc.log'])\n testcases = {}\n for crash_log in crash_logs:\n origin_logpath = crash_log\n # matched = re.match(r'.*\\/([^\\/]*)\\/post_(.*_\\D+)(_\\d+){0,1}\\.log', origin_logpath)\n # matched = re.match(r\".*\\/([^\\/]*)\\/(post_.*_\\D+\\.log)\", origin_logpath)\n matched = re.match(r\".*\\/([^\\/]*)\\/(post*.log)\", origin_logpath)\n\n if matched:\n node = matched.group(1)\n testcase = matched.group(2)\n # myt =matched.group(3)\n\n # logger.info(node)\n # logger.info(testcase)\n # logger.info(myt)\n # saved_logpath = self._save_crash_log(node, origin_logpath)\n testcases[node] = testcase\n\n for n, t in testcases.items(): # walk dictionary\n logger.info(n + '--->' + t)\n\n return testcases\n\n\n# ##############_____________3___class field__________######################\n\nclass myPortalResultHandler(object):\n '''myPortalResultHandler test'''\n # id = ''\n\n def __init__(self, regression_id):\n self._regression_id = regression_id\n myPortalResultHandler.id = regression_id # str(regression_id)\n\n\nclass test(object):\n '''my test'''\n\n def get_id(self):\n self.my_id = myPortalResultHandler.id\n # print('1 my_id=%s' % self.my_id)\n\n\n# myportal = myPortalResultHandler('20')\n# tt = test()\n# tt.get_id()\n# print('test.id=%s' % tt.my_id)\n\n\n\n\n# ##############_____________4___dict{dict{list}}__________######################piaoyimq\n# class my_MasterServerHandler(object):\n# '''my_MasterServerHandler test'''\n# def __init__(self, queue):\n# self.queue = queue\n# self.testcases = {}\n# self.testsuites = []\n# 
self.client_testcases = {'client_1': {'testcase_list': ['suite_1'.'tc_1_1', 'suite_1'.'tc_1_2'],\n# 'client_id': ['client_1']},\n# 'client_2': {'testcase_list': ['suite_2'.'tc_2_1', 'suite_2'.'tc_2_2'],\n# 'client_id': ['client_2', 'client_3']},\n# 'client_3': {'testcase_list': ['suite_2'.'tc_2_1', 'suite_2'.'tc_2_2'],\n# 'client_id': ['client_2', 'client_e']},\n# }\n#\n# def my_get_testcase_from_one_suite(self, client_id):\n# '''Get the testcase from client_testcases dict inside one single\n# suite, and remove the client/suite from the dict if there's no\n# testcase left'''\n# testcase = self.client_testcases[client_id]['testcase_list'].pop(0)\n# if self.client_testcases[client_id]['testcase_list'] == []:\n# for id_to_pop in self.client_testcases[client_id]['client_id']:\n# self.client_testcases.pop(id_to_pop)\n#\n# print(testcase[0])\n# return testcase\n\n\n# ##############_____________5_____________######################\n\ndef test():\n a = {'a_1': 123, 'b_1': 456, 'c_1': 789}\n if 'a_1' not in a: # if not 'a_1' in a:\n print('Hello:123')\n else:\n print('Can not find 123')\n\n\n# ##############_____________6___Pointer__________######################\nclass my_Processor:\n def __init__(self):\n self._processMap = {}\n self._processMap[\"get_testcase\"] = my_Processor.my_process_get_testcase\n # self._processMap[\"return_testcase\"] = Processor.process_return_testcase\n # self._processMap[\"report_result\"] = Processor.process_report_result\n # self._processMap[\"report_status\"] = Processor.process_report_status\n\n def process(self):\n (name, seqid, iprot, oprot) = ('get_testcase', 2, 3, 4)\n if name not in self._processMap:\n print('%s not in _processMap' % name)\n return\n else:\n self._processMap[name](self, seqid, iprot, oprot)\n return True\n\n def my_process_get_testcase(self, seqid, iprot, oprot):\n # print('my_process_get_testcase:\\n seqid=%s,iprot=%s,oprot=%s' % seqid, str(iprot), str(oprot))\n print('my_process_get_testcase:\\n')\n#\n#\n# my_processor = my_Processor()\n# my_processor.process()\n#\n# prog_name = os.path.basename(__file__)\n# prog_name_1 = os.path.basename(__file__).split('.')[0]\n# prog_name_2 = os.path.basename(__file__).split('.')[1]\n# print(prog_name)\n# print(prog_name_1)\n# print(prog_name_2)\n#\n# string = 'PmTypes::PGWCLI::Brief::Groups'\n# print('split resut: %s' % string.split('::'))\n# # temp_1 = string.split('::')[0]\n# temp_2 = string.split('::')[1]\n# temp_3 = string.split('::')[2]\n# temp_4 = string.split('::')[3]\n# temp_5 = string.split('::')[4]\n#\n# print('string=%s' % string)\n# print('temp_1=%s' % temp_1)\n# print('temp_2=%s' % temp_2)\n# print('temp_3=%s' % temp_3)\n# print('temp_4=%s' % temp_4)\n# print('temp_5=%s' % temp_5)\n# print('string2=%s' % string)\n\n\n\n# ##############_____________mkdir and join_____________######################\n\ndef make_dirs(path):\n ''' The similliar functionality as mkdir -p'''\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef join_path():\n log_directory = os.path.join('/proj/simfast', 'portal', 'cond')\n print(log_directory)\n\n if not os.path.exists(log_directory):\n make_dirs(log_directory)\n\n#\n#\n# baseline = 123\n# session = 456\n# # log_directory = os.path.join(get_deploy_directory(), 'node_logs', baseline, session)\n# log_directory = os.path.join('/home/piaoyimq', 'node_logs', str(baseline), str(session))\n#\n# print(log_directory)\n# # join_path()\n\n\n\n\n# 
##############_____________multiprocessing_____________######################\n\nimport multiprocessing\n# from multiprocessing import *\nimport os\n\n#\n# def output():\n# print(\"My pid is :%d\\n\" % os.getpid())\n# print(\"My parent is:%d\\n\" % os.getppid())\n#\n#\n# def my_multiprocessing_1():\n# p = Process(target=output)\n# p.start()\n# print(\"I am parent %d\\n\" % os.getpid())\n#\n#\n# my_multiprocessing_1()\n\ndef func(msg):\n print(\"Sub-process done 1\")\n for i in range(3):\n print(msg)\n time.sleep(1)\n\n\ndef single_process():\n p = multiprocessing.Process(target=func, args=(\"hello\", ))\n p.start()\n # p.join()\n print(\"Sub-process done.\")\n\n\n# single_process()\n\n\n\ndef func(msg):\n for i in range(3):\n print(msg)\n time.sleep(1)\n\n\ndef process_pool():\n pool = multiprocessing.Pool(processes=4)\n for i in range(2):\n msg = \"hello %d\" % (i)\n pool.apply_async(func, (msg, ))\n pool.close()\n pool.join()\n print(\"Sub-process(es) done.\")\n\n\n# process_pool()\n\n\n\n\n# ##############_____________list1-list2_____________######################\n\ndef list1_sub_list2(b):\n a = ['1', '2', '3', '4', '5']\n ret = list(set(a) ^ set(b))\n print(ret)\n\n#\n# list1_sub_list2(['1', '2', '3', '6'])\n\n\n\n# ##############_____________Get the specific directory files or subdirectory_____________######################\n\n# ####Method 1########\n\n# crash_log_directory_1 = 'C:\\\\Users\\\\piaoyimq\\\\PycharmProjects\\\\my-test'\n#\n# postcheck_logs = set(glob.glob('%s/post_*.log' % crash_log_directory_1))\n# result = glob.glob('%s/post_*.log' % crash_log_directory_1)\n#\n# print(postcheck_logs)\n# print(result)\n# if len(result):\n# print(\"Have\")\n# else:\n# print(\"None\")\n#\n# os.getcwd() #show current directory path\n\n\n# crash_log_directory = os.path.join('/proj/simfast/deploy/piaoyimq/sf_app/lib/fast_testrunner-1/priv/',\n# str('configs'))\n#\n# logger.info('____crash_log_directory=%s' % crash_log_directory)\n# result = glob.glob('%s/post_*.log' % crash_log_directory)\n# logger.info('____result=%s' % result)\n# print('result=%s' % result)\n\n\n# cmd='ls %s|grep \\'[0-9]\\''%path_a\n# a=[]\n# list_file = os.popen(cmd).readlines()\n# for d in list_file:\n# d=d.strip('\\n')\n# a.append(d)\n\n\n# ####Method 2########\n#\n# cc = os.listdir()\n# cc_new=[]\n# for c in cc:\n# if c.isdigit():\n# print(c)\n# cc_new.append(c)\n# print(cc_new)\n\n\n###############_____________small skills_____________#####################piaoyimq\n\n# my_string = 'Hello'\n# logger.info('test string: %s', '-'.join(my_string))\n#\n# import pwd\n# def _check_lock_file_if_exists(session): #zhuweibo add\n# ''' check lock file if exists'''\n# user = pwd.getpwuid(os.getuid())[0]\n# lock = os.path.join('/proj/tecsas-scratch/var/tbox/sessions',\n# '%s_%d.lock' % (user, session))\n#\n# print('user:%s'% user)\n# if not os.path.exists(lock):\n# logger.info('____%s is not exists' % lock)\n#\n# _check_lock_file_if_exists(1234)\n\nimport socket\n#\n# hostname = socket.gethostname()\n# print('hostname=%s' % hostname)\n\n\n###############_____________class init_____________#####################\n\nclass A:\n def __init__(self, view):\n self._view = view\n\n @classmethod #when you call the method ,you should not add the paramater self\n def test1(self):\n print('A.test1')\n\n @staticmethod #when you call the method ,you should add the paramater self\n def test2(a):\n print('a=%s' % a)\n print('A.test2')\n\n\nclass B:\n def __init__(self, view=None):\n self._view = view\n\n @classmethod #when you call the method ,you 
should not add the paramater self\n def test1(self):\n print('B.test1')\n\n @staticmethod #when you call the method ,you should add the paramater self\n def test2(a):\n print('a=%s' % a)\n print('B.test2')\n\n\n#\n# a = A('a', 'b', 'c', 'd')\n# a._test()\n\n# b = A(None,None,None,None)\n# b._test()\n# A._test()\n#\n# A(None).test1() #class A must have paramater, because it have default paramater\n# A(None).test2(None)\n#\n# B().test1() #class B need not have paramater, because it have default paramater\n# B().test2(None)\n#\n\n\n###############_____________class singleton instance 1_____________#####################\n\ndef singleton(cls, *args, **kwargs):\n instances = {}\n\n def _singleton(*args, **kwargs):\n if cls not in instances:\n instances[cls] = cls(*args, **kwargs)\n print('instances[cls]=%s' % instances[cls])\n print('args type:%s' % type(args))\n for value in args:\n print(' %s ' % value)\n print('kwargs type:%s' % type(kwargs))\n for key in kwargs:\n print('%s=%s' % (key, kwargs[key]))\n\n return instances[cls]\n\n return _singleton\n\n\n@singleton\nclass MyClass4(object):\n a = 1\n\n def __init__(self, *args, **kwargs): #*args is tuple,**kwargs is a dict\n a = 1\n\n\nclass MyClass5(object):\n a = 1\n\n def __init__(self, x=0):\n self.x = x\n\n\n# if True:\n# one = MyClass4(1, 2, 3, 4, name='piaoyimq', age=27)\n# else:\n# args = [11, 22, 33, 44]\n# kwargs = {'name': 'zhu', 'age': 7}\n# one = MyClass4(args, kwargs)\n\n# two = MyClass4()\n#\n# three = MyClass5()\n# four = MyClass5()\n#\n# two.a = 3\n# print('one.a=%s' % one.a)\n# print('two.a=%s' % two.a)\n# print('id(one)=%s' % id(one))\n# print('id(two)=%s' % id(two))\n#\n# three.a = 10\n# print('three.a=%s' % three.a)\n# print('four.a=%s' % four.a)\n# print('id(three)=%s' % id(three))\n# print('id(four)=%s' % id(four))\n\n\n\n###############_____________class singleton instance 2_____________#####################\n\nclass Singleton(object):\n _instance = None\n name = 'I am singleton'\n\n def __init__(self):\n self.var1 = 'I am a object'\n print('__init__')\n\n def __new__(cls, *args, **kwargs):\n print('__new__')\n if Singleton._instance is None:\n Singleton._instance = object.__new__(cls, *args, **kwargs)\n return Singleton._instance\n\n\nclass MyClass(Singleton):\n a = 1\n #\n #\n # obj1 = Singleton()\n # obj2 = Singleton()\n #\n # obj3 = MyClass()\n # obj4 = MyClass()\n #\n # print('id(obj1)=%s' % id(obj1))\n # print('id(obj2)=%s' % id(obj2))\n #\n # obj3.a = 7\n # print('obj4.a=%s' % obj4.a)\n # print('id(obj3)=%s' % id(obj3))\n # print('id(obj4)=%s' % id(obj4))\n\n\n###############_____________class singleton instance 2_____________#####################\n\ndef my_parse_arguments():\n ''' Parse arguments, we need the view, session id, and master_ip, and\n optional tdp_tarball'''\n import argparse\n\n parser = argparse.ArgumentParser('my_test.py')\n\n parser.add_argument(\"-y\", choices=['a', 'b', 'd'])\n\n group = parser.add_argument_group('Mandatory arguments')\n group.add_argument('--session', dest='session', type=int, required=True, # mandatory\n help='The tbox session id')\n\n group.add_argument('-view', dest='view', type=str, required=False, #optional ,not mandatory\n default='view_default', #optional ,it can have default value\n help='The Clear Case view')\n\n group.add_argument('--install_src', dest='install_src',\n type=str, required=True,\n help='The src from which the installation is from, '\n 'could be \\'vob\\' for CC mode or full path to tdp tar '\n 'ball in tdp mode')\n 
group.add_argument('--enable_exmachina', dest='enable_exmachina',\n default=False, action='store_false', #store_false or store_true\n help='Enable eXmachina collection')\n\n return parser.parse_args()\n\n\ndef read_args():\n args = my_parse_arguments()\n session = args.session\n view = args.view\n install_src = args.install_src\n y = args.y\n exmachina = args.enable_exmachina\n\n print('Parse argument:')\n print('session=%s' % str(session))\n print('view=%s' % str(view))\n print('install_src=%s' % install_src)\n print('y=%s' % y)\n print('exmachina=%s' % exmachina)\n\n\n# read_args()\n\n\nclass Get_and_Set_attr(object):\n def __setattr__(self, name, value):\n print('__setattr__')\n if name == 'value':\n object.__setattr__(self, name, value - 100)\n else:\n object.__setattr__(self, name, value)\n\n def __getattr__(self, name):\n print('__getattr__')\n try:\n return object.__getattribute__(name)\n except:\n return name + ' is not found!'\n\n def __str__(self):\n print('__str__')\n return self.name + ' cost : ' + str(self.value)\n\n#\n# print('1-1')\n# c = Get_and_Set_attr()\n#\n# print('1-2')\n# c.name = 'Python'\n#\n# print('1-3')\n# c.value = 100\n#\n# print('1-4')\n# print('c.name = %s' % c.name)\n#\n# print('1-5')\n# print('c.value = %s' % c.value)\n#\n# print('1-6')\n# print('c = %s' % c)\n#\n# print('1-7')\n# print('c.Type = %s' % c.Type)\n#\n# print('1-8')\n\n\n###############_____________read config file_____________#####################\n\n# from ConfigParser import SafeConfigParser\n#\n#\n# class Configuration(object):\n# \"\"\" Configuration represents the configuration file with all our settings.\n# All sections and options can be reached as attributes instead of using the\n# ConfigParser interface.\n#\n# Example:\n#\n# config = Configuration()\n# config.common.heartbeats => 10 \"\"\"\n#\n# def __init__(self, config_parser=None):\n# config_file = self._config_file()\n# if config_parser:\n# self._config = config_parser\n# else:\n# self._config = SafeConfigParser()\n# self._config.read(config_file)\n#\n# def _config_file(self):\n# current_directory = os.path.abspath(__file__)\n# core_dir = os.path.split(current_directory)[0]\n# return os.path.join(core_dir, './config/', 'arrakis.cfg')\n#\n# def __getattr__(self, section): #piaoyimq ???\n# print('__getattr__')\n# if self._config.has_section(section):\n# return Section(self._config, section)\n# else:\n# return object.__getattribute__(self, section)\n#\n#\n# class Section(object):\n# \"\"\" A Section, as it sounds, is a section in the configuration file. It is\n# seperated from the Configuration class to improve clarity. 
\"\"\"\n#\n# def __init__(self, config, section):\n# self._config = config\n# self._section = section\n#\n# def __getattr__(self, attribute):\n# if attribute in self._config.options(self._section):\n# return self._config.get(self._section, attribute)\n# else:\n# return object.__getattribute__(self, attribute)\n#\n# def get_boolean(self, attribute):\n# return self._config.getboolean(self._section, attribute)\n\n#\n#\n# config = Configuration()\n# install_info = dict()\n# install_info['simfast_lib'] = config.simfast.simfast_lib\n#\n# print('install_info[\\'simfast_lib\\']=%s' % install_info['simfast_lib'])\n# print('tboxpool=%s' % config.tbox.tboxpool)\n\n\n\n###############_____________signal_____________#####################\nimport signal\n\n\nclass TboxClient(object):\n '''The tbox client is a supervisor which handles the operations in the\n tbox.'''\n\n def __init__(self, session, baseline):\n signal.signal(signal.SIGINT, self._handler) #ctrl+C\n signal.signal(signal.SIGTERM, self._handler) #kill command\n signal.signal(signal.SIGUSR1, self._sig_usr1)\n signal.signal(signal.SIGUSR2, self._handler)\n self._simfast_communicator = SimFastCommunicator(session, baseline)\n\n def _handler(self, _signum, _frame): #note: can't be lack of _signum and _frame\n # print('_signum=%s'%_signum)\n # print('_frame=%s'%_frame)\n print('Got an interrupt, will try to stop SimFAST')\n self._simfast_communicator.stop()\n sys.exit(1)\n\n def _sig_usr1(self, signum, frame):\n print(\"recive a SIGUSR1 sinal from process(exited)\")\n\n def start(self):\n print('Start to sleep 180s')\n time.sleep(180)\n print('End to sleep 180s')\n\n\nclass SimFastCommunicator(object):\n def __init__(self, session, baseline):\n self._session = session\n self._baseline = baseline\n\n def stop(self):\n print('Shut down the server')\n\n\n# TboxClient('session', 'baseline').start()\n\n\n\n########################subprocess.Popen####################\n# import subprocess\n#\n# cmd = \"sleep 10\"\n# fd = open(\"/home/piaoyimq/test/python_test/my_test.log\", \"w\")\n# data = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n# print('data.stdout.read():\\n %s' % data.stdout.read())\n# fd.write(data.stdout.read())\n# fd.close()\n\n\n# sys.path.append('c:\\\\mypythonlib')\nsys.path.append('./lib')\n# import pexpect\n\n\ndef ssh_cmd(user, ip, passwd, cmd):\n ret = -1\n ssh = pexpect.spawn('ssh %s@%s \"%s\"' % (user, ip, cmd))\n print('ssh %s@%s \"%s\"' % (user, ip, cmd))\n try:\n i = ssh.expect(['Password: ', 'continue connecting (yes/no)?'], timeout=5)\n if i == 0:\n ssh.sendline(passwd)\n print('if end')\n elif i == 1:\n ssh.sendline('yes')\n print('1-1')\n ssh.expect('Password: ')\n ssh.sendline(passwd)\n print('1-2')\n ssh.sendline(cmd)\n r = ssh.read()\n print(r)\n ret = 0\n except pexpect.EOF:\n print(\"EOF\")\n ssh.close()\n ret = -1\n except pexpect.TIMEOUT:\n print(\"TIMEOUT\")\n ssh.close()\n ret = -2\n return ret\n\n # example:\n # ssh sysadm@10.184.28.4\n # password:wpp_admin\n\n#\n# user = 'sysadm'\n# ip = '10.184.28.4'\n# passwd = 'wpp_admin'\n# command = 'ls;pwd'\n# ssh_cmd(user, ip, passwd, command)\n\n\n# import paramiko\nimport threading\n\n\ndef ssh2(ip, username, passwd, cmd):\n try:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip, 22, username, passwd, timeout=5)\n for m in cmd:\n stdin, stdout, stderr = ssh.exec_command(m)\n #stdin.write(\"Y\") #input 'Y'\n out = stdout.readlines()\n for o in out:\n print(o),\n print('%s\\tOK\\n' % (ip))\n ssh.close()\n 
except:\n print('%s\\tError\\n' % (ip))\n#\n#\n# cmd = ['cal', 'echo hello!'] #command list\n# username = \"sysadm\"\n# passwd = \"wpp_admin\"\n# threads = [] #multi thread\n# print (\"Begin......\")\n#\n# if False:\n# for i in range(1, 254):\n# ip = '10.184.28.' + str(i)\n# a = threading.Thread(target=ssh2, args=(ip, username, passwd, cmd))\n# a.start()\n# else:\n# ip = '10.184.28.4'\n# a = threading.Thread(target=ssh2, args=(ip, username, passwd, cmd))\n# a.start()\n\n# import argparse\ndef argument():\n parser = argparse.ArgumentParser(description='Outputs a MIM-file containing counter definitions from the Rhapsody model.')\n parser.add_argument('-b', '--build_number', default='', action=\"store\", dest=\"build_number\", help='Build number (will be included in the MIM-file)')\n parser.add_argument('-n', '--node', action=\"store\", dest=\"node\", choices=['sgw', 'pgw', 'mbmsgw', 'node', 'all'], default='all',\n help='Default is to print all of PGW, SGW and MBMSGW and common counters. If you want just one of them then specify it here.')\n parser.add_argument('-p', '--platform', required=True, choices=['juniper', 'ssr'], help='Platform')\n args = parser.parse_args()\n\n node_dict = {'pgw': (['pgw'], 'PGW', 'PDNGateway', 'Counters for the PDN Gateway node'),\n 'sgw': (['sgw'], 'SGW', 'ServingGateway', 'Counters for the Serving Gateway node'),\n 'mbmsgw': (['mbmsgw'], 'MBMSGW', 'MBMSGateway', 'Counters for the MBMS Gateway node'),\n 'node': (['node'], 'NODE', 'NodeCounters', 'Common node counters'),\n 'all': (['pgw', 'sgw', 'mbmsgw', 'node'], 'PGW, SGW, MBMSGW and common', 'PDN, Serving and MBMS Gateway plus common counters', 'PDN, Serving and MBMS Gateway plus common counters')}\n nodes, node_name_short, node_name_long, node_name_long_description = node_dict[args.node]\n\n print(' node=%s\\n node_name_short=%s\\n node_name_long=%s\\n node_name_long_description=%s\\n build_number=%s\\n'\n % (nodes,node_name_short,node_name_long,node_name_long_description,args.build_number))\n\n\n# argument()\n\ndef re_match():\n text = 'PmTypes::SGW::Groups'\n matched = re.match(r\"::SGW::GROUPS\", text.upper())\n\n # matched = re.match(r'.*\\/([^\\/]*)\\/post_(.*_\\D+)(_\\d+){0,1}\\.log', origin_logpath)\n # matched = re.match(r\".*\\/([^\\/]*)\\/(post_.*_\\D+\\.log)\", origin_logpath)\n # matched = re.match(r\".*\\/([^\\/]*)\\/(post*.log)\", origin_logpath)\n\n if matched:\n print('matched ok')\n else:\n print('matched failed')\n\ndef re_match_2():\n text = 'PgwL2tpPmCliCommand'\n matched = re.match(r'.+(PmCliCommand)$', text)\n if matched:\n print('matched ok')\n else:\n print('matched failed')\n\n# re_match_2()\n\n\ndef string_in():\n text = 'PgwL2tpPmCliCommand'\n if 'PmCli' in text:\n print('ok in')\n else:\n print('not in')\n\n# string_in()\n\n\n\n\ndef list_for():\n a_list = [1,2,3,4,5]\n for x in a_list:\n print('x=%s'% x)\n if x ==3 :\n a_list.index(0)\n\n\n\ndef while_list():\n i = 0\n while True:\n\n for data in [1, 2, 3, 4, 5]:\n print('i=%s\\t' % data)\n i += 1\n print('The %s loop\\n' % i)\n if i == 5:\n break\n\n# while_list()\n\ndef or_test():\n sbs_file_path = '/workspace/git/piaoyimq/epg-2/application/PerformanceManagement/PerformanceMgmtTypes_rpy/PmTypes/SGW/ggsnApnFbcPrasStats.sbs'\n\n # if ('PGW/' or 'SGW/') in sbs_file_path: # can't do like this\n if 'PGW/' in sbs_file_path or 'SGW/' in sbs_file_path:\n print('ok in sbs_file_path')\n else:\n print('not in sbs_file_path')\n\n# or_test()\n\nimport pyparsing as pp\n\n\n\n\ndef resolve_kconfig_ifdefs(code_str):\n \"Implements simple 
preprocessor as a performance optimization, to avoid starting an external process for each invokation\"\n\n def should_include(macro_stack, active_macros):\n for macro in macro_stack:\n if not macro.startswith('!'):\n if macro not in active_macros:\n return False\n else:\n if macro[1:] in active_macros:\n return False\n return True\n\n def invert(macro):\n if macro.startswith('!'):\n return macro[1:]\n else:\n return '!' + macro\n\n def preprocess(code, active_macros):\n result = []\n lines = code.split('\\n')\n macro_stack = []\n for line in lines:\n words = line.split(' ')\n if words and words[0] in ['#ifdef', '#ifndef']:\n macro = words[1]\n if words[0] == '#ifndef':\n macro = invert(macro)\n macro_stack.append(macro)\n elif words and words[0] == '#endif':\n del macro_stack[-1]\n elif words and words[0] == '#else':\n macro_stack[-1] = invert(macro_stack[-1])\n else:\n if should_include(macro_stack, active_macros):\n result.append(line)\n return '\\n'.join(result)\n\n kconfig_defines = os.environ.get('KCONFIG_CC_DEFINES', '').split()\n # print('kconfig_defines=%s\\n' % kconfig_defines)\n active_macros = [m[2:] for m in kconfig_defines]\n # print('active_macros=%s\\n' % active_macros)\n return preprocess(code_str, active_macros)\n\n\n\n\ndef pyparsing_test_1():\n string_a = 'CounterType( /* */ \\\n \\\"ggsnGtpUplinkPackets\\\", /* Name */ \\\n \\\"uplink-packets\\\", /* CLI Name */ \\\n \\\"ggsnGtpUplinkPackets\\\", /* XML Name */ \\\n PmCommon::current, /* Status */ \\\n PmCommon::BitLen64 /* Size */ \\\n )'\n\n string_b = '''CounterType( /* ======*/\n \"ggsnGtpUplinkPackets\", /* Name */\n \"uplink-packets\", /* CLI Name */\n \"ggsnGtpUplinkPackets\", /* XML Name */\n PmCommon::current, /* Status */\n PmCommon::BitLen64 /* Size */\n )'''\n\n print('string: %s' % resolve_kconfig_ifdefs(string_b))\n\n\n stripped_identifier = pp.Combine((pp.ZeroOrMore(pp.Suppress(pp.Word(pp.alphanums) + '::')) + pp.Word('.:' + pp.alphanums)))\n compound_string = pp.Combine(pp.Optional(pp.Word('_' + pp.alphanums)) + pp.QuotedString('\"'))\n string = pp.QuotedString('\"')\n cppStyleComment = '//' + pp.restOfLine\n print('stripped_identifier: %s' % stripped_identifier)\n print('compound_string: %s' % compound_string)\n print('string: %s' % string)\n\n\n\n init_list = \\\n stripped_identifier + '(' + \\\n compound_string(\"name\") + ',' + \\\n string(\"nameInCLI\") + ',' + \\\n string(\"XMLName\") + ',' + \\\n stripped_identifier(\"status\") + \\\n pp.Optional(',' + stripped_identifier(\"size\")) + \\\n pp.Optional(',' + stripped_identifier(\"platform\")) + \\\n ')'\n\n init_list.ignore(pp.cStyleComment | cppStyleComment)\n\n return init_list.parseString(resolve_kconfig_ifdefs(string_b))\n\n\n\n\ndef pyparsing_test_2():\n\n string_c = '''RatioPmCliCounter( /* ================= =====*/\n \"ggsn3gdtActiveContextsRatio\", /* Name || */\n \"ratio-3gdt\", /* CLI Name || */\n PmCommon::BitLen32 /* Size || */\n )'''\n\n string_b = '''RatioPmCliCounter( /* ================= =====*/\n \"ggsn3gdtActiveContextsRatio\", /* Name || */\n \"\" /* CLI Name */\n )'''\n\n print('string: %s' % resolve_kconfig_ifdefs(string_c))\n\n stripped_identifier = pp.Combine((pp.ZeroOrMore(pp.Suppress(pp.Word(pp.alphanums) + '::')) + pp.Word('.:' + pp.alphanums)))\n compound_string = pp.Combine(pp.Optional(pp.Word('_' + pp.alphanums)) + pp.QuotedString('\"'))\n string = pp.QuotedString('\"')\n\n # size_string = pp.Combine((pp.ZeroOrMore(pp.Suppress(pp.Word(pp.alphanums) + '::')) + pp.Word('.:' + pp.alphanums)))\n cppStyleComment = '//' + 
pp.restOfLine\n\n init_list = \\\n stripped_identifier + '(' + \\\n compound_string(\"name\") + ',' + \\\n string(\"nameInCLI\") + pp.Optional(',') + \\\n pp.Optional(stripped_identifier(\"size\")) + \\\n ')'\n\n # init_list = \\\n # stripped_identifier + '(' + \\\n # compound_string(\"name\") + ',' + \\\n # string(\"nameInCLI\") + ',' + \\\n # size_string + \\\n # ')'\n\n init_list.ignore(pp.cStyleComment | cppStyleComment)\n\n return init_list.parseString(resolve_kconfig_ifdefs(string_c))\n\nresult = pyparsing_test_2()\nprint('out: %s' % result)\nif 'nameInCLI' in result:\n print('nameInCLI: %s' % result['nameInCLI'])\nif 'size' in result:\n print('size: %s' % result['size'])\nsize_mapping = { \"BitLen64\": \"64\",\n \"BitLen32\": \"32\" }\nprint('name: %s' % result.get('name', ''))\nprint('cli: %s' % result.get('nameInCLI', ''))\nprint('size: %s' % result.get('size', ''))\nprint('mapping_size: %s' % size_mapping[result.get('size', '')])\n\n\n\n\ndef pyparsing_test_3():\n string_a = '''appendIndexName(\"ggsnGtpuAddress\",\"ebm-server-name\"); //Index 1, Type: IPAddress\n appendIndexName(\"arp-value\",\"ARP-VALUE\"); //Index 2, Type: String\n appendIndexName(\"ggsnServIdentIndex\"); //Index 3, Type: Integer32 (1..4096)\n addCounter(new ggsnApnFbcServIdentUplinkBytes );\n addCounter(new ggsnApnFbcServIdentDownlinkBytes );\n addCounter(new ggsnApnFbcServIdentEventTrans);\n //addCounter(new ggsnApnFbcServIdentEventTransFail );\n addCounter(new ggsnApnFbcServIdentEventStartTrans );\n addCounter(new ggsnApnFbcServIdentEventSuccessTrans );'''\n\n string_b = '''appendIndexName(\"ebm-server-name\"); //Index 1, Type: , String appendIndexName(\"ebmServerName\",\"ebm-server-name\");\n appendIndexName(\"ggsnServIdentIndex\"); //Index 3, Type: Integer32 (1..4096)\n '''\n\n\n###################old##############\n # index_struct = pp.Combine(pp.Suppress('appendIndexName' + '(') + pp.dblQuotedString + pp.Suppress(')' + ';')).setParseAction(pp.removeQuotes)\n # counter_struct = pp.Suppress('addCounter' + '(' + 'new' + pp.Word(pp.alphanums) + ');')\n # cppStyleComment = '//' + pp.restOfLine\n #\n # init_list = \\\n # pp.ZeroOrMore(index_struct)(\"indexNames\") + \\\n # pp.ZeroOrMore(counter_struct)\n\n\n###################start##############\n #pp.Combine(pp.Optional(pp.Word('_' + pp.alphanums)) + pp.QuotedString('\"'))\n # pp.ZeroOrMore(pp.Suppress(pp.Word(pp.alphanums)\n\n # index_struct = pp.Combine(pp.Suppress('appendIndexName' + '(') + pp.Combine((pp.dblQuotedString).setParseAction(pp.removeQuotes) + pp.Optional(',') + pp.ZeroOrMore(' ') + pp.Optional(pp.dblQuotedString)) + pp.Suppress(')' + ';'))\n index_struct = pp.Combine(pp.Suppress('appendIndexName' + '(') + pp.dblQuotedString + pp.Suppress(pp.Optional(',') + pp.ZeroOrMore(' ') + pp.Optional(pp.dblQuotedString) +')' + ';')).setParseAction(pp.removeQuotes)\n\n\n\n\n\n counter_struct = pp.Suppress('addCounter' + '(' + 'new' + pp.Word(pp.alphanums) + ');')\n cppStyleComment = '//' + pp.restOfLine\n\n init_list = \\\n pp.ZeroOrMore(index_struct)(\"indexNames\") + \\\n pp.ZeroOrMore(counter_struct)\n\n init_list.ignore(pp.cStyleComment | cppStyleComment)\n\n return init_list.parseString(string_a)\n\nresult = pyparsing_test_3()\nprint('out: %s' % result)\nprint('indexNames: %s' % result.get(\"indexNames\", \"\"))\n\n\ndef in_key_words_test():\n\n if 'a' in ['ab', 'b', 'c']:\n print('ok in 1')\n else:\n print('not in 1')\n\n# in_key_words_test()\n\n\ndef string_replace():\n text = '''Results:\n Mandatory IE missing total:'''\n\n text2 = '''The number of 
successfully completed deactivation PDP context procedures initiated by the GGSN on a per APN basis.\n\nNote: This counter cannot be retrieved by CLI.'''\n print('text_1: %s' % text2)\n print('text_2: %s' % text2.replace('\\n', ' '))\n print('text_3: %s' % text2.strip('\\n')) ## can not delete '\\n'\n\n# string_replace()\n\n\ndef striplinex_test():\n pass\n","sub_path":"PiaoyimqGeneralPythonCode/python-test/my-python-test.py","file_name":"my-python-test.py","file_ext":"py","file_size_in_byte":35698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"314608996","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport time\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\n\n\ndef when_file_changed(filename):\n cls()\n filename = os.path.abspath(filename)\n print(filename)\n package = \"\"\n if not filename.endswith(\"_test.py\"):\n basename = os.path.basename(filename).replace(\".py\", \"\")\n package = basename\n filename = filename.replace(basename, \"tests/\" + basename + \"_test\")\n else:\n package = os.path.basename(filename).replace(\"_test.py\", \"\")\n nose = \"nosetests\" # python2\n # nose = \"nosetests3\" # python3\n options = \"--rednose --with-coverage --cover-erase \" \\\n \"--cover-package={package} -v {filename}\".format(**locals())\n # -v verbose show a list of tests\n cmd = nose + \" \" + options\n os.system(cmd)\n\n\ndef cls():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\nclass ModifiedHandler(PatternMatchingEventHandler):\n patterns = [\"*.py\"]\n\n def on_created(self, event):\n when_file_changed(event.src_path)\n\n def on_any_event(self, event):\n pass\n\n def on_modified(self, event):\n when_file_changed(event.src_path)\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n event_handler = ModifiedHandler()\n observer = Observer()\n observer.schedule(event_handler,\n path=args[1] if args else '.', recursive=True)\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n","sub_path":"script-template/watchdog_python.py","file_name":"watchdog_python.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"113433304","text":"def countInversions(array):\n '''\n Counts the number of inversions in a given array\n Returns the sorted array and the number of inversions\n '''\n # base case\n if len(array) == 1:\n return array, 0\n\n else:\n # split the list into two halves\n left = array[:len(array) // 2]\n right = array[len(array) // 2:]\n\n # recursively sort and count both havles\n left, left_inversions = countInversions(left)\n right, right_inversions = countInversions(right)\n\n # sum inversions\n sorted_array = []\n i = 0\n j = 0\n inversions = 0 + left_inversions + right_inversions\n\n # merge and count inversions\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n sorted_array.append(left[i])\n i += 1\n else:\n sorted_array.append(right[j])\n j += 1\n inversions += (len(left) - i)\n sorted_array += left[i:]\n sorted_array += right[j:]\n\n return sorted_array, inversions\n","sub_path":"web/utils/countInversions.py","file_name":"countInversions.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487061074","text":"from random import randint\nimport string\n\n\ndef strip(cin):\n cout = 
''.join(cin.split())\n    cout = ''.join(l for l in cout if l not in string.punctuation)\n    cout = ''.join(l for l in cout if l not in string.ascii_letters)\n    return cout\n\n\ndef cow_check(game, user):\n    cow = 0\n    game = str(game)\n    user = str(user)\n    for i in range(0, 4):\n        if user[i] == game[i]:\n            cow += 1\n    return cow\n\n\ndef bull_check(game, user):\n    bull = 0\n    for i in range(0, 4):\n        if user[i] != game[i] and user[i] in game:\n            bull += 1\n    return bull\n\n\nif __name__ == \"__main__\":\n    loop = True\n    game = [randint(1, 9), randint(1, 9), randint(1, 9), randint(1, 9)]\n    game = ''.join(map(str, game))\n    print(game) # debug\n    while loop is True:\n        print('Digite um número de 4 digitos')\n        user = input(': ')\n        user = strip(user)\n        if user == game:\n            print('')\n            print('Parabéns, você acertou!')\n            break\n        elif len(user) == 4:\n            print('')\n            print('Cows: %s' % cow_check(game, user))\n            print('Bulls: %s' % bull_check(game, user))\n            print('Continue tentando...', '\\n')\n        else:\n            print('')\n            print('Algo deu errado. Tente novamente.')\n            print('')\n","sub_path":"#18 - Cows and Bulls.py","file_name":"#18 - Cows and Bulls.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"407010458","text":"#coding=utf-8\n# Import matplotlib's pyplot sublibrary, used to draw simple 2D plots\nimport timeanalysis\n\ndef iconplot(asin):\n    stat_dict = timeanalysis.timeanalysis(asin)\n    time_list = []\n    for time in stat_dict:\n        time_list.append(time)\n    time_list.sort()\n    y1 = []\n    for i in time_list:\n        y1.append((stat_dict[i])[0])\n    y2 = []\n    for i in time_list:\n        y2.append((stat_dict[i])[1])\n    y3 = []\n    for i in time_list:\n        y3.append((stat_dict[i])[2])\n    y4 = []\n    for i in time_list:\n        y4.append((stat_dict[i])[3])\n    y5 = []\n    for i in time_list:\n        y5.append((stat_dict[i])[4])\n    import xlwt\n    file = xlwt.Workbook(encoding = 'ascii')\n    table = file.add_sheet(asin)\n\n    for i in range(1,len(time_list)+1):\n        table.write(0,i,time_list[i-1])\n        table.write(1,i,y1[i-1])\n        table.write(2,i,y2[i-1])\n        table.write(3,i,y3[i-1])\n        table.write(4,i,y4[i-1])\n        table.write(5,i,y5[i-1])\n    count = range(1,6)\n    for i in range(1,6):\n        table.write(i,0,count[i-1])\n\n    file.save(asin+'.xls')\n\n    x = range(0,len(time_list))\n    import matplotlib.pyplot as plt\n\n    fig = plt.figure(figsize=(8,6))\n    ax = fig.add_subplot()\n\n    plt.plot(x,y1,'o-',label=u\"线条\")\n    plt.plot(x,y2,'o-',label=u\"线条\")\n    plt.plot(x,y3,'o-',label=u\"线条\")\n    plt.plot(x,y4,'o-',label=u\"线条\")\n    plt.plot(x,y5,'o-',label=u\"线条\")\n\n    plt.show()\n    plt.savefig(\"temp.png\")\n\nwhile 1:\n    asin = raw_input('输入asin:')\n    result = iconplot(asin)\n\n","sub_path":"datanalysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"449508846","text":"lista = []\r\nlistb = []\r\ncombowombo= []\r\nimport random\r\n\r\nfor x in range(0,100):\r\n    lista.append( random.randint(0,500) )\r\nfor x in range(0,150):\r\n    listb.append( random.randint(0,700) )\r\n\r\ncombowombo = [a for a in lista for b in listb if a == b ]\r\n\r\nprint(lista)\r\nprint(\"\\n\\n\\n\")\r\nprint(listb)\r\nprint(\"\\n\\n\\n\")\r\nprint(combowombo)","sub_path":"practice_python_10.py","file_name":"practice_python_10.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"166317596","text":"from django.urls import path, 
include\nfrom rest_framework import routers\nfrom django.views.generic import TemplateView\nfrom . import views\n\napp_name = 'tree'\n\nurlpatterns = [\n path('', TemplateView.as_view(template_name=\"tree/index.html\")),\n path(r'api/node/', views.NodeGeneralAPIView.as_view(), name='node'),\n path(\n r'api/node/',\n views.NodeSingleAPIView.as_view(),\n name='node-single'\n ),\n]\n","sub_path":"tree/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"630886314","text":"# Copyright 2017 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\n\nif sys.version_info[:2] <= (2, 6):\n try:\n import unittest2 as unittest\n except ImportError:\n sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')\n sys.exit(1)\nelse:\n import unittest\n\nfrom pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql import SparkSession\n\n\nclass SparkDLTestCase(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.sc = SparkContext('local[*]', cls.__name__)\n cls.sql = SQLContext(cls.sc)\n cls.session = SparkSession.builder.getOrCreate()\n\n @classmethod\n def tearDownClass(cls):\n cls.session.stop()\n cls.session = None\n cls.sc.stop()\n cls.sc = None\n cls.sql = None\n\n def assertDfHasCols(self, df, cols = []):\n map(lambda c: self.assertIn(c, df.columns), cols)\n","sub_path":"python/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"620385312","text":"from __future__ import print_function\r\nfrom __future__ import division\r\n\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport time\r\nimport json\r\nimport numpy as np\r\nimport tabulate\r\nimport argparse\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-l', '--log-dir', default='',\r\n help='Path of experiment log directory')\r\n parser.add_argument('--labels', required=True,\r\n help='List of labels (as JSON file)')\r\n parser.add_argument('-o', '--output-prefix', required=True,\r\n help='Output prefix')\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\ndef perf_gain(base, hypothesis):\r\n return 100 * (hypothesis - base) / base\r\n\r\n\r\ndef main():\r\n args = get_args()\r\n \r\n log_dir = os.path.abspath(args.log_dir)\r\n \r\n with open(args.labels, 'r') as fp:\r\n labels = json.load(fp)\r\n\r\n table = [['#, SchIC, CoMA, Performance Gain']]\r\n \r\n case = []\r\n schIC = []\r\n coma = []\r\n \r\n count = 1\r\n\r\n for label in labels:\r\n\r\n surtrac_summary = os.path.join(log_dir, label + '-uncoordinated.surtrac.summary.log')\r\n with open(surtrac_summary, 'r') as fp:\r\n for line in fp:\r\n if 'WaitingTime' in line:\r\n s = line.split(' ')\r\n surtrac_delay = float(s[2])\r\n schIC.append(surtrac_delay)\r\n\r\n heuristic_summary = os.path.join(log_dir, 
label + '-coordinated.heuristic.summary.log')\r\n with open(heuristic_summary, 'r') as fp:\r\n for line in fp:\r\n if 'WaitingTime' in line:\r\n s = line.split(' ')\r\n coma_delay = float(s[2])\r\n coma.append(coma_delay)\r\n gain = perf_gain(surtrac_delay, coma_delay)\r\n\r\n table.append([count, surtrac_delay, coma_delay, gain])\r\n \r\n case.append(count)\r\n count += 1\r\n\r\n with open('{}.{}.log'.format(args.output_prefix, time.strftime('%d.%m.%Y.%H.%M.%S')), 'w') as fp:\r\n print(tabulate.tabulate(table, headers='firstrow', tablefmt='grid'),\r\n file=fp)\r\n \r\n with open('{}.{}.json'.format(args.output_prefix, time.strftime('%d.%m.%Y.%H.%M.%S')), 'w') as fp:\r\n json.dump(table, fp, indent=4)\r\n\r\n plt.figure()\r\n lines = plt.plot(case, schIC, 'r--.', case, coma, 'b--.', alpha=0.7)\r\n plt.legend(lines, ['SchIC', 'CoMA'])\r\n plt.xlabel('Test Case')\r\n plt.ylabel('Average Delay')\r\n plt.title('SchIC vs CoMA')\r\n plt.savefig('{}.{}.png'.format(args.output_prefix, time.strftime('%d.%m.%Y.%H.%M.%S')))\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"utils/results/analyse_surtrac_results.py","file_name":"analyse_surtrac_results.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"4591448","text":"import requests\nimport json\nimport settings\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_callback_url():\n if settings.CALLBACK_IP_ADDRESS is not None:\n return \"http://{}/api/translation\".format(settings.CALLBACK_IP_ADDRESS)\n else:\n return settings.CALLBACK_URL\n\n\ndef send_request_to_unbabel(uid, source_text):\n \"\"\"\n Sends the translation to Unbabel Service to be processed\n\n :param uid: UID tp identify the message\n :param source_text: Text to be translated\n :return: Response from unbabel\n \"\"\"\n\n headers = {\n \"Authorization\": \"ApiKey {}:{}\".format(settings.UNBABEL_USERNAME, settings.UNBABEL_PASSWORD),\n \"Content-Type\": \"application/json\"\n }\n\n data = {\n \"text\": source_text,\n \"source_language\": settings.SOURCE_TRANSLATION_LANGUAGE,\n \"target_language\": settings.TARGET_TRANSLATION_LANGUAGE,\n \"callback_url\": get_callback_url(),\n \"uid\": uid\n }\n try:\n logger.info(\"Sending {} to {}\".format(str(data), settings.UNBABEL_TRANSLATION_URL))\n response = requests.post(settings.UNBABEL_TRANSLATION_URL, data=json.dumps(data), headers=headers)\n except Exception:\n response = None\n\n return response\n\n","sub_path":"app/core/unbabel.py","file_name":"unbabel.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"384523569","text":"import modules.Staff as Staff\nimport modules.WildlifeInfo as WildlifeInfo\nimport modules.WildlifeCare as WildlifeCare\nimport modules.SalesInformation as SalesInformation\nimport modules.Giftshop as Giftshop\nimport website.CustomerLogin as CustomerLogin\nimport website.ZooInformation as ZooInformation\nimport website.AlterZooInformation as AlterZooInformation\nimport data.Database as Database\n\nimport os\n\n#Created By: Marcus Taylor\n#Date: 03-16-21\n\n#Initial iteration of the ZIS by Marcus Taylor: implemented using command line interaction with the skeleton structure\n\n#Simple main method for the application start\ndef main():\n getUserInput(\"init\")\n\n#This method controls the state of the user interaction and changes depending on the entered information\ndef getUserInput(state):\n #Change 
state values to lower case and initialize clear to the os command\n state = state.lower()\n clear = lambda: os.system('cls')\n\n #Initializes the loop and starts the application\n if(state == \"init\"):\n clear()\n print(\"Welcome to the Zoo Information System\")\n userInput = \"\"\n looping = True\n while looping:\n userInput = input(\"Input staff or website to start using the service: \")\n userInput = userInput.lower()\n if(userInput == \"staff\" or userInput == \"website\"):\n looping = False\n\n getUserInput(userInput)\n\n elif(state == \"staff\"):\n portalInteraction(\"staff\", state, clear)\n \n elif(state == \"website\"):\n portalInteraction(\"website\", state, clear)\n \n#Object oriented implementation for code reusage.\n#This method is the program loop when a state is entered for either staff or website\ndef portalInteraction(name, state, clear):\n #Lists that will hold values for sending to the displayInfo method\n displayInfoValues = []\n displayInfoValues1 = []\n\n #loop control boolean, start of the main loop\n Outerloop = True\n while Outerloop:\n #Clean up the console while using the menu, works on windows\n clear()\n\n #Displays the module names\n displayInfoValues = [name, \"\"]\n displayInfo(displayInfoValues, state)\n\n #User input to select the module the user wants to use, Case Sensitive\n #Along with getting the user input for the module, the structure allows for the navigation of the menu when available\n userInput = input(\"Enter one of the above to use the service, or exit to return to the main menu, or close to exit completely \")\n if(str(userInput) == \"exit\"):\n getUserInput(\"init\")\n elif(str(userInput) == \"close\"):\n clear()\n break\n #Check if module is in the list, if it is, the next stage is entered\n displayInfoValues1 = [\"checkKey\", str(userInput)]\n if(displayInfo(displayInfoValues1, state)):\n #Loop control structure, by changing these values the menus can be traversed up and down\n Outerloop = False\n #Display the methods for the chosen module\n displayInfoValues = [name, str(userInput)]\n\n #The current module that is chosen is assigned to a key value for the dictionary\n key = str(userInput)\n #Loop control value\n innerLoop = True\n #The inner loop allows for the user to stay within the methods for the module without exiting out unless specified in the input\n while innerLoop:\n #Clean console and display methods for the active module\n clear()\n displayInfo(displayInfoValues, state)\n \n #Menu interaction\n userInput = input(\"Enter one of the above to use the service, or up to return to the menu above, or exit to return to the main menu, or close to exit completely \")\n if(str(userInput) == \"up\"):\n innerLoop = False\n Outerloop = True\n elif(str(userInput) == \"exit\"):\n getUserInput(\"init\")\n elif(str(userInput) == \"close\"):\n clear()\n break\n #Because the methods were turned into plain text versions of the methods so that they were clearer to the user this for loop takes any input and changes it to the correct case\n #By using enumerate(somelist) the index of the current value is also provided along with the current value within two variables\n tempString = \"\"\n for x, y in enumerate(userInput):\n if(x == 0):\n tempString += y.upper()\n else:\n tempString += y.lower()\n\n userInput = tempString\n #Check value for accuracy, if the value is accurate it is sent to the module method translation method where the method is called\n displayInfoValues1 = [\"checkValues\", key, str(userInput)]\n if(displayInfo(displayInfoValues1, 
state)):\n clear()\n methodInfoValues = [name, key, str(userInput)]\n getMethod(methodInfoValues)\n userInput = input(\"Enter next to do another task, up to go back one level, or exit to restart, or close to exit completely \")\n if(str(userInput) == \"next\"):\n pass\n elif(str(userInput) == \"up\"):\n innerLoop = False\n Outerloop = True\n elif(str(userInput) == \"exit\"):\n getUserInput(\"init\")\n elif(str(userInput) == \"close\"):\n clear()\n break\n\n# This method has lists/dictionaries that display their contents based on user input\ndef displayInfo(displayValue, portal):\n staffPortalNames = [\"Staff\", \"WildlifeInfo\", \"WildlifeCare\", \"SalesInformation\", \"GiftShop\"]\n staffMethodNames = {\"Staff\": [\"Look up staff\",\"Modify staff info\",\"Create staff\",\"Get schedule\",\"Create schedule\",\"Modify schedule\",\"Print schedule\",\"Get tasks\",\"Create tasks\",\"Modify tasks\",\"Print tasks\"],\n \"WildlifeInfo\": [\"Look up animal\",\"Modify wildlife info\",\"Create wildlife info\"],\n \"WildlifeCare\": [\"Get Animal info\",\"Care routine\",\"Create care routine\",\"Modify care routine\",\"Schedule\",\"Print\"],\n \"SalesInformation\": [\"Get ticket price\",\"Set ticket price\",\"Modify ticket price\",\"Get customer information\",\"Create customer information\",\"Modify customer information\",\"Create transaction\",\"Receipt printout\"],\n \"GiftShop\": [\"Display inventory\",\"Modify inventory\",\"Create inventory\",\"Get customer information\",\"Create customer information\",\"Modify customer information\",\"Create transaction\",\"Receipt printout\"]}\n\n websitePortalNames = [\"CustomerLogin\", \"ZooInformation\", \"AlterZooInformation\"]\n websiteMethodNames = {\"CustomerLogin\": [\"Authentication\", \"Get customer info\", \"Modify customer info\", \"Create customer info\"],\n \"ZooInformation\": [\"Current events\", \"Public message\", \"Operating hours\", \"Wildlife information\"],\n \"AlterZooInformation\": [\"Create current events\", \"Modify current events\", \"Create public message\", \"Modify public message\", \"Create operating hours\", \"Modify operating hours\"]}\n\n if(portal == \"staff\"):\n #Based on the current state of the software the output will be displayed for the module name, or the method names\n if(displayValue[0] == \"staff\" and displayValue[1] == \"\"):\n for x in staffPortalNames:\n print(x)\n elif(displayValue[0] == \"staff\" and displayValue[1] != \"\"):\n for x in staffMethodNames[displayValue[1]]:\n print(x)\n #Methods to check if the value is in the list or dictionary\n elif(displayValue[0] == \"checkKey\"):\n return displayValue[1] in staffPortalNames\n elif(displayValue[0] == \"checkValues\"):\n return displayValue[2] in staffMethodNames[displayValue[1]]\n\n elif(portal == \"website\"):\n #Based on the current state of the software the output will be displayed for the module name, or the method names\n if(displayValue[0] == \"website\" and displayValue[1] == \"\"):\n for x in websitePortalNames:\n print(x)\n elif(displayValue[0] == \"website\" and displayValue[1] != \"\"):\n for x in websiteMethodNames[displayValue[1]]:\n print(x)\n #Methods to check if the value is in the list or dictionary \n elif(displayValue[0] == \"checkKey\"):\n return displayValue[1] in websitePortalNames\n elif(displayValue[0] == \"checkValues\"):\n return displayValue[2] in websiteMethodNames[displayValue[1]]\n \n# This gets sent the information that the user wants to access for the methods of each module. 
It translates the plain text into the corresponding method\ndef getMethod(methodInfo):\n db = Database\n\n # dictionaries of the different modules to allow for object oriented implementation\n staffPortalModules = {\"Staff\": Staff.Staff(db), \n \"WildlifeInfo\": WildlifeInfo.WildlifeInfo(db),\n \"WildlifeCare\": WildlifeCare.WildlifeCare(db),\n \"SalesInformation\": SalesInformation.SalesInformation(db),\n \"GiftShop\": Giftshop.GiftShop(db)}\n webPortalModules = {\"CustomerLogin\": CustomerLogin.CustomerLogin(db),\n \"ZooInformation\": ZooInformation.ZooInformation(db),\n \"AlterZooInformation\": AlterZooInformation.AlterZooInformation(db)}\n\n #Values for methodInfo are [Portal, Key, Value]\n if(methodInfo[0] == \"staff\"):\n if(methodInfo[1] == \"Staff\"):\n if(methodInfo[2] == \"Look up staff\"):\n staffPortalModules[methodInfo[1]].lookUpStaff()\n elif(methodInfo[2] == \"Modify staff info\"):\n staffPortalModules[methodInfo[1]].modifyStaffInfo()\n elif(methodInfo[2] == \"Create staff\"):\n staffPortalModules[methodInfo[1]].createStaff()\n elif(methodInfo[2] == \"Get schedule\"):\n staffPortalModules[methodInfo[1]].getSchedule()\n elif(methodInfo[2] == \"Create schedule\"):\n staffPortalModules[methodInfo[1]].createSchedule()\n elif(methodInfo[2] == \"Modify schedule\"):\n staffPortalModules[methodInfo[1]].modifySchedule()\n elif(methodInfo[2] == \"Print schedule\"):\n staffPortalModules[methodInfo[1]].printSchedule()\n elif(methodInfo[2] == \"Get tasks\"):\n staffPortalModules[methodInfo[1]].getTasks()\n elif(methodInfo[2] == \"Create tasks\"):\n staffPortalModules[methodInfo[1]].createTasks()\n elif(methodInfo[2] == \"Modify tasks\"):\n staffPortalModules[methodInfo[1]].modifyTasks()\n elif(methodInfo[2] == \"Print tasks\"):\n staffPortalModules[methodInfo[1]].printTasks() \n elif(methodInfo[1] == \"WildlifeInfo\"):\n if(methodInfo[2] == \"Look up animal\"):\n staffPortalModules[methodInfo[1]].lookUp()\n elif(methodInfo[2] == \"Modify wildlife info\"):\n staffPortalModules[methodInfo[1]].modifyWildlifeInfo()\n elif(methodInfo[2] == \"Create wildlife info\"):\n staffPortalModules[methodInfo[1]].createWildlifeInfo()\n elif(methodInfo[1] == \"WildlifeCare\"):\n if(methodInfo[2] == \"Get Animal info\"):\n staffPortalModules[methodInfo[1]].animalInfo()\n elif(methodInfo[2] == \"Care routine\"):\n staffPortalModules[methodInfo[1]].careRoutine()\n elif(methodInfo[2] == \"Create care routine\"):\n staffPortalModules[methodInfo[1]].createCareRoutine()\n elif(methodInfo[2] == \"Modify care routine\"):\n staffPortalModules[methodInfo[1]].modifyCareRoutine()\n elif(methodInfo[2] == \"Schedule\"):\n staffPortalModules[methodInfo[1]].schedule()\n elif(methodInfo[2] == \"Print\"):\n staffPortalModules[methodInfo[1]].print()\n elif(methodInfo[1] == \"SalesInformation\"):\n if(methodInfo[2] == \"Get ticket price\"):\n staffPortalModules[methodInfo[1]].getTicketPrice()\n elif(methodInfo[2] == \"Set ticket price\"):\n staffPortalModules[methodInfo[1]].setTicketPrice()\n elif(methodInfo[2] == \"Modify ticket price\"):\n staffPortalModules[methodInfo[1]].modifyTicketPrice()\n elif(methodInfo[2] == \"Get customer information\"):\n staffPortalModules[methodInfo[1]].getCustomerInformation()\n elif(methodInfo[2] == \"Create customer information\"):\n staffPortalModules[methodInfo[1]].createCustomerInformation()\n elif(methodInfo[2] == \"Modify customer information\"):\n staffPortalModules[methodInfo[1]].modifyCustomerInformation()\n elif(methodInfo[2] == \"Create transaction\"):\n 
staffPortalModules[methodInfo[1]].createTransaction()\n elif(methodInfo[2] == \"Receipt printout\"):\n staffPortalModules[methodInfo[1]].receiptPrintout()\n elif(methodInfo[1] == \"GiftShop\"):\n if(methodInfo[2] == \"Display inventory\"):\n staffPortalModules[methodInfo[1]].displayInventory()\n elif(methodInfo[2] == \"Modify inventory\"):\n staffPortalModules[methodInfo[1]].modifyInventory()\n elif(methodInfo[2] == \"Create inventory\"):\n staffPortalModules[methodInfo[1]].createInventory()\n elif(methodInfo[2] == \"Get customer information\"):\n staffPortalModules[methodInfo[1]].getCustomerInformation()\n elif(methodInfo[2] == \"Create customer information\"):\n staffPortalModules[methodInfo[1]].createCustomerInformation()\n elif(methodInfo[2] == \"Modify customer information\"):\n staffPortalModules[methodInfo[1]].modifyCustomerInformation()\n elif(methodInfo[2] == \"Create transaction\"):\n staffPortalModules[methodInfo[1]].createTransaction()\n elif(methodInfo[2] == \"Receipt printout\"):\n staffPortalModules[methodInfo[1]].receiptPrintout() \n elif(methodInfo[0] == \"website\"):\n if(methodInfo[1] == \"CustomerLogin\"):\n if(methodInfo[2] == \"Authentication\"):\n webPortalModules[methodInfo[1]].authentication()\n elif(methodInfo[2] == \"Get customer info\"):\n webPortalModules[methodInfo[1]].getCustomerInfo()\n elif(methodInfo[2] == \"Modify customer info\"):\n webPortalModules[methodInfo[1]].modifyCustomerInfo()\n elif(methodInfo[2] == \"Create customer info\"):\n webPortalModules[methodInfo[1]].createCustomerInfo()\n elif(methodInfo[1] == \"ZooInformation\"):\n if(methodInfo[2] == \"Current events\"):\n webPortalModules[methodInfo[1]].currentEvents()\n elif(methodInfo[2] == \"Public message\"):\n webPortalModules[methodInfo[1]].publicMessage()\n elif(methodInfo[2] == \"Operating hours\"):\n webPortalModules[methodInfo[1]].operatingHours()\n elif(methodInfo[2] == \"Wildlife information\"):\n webPortalModules[methodInfo[1]].wildlifeInformation()\n elif(methodInfo[1] == \"AlterZooInformation\"):\n if(methodInfo[2] == \"Create current events\"):\n webPortalModules[methodInfo[1]].createCurrentEvents()\n elif(methodInfo[2] == \"Modify current events\"):\n webPortalModules[methodInfo[1]].modifyCurrentEvents()\n elif(methodInfo[2] == \"Create public message\"):\n webPortalModules[methodInfo[1]].createPublicMessage()\n elif(methodInfo[2] == \"Modify public message\"):\n webPortalModules[methodInfo[1]].modifyPublicMessage()\n elif(methodInfo[2] == \"Create operating hours\"):\n webPortalModules[methodInfo[1]].createOperatingHours()\n elif(methodInfo[2] == \"Modify operating hours\"):\n webPortalModules[methodInfo[1]].modifyOperatingHours()\n \n\nif __name__ == \"__main__\":\n main()","sub_path":"ZIS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"213030339","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 2 20:30:03 2020\n\n@author: sukarno\n\"\"\"\n\nimport sys\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QApplication, QDialog, QPushButton, QLineEdit, QHBoxLayout\n\nclass Form(QDialog):\n def __init__ (self, parent=None):\n super(Form,self).__init__(parent)\n self.setWindowTitle(\"My Form\")\n self.initUI()\n \n def initUI(self):\n self.btn = QPushButton(\"Send\")\n self.btn.clicked.connect(self.click_handle)\n \n self.edit = QLineEdit(\"Wirite text here...\")\n \n hlayout = QHBoxLayout()\n 
hlayout.addWidget(self.edit)\n        hlayout.addWidget(self.btn)\n        \n        \n        self.setLayout(hlayout)\n        \n    @pyqtSlot()\n    def click_handle(self):\n        print(self.edit.text())\n    \n    \nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    form = Form()\n    form.show()\n    sys.exit(app.exec_())\n    \n    ","sub_path":"tutorial03/tutorial03_pyqt5.py","file_name":"tutorial03_pyqt5.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"413404956","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 2 21:13:44 2018\n\n@author: Administrator\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Function that builds one neural network layer\ndef add_layer(inputs,in_size,out_size,activation_function=None):\n    with tf.name_scope('layer'):\n        with tf.name_scope('weightes'):\n            Weights=tf.Variable(tf.random_normal([in_size,out_size])) \n        with tf.name_scope('biases'):\n            biases=tf.Variable(tf.zeros([1,out_size])+0.1)\n        with tf.name_scope('Wx_plus_b'):\n            Wx_plus_b=tf.matmul(inputs,Weights)+biases\n        if activation_function is None:\n            outputs=Wx_plus_b\n        else:\n            outputs=activation_function(Wx_plus_b)\n        return outputs\n\n# Import the data\nx_data=np.linspace(-1,1,300,dtype=np.float32)[:,np.newaxis]\nnoise=np.random.normal(0,0.05,x_data.shape).astype(np.float32)\ny_data=np.square(x_data)-0.5+noise\n\nwith tf.name_scope('inputs'):\n    xs=tf.placeholder(tf.float32,[None,1],name='x_input')\n    ys=tf.placeholder(tf.float32,[None,1],name='y_input')\n\n# Build the network\nl1=add_layer(xs,1,10,activation_function=tf.nn.relu)\npredication=add_layer(l1,10,1,activation_function=None)\nwith tf.name_scope('loss'):\n    loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-predication),\n                        reduction_indices=[1]))\n\n#train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)\nwith tf.name_scope('train_step'):\n    train_step=tf.train.AdamOptimizer(0.1).minimize(loss)\n\n# Initialize the variables\ninit=tf.global_variables_initializer()\nsess=tf.Session()\nwriter=tf.summary.FileWriter(\"logs/\",sess.graph)\nsess.run(init)\n\n# Show the real data with a scatter plot\nfig=plt.figure()\nax=fig.add_subplot(1,1,1)\nax.scatter(x_data,y_data)\nplt.ion() # for continuous plotting\n#plt.show()\n\n# Training\nfor i in range(1000):\n    sess.run(train_step,feed_dict={xs:x_data,ys:y_data})\n    if i % 50 ==0:\n        try:\n            ax.lines.remove(lines[0])\n        except Exception:\n            pass\n        predication_value=sess.run(predication,feed_dict={xs:x_data})\n        lines=ax.plot(x_data,predication_value,'r-',lw=5)\n        plt.pause(0.1)\n#    print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))\n    ","sub_path":"Archive/cwz/tensorboard/tensorboard_example.py","file_name":"tensorboard_example.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450964043","text":"# coding=utf-8\n#********************************************************\n# > OS : Windows / Cent OS 6.5\n#\t> Author : JasonGUTU\n#\t> Mail : intergujinjin@foxmail.com\n#\t> Time : 2015/12/30\n#********************************************************\n\n# Print the calendar for a month in a year\ndef printMonth(year,month):\n\t# Print the Title of month\n\tprintMonthTitle(year,month)\n\t# Print the body of this month\n\tprintMonthBody(year,month)\n\n# Print the Title :e.g., May 1999\ndef printMonthTitle(year,month):\n\tprint(\" \",getMonthName(month),\" \",year)\n\tprint(\"====================================\")\n\tprint(\" Sun Mon Tue Wed Thu Fri Sat\")\n\tprint(\"====================================\")\n\n# Print month body\ndef 
printMonthBody(year,month):\n\t# Get start day of the week for the date in the month\n\tstartDay = getStartDay(year,month)\n\t# Get number of days in month\n\tnumberOfDaysInMonth = getNumberOfDaysInMonth(year,month)\n\t# Pad space before the first day of the month\n\tfor i in range(0,startDay):\n\t\tprint(\" \",end = \" \")\n\tfor i in range(1,numberOfDaysInMonth + 1):\n\t\tprint(format(i,\"4d\"),end = \" \")\n\t\tif (i + startDay) % 7 == 0:\n\t\t\tprint()# Jump to the new line\n\n# Get the name of month\ndef getMonthName(month):\n\tif month == 1:\n\t\tmonthName = \"January\"\n\telif month == 2:\n\t\tmonthName = \"February\"\n\telif month == 3:\n\t\tmonthName = \"March\"\n\telif month == 4:\n\t\tmonthName = \"April\"\n\telif month == 5:\n\t\tmonthName = \"May\"\n\telif month == 6:\n\t\tmonthName = \"June\"\n\telif month == 7:\n\t\tmonthName = \"July\"\n\telif month == 8:\n\t\tmonthName = \"August\"\n\telif month == 9:\n\t\tmonthName = \"September\"\n\telif month == 10:\n\t\tmonthName = \"October\"\n\telif month == 11:\n\t\tmonthName = \"November\"\n\telif month == 12:\n\t\tmonthName = \"December\"\n\treturn monthName\n\n# Get start day of month/1/year\ndef getStartDay(year,month):\n\tSTART_DAY_FOR_JAN_1_1800 = 3\n\t# Get total number of days from 1/1/1800 to month/1/year\n\ttotalNumberOfDays = getTotalNumberOfDays(year,month)\n\treturn (totalNumberOfDays + START_DAY_FOR_JAN_1_1800) % 7\n\n# Get the totla number of days since 1/1/1800\ndef getTotalNumberOfDays(year,month):\n\ttotal = 0\n\n\t# Get the total days from 1/1/1800 to 1/1/year\n\tfor i in range(1800,year):\n\t\tif isLeapYear(i):\n\t\t\ttotal = total + 366\n\t\telse:\n\t\t\ttotal = total + 365\n\t# Add days from Jan to the month prior to the calendar month\n\tfor i in range(1,month):\n\t\ttotal = total + getNumberOfDaysInMonth(year,i)\n\n\treturn total\n# Get the number of day in month\ndef getNumberOfDaysInMonth(year,month):\n\tif (month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12):\n\t\treturn 31\n\tif (month == 4 or month == 6 or month == 9 or month == 11):\n\t\treturn 30\n\tif month == 2:\n\t\tif isLeapYear(year):\n\t\t\treturn 29\n\t\telse:\n\t\t\treturn 28\n\n# Determine the year\ndef isLeapYear(year):\n\treturn year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)\n\ndef main():\n\t# Get input\n\tyear = eval(input(\"Enter the year :\"))\n\tmonth = eval(input(\"Enter the month :\"))\n\tprintMonth(year,month)\n# Call the main\nmain()\n","sub_path":"outputMonth.py","file_name":"outputMonth.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"282265624","text":"#! 
/home/a_filipchyk/soft/home/a_filipchyk/anaconda3/bin/python\n'''Finds and explores differentially expressed genes'''\n\nimport argparse\nimport os\nimport sys\nfrom collections import defaultdict\nfrom itertools import combinations\nfrom multiprocessing.dummy import Pool\n\nimport numpy as np;\nimport matplotlib.pyplot as plt;\nfrom scipy.stats import fisher_exact, variation, pearsonr\n\nfrom afbio.LRGFDR import lrg\n\n#from pybedtools import BedTool\n\nparser = argparse.ArgumentParser(description='Finds and explores differentially expressed genes');\nparser.add_argument('path', metavar = 'N', nargs = '?', type = str, help = \"Path to the gene expression file, tsv format (compile_expression.py output)\");\nparser.add_argument('--minexpr', nargs = '?', default=0.001, type = float, help = \"Minimum required expression for the differential analysis\");\nparser.add_argument('--fdr', nargs = '?', default=0.05, type = float, help = \"Threshold for the local empirical false discovery rate\");\nparser.add_argument('--plot', nargs = '?', required=True, type = str, help = \"Output directory for the plots\");\nparser.add_argument('--labelnames', nargs = '?' , type = str, help = \"Path to the file with gene names to be labeled on loglog plot\");\nargs = parser.parse_args();\n\n\n###########################################################################################################################################################\n###PARSE INPUT FILES\n\nbasename = os.path.basename(args.path).split(\".\")[0]\n\ngenenames = [];\nexpressions = [];\nmisfits = []\n\nwith open(args.path) as f:\n header = next(f).strip().split(\"\\t\")\n ratiostr = \"%s/%s\" % (header[2], header[1])\n for l in f:\n a = l.strip().split(\"\\t\")\n s1 = [float(x) for x in a[1].split(\";\")]\n s2 = [float(x) for x in a[2].split(\";\")]\n if(all([x>args.minexpr for x in s1+s2])):\n genenames.append(a[0]);\n expressions.append((s1, s2));\n else:\n misfits.append((a[0], s1, s2))\n\nlabelnames = [];\nif(args.labelnames):\n with open(args.labelnames) as f:\n for l in f:\n labelnames.append(l.strip());\n \n\n\n###########################################################################################################################################################\n###Assign and explore intra- and inter-sample VARIATIONS\n\ndef get_variation(expression):\n return variation(expression[0]), variation(expression[1]), variation(expression[0] + expression[1])\n\nvariations = np.array([get_variation(x) for x in expressions])\nvariations = variations.transpose()\n \n#Plot variations\nplt.style.use('seaborn-deep')\nbins = np.linspace(0,1,51)\n\nplt.hist([variations[0], variations[2]], bins, label=['intra', 'inter'])\nplt.legend(loc='upper right')\nplt.savefig(os.path.join(args.plot, '%s.variations_1.png' % basename), format = 'png')\nplt.clf()\n\nplt.hist([variations[1], variations[2]], bins, label=['intra', 'inter'])\nplt.legend(loc='upper right')\nplt.savefig(os.path.join(args.plot, '%s.variations_2.png' % basename), format = 'png')\nplt.clf()\n\n###########################################################################################################################################################\n### Assign and explore intra- and inter-sample FOLD CHANGES\n\ndef localfold(a1, a2):\n return (a2-a1)/(a2+a1)\n\ndef get_fold(expression):\n return localfold(*expression[0]), localfold(*expression[1]), localfold(sum(expression[0]), sum(expression[1]))\n\ndef norm_fold_2_log_fold(fold):\n return np.log2( (fold+1)/(1-fold) )\n\nfolds = 
np.array([get_fold(x) for x in expressions])\nfolds = folds.transpose()\n \n#Plot folds\nplt.style.use('seaborn-deep')\nbins = np.linspace(-1,1,51)\n\nplt.hist([folds[0], folds[2]], bins, label=['intra', 'inter'])\nplt.legend(loc='upper right')\nplt.savefig(os.path.join(args.plot, '%s.folds_1.png' % basename), format = 'png')\nplt.clf()\n\nplt.hist([folds[1], folds[2]], bins, label=['intra', 'inter'])\nplt.legend(loc='upper right')\nplt.savefig(os.path.join(args.plot, '%s.folds_2.png' % basename), format = 'png')\nplt.clf()\n\n\n###########################################################################################################################################################\n#GET COMPOSITE THRESHOLDS based on fold change and expression\n\n\nnoise_fold = [max( abs(x[0]), abs(x[1]) ) for x in zip(folds[0], folds[1])]\nsignal_fold = [abs(x) for x in folds[2]]\nlogfolds = [norm_fold_2_log_fold(x) for x in folds[2]]\n\ncommon_expr = [np.log2( (max(x[0]) + max(x[1]))/2.0 + 1) for x in expressions]\nnoise_total = list(zip(noise_fold, common_expr))\nsignal_total = list(zip(signal_fold, common_expr))\n\ndef get_fold_cutoff(signal, noise, fdr):\n threshold = 1.01;\n for t in np.linspace(0,1,1001):\n fs = len([x for x in signal if x > t])\n fn = len([x for x in noise if x > t])\n if(not fs):\n return threshold, fs, fn\n p = fn/(fs+fn);\n if(pl1 and x[1]<=l2]\n noise = [x[0] for x in noise_total if x[1]>l1 and x[1]<=l2]\n threshold, ps, pn = get_fold_cutoff(signal, noise, args.fdr)\n sys.stderr.write(\"%.1f\\t%.1f\\t%.2f\\t%d\\t%d\\t%d\\t%d\\n\" % (l1, l2, threshold, len(signal), ps, len(noise), pn) )\n thresholds.append((l1, l2, threshold))\n \nlboundary = levels[0];\nfor l1, l2, t in thresholds:\n if(t<1):\n lboundary = l1;\n break;\n \n#print(lboundary)\n#sys.exit()\n\n\n\n#upper_signal = [x[0] for x in signal_total if x[1]>lboundary]\n#upper_noise = [x[0] for x in noise_total if x[1]>lboundary]\n#basal_level = get_fold_cutoff(upper_signal, upper_noise, args.fdr)[0]\n\ndef trline(expr, lboundary, factor, basal_level):\n if(expr>lboundary):\n return 2**(-factor*expr)+basal_level\n else:\n return 1;\n\ndef estimate_fdr_for_trline(signal, noise, factor, basal_level):\n fs = len([x for x in signal if x[0] > trline(x[1], lboundary, factor, basal_level)])\n fn = len([x for x in noise if x[0] > trline(x[1], lboundary, factor, basal_level) ])\n if(fn):\n return fs, fn, fn/(fs+fn)\n else:\n return fs, 0, 0;\n \n \n\nbasal_level = list(sorted([x[2] for x in thresholds]))[0]\nroc = [];\nfor factor in np.linspace(0, 2, 41):\n fs, fn, fdr= estimate_fdr_for_trline(signal_total, noise_total, factor, basal_level)\n #print(\"%.2f\\t%d\\t%d\\t%.3f\" % (factor, fs, fn, fdr))\n roc.append((fs/len(signal_total), fdr, factor))\n\n#rough strategy so far\nsensitivity, empirical_fdr, bestfactor = max([x for x in roc if x[1]=l1 and x[1]=l1 and x[1]tr)\n\nscatter = []\ntextlabels = [];\n\nprint(\"\\t\".join(header + [\"log2(%s)\" % ratiostr, \"normed_fold\", \"diff\"]))\nfor name, expression, logfold, normed_fold, common_expr in zip(genenames, expressions, logfolds, folds[2], common_expr):\n e1 = \";\".join([str(x) for x in expression[0]])\n e2 = \";\".join([str(x) for x in expression[1]])\n diff = check_diff(abs(normed_fold), common_expr)\n print(\"%s\\t%s\\t%s\\t%1.3f\\t%1.3f\\t%d\" % (name, e1, e2, logfold, normed_fold, diff))\n scatter.append((common_expr, logfold, diff))\n \nfor name, wt_values, ko_values in misfits:\n e1 = \";\".join([str(x) for x in wt_values])\n e2 = \";\".join([str(x) for x in ko_values])\n 
wt = np.mean(wt_values)\n    ko = np.mean(ko_values)\n    if(wt+ko):\n        localfold = (ko-wt)/(wt+ko)\n    else:\n        localfold = 0;\n    if(wt and ko):\n        logfold = np.log2(ko/wt)\n    elif(ko):\n        logfold = 999999\n    elif(wt):\n        logfold = -999999\n    else:\n        logfold = 0;\n    print(\"%s\\t%s\\t%s\\t%1.3f\\t%1.3f\\t%d\" % (name, e1, e2, logfold, localfold, 0))\n \n \n \n\n    if(name in labelnames):\n        textlabels.append((name, common_expr, logfold))\n\n\n\n\n\n\n\n \n###generate loglog plot\nscatter1 = np.array([(x[0], x[1]) for x in scatter if not x[-1]]);\nscatter2 = np.array([(x[0], x[1]) for x in scatter if x[-1]]);\nsys.stderr.write(\"\\n%d genes are differentially expressed\\n\" % scatter2.shape[0])\n\nfig, ax = plt.subplots(figsize=(16, 9))\nplt.plot(scatter1[:,0], scatter1[:,1], marker = 'o', markerfacecolor='lightblue', markeredgecolor='lightblue', linestyle = \"None\", markersize = 4, label = 'non-differential');\nplt.plot(scatter2[:,0], scatter2[:,1], marker = 'o', markerfacecolor='coral', markeredgecolor='coral', linestyle = \"None\", markersize = 4, label = \"differential\");\nplt.ylabel(\"log2(%s)\" % ratiostr);\nplt.xlabel(\"log2(TPM)\")\nplt.legend(frameon=False, fontsize='xx-large')\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nplt.title(\"Expression vs Fold Change\")\n\nfor text, x, y in textlabels:\n    ax.text(x, y, text);\n\nfor item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):\n    item.set_fontsize('xx-large')\n\n\nplt.savefig(os.path.join(args.plot, '%s.scatter.eps' % basename), format = 'eps')\nplt.clf()\n\n\n###Generate scatter expression plot\nyvals = [np.log2(sum(x[0])+1) for x in expressions]\nxvals = [np.log2(sum(x[1])+1) for x in expressions]\nfig, ax = plt.subplots(figsize=(16, 9))\nplt.plot(xvals, yvals, marker = 'o', markerfacecolor='lightblue', markeredgecolor='lightblue', linestyle = \"None\", markersize = 4);\nplt.xlabel(\"log2(%s)\" % header[1]);\nplt.ylabel(\"log2(%s)\" % header[2]);\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nplt.title(\"%s vs %s\" % tuple(header[1:3]))\npcorr_log = pearsonr(xvals, yvals)[0]\npcorr = pearsonr([sum(x[0]) for x in expressions], [sum(x[1]) for x in expressions])[0]\nsys.stderr.write(\"\\nLog2 correlation coefficient between %s and %s is equal %.3f\\n\" % (header[1], header[2], pcorr_log))\nsys.stderr.write(\"\\nCorrelation coefficient between %s and %s is equal %.3f\\n\" % (header[1], header[2], pcorr))\nplt.text(0.1, 0.8, 'r(log)=%.2f\\nr=%.2f' % (pcorr_log, pcorr), transform=ax.transAxes, fontsize = 'xx-large')\nfor item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):\n    item.set_fontsize('xx-large')\nplt.savefig(os.path.join(args.plot, '%s.loglog.eps' % basename), format = 'eps')\nplt.clf()\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"rnaseq/analyse_differential_gene_expression.py","file_name":"analyse_differential_gene_expression.py","file_ext":"py","file_size_in_byte":11293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"25221639","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 1 00:21:14 2019\r\n\r\n@author: hfyjc\r\n\"\"\"\r\n\r\ndef windowsum(l,k):\r\n    ans=[]\r\n    for i in range(len(l)-k+1):\r\n        ans.append(sum(l[i:i+k]))\r\n    return(ans)\r\n\r\nprint(windowsum([0,1,2,3,4,5,6,7,8,9],3))\r\n 
","sub_path":"Egbert/Algorithm/Algorithm/out/production/untitled104/AmazonOA/amazonoa2/windowsum.py","file_name":"windowsum.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"528682151","text":"import json\nimport requests\nfrom amonpy.exceptions import ConnectionException\nfrom amonpy.config import config\nimport datetime\n\nclass AmonAPI(object):\n\n headers = {\"Content-type\": \"application/json\"}\n\n errors = {'connection': 'Could not establish connection to the Amon API.\\\n\t\t\tPlease ensure that the web application is running'}\n\n def jsonify(self, data):\n return json.dumps(data)\n\n def _post(self, url, data, headers=None):\n\n headers = headers if headers else self.headers\n \n # Log the data to file \n if config.file:\n now = datetime.datetime.now()\n string = 'date: \"{0}\", data: {1}\\n'.format(str(now)[:19], str(data))\n \n try:\n with open(config.file, 'a+') as f:\n f.write(string)\n except:\n pass\n\n # Append the application key if present\n if config.application_key:\n url = \"{0}?key={1}\".format(url, config.application_key)\n \n # Don't post the data if offline is true\n if not config.offline:\n r = requests.post(url, data, headers=headers, timeout=5)\n\n if r.status_code != 200:\n raise ConnectionException(self.errors['connection'])\n else:\n return 'ok'\n\n\nclass Log(AmonAPI):\n\n def __call__(self, message, tags='notset'):\n url = config.connection_url() + '/api/log'\n\n log_data = {}\n log_data['message'] = message\n log_data['tags'] = tags\n\n data = self.jsonify(log_data)\n\n return self._post(url, data)\n\n# Shortcuts\n# import amonpy\n# amonpy.log(message, level='')\nlog = Log()\n\nclass Exception(AmonAPI):\n\n def __call__(self, data):\n data = self.jsonify(data)\n url = config.connection_url() + '/api/exception'\n\n return self._post(url, data)\n\n# Shortcut\n# import amonpy\n# amonpy.exception()\nexception = Exception()\n","sub_path":"amonpy/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"408880626","text":"from django.conf.urls.defaults import patterns, url, include\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n\n (r'^log/', include('requestlog.urls')),\n (r'^admin/', include(admin.site.urls)),\n\n # Pass anything that doesn't match on to the mrs app\n url(r'^',\n include('moca.mrs.urls')),\n\n)\n\nfrom django.conf import settings\nif settings.DEBUG:\n urlpatterns += patterns(\n '',\n (r'^static/(?P.*)$',\n 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT}),\n )\n","sub_path":"moca/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"219955365","text":"from selenium import webdriver\n\n## 如果是chrome浏览器的驱动\n#driver = webdriver.Chrome(\"G:\\Anaconda3-5.3.0\\chromedriver.exe\")\n\n##如果是firefox浏览器的驱动\n#driver = webdriver.Firefox(executable_path=\"G:\\Anaconda3-5.3.0\\geckodriver.exe\")\n\n######如果浏览器驱动的目录加入了环境变量的话\n\n## 如果是chrome浏览器的驱动\ndriver = webdriver.Chrome()\ndriver.get(url='https://www.baidu.com')\ndriver.find_element_by_xpath(\"//select[@id='9560af43bfc949c4826d329c352e4eb6_class']\").select_by_index(4) # 定位公共互联网环境\n\n##如果是firefox浏览器的驱动\n#driver = 
webdriver.Firefox()\n\ndriver.find_element_by_xpath(\"//html/body/form/input[1]\")\n\ndriver.find_element_by_xpath(\"//form/input\")\n\n# 用相对路径和属性进行定位,form标签下的input标签的name值等于username的标签\ndriver.find_element_by_xpath(\"//form/input[@name='username']\")\n\ndriver.find_element_by_xpath(\"//a[contains(@href,'login')]\")\n\n# 切换到指定的iframe框架\ndriver.switch_to.frame(\"mainFrame\") # 切换iframe框架\ndriver.switch_to.default_content() # 切换到主框架\n","sub_path":"test_selenium.py","file_name":"test_selenium.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"78745897","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nfrom filehandler import response\nfrom extraFunctions import initials\nimport os\ndir_path,express_dict,mimeTypes,host,port,wildcard,project_dir,encryption,logging = initials()\n#HTTPRequestHandler class\n\n\nclass testHTTPServer_RequestHandler(BaseHTTPRequestHandler):\t\t\n\t#GET\n\tdef do_GET(self):\n\t\tprint(self.path)\n\t\tqueryString = self.path.split(\"?\")\n\t\tprint(queryString)\n\t\texpress = express_dict.get(queryString[0],False)\n\t\tif express:\n\t\t\trequested_file = dir_path+project_dir+express\n\t\telse:\n\t\t\trequested_file = dir_path+project_dir+queryString[0]\n\t\t\n\t\t\t\n\t\tif len(queryString)>1:\n\t\t\tdata = str(queryString[1])\n\t\telse:data = \"\"\n\t\tprint(\"Get::requested file is \",requested_file)\n\t\tcontent,mimeType,errorCode = response(requested_file,data,mimeTypes)\n\t\tself.send_response(errorCode)\n\t\tself.send_header('content-type',mimeType)\n\t\tself.end_headers()\n\t\ttry:\n\t\t\tself.wfile.write(content)\n\t\texcept TypeError:\n\t\t\tself.wfile.write(bytes(content,'utf-8'))\n\t\treturn\n\t\n\tdef do_POST(self):\n\t\tprint(self.path)\n\t\texpress = express_dict.get(self.path,False)\n\t\tif express:\n\t\t\trequested_file = dir_path+project_dir+express\n\t\telse:\n\t\t\trequested_file = dir_path+project_dir+self.path\n\t\tprint(\"Post::requested file is \",requested_file)\n\t\t#content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n\t\tdata = str(self.rfile.read(int(self.headers['Content-Length'])),'utf-8')# <--- Gets the data itself\n\t\tprint(\"posted data was::: \",data)\n\t\tcontent,mimeType,errorCode = response(requested_file,data,mimeTypes)\n\t\tself.send_response(errorCode)\n\t\tself.send_header('content-type',mimeType)\n\t\tself.end_headers()\n\t\ttry:\n\t\t\tself.wfile.write(content)\n\t\texcept TypeError:\n\t\t\tself.wfile.write(bytes(content,'utf-8'))\n\t\treturn\t\t\n\t\ndef run():\n\tprint(\"Starting server ...\")\n\tserver_address = (host,port)\n\thttpd = HTTPServer(server_address,testHTTPServer_RequestHandler)\n\tprint(\"running server at \",server_address)\n\thttpd.serve_forever()\n\t\n\t\nrun()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"173026886","text":"import numpy as np\nimport cv2 as cv\n\nfrom sklearn.cluster import KMeans\nimport random\nimport utils\n\n\n# referenced impl, fast\ndef generating_patches_rep(image, step, patch):\n    sift = cv.SIFT_create()\n    _, ret = sift.detectAndCompute(image, None)\n    return ret\n\n# naive impl, maybe wrong and slow\ndef generating_patches_rep_hand_coded(image, step, patch):\n    # first use gaussian to smooth the image\n    image = cv.GaussianBlur(image, (5,5), 0)\n    # Then calculate the norm and direction of 
gradients\n sobelx = cv.Sobel(image, cv.CV_64F, 1, 0, ksize=5)\n sobelx = np.mean(sobelx, axis = 2)\n sobely = cv.Sobel(image, cv.CV_64F, 0, 1, ksize=5)\n sobely = np.mean(sobely, axis = 2)\n gradient_norm = np.sqrt(sobelx**2 + sobely**2)\n gradient_dir = np.round((np.arctan2(sobelx, sobely) + np.pi) * 180/ np.pi /45)\\\n .astype(np.int32)\n # some may be round up to 8, but it is 0 actually\n gradient_dir[gradient_dir == 8] = 0\n \n patches = []\n sub_size = int(patch/4)\n x, y, _ = image.shape\n i, j = 0, 0\n # calculate the subpatches\n subpatches = np.ones(np.floor(np.array([x/sub_size, y/sub_size, 8])).astype(np.int32))\n a, b, _ = subpatches.shape\n for i in range(a):\n for j in range(b):\n # extract the dirs and norms of gradients in this local region\n # flatten it\n subgradient_dir = gradient_dir[i*sub_size:(i+1)*sub_size,\n j*sub_size:(j+1)*sub_size]\n subgradient_dir = subgradient_dir.reshape(-1)\n subgradient_norm = gradient_norm[i*sub_size:(i+1)*sub_size,\n j*sub_size:(j+1)*sub_size]\n subgradient_norm = subgradient_norm.reshape(-1)\n # since some are added multiple times, use np.add.at for correct results\n np.add.at(subpatches[i,j], subgradient_dir, subgradient_norm)\n # normalize\n subpatches[i,j,:] /= np.sum(subpatches[i,j,:])\n xx = int(np.floor((x - patch)/step + 1))\n yy = int(np.floor((y - patch)/step + 1))\n for i in range(xx):\n for j in range(yy):\n patch_vec = np.zeros(128)\n for k in range(4):\n for l in range(4):\n patch_vec[8*(4*k+l):8*(4*k+l+1)] = subpatches[2*i+k,2*j+l,:]\n patches.append(patch_vec)\n return patches\n\ndef generate_kmeans_model(data, dim, patch_size, step_size, \n number_of_samples, verbose = False):\n \n k = 0\n all_patches = []\n for i in range(0, len(data)):\n patches = generating_patches_rep(data[i], patch_size, \n step_size)\n try:\n k += patches.shape[0]\n except AttributeError as e:\n continue\n \n all_patches.append(patches)\n \n # make the patches an ndarray\n all_patches_arr = np.zeros([k, 128])\n k = 0\n for i in all_patches:\n l = i.shape[0]\n all_patches_arr[k:k+l, :] = i\n k += l\n \n # random sample the indices\n all_patches = np.random.choice(all_patches_arr.shape[0], number_of_samples, \n replace = False)\n \n all_patches_arr = all_patches_arr[all_patches]\n \n kmeans = KMeans(n_clusters=dim, random_state=0,\n verbose = verbose * 2).fit(all_patches_arr)\n return kmeans\n\n# feature function uninitialized\ndef feature_function_model_unfeeded(image, dim, step, batch, kmeans_model):\n a = generating_patches_rep(image, step, batch)\n feature_vec = np.zeros(dim)\n a = np.array(a).astype(np.float)\n kmeans_model.predict(a)\n np.add.at(feature_vec, kmeans_model.predict(a), 1)\n if np.sum(feature_vec) == 0:\n return feature_vec\n feature_vec /= np.sum(feature_vec)\n return feature_vec ","sub_path":"assignments/assignment2/.ipynb_checkpoints/BoW-checkpoint.py","file_name":"BoW-checkpoint.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"423336047","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/Dani/Documents/Projects/Golismero_2.0/src_github/golismero/managers/auditmanager.py\n# Compiled at: 2014-02-10 15:24:09\n\"\"\"\nManager for audits.\n\"\"\"\n__license__ = '\\nGoLismero 2.0 - The web knife - Copyright (C) 2011-2013\\n\\nAuthors:\\n Daniel Garcia Garcia a.k.a cr0hn | cr0hn<@>cr0hn.com\\n Mario Vilas | 
mvilas<@>gmail.com\\n\\nGolismero project site: https://github.com/golismero\\nGolismero project mail: golismero.project<@>gmail.com\\n\\nThis program is free software; you can redistribute it and/or\\nmodify it under the terms of the GNU General Public License\\nas published by the Free Software Foundation; either version 2\\nof the License, or (at your option) any later version.\\n\\nThis program is distributed in the hope that it will be useful,\\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\nGNU General Public License for more details.\\n\\nYou should have received a copy of the GNU General Public License\\nalong with this program; if not, write to the Free Software\\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\\n'\n__all__ = [\n 'AuditManager', 'Audit', 'AuditException']\nfrom .importmanager import ImportManager\nfrom .processmanager import PluginContext\nfrom .reportmanager import ReportManager\nfrom .rpcmanager import implementor\nfrom ..api.data import Data\nfrom ..api.data.resource import Resource\nfrom ..api.config import Config\nfrom ..api.logger import Logger\nfrom ..api.plugin import STAGES\nfrom ..common import AuditConfig\nfrom ..database.auditdb import AuditDB\nfrom ..main.scope import AuditScope, DummyScope\nfrom ..messaging.codes import MessageType, MessageCode, MessagePriority\nfrom ..messaging.message import Message\nfrom ..messaging.notifier import AuditNotifier\nfrom collections import defaultdict\nfrom warnings import catch_warnings, warn\nfrom time import time\nfrom traceback import format_exc\n\n@implementor(MessageCode.MSG_RPC_AUDIT_COUNT)\ndef rpc_audit_get_count(orchestrator, current_audit_name):\n return orchestrator.auditManager.get_audit_count()\n\n\n@implementor(MessageCode.MSG_RPC_AUDIT_NAMES)\ndef rpc_audit_get_names(orchestrator, current_audit_name):\n return orchestrator.auditManager.get_audit_names()\n\n\n@implementor(MessageCode.MSG_RPC_AUDIT_CONFIG)\ndef rpc_audit_get_config(orchestrator, current_audit_name, audit_name=None):\n if audit_name:\n return orchestrator.auditManager.get_audit(audit_name).config\n return orchestrator.config\n\n\n@implementor(MessageCode.MSG_RPC_AUDIT_TIMES)\ndef rpc_audit_get_times(orchestrator, current_audit_name, audit_name=None):\n if not audit_name:\n audit_name = current_audit_name\n return orchestrator.auditManager.get_audit(audit_name).database.get_audit_times()\n\n\n@implementor(MessageCode.MSG_RPC_AUDIT_STATS)\ndef rpc_audit_get_stats(orchestrator, current_audit_name, audit_name=None):\n if not audit_name:\n audit_name = current_audit_name\n return orchestrator.auditManager.get_audit(audit_name).get_runtime_stats()\n\n\n@implementor(MessageCode.MSG_RPC_AUDIT_SCOPE)\ndef rpc_audit_get_scope(orchestrator, current_audit_name, audit_name=None):\n if audit_name:\n return orchestrator.auditManager.get_audit(audit_name).scope\n return DummyScope()\n\n\nclass AuditException(Exception):\n \"\"\"Exception for audits\"\"\"\n pass\n\n\nclass AuditManager(object):\n \"\"\"\n Manage and control audits.\n \"\"\"\n\n def __init__(self, orchestrator):\n \"\"\"\n :param orchestrator: Core to send messages to.\n :type orchestrator: Orchestrator\n \"\"\"\n self.__audits = dict()\n self.__orchestrator = orchestrator\n\n @property\n def orchestrator(self):\n \"\"\"\n :returns: Orchestrator instance.\n :rtype: Orchestrator\n \"\"\"\n return self.__orchestrator\n\n def new_audit(self, audit_config):\n \"\"\"\n Creates a new 
audit.\n\n        :param audit_config: Parameters of the audit.\n        :type audit_config: AuditConfig\n\n        :returns: Newly created audit.\n        :rtype: Audit\n        \"\"\"\n        if not isinstance(audit_config, AuditConfig):\n            raise TypeError('Expected AuditConfig, got %r instead' % type(audit_config))\n        self.orchestrator.uiManager.check_params(audit_config)\n        audit = Audit(audit_config, self.orchestrator)\n        self.__audits[audit.name] = audit\n        if audit.is_new:\n            Logger.log('Audit name: %s' % audit.name)\n        else:\n            Logger.log_verbose('Audit name: %s' % audit.name)\n        if hasattr(audit.database, 'filename') and audit.database.filename != ':memory:':\n            Logger.log_verbose('Audit database: %s' % audit.database.filename)\n        try:\n            audit.run()\n            return audit\n        except Exception as e:\n            tb = format_exc()\n            try:\n                self.remove_audit(audit.name)\n            except Exception:\n                pass\n\n            Logger.log_error(str(e))\n            Logger.log_error_more_verbose(tb)\n            raise AuditException('Failed to add new audit, reason: %s' % e)\n\n    def get_audit_count(self):\n        \"\"\"\n        Get the number of currently running audits.\n\n        :returns: Number of currently running audits.\n        :rtype: int\n        \"\"\"\n        return len(self.__audits)\n\n    def get_audit_names(self):\n        \"\"\"\n        Get the names of the currently running audits.\n\n        :returns: Audit names.\n        :rtype: set(str)\n        \"\"\"\n        return {audit.name for audit in self.__audits.itervalues()}  # iterate the Audit objects, not the dict keys\n\n    def get_all_audits(self):\n        \"\"\"\n        Get the currently running audits.\n\n        :returns: Mapping of audit names to instances.\n        :rtype: dict(str -> Audit)\n        \"\"\"\n        return self.__audits\n\n    def has_audit(self, name):\n        \"\"\"\n        Check if there's an audit with the given name.\n\n        :param name: Audit name.\n        :type name: str\n\n        :returns: True if the audit exists, False otherwise.\n        :rtype: bool\n        \"\"\"\n        return name in self.__audits\n\n    def get_audit(self, name):\n        \"\"\"\n        Get an instance of an audit by its name.\n\n        :param name: Audit name.\n        :type name: str\n\n        :returns: Audit instance.\n        :rtype: Audit\n\n        :raises KeyError: No audit exists with that name.\n        \"\"\"\n        return self.__audits[name]\n\n    def remove_audit(self, name):\n        \"\"\"\n        Delete an instance of an audit by its name.\n\n        :param name: Audit name.\n        :type name: str\n\n        :raises KeyError: No audit exists with that name.\n        \"\"\"\n        try:\n            self.orchestrator.netManager.release_all_slots(name)\n        finally:\n            try:\n                audit = self.__audits[name]\n                try:\n                    audit.close()\n                finally:\n                    del self.__audits[name]\n\n            finally:\n                self.orchestrator.cacheManager.clean(name)\n\n    def dispatch_msg(self, message):\n        \"\"\"\n        Process an incoming message from the Orchestrator.\n\n        :param message: Incoming message.\n        :type message: Message\n        \"\"\"\n        if not isinstance(message, Message):\n            raise TypeError('Expected Message, got %r instead' % type(message))\n        if message.message_type == MessageType.MSG_TYPE_DATA:\n            if not message.audit_name:\n                raise ValueError('Data message with no target audit!')\n            self.get_audit(message.audit_name).dispatch_msg(message)\n        elif message.message_type == MessageType.MSG_TYPE_CONTROL:\n            if message.message_code == MessageCode.MSG_CONTROL_ACK:\n                if message.audit_name:\n                    self.get_audit(message.audit_name).acknowledge(message)\n            elif message.message_code == MessageCode.MSG_CONTROL_START_AUDIT:\n                try:\n                    self.new_audit(message.message_info)\n                except AuditException as e:\n                    tb = format_exc()\n                    message = Message(message_type=MessageType.MSG_TYPE_STATUS, message_code=MessageCode.MSG_STATUS_AUDIT_ABORTED, message_info=(message.message_info.audit_name, str(e), tb), priority=MessagePriority.MSG_PRIORITY_HIGH, audit_name=None)\n                    self.orchestrator.enqueue_msg(message)\n\n            elif message.message_code == MessageCode.MSG_CONTROL_STOP_AUDIT:\n                if not message.audit_name:\n                    raise ValueError(\"I don't know which audit to stop...\")\n                self.get_audit(message.audit_name).close()\n                self.remove_audit(message.audit_name)\n            elif message.message_code == MessageCode.MSG_CONTROL_LOG:\n                if message.audit_name:\n                    self.get_audit(message.audit_name).dispatch_msg(message)\n        return\n\n    def close(self):\n        \"\"\"\n        Release all resources held by all audits.\n        \"\"\"\n        for name in self.__audits.keys():\n            try:\n                self.remove_audit(name)\n            except:\n                pass\n\n        self.__orchestrator = None  # clear the reference only after all audits have been removed\n        return\n\n\nclass Audit(object):\n    \"\"\"\n    Instance of an audit, with its custom parameters,\n    scope, target, plugins, etc.\n    \"\"\"\n\n    def __init__(self, audit_config, orchestrator):\n        \"\"\"\n        :param audit_config: Audit configuration.\n        :type audit_config: AuditConfig\n\n        :param orchestrator: Orchestrator instance that will receive messages\n            sent by this audit.\n        :type orchestrator: Orchestrator\n        \"\"\"\n        if not isinstance(audit_config, AuditConfig):\n            raise TypeError('Expected AuditConfig, got %r instead' % type(audit_config))\n        self.__audit_config = audit_config\n        self.__orchestrator = orchestrator\n        self.__current_stage = orchestrator.pluginManager.min_stage\n        self.__is_report_started = False\n        self.__must_update_stop_time = True\n        self.__followed_links = 0\n        self.__show_max_links_warning = True\n        self.__expecting_ack = 0\n        self.__stage_cycles = defaultdict(int)\n        self.__processed_count = 0\n        self.__total_count = 0\n        self.__stages_enabled = tuple()\n        self.__notifier = None\n        self.__plugin_manager = None\n        self.__import_manager = None\n        self.__report_manager = None\n        self.__is_new = not audit_config.audit_name or audit_config.audit_db == ':auto:'\n        self.__database = AuditDB(audit_config)\n        self.__name = self.__database.audit_name\n        return\n\n    @property\n    def name(self):\n        \"\"\"\n        :returns: Name of the audit.\n        :rtype: str\n        \"\"\"\n        return self.__name\n\n    @property\n    def is_new(self):\n        \"\"\"\n        :returns: True if the audit is new, False if it's a reopened audit.\n        :rtype: bool\n        \"\"\"\n        return self.__is_new\n\n    @property\n    def orchestrator(self):\n        \"\"\"\n        :returns: Orchestrator instance that will receive messages\n            sent by this audit.\n        :rtype: Orchestrator\n        \"\"\"\n        return self.__orchestrator\n\n    @property\n    def config(self):\n        \"\"\"\n        :returns: Audit configuration.\n        :rtype: AuditConfig\n        \"\"\"\n        return self.__audit_config\n\n    @property\n    def scope(self):\n        \"\"\"\n        :returns: Audit scope.\n        :rtype: AuditScope\n        \"\"\"\n        return self.__audit_scope\n\n    @property\n    def database(self):\n        \"\"\"\n        :returns: Audit database.\n        :rtype: AuditDB\n        \"\"\"\n        return self.__database\n\n    @property\n    def pluginManager(self):\n        \"\"\"\n        :returns: Audit plugin manager.\n        :rtype: AuditPluginManager\n        \"\"\"\n        return self.__plugin_manager\n\n    @property\n    def importManager(self):\n        \"\"\"\n        :returns: Import manager.\n        :rtype: ImportManager\n        \"\"\"\n        return self.__import_manager\n\n    @property\n    def reportManager(self):\n        \"\"\"\n        :returns: Report manager.\n        :rtype: ReportManager\n        \"\"\"\n        return self.__report_manager\n\n    @property\n    def expecting_ack(self):\n        \"\"\"\n        :returns: Number of ACKs expected by this audit.\n        :rtype: int\n        \"\"\"\n        return self.__expecting_ack\n\n    @property\n    def current_stage(self):\n        \"\"\"\n        :returns: Current execution stage.\n        :rtype: int\n        \"\"\"\n        return self.__current_stage\n\n    @property\n    def 
is_report_started(self):\n \"\"\"\n :returns: True if report generation has started, False otherwise.\n :rtype: bool\n \"\"\"\n return self.__is_report_started\n\n def get_runtime_stats(self):\n r\"\"\"\n Returns a dictionary with runtime statistics with at least the\n following keys:\n\n - \"current_stage\": [int]\n Current stage number.\n - \"total_count\": [int]\n Total number of data objects to process in this stage.\n - \"processed_count\": [int]\n Number of data objects already processed in this stage.\n - \"stage_cycles\": [dict(int -> int)]\n Map of stage numbers and times each stage ran.\n - \"stages_enabled\": [tuple(int)]\n Stages enabled for this audit.\n\n Future versions of GoLismero may include more keys.\n\n :returns: Runtime statistics.\n :rtype: dict(str -> \\*)\n \"\"\"\n return {'current_stage': self.__current_stage, \n 'total_count': self.__total_count, \n 'processed_count': self.__processed_count, \n 'stage_cycles': dict(self.__stage_cycles), \n 'stages_enabled': self.__stages_enabled}\n\n def run(self):\n \"\"\"\n Start execution of an audit.\n \"\"\"\n start_time = time()\n self.__expecting_ack = 0\n old_context = Config._context\n try:\n self.__audit_scope = DummyScope()\n Config._context = PluginContext(msg_queue=old_context.msg_queue, audit_name=self.name, audit_config=self.config, audit_scope=self.scope, orchestrator_pid=old_context._orchestrator_pid, orchestrator_tid=old_context._orchestrator_tid)\n self.__plugin_manager = self.orchestrator.pluginManager.get_plugin_manager_for_audit(self)\n self.__plugin_manager.initialize(self.config)\n testing_plugins = self.pluginManager.load_plugins('testing')\n self.__notifier = AuditNotifier(self)\n self.__notifier.add_multiple_plugins(testing_plugins)\n self.__stages_enabled = sorted(stage_num for stage, stage_num in STAGES.iteritems() if self.pluginManager.get_plugins(stage))\n self.__import_manager = ImportManager(self.orchestrator, self)\n self.__report_manager = ReportManager(self.orchestrator, self)\n audit_scope = self.database.get_audit_scope()\n if audit_scope is None:\n if self.config.targets:\n audit_scope = AuditScope(self.config)\n else:\n audit_scope.add_targets(self.config)\n if audit_scope is not None:\n self.__audit_scope = audit_scope\n self.database.save_audit_scope(self.scope)\n Config._context = PluginContext(msg_queue=old_context.msg_queue, audit_name=self.name, audit_config=self.config, audit_scope=self.scope, orchestrator_pid=old_context._orchestrator_pid, orchestrator_tid=old_context._orchestrator_tid)\n if not self.database.get_audit_times()[0]:\n self.database.set_audit_start_time(start_time)\n count = self.database.get_data_count()\n if count:\n Logger.log_verbose('Found %d objects in database' % count)\n target_data = self.scope.get_targets()\n targets_added_count = 0\n for data in target_data:\n if not self.database.has_data_key(data.identity, data.data_type):\n self.database.add_data(data)\n targets_added_count += 1\n\n if targets_added_count:\n Logger.log_verbose('Added %d new targets to the database.' 
% targets_added_count)\n            self.database.clear_all_stage_marks()\n            imported_count = 0\n            if self.importManager.is_enabled:\n                self.send_msg(message_type=MessageType.MSG_TYPE_STATUS, message_code=MessageCode.MSG_STATUS_STAGE_UPDATE, message_info='import', priority=MessagePriority.MSG_PRIORITY_HIGH)\n                if not target_data:\n                    target_types = (Resource.RESOURCE_BASE_URL,\n                     Resource.RESOURCE_FOLDER_URL,\n                     Resource.RESOURCE_URL,\n                     Resource.RESOURCE_IP,\n                     Resource.RESOURCE_DOMAIN)\n                    old_data = set()\n                    for data_subtype in target_types:\n                        old_data.update(self.database.get_data_keys(Data.TYPE_RESOURCE, data_subtype))\n\n                imported_count = self.importManager.import_results()\n                if not target_data:\n                    new_data = set()\n                    for data_subtype in target_types:\n                        new_data.update(self.database.get_data_keys(Data.TYPE_RESOURCE, data_subtype))\n\n                    new_data.difference_update(old_data)\n                    old_data.clear()\n                    self.config.targets = [str(self.database.get_data(identity)) for identity in new_data]\n                    new_data.clear()\n                    self.__audit_scope = AuditScope(self.config)\n                    self.database.save_audit_scope(self.scope)\n                    Config._context = PluginContext(msg_queue=old_context.msg_queue, audit_name=self.name, audit_config=self.config, audit_scope=self.scope, orchestrator_pid=old_context._orchestrator_pid, orchestrator_tid=old_context._orchestrator_tid)\n                    target_data = self.scope.get_targets()\n                    targets_added_count = 0\n                    for data in target_data:\n                        if not self.database.has_data_key(data.identity):\n                            self.database.add_data(data)\n                            targets_added_count += 1\n\n                    if targets_added_count:\n                        Logger.log_verbose('Added %d new targets to the database.' % targets_added_count)\n            Logger.log_more_verbose(str(self.scope))\n            assert not isinstance(self.scope, DummyScope), 'Internal error!'\n            if not self.scope.targets:\n                raise ValueError('No targets selected for audit, aborting execution.')\n            existing = self.database.get_data_keys()\n            stack = list(existing)\n            visited = set()\n            while stack:\n                identity = stack.pop()\n                if identity not in visited:\n                    visited.add(identity)\n                    data = self.database.get_data(identity)\n                    if data.is_in_scope():\n                        for data in data.discovered:\n                            identity = data.identity\n                            if identity not in existing and data.is_in_scope():\n                                self.database.add_data(data)\n                                existing.add(identity)\n                                stack.append(identity)\n\n            del existing\n            del visited\n        finally:\n            Config._context = old_context\n\n        self.__must_update_stop_time = imported_count or targets_added_count\n        if testing_plugins:\n            Logger.log_verbose('Launching tests...')\n            self.update_stage()\n        else:\n            self.__current_stage = self.__plugin_manager.max_stage + 1\n            self.generate_reports()\n        return\n\n    def send_msg(self, message_type=MessageType.MSG_TYPE_DATA, message_code=MessageCode.MSG_DATA_REQUEST, message_info=None, priority=MessagePriority.MSG_PRIORITY_MEDIUM):\n        \"\"\"\n        Send messages to the Orchestrator.\n\n        :param message_type: Message type.\n            Must be one of the constants from MessageType.\n        :type message_type: int\n\n        :param message_code: Message code.\n            Must be one of the constants from MessageCode.\n        :type message_code: int\n\n        :param message_info: The payload of the message.\n            Its type depends on the message type and code.\n        :type message_info: *\n\n        :param priority: Priority level.\n            Must be one of the constants from MessagePriority.\n        :type priority: int\n        \"\"\"\n        m = Message(message_type=message_type, message_code=message_code, message_info=message_info, audit_name=self.name, priority=priority)\n        self.orchestrator.enqueue_msg(m)\n\n    def acknowledge(self, message):\n        
\"\"\"\n Got an ACK for a message sent from this audit to the plugins.\n\n :param message: The message with the ACK.\n :type message: Message\n \"\"\"\n try:\n self.__expecting_ack -= 1\n self.__notifier.acknowledge(message)\n finally:\n if not self.expecting_ack:\n self.update_stage()\n\n def update_stage(self):\n \"\"\"\n Sets the current stage to the minimum needed to process pending data.\n When the last stage is completed, sends the audit stop message.\n \"\"\"\n database = self.database\n pluginManager = self.pluginManager\n if self.__is_report_started:\n self.__report_manager.generate_screen_report(self.orchestrator.uiManager.notifier)\n self.send_msg(message_type=MessageType.MSG_TYPE_CONTROL, message_code=MessageCode.MSG_CONTROL_STOP_AUDIT, message_info=True)\n else:\n for stage in xrange(pluginManager.min_stage, pluginManager.max_stage + 1):\n self.__current_stage = stage\n pending = database.get_pending_data(stage)\n if not pending:\n continue\n if not pluginManager.stages[stage]:\n database.mark_stage_finished_many(pending, stage)\n continue\n candidates = list(pending)\n pending.clear()\n for i in xrange(0, len(candidates), 10):\n batch_ids = set(candidates[i:i + 10])\n batch = database.get_many_data(batch_ids)\n if not batch:\n database.mark_stage_finished_many(batch_ids, stage)\n continue\n data_ok = []\n ids_ok = set()\n ids_not_ok = set()\n for data in batch:\n if data.is_in_scope(self.scope):\n ids_ok.add(data.identity)\n data_ok.append(data)\n else:\n ids_not_ok.add(data.identity)\n\n if ids_not_ok:\n database.mark_stage_finished_many(ids_not_ok, stage)\n batch_ids = ids_ok\n batch = data_ok\n if not batch:\n continue\n if not self.__notifier.is_runnable_stage(batch, stage):\n database.mark_stage_finished_many(batch_ids, stage)\n continue\n pending.update(batch_ids)\n batch = []\n\n if not pending:\n continue\n self.__stage_cycles[self.__current_stage] += 1\n self.__processed_count = 0\n self.__total_count = len(pending)\n self.__must_update_stop_time = True\n stage_name = pluginManager.get_stage_name_from_value(stage)\n self.send_msg(message_type=MessageType.MSG_TYPE_STATUS, message_code=MessageCode.MSG_STATUS_STAGE_UPDATE, message_info=stage_name)\n to_send = list(pending)\n for i in xrange(0, len(to_send), 10):\n datalist = database.get_many_data(to_send[i:i + 10])\n self.send_msg(message_type=MessageType.MSG_TYPE_DATA, message_code=MessageCode.MSG_DATA_REQUEST, message_info=datalist)\n\n return\n\n self.__current_stage = pluginManager.max_stage + 1\n self.generate_reports()\n\n def dispatch_msg(self, message):\n \"\"\"\n Send messages to the plugins of this audit.\n\n :param message: The message to send.\n :type message: Message\n \"\"\"\n if not isinstance(message, Message):\n raise TypeError('Expected Message, got %r instead' % type(message))\n old_context = Config._context\n try:\n Config._context = PluginContext(msg_queue=old_context.msg_queue, audit_name=self.name, audit_config=self.config, audit_scope=self.scope, ack_identity=message.ack_identity, orchestrator_pid=old_context._orchestrator_pid, orchestrator_tid=old_context._orchestrator_tid)\n self.__dispatch_msg(message)\n finally:\n Config._context = old_context\n\n def __dispatch_msg(self, message):\n database = self.database\n pluginManager = self.pluginManager\n if message.message_type == MessageType.MSG_TYPE_CONTROL and message.message_code == MessageCode.MSG_CONTROL_LOG:\n text, level, is_error = message.message_info\n plugin_id = message.plugin_id\n ack_id = message.ack_identity\n timestamp = 
message.timestamp\n            database.append_log_line(text, level, is_error, plugin_id, ack_id, timestamp)\n            return\n        if message.message_type == MessageType.MSG_TYPE_DATA:\n            if isinstance(message.message_info, Data):\n                message.message_info = [message.message_info]\n            if message.message_code == MessageCode.MSG_DATA_REQUEST:\n                launched = self.__notifier.notify(message)\n                if launched:\n                    self.__expecting_ack += launched\n                else:\n                    self.__expecting_ack += 1\n                    self.send_msg(message_type=MessageType.MSG_TYPE_CONTROL, message_code=MessageCode.MSG_CONTROL_ACK, priority=MessagePriority.MSG_PRIORITY_LOW)\n                self.__processed_count += len(message.message_info)\n                return\n            if message.message_code == MessageCode.MSG_DATA_RESPONSE:\n                data_for_plugins = []\n                for data in message.message_info:\n                    if not isinstance(data, Data):\n                        warn('TypeError: Expected Data, got %r instead' % type(data), RuntimeWarning, stacklevel=3)\n                        continue\n                    if not database.has_data_key(data.identity):\n                        if data.data_type == Data.TYPE_RESOURCE and data.resource_type == Resource.RESOURCE_URL:\n                            self.__followed_links += 1\n                            if self.config.max_links > 0 and self.__followed_links >= self.config.max_links:\n                                if self.__show_max_links_warning:\n                                    self.__show_max_links_warning = False\n                                    w = 'Maximum number of links (%d) reached! Audit: %s'\n                                    w = w % (self.config.max_links, self.name)\n                                    with catch_warnings(record=True) as wlist:\n                                        warn(w, RuntimeWarning)\n                                    self.send_msg(message_type=MessageType.MSG_TYPE_CONTROL, message_code=MessageCode.MSG_CONTROL_WARNING, message_info=wlist, priority=MessagePriority.MSG_PRIORITY_HIGH)\n                                continue\n                    database.add_data(data)\n                    if data.is_in_scope():\n                        plugin_id = message.plugin_id\n                        if plugin_id:\n                            plugin_info = pluginManager.get_plugin_by_id(plugin_id)\n                            if not plugin_info.recursive:\n                                database.mark_plugin_finished(data.identity, plugin_id)\n                        data_for_plugins.append(data)\n                    else:\n                        database.mark_stage_finished(data.identity, pluginManager.max_stage)\n\n                visited = {data.identity for data in data_for_plugins}\n                for data in list(data_for_plugins):\n                    links = set(data.links)  # snapshot of the links this object had before its children were processed\n                    queue = list(data.discovered)\n                    while queue:\n                        child = queue.pop(0)\n                        if child.identity not in visited and not database.has_data_key(child.identity):\n                            database.add_data(child)\n                            visited.add(child.identity)\n                            queue.extend(child.discovered)\n                            if child.is_in_scope():\n                                data_for_plugins.append(child)\n                            else:\n                                database.mark_stage_finished(child.identity, pluginManager.max_stage)\n\n                    links = set(data.links).difference(links)  # links gained while the children were processed\n                    if links:\n                        database.add_data(data)\n\n                if data_for_plugins and self.current_stage == self.pluginManager.min_stage:\n                    self.__total_count += len(data_for_plugins)\n                    self.send_msg(message_type=MessageType.MSG_TYPE_DATA, message_code=MessageCode.MSG_DATA_REQUEST, message_info=data_for_plugins)\n\n    def generate_reports(self):\n        \"\"\"\n        Start the generation of reports for the audit.\n        \"\"\"\n        if self.__is_report_started:\n            raise RuntimeError('Why are you asking for the report twice?')\n        self.__expecting_ack += 1\n        try:\n            self.__is_report_started = True\n            if self.__must_update_stop_time:\n                self.database.set_audit_stop_time(time())\n            launched = 0\n            if self.__report_manager.plugin_count > 0:\n                self.send_msg(message_type=MessageType.MSG_TYPE_STATUS, message_code=MessageCode.MSG_STATUS_STAGE_UPDATE, message_info='report')\n                launched = self.__report_manager.generate_reports(self.__notifier)\n            if launched:\n                self.__expecting_ack += launched\n            else:\n                self.__expecting_ack += 1\n                self.send_msg(message_type=MessageType.MSG_TYPE_CONTROL, 
message_code=MessageCode.MSG_CONTROL_ACK, priority=MessagePriority.MSG_PRIORITY_LOW)\n        finally:\n            self.send_msg(message_type=MessageType.MSG_TYPE_CONTROL, message_code=MessageCode.MSG_CONTROL_ACK, priority=MessagePriority.MSG_PRIORITY_LOW)\n\n    def close(self):\n        \"\"\"\n        Release all resources held by this audit.\n        \"\"\"\n        try:\n            try:\n                try:\n                    try:\n                        try:\n                            if self.database is not None:\n                                try:\n                                    self.database.compact()\n                                finally:\n                                    self.database.close()\n\n                        finally:\n                            if self.__notifier is not None:\n                                self.__notifier.close()\n\n                    finally:\n                        if self.__plugin_manager is not None:\n                            self.__plugin_manager.close()\n\n                finally:\n                    if self.__import_manager is not None:\n                        self.__import_manager.close()\n\n            finally:\n                if self.__report_manager is not None:\n                    self.__report_manager.close()\n\n        finally:\n            self.__database = None\n            self.__orchestrator = None\n            self.__notifier = None\n            self.__audit_config = None\n            self.__audit_scope = None\n            self.__plugin_manager = None\n            self.__import_manager = None\n            self.__report_manager = None\n\n        return","sub_path":"pycfiles/golismero-2.0.3-1.tar/auditmanager.py","file_name":"auditmanager.py","file_ext":"py","file_size_in_byte":32390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"640617133","text":"import os\nimport os.path\nfrom os.path import join, getsize\nimport re\nimport hashlib\nimport pandas as pd\nimport cv2\nimport PyPDF2  # needed by rename_reports() below\n\ndef copyFSVs(src,dest):\n    FOUND_DUP_FLAG = 0\n    for root, dirs, files in os.walk(src):\n        for file in files:\n            if os.path.isfile(dest+\"\\\\\"+file):\n                FOUND_DUP_FLAG = 1\n                print(FOUND_DUP_FLAG)\n            command = 'copy \"' + root+\"\\\\\"+file+'\" ' + '\"'+dest+'\"'\n            print(command)\n            os.system(command)\n    print(FOUND_DUP_FLAG)\n\ndef checkDuplicates(src):\n    file_names = []\n    dup_fnames = []\n    for root, dirs, files in os.walk(src):\n        for file in files:\n            if file in file_names:\n                dup_fnames.append(root+\"\\\\\"+file)\n            else:\n                file_names.append(file)\n    for file in dup_fnames:\n        print(file)\n\n###returns a tuple (lastname,firstname) from a .fsv file\ndef nameExtractFSV(filename):\n    #Asdf Asdf\n    matchObj = re.match(\"^([a-zA-Z' -]*) ([a-zA-Z]*).*$\", filename, flags=0)\n    if matchObj:\n        # Ex Anisha Wells_E.fsv\n        return (matchObj.group(2).upper(), matchObj.group(1).upper())\n    #-Asdf\n    matchObj = re.match(\"^([a-zA-Z',. 
]*)-([a-zA-Z]*).*$\", filename, flags=0)\n if matchObj:\n # Ex Cunningham-Lilia-TE-05-12-2014-145249-MT.fsv\n return (matchObj.group(1).upper(), matchObj.group(2).upper())\n else:\n raise Exception(\"fsv name extraction failed\")\n \n###returns a tuple (lastname,firstname,type) from a .pdf file\ndef nameExtractReport(filename):\n#-Asdf\n matchObj = re.match(\"^([a-zA-Z'_ -]*)(\\.|, |,| )([a-zA-Z_ -]*)(\\.| )([a-zA-Z]*)\\.(pdf|doc)$\", filename, flags=0)\n if matchObj:\n # Ex Miller.Boyd.E.pdf\n return (matchObj.group(1).upper(), matchObj.group(3).upper(), matchObj.group(5).upper())\n else:\n raise Exception(\"fsv name extraction failed\")\n\ndef matchReportsStudies(src_reports, src_studies):\n match={} #name=>(path1,path2)\n for root, dirs, files in os.walk(src_reports):\n for file in files:\n try:\n lname, fname, ftype = nameExtractReport(file)\n name = lname, fname\n if name not in match:\n match[name] = [root + \"/\" + file]\n else:\n match[name].append(root + \"/\" + file)\n except:\n #print(\"--------ERROR----------\")\n pass\n for root, dirs, files in os.walk(src_studies):\n for dir in dirs:\n try:\n name = nameExtractFSV(dir)\n if name not in match:\n match[name] = [root + \"/\" + dir]\n else:\n match[name].append(root + \"/\" + dir)\n except:\n# #print(\"--------ERROR----------\")\n pass\n break \n return match\n\ndef disposeUnmatched(match_dict):\n newdict = {}\n for key, val_arr in match_dict.items():\n #print(val_arr)\n contains_fsv = 0\n contains_pdf_or_doc = 0\n for filepath in val_arr:\n if filepath.endswith(\".fsv\"):\n contains_fsv = 1\n if filepath.endswith(\".pdf\") or filepath.endswith(\".doc\"):\n contains_pdf_or_doc = 1\n if contains_fsv and contains_pdf_or_doc:\n newdict[key] = val_arr\n return newdict\n\ndef getFileName(whole_path):\n return whole_path[whole_path.rfind('/')+1:]\n\ndef getFSVDate(whole_path):\n return re.findall(r'\\d{2}-{1}\\d{2}-{1}\\d{4}', whole_path)\n\ndef rename_and_anonymize(match, metadata, dest_studies, dest_reports):\n for key, val_arr in match.items():\n name = key[0] + ' ' + key[1]\n hashed_name = hashlib.md5(name.encode()).hexdigest()\n fsv_version = 0\n pdf_version = 0\n for filepath in val_arr:\n try:\n if filepath.endswith(\".pdf\") or filepath.endswith(\".doc\"):\n _, _, reporttype = nameExtractReport(getFileName(filepath))\n newfilename = hashed_name + \"_\" + reporttype + '.' 
+ filepath.split('.')[-1]\n if newfilename not in os.listdir(dest_reports):\n command = 'cp \"' + filepath + '\" ' + '\"' + dest_reports + '/' + newfilename + '\"'\n print(command)\n os.system(command)\n else:\n print('{} already copied over'.format(filepath))\n# if filepath.endswith(\".fsv\"):\n else:\n patient_metadata = metadata.loc[metadata['Patient'] == filepath.split('/')[-1]]\n newfilename = patient_metadata['Patient_Dir'].values[0]\n if newfilename not in os.listdir(dest_studies):\n command = 'cp -r \"' + filepath + '\" ' + '\"' + dest_studies + '/' + newfilename + '\"'\n print(command)\n os.system(command)\n else:\n print('{} already copied over'.format(filepath))\n except Exception as e:\n print(filepath)\n print(e)\n \n\ndef rename_reports(path_to_extracted_reports, metadata):\n for root, dirs, files in os.walk(path_to_extracted_reports):\n for file in files:\n file_bd = file.split('_')\n try:\n os.chdir(path_to_extracted_reports)\n pdf_file = open(file, 'rb')\n read_pdf = PyPDF2.PdfFileReader(pdf_file)\n page = read_pdf.getPage(0)\n page_content = page.extractText()\n# print(page_content)\n date = re.findall(r'(?<=DATE:).*(?=PHYSICIAN:)', page_content, re.DOTALL)[0]\n month = date.split('-')[1].strip()\n day = date.split('-')[2].strip()\n year = date.split('-')[0].strip()\n file_bd.insert(1, month + '-' + day + '-' + year)\n file_w_date = '_'.join(file_bd)\n os.rename(file, file_w_date)\n except:\n pass\n \ndef crop_echos(path_to_echos, destination_path, metadata): # Don't include '/' at the end \n for _, patients, _ in os.walk(path_to_echos): # Walk through directory with patient labels\n for patient in patients:\n if (len(os.listdir(path_to_echos+'/'+patient)) > 0):\n for _, views, _ in os.walk(path_to_echos + '/' + patient):\n for view in views:\n try:\n ## for extracted fsvs\n# patient_metadata = metadata.loc[(metadata['Patient_Dir'] == patient) & (metadata['Instance_Number'] == float(view)+1)]\n ## for extracted dicoms\n patient_metadata = metadata.loc[(metadata['Patient_Dir'] == patient) & (metadata['Folder'] == view)]\n if int(patient_metadata['Region_Spatial_Format'].values[0]) == 1:\n for _, _, frames in os.walk(path_to_echos+'/'+patient+'/'+view):\n jpg_frames = [i for i in frames if i.split('.')[-1] == 'jpg']\n if (len(jpg_frames) == int(patient_metadata['Number_of_Frames'].values[0])):\n for frame in jpg_frames:\n try:\n img = cv2.imread(path_to_echos+'/'+patient+'/'+view+'/'+frame)\n # Accuson Cypress\n if (img.shape == (480, 640, 3)):\n print('extracting img {}'.format(path_to_echos+'/'+patient+'/'+view+'/'+frame))\n img = img[30:,:] \n img[15:30,382:400] = 0\n img[0:120,0:120] = 0\n img[0:50,560:640] = 0\n img = img[15:360,115:580]\n if int(patient_metadata['Region_Data_Type'].values[0]) == 1:\n path_to_view = destination_path+'/'+'dim_480_640/Standard'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img)\n if int(patient_metadata['Region_Data_Type'].values[0]) == 2:\n path_to_view = destination_path+'/'+'dim_480_640/Color'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img) \n # Accuson Cypress\n elif (img.shape == (456, 576, 3)):\n print('extracting img {}'.format(path_to_echos+'/'+patient+'/'+view+'/'+frame))\n img = img[30:,:] \n img[15:30,382:400] = 0\n img[0:120,0:120] = 0\n img[0:50,495:576] = 0\n img = img[15:345,48:505]\n if int(patient_metadata['Region_Data_Type'].values[0]) == 1:\n path_to_view = 
destination_path+'/'+'dim_456_576/Standard'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img) \n if int(patient_metadata['Region_Data_Type'].values[0]) == 2:\n path_to_view = destination_path+'/'+'dim_456_576/Color'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img)\n # Vivid iq \n elif (img.shape == (708, 1016, 3)):\n print('extracting img {}'.format(path_to_echos+'/'+patient+'/'+view+'/'+frame))\n img[0:300,0:260] = 0\n img[:,962:] = 0\n if int(patient_metadata['Region_Data_Type'].values[0]) == 1:\n path_to_view = destination_path+'/'+'dim_708_1016/Standard'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img) \n if int(patient_metadata['Region_Data_Type'].values[0]) == 2:\n path_to_view = destination_path+'/'+'dim_708_1016/Color'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img) \n # Vivid i \n elif (img.shape == (422, 636, 3)):\n print('extracting img {}'.format(path_to_echos+'/'+patient+'/'+view+'/'+frame))\n img[0:26,0:260] = 0\n img[26:135,0:130] = 0\n img[:,610:] = 0\n if int(patient_metadata['Region_Data_Type'].values[0]) == 1:\n path_to_view = destination_path+'/'+'dim_422_636/Standard'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img) \n if int(patient_metadata['Region_Data_Type'].values[0]) == 2:\n path_to_view = destination_path+'/'+'dim_422_636/Color'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img)\n # Terason \n elif (img.shape == (650, 880, 3)):\n print('extracting img {}'.format(path_to_echos+'/'+patient+'/'+view+'/'+frame))\n img = img[42:,:] \n img[:215,685:] = 0\n if int(patient_metadata['Region_Data_Type'].values[0]) == 1:\n path_to_view = destination_path+'/'+'dim_650_880/Standard'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img) \n if int(patient_metadata['Region_Data_Type'].values[0]) == 2:\n path_to_view = destination_path+'/'+'dim_650_880/Color'+'/'+patient+'/'+view\n os.makedirs(path_to_view,exist_ok=True)\n cv2.imwrite(path_to_view+'/'+frame, img) \n else:\n f=open('/extracted_data/errorlogcrop.txt','a+')\n f.write('error with {}\\n'.format(path_to_view+'/'+frame+' '+'image size = ' +str(img.shape)))\n f.close()\n except Exception as e:\n print('unable to crop {}'.format(path_to_echos + '/' + patient + '/' + view + '/' + frame))\n except:\n pass\n break\n break","sub_path":"Extract/extraction_tools.py","file_name":"extraction_tools.py","file_ext":"py","file_size_in_byte":15386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"427632029","text":"import os\n\nimport cv2\n\n\nclass FaceDetection(object):\n model = cv2.CascadeClassifier(\n os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'models/haarcascade_frontalface_default.xml'))\n\n def detect_faces(self, ret, frame):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Detect the faces\n faces = self.model.detectMultiScale(gray, 1.1, 4)\n # Draw the rectangle around each face\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n return ret, 
frame","sub_path":"ml/ml_services/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"153640318","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 24 14:04:38 2020\n\n@author: jkin0004\n\"\"\"\n\nimport numpy as np\nimport math\n\n\ndef sector(dataSet, centre, width, save='0', describer=None):\n\n xcentre = 0\n ycentre = 0\n\n sectcent = centre # In radians\n sectwid = width # In radians\n\n # dataSet should be a sasView 2D data object\n mag = dataSet.q_data # q values\n sectang = []\n\n for i in range(len(mag)):\n if dataSet.qy_data[i] > 0:\n sectang.append(math.atan(dataSet.qx_data[i]/dataSet.qy_data[i]))\n elif dataSet.qy_data[i] < 0:\n sectang.append(math.atan(dataSet.qx_data[i]/dataSet.qy_data[i]) + np.pi)\n\n sectang = np.array(sectang)\n\n cwmax = sectcent+(0.5*sectwid) # Max bound for top sector\n cwmin = sectcent-(0.5*sectwid) # Min bound for top sector\n cwmax_ref = sectcent+(0.5*sectwid)+math.pi # Max bound for bottom sector\n cwmin_ref = sectcent-(0.5*sectwid)+math.pi # Min bound for bottom sector\n\n # Sorting according to angle\n sortI = np.argsort(sectang)\n sectang = sectang[sortI]\n mag = mag[sortI]\n err = dataSet.err_data[sortI]\n data = dataSet.data[sortI]\n\n seccrop_data = np.zeros_like(data)\n seccrop_err = np.zeros_like(err)\n seccrop_mag = np.zeros_like(mag)\n seccrop_ang = np.zeros_like(sectang)\n\n # Find logic gates\n posLog = np.logical_and(cwmin < sectang, sectang < cwmax)\n negLog = np.logical_and(cwmin_ref < sectang, sectang < cwmax_ref)\n\n # Find values according to logic gates\n seccrop_data[posLog] = data[posLog]\n seccrop_err[posLog] = err[posLog]\n seccrop_mag[posLog] = mag[posLog]\n seccrop_ang[posLog] = sectang[posLog]\n\n seccrop_data[negLog] = data[negLog]\n seccrop_err[negLog] = err[negLog]\n seccrop_mag[negLog] = mag[negLog]\n seccrop_ang[negLog] = sectang[negLog]\n\n zeros = seccrop_mag != 0 # Find zeros\n\n # remove zeros\n seccrop_data = seccrop_data[zeros]\n seccrop_err = seccrop_err[zeros]\n seccrop_mag = seccrop_mag[zeros]\n seccrop_ang = seccrop_ang[zeros]\n\n # Sort by ascending q\n sortq = np.argsort(seccrop_mag)\n\n seccrop_data = seccrop_data[sortq]\n seccrop_err = seccrop_err[sortq]\n seccrop_mag = seccrop_mag[sortq]\n seccrop_ang = seccrop_ang[sortq]\n\n # Make 100 bins spaced log linearly between min and max q\n nbs = 100\n minMag = np.min(seccrop_mag)\n maxMag = np.max(seccrop_mag)\n\n logMinMag = np.log10(minMag)\n logMaxMag = np.log10(maxMag)\n\n logLinear = np.linspace(logMinMag, logMaxMag, nbs)\n bins = 10**logLinear\n\n binindex = np.zeros([nbs]) # number points summed per bin\n bintotal = np.zeros([nbs]) # summed intensity\n errtotal = np.zeros([nbs]) # summed error\n\n for i in range(nbs - 1):\n for ii in range(len(seccrop_data)):\n if seccrop_mag[ii] >= bins[i]:\n if seccrop_mag[ii] <= bins[i + 1]:\n binindex[i] = binindex[i] + 1\n bintotal[i] = bintotal[i] + seccrop_data[ii]\n errtotal[i] = errtotal[i] + seccrop_err[ii]\n# print(errtotal[i])\n\n binZeros = binindex != 0\n\n bins = bins[binZeros]\n binindex = binindex[binZeros]\n bintotal = bintotal[binZeros]\n errtotal = errtotal[binZeros]\n# print(errtotal)\n\n binave = bintotal/binindex\n errave = errtotal/binindex\n\n# allerror = [err, seccrop_err, errtotal, errave]\n\n if save == '1':\n fileType = '.dat'\n if dataSet.shear[0][0] == '0':\n fileName = describer + '_' + str(dataSet.sample[0]) + 
'_' + 'static'\n else:\n fileName = describer + '_' + \\\n str(dataSet.sample[0]) + '_' + str(dataSet.shear[0][0:-14]) + 'ps'\n location = '../2D_annular_sector_extraction/py_sect_radAve/'\n fullName = location + fileName + fileType\n with open(fullName, 'wt') as fh:\n fh.write(\"q I(q) err_I\\n\")\n for x, y, z in zip(bins, binave, errave):\n fh.write(\"%g %g %g\\n\" % (x, y, z))\n\n return bins, binave, errave\n\n\ndef annular(dataSet, radius, thx, save='0', describer=None):\n\n radius = radius\n thx = thx\n\n mag = dataSet.q_data\n\n # Draw a set of x,y points for the circles chosen\n theta = np.linspace(0, 2*np.pi, 314)\n cx = radius*np.cos(theta)\n cy = radius*np.sin(theta)\n cxouter = (radius + thx)*np.cos(theta)\n cyouter = (radius + thx)*np.sin(theta)\n\n # Capture points that fall within the annular rings based on their magnitude\n\n I_ann = np.logical_and(radius < mag, mag < (radius+thx))\n annul_x = dataSet.qx_data[I_ann]\n annul_y = dataSet.qy_data[I_ann]\n annul_I = dataSet.data[I_ann]\n annul_err = dataSet.err_data[I_ann]\n annul_mag = dataSet.q_data[I_ann]\n\n # Calculate the angles for the obtained points. Zero is twleve o clock (vertically up on y-axis)\n\n annul_ang = []\n\n for i in range(len(annul_mag)):\n if annul_y[i] > 0:\n annul_ang.append(math.atan(annul_x[i]/annul_y[i]))\n elif annul_y[i] < 0:\n annul_ang.append(math.atan(annul_x[i]/annul_y[i]) + np.pi)\n\n annul_ang = np.array(annul_ang)\n\n # Sorts data to give as a function of increasing angle\n sortI = np.argsort(annul_ang)\n annul_ang = annul_ang[sortI]\n annul_mag = annul_mag[sortI]\n annul_x = annul_x[sortI]\n annul_y = annul_y[sortI]\n annul_I = annul_I[sortI]\n annul_err = annul_err[sortI]\n\n # Data binning\n nbsa = 100\n deltheta = 2*np.pi/nbsa\n binsa = np.linspace(-np.pi/2, 3*np.pi/2, nbsa)\n\n binindexa = np.zeros([nbsa]) # number points summed per bin\n bintotala = np.zeros([nbsa]) # summed intensity\n errtotala = np.zeros([nbsa]) # summed error\n\n for i in range(nbsa - 1):\n for ii in range(len(annul_mag)):\n if annul_ang[ii] >= binsa[i]:\n if annul_ang[ii] <= binsa[i + 1]:\n binindexa[i] = binindexa[i] + 1\n bintotala[i] = bintotala[i] + annul_I[ii]\n errtotala[i] = errtotala[i] + annul_err[ii]\n# print(errtotal[i])\n\n binZeros = binindexa != 0\n\n binsa = binsa[binZeros]\n binindexa = binindexa[binZeros]\n bintotala = bintotala[binZeros]\n errtotala = errtotala[binZeros]\n# print(errtotal)\n\n binavea = bintotala/binindexa\n erravea = errtotala/binindexa\n# print(errave)\n\n # if save == '1':\n # fileType = '.dat'\n # fileName = describer + '_' + \\\n # str(dataSet.sample[0]) + '_' + str(dataSet.shear[0][0:-14]) + 'ps'\n # location = '../2D_annular_sector_extraction/py_annular/'\n # fullName = location + fileName + fileType\n # with open(fullName, 'wt') as fh:\n # fh.write(\"q I(q) err_I\\n\")\n # for x, y, z in zip(binsa, binavea, binavea):\n # fh.write(\"%g %g %g\\n\" % (x, y, z))\n\n return binsa, binavea, erravea\n","sub_path":"annular_sector_extraction.py","file_name":"annular_sector_extraction.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"279768962","text":"from typing import List\r\n\r\n\r\nclass Solution:\r\n def twoSum(self, numbers: List[int], target: int) -> List[int]:\r\n i = 0\r\n j = len(numbers) - 1\r\n find_num = target - numbers[0]\r\n while i < j:\r\n while numbers[i] + numbers[j] > target:\r\n j -= 1\r\n if numbers[i] + numbers[j] == target:\r\n return [i + 1, j + 1]\r\n i 
+= 1\r\n\r\n\r\ns = Solution()\r\nprint(s.twoSum([2, 7, 11, 15], 9)) # 2\r\n","sub_path":"Python/167.twoSum.py","file_name":"167.twoSum.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"140350675","text":"from vars.modules.functions import listencommand\nprint('Welcome!\\nType \"new game\" to start a new game.\\nType \"load game\" to load saved game\\n')\ncmmnd = input()\nfrom vars.modules.mapclasses import mainmap\nfrom vars.modules.objclasses import mainhero\nimport shelve\nif cmmnd == 'initiate':\n currentmap = mainmap(1)\n hero = mainhero()\n database = shelve.open('vars/dbase/defaultdb')\n database['cmap'] = currentmap\n database['hero'] = hero\n database.close()\nelif cmmnd == 'new game':\n database = shelve.open('vars/dbase/defaultdb')\n currentmap = database['cmap']\n hero = database['hero']\n database.close()\n listencommand(currentmap,hero)\nelif cmmnd == 'load game':\n F=open('vars/dbase/savestatus.txt')\n for line in F:\n if line.rstrip() == '1':\n database = shelve.open('vars/dbase/actualdb')\n currentmap = database['cmap']\n hero = database['hero']\n database.close()\n listencommand(currentmap,hero)\n else:\n print('No saves!')\nelif cmmnd == 'delete saves':\n F=open('vars/dbase/savestatus.txt','w')\n print('0',file=F)\nelif cmmnd == 'exit':\n pass\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"318994664","text":"import serial\nimport pdfcrowd\nimport sys\nfrom zplgrf import GRF\nfrom PIL import Image\nfrom io import BytesIO\nfrom app.models.baseModel import BaseModel\nfrom app.models.courriers.constants import set_html\n\n\nclass Package(BaseModel):\n def __init__(self, origin_zipcode: str, destiny_zipcode: str, weight: float, height: float, width: float,\n length: float, _id: str = None):\n super().__init__(_id)\n self.origin_zipcode = origin_zipcode\n self.destiny_zipcode = destiny_zipcode\n self.weight = weight\n self.height = height\n self.width = width\n self.length = length\n\n def get_volumetric_weight(self):\n return (self.height * self.length * self.width) / 5000\n\n def calculate_weight(self):\n vol_weight = self.get_volumetric_weight()\n if vol_weight > self.weight:\n self.weight = vol_weight\n\n @staticmethod\n def get_weight():\n ser = serial.Serial('/dev/ttyUSB0')\n s = ser.read(100)\n return float(s.strip()[:-2])\n\n\n\n @staticmethod\n def print(weight, public_price, price_pakke):\n try:\n\n client = pdfcrowd.HtmlToImageClient(\n 'JosepRoo', 'd508735c4b86f87fd4a3961d5195126b')\n\n # configure the conversion\n client.setOutputFormat('png')\n\n # run the conversion and store the result into an image variable\n image = client.convertString(set_html(weight, public_price, price_pakke))\n temp_buff = BytesIO()\n temp_buff.write(image)\n # need to jump back to the beginning before handing it off to PIL\n temp_buff.seek(0)\n image = Image.open(temp_buff)\n # image = image.resize([720, 1080]) next resolution\n image = image.resize([830, 1133])\n temp_buff.seek(0)\n image.save(temp_buff, format='PNG')\n grf = GRF.from_image(temp_buff.getvalue(), 'ZPL')\n grf.optimise_barcodes()\n import socket\n mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host = \"169.254.239.30\"\n port = 6101\n print(grf.to_zpl(compression=3, quantity=1))\n try:\n mysocket.connect((host, port)) # connecting to host\n 
mysocket.send(bytes(grf.to_zpl(compression=3, quantity=1), \"utf-8\")) # using bytes\n mysocket.close() # closing connection\n except:\n print(\"Error with the connection\")\n # Some random options\n\n except pdfcrowd.Error as why:\n # report the error\n sys.stderr.write('Pdfcrowd Error: {}\\n'.format(why))\n\n # handle the exception here or rethrow and handle it at a higher level\n raise\n","sub_path":"app/models/packages/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"594463314","text":"#!/usr/bin/env python\n\nfrom math import floor\nfrom sys import argv\n\ndef floatbase(s, b):\n\tsawsign = False\n\tsawdp = False\n\tsign = +1\n\tman = 0\n\texp = 0\n\tfor ch in s:\n\t\tif ch == '+' and not sawsign:\n\t\t\tsawsign = True\n\t\t\tsign = +1\n\t\telif ch == '-' and not sawsign:\n\t\t\tsawsign = True\n\t\t\tsign = -1\n\t\telif ch == '.' and not sawdp:\n\t\t\tsawsign = True\n\t\t\tsawdp = True\n\t\telse:\n\t\t\tsawsign = True\n\t\t\tdigit = int(ch, b)\n\t\t\tman = man * b + digit\n\t\t\tif sawdp: exp += 1\n\treturn float(sign * man) / (float(b) ** float(exp))\n\ndef strbase(n, b):\n\tif n < 0:\n\t\treturn \"-\" + strbase(-n, b)\n\telse:\n\t\tdef strdigit(d):\n\t\t\treturn chr(48 + d) if d < 10 else chr(55 + d)\n\t\tip = floor(n)\n\t\tfp = n - ip\n\t\tips = \"\"\n\t\tfps = \"\"\n\t\twhile ip > 0:\n\t\t\t(ip, digit) = divmod(ip, b)\n\t\t\tips = strdigit(int(digit)) + ips\n\t\twhile fp > 0 and len(fps) < 100:\n\t\t\tfp *= b\n\t\t\tdigit = floor(fp)\n\t\t\tfp -= digit\n\t\t\tfps = fps + strdigit(int(digit))\n\t\tif ips == \"\": ips = \"0\"\n\t\treturn ips if fps == \"\" else ips + \".\" + fps\n\ndef main():\n\tif len(argv) <= 3:\n\t\tprint(\"usage: bcr [ [...]]\")\n\t\treturn\n\ttry:\n\t\tsb = int(argv[1])\n\t\tif sb < 2 or sb > 36:\n\t\t\tprint(\"input radix out of range: \" + str(sb))\n\t\t\treturn\n\texcept:\n\t\tprint(\"input radix not an integer: \" + argv[1])\n\t\treturn\n\ttry:\n\t\tdb = int(argv[2])\n\t\tif db < 2 or db > 36:\n\t\t\tprint(\"output radix out of range: \" + str(db))\n\t\t\treturn\n\texcept:\n\t\tprint(\"output radix not an integer: \" + argv[2])\n\t\treturn\n\tfor i in range(3, len(argv)):\n\t\ttry:\n\t\t\tn = floatbase(argv[i], sb)\n\t\t\tdn = strbase(n, db)\n\t\t\tprint(dn)\n\t\texcept:\n\t\t\tprint(\"invalid value: \" + argv[i])\n\nif __name__ == \"__main__\": main()\n","sub_path":"bcr.py","file_name":"bcr.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"207068874","text":"import os\nimport unittest\n\nimport numpy as np\nfrom models.preprocess import preprocessCsv\n\n\nclass TestPreprocessCSV(unittest.TestCase):\n def setUp(self):\n self.path = os.path.join(\n os.path.dirname(__file__), \"..\", \"data\", \"HR-Employee.csv\"\n )\n\n def test_data_is_numpy_array(self):\n (X, y) = preprocessCsv(self.path)\n self.assertIsInstance(X, np.ndarray)\n\n def test_zero_var_removed(self):\n (X, y) = preprocessCsv(self.path)\n mask = X.var() == 0\n self.assertTrue(not mask.all())\n\n def test_correct_shape(self):\n (X, y) = preprocessCsv(self.path)\n self.assertEqual(X.shape, (1470, 45))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_preprocess.py","file_name":"test_preprocess.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"167781881","text":"def maximum_wealth(accounts: list[list[int]]) -> int:\n \"\"\"\n >>> maximum_wealth([[1, 2, 3], [3, 2, 1]])\n 6\n \"\"\"\n output = 0\n for i in accounts:\n if sum(i) > output:\n output = sum(i)\n return output\n\n\ndef faster_way(accounts: list[list[int]]) -> int:\n \"\"\"\n >>> faster_way([[1, 2, 3], [3, 2, 1]])\n 6\n \"\"\"\n return max(sum(i) for i in accounts)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(verbose=True)\n","sub_path":"Leet Code Problems/Easy Problems/maximum_wealth.py","file_name":"maximum_wealth.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"642929473","text":"#!/usr/bin/env python3\n\n\"\"\"\n ising.py - Ising model on a square lattice in D=2, v. 1.0\n\n Usage: ./ising.py [-L ] [-b ] [-t ] [-m ] [-s ] [-p ]\n\n Arguments:\n\n Options:\n -h --help Display this help and exit\n -L Lattice size [default: 32]\n -b Inverse temperature; if negative default to critical value (see code) [default: -1.0]\n -m Measure sweeps [default: 1000]\n -s Start; c for 'cold', h for 'hot' [default: c]\n -t Therma [default: 0]\n -p If NPLOT > 0 will plot in the end a point each NPLOT measures [default: 1]\n\"\"\"\n\nimport math\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport docopt\nimport progressbar as pbar\n\n#----------------------------------------------------------\n# parse options\n#----------------------------------------------------------\n\nBETAC = math.log(1.0+math.sqrt(2.0))/2.0 # infinite volume critical value\n\nargv = docopt.docopt(__doc__,version=\"1.0\")\n\nL = int(argv[\"-L\"])\nBETA = float(argv[\"-b\"])\nNTHERMA = int(argv[\"-t\"])\nNMC = int(argv[\"-m\"])\nSTART = argv[\"-s\"]\nNPLOT = int(argv[\"-p\"])\nPLOTIT = True if NPLOT > 0 else False\nif BETA < 0.0:\n BETA = BETAC\n\nV = L**2 # lattice volume\nm2beta = -2.0*BETA\n\n#----------------------------------------------------------\n# define and init field; cold start by default\n#----------------------------------------------------------\n\nspin = np.ones(V, dtype=int)\nif START == 'h':\n for ix in range(V):\n if random.random() < 0.5:\n spin[ix] = -1\n\n#----------------------------------------------------------\n# define and init geometry; PB Conditions\n#----------------------------------------------------------\n\nneigh = np.zeros((V,4), dtype=int)\nfor iy in range(L):\n for ix in range(L):\n iz = ix + iy * L\n neigh[iz,0] = iz + 1 if ix < L-1 else iz - (L-1)\n neigh[iz,1] = iz + L if iy < L-1 else iz - (L-1)*L\n neigh[iz,2] = iz - 1 if ix > 0 else iz + (L-1)\n neigh[iz,3] = iz - L if iy > 0 else iz + (L-1)*L\n\n#----------------------------------------------------------\n# a single sweep through the lattice\n#----------------------------------------------------------\n\ndef sweep():\n for ix in range(V):\n sigma = spin[neigh[ix,0]] + spin[neigh[ix,1]] + spin[neigh[ix,2]] + spin[neigh[ix,3]]\n minusBetaDeltaH = m2beta*spin[ix]*sigma\n if math.log(1.0-random.random()) < minusBetaDeltaH:\n spin[ix] = -spin[ix]\n\n#----------------------------------------------------------\n# measure primary quantities\n#----------------------------------------------------------\n\ndef measure():\n e = m = 0.0\n for ix in range(V):\n s = spin[ix]\n e -= s*(spin[neigh[ix,0]]+spin[neigh[ix,1]])\n m += s\n e /= V\n m /= V\n return e, m\n\n#----------------------------------------------------------\n# 
MAIN\n#----------------------------------------------------------\n\nbar = pbar.ProgressBar()\n\n#----------------------------------------------------------\n# therma + measure taking\n#----------------------------------------------------------\nnamefile=\"Mag_\"+str(L)+\"_\"+str(BETA)+\".dat\"\noutfile = open(namefile,\"w\")\n\neTot = e2Tot = mTot = m2Tot = aTot = a2Tot = 0.0\nip = []; ep = []; mp = []; ap = []\nj = 0\nfor i in bar(range(NTHERMA+NMC)):\n j += 1\n sweep()\n if j > NTHERMA:\n e, m = measure()\n a = math.fabs(m)\n ip.append(j-NTHERMA); ep.append(e); mp.append(m); ap.append(a)\n outfile.write(str(a))\n outfile.write('\\n')\n eTot += e\n mTot += m\n aTot += a\n e2Tot += e**2\n m2Tot += m**2\n a2Tot += a**2\n if j % NPLOT == 0:\n ip.append(j-NTHERMA); ep.append(e); mp.append(m); ap.append(a)\noutfile.close()\n\ninfile=open(namefile,'r')\nVVV=np.loadtxt(namefile)\nprint(len(VVV))\n\nif PLOTIT:\n plt.figure(figsize=(16,9))\n plt.title(r'Ising 2$D$')\n plt.xlabel(r'$i$')\n plt.ylabel(r'$e$, $m$, $|m|$')\n plt.plot(ip, ep, 'b-')\n plt.plot(ip, mp, 'r-')\n plt.plot(ip, ap, 'k-')\n plt.grid()\n plt.show()\n\neTot /= NMC; mTot /= NMC; aTot /= NMC\ne2Tot /= NMC; m2Tot /= NMC; a2Tot /= NMC\n\neErr = math.sqrt((e2Tot-eTot**2)/(NMC-1))\nmErr = math.sqrt((m2Tot-mTot**2)/(NMC-1))\naErr = math.sqrt((a2Tot-aTot**2)/(NMC-1))\n\nprint(\"\"\"\n\n < Energy > = %7.4f +/- %7.4f\n < Mag > = %7.4f +/- %7.4f\n < |Mag| > = %7.4f +/- %7.4f\n\n\"\"\" % (eTot, eErr, mTot, mErr, aTot, aErr))\n","sub_path":"lezioni/ising_lezione.py","file_name":"ising_lezione.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"617453555","text":"\n#(1)\nimport random\nimport time\nrandom.seed(time.time())\na = list()\nfor _ in range(1000):\n a.append(random.randint(1,1000))\nstart_time = time.time()\na.sort(reverse = True)\nprint(time.time()-start_time, \"seconds\")\n\n#(2) linear search\ndef linear_search(target, dataset):\n for i in range(len(dataset)):\n if dataset[i] == target:\n return i\n return \"NOT found\"\n\ndataset = [15,20,30,50,3,40]\nresult = linear_search(40, dataset)\n\n#(3) 구구단 프로그램\nfor i in range(1,10):\n for j in range(1,10):\n print(i ,\"x\", j , \"=\", i*j, end = ' ')\n print()\n\n# student class\nclass Student :\n univ = \"SKKU\" # 클래스 변수 : class의 모든 객체가 공유하는 속성을 위한 변수\n def __init__(self, name, id): # 생성자\n self.name = name # 멤버변수 : 각각의 객체의 속성을 위한 변수\n self.id = id # 멤버 변수\n def get_name(self): # 멤버 함수(메소드)\n return self.name\n def get_id(self): # 멤버 함수(메소드)\n return self.id\n\n#(4) list 이용\nassistant = []\nnum_ass = int(input(\"input the number of assistant : \"))\nfor i in range(num_ass):\n new_name = input(\"please enter a name : \")\n new_id = input(\"please enter a id : \")\n assistant.append(Student(new_name, new_id))\nfor j in range(num_ass):\n print(j, assistant[j].get_name(), assistant[j].get_id())\n\n#(5) stack 이용\nclass Stack:\n def __init__(self):\n self.items=[]\n def push(self, item):\n self.items.append(item)\n def peek(self):\n return self.items[len(self.items)-1]\n def pop(self):\n self.items.pop()\n\nassistant = Stack()\nnum_ass = int(input(\"input the number of assistant : \"))\nfor i in range(num_ass):\n new_name = input(\"please enter a name : \")\n new_id = input(\"please enter a id : \")\n assistant.push(Student(new_name,new_id))\nfor j in range(num_ass):\n print(j, assistant.peek().name, assistant.peek().id)\n assistant.pop()\n\n#(6) factorial\ndef factorial(n): # O(n)\n if n==1 
:\n        return 1\n    else:\n        return n * factorial(n-1)\n\n#(7) fibo\ndef fibo(n): #O(2^n)\n    if n<=2:\n        return 1\n    else:\n        return fibo(n-2) + fibo(n-1)\n\n#(8) binary search\ndef binary_search(dataset, left, right, target):\n    if left > right :\n        return \"Not found\"\n    else :\n        mid = (left+right)//2\n        if dataset[mid] == target :\n            return mid\n        elif dataset[mid] > target :\n            return binary_search(dataset, left, mid-1, target)\n        else:\n            return binary_search(dataset, mid+1, right, target)\n","sub_path":"Data_Structure/Python/chap1.py","file_name":"chap1.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"403841954","text":"#!venv/bin/python3\n\nfrom config import Config\nfrom signal import signal, SIGINT, SIGTERM\nfrom logging import Logger, getLogger, FileHandler, StreamHandler, Formatter, DEBUG\n\nfrom midi.controller import Controller as MIDIController\nfrom device.controller import Controller as DeviceController\n\n\ndef setup_logger() -> Logger:\n    _logger = getLogger('inux')\n    _logger.setLevel(DEBUG)\n    formatter = Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')\n    file_handler = FileHandler(filename='logs/inux.log', encoding='utf-8', mode='w')\n    file_handler.setFormatter(formatter)\n    stream_handler = StreamHandler()\n    stream_handler.setFormatter(formatter)\n    _logger.addHandler(file_handler)\n    _logger.addHandler(stream_handler)\n    return _logger\n\n\ndef dismantle_logger(logger: Logger):\n    handlers = logger.handlers.copy()\n    for handler in handlers:\n        handler.close()\n        logger.removeHandler(handler)\n\n\nif __name__ == '__main__':\n    from argparse import ArgumentParser\n    parser = ArgumentParser()\n    parser.add_argument('mode', choices=['device', 'midi'], help='Whether to run in Midi or Device Mode')\n    parser.add_argument('-c', '--config', help='Filepath to config file', type=str, default='~/.config/Inux/config.ini')\n    args = parser.parse_args()\n\n    logger = setup_logger()\n\n    config = Config(args.config, logger)\n\n    if args.mode == 'device':\n        device_controller = DeviceController(config, logger)\n        try:\n            print('Booting up..')\n            device_controller.run()\n        except KeyboardInterrupt:\n            device_controller.stop()\n        except Exception as e:\n            logger.exception(e)\n            device_controller.stop()\n        finally:\n            print('Shutting down..')\n            dismantle_logger(logger)\n\n    elif args.mode == 'midi':\n        print('Currently not supported')\n        # midi_controller = MIDIController(config, logger)\n        # midi_controller.run()\n        # midi_controller.stop()\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"525858156","text":"\"\"\"Functions for manipulating the bookmarks db.\"\"\"\n\nimport psycopg2\nimport psycopg2.extensions\n\nconnection = None\n\ndef __connect():\n    \"\"\"Connect to the db.\"\"\"\n    global connection\n\n    if not connection:\n        connection = psycopg2.connect(\"\")\n    elif connection.get_transaction_status() == \\\n        psycopg2.extensions.TRANSACTION_STATUS_UNKNOWN:\n        connection = psycopg2.connect(\"\")\n    return connection\n\n# This is for testing purposes to allow tests to override the connection\n# factory.\nconnect = __connect\n\ndef callproc_one(name, *args):\n    \"\"\"Calls a stored procedure returning a single value.\"\"\"\n\n    conn = connect()\n    cur = conn.cursor()\n    cur.callproc(name, args)\n    row = cur.fetchone()\n    if not row:\n        return None\n    result = row[0]\n    conn.commit()\n    
cur.close()\n\n return result\n\ndef create_link(url):\n \"\"\"Creates a new link in the db, returns the link id.\n\n >>> create_link(\"http://www.example.org/\")\n 1\n \"\"\"\n return callproc_one(\"bookmarks.create_link\", url)\n\n\ndef get_newest_link_time():\n \"\"\"Returns the time the most recent link in the database was created.\n\n >>> get_newest_link_time()\n datetime.datetime(2012, 11, 10, 9, 8, 7)\n \"\"\"\n dt = callproc_one(\"bookmarks.get_newest_link_time\")\n if not dt:\n return None\n\n # HTTP works exclusively with UTC\n if dt.utcoffset():\n dt = dt.replace(tzinfo=None) - dt.utcoffset()\n\n return dt\n\ndef get_links():\n \"\"\"Returns the list of stored links.\n\n >>> list(get_links())\n []\n >>> create_link(\"http://www.example.org/\")\n 1\n >>> list(get_links())\n [[1, 'http://www.example.org/']]\n >>> create_link(\"http://www.example.org/page\")\n 2\n >>> list(get_links())\n [[1, 'http://www.example.org/'], [2, 'http://www.example.org/page']]\n \"\"\"\n conn = connect()\n cur = conn.cursor()\n cur.callproc(\"bookmarks.get_links\")\n row = cur.fetchone()\n while row:\n yield row\n row = cur.fetchone()\n\ndef get_link_list():\n \"\"\"Returns a list of links with data relevant for the list page.\"\"\"\n conn = connect()\n cur = conn.cursor()\n cur.callproc(\"bookmarks.get_link_list\")\n row = cur.fetchone()\n while row:\n yield row\n row = cur.fetchone()\n\ndef get_links_missing_metadata():\n \"\"\"Returns a list of links that are missing at least some metadata.\"\"\"\n conn = connect()\n cur = conn.cursor()\n cur.callproc(\"bookmarks.get_links_missing_metadata\")\n row = cur.fetchone()\n while row:\n yield row\n row = cur.fetchone()\n\ndef get_link_url(link_id):\n \"\"\"Returns the url of a given link id.\n\n >>> link_id = create_link(\"http://www.example.org/\")\n >>> get_link_url(link_id)\n 'http://www.example.org/'\n \"\"\"\n return callproc_one(\"bookmarks.get_link_url\", link_id)\n\ndef add_screenshot(link_id, path):\n \"\"\"Adds a screenshot to the given link, returning the screenshot id.\n\n >>> link_id = create_link(\"http://www.example.org/\")\n >>> add_screenshot(link_id, \"/image.png\")\n 1\n \"\"\"\n return callproc_one(\"bookmarks.add_screenshot\", link_id, path)\n\ndef get_screenshots(link_id):\n \"\"\"Gets the screenshots associated with the given link.\n\n >>> link_id = create_link(\"http://www.example.org/\")\n >>> add_screenshot(link_id, \"/image.png\")\n 1\n >>> list(get_screenshots(link_id))\n [('/image.png', datetime.datetime(2012, 1, 2, 3, 4, 5))]\n \"\"\"\n conn = connect()\n cur = conn.cursor()\n cur.callproc(\"bookmarks.get_screenshots\", (link_id, ))\n row = cur.fetchone()\n while row:\n yield row\n row = cur.fetchone()\n\ndef add_thumbnail(link_id, path):\n \"\"\"Adds a thumbnail to the given link, returning the thumbnail id.\n\n >>> link_id = create_link(\"http://www.example.org/\")\n >>> add_thumbnail(link_id, \"/image.png\")\n 1\n \"\"\"\n return callproc_one(\"bookmarks.add_thumbnail\", link_id, path)\n\ndef get_thumbnail(link_id):\n \"\"\"Fetches the path of the most recent thumbnail for this path.\n\n >>> link_id = create_link(\"http://www.example.org/\")\n >>> add_thumbnail(link_id, \"/image.png\")\n 1\n >>> get_thumbnail(link_id)\n '/image.png'\n \"\"\"\n return callproc_one(\"bookmarks.get_thumbnail\", link_id)\n\ndef add_text_metadata(link_id, title, text, keywords):\n \"\"\"Adds text metadata to a link.\n\n >>> link_id = create_link(\"http://www.example.org/\")\n >>> add_text_metadata(link_id, \"Hello World!\", \"Hello to the 
world\", \"hello,world\")\n 1\n \"\"\"\n return callproc_one(\"bookmarks.add_text_metadata\", link_id, title, text, keywords)\n","sub_path":"bookmarks/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649364528","text":"# expt.command: step01_input\n\n# django\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\n\n# local\nfrom apps.expt.models import Experiment\nfrom apps.expt.data import *\nfrom apps.expt.util import *\n\n# util\nimport os\nfrom os.path import join, exists, splitext\nfrom optparse import make_option\nfrom subprocess import call\n\nspacer = ' ' *\t20\n\n### Command\nclass Command(BaseCommand):\n\toption_list = BaseCommand.option_list + (\n\n\t\tmake_option('--expt', # option that will appear in cmd\n\t\t\taction='store', # no idea\n\t\t\tdest='expt', # refer to this in options variable\n\t\t\tdefault='', # some default\n\t\t\thelp='Name of the experiment to import' # who cares\n\t\t),\n\n\t\tmake_option('--series', # option that will appear in cmd\n\t\t\taction='store', # no idea\n\t\t\tdest='series', # refer to this in options variable\n\t\t\tdefault='', # some default\n\t\t\thelp='Name of the series' # who cares\n\t\t),\n\n\t)\n\n\targs = ''\n\thelp = ''\n\n\tdef handle(self, *args, **options):\n\n\t\t# vars\n\t\texperiment_name = options['expt']\n\t\tseries_name = options['series']\n\n\t\tif experiment_name!='' and series_name!='':\n\t\t\texperiment = Experiment.objects.get(name=experiment_name)\n\t\t\tseries = experiment.series.get(name=series_name)\n\n\t\t\t# 1. Convert track files to csv\n\t\t\tdef convert_track_file(path, name_with_index):\n\t\t\t\t# names\n\t\t\t\tindex_template = r'(?P.+)_n[0-9]+'\n\t\t\t\talt = r'(?P.+)'\n\t\t\t\tname_match = re.match(index_template, name_with_index) if re.match(index_template, name_with_index) is not None else re.match(alt, name_with_index)\n\t\t\t\tname = name_match.group('name')\n\t\t\t\tcsv_file_name = '{}_{}_markers.csv'.format(join(path, name), random_string())\n\t\t\t\txls_file_name = '{}.xls'.format(join(path, name_with_index))\n\n\t\t\t\ttracks = {} # stores list of tracks that can then be put into the database\n\n\t\t\t\twith open(xls_file_name, 'rb') as track_file:\n\n\t\t\t\t\tlines = track_file.read().decode('mac-roman').split('\\n')[1:-1]\n\t\t\t\t\tfor i, line in enumerate(lines): # omit title line and final blank line\n\t\t\t\t\t\tline = line.split('\\t')\n\n\t\t\t\t\t\t# details\n\t\t\t\t\t\ttrack_id = int(float(line[1]))\n\t\t\t\t\t\tr = int(float(line[4]))\n\t\t\t\t\t\tc = int(float(line[3]))\n\t\t\t\t\t\tt = int(float(line[2])) - 1\n\n\t\t\t\t\t\tif track_id in tracks:\n\t\t\t\t\t\t\ttracks[track_id].append((r,c,t))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttracks[track_id] = [(r,c,t)]\n\n\t\t\t\twith open(csv_file_name, 'w+') as out_file:\n\t\t\t\t\tout_file.write('expt,series,channel,id,t,r,c\\n')\n\t\t\t\t\tfor track_id, track in tracks.items():\n\t\t\t\t\t\tfor frame in list(sorted(track, key=lambda t: t[2])):\n\t\t\t\t\t\t\tout_file.write('{},{},{},{},{},{},{}\\n'.format(experiment_name,series_name,'-zcomp',track_id,frame[2],frame[0],frame[1]))\n\n\t\t\t# for each track file in the track directory, if there is not a .csv file with the same name, then translate it into the new format\n\t\t\tfor file_name in [f for f in os.listdir(experiment.track_path) if ('.xls' in f and 'region' not in f and series_name in f)]:\n\t\t\t\tname_with_index, ext = 
splitext(file_name)\n\t\t\t\tconvert_track_file(experiment.track_path, name_with_index)\n\n\t\t\t# 2. Import tracks\n\t\t\t# select composite\n\t\t\tcomposite = series.composites.get()\n\n\t\t\t# add all track files to composite\n\t\t\tdata_file_list = [f for f in os.listdir(composite.experiment.track_path) if (os.path.splitext(f)[1] in allowed_data_extensions and composite.experiment.path_matches_series(f, composite.series.name) and 'regions' not in f)]\n\n\t\t\tfor df_name in data_file_list:\n\t\t\t\tprint('step02 | data file {}... '.format(df_name), end='\\r')\n\t\t\t\tdata_file, data_file_created, status = composite.get_or_create_data_file(composite.experiment.track_path, df_name)\n\t\t\t\tprint('step02 | data file {}... {}'.format(df_name, status))\n\n\t\t\t### MARKERS\n\t\t\tfor data_file in composite.data_files.filter(data_type='markers'):\n\t\t\t\tdata = data_file.load()\n\t\t\t\tfor i, marker_prototype in enumerate(data):\n\t\t\t\t\ttrack, track_created = composite.tracks.get_or_create(experiment=composite.experiment,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tseries=composite.series,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcomposite=composite,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchannel=composite.channels.get(name=marker_prototype['channel']),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttrack_id=marker_prototype['id'])\n\n\t\t\t\t\ttrack_instance, track_instance_created = track.instances.get_or_create(experiment=composite.experiment,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t series=composite.series,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t composite=composite,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t channel=composite.channels.get(name=marker_prototype['channel']),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t t=int(marker_prototype['t']))\n\n\t\t\t\t\tmarker, marker_created = track_instance.markers.get_or_create(experiment=composite.experiment,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tseries=composite.series,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcomposite=composite,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchannel=composite.channels.get(name=marker_prototype['channel']),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttrack=track,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tr=int(marker_prototype['r']),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tc=int(marker_prototype['c']))\n\n\t\t\t\t\tprint('step02 | processing marker ({}/{})... {} tracks, {} instances, {} markers'.format(i+1,len(data),composite.tracks.count(), composite.track_instances.count(), composite.markers.count()), end='\\n' if i==len(data)-1 else '\\r')\n\n\t\t\t# 3. Generate zDiff channel\n\t\t\tzdiff_mod = composite.mods.create(id_token=generate_id_token('img', 'Mod'), algorithm='mod_zdiff')\n\n\t\t\t# Run mod\n\t\t\tprint('step02 | processing mod_zdiff...', end='\\r')\n\t\t\tzdiff_mod.run()\n\t\t\tprint('step02 | processing mod_zdiff... done.{}'.format(spacer))\n\n\t\t\t# 4. Segment zdiff channel\n\t\t\tzdiff_channel = composite.channels.get(name='-zdiff')\n\t\t\tzdiff_unique = zdiff_channel.segment(marker_channel_name='-zcomp')\n\n\t\t\t# 5. 
Generate zEdge channel\n\t\t\tzedge_mod = composite.mods.create(id_token=generate_id_token('img', 'Mod'), algorithm='mod_zedge')\n\n\t\t\t# Run mod\n\t\t\tprint('step02 | processing mod_zedge...', end='\\r')\n\t\t\tzedge_mod.run(channel_unique_override=zdiff_unique)\n\t\t\tprint('step02 | processing mod_zedge... done.{}'.format(spacer))\n\n\t\t\t# 6. Segment zEdge channel\n\t\t\tzedge_channel = composite.channels.get(name='-zbf')\n\t\t\tzedge_unique = zedge_channel.segment(marker_channel_name='-zcomp', threshold_correction_factor=1.2)\n\n\t\t\t# 7. Export data to data directory\n\t\t\tseries.export_data()\n\n\t\t\t# 8. Tile mod\n\t\t\tcomposite.current_zedge_unique = zedge_unique\n\t\t\tcomposite.save()\n\t\t\ttile_mod = composite.mods.create(id_token=generate_id_token('img', 'Mod'), algorithm='mod_tile')\n\n\t\t\t# Run mod\n\t\t\tprint('step02 | processing mod_tile...', end='\\r')\n\t\t\ttile_mod.run(channel_unique_override=zedge_unique)\n\t\t\tprint('step02 | processing mod_tile... done.{}'.format(spacer))\n\n\t\telse:\n\t\t\tprint('Please enter an experiment')\n","sub_path":"woot/apps/expt/management/commands/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"578022445","text":"# client.py\nimport socket\n\n# create a socket object\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# address of the server to connect to\nhost = \"192.168.0.5\"\n\n# set the port\nport = 9999\n\n# connect to the server on the given port\ns.connect((host, port))\n\n# Receive no more than 1024 bytes\ntm = s.recv(1024)\n\ncat = input(\"type a message > \")\n# send something\ns.send(cat.encode('ascii'))\n\ns.close()\n\nprint(\"The time received from the server is %s\" % tm.decode('ascii'))\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"409788578","text":"from __future__ import annotations\n\nimport copy\nimport logging\nfrom typing import Any\n\nimport fcutils\n\nfrom pygama import lgdo\nfrom pygama.raw.data_decoder import DataDecoder\n\nlog = logging.getLogger(__name__)\n\n# put decoded values here where they can be used also by the orca decoder\nfc_decoded_values = {\n    # packet index in file\n    \"packet_id\": {\n        \"dtype\": \"uint32\",\n    },\n    # index of event\n    \"eventnumber\": {\n        \"dtype\": \"int32\",\n    },\n    # time since epoch\n    \"timestamp\": {\n        \"dtype\": \"float64\",\n        \"units\": \"s\",\n    },\n    # time since beginning of file\n    \"runtime\": {\n        \"dtype\": \"float64\",\n        \"units\": \"s\",\n    },\n    # number of triggered adc channels\n    \"numtraces\": {\n        \"dtype\": \"int32\",\n    },\n    # list of triggered adc channels\n    \"tracelist\": {\n        \"dtype\": \"int16\",\n        \"datatype\": \"array<1>{array<1>{real}}\", # vector of vectors\n        \"length_guess\": 16,\n    },\n    # fpga baseline\n    \"baseline\": {\n        \"dtype\": \"uint16\",\n    },\n    # fpga energy\n    \"daqenergy\": {\n        \"dtype\": \"uint16\",\n    },\n    # right now, index of the trigger (trace)\n    \"channel\": {\n        \"dtype\": \"uint32\",\n    },\n    # PPS timestamp in sec\n    \"ts_pps\": {\n        \"dtype\": \"int32\",\n    },\n    # clock ticks\n    \"ts_ticks\": {\n        \"dtype\": \"int32\",\n    },\n    # max clock ticks\n    \"ts_maxticks\": {\n        \"dtype\": \"int32\",\n    },\n    # the offset in sec between the master and unix\n    \"to_mu_sec\": {\n        \"dtype\": \"int64\",\n    },\n    # the offset in usec between master and unix\n    \"to_mu_usec\": {\n        \"dtype\": \"int32\",\n    
},\n # the calculated sec which must be added to the master\n \"to_master_sec\": {\n \"dtype\": \"int64\",\n },\n # the delta time between master and unix in usec\n \"to_dt_mu_usec\": {\n \"dtype\": \"int32\",\n },\n # the abs(time) between master and unix in usec\n \"to_abs_mu_usec\": {\n \"dtype\": \"int32\",\n },\n # startsec\n \"to_start_sec\": {\n \"dtype\": \"int64\",\n },\n # startusec\n \"to_start_usec\": {\n \"dtype\": \"int32\",\n },\n # start pps of the next dead window\n \"dr_start_pps\": {\n \"dtype\": \"int32\",\n },\n # start ticks of the next dead window\n \"dr_start_ticks\": {\n \"dtype\": \"int32\",\n },\n # stop pps of the next dead window\n \"dr_stop_pps\": {\n \"dtype\": \"int32\",\n },\n # stop ticks of the next dead window\n \"dr_stop_ticks\": {\n \"dtype\": \"int32\",\n },\n # maxticks of the dead window\n \"dr_maxticks\": {\n \"dtype\": \"int32\",\n },\n # current dead time calculated from deadregion (dr) fields.\n # Give the total dead time if summed up.\n \"deadtime\": {\n \"dtype\": \"float64\",\n },\n # waveform data\n \"waveform\": {\n \"dtype\": \"uint16\",\n \"datatype\": \"waveform\",\n \"wf_len\": 65532, # max value. override this before initializing buffers to save RAM\n \"dt\": 16, # override if a different clock rate is used\n \"dt_units\": \"ns\",\n \"t0_units\": \"ns\",\n },\n}\n\n\nclass FCEventDecoder(DataDecoder):\n \"\"\"\n Decode FlashCam digitizer event data.\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n # these are read for every event (decode_event)\n self.decoded_values = copy.deepcopy(fc_decoded_values)\n super().__init__(*args, **kwargs)\n self.skipped_channels = {}\n self.fc_config = None\n self.max_numtraces = 1\n\n def get_key_list(self) -> range:\n return range(self.fc_config[\"nadcs\"].value)\n\n def get_decoded_values(self, channel: int = None) -> dict[str, dict[str, Any]]:\n # FC uses the same values for all channels\n return self.decoded_values\n\n def set_file_config(self, fc_config: lgdo.Struct) -> None:\n \"\"\"Access ``FCIOConfig`` members once when each file is opened.\n\n Parameters\n ----------\n fc_config\n extracted via :meth:`~.fc_config_decoder.FCConfigDecoder.decode_config`.\n \"\"\"\n self.fc_config = fc_config\n self.decoded_values[\"waveform\"][\"wf_len\"] = self.fc_config[\"nsamples\"].value\n\n def decode_packet(\n self,\n fcio: fcutils.fcio,\n evt_rbkd: lgdo.Table | dict[int, lgdo.Table],\n packet_id: int,\n ) -> bool:\n \"\"\"Access ``FCIOEvent`` members for each event in the DAQ file.\n\n Parameters\n ----------\n fcio\n The interface to the ``fcio`` data. Enters this function after a\n call to ``fcio.get_record()`` so that data for `packet_id` ready to\n be read out.\n evt_rbkd\n A single table for reading out all data, or a dictionary of tables\n keyed by channel number.\n packet_id\n The index of the packet in the `fcio` stream. 
Incremented by\n            :class:`~.raw.fc.fc_streamer.FCStreamer`.\n\n        Returns\n        -------\n        any_full\n            ``True`` if any of the readout buffers is full after this packet\n            has been decoded.\n        \"\"\"\n        if fcio.numtraces > self.max_numtraces:\n            self.max_numtraces = fcio.numtraces\n            # The buffer might be storing all channels' data, so set the\n            # fill_safety to the max number of traces we've seen so far.\n            for rb in evt_rbkd.values():\n                rb.fill_safety = self.max_numtraces\n        any_full = False\n\n        # a list of channels is read out simultaneously for each event\n        for iwf in fcio.tracelist:\n            if iwf not in evt_rbkd:\n                if iwf not in self.skipped_channels:\n                    # TODO: should this be a warning instead?\n                    log.debug(f\"skipping packets from channel {iwf}...\")\n                    self.skipped_channels[iwf] = 0\n                self.skipped_channels[iwf] += 1\n                continue\n            tbl = evt_rbkd[iwf].lgdo\n            if fcio.nsamples != tbl[\"waveform\"][\"values\"].nda.shape[1]:\n                log.warning(\n                    f\"event wf length was {fcio.nsamples} when \"\n                    f\"{self.decoded_values['waveform']['wf_len']} were expected\"\n                )\n            ii = evt_rbkd[iwf].loc\n\n            # fill the table\n            tbl[\"channel\"].nda[ii] = iwf\n            tbl[\"packet_id\"].nda[ii] = packet_id\n            tbl[\"eventnumber\"].nda[\n                ii\n            ] = fcio.eventnumber # the eventnumber since the beginning of the file\n            tbl[\"timestamp\"].nda[ii] = fcio.eventtime # the time since epoch in seconds\n            tbl[\"runtime\"].nda[\n                ii\n            ] = fcio.runtime # the time since the beginning of the file in seconds\n            tbl[\"numtraces\"].nda[ii] = fcio.numtraces # number of triggered adcs\n            tbl[\"tracelist\"].set_vector(ii, fcio.tracelist) # list of triggered adcs\n            tbl[\"baseline\"].nda[ii] = fcio.baseline[\n                iwf\n            ] # the fpga baseline values for each channel in LSB\n            tbl[\"daqenergy\"].nda[ii] = fcio.daqenergy[\n                iwf\n            ] # the fpga energy values for each channel in LSB\n            tbl[\"ts_pps\"].nda[ii] = fcio.timestamp_pps\n            tbl[\"ts_ticks\"].nda[ii] = fcio.timestamp_ticks\n            tbl[\"ts_maxticks\"].nda[ii] = fcio.timestamp_maxticks\n            tbl[\"to_mu_sec\"].nda[ii] = fcio.timeoffset_mu_sec\n            tbl[\"to_mu_usec\"].nda[ii] = fcio.timeoffset_mu_usec\n            tbl[\"to_master_sec\"].nda[ii] = fcio.timeoffset_master_sec\n            tbl[\"to_dt_mu_usec\"].nda[ii] = fcio.timeoffset_dt_mu_usec\n            tbl[\"to_abs_mu_usec\"].nda[ii] = fcio.timeoffset_abs_mu_usec\n            tbl[\"to_start_sec\"].nda[ii] = fcio.timeoffset_start_sec\n            tbl[\"to_start_usec\"].nda[ii] = fcio.timeoffset_start_usec\n            tbl[\"dr_start_pps\"].nda[ii] = fcio.deadregion_start_pps\n            tbl[\"dr_start_ticks\"].nda[ii] = fcio.deadregion_start_ticks\n            tbl[\"dr_stop_pps\"].nda[ii] = fcio.deadregion_stop_pps\n            tbl[\"dr_stop_ticks\"].nda[ii] = fcio.deadregion_stop_ticks\n            tbl[\"dr_maxticks\"].nda[ii] = fcio.deadregion_maxticks\n            tbl[\"deadtime\"].nda[ii] = fcio.deadtime\n\n            # if len(traces[iwf]) != fcio.nsamples: # number of sample per trace check\n            tbl[\"waveform\"][\"values\"].nda[ii][:] = fcio.traces[iwf]\n\n            evt_rbkd[iwf].loc += 1\n            any_full |= evt_rbkd[iwf].is_full()\n\n        return any_full\n","sub_path":"src/pygama/raw/fc/fc_event_decoder.py","file_name":"fc_event_decoder.py","file_ext":"py","file_size_in_byte":8447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"216416926","text":"import tweepy\n\n\"\"\" Tutorial 5 -- Cache\n\nTweepy provides a caching layer for frequently\nrequested data. 
This can help cut down on Twitter API\nrequests helping to make your application faster.\nBy default caching is disabled in API instances.\n\"\"\"\n\n\"\"\"\nLet's create a new API instance with caching enabled.\nTweepy comes with both a memory and file based cache.\nIn this example we will be using the memory cache.\n\"\"\"\ncached_api = tweepy.API(cache=tweepy.MemoryCache(timeout=120))\n\n\"\"\"\nNow we can use this API instance and any request that uses\n'GET' will be cached for 120 seconds. If no timeout is specified\nthe default is 60 seconds.\nHere is a demo using our new cached API instance...\n\"\"\"\nnon_cached_result = cached_api.public_timeline()\ncached_result = cached_api.public_timeline()\n\n\"\"\"\nThe first request (non_cached_result) will require a trip\nto the Twitter server. The second request (cached_result)\nwill be retrieved from the cache saving a trip to Twitter.\n\"\"\"\n\n\"\"\" Your own cache implementation\n\nIf you wish to use your own cache implementation just\nextend the Cache interface class (tweepy/cache.py).\nThen when you create your API instance pass it in.\n\"\"\"\n#my_api = tweepy.API(cache=MyCache())\n\n\"\"\" The End \"\"\"\n\n","sub_path":"tutorial/t5.py","file_name":"t5.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"439833514","text":"#Yeongjun made N problems for his students' exam.\n#Each problem has its own point value; a wrong answer scores 0 and a correct answer scores the problem's full value.\n#How many different total scores can a student possibly receive?\n#For example, in the first Testcase there are 3 problems in total, worth 2, 3 and 5 points.\n#The possible exam scores are 0, 2, 3, 5, 7, 8 and 10, i.e. 7 cases.\n#In the second testcase there are 10 problems in total, each worth 1 point.\n#The possible exam scores are 0,1,2,3,4,5,6,7,8,9,10, i.e. 11 cases in all.\nT=int(input())\nfor tc in range(1, T+1):\n    N = int(input())\n    p = list(map(int,input().split())) # points for each problem\n    ans = set([0]) # the case of scoring 0 points\n    #set: removes duplicates\n\n    for x in p:\n        num = set()\n        for y in ans:\n            num.add(x+y)\n        ans = set(list(ans)+list(num))\n    print('#{} {}'.format(tc, len(set(ans))))\n\n","sub_path":"Problem Solving/SWEA/D4/3752.가능한 시험 점수.py","file_name":"3752.가능한 시험 점수.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"241027784","text":"import tensorflow as tf \n\ndef prelu(_x):\n    with tf.variable_scope(\"prelu\"):\n        alphas = tf.get_variable('alpha', _x.get_shape()[-1],\n                       initializer=tf.constant_initializer(0.0),\n                       dtype=tf.float32)\n        pos = tf.nn.relu(_x)\n        neg = alphas * (_x - abs(_x)) * 0.5\n        \n        return pos + neg\n\ndef embedding_lookup(raw_inp, embedding):\n    '''\n    Args:\n        raw_inp: list of length `seq_len` tensor of shape `batch_size`\n        embedding: embedding matrix tensor of shape `vsize * emb_size`\n    Returns:\n        list of length `seq_len` tensor of `batch_size * emb_size`\n    '''\n    # look up each timestep's token ids in the given embedding matrix\n    return [tf.nn.embedding_lookup(embedding, i) for i in raw_inp]\n\ndef highway_conn(inps, scope_name=None, is_list=True):\n    '''\n    Args:\n        inps: single/list of seq_length tensor with [batch_size, state_size] shape\n    '''\n    if not isinstance(inps, list):\n        inps = [inps]\n        is_list = False\n    state_size = inps[0].get_shape().as_list()[1] \n    outputs = []\n    with tf.variable_scope(scope_name or 'highway') as scope:\n        for idx in range(len(inps)):\n            input_ = inps[idx]\n            if idx > 0 : scope.reuse_variables()\n            weight = tf.get_variable(\"hw_w\", [state_size, state_size])\n            z = tf.matmul(input_, weight)\n            trans_gate = tf.sigmoid(z)\n            
output = trans_gate * tf.tanh(z) + (1-trans_gate) * input_\n            outputs.append(output)\n    if is_list:\n        return outputs\n    else:\n        return outputs[0]\n\ndef linear_map(mat, to_dim, scope_name=None, init=None, rank=2):\n    raw_dim1 = mat.get_shape().as_list()[-1]\n\n    with tf.variable_scope(scope_name or 'linear_map'):\n        weight = tf.get_variable('dr_w', \n                    [raw_dim1, to_dim], \n                    initializer=init)\n        bias = tf.get_variable('dr_b', \n                    [to_dim], \n                    initializer=init)\n        if rank == 3:\n            batch_size = mat.get_shape().as_list()[0]\n            raw_dim2 = mat.get_shape().as_list()[1]\n            weight_r3 = tf.tile(\n                    tf.expand_dims(weight, axis=0), \n                    [batch_size, 1, 1])\n            bias_r3 = tf.tile(\n                    tf.expand_dims(tf.expand_dims(bias, axis=0), axis=1), \n                    [batch_size, raw_dim2, 1])\n            return mat @ weight_r3 + bias_r3\n        else:\n            return tf.nn.xw_plus_b(mat, weight, bias)\n\ndef fc_layer(inp, fan_out, init, fn):\n    out = fn(linear_map(inp, fan_out, init=init))\n    return out\n\ndef multi_layer_nn(inp, num_unit_each_layer, init=None, fn=tf.nn.tanh, use_layer_norm=True):\n    '''\n    Args:\n        num_unit_each_layer: list, each element indicates the num_unit in that layer; the first element is the\n            shape of inp\n    '''\n    x = inp\n    num_layer = len(num_unit_each_layer)\n    with tf.variable_scope(\"mnn\"): \n        for idx, fan_out in enumerate(num_unit_each_layer):\n            with tf.variable_scope(\"mnn_\"+str(idx)):\n                x = fc_layer(x, fan_out, init, fn)\n\n            if use_layer_norm:\n                x = tf.contrib.layers.layer_norm(\n                    inputs=x, \n                    center=True, \n                    scale=True, \n                    trainable=True\n                )\n            #get_variable_summary(x, name+'x', 1)\n    return x \n\ndef prelu(_x):\n    with tf.variable_scope(\"prelu\"):\n        alphas = tf.get_variable('alpha', _x.get_shape()[-1],\n                    initializer=tf.constant_initializer(0.0), \n                    dtype=tf.float64)\n        pos = tf.nn.relu(_x)\n        neg = alphas * (_x - abs(_x)) * 0.5\n    return pos + neg\n","sub_path":"basic_models/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"394260080","text":"from rest_framework import serializers\n\nfrom todo.models import Task\n\n\nclass TaskSerializer(serializers.ModelSerializer):\n    \"\"\"\n    Simple Task model serializer.\n    \"\"\"\n    class Meta:\n        model = Task\n        fields = [\n            'id',\n            'title',\n            'description',\n            'children',\n            'state',\n            'created_at',\n        ]\n\n\nclass ChildTaskSerializer(serializers.Serializer):\n    \"\"\"\n    Serializer used with the link action on StateFullTaskViewSet;\n    it takes an id and validates that it exists, returning a task instance\n    in the validated_data.\n    \"\"\"\n\n    child = serializers.IntegerField(required=True)\n\n    def validate_child(self, value):\n        \"\"\"\n        Make sure that the given child's id has a corresponding object in\n        the database and return its instance in the validated_data.\n        \"\"\"\n        try:\n            task = Task.objects.get(pk=value)\n        except Task.DoesNotExist:\n            raise serializers.ValidationError(\"The task you're trying to link is not found.\")\n        return task\n","sub_path":"todo/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"275071687","text":"import unittest\nimport uuid\n\nfrom simphony.cuds.material_relations.coulomb import (\n    Coulomb)\nfrom simphony.testing.abc_check_material_relation import (\n    CheckMaterialRelation)\n\n\nclass TestCoulombMaterialRelation(\n    CheckMaterialRelation,\n    unittest.TestCase\n):\n    def container_factory(\n            self,\n            
name=\"Coulomb\",\n materials=[uuid.uuid4() for _ in xrange(1)]):\n return Coulomb(\n name=name,\n materials=materials\n )\n\n def test_cutoff_distance(self):\n relation = self.container_factory('foo_relation')\n\n self.assertEqual(relation.cutoff_distance, 1.0)\n\n def test_cutoff_distance_update(self):\n relation = self.container_factory('foo_relation')\n\n original = relation.cutoff_distance\n relation.cutoff_distance = original + 1\n\n self.assertEqual(relation.cutoff_distance, original + 1)\n\n def test_dielectric_constant(self):\n relation = self.container_factory('foo_relation')\n\n self.assertEqual(relation.dielectric_constant, 1.0)\n\n def test_dielectric_constant_update(self):\n relation = self.container_factory('foo_relation')\n\n original = relation.dielectric_constant\n relation.dielectric_constant = original + 1\n\n self.assertEqual(relation.dielectric_constant, original + 1)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"simphony/cuds/material_relations/tests/test_coulomb.py","file_name":"test_coulomb.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"195244148","text":"# -*- coding: utf-8 -*-\r\n# Created by Acer on 2018/1/31\r\n\r\nfrom urllib.parse import urljoin\r\nimport re\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ndef prase_movie_list(base_url, html, movie_list, large):\r\n '''\r\n 解析列表页 返回 影片信息\r\n :param base_url:\r\n :param html:\r\n :param movie_list:\r\n :param large:\r\n :return:\r\n '''\r\n pattern = re.compile('共\\s*(\\d+)\\s*頁')\r\n soup = BeautifulSoup(html, 'lxml')\r\n div_list = soup.select('div[class=\"c cl\"] a[class=\"z\"]')\r\n for div in div_list:\r\n url = div['href']\r\n movie_list.append([div['title'], urljoin(base_url, url)])\r\n if large:\r\n large_num_label = soup.select('div[class=\"pg\"] label span')\r\n try:\r\n if large_num_label:\r\n large_num = large_num_label[0]['title']\r\n large_result = pattern.findall(large_num)\r\n if large_result:\r\n return large_result[0]\r\n else:\r\n print('not found')\r\n except IndexError:\r\n print('未找到最大目录')\r\n else:\r\n return None\r\n\r\n\r\ndef get_movie_list(session, base_url, movie_list, large=False):\r\n movie_list.clear()\r\n try:\r\n result = session.get(base_url)\r\n print('[*] 当前网址 %s' % base_url)\r\n if result.status_code == 200:\r\n return prase_movie_list(base_url, result.text, movie_list, large)\r\n except IndexError:\r\n print('error')\r\n","sub_path":"movie_list.py","file_name":"movie_list.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"299154739","text":"import sys\nimport pygame\n\n\ndef check_events(screen):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_event(event)\n\n\ndef check_keydown_event(event):\n \"\"\" Check event when keydown is detected \"\"\"\n if event.key == pygame.K_q:\n sys.exit()\n\n # if event.key == pygame.K_m:\n # calculate_mandelbrot()\n # display_fractal(screen, point_list)\n # update_screen()","sub_path":"students/douglas_klos/extra/side_projects/pygame-fractal/pygame_functions.py","file_name":"pygame_functions.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"318409182","text":"\"\"\"bbs URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nimport os\nfrom app01 import views\nfrom django.views.static import serve\n\nDIRNAME = os.path.dirname(os.path.dirname(__file__))\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^media/(?P.*)$', serve, {'document_root': os.path.join(DIRNAME, \"static/media\"), 'show_indexes': True }),\n url(r'^culture/',views.Culture),\n url(r'^hello/(\\d+)', views.Hello, name='hello'),\n url(r'^section(\\d+)/', views.Section, name='section'),\n url(r'^login/', views.Login),\n url(r'^logout/', views.Logout),\n url(r'^register/', views.Register),\n url(r'^personal/', views.Personal),\n url(r'^data/', views.Data),\n url(r'^comments/', views.Comments),\n url(r'^delete/(\\d+)', views.Delete),\n url(r'^delete_bbs/(\\d+)', views.Delete_bbs),\n url(r'^publish/', views.Publish),\n url(r'^modify/', views.Modify),\n\n\n]\n\n\n","sub_path":"bbs/bbs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171244220","text":"# This file is part of Tryton. The COPYRIGHT file at the top level of\n# this repository contains the full copyright notices and license terms.\nfrom collections import defaultdict\nfrom functools import wraps\n\nfrom trytond.pool import PoolMeta, Pool\nfrom trytond.model import ModelView, Workflow, fields\nfrom trytond.pyson import Eval, Bool\nfrom trytond.transaction import Transaction\nfrom trytond.wizard import Wizard, StateView, StateTransition, Button\n\n\nclass Journal(metaclass=PoolMeta):\n __name__ = 'account.payment.journal'\n clearing_account = fields.Many2One('account.account', 'Clearing Account',\n domain=[\n ('type', '!=', None),\n ('closed', '!=', True),\n ('party_required', '=', False),\n ],\n states={\n 'required': Bool(Eval('clearing_journal')),\n },\n depends=['clearing_journal'])\n clearing_journal = fields.Many2One('account.journal', 'Clearing Journal',\n states={\n 'required': Bool(Eval('clearing_account')),\n },\n depends=['clearing_account'])\n clearing_posting_delay = fields.TimeDelta(\n \"Clearing Posting Delay\",\n help=\"Post automatically the clearing moves after the delay.\\n\"\n \"Leave empty for no posting.\")\n\n @classmethod\n def cron_post_clearing_moves(cls, date=None):\n pool = Pool()\n Date = pool.get('ir.date')\n Move = pool.get('account.move')\n if date is None:\n date = Date.today()\n moves = []\n journals = cls.search([\n ('clearing_posting_delay', '!=', None),\n ])\n for journal in journals:\n move_date = date - journal.clearing_posting_delay\n moves.extend(Move.search([\n ('date', '<=', move_date),\n ('origin.journal.id', '=', journal.id,\n 'account.payment'),\n ('state', '=', 'draft'),\n ('company', '=', Transaction().context.get('company')),\n ]))\n Move.post(moves)\n\n\ndef cancel_clearing_move(func):\n @wraps(func)\n def wrapper(cls, payments, *args, **kwargs):\n pool = Pool()\n Move = pool.get('account.move')\n Line = pool.get('account.move.line')\n Reconciliation = 
pool.get('account.move.reconciliation')\n\n func(cls, payments, *args, **kwargs)\n\n to_delete = []\n to_reconcile = defaultdict(lambda: defaultdict(list))\n to_unreconcile = []\n for payment in payments:\n if payment.clearing_move:\n if payment.clearing_move.state == 'draft':\n to_delete.append(payment.clearing_move)\n for line in payment.clearing_move.lines:\n if line.reconciliation:\n to_unreconcile.append(line.reconciliation)\n else:\n cancel_move = payment.clearing_move.cancel()\n for line in (payment.clearing_move.lines\n + cancel_move.lines):\n if line.reconciliation:\n to_unreconcile.append(line.reconciliation)\n if line.account.reconcile:\n to_reconcile[payment.party][line.account].append(\n line)\n\n # Remove clearing_move before delete\n # in case reconciliation triggers use it.\n cls.write(payments, {'clearing_move': None})\n\n if to_unreconcile:\n Reconciliation.delete(to_unreconcile)\n if to_delete:\n Move.delete(to_delete)\n for party in to_reconcile:\n for lines in to_reconcile[party].values():\n Line.reconcile(lines)\n return wrapper\n\n\nclass Payment(metaclass=PoolMeta):\n __name__ = 'account.payment'\n account = fields.Many2One(\n 'account.account', \"Account\", ondelete='RESTRICT',\n domain=[\n ('closed', '!=', True),\n ('company', '=', Eval('company', -1)),\n ('type.statement', 'in', ['balance', 'off-balance']),\n ['OR',\n ('second_currency', '=', Eval('currency', None)),\n [\n ('company.currency', '=', Eval('currency', None)),\n ('second_currency', '=', None),\n ],\n ],\n ],\n states={\n 'readonly': Eval('state') != 'draft',\n 'invisible': Bool(Eval('line')),\n },\n depends=['company', 'currency', 'state', 'line'],\n help=\"Define the account to use for clearing move.\")\n clearing_move = fields.Many2One('account.move', 'Clearing Move',\n readonly=True)\n\n @classmethod\n def __setup__(cls):\n super(Payment, cls).__setup__()\n line_invisible = Bool(Eval('account'))\n if 'invisible' in cls.line.states:\n cls.line.states['invisible'] &= line_invisible\n else:\n cls.line.states['invisible'] = line_invisible\n cls._buttons.update({\n 'succeed_wizard': cls._buttons['succeed'],\n })\n\n @classmethod\n @ModelView.button_action('account_payment_clearing.wizard_succeed')\n def succeed_wizard(cls, payments):\n pass\n\n @classmethod\n @ModelView.button\n @Workflow.transition('succeeded')\n def succeed(cls, payments):\n pool = Pool()\n Move = pool.get('account.move')\n Line = pool.get('account.move.line')\n\n super(Payment, cls).succeed(payments)\n\n moves = []\n for payment in payments:\n move = payment.create_clearing_move(\n date=Transaction().context.get('clearing_date'))\n if move:\n moves.append(move)\n if moves:\n Move.save(moves)\n cls.write(*sum((([m.origin], {'clearing_move': m.id})\n for m in moves), ()))\n\n to_reconcile = []\n for payment in payments:\n if (payment.line\n and not payment.line.reconciliation\n and payment.clearing_move):\n lines = [l for l in payment.clearing_move.lines\n if l.account == payment.line.account] + [payment.line]\n if not sum(l.debit - l.credit for l in lines):\n to_reconcile.append(lines)\n for lines in to_reconcile:\n Line.reconcile(lines)\n\n @property\n def clearing_account(self):\n if self.line:\n return self.line.account\n elif self.account:\n return self.account\n\n @property\n def clearing_party(self):\n if self.line:\n return self.line.party\n else:\n return self.party\n\n def create_clearing_move(self, date=None):\n pool = Pool()\n Move = pool.get('account.move')\n Line = pool.get('account.move.line')\n Currency = 
pool.get('currency.currency')\n Period = pool.get('account.period')\n Date = pool.get('ir.date')\n\n if not self.clearing_account:\n return\n if (not self.journal.clearing_account\n or not self.journal.clearing_journal):\n return\n if self.clearing_move:\n return self.clearing_move\n\n if date is None:\n date = Date.today()\n period = Period.find(self.company.id, date=date)\n\n local_currency = self.journal.currency == self.company.currency\n if not local_currency:\n with Transaction().set_context(date=self.date):\n local_amount = Currency.compute(\n self.journal.currency, self.amount, self.company.currency)\n else:\n local_amount = self.amount\n\n move = Move(journal=self.journal.clearing_journal, origin=self,\n date=date, period=period, company=self.company)\n line = Line()\n if self.kind == 'payable':\n line.debit, line.credit = local_amount, 0\n else:\n line.debit, line.credit = 0, local_amount\n line.account = self.clearing_account\n if not local_currency:\n line.amount_second_currency = self.amount.copy_sign(\n line.debit - line.credit)\n line.second_currency = self.journal.currency\n\n line.party = (self.clearing_party\n if line.account.party_required else None)\n counterpart = Line()\n if self.kind == 'payable':\n counterpart.debit, counterpart.credit = 0, local_amount\n else:\n counterpart.debit, counterpart.credit = local_amount, 0\n counterpart.account = self.journal.clearing_account\n if not local_currency:\n counterpart.amount_second_currency = self.amount.copy_sign(\n counterpart.debit - counterpart.credit)\n counterpart.second_currency = self.journal.currency\n move.lines = (line, counterpart)\n return move\n\n @classmethod\n @Workflow.transition('processing')\n @cancel_clearing_move\n def proceed(cls, payments):\n super().proceed(payments)\n\n @classmethod\n @ModelView.button\n @Workflow.transition('failed')\n @cancel_clearing_move\n def fail(cls, payments):\n super(Payment, cls).fail(payments)\n\n @classmethod\n def copy(cls, payments, default=None):\n if default is None:\n default = {}\n else:\n default = default.copy()\n default.setdefault('clearing_move')\n return super(Payment, cls).copy(payments, default=default)\n\n\nclass Succeed(Wizard):\n \"Succeed Payment\"\n __name__ = 'account.payment.succeed'\n start = StateView('account.payment.succeed.start',\n 'account_payment_clearing.succeed_start_view_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('Succeed', 'succeed', 'tryton-ok', default=True),\n ])\n succeed = StateTransition()\n\n def transition_succeed(self):\n with Transaction().set_context(clearing_date=self.start.date):\n self.model.succeed(self.records)\n return 'end'\n\n\nclass SucceedStart(ModelView):\n \"Succeed Payment\"\n __name__ = 'account.payment.succeed.start'\n date = fields.Date(\"Date\", required=True)\n\n @classmethod\n def default_date(cls):\n pool = Pool()\n Date = pool.get('ir.date')\n return Date.today()\n","sub_path":"account_payment_clearing/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":10413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"569929082","text":"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. 
See license.txt\n\nfrom __future__ import unicode_literals\nfrom frappe import _\n\"\"\"\nrecord of files\n\nnaming for same name files: file.gif, file-1.gif, file-2.gif etc\n\"\"\"\n\nimport frappe\nimport json\nimport os\nimport base64\nimport re\nimport hashlib\nimport mimetypes\nimport io\nimport shutil\nimport requests\nimport requests.exceptions\nimport imghdr\n\nfrom frappe.utils import get_hook_method, get_files_path, random_string, encode, cstr, call_hook_method, cint\nfrom frappe import _\nfrom frappe import conf\nfrom frappe.utils.nestedset import NestedSet\nfrom frappe.model.document import Document\nfrom frappe.utils import strip\nfrom PIL import Image, ImageOps\nfrom six import StringIO, string_types\nfrom six.moves.urllib.parse import unquote, quote\nfrom six import text_type, PY2\nimport zipfile\n\nclass MaxFileSizeReachedError(frappe.ValidationError):\n\tpass\n\n\nclass FolderNotEmpty(frappe.ValidationError): pass\n\nexclude_from_linked_with = True\n\n\nclass File(Document):\n\tno_feed_on_delete = True\n\n\tdef before_insert(self):\n\t\tfrappe.local.rollback_observers.append(self)\n\t\tself.set_folder_name()\n\t\tself.content = self.get(\"content\", None)\n\t\tself.decode = self.get(\"decode\", False)\n\t\tif self.content:\n\t\t\tself.save_file(content=self.content, decode=self.decode)\n\n\tdef get_name_based_on_parent_folder(self):\n\t\tif self.folder:\n\t\t\tpath = get_breadcrumbs(self.folder)\n\t\t\tfolder_name = frappe.get_value(\"File\", self.folder, \"file_name\")\n\t\t\treturn \"/\".join([d.file_name for d in path] + [folder_name, self.file_name])\n\n\tdef autoname(self):\n\t\t\"\"\"Set name for folder\"\"\"\n\t\tif self.is_folder:\n\t\t\tif self.folder:\n\t\t\t\tself.name = self.get_name_based_on_parent_folder()\n\t\t\telse:\n\t\t\t\t# home\n\t\t\t\tself.name = self.file_name\n\t\telse:\n\t\t\tself.name = frappe.generate_hash(\"\", 10)\n\n\tdef after_insert(self):\n\t\tif not self.is_folder:\n\t\t\tself.add_comment_in_reference_doc('Attachment',\n\t\t\t\t_('Added {0}').format(\"{file_name}{icon}\".format(**{\n\t\t\t\t\t\"icon\": ' ' if self.is_private else \"\",\n\t\t\t\t\t\"file_url\": quote(self.file_url) if self.file_url else self.file_name,\n\t\t\t\t\t\"file_name\": self.file_name or self.file_url\n\t\t\t\t})))\n\n\tdef after_rename(self, olddn, newdn, merge=False):\n\t\tfor successor in self.get_successor():\n\t\t\tsetup_folder_path(successor, self.name)\n\n\tdef get_successor(self):\n\t\treturn frappe.db.sql_list(\"select name from tabFile where folder='%s'\"%self.name) or []\n\n\tdef validate(self):\n\t\tif self.is_new():\n\t\t\tself.validate_duplicate_entry()\n\t\t\tself.validate_file_name()\n\t\tself.validate_folder()\n\n\t\tif not self.file_url and not self.flags.ignore_file_validate:\n\t\t\tif not self.is_folder:\n\t\t\t\tself.validate_file()\n\t\t\tself.generate_content_hash()\n\n\t\tself.validate_url()\n\n\t\tif frappe.db.exists('File', {'name': self.name, 'is_folder': 0}):\n\t\t\told_file_url = self.file_url\n\t\t\tif not self.is_folder and (self.is_private != self.db_get('is_private')):\n\t\t\t\tprivate_files = frappe.get_site_path('private', 'files')\n\t\t\t\tpublic_files = frappe.get_site_path('public', 'files')\n\n\t\t\t\tif not self.is_private:\n\t\t\t\t\tshutil.move(os.path.join(private_files, self.file_name),\n\t\t\t\t\t\tos.path.join(public_files, self.file_name))\n\n\t\t\t\t\tself.file_url = \"/files/{0}\".format(self.file_name)\n\n\t\t\t\telse:\n\t\t\t\t\tshutil.move(os.path.join(public_files, 
self.file_name),\n\t\t\t\t\t\tos.path.join(private_files, self.file_name))\n\n\t\t\t\t\tself.file_url = \"/private/files/{0}\".format(self.file_name)\n\n\n\t\t\t# update documents image url with new file url\n\t\t\tif self.attached_to_doctype and self.attached_to_name:\n\t\t\t\tif not self.attached_to_field:\n\t\t\t\t\tfield_name = None\n\t\t\t\t\treference_dict = frappe.get_doc(self.attached_to_doctype, self.attached_to_name).as_dict()\n\t\t\t\t\tfor key, value in reference_dict.items():\n\t\t\t\t\t\tif value == old_file_url:\n\t\t\t\t\t\t\tfield_name = key\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tself.attached_to_field = field_name\n\t\t\t\tif self.attached_to_field:\n\t\t\t\t\tfrappe.db.set_value(self.attached_to_doctype, self.attached_to_name,\n\t\t\t\t\t\tself.attached_to_field, self.file_url)\n\n\tdef set_folder_name(self):\n\t\t\"\"\"Make parent folders if not exists based on reference doctype and name\"\"\"\n\t\tif self.attached_to_doctype and not self.folder:\n\t\t\tself.folder = frappe.db.get_value(\"File\", {\"is_attachments_folder\": 1})\n\n\tdef validate_folder(self):\n\t\tif not self.is_home_folder and not self.folder and \\\n\t\t\tnot self.flags.ignore_folder_validate:\n\t\t\tself.folder = \"Home\"\n\n\tdef validate_file(self):\n\t\t\"\"\"Validates existence of public file\n\t\tTODO: validate for private file\n\t\t\"\"\"\n\t\tfull_path = self.get_full_path()\n\n\t\tif full_path.startswith('http'):\n\t\t\treturn True\n\n\t\tif not os.path.exists(full_path):\n\t\t\tfrappe.throw(_(\"File {0} does not exist\").format(self.file_url), IOError)\n\n\tdef validate_duplicate_entry(self):\n\t\tif not self.flags.ignore_duplicate_entry_error and not self.is_folder:\n\t\t\t# check duplicate name\n\n\t\t\t# check duplicate assignement\n\t\t\tfilters = {\n\t\t\t\t'content_hash': self.content_hash,\n\t\t\t\t'is_private': self.is_private,\n\t\t\t\t'name': ('!=', self.name)\n\t\t\t}\n\t\t\tif self.attached_to_doctype and self.attached_to_name:\n\t\t\t\tfilters.update({\n\t\t\t\t\t'attached_to_doctype': self.attached_to_doctype,\n\t\t\t\t\t'attached_to_name': self.attached_to_name\n\t\t\t\t})\n\t\t\tduplicate_file = frappe.db.get_value('File', filters, ['name', 'file_url'], as_dict=1)\n\n\t\t\tif duplicate_file:\n\t\t\t\tduplicate_file_doc = frappe.get_cached_doc('File', duplicate_file.name)\n\t\t\t\tif duplicate_file_doc.exists_on_disk():\n\t\t\t\t\t# if it is attached to a document then throw DuplicateEntryError\n\t\t\t\t\tif self.attached_to_doctype and self.attached_to_name:\n\t\t\t\t\t\tself.duplicate_entry = duplicate_file.name\n\t\t\t\t\t\tfrappe.throw(_(\"Same file has already been attached to the record\"),\n\t\t\t\t\t\t\tfrappe.DuplicateEntryError)\n\t\t\t\t\t# else just use the url, to avoid uploading a duplicate\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.file_url = duplicate_file.file_url\n\n\tdef validate_file_name(self):\n\t\tif not self.file_name and self.file_url:\n\t\t\tself.file_name = self.file_url.split('/')[-1]\n\n\tdef generate_content_hash(self):\n\t\tif self.content_hash or not self.file_url:\n\t\t\treturn\n\n\t\tif self.file_url.startswith(\"/files/\"):\n\t\t\ttry:\n\t\t\t\twith open(get_files_path(self.file_name.lstrip(\"/\")), \"rb\") as f:\n\t\t\t\t\tself.content_hash = get_content_hash(f.read())\n\t\t\texcept IOError:\n\t\t\t\tfrappe.msgprint(_(\"File {0} does not exist\").format(self.file_url))\n\t\t\t\traise\n\n\tdef on_trash(self):\n\t\tif self.is_home_folder or self.is_attachments_folder:\n\t\t\tfrappe.throw(_(\"Cannot delete Home and Attachments 
folders\"))\n\t\tself.check_folder_is_empty()\n\t\tself.call_delete_file()\n\t\tif not self.is_folder:\n\t\t\tself.add_comment_in_reference_doc('Attachment Removed', _(\"Removed {0}\").format(self.file_name))\n\n\tdef make_thumbnail(self, set_as_thumbnail=True, width=300, height=300, suffix=\"small\", crop=False):\n\t\tif self.file_url:\n\t\t\tif self.file_url.startswith(\"/files\"):\n\t\t\t\ttry:\n\t\t\t\t\timage, filename, extn = get_local_image(self.file_url)\n\t\t\t\texcept IOError:\n\t\t\t\t\treturn\n\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\timage, filename, extn = get_web_image(self.file_url)\n\t\t\t\texcept (requests.exceptions.HTTPError, requests.exceptions.SSLError, IOError, TypeError):\n\t\t\t\t\treturn\n\n\t\t\tsize = width, height\n\t\t\tif crop:\n\t\t\t\timage = ImageOps.fit(image, size, Image.ANTIALIAS)\n\t\t\telse:\n\t\t\t\timage.thumbnail(size, Image.ANTIALIAS)\n\n\t\t\tthumbnail_url = filename + \"_\" + suffix + \".\" + extn\n\n\t\t\tpath = os.path.abspath(frappe.get_site_path(\"public\", thumbnail_url.lstrip(\"/\")))\n\n\t\t\ttry:\n\t\t\t\timage.save(path)\n\n\t\t\t\tif set_as_thumbnail:\n\t\t\t\t\tself.db_set(\"thumbnail_url\", thumbnail_url)\n\n\t\t\t\tself.db_set(\"thumbnail_url\", thumbnail_url)\n\t\t\texcept IOError:\n\t\t\t\tfrappe.msgprint(_(\"Unable to write file format for {0}\").format(path))\n\t\t\t\treturn\n\n\t\t\treturn thumbnail_url\n\n\tdef check_folder_is_empty(self):\n\t\t\"\"\"Throw exception if folder is not empty\"\"\"\n\t\tfiles = frappe.get_all(\"File\", filters={\"folder\": self.name}, fields=(\"name\", \"file_name\"))\n\n\t\tif self.is_folder and files:\n\t\t\tfrappe.throw(_(\"Folder {0} is not empty\").format(self.name), FolderNotEmpty)\n\n\tdef call_delete_file(self):\n\t\t\"\"\"If file not attached to any other record, delete it\"\"\"\n\t\tif self.file_name and self.content_hash and (not frappe.db.count(\"File\",\n\t\t\t{\"content_hash\": self.content_hash, \"name\": [\"!=\", self.name]})):\n\t\t\t\tself.delete_file_data_content()\n\t\telif self.file_url:\n\t\t\tself.delete_file_data_content(only_thumbnail=True)\n\n\tdef on_rollback(self):\n\t\tself.flags.on_rollback = True\n\t\tself.on_trash()\n\n\tdef unzip(self):\n\t\t'''Unzip current file and replace it by its children'''\n\t\tif not \".zip\" in self.file_name:\n\t\t\tfrappe.msgprint(_(\"Not a zip file\"))\n\t\t\treturn\n\n\t\tzip_path = frappe.get_site_path(self.file_url.strip('/'))\n\t\tbase_url = os.path.dirname(self.file_url)\n\n\t\tfiles = []\n\t\twith zipfile.ZipFile(zip_path) as zf:\n\t\t\tzf.extractall(os.path.dirname(zip_path))\n\t\t\tfor info in zf.infolist():\n\t\t\t\tif not info.filename.startswith('__MACOSX'):\n\t\t\t\t\tfile_url = file_url = base_url + '/' + info.filename\n\t\t\t\t\tfile_name = frappe.db.get_value('File', dict(file_url=file_url))\n\t\t\t\t\tif file_name:\n\t\t\t\t\t\tfile_doc = frappe.get_doc('File', file_name)\n\t\t\t\t\telse:\n\t\t\t\t\t\tfile_doc = frappe.new_doc(\"File\")\n\t\t\t\t\tfile_doc.file_name = info.filename\n\t\t\t\t\tfile_doc.file_size = info.file_size\n\t\t\t\t\tfile_doc.folder = self.folder\n\t\t\t\t\tfile_doc.is_private = self.is_private\n\t\t\t\t\tfile_doc.file_url = file_url\n\t\t\t\t\tfile_doc.attached_to_doctype = self.attached_to_doctype\n\t\t\t\t\tfile_doc.attached_to_name = self.attached_to_name\n\t\t\t\t\tfile_doc.save()\n\t\t\t\t\tfiles.append(file_doc)\n\n\t\tfrappe.delete_doc('File', self.name)\n\t\treturn files\n\n\n\tdef get_file_url(self):\n\t\tdata = frappe.db.get_value(\"File\", self.file_data_name, [\"file_name\", \"file_url\"], 
as_dict=True)\n\t\treturn data.file_url or data.file_name\n\n\tdef exists_on_disk(self):\n\t\texists = os.path.exists(self.get_full_path())\n\t\treturn exists\n\n\tdef upload(self):\n\t\t# get record details\n\t\tself.attached_to_doctype = frappe.form_dict.doctype\n\t\tself.attached_to_name = frappe.form_dict.docname\n\t\tself.attached_to_field = frappe.form_dict.docfield\n\t\tself.file_url = frappe.form_dict.file_url\n\t\tself.file_name = frappe.form_dict.filename\n\t\tfrappe.form_dict.is_private = cint(frappe.form_dict.is_private)\n\n\t\tif not self.file_name and not self.file_url:\n\t\t\tfrappe.msgprint(_(\"Please select a file or url\"),\n\t\t\t\traise_exception=True)\n\n\t\tfile_doc = self.get_file_doc()\n\n\t\tcomment = {}\n\t\tif self.attached_to_doctype and self.attached_to_name:\n\t\t\tcomment = frappe.get_doc(self.attached_to_doctype, self.attached_to_name).add_comment(\"Attachment\",\n\t\t\t_\t(\"added {0}\").format(\"{file_name}{icon}\".format(**{\n\t\t\t\t\t\"icon\": ' ' \\\n\t\t\t\t\t\tif file_doc.is_private else \"\",\n\t\t\t\t\t\"file_url\": file_doc.file_url.replace(\"#\", \"%23\") \\\n\t\t\t\t\t\tif file_doc.file_name else file_doc.file_url,\n\t\t\t\t\t\"file_name\": file_doc.file_name or file_doc.file_url\n\t\t\t\t})))\n\n\t\treturn {\n\t\t\t\"name\": file_doc.name,\n\t\t\t\"file_name\": file_doc.file_name,\n\t\t\t\"file_url\": file_doc.file_url,\n\t\t\t\"is_private\": file_doc.is_private,\n\t\t\t\"comment\": comment.as_dict() if comment else {}\n\t\t}\n\n\tdef get_content(self):\n\t\t\"\"\"Returns [`file_name`, `content`] for given file name `fname`\"\"\"\n\t\tif self.get('content'):\n\t\t\treturn self.content\n\t\tfile_path = self.get_full_path()\n\n\t\t# read the file\n\t\tif PY2:\n\t\t\twith open(encode(file_path)) as f:\n\t\t\t\tcontent = f.read()\n\t\telse:\n\t\t\twith io.open(encode(file_path), mode='rb') as f:\n\t\t\t\tcontent = f.read()\n\t\t\t\ttry:\n\t\t\t\t\t# for plain text files\n\t\t\t\t\tcontent = content.decode()\n\t\t\t\texcept UnicodeDecodeError:\n\t\t\t\t\t# for .png, .jpg, etc\n\t\t\t\t\tpass\n\n\t\treturn content\n\n\tdef get_full_path(self):\n\t\t\"\"\"Returns file path from given file name\"\"\"\n\n\t\tfile_path = self.file_url or self.file_name\n\n\t\tif \"/\" not in file_path:\n\t\t\tfile_path = \"/files/\" + file_path\n\n\t\tif file_path.startswith(\"/private/files/\"):\n\t\t\tfile_path = get_files_path(*file_path.split(\"/private/files/\", 1)[1].split(\"/\"), is_private=1)\n\n\t\telif file_path.startswith(\"/files/\"):\n\t\t\tfile_path = get_files_path(*file_path.split(\"/files/\", 1)[1].split(\"/\"))\n\n\t\telif file_path.startswith(\"http\"):\n\t\t\tpass\n\n\t\telif not self.file_url:\n\t\t\tfrappe.throw(_(\"There is some problem with the file url: {0}\").format(file_path))\n\n\t\treturn file_path\n\n\tdef write_file(self):\n\t\t\"\"\"write file to disk with a random name (to compare)\"\"\"\n\t\tfile_path = get_files_path(is_private=self.is_private)\n\n\t\t# create directory (if not exists)\n\t\tfrappe.create_folder(file_path)\n\t\t# write the file\n\t\tself.content = self.get_content()\n\t\tif isinstance(self.content, text_type):\n\t\t\tself.content = self.content.encode()\n\t\twith open(os.path.join(file_path.encode('utf-8'), self.file_name.encode('utf-8')), 'wb+') as f:\n\t\t\tf.write(self.content)\n\n\t\treturn get_files_path(self.file_name, is_private=self.is_private)\n\n\tdef get_file_doc(self):\n\t\t'''returns File object (Document) from given parameters or form_dict'''\n\t\tr = frappe.form_dict\n\n\t\tif self.file_url is None: 
self.file_url = r.file_url\n\t\tif self.file_name is None: self.file_name = r.file_name\n\t\tif self.attached_to_doctype is None: self.attached_to_doctype = r.doctype\n\t\tif self.attached_to_name is None: self.attached_to_name = r.docname\n\t\tif self.attached_to_field is None: self.attached_to_field = r.docfield\n\t\tif self.folder is None: self.folder = r.folder\n\t\tif self.is_private is None: self.is_private = r.is_private\n\n\t\tif r.filedata:\n\t\t\tfile_doc = self.save_uploaded()\n\n\t\telif r.file_url:\n\t\t\tfile_doc = self.save()\n\n\t\treturn file_doc\n\n\n\tdef save_uploaded(self):\n\t\tself.content = self.get_uploaded_content()\n\t\tif self.content:\n\t\t\treturn self.save()\n\t\telse:\n\t\t\traise Exception\n\n\n\tdef validate_url(self, df=None):\n\t\tif self.file_url:\n\t\t\tif not self.file_url.startswith((\"http://\", \"https://\", \"/files/\", \"/private/files/\")):\n\t\t\t\tfrappe.throw(_(\"URL must start with 'http://' or 'https://'\"))\n\t\t\t\treturn\n\n\t\t\tself.file_url = unquote(self.file_url)\n\t\t\tself.file_size = frappe.form_dict.file_size or self.file_size\n\n\n\tdef get_uploaded_content(self):\n\t\t# should not be unicode when reading a file, hence using frappe.form\n\t\tif 'filedata' in frappe.form_dict:\n\t\t\tif \",\" in frappe.form_dict.filedata:\n\t\t\t\tfrappe.form_dict.filedata = frappe.form_dict.filedata.rsplit(\",\", 1)[1]\n\t\t\tfrappe.uploaded_content = base64.b64decode(frappe.form_dict.filedata)\n\t\t\treturn frappe.uploaded_content\n\t\telif self.content:\n\t\t\treturn self.content\n\t\tfrappe.msgprint(_('No file attached'))\n\t\treturn None\n\n\n\tdef save_file(self, content=None, decode=False, ignore_existing_file_check=False):\n\t\tfile_exists = False\n\t\tself.content = content\n\t\tif decode:\n\t\t\tif isinstance(content, text_type):\n\t\t\t\tself.content = content.encode(\"utf-8\")\n\n\t\t\tif b\",\" in self.content:\n\t\t\t\tself.content = self.content.split(b\",\")[1]\n\t\t\tself.content = base64.b64decode(self.content)\n\n\t\tif not self.is_private:\n\t\t\tself.is_private = 0\n\t\tself.file_size = self.check_max_file_size()\n\t\tself.content_hash = get_content_hash(self.content)\n\t\tself.content_type = mimetypes.guess_type(self.file_name)[0]\n\n\t\tduplicate_file = None\n\n\t\t# check if a file exists with the same content hash and is also in the same folder (public or private)\n\t\tif not ignore_existing_file_check:\n\t\t\tduplicate_file = frappe.get_value(\"File\", {\n\t\t\t\t\t\"content_hash\": self.content_hash,\n\t\t\t\t\t\"is_private\": self.is_private\n\t\t\t\t},\n\t\t\t\t[\"file_url\", \"name\"], as_dict=True)\n\n\t\tif duplicate_file:\n\t\t\tfile_doc = frappe.get_cached_doc('File', duplicate_file.name)\n\t\t\tif file_doc.exists_on_disk():\n\t\t\t\tself.file_url = duplicate_file.file_url\n\t\t\t\tfile_exists = True\n\n\t\tif os.path.exists(encode(get_files_path(self.file_name, is_private=self.is_private))):\n\t\t\tself.file_name = get_file_name(self.file_name, self.content_hash[-6:])\n\n\t\tif not file_exists:\n\t\t\tcall_hook_method(\"before_write_file\", file_size=self.file_size)\n\t\t\twrite_file_method = get_hook_method('write_file')\n\t\t\tif write_file_method:\n\t\t\t\treturn write_file_method(self)\n\t\t\treturn self.save_file_on_filesystem()\n\n\n\tdef save_file_on_filesystem(self):\n\t\tfpath = self.write_file()\n\n\t\tif self.is_private:\n\t\t\tself.file_url = \"/private/files/{0}\".format(self.file_name)\n\t\telse:\n\t\t\tself.file_url = \"/files/{0}\".format(self.file_name)\n\n\t\treturn {\n\t\t\t'file_name': 
os.path.basename(fpath),\n\t\t\t'file_url': self.file_url\n\t\t}\n\n\tdef get_file_data_from_hash(self):\n\t\tfor name in frappe.db.sql_list(\"select name from `tabFile` where content_hash=%s and is_private=%s\",\n\t\t\t(self.content_hash, self.is_private)):\n\t\t\tb = frappe.get_doc('File', name)\n\t\t\treturn {k: b.get(k) for k in frappe.get_hooks()['write_file_keys']}\n\t\treturn False\n\n\n\tdef check_max_file_size(self):\n\t\tmax_file_size = get_max_file_size()\n\t\tfile_size = len(self.content)\n\n\t\tif file_size > max_file_size:\n\t\t\tfrappe.msgprint(_(\"File size exceeded the maximum allowed size of {0} MB\").format(\n\t\t\t\tmax_file_size / 1048576),\n\t\t\t\traise_exception=MaxFileSizeReachedError)\n\n\t\treturn file_size\n\n\n\tdef delete_file_data_content(self, only_thumbnail=False):\n\t\tmethod = get_hook_method('delete_file_data_content')\n\t\tif method:\n\t\t\tmethod(self, only_thumbnail=only_thumbnail)\n\t\telse:\n\t\t\tself.delete_file_from_filesystem(only_thumbnail=only_thumbnail)\n\n\n\tdef delete_file_from_filesystem(self, only_thumbnail=False):\n\t\t\"\"\"Delete file, thumbnail from File document\"\"\"\n\t\tif only_thumbnail:\n\t\t\tdelete_file(self.thumbnail_url)\n\t\telse:\n\t\t\tdelete_file(self.file_url)\n\t\t\tdelete_file(self.thumbnail_url)\n\n\tdef is_downloadable(self):\n\t\t# private files need a read-permission check; public files are always downloadable\n\t\tif self.is_private:\n\t\t\treturn has_permission(self, 'read')\n\n\t\treturn True\n\n\tdef get_extension(self):\n\t\t'''returns split filename and extension'''\n\t\treturn os.path.splitext(self.file_name)\n\n\tdef add_comment_in_reference_doc(self, comment_type, text):\n\t\tif self.attached_to_doctype and self.attached_to_name:\n\t\t\ttry:\n\t\t\t\tdoc = frappe.get_doc(self.attached_to_doctype, self.attached_to_name)\n\t\t\t\tdoc.add_comment(comment_type, text)\n\t\t\texcept frappe.DoesNotExistError:\n\t\t\t\tfrappe.clear_messages()\n\n\ndef on_doctype_update():\n\tfrappe.db.add_index(\"File\", [\"attached_to_doctype\", \"attached_to_name\"])\n\ndef make_home_folder():\n\thome = frappe.get_doc({\n\t\t\"doctype\": \"File\",\n\t\t\"is_folder\": 1,\n\t\t\"is_home_folder\": 1,\n\t\t\"file_name\": _(\"Home\")\n\t}).insert()\n\n\tfrappe.get_doc({\n\t\t\"doctype\": \"File\",\n\t\t\"folder\": home.name,\n\t\t\"is_folder\": 1,\n\t\t\"is_attachments_folder\": 1,\n\t\t\"file_name\": _(\"Attachments\")\n\t}).insert()\n\n@frappe.whitelist()\ndef get_breadcrumbs(folder):\n\t\"\"\"returns the list of ancestor folder paths (breadcrumbs) for the given folder\"\"\"\n\tpath = folder.split('/')\n\n\tfolders = []\n\tfor i in range(len(path)):\n\t\tparent = '/'.join(path[:i])\n\t\tif parent:\n\t\t\tfolders.append(parent)\n\n\treturn [frappe._dict(file_name=f) for f in folders]\n\n@frappe.whitelist()\ndef create_new_folder(file_name, folder):\n\t\"\"\" create new folder under current parent folder \"\"\"\n\tfile = frappe.new_doc(\"File\")\n\tfile.file_name = file_name\n\tfile.is_folder = 1\n\tfile.folder = folder\n\tfile.insert()\n\n@frappe.whitelist()\ndef move_file(file_list, new_parent, old_parent):\n\n\tif isinstance(file_list, string_types):\n\t\tfile_list = json.loads(file_list)\n\n\tfor file_obj in file_list:\n\t\tsetup_folder_path(file_obj.get(\"name\"), new_parent)\n\n\t# recalculate sizes\n\tfrappe.get_doc(\"File\", old_parent).save()\n\tfrappe.get_doc(\"File\", new_parent).save()\n\ndef setup_folder_path(filename, new_parent):\n\tfile = frappe.get_doc(\"File\", filename)\n\tfile.folder = new_parent\n\tfile.save()\n\n\tif file.is_folder:\n\t\tfrappe.rename_doc(\"File\", 
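# a folder-type File is apparently named from its folder path (see get_name_based_on_parent_folder below), so moving it also requires a rename\n\t\t\t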
file.name, file.get_name_based_on_parent_folder(), ignore_permissions=True)\n\ndef get_extension(filename, extn, content):\n\tmimetype = None\n\n\tif extn:\n\t\t# remove '?' char and parameters from extn if present\n\t\tif '?' in extn:\n\t\t\textn = extn.split('?', 1)[0]\n\n\t\tmimetype = mimetypes.guess_type(filename + \".\" + extn)[0]\n\n\tif (mimetype is None or not mimetype.startswith(\"image/\")) and content:\n\t\t# detect file extension by reading image header properties\n\t\textn = imghdr.what(filename + \".\" + (extn or \"\"), h=content)\n\n\treturn extn\n\ndef get_local_image(file_url):\n\tfile_path = frappe.get_site_path(\"public\", file_url.lstrip(\"/\"))\n\n\ttry:\n\t\timage = Image.open(file_path)\n\texcept IOError:\n\t\tfrappe.msgprint(_(\"Unable to read file format for {0}\").format(file_url))\n\t\traise\n\n\tcontent = None\n\n\ttry:\n\t\tfilename, extn = file_url.rsplit(\".\", 1)\n\texcept ValueError:\n\t\t# no extn; read the raw bytes so imghdr can sniff the image type\n\t\twith open(file_path, \"rb\") as f:\n\t\t\tcontent = f.read()\n\n\t\tfilename = file_url\n\t\textn = None\n\n\textn = get_extension(filename, extn, content)\n\n\treturn image, filename, extn\n\ndef get_web_image(file_url):\n\t# download\n\tfile_url = frappe.utils.get_url(file_url)\n\tr = requests.get(file_url, stream=True)\n\ttry:\n\t\tr.raise_for_status()\n\texcept requests.exceptions.HTTPError as e:\n\t\tif \"404\" in e.args[0]:\n\t\t\tfrappe.msgprint(_(\"File '{0}' not found\").format(file_url))\n\t\telse:\n\t\t\tfrappe.msgprint(_(\"Unable to read file format for {0}\").format(file_url))\n\t\traise\n\n\timage = Image.open(StringIO(frappe.safe_decode(r.content)))\n\n\ttry:\n\t\tfilename, extn = file_url.rsplit(\"/\", 1)[1].rsplit(\".\", 1)\n\texcept ValueError:\n\t\t# the case when the file url doesn't have filename or extension\n\t\t# but is fetched due to a query string. example: https://encrypted-tbn3.gstatic.com/images?q=something\n\t\tfilename = get_random_filename()\n\t\textn = None\n\n\textn = get_extension(filename, extn, r.content)\n\tfilename = \"/files/\" + strip(unquote(filename))\n\n\treturn image, filename, extn\n\n\ndef delete_file(path):\n\t\"\"\"Delete file from `public folder`\"\"\"\n\tif path:\n\t\tif \"..\" in path.split(\"/\"):\n\t\t\tfrappe.throw(_(\"It is risky to delete this file: {0}. 
Please contact your System Manager.\").format(path))\n\n\t\tparts = os.path.split(path.strip(\"/\"))\n\t\tif parts[0]==\"files\":\n\t\t\tpath = frappe.utils.get_site_path(\"public\", \"files\", parts[-1])\n\n\t\telse:\n\t\t\tpath = frappe.utils.get_site_path(\"private\", \"files\", parts[-1])\n\n\t\tpath = encode(path)\n\t\tif os.path.exists(path):\n\t\t\tos.remove(path)\n\n\ndef remove_file(fid=None, attached_to_doctype=None, attached_to_name=None, from_delete=False):\n\t\"\"\"Remove file and File entry\"\"\"\n\tfile_name = None\n\tif not (attached_to_doctype and attached_to_name):\n\t\tattached = frappe.db.get_value(\"File\", fid,\n\t\t\t[\"attached_to_doctype\", \"attached_to_name\", \"file_name\"])\n\t\tif attached:\n\t\t\tattached_to_doctype, attached_to_name, file_name = attached\n\n\tignore_permissions, comment = False, None\n\tif attached_to_doctype and attached_to_name:\n\t\tdoc = frappe.get_doc(attached_to_doctype, attached_to_name)\n\t\tignore_permissions = doc.has_permission(\"write\") or False\n\t\tif frappe.flags.in_web_form:\n\t\t\tignore_permissions = True\n\t\tif not file_name:\n\t\t\tfile_name = frappe.db.get_value(\"File\", fid, \"file_name\")\n\t\tcomment = doc.add_comment(\"Attachment Removed\", _(\"Removed {0}\").format(file_name))\n\t\tfrappe.delete_doc(\"File\", fid, ignore_permissions=ignore_permissions)\n\n\treturn comment\n\n\ndef get_max_file_size():\n\treturn conf.get('max_file_size') or 10485760\n\n\ndef remove_all(dt, dn, from_delete=False):\n\t\"\"\"remove all files in a transaction\"\"\"\n\ttry:\n\t\tfor fid in frappe.db.sql_list(\"\"\"select name from `tabFile` where\n\t\t\tattached_to_doctype=%s and attached_to_name=%s\"\"\", (dt, dn)):\n\t\t\tremove_file(fid=fid, attached_to_doctype=dt, attached_to_name=dn, from_delete=from_delete)\n\texcept Exception as e:\n\t\tif e.args[0]!=1054: raise # ignore MySQL error 1054 ('Unknown column') until the DB schema is patched\n\n\ndef has_permission(doc, ptype=None, user=None):\n\tpermission = True\n\n\tif doc.attached_to_doctype and doc.attached_to_name:\n\t\tattached_to_doctype = doc.attached_to_doctype\n\t\tattached_to_name = doc.attached_to_name\n\n\t\ttry:\n\t\t\tref_doc = frappe.get_doc(attached_to_doctype, attached_to_name)\n\n\t\t\tif ptype in ['write', 'create', 'delete']:\n\t\t\t\tpermission = ref_doc.has_permission('write')\n\n\t\t\t\tif ptype == 'delete' and not permission:\n\t\t\t\t\tfrappe.throw(_(\"Cannot delete file as it belongs to {0} {1} for which you do not have permissions\").format(\n\t\t\t\t\t\tdoc.attached_to_doctype, doc.attached_to_name),\n\t\t\t\t\t\tfrappe.PermissionError)\n\t\t\telse:\n\t\t\t\tpermission = ref_doc.has_permission('read')\n\t\texcept frappe.DoesNotExistError:\n\t\t\t# if parent doc is not created before file is created\n\t\t\t# we cannot check its permission so allow the file\n\t\t\tpermission = True\n\n\treturn permission\n\n\ndef remove_file_by_url(file_url, doctype=None, name=None):\n\tif doctype and name:\n\t\tfid = frappe.db.get_value(\"File\", {\n\t\t\t\"file_url\": file_url,\n\t\t\t\"attached_to_doctype\": doctype,\n\t\t\t\"attached_to_name\": name})\n\telse:\n\t\tfid = frappe.db.get_value(\"File\", {\"file_url\": file_url})\n\n\tif fid:\n\t\treturn remove_file(fid=fid)\n\n\ndef get_content_hash(content):\n\tif isinstance(content, text_type):\n\t\tcontent = content.encode()\n\treturn hashlib.md5(content).hexdigest() #nosec\n\n\ndef get_file_name(fname, optional_suffix):\n\t# convert to unicode\n\tfname = cstr(fname)\n\n\tf = fname.rsplit('.', 1)\n\tif len(f) == 1:\n\t\tpartial, extn = f[0], 
\"\"\n\telse:\n\t\tpartial, extn = f[0], \".\" + f[1]\n\treturn '{partial}{suffix}{extn}'.format(partial=partial, extn=extn, suffix=optional_suffix)\n\n\n@frappe.whitelist()\ndef download_file(file_url):\n\t\"\"\"\n\tDownload file using token and REST API. Valid session or\n\ttoken is required to download private files.\n\n\tMethod : GET\n\tEndpoint : frappe.core.doctype.file.file.download_file\n\tURL Params : file_name = /path/to/file relative to site path\n\t\"\"\"\n\tfile_doc = frappe.get_doc(\"File\", {\"file_url\": file_url})\n\tfile_doc.check_permission(\"read\")\n\n\tfrappe.local.response.filename = os.path.basename(file_url)\n\tfrappe.local.response.filecontent = file_doc.get_content()\n\tfrappe.local.response.type = \"download\"\n\ndef extract_images_from_doc(doc, fieldname):\n\tcontent = doc.get(fieldname)\n\tcontent = extract_images_from_html(doc, content)\n\tif frappe.flags.has_dataurl:\n\t\tdoc.set(fieldname, content)\n\n\ndef extract_images_from_html(doc, content):\n\tfrappe.flags.has_dataurl = False\n\n\tdef _save_file(match):\n\t\tdata = match.group(1)\n\t\tdata = data.split(\"data:\")[1]\n\t\theaders, content = data.split(\",\")\n\n\t\tif \"filename=\" in headers:\n\t\t\tfilename = headers.split(\"filename=\")[-1]\n\n\t\t\t# decode filename\n\t\t\tif not isinstance(filename, text_type):\n\t\t\t\tfilename = text_type(filename, 'utf-8')\n\t\telse:\n\t\t\tmtype = headers.split(\";\")[0]\n\t\t\tfilename = get_random_filename(content_type=mtype)\n\n\t\tdoctype = doc.parenttype if doc.parent else doc.doctype\n\t\tname = doc.parent or doc.name\n\n\t\t_file = frappe.get_doc({\n\t\t\t\"doctype\": \"File\",\n\t\t\t\"file_name\": filename,\n\t\t\t\"attached_to_doctype\": doctype,\n\t\t\t\"attached_to_name\": name,\n\t\t\t\"content\": content,\n\t\t\t\"decode\": True\n\t\t})\n\t\t_file.save(ignore_permissions=True)\n\t\tfile_url = _file.file_url\n\t\tif not frappe.flags.has_dataurl:\n\t\t\tfrappe.flags.has_dataurl = True\n\n\t\treturn ']*src\\s*=\\s*[\"\\'](?=data:)(.*?)[\"\\']', _save_file, content)\n\n\treturn content\n\n\ndef get_random_filename(extn=None, content_type=None):\n\tif extn:\n\t\tif not extn.startswith(\".\"):\n\t\t\textn = \".\" + extn\n\n\telif content_type:\n\t\textn = mimetypes.guess_extension(content_type)\n\n\treturn random_string(7) + (extn or \"\")\n\n\n@frappe.whitelist()\ndef unzip_file(name):\n\t'''Unzip the given file and make file records for each of the extracted files'''\n\tfile_obj = frappe.get_doc('File', name)\n\tfiles = file_obj.unzip()\n\treturn len(files)\n\n\n@frappe.whitelist()\ndef get_attached_images(doctype, names):\n\t'''get list of image urls attached in form\n\treturns {name: ['image.jpg', 'image.png']}'''\n\n\tif isinstance(names, string_types):\n\t\tnames = json.loads(names)\n\n\timg_urls = frappe.db.get_list('File', filters={\n\t\t'attached_to_doctype': doctype,\n\t\t'attached_to_name': ('in', names),\n\t\t'is_folder': 0\n\t}, fields=['file_url', 'attached_to_name as docname'])\n\n\tout = frappe._dict()\n\tfor i in img_urls:\n\t\tout[i.docname] = out.get(i.docname, [])\n\t\tout[i.docname].append(i.file_url)\n\n\treturn out\n\n\n@frappe.whitelist()\ndef validate_filename(filename):\n\tfrom frappe.utils import now_datetime\n\ttimestamp = now_datetime().strftime(\" %Y-%m-%d %H:%M:%S\")\n\tfname = get_file_name(filename, timestamp)\n\treturn fname\n\n@frappe.whitelist()\ndef get_files_in_folder(folder):\n\treturn frappe.db.get_all('File',\n\t\t{ 'folder': folder },\n\t\t['name', 'file_name', 'file_url', 'is_folder', 
'modified']\n\t)\n","sub_path":"frappe/core/doctype/file/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":27895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"174166682","text":"import feed\nimport os\nimport tweepy\nimport json\nimport imageTest\n\nwith open('nytimes.json') as json_file:\n    json_data = json.load(json_file)\n\n\ndef test_getFeed():\n    if not os.path.exists(\"keys\"):\n        status_json = json_data  # json.load above already returned the parsed object\n        assert status_json['text'] == \"The public health director of Santa Clara County, California, confirmed that the county's new coronavirus case does… https://t.co/hl8ClztVbL\"\n    else:\n        data = feed.getFeed(\"realDonaldTrump\", 1)\n        assert len(data[0]['text']) > 0\n\n\ndef test_annotateImage(capsys):\n    if not os.path.exists(\"keys\"):\n        assert imageTest.description == \"Sport venue, Basketball, Basketball court, Tournament, Sports, Leisure centre, Competition event, Ball game, Hardwood, Floor\"\n    else:\n        description = feed.annotateImage(imageTest.url)\n        assert description == \"Sport venue, Basketball, Basketball court, Tournament, Sports, Leisure centre, Competition event, Ball game, Hardwood, Floor\"\n","sub_path":"test_feed.py","file_name":"test_feed.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"460865858","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: \"Marziye Derakhshannia and Dickson Owuor\"\n@license: \"MIT\"\n@version: \"1.0\"\n@email: \"dm.derakhshannia@gmail.com or owuordickson@gmail.com \"\n@created: \"26 October 2020\"\n\nDescription: greedy heuristic algorithm that optimizes data lake jobs\n\n\"\"\"\n\nimport numpy as np\nimport time\nfrom .daemon import Daemon\nfrom ..common.read_data import FileData\nfrom ..dl_job.dl_job import Dl_Job\nfrom ..dl_job.u_demand import Demand\n\n\nclass DlOpt(Daemon):\n\n    def __init__(self, pidfile, file):\n        super().__init__(pidfile)\n        self.jobs = self.init_jobs(file)\n        self.a_matrix = np.ones(len(self.jobs), dtype=float)\n        # print(self.update_ab(self.jobs[0], Demand(2)))\n        # self.demands = [] # to be sent randomly\n        # self.cost_matrix = np.array([])\n        # self.job_status = np.array([])\n\n    def init_jobs(self, file_path):\n        jobs = []\n        cst_idx = -1\n        fd = FileData(file_path)\n        for k, v in fd.title:\n            # print(str(k) + ' ' + str(v.decode()))\n            if v.decode() == 'cost':\n                cst_idx = k\n        if cst_idx > 0:\n            # print(fd.data)\n            for obj in fd.data:\n                jb = Dl_Job(obj[0], int(obj[cst_idx]))\n                jobs.append(jb)\n                # print(jb.name)\n        return jobs\n\n    def update_ab(self, job, demand):\n        print(job.cost)\n        print(job.last_time)\n        print(demand.status)\n        print(self.a_matrix)\n\n    # def add_demand(self, demand):\n    #     if demand.status is 'incomplete':\n    #         self.demands.append(demand)\n\n    def run(self):\n        while True:\n            print(\"running\")\n            print(\"checking new demands - allocate to jobs\")\n            time.sleep(1)\n\n# class DlService(Daemon): # to be removed\n#     def run(self): # implemented in DlOpt\n    # Or simply merge your code with MyDaemon.\n#     your_code = DlOpt()\n#     your_code.run()\n","sub_path":"tests/dlopt.py","file_name":"dlopt.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"284317239","text":"\n#AthMonitorCfgHelperOld must be imported in upper level JO \n\n\n#define the group names here, as you'll use them multiple times\naffectedRegGroupName=\"LArAffectedRegionsMonGroup\"\n\n\n#here 
are the helpers\nhelper_affectedReg = AthMonitorCfgHelperOld(DQMonFlags, affectedRegGroupName)\n\n#then the algorithms\n#from AthenaMonitoring.AthenaMonitoringConf import ExampleMonitorAlgorithm\nfrom LArMonitoring.LArMonitoringConf import LArAffectedRegionsAlg\nlarAffectedRegAlg = helper_affectedReg.addAlgorithm(LArAffectedRegionsAlg,'larAffectedRegAlg')\nlarAffectedRegAlg.AffectedRegionsGroupName=affectedRegGroupName\nlarAffectedRegAlg.IsOnline = isOnline\n\n#if you have to add a tool to the algorithm, put it here\n\n\n#now the groups and histograms\nfrom LArMonitoring import GlobalVariables #to define the ranges\n\nlarAffReg_hist_path='AffectedRegions/' #histogram path\n\n#EMBPS\ngroup_name_ending=\"EMBPS\" \nlarAffectedRegAlg.EMBPSname=group_name_ending\naffectedRegGroupEMBPS = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n\naffectedRegGroupEMBPS.defineHistogram('etaPOS,phi;LArAffectedRegionsEMBAPS',\n title='HV Affected Regions - EMBA - Presampler;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_EMB[\"EMBAPS\"][0],xmin=GlobalVariables.HVeta_EMB[\"EMBAPS\"][1],xmax=GlobalVariables.HVeta_EMB[\"EMBAPS\"][2],\n ybins=GlobalVariables.HVphi_EMB[\"EMBAPS\"][0],ymin=GlobalVariables.HVphi_EMB[\"EMBAPS\"][1],ymax=GlobalVariables.HVphi_EMB[\"EMBAPS\"][2]\n)\naffectedRegGroupEMBPS.defineHistogram('etaNEG,phi;LArAffectedRegionsEMBCPS',\n title='HV Affected Regions - EMBC - Presampler;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_EMB[\"EMBCPS\"][0],xmin=GlobalVariables.HVeta_EMB[\"EMBCPS\"][1],xmax=GlobalVariables.HVeta_EMB[\"EMBCPS\"][2],\n ybins=GlobalVariables.HVphi_EMB[\"EMBCPS\"][0],ymin=GlobalVariables.HVphi_EMB[\"EMBCPS\"][1],ymax=GlobalVariables.HVphi_EMB[\"EMBCPS\"][2]\n)\n\n\n#EMB\ngroup_name_ending=\"EMB\"\nlarAffectedRegAlg.EMBname=group_name_ending\naffectedRegGroupEMB = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n\naffectedRegGroupEMB.defineHistogram('etaPOS,phi;LArAffectedRegionsEMBA',\n title='HV Affected Regions - EMBA - Samplings 1-3;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_EMB[\"EMBA\"][0],xmin=GlobalVariables.HVeta_EMB[\"EMBA\"][1],xmax=GlobalVariables.HVeta_EMB[\"EMBA\"][2],\n ybins=GlobalVariables.HVphi_EMB[\"EMBA\"][0],ymin=GlobalVariables.HVphi_EMB[\"EMBA\"][1],ymax=GlobalVariables.HVphi_EMB[\"EMBA\"][2]\n)\naffectedRegGroupEMB.defineHistogram('etaNEG,phi;LArAffectedRegionsEMBC',\n title='HV Affected Regions - EMBC - Samplings 1-3;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_EMB[\"EMBC\"][0],xmin=GlobalVariables.HVeta_EMB[\"EMBC\"][1],xmax=GlobalVariables.HVeta_EMB[\"EMBC\"][2],\n ybins=GlobalVariables.HVphi_EMB[\"EMBC\"][0],ymin=GlobalVariables.HVphi_EMB[\"EMBC\"][1],ymax=GlobalVariables.HVphi_EMB[\"EMBC\"][2]\n)\n\n\n#EMECPS\ngroup_name_ending=\"EMECPS\"\nlarAffectedRegAlg.EMECPSname=group_name_ending\naffectedRegGroupEMECPS = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n\naffectedRegGroupEMECPS.defineHistogram('etaPOS,phi;LArAffectedRegionsEMECAPS',\n title='HV Affected Regions - EMECA - Presampler;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_EMEC[\"EMECAPS\"],\n 
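# note: unlike the EMB groups above, the EMEC entries pass complete bin-edge arrays (variable-width binning), so no xmin/xmax/ymin/ymax arguments are given\n                                       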
ybins=GlobalVariables.HVphi_EMEC[\"EMECAPS\"]\n)\naffectedRegGroupEMECPS.defineHistogram('etaNEG,phi;LArAffectedRegionsEMECCPS',\n title='HV Affected Regions - EMECC - Presampler;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_EMEC[\"EMECCPS\"],\n ybins=GlobalVariables.HVphi_EMEC[\"EMECCPS\"]\n)\n\n\n#EMEC\ngroup_name_ending=\"EMEC\"\nlarAffectedRegAlg.EMECname=group_name_ending \naffectedRegGroupEMEC = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n\naffectedRegGroupEMEC.defineHistogram('etaPOS,phi;LArAffectedRegionsEMECA',\n title='HV Affected Regions - EMECA - Samplings 1-3;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_EMEC[\"EMECA\"],\n ybins=GlobalVariables.HVphi_EMEC[\"EMECA\"]\n)\naffectedRegGroupEMEC.defineHistogram('etaNEG,phi;LArAffectedRegionsEMECC',\n title='HV Affected Regions - EMECC - Samplings 1-3;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_EMEC[\"EMECC\"],\n ybins=GlobalVariables.HVphi_EMEC[\"EMECC\"]\n)\n\n#HEC0\ngroup_name_ending=\"HEC0\"\nlarAffectedRegAlg.HEC0name=group_name_ending\naffectedRegGroupHEC0 = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n\naffectedRegGroupHEC0.defineHistogram('etaPOS,phi;LArAffectedRegionsHECA0',\n title='HV Affected Regions - HECA - Layer 1;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"HECA\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"HECA\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"HECA\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"HECA\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"HECA\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"HECA\"][2]\n)\naffectedRegGroupHEC0.defineHistogram('etaNEG,phi;LArAffectedRegionsHECC0',\n title='HV Affected Regions - HECC - Layer 1;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"HECC\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"HECC\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"HECC\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"HECC\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"HECC\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"HECC\"][2]\n)\n\n#HEC1\ngroup_name_ending=\"HEC1\"\nlarAffectedRegAlg.HEC1name=group_name_ending\naffectedRegGroupHEC1 = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n \naffectedRegGroupHEC1.defineHistogram('etaPOS,phi;LArAffectedRegionsHECA1',\n title='HV Affected Regions - HECA - Layer 2;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"HECA\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"HECA\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"HECA\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"HECA\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"HECA\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"HECA\"][2]\n)\naffectedRegGroupHEC1.defineHistogram('etaNEG,phi;LArAffectedRegionsHECC1',\n title='HV Affected Regions - HECC - Layer 2;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"HECC\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"HECC\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"HECC\"][2],\n 
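# all four HEC layer groups reuse the same HECFcal eta/phi binning tables; only the layer number in the histogram name and title changes\n                                     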
ybins=GlobalVariables.HVphi_HECFcal[\"HECC\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"HECC\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"HECC\"][2]\n)\n \n#HEC2\ngroup_name_ending=\"HEC2\"\nlarAffectedRegAlg.HEC2name=group_name_ending\naffectedRegGroupHEC2 = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n \naffectedRegGroupHEC2.defineHistogram('etaPOS,phi;LArAffectedRegionsHECA2',\n title='HV Affected Regions - HECA - Layer 3;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"HECA\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"HECA\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"HECA\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"HECA\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"HECA\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"HECA\"][2]\n)\naffectedRegGroupHEC2.defineHistogram('etaNEG,phi;LArAffectedRegionsHECC2',\n title='HV Affected Regions - HECC - Layer 3;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"HECC\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"HECC\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"HECC\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"HECC\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"HECC\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"HECC\"][2]\n)\n \n#HEC3\ngroup_name_ending=\"HEC3\"\nlarAffectedRegAlg.HEC3name=group_name_ending\naffectedRegGroupHEC3 = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n \naffectedRegGroupHEC3.defineHistogram('etaPOS,phi;LArAffectedRegionsHECA3',\n title='HV Affected Regions - HECA - Layer 4;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"HECA\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"HECA\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"HECA\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"HECA\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"HECA\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"HECA\"][2]\n)\naffectedRegGroupHEC3.defineHistogram('etaNEG,phi;LArAffectedRegionsHECC3',\n title='HV Affected Regions - HECC - Layer 4;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"HECC\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"HECC\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"HECC\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"HECC\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"HECC\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"HECC\"][2]\n)\n \n#FCAL0\ngroup_name_ending=\"FCAL0\"\nlarAffectedRegAlg.FCAL0name=group_name_ending\naffectedRegGroupFCAL0 = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n \naffectedRegGroupFCAL0.defineHistogram('etaPOS,phi;LArAffectedRegionsFCALA0',\n title='HV Affected Regions - FCALA - Layer 1;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"FCalA\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"FCalA\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"FCalA\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"FCalA\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"FCalA\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"FCalA\"][2]\n)\naffectedRegGroupFCAL0.defineHistogram('etaNEG,phi;LArAffectedRegionsFCALC0',\n title='HV Affected Regions - FCALC - Layer 1;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n 
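# FCal shares the HECFcal binning tables; weight='problem' presumably fills each (eta,phi) cell with the affected-region flag rather than a plain count\n                                      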
xbins=GlobalVariables.HVeta_HECFcal[\"FCalC\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"FCalC\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"FCalC\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"FCalC\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"FCalC\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"FCalC\"][2]\n)\n\n#FCAL1\ngroup_name_ending=\"FCAL1\"\nlarAffectedRegAlg.FCAL1name=group_name_ending\naffectedRegGroupFCAL1 = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n \naffectedRegGroupFCAL1.defineHistogram('etaPOS,phi;LArAffectedRegionsFCALA1',\n title='HV Affected Regions - FCALA - Layer 2;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"FCalA\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"FCalA\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"FCalA\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"FCalA\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"FCalA\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"FCalA\"][2]\n)\naffectedRegGroupFCAL1.defineHistogram('etaNEG,phi;LArAffectedRegionsFCALC1',\n title='HV Affected Regions - FCALC - Layer 2;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"FCalC\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"FCalC\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"FCalC\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"FCalC\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"FCalC\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"FCalC\"][2]\n)\n\n#FCAL2\ngroup_name_ending=\"FCAL2\"\nlarAffectedRegAlg.FCAL2name=group_name_ending\naffectedRegGroupFCAL2 = helper_affectedReg.addGroup(\n larAffectedRegAlg,\n affectedRegGroupName+group_name_ending,\n '/LAr/'\n)\n\naffectedRegGroupFCAL2.defineHistogram('etaPOS,phi;LArAffectedRegionsFCALA2',\n title='HV Affected Regions - FCALA - Layer 3;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"FCalA\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"FCalA\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"FCalA\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"FCalA\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"FCalA\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"FCalA\"][2]\n)\naffectedRegGroupFCAL2.defineHistogram('etaNEG,phi;LArAffectedRegionsFCALC2',\n title='HV Affected Regions - FCALC - Layer 3;#eta;#phi',\n type='TH2F',\n path=larAffReg_hist_path,\n weight='problem',\n xbins=GlobalVariables.HVeta_HECFcal[\"FCalC\"][0],xmin=GlobalVariables.HVeta_HECFcal[\"FCalC\"][1],xmax=GlobalVariables.HVeta_HECFcal[\"FCalC\"][2],\n ybins=GlobalVariables.HVphi_HECFcal[\"FCalC\"][0],ymin=GlobalVariables.HVphi_HECFcal[\"FCalC\"][1],ymax=GlobalVariables.HVphi_HECFcal[\"FCalC\"][2]\n)\n\n#and finally add all to topSequence\ntopSequence += helper_affectedReg.result()\n","sub_path":"LArCalorimeter/LArMonitoring/share/LArAffectedRegionsAlg_jobOptions.py","file_name":"LArAffectedRegionsAlg_jobOptions.py","file_ext":"py","file_size_in_byte":17839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637747443","text":"\"\"\"\nWrite a program that takes a list of numbers (for example, a = [5, 10, 15, 20, 25])\nand makes a new list of only the first and last elements of the given list.\nFor practice, write this code inside a function.\n\"\"\"\n\n# initializing the list we will be using\na = [5, 10, 15, 20, 25]\n\n# creating a new list with the first and last element by checking if the index is either the first or last\n# the 
first index is always 0\n# the last index is the length of the array - 1 as indexing starts with 0 and counting with 1\nnew_list = [a[0], a[len(a) - 1]]\n\n# printing our new list\nprint(new_list)\n\n__author__ = \"Ruben Eekhof\"\n","sub_path":"practice_python/exercise12.py","file_name":"exercise12.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"490323919","text":"# --------------\n #Importing header files\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Loading the data ('path' is provided by the execution environment)\ndata = pd.read_csv(path)\n\n# Renaming column name and checking it\ndata.rename(columns = {'Total':'Total_Medals'}, inplace = True)\ndata.head(10)\n\n# Creating new column called better event\ndata['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'],'Summer',np.where(data['Total_Summer'] < data['Total_Winter'],'Winter','Both'))\ndata.head()\n\n# Count of how many countries have been doing better in summer and winter\nvalue = pd.DataFrame(data['Better_Event'].value_counts())\nvalue.reset_index(inplace = True)\nvalue\n\nbetter_event = value['index'].iloc[0]\nbetter_event\n\n# Creating a subset of the dataframe with only few columns\ntop_countries = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]\ntop_countries.tail()\n# Dropping the last row of the dataframe\ntop_countries.drop(top_countries.index[-1],inplace = True)\ntop_countries.tail()\n\n# Creating function called top_ten\ndef top_ten(df,col):\n    \"\"\"\n    Inputs:\n    Dataframe - df\n    Column - col\n    \n    Outputs:\n    Takes the dataframe and the column name as parameters.\n\n    Creates a new empty list called 'country_list'\n\n    Find the top 10 values for that particular column(for e.g. 
'Total_Summer') using \"nlargest()\" function\n\n    From the dataframe returned by nlargest function, slices the Country_Name column and stores it in the 'country_list' list\n\n    Returns the 'country_list'\n    \"\"\"\n    country_list = []\n    \n    for i in (df.nlargest(10,col)['Country_Name']):\n        country_list.append(i)\n    \n    return (country_list)\n\n# Using top ten function and storing the results in variable\ntop_10_summer = top_ten(top_countries,'Total_Summer')\ntop_10_winter = top_ten(top_countries,'Total_Winter')\ntop_10 = top_ten(top_countries,'Total_Medals')\n\nnew = pd.DataFrame(list(zip(top_10_summer,top_10_winter,top_10)),columns = ['Summer','Winter','Total'])\n\n# Creating a new list common that stores common elements of all the three lists\ntopsummer = set(top_10_summer)\ntopwinter = set(top_10_winter)\ntop10 = set(top_10)\ncommon = list((topsummer.intersection(topwinter)).intersection(top10))\ncommon\n\n#Subsetting dataframes\nsummer_df = data[data['Country_Name'].isin(top_10_summer)]\nwinter_df = data[data['Country_Name'].isin(top_10_winter)]\ntop_df = data[data['Country_Name'].isin(top_10)]\n\n#Plotting the graphs\nfig, (ax_1, ax_2, ax_3) = plt.subplots(3,1, figsize = (12,24))\n\nax_1.bar(summer_df['Country_Name'],summer_df['Total_Summer'])\nax_1.set_title('Bar-chart with top 10 countries in summer olympics')\nax_1.tick_params(labelrotation=45)\n\nax_2.bar(winter_df['Country_Name'],winter_df['Total_Winter'])\nax_2.set_title('Bar-chart with top 10 countries in winter olympics')\nax_2.tick_params(labelrotation=45)\n\nax_3.bar(top_df['Country_Name'],top_df['Total_Medals'])\nax_3.set_title('Bar-chart with top 10 countries in both summer and winter olympics')\nax_3.tick_params(labelrotation=45)\n\nplt.ylabel('Total medals won')\n\nplt.show()\n\n# Creating golden ratio column and identifying summer country gold and winter country gold\nsummer_df['Golden_Ratio'] = summer_df['Gold_Summer']/summer_df['Total_Summer']\nsummer_df.head()\nsummer_max_ratio = np.max(summer_df['Golden_Ratio'])\nsummer_country_gold = summer_df[summer_df['Golden_Ratio'] == np.max(summer_df['Golden_Ratio'])]['Country_Name']\nsummer_country_gold\n\nwinter_df['Golden_Ratio'] = winter_df['Gold_Winter']/winter_df['Total_Winter']\nwinter_df.head()\nwinter_max_ratio = np.max(winter_df['Golden_Ratio'])\nwinter_country_gold = winter_df[winter_df['Golden_Ratio'] == np.max(winter_df['Golden_Ratio'])]['Country_Name']\nwinter_country_gold\n\ntop_df['Golden_Ratio'] = top_df['Gold_Total']/top_df['Total_Medals']\ntop_df.head()\ntop_max_ratio = np.max(top_df['Golden_Ratio'])\ntop_country_gold = top_df[top_df['Golden_Ratio'] == np.max(top_df['Golden_Ratio'])]['Country_Name'].iloc[0]\n\n# Best in the world\ndata_1 = data.copy()\ndata_1.drop(data_1.index[-1], inplace = True)\n\ndata_1['Total_Points'] = (data_1['Gold_Total']*3) + data_1['Silver_Total']*2 + data_1['Bronze_Total']*1\n\nmost_points = np.max(data_1['Total_Points'])\nbest_country = data_1[data_1['Total_Points'] == most_points]['Country_Name'].iloc[0]\n\nbest = data[data['Country_Name'] == best_country]\nbest = best[['Gold_Total','Silver_Total','Bronze_Total']]\n\n# Plotting barplot of medals\nplt.figure(figsize = (8,10))\nbest.plot.bar(stacked = True)\nplt.xlabel(best_country)\nplt.ylabel('Medals Tally')\nplt.xticks(rotation = 45)\nplt.show()\n\n\n\n\n\n\n\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"179458455","text":"#!/usr/bin/env python3\nimport sys, os, imp, glob\nsys.argv.append(\"-b\")\nds = 
imp.load_source('DataSetInfo',os.environ['LATDIR']+'/DataSetInfo.py')\nwl = imp.load_source('waveLibs',os.environ['LATDIR']+'/waveLibs.py')\nimport matplotlib\nmatplotlib.use('pdf')\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nimport numpy as np\nimport tinydb as db\n\nextPulserInfo = {\n # TODO: relabel special runs list and DataSetInfo to match this list\n # Test 1 - attenuation, --donotbuild was used\n 6: [[5942, 5945], [10,14,18,22], 200, 674], # att, rt, chan\n 7: [[5947, 5960], [10,14,18,22,26,30,34,38,42,46,50,54,58,62], 190, 674],\n 8: [[5964, 5977], [10,14,18,22,26,30,34,38,42,46,50,54,58,62], 190, 624],\n 9: [[5979, 5992], [10,14,18,22,26,30,34,38,42,46,50,54,58,62], 190, 688],\n 10: [[6191, 6204], [10,14,18,22,26,30,34,38,42,46,50,54,58,62], 190, 662],\n 11: [[6206, 6219], [10,14,18,22,26,30,34,38,42,46,50,54,58,62], 190, 608],\n # Test 2 - rise time\n 12: [[6934, 6944], [140,145,150,155,160,165,170,175,180,185,190], 18, 674], # rt, att, chan\n 13: [[6964, 6970], [4354,1257,1296,654,1278,1278,1278],0,[674,624,688,662,608,608,608]], # adc,att,chan\n 14: [[6971, 6976], [140,150,160,170,180,190], 18, 614],\n 15: [[6977, 6982], [140,150,160,170,180,190], 18, 624],\n 16: [[7002, 7007], [140,150,160,170,180,190], 18, 688],\n 17: [[7008, 7013], [140,150,160,170,180,190], 18, 662],\n # Test 3 - attenuation\n 18: [[7219, 7233], [14,18,22,26,30,999,30,34,38,42,46,50,54,58,62], 155, 674], # att, rt, chan\n 19: [[7234, 7246], [14,18,22,26,30,34,38,42,46,50,54,58,62], 164, 624],\n 20: [[7247, 7259], [14,18,22,26,30,34,38,42,46,50,54,58,62], 146, 688],\n 21: [[7260, 7272], [14,18,22,26,30,34,38,42,46,50,54,58,62], 138, 662],\n 22: [[13168, 13181], [14,18,22,26,30,34,38,42,46,50,54,58,62], 999, 690]\n }\n\noriginalList = {\n # 0: [4547, 4547], # ignore\n # 1: [4549, 4572], # ignore, whole BG range, full of regular pulsers, etc\n # 2: [4573, 4831], # ignore, whole BG range, full of regular pulsers, etc\n # 3: [5525, 5534], # ignore, \"setup system\"\n # 4: [5535, 5554], # ignore, \"numerous problems\"\n # 5: [5555, 5850], # ignore, whole BG range, full of regular pulsers, etc\n # 6: [5872, 5877], # ignore, short runs\n 7: [5940, 5963],\n 8: [5964, 5978],\n 9: [5979, 5992],\n 10: [6191, 6205],\n 11: [6206, 6219],\n 12: [6934, 6944],\n 13: [6964, 6970],\n 14: [6971, 6976],\n 15: [6977, 6982],\n 16: [7002, 7007],\n 17: [7008, 7013],\n 18: [7219, 7233],\n 19: [7234, 7246],\n 20: [7247, 7259],\n 21: [7260, 7272],\n 22: [13168, 13181]\n }\n\n\ndef main():\n\n # runTuneCut()\n runByRun()\n # fitSloEfficiency1()\n # fitSloEfficiency2()\n # riseTimeStudy()\n # combineData()\n # riseNoiseEfficiency()\n # TEEfficiency()\n # checkBadRuns()\n # createBadRunList()\n # checkFiles()\n\n\ndef hist2dExample():\n xArr, yArr = sloArr, rtArr\n xLo, xHi, bpX, yLo, yHi, bpY = 50, 100, 0.2, 335, 375, 0.2\n nBY, nBX = int((yHi-yLo)/bpY), int((xHi-xLo)/bpY)\n plt.hist2d(xArr, yArr, bins=[nBX,nBY], range=[[xLo,xHi],[yLo,yHi]], norm=LogNorm())\n plt.colorbar()\n plt.xlim(50, 100)\n plt.title(\"pIdx %d, channel %d\" % (pIdx, extChan))\n plt.xlabel(\"fitSlo\",horizontalalignment='right',x=1.0)\n plt.ylabel(\"riseTime (ns)\",horizontalalignment='right',y=1.0)\n plt.legend(loc=\"best\")\n plt.savefig(\"../plots/rtStudy_idx%d.pdf\" % (pIdx))\n # plt.show()\n\n\ndef checkFiles():\n\n calInfo = ds.CalInfo()\n extPulserInfo = calInfo.GetSpecialList()[\"extPulserInfo\"]\n\n for pIdx in range(7,23+1):\n runList = calInfo.GetSpecialRuns(\"extPulser\",pIdx)\n attList = extPulserInfo[pIdx][0]\n extChan = 
extPulserInfo[pIdx][-1]\n\n for i, run in enumerate(runList):\n fileList = ds.getLATRunList([run],\"%s/lat\" % (ds.specialDir))\n\n if len(fileList)==0:\n attRun = extPulserInfo[pIdx][0][i]\n print(\"No files:\",run,\"Att:\",attRun)\n\n # result:\n noFiles = [6936,6937,6940,6942,6944,6965,6968,6969,6974,6977,7224,7267,7268,13168]\n\n\ndef createBadRunList():\n\n origRunList = []\n for key in originalList:\n runLo, runHi = originalList[key][0], originalList[key][1]\n origRunList.extend([run for run in range(runLo, runHi+1)])\n\n newRunList = []\n for key in extPulserInfo:\n runLo, runHi = extPulserInfo[key][0][0], extPulserInfo[key][0][1]\n newRunList.extend([run for run in range(runLo, runHi+1)])\n\n print(len(origRunList), len(newRunList))\n\n for run in origRunList:\n if run not in newRunList:\n print(run)\n\n\ndef checkBadRuns():\n from ROOT import TChain\n\n cal = ds.CalInfo()\n for pIdx in range(14,22+1):\n\n extChan = extPulserInfo[pIdx][-1]\n syncChan = wl.getChan(0,10,0) # 672\n\n runList = cal.GetSpecialRuns(\"extPulser\",pIdx)\n fList = ds.getLATRunList(runList, ds.specialDir+\"/lat\")\n\n for f in fList:\n print(f)\n latChain = TChain(\"skimTree\")\n latChain.Add(\"%s/%s\" % (ds.specialDir+\"/lat\", f))\n\n tNames = [\"Entry$\",\"mH\",\"channel\",\"trapENFCal\",\"fitSlo\",\"den90\",\"den10\"]\n theCut = \"(channel==%d || channel==%d) && trapENFCal > 1\" % (syncChan, extChan)\n tVals = wl.GetVX(latChain,tNames)\n nPass = len(tVals[\"Entry$\"])\n if nPass == 0: continue\n\n for idx in range(nPass):\n chan = tVals[\"channel\"][idx]\n enf = tVals[\"trapENFCal\"][idx]\n rt = tVals[\"den90\"][idx]-tVals[\"den10\"][idx]\n\n print(pIdx,latChain.GetEntries(),nPass)\n\n\n skipList = []\n\n\ndef runTuneCut():\n \"\"\" Run TuneCut just like it would be in LAT3, but for ext pulser data. 
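It tunes the fitSlo cut for channel 624 and then prints a small sample of hits to sanity-check the inputs. 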
\"\"\"\n global ROOT, TChain, TCanvas, gPad, TGraph, TLine\n from ROOT import TChain, TCanvas, gPad, TGraph, TLine\n import ROOT\n\n dsNum, subNum, tName, eLo, eHi = 0, 19, \"extPulser\", 0, 250\n chList = [624]\n par, parName = \"fitSlo\", \"fitSlo\"\n theCut = \"channel==624\"\n fastMode = False\n tRange = [eLo, eHi]\n\n cal = ds.CalInfo()\n # runList = cal.GetSpecialRuns(\"extPulser\",subNum)\n runList = [7244]\n fList = ds.getLATRunList(runList, ds.specialDir+\"/lat\")\n skimTree = TChain(\"skimTree\")\n for f in fList: skimTree.Add(\"%s/%s\" % (ds.specialDir+\"/lat\", f))\n\n # returns the dict we would fill in the DB with.\n # cutDict = TuneCut(dsNum, subNum, tRange[0], tRange[1], tName, skimTree, chList, par, parName, theCut, fastMode)\n\n # make a plot to check the TuneCut plot.\n tNames = [\"run\",\"Entry$\",\"channel\",\"trapENFCal\",\"fitSlo\"]\n tVals = wl.GetVX(skimTree, tNames, \"(channel==624 || channel==672) && Entry$ < 200\")\n for idx in range(tVals[\"trapENFCal\"].size):\n run = tVals[\"run\"][idx]\n ent = tVals[\"Entry$\"][idx]\n chan = tVals[\"channel\"][idx]\n enf = tVals[\"trapENFCal\"][idx]\n slo = tVals[\"fitSlo\"][idx]\n print(\"r%d i%d c%d e%-8.3f s%.3f\" % (run,ent,chan,enf,slo))\n return\n\n fig = plt.figure(figsize=(10,5),facecolor='w')\n xArr, yArr = tVals[\"trapENFCal\"], tVals[\"fitSlo\"]\n xLo, xHi, yLo, yHi = 0, 50, -20, 300\n bpY, bpX = 1., 0.2\n nBinsY, nBinsX = int((yHi-yLo)/bpY), int((xHi-xLo)/bpX)\n plt.hist2d(xArr, yArr, bins=[nBinsX,nBinsY], range=[[xLo,xHi],[yLo,yHi]], norm=LogNorm())\n plt.colorbar()\n plt.xlabel(\"trapENFCal (keV)\",horizontalalignment='right',x=1.0)\n plt.ylabel(\"fitSlo\",horizontalalignment='right',y=1.0)\n plt.savefig(\"../plots/extPulser_idx%d.pdf\" % subNum)\n\n nOverflow = len([val for val in tVals[\"fitSlo\"] if val > yHi or val < yLo])\n print(\"Found %d overflows of %d total (%.2f%%)\" % (nOverflow, len(tVals[\"fitSlo\"]), 100.*nOverflow/len(tVals[\"fitSlo\"])))\n\n\ndef TuneCut(dsNum, subNum, tMin, tMax, tName, cal, chList, par, parName, theCut, fastMode):\n c = TCanvas(\"%s\"%(parName),\"%s\"%(parName),1600,600)\n c.Divide(3,1,0.00001,0.00001)\n cutDict = {}\n for ch in chList:\n cutDict[ch] = [0,0,0,0,0]\n eb, elo, ehi = (tMax-tMin),tMin,tMax\n d1Cut = theCut + \" && trapENFCal > %d && trapENFCal < %d && channel==%d\" % (elo,ehi,ch)\n d2Cut = theCut + \" && channel==%d\" % ch\n nPass = cal.Draw(\"trapENFCal:%s\"%(par), d1Cut, \"goff\")\n nEnergy = cal.GetV1()\n nCut = cal.GetV2()\n nCutList = list(float(nCut[n]) for n in range(nPass))\n nEnergyList = list(float(nEnergy[n]) for n in range(nPass))\n\n # Error and warning messages\n if len(nCutList) == 0 or len(nEnergyList) == 0:\n print(\"Error: Channel %d has no entries, cut cannot be set properly, setting to [0,0,0,0,0,0,0]\"%(ch))\n cutDict[ch] = [0,0,0,0,0]\n continue\n if len(nCutList) <= 1000 or len(nEnergyList) <= 1000:\n print(\"Warning: Channel %d has less than 1000 entries, cut values may not be accurate\"%(ch))\n\n vb, v5, v95 = 100000, np.percentile(nCutList, 5), np.percentile(nCutList,95)\n vlo, vhi = v5-5*abs(v5), v95+5*abs(v95)\n nCutListReduced = [x for x in nCutList if x > v5 and x < v95]\n outPlot = \"../plots/%s_ds%d_idx%d_%s_ch%d.png\" % (parName,dsNum,subNum,tName,ch)\n cut99,cut95,cut01,cut05,cut90 = MakeCutPlot(c,cal,par,eb,elo,ehi,vb,vlo,vhi,d2Cut,d1Cut,outPlot,fastMode)\n cutDict[ch] = [cut01,cut05,cut90,cut95,cut99]\n return cutDict\n\n\ndef MakeCutPlot(c,cal,var,eb,elo,ehi,vb,vlo,vhi,d2Cut,d1Cut,outPlot,fastMode):\n \"\"\" Creates a 
channel-specific energy calibration plot. \"\"\"\n\n # Calculate cut vals (assumes plot range is correct)\n h1 = wl.H1D(cal,vb,vlo,vhi,var,d1Cut)\n h1Sum = h1.Integral()\n if h1Sum == 0:\n print(\"Error: Failed %s, histogram sum is 0 so cannot normalize, setting to [0,0,0,0,0]\"%(var))\n return 0,0,0,0,0\n h1.Scale(1/h1Sum)\n try:\n cut99,cut95,cut01,cut05,cut90 = wl.GetIntegralPoints(h1)\n except:\n print(\"Error: Failed %s using cut %s, setting to [0,0,0,0,0]\"%(var,d1Cut))\n return 0,0,0,0,0\n if fastMode:\n print(\"Returning fastMode output: \", cut99,cut95,cut01,cut05,cut90)\n return cut99,cut95,cut01,cut05,cut90\n\n # Generate the plot for inspection.\n c.cd(2)\n gPad.SetLogy(0)\n h1.GetXaxis().SetRangeUser(cut01-abs(0.25*cut01), cut99 + abs(0.25*cut99) )\n h1.SetTitle(\"\")\n h1.GetXaxis().SetTitle(var)\n h1.Draw(\"hist\")\n\n c.cd(1)\n gPad.SetLogy(0)\n cal.Draw(\"%s:trapENFCal>>b(%d,%d,%d,%d,%.3E,%.3E)\"%(var,eb+10,elo-5,ehi+5,vb,cut01-abs(0.25*cut01),cut99+abs(0.25*cut99)) ,d2Cut)\n\n l1, l2, l3 = TLine(), TLine(), TLine()\n l1.SetLineColor(ROOT.kGreen)\n l2.SetLineColor(ROOT.kRed)\n l3.SetLineColor(ROOT.kMagenta)\n\n l1.DrawLine(elo-5, cut99, ehi+5, cut99)\n l2.DrawLine(elo-5, cut95, ehi+5, cut95)\n l2.DrawLine(elo-5, cut05, ehi+5, cut05)\n l1.DrawLine(elo-5, cut01, ehi+5, cut01)\n\n c.cd(3)\n x_h1, y_h1 = wl.npTH1D(h1)\n int_h1 = wl.integFunc(y_h1)\n g2 = TGraph(len(x_h1), x_h1, int_h1)\n g2.GetXaxis().SetRangeUser(cut01-abs(0.3*cut01), cut99 + abs(0.3*cut99) )\n g2.SetTitle(\"\")\n g2.GetXaxis().SetTitle(var)\n g2.GetYaxis().SetTitle(\"Percentile\")\n g2.Draw(\"ACP\")\n l1.DrawLine(cut99, 0, cut99, 1)\n l2.DrawLine(cut95, 0, cut95, 1)\n l1.DrawLine(cut01, 0, cut01, 1)\n l2.DrawLine(cut05, 0, cut05, 1)\n\n c.Print(outPlot)\n return cut99,cut95,cut01,cut05,cut90\n\n\ndef runByRun():\n \"\"\" Directly confirm settings of ext pulser scripts. 
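For each run in the range, hit-level values are printed and the measured sync/pulser rates are checked against the nominal 20 Hz sync rate. 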
\"\"\"\n import time\n from ROOT import TFile, TChain, GATDataSet, gROOT\n gROOT.ProcessLine(\"gErrorIgnoreLevel = 3001;\") # suppress ROOT error messages\n\n extPDict = {7:674, 8:624, 9:688, 10:662, 11:608, 12:674, 14:608, 15:624, 16:688, 17:662, 18:674, 19:624, 20:688, 21:662, 22:690}\n syncChan = wl.getChan(0,10,0) # 672\n\n syncRateNominal = 20 # Hz\n\n calInfo = ds.CalInfo()\n\n fig = plt.figure(figsize=(10,6),facecolor='w')\n\n # pIdxs = [12,14,15,16,17] # test 2 - rise time\n # pIdxs = [18,19,20,21] # test 3 - attenuation\n pIdxs = [22]\n for pIdx in pIdxs:\n runList = calInfo.GetSpecialRuns(\"extPulser\",pIdx)\n print(\"Range\",pIdx)\n\n extChan = extPDict[pIdx]\n xArr, yArr = [], [] # we're gonna plot these\n\n # runList = [7234]\n for run in runList:\n # if run in [6936,6937,6940,6942,6944, 6974, 6977]: continue # test 2\n # if run in [7224] or run > 7266: continue # test 3\n\n fileList = []\n subFiles = glob.glob(\"%s/lat/latSkimDS%d_run%d_*.root\" % (ds.specialDir, ds.GetDSNum(run), run))\n for idx in range(len(subFiles)):\n thisFile = \"%s/lat/latSkimDS%d_run%d_%d.root\" % (ds.specialDir, ds.GetDSNum(run), run, idx)\n if not os.path.isfile(thisFile):\n print(\"File doesn't exist: \",thisFile)\n else:\n fileList.append(thisFile)\n latChain = TChain(\"skimTree\")\n for f in fileList: latChain.Add(f)\n\n tNames = [\"Entry$\",\"run\",\"channel\",\"mH\",\"trapENFCal\",\"den90\",\"den10\",\"fitSlo\",\"localTime_s\",\"tOffset\",\"fitAmp\"]\n # theCut = \"(channel==%d || channel==%d) && mH==2\" % (syncChan,extChan)\n # theCut += \" && Entry$ < 100\"\n theCut = \"Entry$ < 200 && gain==0\"\n tVals = wl.GetVX(latChain,tNames,theCut)\n\n # don't delete this\n for idx in range(tVals[\"run\"].size):\n ent = tVals[\"Entry$\"][idx]\n run = tVals[\"run\"][idx]\n chan = tVals[\"channel\"][idx]\n mH = tVals[\"mH\"][idx]\n enf = tVals[\"trapENFCal\"][idx]\n d90 = tVals[\"den90\"][idx]\n d10 = tVals[\"den10\"][idx]\n fitSlo = tVals[\"fitSlo\"][idx]\n gt = tVals[\"localTime_s\"][idx]\n tOff = tVals[\"tOffset\"][idx]*1e-9\n hitTime = gt+tOff\n print(\"%d e%d m%d t%.8f c%-4d %-9.2f %-8.2f %.2f\" % (run,ent,mH,hitTime,chan,enf,d90-d10,fitSlo))\n\n continue\n\n # make sure we only have hits from syncChan and extChan\n # for entry in set(tVals[\"Entry$\"]):\n # idxs = [idx for idx in range(len(tVals[\"Entry$\"])) if tVals[\"Entry$\"][idx]==entry]\n # chans = [tVals[\"channel\"][idx] for idx in idxs]\n # if not set([extChan,syncChan]).issubset(set(chans)):\n # print(\"NOPE:\",chans)\n\n gds = GATDataSet(int(run))\n runTime = gds.GetRunTime()/1e9\n if len(tVals[\"Entry$\"])==0:\n print(\"Run %d, %.2f sec. Found no cts.\" % (run,runTime))\n continue\n\n syncRate = len(set(tVals[\"Entry$\"]))/runTime\n expectedCts = runTime * syncRateNominal\n extPCts = len([ch for ch in tVals[\"channel\"] if ch==extChan])\n syncCts = len([ch for ch in tVals[\"channel\"] if ch==syncChan])\n extPRate = extPCts/runTime\n syncRate = syncCts/runTime\n\n syncAmp = [tVals[\"fitAmp\"][i] for i in range(len(tVals[\"fitAmp\"])) if tVals[\"channel\"][i]==syncChan]\n syncAmp = np.asarray(syncAmp)\n muS, sigS = 0, 0\n if len(syncAmp)>0:\n muS, sigS = np.mean(syncAmp), np.std(syncAmp)\n\n extENF = [tVals[\"trapENFCal\"][i] for i in range(len(tVals[\"trapENFCal\"])) if tVals[\"channel\"][i]==extChan]\n extENF = np.asarray(extENF)\n muE, sigE = 0, 0\n if len(extENF)>0:\n muE, sigE = np.mean(extENF), np.std(extENF)\n\n print(\"Run %d, %.2f sec. 
#Expect %d #Sync %d (%.2f Hz) #extP %d (%.2f Hz) muE %.2f sigE %.2f muS %.2f sigS %.2f\" % (run,runTime,expectedCts,syncCts,syncRate,extPCts,extPRate,muE,sigE,muS,sigS))\n\n # fill the plot arrays\n xArr.extend([tVals[\"trapENFCal\"][i] for i in range(len(tVals[\"trapENFCal\"])) if tVals[\"channel\"][i]==extChan])\n yArr.extend([tVals[\"fitSlo\"][i] for i in range(len(tVals[\"fitSlo\"])) if tVals[\"channel\"][i]==extChan])\n\n return\n\n # make a plot for this range\n fig.clear()\n xLo, xHi, yLo, yHi = 0, 10, -20, 300 # test 3\n # xLo, xHi, yLo, yHi = 50, 100, -20, 200 # test 2\n bpY, bpX = 2, 0.1\n nBinsY, nBinsX = int((yHi-yLo)/bpY), int((xHi-xLo)/bpX)\n try:\n plt.hist2d(xArr, yArr, bins=[nBinsX,nBinsY], range=[[xLo,xHi],[yLo,yHi]], norm=LogNorm())\n plt.colorbar()\n plt.xlabel(\"trapENFCal (keV)\",horizontalalignment='right',x=1.0)\n plt.ylabel(\"fitSlo\",horizontalalignment='right',y=1.0)\n plt.title(\"Range %d, Channel %d\" % (pIdx, extChan))\n plt.tight_layout()\n plt.savefig(\"../plots/extPulser_idx%d.pdf\" % pIdx)\n except ValueError:\n pass\n\n\ndef fitSloEfficiency1():\n \"\"\" Plot the efficiency vs. energy \"\"\"\n from ROOT import TChain, gROOT\n gROOT.ProcessLine(\"gErrorIgnoreLevel = 3001;\")\n\n # 20: [7247, 7259], (run) P42661C P1D3 688\n pIdx, extChan, syncChan = 20, 688, 672\n calInfo = ds.CalInfo()\n runList = calInfo.GetSpecialRuns(\"extPulser\",pIdx)\n\n # get fitSlo value from the DB for this channel.\n # calIdx's\n # 33: [[6904,6925],6904,7274],\n # 34: [[7275,7279,7281,7295],7275,7614]\n dsNum, modNum, calIdx = 0, 1, 33\n calDB = db.TinyDB('../calDB.json')\n pars = db.Query()\n fsD = ds.getDBRecord(\"fitSlo_ds%d_idx%d_m%d_Peak\" % (dsNum, calIdx, modNum), False, calDB, pars)\n fsCut = fsD[extChan][2] # 90% value (used in LAT3)\n\n xArr, yArr = [], []\n for run in runList:\n fileList = []\n subFiles = glob.glob(\"%s/lat/latSkimDS%d_run%d_*.root\" % (ds.specialDir, ds.GetDSNum(run), run))\n for idx in range(len(subFiles)):\n thisFile = \"%s/lat/latSkimDS%d_run%d_%d.root\" % (ds.specialDir, ds.GetDSNum(run), run, idx)\n if not os.path.isfile(thisFile):\n print(\"File doesn't exist: \",thisFile)\n else:\n fileList.append(thisFile)\n\n latChain = TChain(\"skimTree\")\n for f in fileList: latChain.Add(f)\n tNames = [\"Entry$\",\"run\",\"channel\",\"mH\",\"trapENFCal\",\"fitSlo\"]\n theCut = \"(channel==%d || channel==%d) && mH==2\" % (syncChan,extChan)\n tVals = wl.GetVX(latChain,tNames,theCut)\n nPass = len(tVals[\"Entry$\"])\n if nPass == 0: continue\n\n xArr.extend([tVals[\"trapENFCal\"][i] for i in range(nPass) if tVals[\"channel\"][i]==extChan])\n yArr.extend([tVals[\"fitSlo\"][i] for i in range(nPass) if tVals[\"channel\"][i]==extChan])\n\n # calculate efficiency\n xLo, xHi, bpX = 0, 10, 0.2\n xAcc = np.arange(xLo, xHi, bpX)\n xArr, yArr = np.asarray(xArr), np.asarray(yArr)\n yEff = []\n errBars = []\n for i in range(int((xHi-xLo)/bpX),0,-1):\n eHi, eLo = i*bpX, (i-1)*bpX\n idx = np.where((xArr > eLo) & (xArr < eHi))\n ySlice = yArr[idx]\n idx2 = np.where(ySlice < fsCut)\n nTot, nPass = len(ySlice), len(ySlice[idx2])\n\n if len(yEff)==0 and nTot==0:\n eff = 100\n elif nTot == 0:\n eff = yEff[-1]\n else:\n eff = 100 * nPass/nTot\n # err = sm.stats.proportion.proportion_confint(nPass, nTot, alpha=0.05, method='beta')\n print(\"%d %.1f-%.1f keV %.2f%%\" % (i, eLo, eHi, eff))\n yEff.append(eff)\n yEff.reverse()\n\n # make a plot\n fig = plt.figure(figsize=(10,6),facecolor='w')\n p1 = plt.subplot(111)\n\n xLo, xHi, bpX = 0, 10, 0.2\n yLo, yHi, bpY = -20, 1000, 2.\n 
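# binning for the check plot: 0.2 keV per energy bin and 2 fitSlo units per bin over the full y range\n    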
nBinsY, nBinsX = int((yHi-yLo)/bpY), int((xHi-xLo)/bpX)\n h = p1.hist2d(xArr, yArr, bins=[nBinsX,nBinsY], range=[[xLo,xHi],[yLo,yHi]], norm=LogNorm())\n\n p1.axhline(fsCut, color='black', linewidth=3)\n p1.set_xlabel(\"trapENFCal (keV)\",horizontalalignment='right',x=1.0)\n p1.set_ylabel(\"fitSlo\",horizontalalignment='right',y=1.0)\n p1.set_title(\"Range %d, Channel %d. fitSlo cut > %.2f\" % (pIdx, extChan, fsCut))\n # plt.colorbar(h[3], ax=p1)\n\n p2 = p1.twinx()\n p2.plot(xAcc, yEff, ls='steps-post',color='red',linewidth=2.)\n\n p2.set_ylim(0,110)\n p2.set_ylabel('% Efficiency', color='r', horizontalalignment='right',y=1.0)\n p2.tick_params('y', colors='r')\n\n plt.tight_layout()\n plt.savefig(\"../plots/efficiency_idx%d.pdf\" % pIdx)\n\n\ndef fitSloEfficiency2():\n \"\"\" Plot efficiency vs energy for the 3 lowest ext pulser sets. \"\"\"\n\n from ROOT import TChain, gROOT\n gROOT.ProcessLine(\"gErrorIgnoreLevel = 3001;\")\n\n pIdxs = [18,19,20]\n\n for pIdx in pIdxs:\n\n avgE = {}\n effic = {}\n\n # pIdx 20 settings, taken from ORCA logs (run database)\n pIdx, extChan, syncChan = 20, 688, 672\n riseTime = 146 # ns\n attens = {7247:14, 7248:18, 7249:22, 7250:26, 7251:30, 7252:34, 7253:38, 7254:42, 7255:46, 7256:50, 7257:54, 7258:58, 7259:62}\n\n # get fitSlo value from the DB for this channel.\n dsNum, modNum, calIdx = 0, 1, 33\n calDB = db.TinyDB('../calDB.json')\n pars = db.Query()\n fsD = ds.getDBRecord(\"fitSlo_ds%d_idx%d_m%d_Peak\" % (dsNum, calIdx, modNum), False, calDB, pars)\n fsCut = fsD[extChan][2] # 90% value (used in LAT3)\n\n fig = plt.figure(figsize=(8,8),facecolor='w')\n p1 = plt.subplot(111)\n\n calInfo = ds.CalInfo()\n runList = calInfo.GetSpecialRuns(\"extPulser\",pIdx)\n for run in runList:\n\n fileList = ds.getLATRunList([run],\"%s/lat\" % (ds.specialDir))\n latChain = TChain(\"skimTree\")\n for f in fileList: latChain.Add(\"%s/lat/%s\" % (ds.specialDir,f))\n tNames = [\"Entry$\",\"channel\",\"trapENFCal\",\"fitSlo\"]\n theCut = \"(channel==%d || channel==%d) && mH==2\" % (syncChan, extChan)\n tVals = wl.GetVX(latChain,tNames,theCut)\n nPass = len(tVals[\"Entry$\"])\n if nPass == 0: continue\n enfArr = [tVals[\"trapENFCal\"][i] for i in range(nPass) if tVals[\"channel\"][i]==extChan]\n sloArr = [tVals[\"fitSlo\"][i] for i in range(nPass) if tVals[\"channel\"][i]==extChan]\n enfArr, sloArr = np.asarray(enfArr), np.asarray(sloArr)\n\n muE, stdE = np.mean(enfArr), np.std(enfArr)\n muF, stdF = np.mean(sloArr), np.std(sloArr)\n nTot = len(sloArr)\n nAcc = len([fs for fs in sloArr if fs < fsCut])\n eff = 100. 
* (nAcc/nTot)\n\n            print(\"Run %d Att %d muE %-5.2f stdE %-5.2f muF %-5.2f stdF %-5.2f nTot %-5d nAcc %-5d eff %.8f\" % (run,attens[run],muE,stdE,muF,stdF,nTot,nAcc,eff))\n\n            avgE[run] = muE\n            effic[run] = eff\n\n            p1.cla()\n            xLo, xHi, bpX = muE - stdE*3, muE + stdE*3, 0.2\n            yLo, yHi, bpY = -10, 300, 0.1\n            nBinsY, nBinsX = int((yHi-yLo)/bpY), int((xHi-xLo)/bpX)\n            p1.hist2d(enfArr, sloArr, bins=[nBinsX,nBinsY], range=[[xLo,xHi],[yLo,yHi]], norm=LogNorm())\n            p1.axhline(fsCut, color='black', linewidth=3)\n            p1.set_xlabel(\"trapENFCal (keV)\",horizontalalignment='right',x=1.0)\n            p1.set_ylabel(\"fitSlo\",horizontalalignment='right',y=1.0)\n            p1.set_title(\"run %d ch %d fsCut %.2f eff %.2f\" % (run,extChan,fsCut,eff))\n            plt.tight_layout()\n            plt.savefig(\"../plots/efficiency_idx%d_run%d.pdf\" % (pIdx, run))\n\n        # make arrays for plotting\n        yAtt, yEff, xEne = [], [], []\n        for run in sorted(attens):\n            if run in avgE.keys() and run in effic.keys():\n                yAtt.append(attens[run])\n                yEff.append(effic[run])\n                xEne.append(avgE[run])\n                print(run,attens[run],avgE[run],effic[run])\n\n        # make plot\n        fig.clear()\n        p1 = plt.subplot(211)\n\n        p1.plot(xEne, yAtt, \".\")\n        p1.set_title(\"Att. vs Energy\")\n        p1.set_xlabel(\"Energy (keV)\")\n        p1.set_ylabel(\"Atten (db)\")\n\n        p2 = plt.subplot(212)\n        p2.plot(xEne, yEff, \".\")\n        p2.set_title(\"Efficiency vs Energy\")\n        p2.set_xlabel(\"Energy (keV)\")\n        p2.set_ylabel(\"Efficiency\")\n        p2.set_ylim(0,110)\n\n        plt.tight_layout()\n        plt.savefig(\"../plots/attVsE.pdf\")\n\n\ndef riseTimeStudy(pIdx=20):\n    \"\"\" How much does fitSlo jump around? (unfinished stub; takes a pulser-range index) \"\"\"\n    calInfo = ds.CalInfo()\n    runList = calInfo.GetSpecialRuns(\"extPulser\",pIdx)\n    for run in runList:\n        fileList = ds.getLATRunList([run],\"%s/lat\" % (ds.specialDir))\n\n\n\nif __name__==\"__main__\":\n    main()","sub_path":"sandbox/ext1.py","file_name":"ext1.py","file_ext":"py","file_size_in_byte":24463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"619001815","text":"import pandas as pd\nimport os\nimport pickle\nfrom matplotlib import pyplot as plt\n\ndata_path = os.path.join(os.getcwd(), \"..\", \"data\")\ntest_users_data_eval_path = os.path.join(data_path, \"clean\",\n \"test_users_data_eval\")\ntest_users_all_metrics_path = os.path.join(data_path, \"clean\",\n \"test_users_all_metrics\")\ntest_users_all_metrics_path_subscribers = os.path.join(data_path, \"clean\",\n \"test_users_all_metrics_subscribers\")\ntest_users_all_metrics_path_10 = os.path.join(data_path, \"clean\",\n \"test_users_all_metrics_10\")\ntest_users_all_metrics_path_10_filter = os.path.join(data_path, \"clean\",\n \"test_users_all_metrics_10_filter\")\ntest_users_all_metrics_path_20 = os.path.join(data_path, \"clean\",\n \"test_users_all_metrics_20\")\ntest_users_all_metrics_path_20_filter = os.path.join(data_path, \"clean\",\n \"test_users_all_metrics_20_filter\")\n\nwith open(test_users_data_eval_path, \"rb\") as outfile:\n    ratable_test_users = pickle.load(outfile)\n\nall_metrics = pd.read_json(test_users_all_metrics_path_subscribers)\ntest_users_id = list(set(all_metrics['user_id']))\nlen_histories = list()\nnb_rec = list()\nsubscribers = list()\nfor user_id in test_users_id:\n    len_histories.append(ratable_test_users[user_id]['len_history'])\n    nb_rec.append(ratable_test_users[user_id]['nbrec'])\n    subscribers.append(ratable_test_users[user_id]['subscriber'])\nnew_data = {'user_id': test_users_id,\n            'len_history': len_histories,\n            'nb_rec': nb_rec,\n            'subscriber': subscribers}\nnew_data 
= pd.DataFrame(new_data, columns=[\"user_id\", \"len_history\",\n \"nb_rec\", \"subscriber\"])\nall_metrics = all_metrics.merge(new_data, left_on=\"user_id\",\n right_on=\"user_id\", how=\"left\")\n\npop_data = all_metrics[all_metrics['rec_id'] == 'pop']\ncf_data = all_metrics[all_metrics['rec_id'] == 'cf']\ncontent_data = all_metrics[all_metrics['rec_id'] == 'content']\nall_metrics['bin_hist'] = pd.cut(all_metrics['len_history'],\n [0, 1, 2, 3, 4, 5, 10, 15, 20, 30, 40,\n 200])\nall_metrics['bin_rec'] = pd.cut(all_metrics['nb_rec'],\n [0, 1, 2, 3, 4, 5, 10, 15, 20])\nprint(\"ctr\")\nprint(pop_data['ctr'].mean())\nprint(cf_data['ctr'].mean())\nprint(content_data['ctr'].mean())\nprint(\"arhr\")\nprint(pop_data['arhr'].mean())\nprint(cf_data['arhr'].mean())\nprint(content_data['arhr'].mean())\nprint(\"precision\")\nprint(pop_data['precision'].mean())\nprint(cf_data['precision'].mean())\nprint(content_data['precision'].mean())\nprint(\"recall\")\nprint(pop_data['recall'].mean())\nprint(cf_data['recall'].mean())\nprint(content_data['recall'].mean())\nprint(\"f1\")\nprint(pop_data['f1'].mean())\nprint(cf_data['f1'].mean())\nprint(content_data['f1'].mean())\n\n\n\n\n\n# ctr = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'ctr'])\n# aggregate_results = ctr.groupby(['bin_hist', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n#\n# arhr = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'arhr'])\n# aggregate_results = arhr.groupby(['bin_hist', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n#\n# precision = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'precision'])\n# aggregate_results = precision.groupby(['bin_hist', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n#\n# recall = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'recall'])\n# aggregate_results = recall.groupby(['bin_hist', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n#\n# f1 = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'f1'])\n# aggregate_results = f1.groupby(['bin_hist', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n#\n#\n# ctr = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'ctr'])\n# aggregate_results = ctr.groupby(['bin_rec', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n#\n# arhr = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'arhr'])\n# aggregate_results = arhr.groupby(['bin_rec', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n#\n# precision = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'precision'])\n# aggregate_results = precision.groupby(['bin_rec', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n#\n# recall = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'recall'])\n# aggregate_results = recall.groupby(['bin_rec', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n#\n# f1 = all_metrics.filter(['bin_hist', 'bin_rec', 'rec_id', 'f1'])\n# aggregate_results = f1.groupby(['bin_rec', 'rec_id']).mean().unstack()\n# aggregate_results.plot()\n# plt.show()\n","sub_path":"src/Evaluation/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201880685","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('form/', views.form_data, name='form'),\n    path('update/<int:id>/', views.update, name='update'),\n    path('delete/<int:id>/', views.delete, name='deleteStd'),\n\n    path('followup_form/', views.follow_up_form, name='followup'),\n    path('followup_all/', views.follow_up_all, name='followupall'),\n\n    path('followup_detail/<int:id>/', views.follow_up_detail, name='followupdetail'),\n    path('followup_delete/<int:id>/', views.follow_up_delete, name='followupdelete'),\n\n    path('login/', views.login_form, name='login'),\n    path('logout/', views.logout_user, name='logout'),\n]\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"90610247","text":"BIT = 1\nKILOBIT = 1024 * BIT\nMEGABIT = 1024 * KILOBIT\n\nBYTE = 8 * BIT\nKILOBYTE = 1024 * BYTE\nMEGABYTE = 1024 * KILOBYTE\n\nfile_size = 100 * MEGABYTE\nspeed = 100 * MEGABIT\ntime = file_size / speed\n\nprint(time)\n","sub_path":"data-types/solution/int_download_time.py","file_name":"int_download_time.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"158823523","text":"import csv\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\n\n# Load the data\n\nwith open('LifeCycleSavings V1.02.csv', 'r') as file:\n    reader = csv.reader(file)\n    feature_names = [next(reader)]\n    features = list(reader)\n\nprint(\"\\nLoaded data\")\nprint(\"Feature Names:\\n\", feature_names)\nprint(\"\\nFeatures:\\n\", features)\n\n### NOISE AND ANOMALY REMOVAL ###\n\n# Cleaning Noise 1: remove records with anomaly of having empty fields\n\nfor record in features[:]:\n    for value in record[:]:\n        if value == '':\n            features.remove(record)\n            break\n    \nprint(\"\\nCleaned Noise 1: remove records with anomaly of having empty fields\\n\")\nprint(\"\\nFeatures:\\n\", features) \n\n\n# Cleaning Noise 2: remove duplicate records\n\nno_duplicates = []\n\nfor record in features:\n    if record not in no_duplicates:\n        no_duplicates.append(record)\n    \nfeatures = no_duplicates\n\nprint(\"\\nCleaned Noise 2: remove duplicate records\\n\")\nprint(\"\\nFeatures:\\n\", features)\n    \n    \n# Transforming feature_names and features to numpy array\n\nfeature_names = np.array(feature_names)\nfeatures = np.array(features)\n\n\n# Cleaning Noise 3: remove the irrelevant feature Country \n\nfeature_names = feature_names[:, 1:]\nfeatures = features[:, 1:]\nprint(\"\\nCleaned Noise 3: remove the irrelevant feature Country\\n\")\nprint(\"Feature Names:\\n\", feature_names)\nprint(\"\\nFeatures:\\n\", features)\n\n\n\n# Removing outliers\n\nprint(\"\\nRemoving Outliers\")\n\n# convert the features' values to floats\n\nfeatures_floats = features.astype(np.float)\nprint(features_floats)\n\n# calculate Q1, Q3, and IQR for each feature\nQ1 = []\nQ3 = []\nIQR = []\n\nfor column in features_floats.T:\n    sorted_column = np.sort(column)\n    q1 = np.quantile(sorted_column,0.25)\n    Q1.append(q1)\n    q3 = np.quantile(sorted_column,0.75)\n    Q3.append(q3)\n    IQR.append(q3 - q1)\n\nprint(\"\\n1st Quartile:\\n\", Q1)\nprint(\"\\n3rd Quartile:\\n\", Q3)\nprint(\"\\nInter Quartile Range:\\n\", IQR)\n\n# calculate upper bound and lower bound for each feature\n\nupper_bound = []\nlower_bound = []\n\nfor i in range(len(features.T)):\n    upper_bound.append(Q3[i]+1.5*IQR[i])\n    lower_bound.append(Q1[i]-1.5*IQR[i])\n    \nprint(\"\\nUpper 
Bound:\\n\", upper_bound)\nprint(\"\\nLower Bound:\\n\", lower_bound)\n \n# Remove records where they have values which exceed the bounds\n\noutliers_list = []\n\n# change features to list for convenience\n\nfeatures = features.tolist()\n\nfor i in range(len(features)):\n for j in range(len(features[0])):\n if features_floats[i][j] > upper_bound[j] or features_floats[i][j] < lower_bound[j]:\n outliers_list.append(features[i])\n \nfor outlier in outliers_list:\n features.remove(outlier)\n\n# change features back to numpy array\n\nfeatures = np.array(features) \n \nprint(\"\\nOutliers Detected:\\n\", outliers_list)\nprint(\"\\nFeatures:\\n\", features) \n \n \n\n### CATEGORIZING THE DATA USING K-MEANS CLUSTERING ALGORITHM ###\n\n# convert the features' values to floats\n\nfeatures_floats = features.astype(np.float)\nprint(features_floats)\n\n# sort features based on income\n\nsorted_features = features_floats[np.argsort(features_floats[:, 3])]\nprint(sorted_features)\n\n# extract income column\n\nincome = sorted_features[:, 3]\n\n# reshape column to use in KMeans function\n\nincome = np.reshape(income, (-1,1))\nprint(income)\n\n# using KMeans function to form 3 clusters from income\n\nincome_kmeans = KMeans(n_clusters=3, random_state=0).fit(income)\n\n# get the labels\nlabels = income_kmeans.labels_\nprint(labels)\n\n# match category names to labels \nlabel_names = []\nfor label in labels:\n if str(label) not in label_names:\n label_names.append(str(label))\ncategory_names = [\"low\", \"medium\", \"high\"]\nprint(label_names)\nprint(category_names)\n\n# predicting the income column from the original unsorted array to \n# maintain original order of records\n\nincome = features_floats[:, 3]\nincome = np.reshape(income, (-1,1))\nincome_kmeans.predict(income) \nlabels = income_kmeans.predict(income) \nprint(labels)\n\n# assigning the correct category name to records \n\nincome_category = []\n\nfor label in labels:\n if str(label) == label_names[0]:\n income_category.append(\"low\")\n elif str(label) == label_names[1]:\n income_category.append(\"medium\")\n elif str(label) == label_names[2]:\n income_category.append(\"high\")\nprint(income_category)\n \n# create csv file containing Income column and Income_Category column\nnewCSV = []\nnewCSV.append([\"Income\", \"Income_Category\"])\nfor i in range(len(features)):\n newCSV.append([features[i, 3], income_category[i]])\nnp.savetxt('Income_IncomeCategory.csv', newCSV, delimiter=',', fmt='%s')\nprint(newCSV)\n\n# prepare income for plotting by flattening array\nincome = income.flatten()\n\n# visualizing the clusters \nplt.subplots(figsize=(10,5))\n\nfor i in range(len(labels)):\n \n if str(labels[i]) == label_names[0]:\n c1 = plt.scatter(income[i], 0, c='g', marker=\".\")\n \n elif str(labels[i]) == label_names[1]:\n c2 = plt.scatter(income[i], 0, c='b', marker=\".\")\n \n elif str(labels[i]) == label_names[2]:\n c3 = plt.scatter(income[i], 0, c='r', marker=\".\")\n \nplt.legend([c1, c2, c3], category_names)\nplt.tick_params(axis='y', which='both', left=False, right=False, labelleft=False) \nplt.xlabel(\"Income\")\nplt.ylim(0, 0)\nplt.title(\"Income Clusters by K-Means\")\nplt.show()\n\n\n\n \n\n\n \n \n\n","sub_path":"K Means Clustering - Categorizing Population Based on Income/Kmeans_Categorizing_Income.py","file_name":"Kmeans_Categorizing_Income.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"174350214","text":"#!/usr/bin/env python\nimport 
pika\nimport sys\nimport mysql.connector\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\nchannel = connection.channel()\n\nchannel.exchange_declare(exchange='messenger',\n                         exchange_type='direct')\n\nresult = channel.queue_declare(exclusive=True)\nqueue_name = result.method.queue\n\nseverities = sys.argv[1:]\nif not severities:\n    sys.stderr.write(\"Usage: %s [info] [warning] [error]\\n\" % sys.argv[0])\n    sys.exit(1)\n\nfor severity in severities:\n    channel.queue_bind(exchange='messenger',\n                       queue=queue_name,\n                       routing_key=severity)\n\nprint(' [*] Waiting for logs. To exit press CTRL+C')\n\ndef callback(ch, method, properties, body):\n    print(\" [x] %r:%r\" % (method.routing_key, body))\n    # Specifying the ODBC driver, server name, database, etc. directly\n    #cnxn = mysql.connector.connect(\"DRIVER={MySQL ODBC 5.3 ANSI Driver}; SERVER=localhost;DATABASE=symfony; UID=root; PASSWORD=;\")\n    cnxn = mysql.connector.connect(user='root', database='symfony')\n    cursor = cnxn.cursor()\n    cursor.execute(\"INSERT INTO message (origine, version, status, template_name) VALUES ('test_python', 'v1', '1','welcome')\")\n    cnxn.commit()\n    print(cursor.rowcount, \"record inserted.\")\n\nchannel.basic_consume(callback,\n                      queue=queue_name,\n                      no_ack=True)\n\nchannel.start_consuming()\n\n","sub_path":"receive_messenger.py","file_name":"receive_messenger.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"201227549","text":"from Test import Test\nfrom Test1 import Test1\n\nclass Laba4 :\n\n    def main() :\n        a = int(input(\"Enter a: \"))\n        b = int(input(\"Enter b: \"))\n        c = int(input(\"Enter c: \"))\n\n        #First Task\n        result = Test.solve(a, b, c)\n        print(\"a = %d, b = %d, c = %d \\n\" % (a,b,c))\n        print(\"There is one pair matching: \", result)\n        \n        #Second Task\n        result1 = Test1.solve(a, b, c)\n        print(\"Numbers do have the same parity: \", result1 , \"\\n\")\nLaba4.main() \n    \n","sub_path":"Laba4/Python/lab4.py","file_name":"lab4.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"206382946","text":"\"\"\" This module contains a DICOM routing engine which applies :class:`rules <Rule>`\n    to :class:`datasets <DataSet>`. Each rule contains \n    :class:`conditions <Condition>` and :class:`actions <Action>`: if all \n    conditions are met, then the actions are applied on the dataset.\n    \n    The following conditions are available :\n    * :class:`AlwaysTrue`\n    * :class:`AlwaysFalse`\n    * :class:`And`\n    * :class:`Or`\n    * :class:`Not`\n    * :class:`ElementMatch`\n    \n    The following actions are available :\n    * :class:`SetElement`\n    * :class:`DeleteElement`\n    * :class:`EmptyElement`\n    * :class:`ModifyDataSet`\n    * :class:`RestoreDataSet`\n    * :class:`SaveDataSet`\n    \n    The following example modifies datasets if they match the criteria, and\n    saves the modified datasets to a directory. ::\n    \n        # Get some data sets (e.g. 
from a PACS)\n datasets = []\n \n condition = medipy.io.dicom.routing.ElementMatch(\"patients_name\", \"Doe^John\")\n \n actions = [\n medipy.io.dicom.routing.EmptyElement(\"patients_name\"),\n medipy.io.dicom.routing.SetElement(\"patient_id\", \"12345\"),\n medipy.io.dicom.routing.EmptyElement(\"patients_birth_date\"),\n medipy.io.dicom.routing.SetElement(\"study_id\", \"FOO\"),\n medipy.io.dicom.routing.SaveDataSet(\"/some/where\", \"hierarchical\"),\n ]\n \n rule = medipy.io.dicom.routing.Rule(condition, actions)\n \n for dataset in datasets :\n rule(dataset)\n\"\"\"\n\nfrom actions import (Action, SetElement, DeleteElement, EmptyElement, \n ModifyDataSet, RestoreDataSet, SaveDataSet)\nfrom conditions import (Condition, AlwaysTrue, AlwaysFalse, And, Or, Not,\n ElementMatch)\nfrom Rule import Rule\n\n__all__ = [\n \"Action\", \"SetElement\", \"DeleteElement\", \"EmptyElement\", \"ModifyDataSet\", \n \"RestoreDataSet\", \"SaveDataSet\", \"Condition\", \"AlwaysTrue\", \"AlwaysFalse\", \n \"And\", \"Or\", \"Not\", \"ElementMatch\", \"Rule\"\n]\n","sub_path":"lib/medipy/io/dicom/routing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"468688552","text":"import os\nimport random\nimport numpy as np\nimport pyopencl as cl\nimport pyopencl.array as pycl_array\nfrom pyopencl.reduction import ReductionKernel\nfrom collections import defaultdict\n\n\n# Get the OpenCL kernel\nkernel_path = os.path.join(\n os.path.dirname(__file__),\n \"kernels/brute-force.cl\"\n)\nkernel = open(kernel_path, \"r\").read()\n\n\nclass Backend:\n \"\"\"\n A class for the OpenCL backend to the simulator.\n\n This class shouldn't be used directly, as many of the\n methods don't have the same input checking as the State\n class.\n \"\"\"\n\n def __init__(self, num_qubits, dtype=np.complex64):\n \"\"\"\n Initialize a new OpenCL Backend\n\n Takes an argument of the number of qubits to use\n in the register, and returns the backend.\n \"\"\"\n self.num_qubits = num_qubits\n self.dtype = dtype\n\n self.context = cl.create_some_context()\n self.queue = cl.CommandQueue(self.context)\n self.program = cl.Program(self.context, kernel).build(options=\"-cl-no-signed-zeros -cl-mad-enable -cl-fast-relaxed-math\")\n\n # Buffer for the state vector\n self.buffer = pycl_array.to_device(\n self.queue,\n np.eye(2**num_qubits, 1, dtype=dtype)\n )\n\n def apply_gate(self, gate, target):\n \"\"\"Applies a gate to the quantum register\"\"\"\n\n self.program.apply_gate(\n self.queue,\n [int(self.buffer.shape[0] / 2)],\n None,\n self.buffer.data,\n np.int32(target),\n self.dtype(gate.a),\n self.dtype(gate.b),\n self.dtype(gate.c),\n self.dtype(gate.d)\n )\n\n def apply_controlled_gate(self, gate, control, target):\n \"\"\"Applies a controlled gate to the quantum register\"\"\"\n\n self.program.apply_controlled_gate(\n self.queue,\n [int(self.buffer.shape[0] / 2)],\n None,\n self.buffer.data,\n np.int32(control),\n np.int32(target),\n self.dtype(gate.a),\n self.dtype(gate.b),\n self.dtype(gate.c),\n self.dtype(gate.d)\n )\n \n def measure(self, samples=1):\n \"\"\"Measure the state of a register\"\"\"\n # This is a really horrible method that needs a rewrite - the memory\n # is attrocious\n\n probabilities = self.probabilities()\n\n choices = np.random.choice(\n np.arange(0, 2**self.num_qubits), \n samples, \n p=probabilities\n )\n \n results = defaultdict(int)\n for i in choices:\n results[np.binary_repr(i, 
width=self.num_qubits)] += 1\n        \n        return dict(results)\n    \n\n    def qubit_probability(self, target):\n        \"\"\"Get the probability of a single qubit being measured as '0'\"\"\"\n\n        preamble = \"\"\"\n        #include <pyopencl-complex.h>\n\n        float probability(int target, int i, cfloat_t amp) {\n            if ((i & (1 << target )) != 0) {\n                return 0;\n            }\n            float abs = cfloat_abs(amp);\n            return abs * abs;\n        }\n        \"\"\"\n\n        kernel = ReductionKernel(\n            self.context, \n            np.float, \n            neutral = \"0\",\n            reduce_expr=\"a + b\",\n            map_expr=\"probability(target, i, amps[i])\",\n            arguments=\"__global cfloat_t *amps, __global int target\",\n            preamble=preamble\n        )\n\n        return kernel(self.buffer, target).get()\n\n    def measure_qubit(self, target, samples):\n        probability_of_0 = self.qubit_probability(target)\n\n        choices = np.random.choice(\n            [0, 1], \n            samples, \n            p=[probability_of_0, 1-probability_of_0]\n        )\n        \n        results = defaultdict(int)\n        for i in choices:\n            results[np.binary_repr(i, width=1)] += 1\n        \n        return dict(results)\n\n    def single_amplitude(self, i):\n        \"\"\"Gets a single probability amplitude\"\"\"\n        out = pycl_array.to_device(\n            self.queue,\n            np.empty(1, dtype=np.complex64)\n        )\n\n        self.program.get_single_amplitude(\n            self.queue, \n            (1, ), \n            None, \n            self.buffer.data,\n            out.data,\n            np.int32(i)\n        )\n\n        return out[0]\n\n    def amplitudes(self):\n        \"\"\"Gets the probability amplitudes\"\"\"\n        return self.buffer.get()\n    \n    def probabilities(self):\n        \"\"\"Gets the squared absolute value of each of the amplitudes\"\"\"\n        out = pycl_array.to_device(\n            self.queue,\n            np.zeros(2**self.num_qubits, dtype=np.float32)\n        )\n\n        self.program.calculate_probabilities(\n            self.queue,\n            self.buffer.shape,\n            None,\n            self.buffer.data,\n            out.data\n        )\n\n        return out.get()\n    \n    def release(self):\n        self.buffer.base_data.release()\n","sub_path":"qcgpu/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"568939999","text":"import csv\nimport sys\nfrom os import walk\nfrom os.path import join, basename, dirname, exists\n\nimport jams\nfrom jams.util import smkdirs\n\nfrom tempo_eval.parser.util import create_jam, timestamps_to_bpm, create_tempo_annotation, \\\n    get_bibtex_entry\nfrom tempo_eval.evaluation import get_references_path\n\nGTZAN = 'GTZAN'\n\n\ndef parse(*args, **kwargs):\n    input_audio_dir = args[0]\n    parse_marchand2015()\n    parse_percival2014(input_audio_dir)\n    parse_tzanetakis2013(input_audio_dir)\n\n\ndef parse_marchand2015():\n    output_dir = get_references_path('gtzan', 'jams')\n    smkdirs(output_dir)\n    input_annotation_dir = get_references_path('gtzan', 'marchand2015')\n\n    for (dirpath, _, filenames) in walk(input_annotation_dir):\n        for filename in [f for f in filenames if f.endswith('.jams')]:\n            jams_file = join(output_dir, filename.replace('.wav', ''))\n            jam = jams.load(join(dirpath, filename))\n\n            sandbox1 = jam.annotations['tag_open'][1]['sandbox']\n            tags = []\n\n            # identify meter, so find corresponding beats\n            meter = 1\n            if 'meter' in sandbox1 and '/' in sandbox1['meter']:\n                meter = int(sandbox1['meter'].split('/')[0])\n                tags.append(sandbox1['meter'])\n\n            # iterate over beats\n            beats = jam.annotations['beat'][0]['data']\n            timestamps = [b.time for b in beats]\n\n            # ICBI-based\n            median_bpm, _, std, cv = timestamps_to_bpm(timestamps, meter)\n            jam.annotations.append(_create_marchand2015_median_cor_beat_tempo_annotation(median_bpm))\n\n            # IBI-based\n            median_bpm, _, std, cv = 
timestamps_to_bpm(timestamps)\n jam.annotations.append(_create_marchand2015_median_beat_tempo_annotation(median_bpm))\n\n # derive some additional tags from first sandbox\n sandbox0 = jam.annotations['tag_open'][0]['sandbox']\n for k in sandbox0.keys():\n if 'yes' == sandbox0[k]:\n tags.append(k)\n else:\n tags.append('no_' + k)\n\n # simply append tags to first 'tag_open' annotation, so that we can later\n # process them easily\n for tag in set(tags):\n if len(tag.strip()) > 0:\n jam.annotations['tag_open'][0].append(time=0.0, value=tag, duration='nan', confidence=1.0)\n\n jam.save(jams_file)\n\n\ndef parse_percival2014(input_audio_dir):\n output_dir = get_references_path('gtzan', 'jams')\n input_annotation_file = get_references_path('gtzan', 'percival2014', 'genres_tempos.mf')\n\n with open(input_annotation_file, mode='r') as file:\n reader = csv.reader(file, delimiter='\\t')\n for row in reader:\n filename = basename(row[0])\n bpm = float(row[1])\n jams_file = join(output_dir, filename.replace('.wav', '.jams'))\n if exists(jams_file):\n jam = jams.load(jams_file)\n else:\n jam = create_jam(filename, input_audio_dir)\n jam.annotations.append(_create_percical2014_tempo_annotation(bpm))\n jam.save(jams_file)\n\n\ndef parse_tzanetakis2013(input_audio_dir):\n output_dir = get_references_path('gtzan', 'jams')\n input_annotation_file = get_references_path('gtzan', 'tzanetakis2013', 'genres_tempos.mf')\n\n with open(input_annotation_file, mode='r') as file:\n reader = csv.reader(file, delimiter='\\t')\n for row in reader:\n genre = basename(dirname(row[0])).replace('hiphop', 'hip-hop') # to please tag_gtzan namespace\n filename = basename(row[0])\n bpm = float(row[1])\n jams_file = join(output_dir, filename.replace('.wav', '.jams'))\n if exists(jams_file):\n jam = jams.load(jams_file)\n else:\n jam = create_jam(filename, input_audio_dir)\n jam.annotations.append(_create_tzanetakis2013_tempo_annotation(bpm))\n jam.annotations.append(_create_tzanetakis2013_genre_annotation(genre))\n jam.save(jams_file)\n\n\ndef _create_percical2014_tempo_annotation(bpm):\n tempo = create_tempo_annotation(bpm)\n tempo.annotation_metadata = jams.AnnotationMetadata(\n corpus=GTZAN,\n version='2.0',\n curator=jams.Curator(name='Graham Percival', email='graham@percival-music.ca'),\n data_source='',\n annotator={'bibtex': get_bibtex_entry('Percival2014'),\n 'ref_url': 'http://www.marsyas.info/tempo/'})\n return tempo\n\n\ndef _create_tzanetakis2013_tempo_annotation(bpm):\n tempo = create_tempo_annotation(bpm)\n tempo.annotation_metadata = jams.AnnotationMetadata(\n corpus=GTZAN,\n version='1.0',\n curator=jams.Curator(name='George Tzanetakis', email='gtzan@cs.uvic.ca'),\n data_source='',\n annotator={'bibtex': get_bibtex_entry('Tzanetakis2013'),\n 'ref_url': 'http://www.marsyas.info/tempo/'})\n return tempo\n\n\ndef _create_tzanetakis2013_genre_annotation(genre):\n tag = jams.Annotation(namespace='tag_gtzan')\n tag.annotation_metadata = jams.AnnotationMetadata(\n corpus=GTZAN,\n version='1.0',\n curator=jams.Curator(name='George Tzanetakis', email='gtzan@cs.uvic.ca'),\n data_source='',\n annotator={'bibtex': get_bibtex_entry('Tzanetakis2002'),\n 'ref_url': 'http://www.marsyas.info/tempo/'})\n tag.append(time=0.0, value=genre, duration='nan', confidence=1.0)\n return tag\n\n\ndef _create_marchand2015_median_beat_tempo_annotation(bpm):\n tempo = create_tempo_annotation(bpm)\n tempo.annotation_metadata = jams.AnnotationMetadata(\n corpus=GTZAN,\n version='GTZAN-Rhythm_v2_ismir2015_lbd_2015-10-28_IBI',\n 
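# note: IBI = inter-beat intervals; the version string records the derivation method, cf. the annotation_rules field below\n        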
curator=jams.Curator(name='Ugo Marchand & Quentin Fresnel', email='ugo.marchand@ircam.fr'),\n data_source='manual annotation',\n annotation_tools='derived from beat annotations',\n annotation_rules='median of inter beat intervals (IBI)',\n annotator={'bibtex': get_bibtex_entry('Marchand2015'),\n 'ref_url': 'https://hal.archives-ouvertes.fr/hal-01252603/document'})\n return tempo\n\n\ndef _create_marchand2015_median_cor_beat_tempo_annotation(bpm):\n tempo = create_tempo_annotation(bpm)\n tempo.annotation_metadata = jams.AnnotationMetadata(\n corpus=GTZAN,\n version='GTZAN-Rhythm_v2_ismir2015_lbd_2015-10-28_ICBI',\n curator=jams.Curator(name='Ugo Marchand & Quentin Fresnel', email='ugo.marchand@ircam.fr'),\n data_source='manual annotation',\n annotation_tools='derived from beat annotations',\n annotation_rules='median of inter corresponding beat intervals (ICBI)',\n annotator={'bibtex': get_bibtex_entry('Marchand2015'),\n 'ref_url': 'https://hal.archives-ouvertes.fr/hal-01252603/document'})\n return tempo\n\n\nif __name__ == '__main__':\n parse(*sys.argv[1:])","sub_path":"tempo_eval/parser/gtzan.py","file_name":"gtzan.py","file_ext":"py","file_size_in_byte":6971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"10820393","text":"\"\"\"\n Name: Jayrese Heslop\n Date: 4/7/2016\n \n A scene is just a piece of state seperate from everything else,\n example why would the main menu need to know the players position\n\"\"\"\nfrom ..Graphics import (render_utils, geometry, texture)\nfrom Batch import Batch\nfrom Camera import Camera\nfrom HeightmapBatch import HeightmapBatch\nfrom HeightmapInstance import HeightmapInstance\nfrom Instance import Instance\n\nimport Terrain as terrain\n\nclass Scene(object):\n def __init__(self):\n self.batches = None\n self.geometries = None\n self.textures = None\n self.camera = None\n self.hm_terrain_batches = None\n self.hm_terrains = None\n self.directional_lights = None\n self.point_lights = None\n self.spot_lights = None\n self.bg = [0, 64, 0]\n \n def onInit(self, manager):\n # Create a collection of directional lights\n self.directional_lights = dict()\n \n # Create a collection of point lights\n self.point_lights = dict()\n \n # Create a collection of spot lights\n self.spot_lights = dict()\n\n # Create a collection of batches\n self.batches = dict()\n \n # Create a collection of geometries\n self.geometries = dict()\n \n # Create a collection of textures\n self.textures = dict()\n\n # Create a collection of heightmap batches\n self.hm_terrain_batches = dict()\n\n # Create a collection of heightmap terrains\n self.hm_terrains = dict()\n \n # Create a projection matrix\n window = manager.get_window()\n width, height = window.get_size()\n \n # Create a camera\n self.camera = Camera(aspect=(float(width) / float(height)))\n \n def onEnter(self, manager):\n pass\n \n def onUpdate(self, manager, tpf):\n # Resize the camera if the window was resized\n window = manager.get_window()\n \n if(window.is_resized()):\n width, height = window.get_size()\n self.camera.resize(width, height)\n \n def onRender(self, manager, gbuffer, quad, g_s, m_s, a_s, d_s):\n # Clear the screen\n window = manager.get_window()\n bg_r, bg_g, bg_b = self.bg\n \n window.clear(bg_r, bg_g, bg_b)\n\n # Bind the gbuffer for writing to\n render_utils.enable_write_gbuffer(gbuffer)\n\n # Enable depth testing\n render_utils.enable_depth_test()\n \n # Enable multisampling\n render_utils.enable_multisampling()\n \n # Enable face culling\n 
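# (back-face culling, selected just below, skips triangles facing away from the camera)\n        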
render_utils.enable_culling()\n \n # Set mode to back face culling\n render_utils.set_backface_culling()\n\n # Bind the geometry shader\n g_s.bind()\n\n # Bind the camera\n self.camera.bind(g_s)\n \n # Render all of the batches\n for identifier, batch in self.batches.iteritems():\n batch.render(self.camera, g_s)\n \n # Unbind the camera\n self.camera.unbind(g_s)\n \n # Unbind the geometry shader\n g_s.unbind()\n\n # Bind the multitextured shader\n m_s.bind()\n\n # Bind the camera\n self.camera.bind(m_s)\n\n # Render all of the batches\n for identifier, batch in self.hm_terrain_batches.iteritems():\n batch.render(self.camera, m_s)\n \n # Unbind the camera\n self.camera.unbind(m_s)\n \n # Unbind the terrain shader\n m_s.unbind()\n\n # Disable face culling\n render_utils.disable_culling()\n \n # Disable multisampling\n render_utils.disable_multisampling()\n \n # Disable depth testing\n render_utils.disable_depth_test()\n\n # Disable writing to the gbuffer\n render_utils.disable_write_gbuffer(gbuffer)\n\n # Enable reading from the gbuffer\n render_utils.enable_read_gbuffer(gbuffer)\n\n # Bind the fullscreen quad\n geometry.bind(quad)\n\n # Do the ambient light shading pass\n a_s.bind()\n geometry.render(quad)\n a_s.unbind()\n\n # Do the directional light pass\n\n # Do the point light pass\n\n # Do the spotlight pass\n\n # Unbind the fullscreen quad\n geometry.unbind(quad)\n\n # Disable reading from the gbuffer\n render_utils.disable_read_gbuffer(gbuffer)\n\n def onExit(self, manager):\n # Destroy all of the geometries\n for identifier, _geometry in self.geometries.iteritems():\n geometry.destroy(_geometry)\n \n # Destroy all of the textures\n for identifier, _texture in self.textures.iteritems():\n texture.destroy(_texture)\n\n # Destroy all of the heightmap_terrains\n for identifier, _terrain in self.hm_terrains.iteritems():\n geometry.destroy(_terrain[\"geometry\"])\n\n del self.geometries\n del self.textures\n del self.hm_terrains\n \n def onDestroy(self, manager):\n self.camera.destroy()\n \n def get_camera(self):\n return self.camera\n\n def set_bg_color(self, r, g, b):\n self.bg[0] = r\n self.bg[1] = g\n self.bg[2] = b\n \n def load_geometry(self, identifier, path):\n self.geometries[identifier] = geometry.from_cmf(path)\n \n def load_texture(self, identifier, path, mipmap=False):\n self.textures[identifier] = texture.from_cif(path, mipmap)\n\n def load_static_batch(self, b_identifier, g_identifier, t_identifier):\n _geometry = self.geometries[g_identifier]\n _texture = self.textures[t_identifier]\n _batch = Batch(_geometry, _texture)\n \n self.batches[b_identifier] = _batch\n \n def unload_static_batch(self, b_identifier):\n _batch = self.batches[b_identifier]\n \n # Find the geometry from the collection\n for identifier, _geometry in self.geometries.iteritems():\n # Check if it's the one from the batch\n if(_batch.geometry is _geometry):\n # Remove it then stop looking\n self.unload_geometry(identifier)\n break\n \n # Find the texture from the collection\n for identifier, _texture in self.textures.iteritems():\n # Check if it's the one from the batch\n if(_batch.texture is _texture):\n # Remove it then stop looking\n self.unload_texture(identifier)\n break\n \n # Remove it from the collection\n del _batch\n\n def unload_texture(self, identifier):\n _texture = self.textures[identifier]\n \n # Destroy the texture\n texture.destroy(_texture)\n \n # Remove it from the collection\n del _texture\n \n def unload_geometry(self, identifier):\n _geometry = self.geometries[identifier]\n \n # 
Destroy the geometry\n        geometry.destroy(_geometry)\n        \n        # Remove it from the collection\n        del _geometry\n    \n    def add_instance(self, b_identifier, i_identifier):\n        _instance = Instance()\n        _batch = self.batches[b_identifier]\n        _batch.add_instance(i_identifier, _instance)\n        \n        return _instance\n    \n    def remove_instance(self, identifier, i_identifier):\n        _batch = self.batches[identifier]\n        _batch.remove_instance(i_identifier)\n    \n    def add_directional_light(self, identifier, direction, color):\n        light = dict(direction=direction, color=color)\n        \n        # Add it to the collection\n        self.directional_lights[identifier] = light\n    \n    def remove_directional_light(self, identifier):\n        # Remove it from the collection\n        del self.directional_lights[identifier]\n    \n    def add_point_light(self, identifier, position, color, attenuation):\n        light = dict(position=position, color=color, attenuation=attenuation)\n        \n        # Add it to the collection\n        self.point_lights[identifier] = light\n    \n    def remove_point_light(self, identifier):\n        # Remove it from the collection\n        del self.point_lights[identifier]\n\n    def load_heightmap_terrain(self, identifier, hm_path, tex1_id, tex2_id, tex3_id, bm_id, size, magnitude):\n        tex_1 = self.textures[tex1_id]\n        tex_2 = self.textures[tex2_id]\n        tex_3 = self.textures[tex3_id]\n        blend_map = self.textures[bm_id]\n        \n        _terrain = terrain.from_heightmap(hm_path, size, magnitude)\n        _geometry = _terrain[\"geometry\"]\n        _heights = _terrain[\"heights\"]\n        \n        self.hm_terrains[identifier] = _terrain\n        self.hm_terrain_batches[identifier] = HeightmapBatch(_geometry, _heights, tex_1, tex_2, tex_3, blend_map, size)\n\n        return _terrain\n    \n    def unload_heightmap_terrain(self, identifier):\n        _terrain = self.hm_terrains[identifier]\n        _terrain_batch = self.hm_terrain_batches[identifier]\n        terrain.unload_terrain(_terrain)\n        \n        # Remove it from the collection\n        del _terrain_batch\n        del _terrain\n    \n    def add_heightmap_instance(self, h_identifier, i_identifier):\n        _batch = self.hm_terrain_batches[h_identifier]\n        _instance = HeightmapInstance(_batch)\n\n        _batch.add_instance(i_identifier, _instance)\n        \n        return _instance\n    \n    def remove_heightmap_instance(self, h_identifier, i_identifier):\n        _batch = self.hm_terrain_batches[h_identifier]\n        _batch.remove_instance(i_identifier)\n","sub_path":"Carthage/Core/Scene.py","file_name":"Scene.py","file_ext":"py","file_size_in_byte":9518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"341967961","text":"from model.assignment import Assignment\nfrom model.list_pkg.assignment_list import AssignmentList\nfrom model.list_pkg.account_list import AccountList\nfrom model.list_pkg.artifact_list import ArtifactList\n\nclass AssignmentManager:\n\n    def __init__(self):\n        self.assignment_list = AssignmentList()\n        self.account_list = AccountList()\n        self.artifact_list = ArtifactList()\n\n    def create_assignment(self, paper_id, author_id):\n        paper = self.artifact_list.get_entry(paper_id)\n        author = self.account_list.get_entry(author_id)\n        assignment_id = paper_id\n        assignment = Assignment(assignment_id, Assignment.Status.WAITING_FOR_REVIEWS, paper, author)\n        self.assignment_list.add_entry(assignment_id,assignment)\n\n    def volunteer_paper(self, account_id, paper_id):\n        account_id = self.account_list.get_entry(account_id)\n        assignment = self.assignment_list.get_entry(paper_id)\n        assignment.pcm_volunteer(account_id)\n        self.assignment_list.update_entry(paper_id, assignment)\n\n    def get_volunteerable_papers(self):\n        lst = []\n        assignments = 
self.assignment_list.get_list()\n        for i in assignments:\n            if assignments[i].status == Assignment.Status.WAITING_FOR_REVIEWS:\n                lst.append(assignments[i].paper.create_entry_dictionary())\n        return lst","sub_path":"controller/assignment_manager.py","file_name":"assignment_manager.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"101483957","text":"# in executors.py\nmodel = \"sentence-transformers/paraphrase-distilroberta-base-v1\"\ntop_k = 10\n\n# in app.py\nport = 45679\nWORKSPACE_DIR = \"workspace\"\ndatafile = \"./data/memes.json\"\nmax_docs = 1000\nrandom_seed = 42\n","sub_path":"backend-text/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"228285468","text":"# a * b = exp(ln(a) + ln(b))\n# largest product\n# because exp is monotone\n# is largest sum\n# [0, 0], [0, 1], [0, 2], ... [0, 5],\n# [1, 1], [1, 2], ... [1, 5],\n# [2, 2], .... [2, 5],\n# ...\n# [5, 5]\n# max of all the things like ln(a[1]) + ln(a[2]) + ln(a[3]))\n# let's pretend that we wanted to solve\n# maximum sum\n# maximum sum of any span\n# first_list .. x\n# what was the maximum sum of any span of first_list?\n# what was the maximum suffix of first_list?\n# max_span of the first list = max ( max_sum_of_first_list, max_suffix_of_first_list + x)\n# max_suffix = max(max_suffix_of_first_list + x, 0)\nimport math\n\n# this computes the maximum total that can be made from\n# a suffix of xs, using at most limit elements\ndef max_suffix(xs, limit):\n    return max(sum(xs[i:]) for i in range(0, limit))\n\ndef largest_sum(xs, limit):\n    max_span = 0\n    for i, x in enumerate(xs):\n        max_span = max(max_span, max_suffix(xs[:i], limit - 1) + x)\n    return max_span\n\nassert(largest_sum([2, 9], 2) == 11)\nassert(largest_sum([0.69, 2.19], 2) == 2.88)\n\n# dynamic programming solution\n#\n# def max_suffix(xs, limit):\n#     if not xs or limit == 0:\n#         return 1\n#     best_so_far = 1\n#     for i in range(0, min(limit, len(xs))):\n#         x = int(xs[-1-i])\n#         if x == 0:\n#             break\n#         best_so_far *= x\n#     return best_so_far\n#\n# def largest_product(xs, limit):\n#     if limit > len(xs):\n#         raise ValueError\n#     if limit < 0:\n#         raise ValueError\n#     if not xs and limit != 0:\n#         raise ValueError\n#     if not xs or limit == 0:\n#         return 1\n#     max_span = 0\n#     for i, xChar in enumerate(xs):\n#         x = int(xChar)\n#         if x == 0:\n#             max_span = max_span\n#         else:\n#             max_span = max(max_span, max_suffix(xs[:i], limit - 1) * x)\n#     return max_span\n\n# NB: largest_product is only defined inside the commented-out block above, so\n# its test must stay commented out too, or importing this module raises a NameError:\n# assert(largest_product(\"0123456789\", 2) == 72)\n","sub_path":"Algo/largest-series-product/largest_series_product.py","file_name":"largest_series_product.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"342178046","text":"import numba.unittest_support as unittest\nfrom unittest import loader, case\nfrom os.path import isdir, isfile, join, dirname, basename\n\n\nclass TestLoader(loader.TestLoader):\n\n    # Unfortunately there is right now no other way to set\n    # the top-level directory.\n    # ('-t' only works after the 'discover' command).\n    _top_level_dir = dirname(dirname(dirname(__file__)))\n\n    def _find_tests(self, start_dir, pattern, namespace=False):\n        # Upstream doesn't look for 'load_tests' in start_dir.\n\n        if isdir(start_dir) and not namespace and isfile(join(start_dir, '__init__.py')):\n            name = 
self._get_name_from_path(start_dir)\n            package = self._get_module_from_name(name)\n            load_tests = getattr(package, 'load_tests', None)\n            tests = self.loadTestsFromModule(package, use_load_tests=False)\n            if load_tests is not None:\n                try:\n                    yield load_tests(self, tests, pattern)\n                except Exception as e:\n                    yield loader._make_failed_load_tests(package.__name__, e, self.suiteClass)\n        else:\n            for t in super(TestLoader, self)._find_tests(start_dir, pattern):\n                yield t\n","sub_path":"numba/testing/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"13056680","text":"def main():\n    girls_file = open('GirlNames.txt')\n    boys_file = open('BoyNames.txt')\n    \n    girl_names = get_names(girls_file)\n    boy_names = get_names(boys_file)\n    \n    girls_file.close()\n    boys_file.close()\n\n    print('Search if a name is a popular name.')\n    for genre in ['boy', 'girl']:\n        name = input('Enter a {} name: '.format(genre))\n        if genre == 'boy':\n            popularity(name in boy_names, name)\n        else:\n            popularity(name in girl_names, name)\n\ndef popularity(included, name):\n    if included:\n        print(name, 'is popular.')\n    else:\n        print(name, 'is not popular.')\n\ndef get_names(file):\n    names = []\n    for name in file:\n        names.append(name.rstrip())\n    return names\n\nmain()","sub_path":"ch7_ex/8_name_search.py","file_name":"8_name_search.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"637039290","text":"#!/usr/bin/python3\n\n\"\"\"\n    Given a configuration file, creates a table in the specified database\n    and stores the experiment results.\n\n    dump_to_sqlite.py <db_file> <config_file>\n\n    The configuration file must be in JSON format and contain the\n    following data:\n\n    - table_name: The name of the table inside the database\n    - table_schema: A list of dictionaries, one per column, with the 
\n    following format:\n        - col_name: Name of the column\n        - datatype: Type of data to store\n\"\"\"\n\nimport csv\nimport sqlite3\nimport json\nimport sys\nimport re\n\nDB = sys.argv[1]\nCONFIG_FILE = sys.argv[2]\nREGEX_NUM = r'[\\d\\,]*,\\d\\d\\d'\nre_num = re.compile(REGEX_NUM)\n\nwith open(CONFIG_FILE) as data_file:\n    data = json.load(data_file)\n\ntable = data['table_name']\n\nprint('Starting dump process...')\nprint(' DB: ' + DB)\nprint(' Table: ' + table)\n\ncon = sqlite3.connect(DB)\ncur = con.cursor()\ncur.execute(\"DROP TABLE IF EXISTS {}\".format(table))\n\ntable_cols = ''\nplaceholder = ''\n\nprint('inserting rows...')\n\nfor column in data['table_schema']:\n    placeholder += '?, '\n    table_cols += column['col_name'] + ' ' + column['datatype'] + ', '\n\ntable_cols = table_cols[:-2]\nplaceholder = placeholder[:-2]\n\ncur.execute(\"CREATE TABLE {}({})\".format(table, table_cols))\n\nquery_insert = 'INSERT INTO {} VALUES ({})'.format(table, placeholder)\ncsv_file = data['nombre'] + '/' + data['nombre'] + '.csv'\nwith open(csv_file,'r') as file:\n    csvr = csv.reader(file, delimiter='\\t')\n    for row in csvr:\n        for idx, col in enumerate(row):\n            if re_num.match(col):\n                row[idx] = col.replace(',', '')\n\n        if row[0].lower() == 'filtro':\n            continue\n\n        cur.execute(query_insert, row)\n\n\ncon.commit()\ncon.close()\n\nprint('Done!')","sub_path":"tp2/codigo/experimentos/dump_to_sqlite.py","file_name":"dump_to_sqlite.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"652431236","text":"import io\nimport requests\nimport json\nimport urllib.request\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport math\n\nsubscription_key = open('subscription_key').read()\nassert subscription_key\n\nface_api_url = 'https://brazilsouth.api.cognitive.microsoft.com/face/v1.0/detect'\n\n# quick image uploads: https://imgbb.com/\n\nimage_url = 'http://4.bp.blogspot.com/--u7Fho2C3K8/UhIKepmpzqI/AAAAAAAAC7A/lgirTyldrHc/s1600/flo.jpg'\n\nimage_path = image_url.split('/')[-1].split('?')[0]\nurllib.request.urlretrieve(image_url, image_path)\n\nim = Image.open(image_path)\nmat = np.array(im)\n\nheaders = { 'Ocp-Apim-Subscription-Key': subscription_key }\n    \nparams = {\n    'returnFaceId': 'true',\n    'returnFaceLandmarks': 'false',\n    'returnFaceAttributes': 'emotion',\n}\n\nresponse = requests.post(face_api_url, params=params, headers=headers, json={\"url\": image_url})\nfaces = response.json()\nprint('faces:', faces)\n\n# emotion        rgb              color\ncolors = {\n    'anger':     [ 213, 0, 0 ],     # red\n    'contempt':  [ 255, 109, 0 ],   # dark orange\n    'disgust':   [ 98, 0, 234 ],    # purple\n    'fear':      [ 0, 77, 64 ],     # dark green\n    'happiness': [ 255, 234, 0 ],   # yellow\n    'neutral':   [ 255, 255, 255 ], # white\n    'sadness':   [ 41, 98, 255 ],   # blue\n    'surprise':  [ 0, 229, 255 ],   # cyan\n}\n\ndef getEmotion(emotions):\n    values = list( emotions.values() )\n    index = np.argmax( np.array(values) )\n    return list(emotions.keys())[index]\n\ndef getEmotionColor(emotion):\n    return np.array( colors[emotion], dtype=np.uint8 )\n\ngrayIntensity = np.array([ .21, .72, .07 ])\n\ndef euclideanDistance(p1, p2):\n    return math.sqrt( (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 )\n\nfor face in faces:\n    rect = face['faceRectangle']\n    col1, col2 = rect['left'], rect['left'] + rect['width']\n    row1, row2 = rect['top'],  rect['top'] + rect['height']    \n    emotions 
= face['faceAttributes']['emotion']\n    emotion = getEmotion(emotions)\n    color = getEmotionColor(emotion)\n    # expand the face rectangle by 10 px on each side\n    row1 -= 10\n    row2 += 10\n    col1 -= 10\n    col2 += 10\n    rect = mat[row1:row2, col1:col2]\n    center = ( (col2-col1)/2, (row2-row1)/2 )\n    maxDistance = min(\n        euclideanDistance( center, (col2-col1, center[1]) ),\n        euclideanDistance( center, (center[0], row2-row1) ),\n    )\n    for i, j in np.ndindex(rect.shape[:-1]):\n        pix = rect[i][j]\n        gray = (pix * grayIntensity).sum() / 255.\n        colored = np.array(color * gray, dtype=np.uint8)\n        distance = euclideanDistance( center, (j,i) )\n        prcDist = min(1, max(0, distance / maxDistance - 0.2))\n        finalColor = pix * prcDist + colored * (1-prcDist)\n        rect[i][j] = np.array(finalColor, dtype=np.uint8)\n    # square on faces\n    # b = border = 2\n    # mat[row1:row1+b, col1:col2 ] = color\n    # mat[row2:row2+b, col1:col2 ] = color\n    # mat[row1:row2, col1:col1+b] = color\n    # mat[row1:row2, col2:col2+b] = color\n\nImage.fromarray(mat).save('emotionfull.png')\nplt.imshow(mat)\nplt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"648342656","text":"from flask import Flask, render_template\n\nfrom utils import get_data\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef get_home_page():\n    return render_template(\"home.html\")\n\n\n@app.route('/<value>')\ndef return_any_route(value):\n    for pages in get_data():\n        if pages.get('title') == value:\n            content = pages.get('text')\n            return render_template(\"any.html\", value=value, data=content)\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"flask_lesson_intro/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"503530922","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 15 01:57:29 2016\n\n@author: Tomography\n\"\"\"\n#*******************#\n\"\"\"SEM Constants\"\"\"\n#*******************#\nSEM_ip = '192.168.100.100'\nSEM_port = 8300\nNoseIndex = 4\nDwell = 5000\nSEMPixelWidth = 756\nSEMPixelHeight = 756\nSEMChannel = 0\nSEMDetector = 0\n\n#***************************#\n\"\"\"EBSD Mapping Constants\"\"\"\n#***************************#\nTEAM_ip = '192.168.100.105'\nTEAM_port = 8301\nEBSDProjectName = \"\"\nEBSDFolderPath = \"C:/ProgramData/EDAX/rshivaraman/TestFolder\"\nNumSlices = 2\nSliceThickness = 0.05\nGridType = 0\nXStart = -50.0\nYStart = -50.0\nXSize = 100.0\nYSize = 100.0\nResVal = 3 \nStep = 2.0\nMapTime = 5\n\n#*************************#\n\"\"\"Sub-Stage Constants\"\"\"\n#*************************#\nread_exepath = 'C:/Users/Tescan/Serial Sectioning/SerialSectioning/SubStage/read.exe'\nwrite_exepath = 'C:/Users/Tescan/Serial Sectioning/SerialSectioning/SubStage/write.exe'\ncal_exepath = 'C:/Users/Tescan/Serial Sectioning/SerialSectioning/SubStage/calibrate.exe'\nEBSDPos_path = 'C:/Users/Tescan/Serial Sectioning/SerialSectioning/SubStage/Position_EBSD.txt'\nLaserNPos_path = 'C:/Users/Tescan/Serial Sectioning/SerialSectioning/SubStage/Position_Laser.txt'\nSEMPos_path = 'C:/Users/Tescan/Serial Sectioning/SerialSectioning/SubStage/Position_SEM.txt'\nzIncrement = 0.5\nyIncrement = 0.5\n\n#************************************#\n\"\"\"Fast Steering Mirror Constants\"\"\"\n#************************************#\nVMAX_X = 5\nVMAX_Y= 5\nVMIN_X = -5\nVMIN_Y = -5\nSampsPerLine = 20\nNumRows = 20\nNumScans = 1\nScale = 
4\nHighTimeClock = 0.1\nLowTimeClock = 0.1\nHighTimeLaser = 0.1\nLowTimeLaser = 0.1\nDelayClock = 0.0\nDelayLaser = 0.01","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"591702299","text":"import tweepy\nfrom credentials import *\n\n\ndef twitter_setup():\n \"\"\"\n Utility function to setup the Twitter's API\n with an access keys provided in a file credentials.py\n :return: the authentified API\n \"\"\"\n # Authentication and access using keys:\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n\n # Return API with authentication:\n api = tweepy.API(auth)\n return api\n\n\ndef collect_by_user(user_id,count):\n connexion =twitter_setup()\n statuses = connexion.user_timeline(id = user_id, count = int(count))\n for status in statuses:\n print(status.text)\n return statuses\n\n","sub_path":"CSTwitterAnalysis/twitter_collect/collect_by_user.py","file_name":"collect_by_user.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"163119559","text":"import ui\nimport wndMgr\n\nclass SideBar(ui.Board):\n\tdef __init__(self):\n\t\tui.Board.__init__(self)\n\t\t\n\t\tself.ExpandBtn = ui.Button()\n\t\tself.ExpandBtn.SetParent(self)\n\t\tself.ExpandBtn.SetPosition(138, 0)\n\t\tself.ExpandBtn.SetUpVisual(\"d:/ymir work/ui/game/belt_inventory/btn_minimize_normal.tga\")\n\t\tself.ExpandBtn.SetOverVisual(\"d:/ymir work/ui/game/belt_inventory/btn_minimize_over.tga\")\n\t\tself.ExpandBtn.SetDownVisual(\"d:/ymir work/ui/game/belt_inventory/btn_minimize_down.tga\")\n\t\tself.ExpandBtn.SetEvent(self.ToggleMinimize)\n\t\tself.ExpandBtn.Show()\n\t\t\n\t\tself.btns = []\n\t\tself.minimized = 1\n\t\n\tdef AddButton(self, text, event):\n\t\theight = len(self.btns) * 26\n\t\twndHeight = max(108, 52 + height)\n\t\tself.SetSize(140, wndHeight)\n\t\tself.SetPosition(-126, (wndMgr.GetScreenHeight() - wndHeight) / 2)\n\t\tself.ExpandBtn.SetPosition(126, (wndHeight - 108) / 2)\n\t\tbtn = ui.Button()\n\t\tbtn.SetParent(self)\n\t\tbtn.SetPosition(35, 15 + height)\n\t\tbtn.SetUpVisual(\"d:/ymir work/ui/public/Large_Button_01.sub\")\n\t\tbtn.SetOverVisual(\"d:/ymir work/ui/public/Large_Button_02.sub\")\n\t\tbtn.SetDownVisual(\"d:/ymir work/ui/public/Large_Button_03.sub\")\n\t\tbtn.SetText(text)\n\t\tbtn.SetEvent(event)\n\t\tbtn.Show()\n\t\tself.btns.append(btn)\n\n\tdef __del__(self):\n\t\tui.Board.__del__(self)\n\n\tdef Destroy(self):\n\t\tself.Hide()\n\t\tfor obj in self.btns:\n\t\t\tobj.Hide()\n\t\t\tobj = None\n\t\tdel self.btns[:]\n\t\tself.btns = None\n\n\tdef OnUpdate(self):\n\t\tif self.minimized == 1:\n\t\t\t(x, y) = self.GetGlobalPosition()\n\t\t\tif x > -126:\n\t\t\t\tself.SetPosition(max(-126, x - 4), (wndMgr.GetScreenHeight() - (30 + len(self.btns) * 26)) / 2)\n\t\telse:\n\t\t\t(x, y) = self.GetGlobalPosition()\n\t\t\tif x < -25:\n\t\t\t\tself.SetPosition(min(-25, x + 4), (wndMgr.GetScreenHeight() - (30 + len(self.btns) * 26)) / 2)\n\n\tdef ToggleMinimize(self):\n\t\tif self.minimized == 1:\n\t\t\tself.minimized = 0\n\t\t\tself.ExpandBtn.SetUpVisual(\"d:/ymir work/ui/game/belt_inventory/btn_expand_normal.tga\")\n\t\t\tself.ExpandBtn.SetOverVisual(\"d:/ymir work/ui/game/belt_inventory/btn_expand_over.tga\")\n\t\t\tself.ExpandBtn.SetDownVisual(\"d:/ymir 
work/ui/game/belt_inventory/btn_expand_down.tga\")\n\t\telse:\n\t\t\tself.minimized = 1\n\t\t\tself.ExpandBtn.SetUpVisual(\"d:/ymir work/ui/game/belt_inventory/btn_minimize_normal.tga\")\n\t\t\tself.ExpandBtn.SetOverVisual(\"d:/ymir work/ui/game/belt_inventory/btn_minimize_over.tga\")\n\t\t\tself.ExpandBtn.SetDownVisual(\"d:/ymir work/ui/game/belt_inventory/btn_minimize_down.tga\")\n\n","sub_path":"root/uisidebar.py","file_name":"uisidebar.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"316415547","text":"#!usr/bin/python\n#coding=utf-8\nimport sys, getopt\nfrom scapy.all import *\nimport binascii\nimport time\n\n#need modify /usr/lib/python2.7/dist-packages/scapy 252,262\n\nclass Tools:\n netSSID = 'ghost' #Network name\n @staticmethod\n def getPayloadFrame(cmd, ct_addr2, addr2):\n payload = Dot11Elt(ID=221, info=('\\x63\\x63\\x63'+cmd))\n response_dot11 = Dot11(subtype=5, addr1=ct_addr2,addr2=addr2, addr3=addr2,SC=22222)\n return RadioTap()/response_dot11/Dot11ProbeResp()/payload/Dot11Elt(ID='SSID', info=Tools.netSSID)\n @staticmethod\n def getTimeHash():\n hash_time = str(hash(time.time()))[0:8]\n return hash_time\n @staticmethod\n def CMDEncode(cmd):\n cmd_bin = \"\"\n for i in cmd:\n cmd_bin += binascii.a2b_hex(hex(ord(i))[2:4])\n cmd_bin += '\\0'\n return cmd_bin\n\nclass Action1:\n def __init__(self):\n self.ReceiveHash = None;\n self.SendHash = None;\n self.number = 0;\n self.time = 0;\n def Handle(self,packet):\n dot = packet.getlayer(Dot11)\n if dot != None and dot.type == 0 and dot.subtype == 4:\n data = str(packet)\n index = data.find(\"ac1\")\n if index>= 0:\n print(\"Get Frame!(0-11): \" + data[index:index+11])\n self.ReceiveHash = data[index+3:index+11]\n ct_addr2 = packet.addr2\n if(self.SendHash == None):\n self.SendHash = Tools.getTimeHash()\n cmd_bin = Tools().CMDEncode(self.SendHash+SendCMD)\n if(self.SendHash != self.ReceiveHash):\n if(time.time()-self.time>4):\n self.number += 1;\n print(\"Round [\"+str(self.number)+\"] : Context is {\"+cmd_bin+\"} \"),\n response_frame = Tools.getPayloadFrame(cmd_bin, ct_addr2, '22:22:22:22:22:22')\n sendp(response_frame, iface=\"wlan0\", count=450+50*self.number)\n self.time = time.time()\n else:\n self.SendHash = None;\n self.number = 0;\n print(\"=====Attack OK!!!======\")\n exit(1);\n\n\nSendCMD = \"\"\nopts, args = getopt.getopt(sys.argv[1:], \"hc:\")\nfor op, value in opts:\n if op == \"-c\":\n #try:\n SendCMD += value\n for j in args:\n SendCMD = SendCMD+\" \"+j\n print(SendCMD)\n act1 = Action1()\n #print(\"You input command is :[\" + SendCMD+\"]\")\n print(\"=====Attack Begin======\")\n print(\"Sniff Monitor...\")\n sniff(iface=\"wlan0\",prn=act1.Handle)\n #except:\n #print(\"Please try again later\")\n #pass\n elif op == \"-h\":\n print('''\n ----------------------------------\n Ghost Tunnel Tools Manual...\n ----------------------------------\n ''')\n","sub_path":"scapy_ghost.py","file_name":"scapy_ghost.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"510647056","text":"import requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport locale\nfrom locale import atof\n\n#Get data from worldometers using BeautifulSoup \nurl = requests.get(\"https://www.worldometers.info/coronavirus/\")\nfrom bs4 import BeautifulSoup\nsoup = BeautifulSoup(url.content, 'html.parser')\ntitle = soup.title \ntitleText = 
title.get_text()\nprint(titleText)\ndt = []\nfor table in soup.find_all('table', class_ = 'table table-bordered table-hover main_table_countries'):\n\tth = [e.text.strip() for e in table.find_all('th')]\n\tfor tr in table.find_all('tr'):\n\t\ttd = tr.find_all('td')\n\t\trow = [e.text.strip().replace(',','') for e in td]\n\t\tdt.append(row)\n\n# generate DataFrame from above data\ndf = pd.DataFrame(dt,columns=th)\ndf = pd.DataFrame(dt,columns=th, index=df['Country,Other'])\ndf = df[['TotalCases', 'TotalDeaths', 'TotalRecovered', 'ActiveCases', 'Serious,Critical']][2:12].copy()\ndf = df.astype(int)\n\n# plotting data\ndf.plot.bar(rot=0)#stacked=True)\nplt.title(titleText)\nplt.show()\nprint(df)\n","sub_path":"covid-19_data_visualization.py","file_name":"covid-19_data_visualization.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"483993714","text":"import pytest\nfrom bidu.utils.test_utils import layer_test, bidu_test\n\n\n@bidu_test\ndef test_leaky_relu():\n from bidu.layers.advanced_activations import LeakyReLU\n for alpha in [0., .5, -1.]:\n layer_test(LeakyReLU, kwargs={'alpha': alpha},\n input_shape=(2, 3, 4))\n\n\n@bidu_test\ndef test_prelu():\n from bidu.layers.advanced_activations import PReLU\n layer_test(PReLU, kwargs={},\n input_shape=(2, 3, 4))\n\n\n@bidu_test\ndef test_elu():\n from bidu.layers.advanced_activations import ELU\n for alpha in [0., .5, -1.]:\n layer_test(ELU, kwargs={'alpha': alpha},\n input_shape=(2, 3, 4))\n\n\n@bidu_test\ndef test_parametric_softplus():\n from bidu.layers.advanced_activations import ParametricSoftplus\n for alpha in [0., .5, -1.]:\n layer_test(ParametricSoftplus,\n kwargs={'alpha_init': 1.,\n 'beta_init': -1},\n input_shape=(2, 3, 4))\n\n\n@bidu_test\ndef test_thresholded_relu():\n from bidu.layers.advanced_activations import ThresholdedReLU\n layer_test(ThresholdedReLU, kwargs={'theta': 0.5},\n input_shape=(2, 3, 4))\n\n\n@bidu_test\ndef test_srelu():\n from bidu.layers.advanced_activations import SReLU\n layer_test(SReLU, kwargs={},\n input_shape=(2, 3, 4))\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n","sub_path":"tests/keras/layers/test_advanced_activations.py","file_name":"test_advanced_activations.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"469112933","text":"#import analysis as an\n#import tensorflow as tf\nimport importlib as imp\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os,csv,sys,pprint,time,operator\nimport pickle as pkl\nimport itertools\n# use imp.reload(an) to reload analysis\nfrom joblib import Parallel, delayed\nimport pandas as pd\nimport multiprocessing\nimport argparse\n\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import scale\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_curve, auc\nimport shutil\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n # Flags for defining the tf.train.ClusterSpec\n parser.add_argument(\n \"--trainlogdir\",\n type=str,\n default='',\n help=\"\"\n )\n\n args, unparsed = parser.parse_known_args()\n\n #for network inferences\n read_folder=os.path.join('../../',args.trainlogdir,'processing/output_feats')\n\n cases=os.listdir(read_folder)\n auto_props=[]\n for i in range(len(cases)):\n 
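# each pickle under processing/output_feats holds the per-object property dicts for one case\n        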
case=cases[i]\n #print(case+'___'+str(i))\n props=pkl.load(open(os.path.join(read_folder,case),'rb'))\n auto_props.append(props)\n print('props',auto_props)\n #auto_props=list(itertools.chain(*auto_props))\n #feature_list_auto=['Area','Case','Centroid_x','Centroid_y','Circularity','Class_id','Convex_area','Convex_perimeter','Eccentricity','Equivalent_diameter','Major_axis_length', 'Major_minor_axis_ratio','CD20+_mean_min_dist','CD3+CD4+_mean_min_dist','CD3+CD4-_mean_min_dist','CD20+_min_dist','CD3+CD4+_min_dist','CD3+CD4-_min_dist','Minor_axis_length','CD20+_object_number','CD3+CD4+_object_number','CD3+CD4-_object_number','Perim_circ_ratio','Perimeter','Pixel_size','Prob','Roi_num','Solidity']\n feature_list_auto=['Abs_coords','Area','Bbox_coords','Case','Centroid_x','Centroid_y','Circularity','Class_id','Convex_area','Convex_perimeter','Eccentricity','Equivalent_diameter','Major_axis_length', 'Major_minor_axis_ratio','BDCA2+_mean_min_dist','CD11c+_mean_min_dist','CD20+_mean_min_dist','CD3+CD4+_mean_min_dist','CD3+CD4-_mean_min_dist','BDCA2+_min_dist','CD11c+_min_dist','CD20+_min_dist','CD3+CD4+_min_dist','CD3+CD4-_min_dist','Minor_axis_length','BDCA2+_object_number','CD11c+_object_number','CD20+_object_number','CD3+CD4+_object_number','CD3+CD4-_object_number','Perim_circ_ratio','Perimeter','Pixel_size','Prob','Roi_num','Solidity']\n #feature_list_auto=['Area','Case','Centroid_x','Centroid_y','Circularity','Class_id','Convex_area','Convex_perimeter','DC','Eccentricity','Equivalent_diameter','Major_axis_length', 'Major_minor_axis_ratio','DC_mean_min_dist','CD3+CD4+_mean_min_dist','CD3+CD4-_mean_min_dist','DC_min_dist','CD3+CD4+_min_dist','CD3+CD4-_min_dist','Minor_axis_length','DC_object_number','CD3+CD4+_object_number','CD3+CD4-_object_number','Perim_circ_ratio','Perimeter','Pixel_size','Prob','Roi_num','Solidity']\n #feature_list_auto=['Area','Case','Circularity','Class_id','Convex_area','Convex_perimeter','DC','Eccentricity','Equivalent_diameter','Major_axis_length', 'Major_minor_axis_ratio','DC_mean_min_dist','Tw_mean_min_dist','Ta_mean_min_dist','DC_min_dist','Tw_min_dist','Ta_min_dist','Minor_axis_length','DC_object_number','Tw_object_number','Ta_object_number','Perim_circ_ratio','Perimeter','Pixel_size','Prob','Roi_num','Solidity']\n\n import pandas as pd\n auto_feats=pd.DataFrame.from_dict(auto_props)\n auto_feats=auto_feats[feature_list_auto]\n auto_feats.to_csv(os.path.join('../../',args.trainlogdir,'analysis','Human_FFPE_auto_feats_fixedDB.csv'))\n print('done with feat')\n\n\n writedir=os.path.join('../../',args.trainlogdir,'analysis','new_analysis')\n if os.path.exists(writedir):\n shutil.rmtree(writedir)\n os.makedirs(writedir)\n else:\n os.makedirs(writedir)\n\n #sub_list_auto=['Circularity','Major_minor_axis_ratio',\n #'Perim_circ_ratio','Equivalent_diameter','Area','Perimeter',\n #'Eccentricity','Major_axis_length','Minor_axis_length','Solidity','Convex_area','Convex_perimeter',\n #'cd20_object_number','bdca2_object_number','cd11c_object_number']\n sub_list_auto=['Area','Circularity','Convex_area','Convex_perimeter','Eccentricity','Equivalent_diameter','Major_axis_length','Major_minor_axis_ratio','Minor_axis_length','Perim_circ_ratio','Perimeter','Solidity']\n prob=0\n mf=pd.DataFrame.from_csv(os.path.join('../../',args.trainlogdir,'analysis','Human_FFPE_auto_feats_fixedDB.csv'))\n print(mf.shape)\n num_double=mf[(mf.Class_id=='CD3+CD4+')&(mf.Prob>prob)].shape[0]\n print('number of CD3+CD4+: ',num_double)\n 
mean_cd20dist=mf[(mf.Class_id=='CD3+CD4+')&(mf.Prob>prob)]['CD20+_min_dist'].mean()\n print(mean_cd20dist)\n mean_bdca2dist=mf[(mf.Class_id=='CD3+CD4+')&(mf.Prob>prob)]['BDCA2+_min_dist'].mean()\n print(mean_bdca2dist)\n mean_cd11cdist=mf[(mf.Class_id=='CD3+CD4+')&(mf.Prob>prob)]['CD11c+_min_dist'].mean()\n print(mean_cd11cdist)\n num_single=mf[(mf.Class_id=='CD3+CD4-')&(mf.Prob>prob)].shape[0]\n print('number of CD3+CD4-: ',num_single)\n mean_CD20dist=mf[(mf.Class_id=='CD3+CD4-')&(mf.Prob>prob)]['CD20+_min_dist'].mean()\n print(mean_CD20dist)\n mean_BDCA2dist=mf[(mf.Class_id=='CD3+CD4-')&(mf.Prob>prob)]['BDCA2+_min_dist'].mean()\n print(mean_BDCA2dist)\n mean_CD11cdist=mf[(mf.Class_id=='CD3+CD4-')&(mf.Prob>prob)]['CD11c+_min_dist'].mean()\n print(mean_CD11cdist)\n a=0\n b=0\n from scipy.stats import ttest_ind\n for dist_measure in ['CD20+_min_dist','BDCA2+_min_dist','CD11c+_min_dist']:\n print(dist_measure)\n a=mf[(mf.Class_id=='CD3+CD4+')&(mf.Prob>prob)][dist_measure]\n b=mf[(mf.Class_id=='CD3+CD4-')&(mf.Prob>prob)][dist_measure]\n (tstat,p)=ttest_ind(a.dropna(), b.dropna(), axis=0, equal_var=True)\n print('ttest')\n print(tstat)\n print(p)\n print('ttest')\n (tstat,p)=ttest_ind(a.dropna(), b.dropna(), axis=0, equal_var=False)\n print(tstat)\n print(p)\n a=0; b=0; c=0\n for case in mf['Case'].unique():\n for roi_num in mf[(mf.Case==case)]['Roi_num'].unique():\n a=a+mf[(mf.Case==case)&(mf.Roi_num==roi_num)]['CD20+_object_number'].iloc[0]\n b=b+mf[(mf.Case==case)&(mf.Roi_num==roi_num)]['BDCA2+_object_number'].iloc[0]\n c=c+mf[(mf.Case==case)&(mf.Roi_num==roi_num)]['CD11c+_object_number'].iloc[0]\n print('number of cd20: ',a)\n print('number of bdca2: ',b)\n print('number of cd11c: ',c)\n #scaler=MinMaxScaler()\n #scaler.fit(mf[sub_list_auto])\n #mf[sub_list_auto]=scaler.transform(mf[sub_list_auto])\n '''\n def plot_feats_auto(df,sub_list,dirname,prob,partition_vars,pvarmy):\n for feat in sub_list:\n f, ((ax1,ax2),(ax3,ax4),(ax5,ax6)) = plt.subplots(3,2,sharey=True,sharex=True,figsize=(10,15))\n ax1.set_title('cd20 CD3+CD4+')\n ax1.plot(\n df[(df.Class_id=='CD3+CD4+')&(df.Prob>prob)][partition_vars[0]],\n df[(df.Class_id=='CD3+CD4+')&(df.Prob>prob)][feat],'r.')\n ax2.set_title('cd20 CD3+CD4-')\n ax2.plot(\n df[(df.Class_id=='CD3+CD4-')&(df.Prob>prob)][partition_vars[0]],\n df[(df.Class_id=='CD3+CD4-')&(df.Prob>prob)][feat],'r.')\n ax3.set_title('bdca2 CD3+CD4+')\n ax3.plot(\n df[(df.Class_id=='CD3+CD4+')&(df.Prob>prob)][partition_vars[1]],\n df[(df.Class_id=='CD3+CD4+')&(df.Prob>prob)][feat],'r.')\n ax4.set_title('bdca2 CD3+CD4-')\n ax4.plot(\n df[(df.Class_id=='CD3+CD4-')&(df.Prob>prob)][partition_vars[1]],\n df[(df.Class_id=='CD3+CD4-')&(df.Prob>prob)][feat],'r.')\n ax5.set_title('cd11c CD3+CD4+')\n ax5.plot(\n df[(df.Class_id=='CD3+CD4+')&(df.Prob>prob)][partition_vars[2]],\n df[(df.Class_id=='CD3+CD4+')&(df.Prob>prob)][feat],'r.')\n ax6.set_title('cd11c CD3+CD4-')\n ax6.plot(\n df[(df.Class_id=='CD3+CD4-')&(df.Prob>prob)][partition_vars[2]],\n df[(df.Class_id=='CD3+CD4-')&(df.Prob>prob)][feat],'r.')\n f.savefig(os.path.join(writedir,'feats_'+pvarmy+'_'+feat+'.png'))\n\n partition_vars=['CD20_min_dist','BDCA2_min_dist','CD11c_min_dist']\n plot_feats_auto(mf,sub_list_auto,writedir,prob,partition_vars,'Min_dist')\n partition_vars=['CD20_mean_min_dist','BDCA2_mean_min_dist','CD11c_mean_min_dist']\n plot_feats_auto(mf,sub_list_auto,writedir,prob,partition_vars,'Mean_min_dist')\n\n def plot_auc(aucs,name1,name2,rng,folder,partition_variable):\n f,axes = 
plt.subplots(2,2,sharey=True,sharex=True,figsize=(10,10))\n for i in range(len(axes)):\n for j in range(len(axes[i])):\n k=i*2+j\n mean=np.array([np.mean(x) for x in aucs[:,k,0]])\n stdev=np.array([np.std(x) for x in aucs[:,k,0]])\n axes[i][j].plot(rng,mean)\n axes[i][j].plot(rng,mean-aucs[:,k,1])\n axes[i][j].plot(rng,mean+aucs[:,k,1])\n axes[i][j].plot(rng,0.5*np.ones(len(rng)))\n axes[i][j].plot(rng,0.6*np.ones(len(rng)))\n axes[0][0].set_title(name1+'_low vs '+name1+'_high')\n axes[0][1].set_title(name2+'_low vs '+name2+'_high')\n axes[1][0].set_title(name1+'_low vs '+name2+'_low')\n axes[1][1].set_title(name1+'_high vs '+name2+'_high')\n axes[1][1].set_ylim([0,1.0])\n f.savefig(os.path.join(folder,'without_dist_'+partition_variable+'_'+name1+'_'+name2+'.png'))\n\n def min_dist_analysis_auto(df,sub_list,folder,rng,kernel,testsize,partition_variable,prob):\n\n ylist1=an.test_mindist_low(\n df[(df.Class_id=='CD3+CD4+')&(df.Prob>prob)][sub_list+[partition_variable]].dropna(axis=0,how='any',subset=[partition_variable]).copy(),\n df[(df.Class_id=='CD3+CD4-')&(df.Prob>prob)][sub_list+[partition_variable]].dropna(axis=0,how='any',subset=[partition_variable]).copy(),\n rng,\n kernel,\n testsize,\n partition_variable)\n\n plot_auc(ylist1,'CD3+CD4+','CD3+CD4-',rng,folder,partition_variable)\n\n rng=0.2*np.array(list(range(1,100)))\n\n #for p in range(10):\n # prob=p*0.1\n # min_dist_analysis_auto(af,sub_list_auto,'auto_feats',rng,prob)\n kernel='rbf'\n testsize=0.2\n pvars=['CD20+_min_dist','BDCA2+_min_dist','CD11c+_min_dist','CD20+_mean_min_dist','BDCA2+_mean_min_dist','CD11c+_mean_min_dist']\n for partition_variable in pvars:\n min_dist_analysis_auto(mf,sub_list_auto,writedir,rng,kernel,testsize,partition_variable,prob)\n\n print('done with min dist analysis')\n\n # classify by all features including dist, need to normalize dist\n\n def with_dist_analysis(df,sub_list,folder,partition_variable,prob):\n\n sub_list=sub_list+[partition_variable]\n\n scaler=MinMaxScaler()\n scaler.fit(df[sub_list])\n df[sub_list]=scaler.transform(df[sub_list])\n\n auclist=an.try_classify(\n df[(df.Class_id=='CD3+CD4+')&(df.Prob>prob)][sub_list].dropna(axis=0,how='any',subset=[partition_variable]).values,\n df[(df.Class_id=='CD3+CD4-')&(df.Prob>prob)][sub_list].dropna(axis=0,how='any',subset=[partition_variable]).values,\n 'rbf',0.2)\n plt.figure()\n plt.plot(auclist[0])\n plt.savefig(os.path.join(folder,'with_dist_'+partition_variable+'_CD3+CD4+_vs_CD3+CD4-.png'))\n\n # for auto features\n mf=pd.DataFrame.from_csv(os.path.join(maindir,'feats.csv'))\n mf=mf.dropna(axis=0,how='any')\n for partition_variable in pvars:\n with_dist_analysis(mf,sub_list_auto,writedir,partition_variable,prob)\n\n # variance analysis\n\n print('done')\n '''\nif __name__=='__main__':\n main()\n","sub_path":"cell_and_tubule_segmentation/cell_predictions/Bcell_code/analysis/auto_cdm_analysis.py","file_name":"auto_cdm_analysis.py","file_ext":"py","file_size_in_byte":11920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"107870959","text":"\"\"\"\n SSW/CS 555\n Michael Ryan, Pat Song, Soumik Deb, Connor Santiago, Paul Szenher\n This file will contain all the test cases for all of our user stories\n\"\"\"\nimport sys\nimport datetime\nimport os\nfrom prettytable import PrettyTable\nfrom collections import defaultdict\nimport unittest\nimport json\nimport gedcomparser\nimport datetime\n\n\nclass TestParser (unittest.TestCase) :\n\n #example unittest\n def testUS00(self):\n 
self.assertEqual(1,1,'1 Should be Equal to 1')\n\n #Testing US_01\n #gedcom.ged file contains Four Errors related to US_01\n #Individual US01I1 is born in the future\n #Individual US01I1 dies in the future\n #Individual US01I4 dies in the future\n #Family US01F1 is married in the future\n #Family US01F2 is married in the future\n #Family US01F2 is divorced in the future\n #Family US01F3 is married in the future\n def testUS01(self) :\n today = datetime.date.today()\n with open('families.json') as f:\n families = json.load(f)\n\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n dates = gedcomparser.check_dates_after_today(individuals, families)\n ids = dates.keys()\n for id in [\"US01I1\", \"US01I2\", \"US01I3\", \"US01I4\", \"US01I5\", \"US01I6\", \"US01F1\", \"US01F2\", \"US01F3\"] :\n for tag in dates[id] :\n if dates[id][tag][0] == False :\n self.assertTrue(today < dates[id][tag][1], \"These dates occur in the future\")\n else :\n self.assertFalse(today < dates[id][tag][1], \"These dates do not occur in the future\")\n\n def testUS28(self) :\n with open('families.json') as f:\n families = json.load(f)\n\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n\n def convert_date(json_date):\n date = datetime.datetime.strptime(json_date, '%Y-%m-%d')\n real_date = date.date()\n return real_date\n\n individuals[\"US28matt\"][\"BIRT\"][0] = convert_date(individuals[\"US28matt\"][\"BIRT\"][0])\n individuals[\"US28pat\"][\"BIRT\"][0] = convert_date(individuals[\"US28pat\"][\"BIRT\"][0])\n individuals[\"US28kat\"][\"BIRT\"][0] = convert_date(individuals[\"US28kat\"][\"BIRT\"][0])\n\n\n children = gedcomparser.order_siblings_by_age(families[\"US28song\"][\"CHIL\"],individuals)\n\n self.assertEqual(children[0],\"US28matt\")\n self.assertEqual(children[1],\"US28pat\")\n self.assertEqual(children[2],\"US28kat\")\n\n def testUS02(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n us02_ind = {key:value for key, value in individuals.items() if 'us02' in key.lower()}\n us02_fam = {key:value for key, value in families.items() if 'us02' in key.lower()}\n\n # US02I1 is Born after they married\n # US02I2 is Born after they married\n # US02I3 is married after birth\n # US02I4 is married after birth\n result = gedcomparser.check_birth_before_marr(us02_ind, us02_fam)\n self.assertEqual(result[\"US02I1\"], False, \"US02I1 Should be born after they marry\")\n self.assertEqual(result[\"US02I2\"], False, \"US02I2 Should be born after they marry\")\n self.assertEqual(result[\"US02I3\"], True, \"US02I3 Should be born before they marry\")\n self.assertEqual(result[\"US02I4\"], True, \"US02I4 Should be born before they marry\")\n\n def testUS03(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n us03_ind = {key:value for key, value in individuals.items() if 'us03' in key.lower()}\n\n # US02I1 is Born after they married\n # US02I2 is Born after they married\n # US02I3 is married after birth\n # US02I4 is married after birth\n result = gedcomparser.check_birth_before_death(us03_ind)\n self.assertEqual(result[\"US03I1\"], True, \"US03I1 Should be born before their death\")\n self.assertEqual(result[\"US03I2\"], True, \"US03I2 Is not dead\")\n self.assertEqual(result[\"US03I3\"], False, \"US03I3 Is born the day after they die\")\n 
self.assertEqual(result[\"US03I4\"], False, \"US03I4 Should be born after they died\")\n\n def testUS08(self):\n with open('families.json') as f:\n families = json.load(f)\n\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n result = gedcomparser.check_birth_before_marriage_of_parents(individuals, families)\n self.assertFalse(result[\"US08I3\"], \"US08I3 is born before marrying\")\n self.assertFalse(result[\"US08I4\"], \"US08I4 is born before marrying\")\n self.assertTrue(result[\"US08I7\"], \"US08I7 is born after marrying\")\n self.assertTrue(result[\"US08I8\"], \"US08I8 is born after marrying\")\n\n def testUS09(self):\n with open('families.json') as f:\n families = json.load(f)\n\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n result = gedcomparser.check_birth_before_death_of_parents(individuals, families)\n self.assertFalse(result[\"US09I3\"], \"US09I3 is born after mother has died\")\n self.assertFalse(result[\"US09I4\"], \"US09I4 is born after mother has died\")\n self.assertTrue(result[\"US09I7\"], \"US09I7 is born before mother has died\")\n self.assertTrue(result[\"US09I8\"], \"US09I8 is born before mother has died\")\n\n def testUS11(self):\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n result = gedcomparser.check_for_no_bigamy(individuals)\n self.assertFalse(result[\"US11I1\"], \"US11I1 is in more than 1 Family\")\n self.assertFalse(result[\"US11I2\"], \"US11I2 is in more than 1 Family\")\n self.assertTrue(result[\"US11I3\"], \"US11I3 is not in more than 1 Family\")\n self.assertTrue(result[\"US11I4\"], \"US11I4 is not in more than 1 Family\")\n\n def testUS16(self):\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n with open('families.json') as f:\n families = json.load(f)\n\n result = gedcomparser.check_male_last_names_in_families(individuals, families)\n self.assertFalse(result[\"US16I3\"], \"US16I3 does not have the same last name as father\")\n self.assertFalse(result[\"US16I4\"], \"US16I4 does not have the same last name as father\")\n self.assertTrue(result[\"US16I7\"], \"US16I7 has the same last name as father\")\n self.assertTrue(result[\"US16I8\"], \"US16I8 has the same last name as father\")\n\n def testUS23(self):\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n result = gedcomparser.check_unique_individuals(individuals)\n self.assertFalse(result[\"US23I1\"], \"US23I1 does not have unique Name and Birthdate.\")\n self.assertFalse(result[\"US23I2\"], \"US23I2 does not have unique Name and Birthdate.\")\n self.assertTrue(result[\"US23I3\"], \"US23I3 has unique Name and Birthdate.\")\n self.assertTrue(result[\"US23I4\"], \"US23I4 has unique Name and Birthdate.\")\n\n\n def testUS25(self):\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n with open('families.json') as f:\n families = json.load(f)\n\n result = gedcomparser.check_unique_children(individuals)\n self.assertFalse(result[\"US25I1\"], \"US25I1 does not have unique Name and Birthdate.\")\n self.assertFalse(result[\"US25I2\"], \"US25I2 does not have unique Name and Birthdate.\")\n self.assertTrue(result[\"US25I3\"], \"US25I3 has unique Name and Birthdate.\")\n self.assertTrue(result[\"US25I4\"], \"US25I4 has unique Name and Birthdate.\")\n\n def testUS07(self):\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n result = gedcomparser.check_age_less_than_150(individuals)\n self.assertFalse(result[\"US07I1\"], \"US07I1 is over 150 years old (no death)\")\n 
self.assertFalse(result[\"US07I2\"], \"US07I2 is over 150 years old (with death)\")\n self.assertTrue(result[\"US07I3\"], \"US07I3 is under 150 years old (with death) (one day)\")\n self.assertFalse(result[\"US07I4\"], \"US07I4 is over 150 years old (with death) (exact)\")\n self.assertFalse(result[\"US07I5\"], \"US07I5 is over 150 years old (with death) (one day)\")\n\n def testUS10(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n result = gedcomparser.check_marriage_after_14(individuals, families)\n\n self.assertTrue(result[\"US10I1\"], \"US10I1 Should be older than 14 when they marry\")\n self.assertTrue(result[\"US10I2\"], \"US10I2 Should be older than 14 when they marry (1 day)\")\n self.assertTrue(result[\"US10I3\"], \"US10I3 Should be exactly 14 when they marry (same day)\")\n self.assertFalse(result[\"US10I4\"], \"US10I4 Should be younger than 14 when they marry (day before)\")\n self.assertFalse(result[\"US10I5\"], \"US10I5 Should be younger than 14 when they marry\")\n self.assertFalse(result[\"US10I6\"], \"US10I6 Should be younger than 14 when they marry (marry before birth)\")\n\n def testUS12(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n result = gedcomparser.check_parents_too_old(individuals, families)\n self.assertTrue(result[\"US12F1\"], \"Family US12F1 should have parents too much older than their children\")\n self.assertFalse(result[\"US12F2\"], \"Family US12F2 should have parents appropriately older than their children\")\n\n def testUS13(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n result = gedcomparser.check_siblings_spacing(individuals, families)\n print(result)\n self.assertTrue(result[\"US13F1\"], \"Family US13F1 has siblings too close in birth date\")\n\n def testUS14(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n family_list = families[\"US14song\"]\n family = {\"US14song\":family_list}\n self.assertFalse(gedcomparser.check_no_more_than_five_births(family,individuals),\"No more than 5 siblings can be born at a time\")\n\n def testUS15(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n family_list = families[\"US15song\"]\n family = {\"US15song\":family_list}\n self.assertFalse(gedcomparser.check_fewer_than_fifteen_siblings(family,individuals),\"No more than 5 siblings can be born at a time\")\n\n def testUS17(self) :\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n output = gedcomparser.check_marriage_to_children(families)\n for id in output.keys() :\n if id in [\"US17F1\", \"US17F2\"] :\n self.assertFalse(output[id], \"US17F1 and US17F2 were constructed to have marriage between parents and children\")\n else :\n self.assertTrue(output[id], \"The rest of the families do not contain marriage between parents and children\")\n\n def testUS18(self) :\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n output = 
gedcomparser.check_marriage_to_siblings(individuals, families)\n for id in output.keys() :\n if id == \"US18F1\" :\n self.assertFalse(output[id], \"US18F1 was constructed to have marriage between siblings\")\n else :\n self.assertTrue(output[id], \"The rest of the families do not contain marriage between siblings\")\n\n def testUS19(self) :\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n output = gedcomparser.check_marriage_to_cousins(individuals, families)\n self.assertTrue(\"US19F1\" in output, \"The gedcom file was constructed such that US19F1 contained marriage between cousins\")\n self.assertTrue(len(output)==1, \"US19F1 is the only family containing marriage between cousins in our test gedcom file\")\n\n def testUS20(self) :\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n output = gedcomparser.check_marriage_to_aunts_and_uncles(individuals, families)\n self.assertTrue(\"US20F3\" in output, \"US20F3 was constructed to contain a marriage between Aunt and Nephew\")\n self.assertTrue(\"US20F4\" in output, \"US20F4 was constructed to contain a marriage between Uncle and Niece\")\n self.assertTrue(output[\"US20F3\"] == \"US20F2\", \"US20F2 is the family in which the Aunt is a child\")\n self.assertTrue(output[\"US20F4\"] == \"US20F2\", \"US20F2 is the family in which the Uncle is a child\")\n\n\n\n\n def testUS21(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n family_list = families[\"US21song\"]\n family = {\"US21song\":family_list}\n self.assertFalse(gedcomparser.check_correct_gender_for_role(family,individuals),\"Incorrect gender for role\")\n\n\n def testUS24(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n result = gedcomparser.check_unique_family_by_spouse(individuals, families)\n self.assertTrue(result[\"US24F1\"], \"Families US24F1 and US24F2 have exaclty matching spouses and marriage dates\")\n self.assertTrue(result[\"US24F2\"], \"Families US24F1 and US24F2 have exaclty matching spouses and marriage dates\")\n self.assertFalse(result[\"US24F3\"], \"Family US24F3 has unique spouse names and marriage date\")\n \n\n def testUS26(self):\n with open('individuals.json') as ind_file:\n individuals = json.load(ind_file)\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n\n result = gedcomparser.check_corresponding_entries(individuals, families)\n\n self.assertFalse(result[\"US26F1\"], \"Family US26F1 has properly corresponding individual entries\")\n self.assertFalse(result[\"US26I1\"], \"Individual US26I1 has properly corresponding family entry\")\n self.assertFalse(result[\"US26I2\"], \"Individual US26I1 has properly corresponding family entry\")\n\n self.assertTrue(result[\"US26F2\"], \"Family US26F2 has non-corresponding spouses\")\n self.assertTrue(result[\"US26F3\"], \"Family US26F3 has non-corresponding spouses\")\n\n self.assertTrue(result[\"US26I3\"], \"Individual US26I3 is not listed in their family\")\n self.assertTrue(result[\"US26I4\"], \"Individual US26I4 is not listed in their family\")\n self.assertTrue(result[\"US26I5\"], \"Individual US26I5 is not listed in their family\")\n 
self.assertTrue(result[\"US26I6\"], \"Individual US26I6 is not listed in their family\")\n \n def testUS29(self):\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n US29_ind = {key: value for key, value in individuals.items() if 'US29' in key.upper()}\n\n result = gedcomparser.list_deceased(US29_ind)\n self.assertFalse(result[\"US29I1\"], \"US29I1 is alive.\")\n self.assertFalse(result[\"US29I2\"], \"US29I2 is alive.\")\n self.assertFalse(result[\"US29I3\"], \"US29I3 is alive.\")\n self.assertFalse(result[\"US29I4\"], \"US29I4 is alive.\")\n self.assertTrue(result[\"US29I5\"], \"US29I5 is deceased.\")\n self.assertFalse(result[\"US29I6\"], \"US29I6 is alive.\")\n self.assertTrue(result[\"US29I7\"], \"US29I7 is deceased.\")\n\n def testUS04(self):\n with open('families.json') as f:\n families = json.load(f)\n\n result = gedcomparser.check_marriage_before_divorce(families)\n self.assertFalse(result[\"US04F1\"], \"US04F1 is divorced before marrying\")\n self.assertFalse(result[\"US04F2\"], \"US04F2 is divorced before marrying\")\n\n\n def testUS05(self):\n with open('families.json') as f:\n families = json.load(f)\n\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n result = gedcomparser.check_marriage_before_death(families, individuals)\n self.assertFalse(result[\"US05I1\"], \"US05I1 is dead before marrying\")\n self.assertFalse(result[\"US05I2\"], \"US05I2 is dead before marrying\")\n self.assertTrue(result[\"US05I3\"], \"US05I3 is not dead before marrying\")\n self.assertTrue(result[\"US05I4\"], \"US05I4 is not dead before marrying\")\n\n def testUS06(self) :\n with open('families.json') as f:\n families = json.load(f)\n\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n divorces = gedcomparser.check_divorce_before_death(families, individuals)\n for fam_id in divorces.keys() :\n divorce_date = families[fam_id][\"DIV\"][0]\n husb_id = families[fam_id][\"HUSB\"][0]\n wife_id = families[fam_id][\"WIFE\"][0]\n self.assertTrue(\"DEAT\" in individuals[husb_id] or \"DEAT\" in individuals[wife_id], \"Families without deaths should not be in the output dict\")\n for id in divorces[fam_id] :\n self.assertTrue(individuals[id][\"DEAT\"][2] < divorce_date, \"All individuals in this list should have died before the divorce date\")\n\n #US22I1 was duplicated twice after its initial creation\n #US22I2 was duplicated once after its initial creation\n #US22F1 was duplicated once after its initial creation\n #US22I3 is not duplicated at all\n def testUS22(self) :\n with open('families.json') as f:\n families = json.load(f)\n\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n self.assertEqual(individuals[\"US22I1\"][\"DUP_COUNT\"][0], 2, \"US22I1 was duplicated twice\")\n self.assertEqual(individuals[\"US22I2\"][\"DUP_COUNT\"][0], 1, \"US22I2 was duplicated once\")\n self.assertEqual(families[\"US22F1\"][\"DUP_COUNT\"][0], 1, \"US22F1 was duplicated once\")\n self.assertEqual(individuals[\"US22I3\"][\"DUP_COUNT\"][0], 0, \"US22I3 was not duplicated\")\n\n def testUS27(self) :\n birthdate = datetime.date(year = 1997, month = 4, day = 7)\n self.assertEqual(gedcomparser.calculate_age(birthdate), 22, \"A person born on April 7, 1997 is 22 years old\")\n bad_birthdate = datetime.date(year = 2021, month = 4, day = 7)\n self.assertEqual(gedcomparser.calculate_age(bad_birthdate), -2, \"Even though it is a bad input, the function will return the difference between today's date and the input date\")\n\n def testUS30(self) :\n with 
open('families.json') as f:\n            families = json.load(f)\n\n        with open('individuals.json') as f:\n            individuals = json.load(f)\n\n        US30_ind = {key: value for key, value in individuals.items() if 'US30' in key.upper()}\n        US30_fam = {key: value for key, value in families.items() if 'US30' in key.upper()}\n\n        result = gedcomparser.list_living_married(US30_ind, US30_fam)\n        self.assertTrue(result[\"US30I1\"], \"US30I1 is alive and currently married.\")\n        self.assertTrue(result[\"US30I2\"], \"US30I2 is alive and currently married.\")\n        self.assertFalse(result[\"US30I3\"], \"US30I3 is deceased.\")\n        self.assertTrue(result[\"US30I4\"], \"US30I4 is alive and currently married.\")\n        self.assertTrue(result[\"US30I5\"], \"US30I5 is alive and currently married.\")\n        self.assertTrue(result[\"US30I6\"], \"US30I6 is alive and currently married.\")\n        self.assertTrue(result[\"US30I7\"], \"US30I7 is alive and currently married.\")\n        self.assertFalse(result[\"US30I8\"], \"US30I8 is deceased.\")\n\n    def testUS31(self):\n        with open('individuals.json') as f:\n            individuals = json.load(f)\n\n        US31_ind = {key: value for key, value in individuals.items() if 'US31' in key.upper()}\n\n        result = gedcomparser.list_living_single(US31_ind)\n        self.assertFalse(result[\"US31I1\"], \"US31I1 was married and is under 30.\")\n        self.assertFalse(result[\"US31I2\"], \"US31I2 was married and is under 30.\")\n        self.assertFalse(result[\"US31I3\"], \"US31I3 is deceased.\")\n        self.assertFalse(result[\"US31I4\"], \"US31I4 was married.\")\n        self.assertTrue(result[\"US31I5\"], \"US31I5 is alive, over 30, and has never been married.\")\n        self.assertTrue(result[\"US31I6\"], \"US31I6 is alive, over 30, and has never been married.\")\n        self.assertFalse(result[\"US31I7\"], \"US31I7 is married.\")\n        self.assertFalse(result[\"US31I8\"], \"US31I8 is married.\")\n\n    def testUS32(self):\n        with open('families.json') as f:\n            families = json.load(f)\n\n        with open('individuals.json') as f:\n            individuals = json.load(f)\n\n        result = gedcomparser.list_multiple_births(individuals, families)\n        self.assertTrue(result[\"US32F1\"], \"US32F1 has had multiple births\")\n        self.assertTrue(result[\"US32F2\"], \"US32F2 has had multiple births\")\n        self.assertFalse(result[\"US32F3\"], \"US32F3 has not had multiple births\")\n        self.assertTrue(result[\"US32F4\"], \"US32F4 has had multiple births\")\n\n    def testUS35(self):\n        with open('individuals.json') as ind_file:\n            individuals = json.load(ind_file)\n\n        today = datetime.datetime.strptime('28Oct2019', '%d%b%Y').date()\n\n        test_list = {\n            \"US35born1\":individuals[\"US35born1\"],\n            \"US35born2\":individuals[\"US35born2\"],\n            \"US35born3\":individuals[\"US35born3\"]\n        }\n\n        test_list[\"US35born1\"][\"BIRT\"][0] = datetime.datetime.strptime(test_list[\"US35born1\"][\"BIRT\"][0], '%Y-%m-%d').date()\n        test_list[\"US35born2\"][\"BIRT\"][0] = datetime.datetime.strptime(test_list[\"US35born2\"][\"BIRT\"][0], '%Y-%m-%d').date()\n        test_list[\"US35born3\"][\"BIRT\"][0] = datetime.datetime.strptime(test_list[\"US35born3\"][\"BIRT\"][0], '%Y-%m-%d').date()\n\n        res_list = gedcomparser.list_recent_births(test_list,today)\n\n        self.assertEqual(res_list[0],\"US35born1\")\n        self.assertEqual(res_list[1],\"US35born2\")\n        self.assertEqual(res_list[2],\"US35born3\")\n\n\n\n    def testUS33(self):\n        with open('families.json') as f:\n            families = json.load(f)\n\n        with open('individuals.json') as f:\n            individuals = json.load(f)\n\n        result = gedcomparser.list_orphans(individuals, families)\n        self.assertFalse(result[\"US33I1\"], \"US33I1 is not an 
orphan\")\n        self.assertFalse(result[\"US33I2\"], \"US33I2 is not an orphan\")\n        self.assertFalse(result[\"US33I3\"], \"US33I3 is not an orphan\")\n        self.assertTrue(result[\"US33I4\"], \"US33I4 is an orphan\")\n        self.assertFalse(result[\"US33I5\"], \"US33I5 is not an orphan\")\n        self.assertFalse(result[\"US33I6\"], \"US33I6 is not an orphan\")\n        self.assertFalse(result[\"US33I7\"], \"US33I7 is not an orphan\")\n        self.assertFalse(result[\"US33I8\"], \"US33I8 is not an orphan\")\n        self.assertFalse(result[\"US33I9\"], \"US33I9 is not an orphan\")\n        self.assertFalse(result[\"US33I10\"], \"US33I10 is not an orphan\")\n        self.assertFalse(result[\"US33I11\"], \"US33I11 is not an orphan\")\n        self.assertTrue(result[\"US33I12\"], \"US33I12 is an orphan\")\n        self.assertTrue(result[\"US33I13\"], \"US33I13 is an orphan\")\n\n    def testUS34(self):\n        with open('families.json') as f:\n            families = json.load(f)\n\n        with open('individuals.json') as f:\n            individuals = json.load(f)\n\n        result = gedcomparser.list_large_age_differences(individuals, families)\n        self.assertFalse(result[\"US34F1\"], \"US34F1 does not have a large age gap\")\n        self.assertFalse(result[\"US34F2\"], \"US34F2 does not have a large age gap\")\n        self.assertTrue(result[\"US34F3\"], \"US34F3 has a large age gap\")\n        self.assertTrue(result[\"US34F4\"], \"US34F4 has a large age gap\")\n\n    def testUS36(self):\n        with open('individuals.json') as f:\n            individuals = json.load(f)\n\n        result = gedcomparser.list_recent_deaths(individuals)\n        self.assertFalse(result[\"US36I1\"], \"US36I1 has not died within the last 30 days\")\n        self.assertTrue(result[\"US36I2\"], \"US36I2 has died within the last 30 days\")\n        self.assertFalse(result[\"US36I3\"], \"US36I3 has not died within the last 30 days\")\n        self.assertFalse(result[\"US36I4\"], \"US36I4 has not died within the last 30 days\")\n        self.assertFalse(result[\"US36I5\"], \"US36I5 has not died within the last 30 days\")\n        self.assertTrue(result[\"US36I6\"], \"US36I6 has died within the last 30 days\")\n\n    def testUS37(self):\n        with open('families.json') as f:\n            families = json.load(f)\n\n        with open('individuals.json') as f:\n            individuals = json.load(f)\n\n        result = gedcomparser.list_recent_survivors(individuals, families)\n        self.assertTrue(result[\"US37I1\"], \"US37I1 survives a family member who recently died\")\n        self.assertFalse(result[\"US37I2\"], \"US37I2 is dead.\")\n        self.assertTrue(result[\"US37I3\"], \"US37I3 survives a family member who recently died\")\n        self.assertTrue(result[\"US37I4\"], \"US37I4 survives a family member who recently died\")\n        self.assertTrue(result[\"US37I5\"], \"US37I5 survives a family member who recently died\")\n        self.assertFalse(result[\"US37I6\"], \"US37I6 is dead.\")\n        self.assertFalse(result[\"US37I7\"], \"US37I7 is dead.\")\n        self.assertTrue(result[\"US37I8\"], \"US37I8 survives a family member who recently died\")\n\n\n    def testUS38(self):\n        with open('individuals.json') as ind_file:\n            individuals = json.load(ind_file)\n        \n        today = datetime.datetime.strptime('11Nov2019', '%d%b%Y').date()\n\n        test_list = {\n            \"US38min\":individuals[\"US38min\"],\n            \"US38jeannie\":individuals[\"US38jeannie\"]\n        }\n\n        test_list[\"US38min\"][\"BIRT\"][0] = datetime.datetime.strptime(test_list[\"US38min\"][\"BIRT\"][0], '%Y-%m-%d').date()\n        test_list[\"US38jeannie\"][\"BIRT\"][0] = datetime.datetime.strptime(test_list[\"US38jeannie\"][\"BIRT\"][0], '%Y-%m-%d').date()\n\n        res_list = gedcomparser.list_upcoming_birthdays(test_list,today)\n\n        self.assertEqual(res_list[0],\"US38jeannie\")\n        
self.assertEqual(res_list[1],\"US38min\")\n\n def testUS39(self):\n with open('families.json') as fam_file:\n families = json.load(fam_file)\n \n today = datetime.datetime.strptime('11Nov2019', '%d%b%Y').date()\n\n test_list = {\n \"US39song\":families[\"US39song\"],\n }\n\n test_list[\"US39song\"][\"MARR\"][0] = datetime.datetime.strptime(test_list[\"US39song\"][\"MARR\"][0], '%Y-%m-%d').date()\n\n res_list = gedcomparser.list_upcoming_annversaries(test_list,today)\n\n self.assertEqual(res_list[0],\"US39song\")\n\n\n\n\n #Testing US_40\n #gedcom.ged file was constructed with errors in specific places\n #for example, Individual US01I1 is born in the future\n #US01I1's birthdate appears on line 21 of gedcom.ged\n #Another example, Family US01F1 is married in the future\n #US01F1's marriage date appears on line 79 of gedcom.ged\n #any information from gedcom.ged can be expected and tested in this way, provided you have the ID and tag of what you want\n def testUS40(self) :\n\n with open('families.json') as f:\n families = json.load(f)\n\n with open('individuals.json') as f:\n individuals = json.load(f)\n\n self.assertEqual(gedcomparser.get_line_number(individuals, \"US01I1\", \"BIRT\"), \"21\", \"The error was placed on line 21\")\n self.assertEqual(gedcomparser.get_line_number(families, \"US01F1\", \"MARR\"), \"79\", \"The error was placed on line 79\")\n","sub_path":"TestParser.py","file_name":"TestParser.py","file_ext":"py","file_size_in_byte":28630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"17431026","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404, HttpResponse\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\nfrom django.db.models.functions import Lower\nfrom django.contrib.auth.models import User\nfrom django.db.models import Avg\n\nfrom profiles.models import UserProfile\n\nfrom .models import Product, Category, RatingProducts\nfrom .forms import ProductForm, RatingProductsForm\n\n\ndef all_products(request):\n \"\"\" A view to show all products, including sorting and seach queries \"\"\"\n\n products = Product.objects.all()\n query = None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products = products.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products = products.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n products = products.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any seach criteria!\")\n return redirect(reverse('products'))\n\n queries = Q(name__icontains=query) | Q(\n description__icontains=query)\n products = products.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'products': products,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'products/products.html', context)\n\n\ndef product_detail(request, product_id):\n \"\"\" A view to show 
individual product details \"\"\"\n\n    product = get_object_or_404(Product, pk=product_id)\n    rating_products = RatingProducts.objects.filter(product=product)\n    rating_avg = rating_products.aggregate(Avg('rate'))\n    rating_count = rating_products.count()\n    favorited = False\n\n    if request.user.is_authenticated:\n        profile = UserProfile.objects.get(user=request.user)\n\n        if profile.favorites.filter(id=product_id).exists():\n            favorited = True\n\n    context = {\n        'product': product,\n        'rating_products': rating_products,\n        'rating_avg': rating_avg,\n        'rating_count': rating_count,\n        'favorited': favorited,\n    }\n\n    return render(request, 'products/product_detail.html', context)\n\n\n@login_required\ndef add_product(request):\n    \"\"\" View that allows admin to add products to the store \"\"\"\n    if not request.user.is_superuser:\n        messages.error(request, 'Sorry, only store owners can do that!')\n        return redirect(reverse('home'))\n\n    if request.method == 'POST':\n        form = ProductForm(request.POST, request.FILES)\n        if form.is_valid():\n            product = form.save()\n            messages.success(request, 'Successfully added product!')\n            return redirect(reverse('product_detail', args=[product.id]))\n        else:\n            messages.error(\n                request, 'Failed to add product. Please make sure form is filled in correctly!')\n    else:\n        form = ProductForm()\n\n    template = 'products/add_product.html'\n    context = {\n        'form': form,\n    }\n\n    return render(request, template, context)\n\n\n@login_required\ndef edit_product(request, product_id):\n    \"\"\" Gives the ability for admin to edit products in store \"\"\"\n    if not request.user.is_superuser:\n        messages.error(request, 'Sorry, only store owners can do that!')\n        return redirect(reverse('home'))\n\n    product = get_object_or_404(Product, pk=product_id)\n    if request.method == 'POST':\n        form = ProductForm(request.POST, request.FILES, instance=product)\n        if form.is_valid():\n            form.save()\n            messages.success(request, 'Successfully updated product!')\n            return redirect(reverse('product_detail', args=[product.id]))\n        else:\n            messages.error(\n                request, 'Product failed to update. 
Please make sure form is filled in correctly!')\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_product.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef delete_product(request, product_id):\n \"\"\" Gives the ability for admin to delete products in store \"\"\"\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that!')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product deleted!')\n return redirect(reverse('products'))\n\n\n@login_required\ndef rating_products(request, product_id):\n \"\"\" Gives the ability for members to rate products \"\"\"\n product = get_object_or_404(Product, pk=product_id)\n user = request.user\n\n if request.method == 'POST':\n form = RatingProductsForm(request.POST)\n if form.is_valid():\n rate = form.save(commit=False)\n rate.user = user\n rate.product = product\n rate.save()\n messages.success(request, 'Your review has been added!')\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n form = RatingProductsForm()\n\n template = 'products/rating_products.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef liked_product(request):\n \"\"\" Returns the liked template and displays liked items \"\"\"\n product = Product.objects.all()\n user = request.user\n profile = UserProfile.objects.get(user=user)\n\n like_list = profile.favorites.all()\n\n template = 'products/liked_product.html'\n context = {\n 'profile': profile,\n 'product': product,\n 'like_list': like_list,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef add_product_to_like(request, product_id):\n \"\"\" Adds liked member items to liked template \"\"\"\n product = Product.objects.get(id=product_id)\n user = request.user\n profile = UserProfile.objects.get(user=user)\n\n profile.favorites.add(product)\n messages.success(request, 'Your like has been added!')\n\n return redirect(reverse('product_detail', args=[product.id]))\n","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"209984889","text":"from typing import Iterable\r\nimport logging\r\nimport random\r\nimport re\r\nimport numpy as np\r\nfrom overrides import overrides\r\n\r\nfrom stog.utils import lazy_groups_of\r\nfrom stog.data.instance import Instance\r\nfrom stog.data.iterators.bucket_iterator import BucketIterator, DataIterator\r\nfrom stog.data.dataset import Batch\r\nfrom stog.utils import logging\r\n\r\nlogger = logging.init_logger()\r\n\r\n\r\n@DataIterator.register(\"CompetenceCurriculum\")\r\nclass CompetenceCurriculumIterator(BucketIterator):\r\n def __init__(self,*args, curriculum_len=None, initial_competence=None, slope_power=2, damr_name='DAMRR0V2', **kwargs):\r\n assert int(curriculum_len) > 0\r\n assert float(initial_competence) > 0\r\n assert float(slope_power)\r\n super().__init__(*args,**kwargs)\r\n self.curriculum_len = curriculum_len\r\n self.initial_competence = initial_competence\r\n self.slope_power = slope_power\r\n self.initial_competence_powered = self.initial_competence ** self.slope_power\r\n self.timestep = None\r\n self.traindata = None\r\n 
self.traindata_difficulty = None\r\n        self.damr_name = damr_name\r\n\r\n    def _init_curriculum(self, instances):\r\n        self.traindata = instances\r\n        self.timestep = 0\r\n        self.damr = globals()[self.damr_name](instances)\r\n        self.traindata_difficulty = self.damr.compute_data_difficulty()\r\n\r\n\r\n    def competence(self, timestep):\r\n        return 1 if timestep >= self.curriculum_len else ( timestep * ( 1 - self.initial_competence_powered ) / self.curriculum_len + self.initial_competence_powered ) ** ( 1 / self.slope_power ) \r\n\r\n    @overrides\r\n    def _create_batches(self, instances: Iterable[Instance], shuffle: bool, train_epoch = None) -> Iterable[Batch]:\r\n        if train_epoch is None:\r\n            yield from super()._create_batches(instances, shuffle=shuffle)\r\n            return\r\n        \r\n        if self.timestep is None:\r\n            self._init_curriculum(instances)\r\n        \r\n        assert instances is self.traindata, 'curriculumiter cannot be used on other training data'\r\n\r\n        if self.timestep >= self.curriculum_len: \r\n            yield from super()._create_batches(instances, shuffle=shuffle)\r\n            return\r\n\r\n        ninstances=0\r\n        while True:\r\n            current_competence = self.competence(self.timestep)\r\n            sampled_data = [item for idx, item in enumerate(self.traindata) if self.traindata_difficulty[idx] <= current_competence + 0.0001]\r\n            idx = list(range(len(sampled_data)))\r\n            random.shuffle(idx)\r\n            batch = [sampled_data[i] for i in idx[:self._batch_size]]\r\n            yield Batch(batch)\r\n            self.timestep += 1\r\n            ninstances += self._batch_size\r\n            if ninstances >= len(instances):\r\n                break\r\n        \r\n\r\nclass DAMR:\r\n    def __init__(self,instances):\r\n        self.data = instances\r\n        self.amrs = [x.fields['amr'].metadata for x in instances]\r\n        self.data_difficulty = None\r\n        self.concept_idf=None\r\n        self.rel_idf=None\r\n\r\n    def compute_data_difficulty(self):\r\n        if self.data_difficulty is None:\r\n            raw = np.array([self.get_amr_difficulty(item) for item in self.amrs],dtype='float')\r\n            cdf = (raw[None,:] <= raw[:,None]).mean(axis=1)\r\n            self.data_difficulty = cdf\r\n            self.raw_data_difficulty = raw\r\n        return self.data_difficulty\r\n    \r\n    def get_amr_difficulty(self, amr): raise NotImplementedError\r\n\r\n    def compute_concept_idf(self):\r\n        df = {}\r\n        for item in self.amrs:\r\n            for nodeinfo in item.graph.get_nodes():\r\n                concept = nodeinfo.instance\r\n                if not concept: continue\r\n                df[concept] = df.get(concept,0)+1\r\n        self.concept_idf = {c:np.log(len(self.amrs)/v) for c,v in df.items()}\r\n\r\n    def compute_rel_idf(self):\r\n        df = {}\r\n        for item in self.amrs:\r\n            for edge,info in item.graph.get_edges().items():\r\n                rel = info['label']\r\n                df[rel] = df.get(rel, 0) + 1\r\n        self.rel_idf = {c:np.log(len(self.amrs)/v) for c,v in df.items()}\r\n\r\n    def rel_difficulty(self,rel):\r\n        if self.rel_idf is None:\r\n            self.compute_rel_idf()\r\n        if 'ARG' in rel: return 1\r\n        return 1 + self.rel_idf.get(rel, 1) \r\n    \r\n    def concept_difficulty(self, concept):\r\n        if self.concept_idf is None:\r\n            self.compute_concept_idf()\r\n        return 1 + self.concept_idf.get(concept, 1)\r\n\r\n\r\nclass DAMRV1(DAMR):\r\n    def _get_nodes_depth(self, amr):\r\n        depths = {amr.graph._top:1}\r\n        while True:\r\n            added=False\r\n            for edge in amr.graph.get_edges():\r\n                src = edge[0].identifier\r\n                if src not in depths:continue\r\n                depth = depths[src]\r\n                tgt = edge[1].identifier\r\n                if tgt not in depths:\r\n                    depths[tgt] = depth + 1\r\n                    added = True\r\n            if not added: break\r\n        # this should not happen \r\n        for nodeinfo in amr.graph.get_nodes():\r\n            node = nodeinfo.identifier\r\n            if node not in depths:\r\n                
depths[node]=1\r\n return depths\r\n \r\n def get_amr_difficulty(self, amr):\r\n difficulty = 0\r\n nodes_depth = self._get_nodes_depth(amr)\r\n\r\n for nodeinfo in amr.graph.get_nodes():\r\n concept = nodeinfo.instance\r\n node = nodeinfo.identifier\r\n difficulty += self.concept_difficulty(concept) * nodes_depth[node] ** 2\r\n\r\n return difficulty\r\n\r\n\r\nclass DAMRV2(DAMR):\r\n\r\n def get_amr_difficulty(self, amr):\r\n difficulty = 0\r\n for edge,relinfo in amr.graph.get_edges().items():\r\n src_concept = edge[0].instance\r\n tgt_concept = edge[1].instance\r\n rel = relinfo['label']\r\n difficulty+= self.rel_difficulty(rel) * (\r\n self.concept_difficulty(src_concept) + self.concept_difficulty(tgt_concept))\r\n return difficulty\r\n\r\nclass DAMRR0V2(DAMRV2):\r\n def rel_difficulty(self,rel):\r\n if 'ARG' in rel: return 1\r\n return 2\r\n \r\n def concept_difficulty(self, concept):\r\n if self.concept_idf is None:\r\n self.compute_concept_idf()\r\n if re.match(r'^.*-\\d+$',concept):\r\n return 1 + self.concept_idf.get(concept, 1)\r\n return 1\r\n\r\nclass NodeCountDAMR(DAMR):\r\n def get_amr_difficulty(self, amr): \r\n return len(amr.graph.get_nodes())\r\n\r\nclass EdgeCountDAMR(DAMR):\r\n def get_amr_difficulty(self, amr): \r\n return len(amr.graph.get_edges())\r\n\r\n\r\n__all__ = [\r\n 'CompetenceCurriculumIterator'\r\n]","sub_path":"stog/data/iterators/curriculum_iterator.py","file_name":"curriculum_iterator.py","file_ext":"py","file_size_in_byte":6727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"511146834","text":"# coding=utf-8\r\nimport sys\r\nsys.path.append(\"..\")\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\nimport torch\r\nimport numpy as np\r\nimport os\r\nimport params as pms\r\n\r\n\r\ndef parse_model_cfg(path):\r\n path = os.path.join(pms.PROJECT_PATH,path)\r\n file = open(path, 'r')\r\n lines = file.read().split('\\n')\r\n lines = [x for x in lines if x and not x.startswith('#')]\r\n lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces\r\n module_defs = []\r\n for line in lines:\r\n if line.startswith('['): # This marks the start of a new block\r\n module_defs.append({})\r\n module_defs[-1]['type'] = line[1:-1].rstrip()\r\n if module_defs[-1]['type'] == 'convolutional':\r\n module_defs[-1]['batch_normalize'] = 0\r\n else:\r\n key, value = line.split(\"=\")\r\n value = value.strip()\r\n module_defs[-1][key.rstrip()] = value.strip()\r\n\r\n return module_defs\r\n\r\n\r\ndef create_modules(module_defs):\r\n hyperparams = module_defs.pop(0)\r\n output_filters = [int(hyperparams[\"channels\"])]\r\n module_list = nn.ModuleList()\r\n\r\n for i, module_def in enumerate(module_defs):\r\n modules = nn.Sequential()\r\n if module_def[\"type\"] == \"convolutional\":\r\n bn = int(module_def[\"batch_normalize\"])\r\n filters = int(module_def[\"filters\"])\r\n kernel_size = int(module_def[\"size\"])\r\n pad = (kernel_size - 1) // 2 if int(module_def[\"pad\"]) else 0\r\n stride = int(module_def[\"stride\"])\r\n modules.add_module(\"conv_%d\"%i, nn.Conv2d(in_channels=output_filters[-1],\r\n out_channels=filters,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n padding=pad,\r\n bias=not bn))\r\n if bn:\r\n modules.add_module(\"batch_norm_%d\"%i, nn.BatchNorm2d(filters))\r\n if module_def[\"activation\"] == \"leaky\":\r\n modules.add_module(\"leaky_%d\"%i, nn.LeakyReLU(0.1, inplace=True))\r\n\r\n elif module_def[\"type\"] == 'upsample':\r\n upsample = 
Upsample(scale_factor=int(module_def[\"stride\"]))\r\n            modules.add_module(\"upsample_%d\"%i, upsample)\r\n\r\n        elif module_def[\"type\"] == \"route\":\r\n            layers = [int(x) for x in module_def[\"layers\"].split(',')] # Note: int(\" 1\") and int(\"1 \") both equal 1, i.e. the whitespace does not matter\r\n            filters = sum([output_filters[i+1 if i >0 else i] for i in layers])\r\n            modules.add_module(\"route_%d\"%i, EmptyLayer())\r\n\r\n        elif module_def[\"type\"] == \"shortcut\":\r\n            filters = output_filters[int(module_def[\"from\"])]\r\n            modules.add_module(\"shortcut_%d\"%i, EmptyLayer())\r\n\r\n        elif module_def[\"type\"] == \"yolo\":\r\n            anchor_idxs = [int(x) for x in module_def[\"mask\"].split(',')]\r\n            anchors = [float(x) for x in module_def[\"anchors\"].split(',')]\r\n            anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors), 2)]\r\n            anchors = [anchors[i] for i in anchor_idxs]\r\n            nC = int(module_def[\"classes\"])\r\n\r\n            yolo_layer = YOLOLayer(anchors, nC)\r\n            modules.add_module(\"yolo_%d\"%i, yolo_layer)\r\n\r\n        module_list.append(modules)\r\n        output_filters.append(filters)\r\n\r\n    return hyperparams, module_list\r\n\r\n\r\nclass Upsample(nn.Module):\r\n    def __init__(self, scale_factor=1, mode='nearest'):\r\n        super(Upsample, self).__init__()\r\n        self.scale_factor = scale_factor\r\n        self.mode = mode\r\n\r\n    def forward(self, x):\r\n        return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)\r\n\r\n\r\nclass EmptyLayer(nn.Module):\r\n    def __init__(self):\r\n        super(EmptyLayer, self).__init__()\r\n\r\n    def forward(self, x):\r\n        return x\r\n\r\n\r\nclass YOLOLayer(nn.Module):\r\n    def __init__(self, anchors, nC):\r\n        super(YOLOLayer, self).__init__()\r\n        self.anchors = torch.FloatTensor(anchors)\r\n        self.nA = len(anchors)\r\n        self.nC = nC\r\n\r\n\r\n    def forward(self, p, img_size, var=None):\r\n        bs, nG = p.shape[0], p.shape[-1]\r\n        p = p.view(bs, self.nA, 5 + self.nC , nG, nG).permute(0, 3, 4, 1, 2)\r\n\r\n        # decode the raw predictions in both training and testing\r\n        p_de = self.__decode(p.clone().detach(), img_size)\r\n        return (p, p_de)\r\n\r\n    def __decode(self, p, img_size):\r\n        conv_shape = p.shape\r\n\r\n        device = p.device\r\n        batch_size = conv_shape[0]\r\n        output_size = conv_shape[1]\r\n        anchor_per_scale = self.nA\r\n        num_classes = self.nC\r\n        stride = img_size / output_size\r\n        anchors = (1.0 * self.anchors / stride).to(device)\r\n\r\n\r\n        conv_output = p.view(batch_size, output_size, output_size, anchor_per_scale, 5 + num_classes)\r\n        conv_raw_dxdy = conv_output[:, :, :, :, 0:2]\r\n        conv_raw_dwdh = conv_output[:, :, :, :, 2:4]\r\n        conv_raw_conf = conv_output[:, :, :, :, 4:5]\r\n        conv_raw_prob = conv_output[:, :, :, :, 5:]\r\n\r\n\r\n        y = torch.arange(0, output_size).unsqueeze(1).repeat(1, output_size)\r\n        x = torch.arange(0, output_size).unsqueeze(0).repeat(output_size, 1)\r\n        grid_xy = torch.stack([x, y], dim=-1)\r\n        grid_xy = grid_xy.unsqueeze(0).unsqueeze(3).repeat(batch_size, 1, 1, anchor_per_scale, 1).float().to(device)\r\n\r\n        pred_xy = (torch.sigmoid(conv_raw_dxdy) + grid_xy) * stride\r\n        pred_wh = (torch.exp(conv_raw_dwdh) * anchors) * stride\r\n        pred_xywh = torch.cat([pred_xy, pred_wh], dim=-1)\r\n        pred_conf = torch.sigmoid(conv_raw_conf)\r\n        pred_prob = torch.sigmoid(conv_raw_prob)\r\n        pred_bbox = torch.cat([pred_xywh, pred_conf, pred_prob], dim=-1)\r\n\r\n        return pred_bbox.view(-1, 5+self.nC) if not self.training else pred_bbox # e.g. shape : [bs, 13*13*3+26*26*3+52*52*3, 25]\r\n\r\n\r\nclass Darknet(nn.Module):\r\n    def __init__(self, cfg_path, img_size=416):\r\n        super(Darknet, self).__init__()\r\n        self.module_defs = parse_model_cfg(cfg_path)\r\n        
self.module_defs[0][\"height\"] = img_size\r\n        self.hypeparams, self.module_list = create_modules(self.module_defs)\r\n\r\n\r\n    def forward(self, x):\r\n        img_size = x.shape[-1]\r\n        layer_outputs = []\r\n        output = []\r\n\r\n        for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):\r\n            mtype = module_def[\"type\"]\r\n            if mtype in [\"convolutional\", \"upsample\"]:\r\n                x = module(x)\r\n\r\n            elif mtype == \"route\":\r\n                layer_i = [int(x) for x in module_def[\"layers\"].split(',')]\r\n                if len(layer_i) == 1:\r\n                    x = layer_outputs[layer_i[0]]\r\n                else:\r\n                    x = torch.cat([layer_outputs[i] for i in layer_i], 1)\r\n\r\n            elif mtype == \"shortcut\":\r\n                layer_i = int(module_def[\"from\"])\r\n                x = layer_outputs[-1] + layer_outputs[layer_i]\r\n\r\n            elif mtype == \"yolo\": # the yolo layer uses the feature maps of each FPN level plus the original image size img_size, where img_size provides the scaling ratio\r\n                x = module[0](x, img_size)\r\n                output.append(x)\r\n            layer_outputs.append(x)\r\n\r\n        if self.training:\r\n            p, p_d = list(zip(*output))\r\n            return p, p_d\r\n        else:\r\n            p, p_d = list(zip(*output))\r\n            return p, torch.cat(p_d, 0)\r\n\r\n\r\n    def load_darknet_weights(self, weight_path):\r\n        weight_path = os.path.join(pms.PROJECT_PATH, weight_path)\r\n\r\n        fp = open(weight_path, 'rb')\r\n        header = np.fromfile(fp, dtype=np.int32, count=5)\r\n        self.header_info = header\r\n        self.seen = header[3]\r\n        weights = np.fromfile(fp, dtype=np.float32)\r\n        fp.close()\r\n\r\n        ptr = 0\r\n        for i, (module_def, module) in enumerate(zip(self.module_defs[:75], self.module_list[:75])):\r\n            if module_def[\"type\"] == \"convolutional\":\r\n                conv_layer = module[0]\r\n                if module_def[\"batch_normalize\"]:\r\n                    bn_layer = module[1]\r\n                    num_b = bn_layer.bias.numel()\r\n\r\n                    bn_b = torch.from_numpy(weights[ptr:ptr+num_b]).view_as(bn_layer.bias.data)\r\n                    bn_layer.bias.data.copy_(bn_b)\r\n                    ptr += num_b\r\n\r\n                    bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight.data)\r\n                    bn_layer.weight.data.copy_(bn_w)\r\n                    ptr += num_b\r\n\r\n                    bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)\r\n                    bn_layer.running_mean.data.copy_(bn_rm)\r\n                    ptr += num_b\r\n\r\n                    bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)\r\n                    bn_layer.running_var.data.copy_(bn_rv)\r\n                    ptr += num_b\r\n                else:\r\n                    num_b = conv_layer.bias.numel()\r\n                    conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias)\r\n                    conv_layer.bias.data.copy_(conv_b)\r\n                    ptr += num_b\r\n\r\n                num_w = conv_layer.weight.numel()\r\n                conv_w = torch.from_numpy(weights[ptr:ptr+num_w]).view_as(conv_layer.weight.data)\r\n                conv_layer.weight.data.copy_(conv_w)\r\n                ptr += num_w\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    net = Darknet(\"cfg/yolov3-voc.cfg\")\r\n    print(net)\r\n    in_img = torch.randn(2, 3, 416, 416)\r\n    p, p_d = net(in_img)\r\n\r\n    print(p[1].shape)\r\n    print(p_d[0].shape)\r\n\r\n","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"10864441","text":"# TOSHIBA - TSDV\n# Team: OCRPoc\n# Author: Phung Dinh Tai\n# Email: tai.phungdinh@toshiba-tsdv.com\n# Date create: 17/05/2017\n# Last update: 17/05/2017\n# Description: Count frequency of list string\n\nclass FreqData:\n    def __init__(self, content = None, freq = 0, replace = None):\n        self.content = content\n        self.frequency = freq\n        self.replace = replace\n\nclass FrequencyCounter:\n    # Initial\n    def __init__(self):\n        self.data = [] # List of FreqData\n\n    # @return: 
data\n def CountListString(self, string_list):\n self.data = []\n\n for s in string_list:\n founded = False\n\n for element in self.data:\n\n if s == element.content:\n founded = True\n element.frequency += 1\n\n if not founded:\n self.data.append(FreqData(s, 1))\n\n return self.data\n\n # @return: data\n def CountListString2D(self, list_data):\n self.data = []\n\n for arr in list_data:\n founded = False\n content = arr[0]\n replace = arr[1]\n\n for element in self.data:\n\n if (content == element.content) and (replace == element.replace):\n founded = True\n element.frequency += 1\n\n if not founded:\n self.data.append(FreqData(content, 1, replace))\n\n return self.data\n","sub_path":"Run_PHocr_test/Mekong/utilities/analysis/detail_report/frequency_counter.py","file_name":"frequency_counter.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"10946211","text":"import picamera\nimport picamera.array\nimport numpy as np\nimport cv2\nimport os\nfrom time import sleep\nimport time\n# Color definitions\nred = np.uint8([[[255, 0, 0]]])\ngreen = np.uint8([[[0, 255, 0]]])\nblue = np.uint8([[[0, 0, 255]]])\nyellow = np.uint8([[[255, 255, 0]]])\n\nred = cv2.cvtColor(red, cv2.COLOR_RGB2HSV)\ngreen = cv2.cvtColor(green, cv2.COLOR_RGB2HSV)\nblue = cv2.cvtColor(blue, cv2.COLOR_RGB2HSV)\nyellow = cv2.cvtColor(yellow, cv2.COLOR_RGB2HSV)\n\nlow_red = red[0][0][0] - 15, 100, 100\nhigh_red = red[0][0][0] + 15, 255, 255\nlow_green = green[0][0][0] - 35, 100, 100\nhigh_green = green[0][0][0] + 20, 255, 255\nlow_blue = blue[0][0][0] - 40, 100, 100\nhigh_blue = blue[0][0][0] + 20, 255, 255\nlow_yellow = yellow[0][0][0] - 10, 180, 180\nhigh_yellow = yellow[0][0][0] + 10, 255, 255\n\nnum = np.array([0, 0, 0, 0, 300])\nundefined = 300\ncolor = [\"red\", \"blue\", \"green\", \"yellow\",\"undefined\"]\n\nnsplits = 24\nif 'N_SPLITS' in os.environ:\n nsplits = int(os.environ['N_SPLITS'])\n\nsplit_height = int(240/nsplits)\n\nif ((float(240)/nsplits) - split_height)*100 != 0:\n uneven = True\nelse:\n uneven = False\nprint(\"%d horizontal splits\" % (nsplits))\n\nwith picamera.PiCamera() as camera:\n camera.resolution = (320, 240)\n while True:\n start = time.time()\n with picamera.array.PiRGBArray(camera) as output:\n camera.capture(output, 'rgb')\n output = output.array\n output = cv2.cvtColor(output,cv2.COLOR_RGB2HSV) #hsv color space\n # You can now treat output as a normal numpy array\n # Do your magic here\n\n red_mask = cv2.inRange(output, low_red, high_red)\n blue_mask = cv2.inRange(output, low_blue, high_blue)\n green_mask = cv2.inRange(output, low_green, high_green)\n yellow_mask = cv2.inRange(output, low_yellow, high_yellow)\n print(\"The capture's most occuring colors in the splits are:\")\n\n for i in range(0,nsplits-uneven):\n num[0] = np.sum(red_mask[:,i*split_height+1:(i+1)*split_height])\n num[1] = np.sum(blue_mask[:,i*split_height+1:(i+1)*split_height])\n num[2] = np.sum(green_mask[:,i*split_height+1:(i+1)*split_height])\n num[3] = np.sum(yellow_mask[:,i*split_height+1:(i+1)*split_height])\n #print(num[0],num[1],num[2],num[3])\n pos = num.argmax()\n print(color[pos])\n\n if (i == nsplits-1-uneven) and (uneven == True):\n num[0] = np.sum(red_mask[:,(i+1)*split_height+1: ])\n num[1] = np.sum(blue_mask[:,(i+1)*split_height+1: ])\n num[2] = np.sum(green_mask[:,(i+1)*split_height+1: ])\n num[3] = np.sum(yellow_mask[:,(i+1)*split_height+1: ])\n #print(num[0],num[1],num[2],num[3])\n pos = num.argmax()\n 
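# report the dominant color of the trailing slice when 240 is not divisible by nsplits\n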
                    print(color[pos])\n        elapsed_time = time.time()-start\n        sleep(max(0, 1-elapsed_time)) # clamp so sleep() never receives a negative duration\n        #print(elapsed_time)\n","sub_path":"color_detector.py","file_name":"color_detector.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"458050315","text":"\"\"\"\n    File name: fatoracaoLU.py\n    Language: Python3\n\n    Description: Implementation of the LU Factorization Algorithm\n\n    Author: Gabriel Bittencourt Leite\n\"\"\"\n\n# Gaussian elimination\ndef eliminacaoLinhaPivo(A, N, linha, linhaPivo, fator):\n    for coluna in range(0, N):\n        A[linha][coluna] = A[linha][coluna] - fator * A[linhaPivo][coluna]\n\n\n# Initializes an identity matrix of order N\ndef inicializaMatrizIdentidade(N):\n    I = []\n    for i in range(N):\n        I.append([0] * N)\n        I[i][i] = 1\n    return I\n\n\n# Multiplies two matrices\ndef multiplicaMatrizes(A, B):\n\n    nLin_A = len(A)\n    nCol_B = len(B[0])\n\n    C = []\n    for i in range(nLin_A):\n        C.append([None] * nCol_B)\n\n    for i in range(nLin_A):\n        for j in range(nCol_B):\n            acc = 0\n\n            for k in range(nLin_A):\n                acc += A[i][k] * B[k][j]\n            \n            C[i][j] = acc\n        \n    return C\n\n\n# LU factorization\ndef fatoracaoLU(A, N):\n\n    # Initialize the L matrix\n    L = inicializaMatrizIdentidade(N)\n\n    for col in range(0, N):\n        pivo = None\n\n        # Select the pivot (first nonzero element of the column)\n        for lin in range(col, N):\n            if A[lin][col] != 0:\n                pivo = A[lin][col]\n                break\n\n        # Column is null (all elements of the column are zero)\n        if not pivo:\n            continue\n\n        # For each remaining row\n        for linha in range(col + 1, N):\n\n            # Compute the factor for the Gaussian elimination\n            fator = A[linha][col] / pivo\n\n            # Initialize an identity matrix of the same order as A\n            I = inicializaMatrizIdentidade(N)\n\n            # Apply Gaussian elimination to the elementary matrix I\n            eliminacaoLinhaPivo(I, N, linha, col, fator)\n            \n            # Build the L matrix\n            L[linha][col] = fator\n\n            # Build the U matrix\n            A = multiplicaMatrizes(I, A)\n\n\n    return A, L\n\n\n# Computes the solution vector by back substitution\n# on an upper triangular matrix\ndef retrosubstituicaoSuperior(A, B, N):\n\n    # Solution vector\n    S = [0] * N\n\n    # Solve for the last unknown\n    S[N-1] = B[N-1][0] / A[N-1][N-1]\n    # For each row above\n    for i in range(N-2, -1, -1):\n\n        acc = 0\n        for j in range(N-1, i, -1):\n\n            acc += A[i][j] * S[j]\n\n        # Update the solution vector\n        S[i] = (B[i][0] - acc) / A[i][i]\n\n    # Print the solution\n    for i in range(N):\n        print(\"X%d = %10.5f\" %(i, S[i]))\n\n    return S\n\n\n# Computes the solution vector by forward substitution\n# on a lower triangular matrix\ndef retrosubstituicaoInferior(A, B, N):\n\n    # Solution vector\n    S = []\n    for i in range(N):\n        S.append([None])\n\n    # Solve for the first unknown\n    S[0][0] = B[0][0] / A[0][0]\n\n    # For each row below\n    for i in range(0, N,):\n\n        acc = 0\n        for j in range(0, i):\n\n            acc += A[i][j] * S[j][0]\n\n        # Update the solution vector\n        S[i][0] = (B[i][0] - acc) / A[i][i]\n\n\n    # Print the solution\n    for i in range(N):\n        print(\"Y%d = %10.5f\" %(i, S[i][0]))\n\n    return S\n\n\ndef imprimeSistema(A, B, N):\n    for i in range(N):\n        print(\" [\", end=\" \")\n        for j in range(N):\n            print(\"%10.5f\" %(A[i][j]) , end=\" \")\n        print(\"] [%10.5f]\" %(B[i][0]))\n    print()\n\n\ndef imprimeMatriz(A, N):\n    for i in range(N):\n        print(\" [\", end=\" \")\n        for j in range(N):\n            print(\"%10.5f\" %(A[i][j]) , end=\" \")\n        print(\" ]\")\n    print()\n\n\n# Coefficient matrix\nA = [[1, 4, 3],\n     [2, 5, 4],\n     [1, -3, -2]]\n\n# Matrix of independent terms (right-hand side)
\nB = [[ 1],\n     [ 4],\n     [ 5] ]\n\n# Number of equations\nN = len(A)\n\nprint(\"Sistema Inicial\")\nimprimeSistema(A, B, N)\nU, L = fatoracaoLU(A, N)\n\nprint(\"Matriz L\")\nimprimeMatriz(L, N)\n\nprint(\"Matriz U\")\nimprimeMatriz(U, N)\n\nprint(\"Solucao L.Y = B\")\nY = retrosubstituicaoInferior(L, B, N)\n\nprint(\"\\nSolução U.X = Y\")\nX = retrosubstituicaoSuperior(U, Y, N)\n","sub_path":"MetodoFatoracaoLU/fatoracaoLU.py","file_name":"fatoracaoLU.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"616733426","text":"from tkinter import *\nfrom tkinter import filedialog as fd\nimport hashlib\nimport glob\n\nfile=glob.glob(\"D:/*.txt\")\nmy_file = open(\"D:/save.txt\", \"w\")\nfor f in file:\n    with open(f, 'rb') as f3:\n        data48=f3.read()\n    gethash = hashlib.md5(data48).hexdigest()\n    my_file.write(\"f: \"+gethash)\n\n    print(\"f: \" + gethash)\nmy_file.close()\npath_file1=''\npath_file2=''\n\nroot = Tk()\n\nb1= Button(root, text='Выбрать файл')\nb2= Button(root, text='Хешировать')\nl_file1= Label(root, bg='black', fg='white')\nl_file2= Label(root, bg='black', fg='white')\n\ndef openfile(event):\n    global path_file1\n    global path_file2\n    path_file1=fd.askopenfilename()\n    path_file2=fd.askopenfilename()\n    print(path_file1)\n    print(path_file2)\nb1.bind('<Button-1>', openfile) # '<Button-1>' assumed: the event name was stripped from the source\n\ndef hash(event):\n    h_file1 = hashlib.md5()\n    h_file2 = hashlib.md5()\n    BUF_SIZE=1\n    if path_file1!='' or path_file2!='':\n        with open(path_file1, 'rb') as f:\n            while True:\n                data = f.read(BUF_SIZE)\n                if not data:\n                    break\n                h_file1.update(data)\n        with open(path_file2, 'rb') as f2:\n            while True:\n                data2 = f2.read(BUF_SIZE)\n                if not data2:\n                    break\n                h_file2.update(data2)\n                #hashlib.md5(data2)\n\n        print(\"Файлы идентичны? 
\"+str(h_file1.hexdigest()==h_file2.hexdigest()))\n l_file1['text']='Хэширование файла ' + path_file1 + ' '+h_file1.hexdigest()\n l_file2['text']='Хэширование файла ' + path_file2 + ' '+h_file2.hexdigest()\n\nb2.bind('', hash)\n\n\nb1.pack()\nb2.pack()\nl_file1.pack()\nl_file2.pack()\n\nroot.mainloop()\n","sub_path":"venv/Lab1.py","file_name":"Lab1.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"510429825","text":"\"\"\"\nGiven an array of numbers, find the maximum sum of any contiguous subarray of\n the array.\n\nFor example, given the array [34, -50, 42, 14, -5, 86], the maximum sum would\n be 137, since we would take elements 42, 14, -5, and 86.\n\nGiven the array [-5, -1, -8, -9], the maximum sum would be 0, since we would \nnot take any elements.\n\nDo this in O(N) time.\n\"\"\"\n\ndef greatestSum(arr: list)->int:\n if not arr or max(arr) < 0:\n return 0\n\n current_max_sum = arr[0]\n overall_max_sum = arr[0]\n\n for num in arr[1:]:\n current_max_sum = max(num, current_max_sum + num)\n overall_max_sum = max(overall_max_sum, current_max_sum)\n\n return overall_max_sum\nif __name__ == \"__main__\":\n assert greatestSum([34, -50, 42, 14, -5, 86]) == 137\n assert greatestSum([-34, -50, -42, -14, -5, 86]) == 86\n assert greatestSum([34, -50, -42, -14, -5, -86]) == 34\n assert greatestSum([-5, -1, -8, -9]) == 0\n ","sub_path":"challenge49.py","file_name":"challenge49.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"77287270","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom . import config\nfrom . 
import types\n\n\n# Module API\n\ndef infer(headers, values, row_limit=None, explicit=False, primary_key=None):\n    \"\"\"Return a schema from the passed headers and values.\n\n    Args:\n        headers: a list of header names\n        values: a reader over data, yielding each row as a list of values\n        row_limit: stop scanning rows once this many have been seen.\n        explicit: be explicit.\n        primary_key: pass in a primary key or iterable of keys.\n\n    Returns:\n        A Table Schema as a Python dict.\n\n    \"\"\"\n\n    guesser = _TypeGuesser()\n    resolver = _TypeResolver()\n    schema = {'fields': []}\n    type_matches = {}\n\n    if primary_key:\n        schema['primaryKey'] = primary_key\n\n    for header in headers:\n        descriptor = {\n            'name': header,\n            'title': '',\n            'description': '',\n        }\n\n        constraints = {}\n        if explicit:\n            constraints.update({'required': True})\n\n        if header == primary_key:\n            constraints.update({'unique': True})\n\n        if constraints:\n            descriptor['constraints'] = constraints\n\n        schema['fields'].append(descriptor)\n\n    for index, row in enumerate(values):\n\n        if row_limit and (index > row_limit):\n            break\n\n        else:\n\n            # Normalize rows with invalid dimensions for sanity\n            row_length = len(row)\n            headers_length = len(headers)\n\n            if row_length > headers_length:\n                row = row[:len(headers)]\n\n            if row_length < headers_length:\n                diff = headers_length - row_length\n                fill = [''] * diff\n                row = row + fill\n\n            # build a column-wise lookup of type matches\n            # (use a distinct loop variable so the row index above is not shadowed)\n            for col_index, value in enumerate(row):\n                rv = guesser.cast(value)\n\n                if type_matches.get(col_index):\n                    type_matches[col_index].append(rv)\n                else:\n                    type_matches[col_index] = [rv]\n\n    # choose a type/format for each column based on the matches\n    for index, results in type_matches.items():\n        rv = resolver.get(results)\n        schema['fields'][index].update(**rv)\n\n    return schema\n\n\n# Internal\n\n_TYPE_ORDER = [\n    'duration',\n    'geojson',\n    'geopoint',\n    'object',\n    'array',\n    'datetime',\n    'time',\n    'date',\n    'integer',\n    'number',\n    'boolean',\n    'string',\n    'any',\n]\n\n\nclass _TypeGuesser(object):\n    \"\"\"Guess the type for a value.\n\n    Returns:\n        A tuple of ('type', 'format')\n\n    \"\"\"\n\n    def cast(self, value):\n        for name in _TYPE_ORDER:\n            cast = getattr(types, 'cast_%s' % name)\n            result = cast('default', value)\n            if result != config.ERROR:\n                return (name, 'default')\n\n\nclass _TypeResolver(object):\n    \"\"\"Get the best matching type/format from a list of possible ones.\n    \"\"\"\n\n    @staticmethod\n    def _sort_key(item):\n        return (item[1], _TYPE_ORDER.index(item[0][0]))\n\n    def get(self, results):\n\n        variants = set(results)\n\n        # only one candidate... 
that's easy.\n if len(variants) == 1:\n rv = {\n 'type': results[0][0],\n 'format': results[0][1],\n }\n\n else:\n counts = {}\n for result in results:\n if counts.get(result):\n counts[result] += 1\n else:\n counts[result] = 1\n\n # tuple representation of `counts` dict sorted by values\n sorted_counts = sorted(counts.items(), key=self._sort_key,\n reverse=True)\n rv = {\n 'type': sorted_counts[0][0][0],\n 'format': sorted_counts[0][0][1]\n }\n\n return rv\n","sub_path":"tableschema/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"27536584","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nsns.set_style('darkgrid')\r\n\r\ndf = pd.read_csv(\"data.csv\", header=None)\r\ndf.rename(columns={0:\"character\"}, inplace=True)\r\ndf = df.apply(lambda x: x.str.strip() if x.dtype == \"object\" else x)\r\nprint(df.head()) \r\n\r\n\r\ntier_dict = {\r\n \"S+\":14,\r\n \"S\":13,\r\n \"S-\":12,\r\n \"A+\":11,\r\n \"A\":10,\r\n \"A-\":9,\r\n \"B+\":8,\r\n \"B\":7,\r\n \"B-\":6,\r\n \"C+\":5,\r\n \"C\":4,\r\n \"C-\":3,\r\n \"D+\":2,\r\n \"D\":1\r\n }\r\n\r\n#df[1].map(tier_dict)\r\n#print(df.iloc[5,1], type(df.iloc[5,1]))\r\n\r\ndf = df.replace(tier_dict)\r\ndf.set_index('character', inplace=True)\r\ndf = df.transpose()\r\n\r\nprint(df)\r\nprint(df.describe())\r\nmeantier = df.mean()\r\nstdtier = df.std()\r\ndesctier = pd.concat([meantier, stdtier], axis=1)\r\ndesctier.columns = [\"mean\", \"std\"]\r\nprint(desctier)\r\nprint(desctier.sort_values(by=[\"mean\"]))\r\n# df['characters'] = df.index\r\nplt.errorbar(desctier.index, desctier[\"mean\"], yerr=desctier[\"std\"], fmt='o')\r\nplt.scatter(desctier[\"mean\"], desctier[\"std\"])\r\nplt.show()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"532550037","text":"import time \nimport RPi.GPIO as GPIO \nGPIO.setmode(GPIO.BCM) \n\nledPin = 18 \nsek = 2 \nGPIO.setup(ledPin, GPIO.OUT)\n\n\ntry: \t\t \n while True: \n GPIO.output(ledPin, False) \n print(\"led aus\")\n time.sleep(sek) \n GPIO.output(ledPin, True)\n print(\"led ein\")\n time.sleep(sek) \n \nfinally: \n print(\"Cleaning up\")\n GPIO.cleanup()\n","sub_path":"code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"302943222","text":"#!/usr/local/bin/python3\n\nimport sys\nimport os\nimport re\n\nsrcPath = '/Users/zhuxu/Documents/tmp/src'\ntargetPath = '/Users/zhuxu/Documents/tmp/target'\n\ndef copyName(srcPath, targetPath):\n\tif len(os.listdir(srcPath)) != len(os.listdir(targetPath)):\n\t\tprint(targetPath + '文件数和源目录不一致')\n\t\treturn\n\n\tsrcArr = os.listdir(srcPath)\n\tsrcArr.sort()\n\ttargetArr = os.listdir(targetPath)\n\ttargetArr.sort()\n\tfor i in range(0, len(srcArr)):\n\t\t# print(srcArr[i])\n\t\t# print(targetArr[i])\n\t\tsrcPre = re.sub(r'^(Friends)\\.S([\\d]*)E([\\d]*)(\\.[^\\.]+)+$', '\\\\2\\\\3', srcArr[i])\n\t\ttargetPre = re.sub(r'^(friends)\\.s([\\d]*)e([\\d]*)(\\.[^\\.]+)+$', '\\\\2\\\\3', targetArr[i])\n\t\tif srcPre != targetPre:\n\t\t\tprint(srcPre + '不一致')\n\t\t\tbreak\n\t\tnewTargetPath = targetPath + os.path.sep + srcArr[i]\n\t\tnewTargetPath = newTargetPath.replace('.srt', '.mkv')\n\t\tos.rename(targetPath + os.path.sep + 
targetArr[i], newTargetPath)\n\t\t# print(newTargetPath)\n\n# copyName('g:\\\\[老友记].friends\\\\srt6', 'g:\\\\[老友记].friends\\\\Friends.S06')\n# copyName('g:\\\\[老友记].friends\\\\srt7', 'g:\\\\[老友记].friends\\\\Friends.S07')\ncopyName('g:\\\\[老友记].friends\\\\srt8', 'g:\\\\[老友记].friends\\\\Friends.S08')\n# copyName('g:\\\\[老友记].friends\\\\srt9', 'g:\\\\[老友记].friends\\\\Friends.S09')\n# copyName('g:\\\\[老友记].friends\\\\srt10', 'g:\\\\[老友记].friends\\\\Friends.S10')\n\n\n\n\n\n\n\n\n\n\n","sub_path":"py_rename_folder_files.py","file_name":"py_rename_folder_files.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"255940261","text":"class ReadGraph:\n    def __init__(self, graph, filename):\n        self.graph = graph\n        self.filename = filename\n        with open(filename, \"r+\") as f:\n            lines = f.readlines()\n            for line_index in range(len(lines)):\n                if line_index == 0:\n                    (V, E) = lines[line_index].split(\" \")\n                    print('%s %s'%(V, E))\n                    assert self.graph.V() == int(V)\n                else:\n                    (v, w) = lines[line_index].split(\" \")\n                    self.graph.add_edge(int(v), int(w))\n            f.close()\n\n\n","sub_path":"graph/read_graph.py","file_name":"read_graph.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"445480578","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function, division\n\nimport tempfile\nimport zipfile\nimport shutil\nimport os\nimport sys\nimport platform\n\nfrom os.path import join as path_join, relpath\nfrom collections import OrderedDict\n\nSYSTEM = platform.system()\nDIR = os.path.dirname(os.path.abspath(__file__))\n\ndef load_file(filename):\n\twith open(path_join(DIR, 'package', filename), 'rb') as fp:\n\t\treturn fp.read()\n\ndef update_zip(archive, filemap):\n\ttempdir = tempfile.mkdtemp()\n\treplaced = set()\n\ttry:\n\t\ttempname = os.path.join(tempdir, 'tmp.zip')\n\n\t\twith zipfile.ZipFile(archive, 'r') as zipread:\n\t\t\twith zipfile.ZipFile(tempname, 'w') as zipwrite:\n\t\t\t\tfor item in zipread.infolist():\n\t\t\t\t\tif item.filename in filemap:\n\t\t\t\t\t\tdata = load_file(item.filename)\n\t\t\t\t\t\treplaced.add(item.filename)\n\t\t\t\t\t\tprint('updating file:', item.filename)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdata = zipread.read(item.filename)\n\t\t\t\t\tzipwrite.writestr(item, data, item.compress_type)\n\n\t\t\t\tfor filename in filemap:\n\t\t\t\t\tif filename not in replaced:\n\t\t\t\t\t\tprint('adding file:', filename)\n\t\t\t\t\t\tdata = load_file(filename)\n\t\t\t\t\t\t# write brand-new entries under their own name\n\t\t\t\t\t\tzipwrite.writestr(filename, data)\n\n\t\tshutil.move(tempname, archive)\n\n\tfinally:\n\t\tshutil.rmtree(tempdir)\n\ndef create_filemap(pkgdir):\n\tfilemap = OrderedDict()\n\tfor dirname, dirs, files in os.walk(pkgdir):\n\t\tfor filename in files:\n\t\t\tpath = relpath(path_join(dirname, filename), pkgdir)\n\t\t\tfilemap[path] = True\n\treturn filemap\n\nif SYSTEM == 'Linux':\n\tpaths = [\n\t\t\".local/share/Steam/SteamApps/common/Save the Dodos/package.nw\",\n\t\t\".steam/Steam/SteamApps/common/Save the Dodos/package.nw\"\n\t]\n\n\tdef find_path_ignore_case(basedir, components):\n\t\tpath = basedir\n\t\tfor name in components.split('/'):\n\t\t\tlower_name = name.lower()\n\t\t\tname_map = dict((filename.lower(), filename)\n\t\t\t\tfor filename in os.listdir(path))\n\n\t\t\ttry:\n\t\t\t\tfilename = name_map[lower_name]\n\t\t\texcept KeyError:\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tpath = path_join(path, 
filename)\n\n\t\treturn path\n\n\tdef find_archive():\n\t\thome = os.getenv(\"HOME\")\n\n\t\tfor path in paths:\n\t\t\tarchive = find_path_ignore_case(home, path)\n\t\t\tif archive is not None:\n\t\t\t\treturn archive\n\n\t\traise FileNotFoundError('package.nw not found.')\n\nelif SYSTEM == 'Windows':\n\ttry:\n\t\tfrom winreg import OpenKeyEx, QueryValueEx, KEY_QUERY_VALUE, \\\n\t\t                   REG_SZ, HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE\n\texcept ImportError:\n\t\tfrom _winreg import OpenKeyEx, QueryValueEx, KEY_QUERY_VALUE, \\\n\t\t                    REG_SZ, HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE\n\n\treg_keys = [\n\t\t# Have confirmed sightings of these keys:\n\t\t( HKEY_LOCAL_MACHINE, \"Software\\\\Valve\\\\Steam\", \"InstallPath\" ),\n\t\t( HKEY_LOCAL_MACHINE, \"Software\\\\Wow6432node\\\\Valve\\\\Steam\", \"InstallPath\" ),\n\t\t( HKEY_CURRENT_USER, \"Software\\\\Valve\\\\Steam\", \"SteamPath\" ),\n\n\t\t# All the other possible combinations, just to try everything:\n\t\t( HKEY_CURRENT_USER, \"Software\\\\Wow6432node\\\\Valve\\\\Steam\", \"SteamPath\" ),\n\t\t( HKEY_LOCAL_MACHINE, \"Software\\\\Valve\\\\Steam\", \"SteamPath\" ),\n\t\t( HKEY_LOCAL_MACHINE, \"Software\\\\Wow6432node\\\\Valve\\\\Steam\", \"SteamPath\" ),\n\t\t( HKEY_CURRENT_USER, \"Software\\\\Valve\\\\Steam\", \"InstallPath\" ),\n\t\t( HKEY_CURRENT_USER, \"Software\\\\Wow6432node\\\\Valve\\\\Steam\", \"InstallPath\" )\n\t]\n\n\tdef get_path_from_registry(hKey, lpSubKey, lpValueName):\n\t\thSubKey = OpenKeyEx(hKey, lpSubKey, 0, KEY_QUERY_VALUE)\n\t\tvalue, reg_type = QueryValueEx(hSubKey, lpValueName)\n\t\tif reg_type != REG_SZ:\n\t\t\traise TypeError('Registry key has wrong type')\n\t\treturn path_join(value, \"steamapps\\\\common\\\\Save the Dodos\\\\package.nw\")\n\n\tdef find_archive():\n\t\tfor hKey, lpSubKey, lpValueName in reg_keys:\n\t\t\ttry:\n\t\t\t\tpath = get_path_from_registry(hKey, lpSubKey, lpValueName)\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\treturn path\n\n\t\traise FileNotFoundError('package.nw not found.')\n\nelif SYSTEM == 'Darwin':\n\traise OSError('Mac OS X not yet supported.')\nelse:\n\traise OSError('System not supported: %s' % SYSTEM)\n\ndef main():\n\ttry:\n\t\tpkgdir = path_join(DIR, 'package')\n\t\tfilemap = create_filemap(pkgdir)\n\t\tarchive = find_archive()\n\t\tprint('patching archive:', archive)\n\t\tupdate_zip(archive, filemap)\n\texcept Exception as ex:\n\t\tprint(ex)\n\t\treturn 1\n\telse:\n\t\treturn 0\n\tfinally:\n\t\tif SYSTEM == 'Windows':\n\t\t\tprint(\"Press ENTER to continue...\")\n\t\t\tsys.stdin.readline()\n\nif __name__ == '__main__':\n\tsys.exit(main())\n","sub_path":"save_the_zazus.py","file_name":"save_the_zazus.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"83608341","text":"import numpy as np\nfrom setting import *\nfrom modules.rattle import constrained_r, constrained_v\nfrom modules.normal import get_norm , prop_norm, get_stand\nfrom modules.thermostat import thermal_step, debug_thermal_step, thermal_step2\n\n\ndef zero_init():\n    return np.zeros((N, P, dim)), np.zeros((N, P, dim))\n\n\ndef initialize(positions = None):\n    \"\"\"\n    return initialized data in shape (P,N,dim)\n    still not tested for non-random initialization\n    more than 2 dimensions not yet implemented\n    \"\"\"\n    if positions is None:\n        pos = np.random.rand(P, dim)*large_scale-large_scale/2\n    else: \n        pos = positions\n    lattice = l*np.asarray([[np.cos(2*np.pi*i/N), np.sin(2*np.pi*i/N)] for i in range(N)])\n    noise = 
np.random.rand(P, N, dim)*disp_scale-disp_scale/2\n    v_n = np.random.rand(N, P, dim)*vel_beads_scale-vel_beads_scale/2\n    q_n = (lattice + noise).swapaxes(0, 1)+pos\n    return q_n, v_n\n    \n\ndef dV(q):\n    \"\"\"\n    If you want the free case just set ext_omega2 == 0.\n    \"\"\"\n    dv = np.zeros((N, P, dim))\n    for i in range(N):\n        dv[i] = omega2*(2*q[i]-q[(i-1) % N]-q[(i+1) % N])+ext_omega2*q[i]\n    return dv\n\n\ndef dVint(q):\n    dv = np.zeros((N, P, dim))\n    for i in range(N):\n        dv[i] = omega2*(2*q[i]-q[(i-1) % N]-q[(i+1) % N])\n    return dv\n\n    \ndef dVext(q):\n    \"\"\"\n    Just the external potential, probably easier to just call it directly\n    \"\"\"\n    return ext_omega2*q\n\n\ndef dVH2(q):\n    \"\"\"\n    #### NOTE: added /2 to simulate the reduced mass\n    \"\"\"\n    return ext_omega2*np.stack((q[:, 0, :]-q[:, 1, :], q[:, 1, :]-q[:, 0, :])).swapaxes(0, 1)/2\n\n    \ndef verlet_step(q, v, dV0, beta, fix=None, pos=None, fixc=None, posc=None, lenf=None, lenfc=None):\n    \"\"\"\n    vm == v_n-1/2\n    vp == v_n+1/2\n    lamda_v = lamda_v_n from the previous step\n    lamda_vp = new lamda_v_(n+1)\n    Note: there are a few problems with the n+-1/2 steps, solved by adapting q; pay attention!\n    \"\"\"\n    # evaluate lamda_r(n)\n    lamda_r = constrained_r(q, v, dV0, fix, pos, fixc, posc, lenf, lenfc)\n    # evaluate v(n+1/2)\n    vp = v-dV0*dt/2-dt/2*lamda_r\n    # evaluate q(n+1)\n    q1 = q+dt*vp\n    dV1 = dV(q1)\n    # evaluate lamda_v(n+1)\n    lamda_vp = constrained_v(q1, vp, dV1, fix, pos, fixc, posc, lenf, lenfc)\n    # evaluate v(n+1)\n    v1 = vp-dt/2*dV1-dt/2*lamda_vp\n    return q1, v1, dV1\n\n\ndef verlet_step2(q, v, dV0int, dV0ext, beta, fix=None, pos=None, fixc=None, posc=None, lenf=None, lenfc=None, therm= None):\n    \"\"\"\n    new implementation of verlet_step using normal-mode propagation.\n    problems need to be fixed; try using the Python debugger\n    I think that you should use dV0 and not dV0ext in the determination \n    of lamda_r\n    ADDED dV0int in arguments\n    \"\"\"\n    # single thermalization in rattle\n    if therm:\n        vt = thermal_step2(v, beta)\n    else:\n        vt = v\n    # simulate thermal action as evolution through an effective potential \n    #dV_eff = -2*(vt-v)/dt\n    # evaluate lamda_r(n) assuming the effective potential acted\n    # modified with v-> vt in lambda_r\n    lamda_r = constrained_r(q, vt, dV0ext+dV0int, fix, pos, fixc, posc, lenf, lenfc)\n    # evaluate v(n+1/2) \n    vp = vt-dV0ext*dt/2-dt/2*lamda_r\n    # normal mode change variable\n    u, vpu = get_norm(q, vp)\n    # normal mode propagation\n    u1, vpu1 = prop_norm(u, vpu)\n    # back to standard coordinate\n    q1, vp = get_stand(u1, vpu1)\n    dV1ext , dV1int = dVext(q1), dVint(q1)\n    # evaluate lamda_v(n+1)\n    lamda_vp = constrained_v(q1, vp, dV1ext+dV1int, fix, pos, fixc, posc, lenf, lenfc)\n    # evaluate v(n+1)\n    v1=vp-dt/2*dV1ext-dt/2*lamda_vp\n    return q1, v1, dV1int, dV1ext\n\n    \ndef verlet_step1(q, v, dV0ext, beta, fix=None, pos=None, fixc=None, posc=None, lenf=None, lenfc=None, therm=None):\n    \"\"\"\n    new implementation of verlet_step using normal-mode propagation.\n    problems need to be fixed; try using the Python debugger\n    \"\"\"\n    # first thermalization\n    if therm:\n        v = thermal_step(v, beta)\n    # evaluate lamda_r(n)\n    lamda_r = constrained_r(q, v, dV0ext, fix, pos, fixc, posc, lenf, lenfc)\n    # evaluate v(n+1/2)\n    vp = v-dV0ext*dt/2-dt/2*lamda_r\n    # normal mode change variable\n    u, vpu = get_norm(q, vp)\n    # normal mode propagation\n    u1, vpu1 = prop_norm(u, vpu)\n    # back to standard coordinate\n    q1, vp = get_stand(u1, vpu1)\n    dV1ext = dVext(q1)\n    # evaluate lamda_v(n+1)\n    lamda_vp = constrained_v(q1, vp, dV1ext, fix, pos, fixc, posc, lenf, 
lenfc)\n    # evaluate v(n+1)\n    v1 = vp-dt/2*dV1ext-dt/2*lamda_vp\n    # second thermalization\n    if therm:\n        v1 = thermal_step(v1, beta)\n    return q1, v1, dV1ext\n\n    \ndef debug_verl_step(q, v, dV0ext, beta, fix=None, pos=None, fixc=None, posc=None, lenf=None, lenfc=None, therm= None):\n    # In debug mode q and v are actually the normal modes\n    dV1ext=dV0ext\n    # first thermalization\n    vp = debug_thermal_step(v)\n    # normal mode propagation\n    q1, v1 = prop_norm(q, vp)\n    # second thermalization\n    v1 = debug_thermal_step(v1)\n    return q1, v1, dV1ext\n\n\ndef verlet_algorithm(q_0, v_0, beta, fix=None, pos=None, fixc=None, posc=None, lenf=None, lenfc=None, norm=0, therm=None, debug=False):\n    Q_n, V_n = [], []\n    q_n, v_n = q_0, v_0\n    dV0 = dV(q_n) # problem in dimension different than 2 maybe fix later\n    dV0ext = dVext(q_n)\n    dV0int = dVint(q_n)\n    if debug:\n        u_n, vu_n = get_norm(q_n, v_n)\n    for _ in range(steps):\n        Q_n.append(q_n)\n        V_n.append(v_n)\n        # remove this thing and add it in the constant\n        if fix:\n            pos = np.zeros((N, P, dim))\n            for i, j in fix:\n                pos[i, j] = q_0[i, j]\n        if fixc:\n            posc = np.zeros((P,dim))\n            for j in fixc:\n                posc[j] = q_0.swapaxes(0,1)[j].sum(0)\n        if debug: \n            u_n, vu_n, dV0 = debug_verl_step(u_n, vu_n, dV0, beta, fix=fix, pos=pos, fixc=fixc, posc=posc, lenf=lenf, lenfc=lenfc)\n            q_n, v_n = get_stand(u_n, vu_n)\n            continue\n        if norm == 0:\n            q_n, v_n, dV0 = verlet_step(q_n, v_n, dV0, beta, fix=fix, pos=pos, fixc=fixc, posc=posc, lenf=lenf, lenfc=lenfc)\n        elif norm:\n            # added dVint and verlet_step2\n            q_n, v_n, dV0int, dV0ext = verlet_step2(q_n, v_n, dV0int, dV0ext, beta, fix=fix, pos=pos, fixc=fixc, posc=posc, lenf=lenf, lenfc=lenfc, therm=therm)\n    return np.asarray(Q_n), np.asarray(V_n)","sub_path":"modules/verlet.py","file_name":"verlet.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"242747829","text":"#O(log(n)), space O(1)\ndef binarySearch(nums, target): \n    left, right = 0,len(nums) - 1 \n    while left <= right: \n        mid = (left + right) // 2\n        if target == nums[mid]: \n            return mid \n        elif nums[mid] > target: #if mid point value is larger than target\n            right = mid - 1 # search left\n        else: \n            left = mid + 1 # search right if value is smaller \n    return -1 \n\nn = [-1,0,3,5,9,12]\ntarget = 9\n# 9 exists in nums and its index is 4 \nprint(binarySearch(n,target))\n\n''' \nbinary search = check left and right and then the mid point (pivot)\n\nwhile left is less than or equal to right \ndefine mid point \nif target is equal to the mid point of the nums list at that index \nreturn mid point \nif mid point of the list is smaller than target value, search right side \nif mid point of the list is larger than target value, search left side \n\n'''","sub_path":"LeetCode/BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"492930000","text":"import asyncio\nimport json\nfrom django.contrib.auth import get_user_model\nfrom channels.consumer import SyncConsumer\nfrom channels.db import database_sync_to_async\nimport base64\nfrom cv2 import cv2\nimport dlib\nimport numpy as np\n# from channels.consumer import WebsocketConsumer\n# from .models import Thread, ChatMessage\n\n\n    \n# Poster to inject\n\n# poster = cv2.imread(\"Posters/food panda_test.png\")\n# print(poster)\n# pos_h, pos_w, _ = poster.shape\n\nclass StreamConsumer(SyncConsumer):\n    \n    \n\n    #Function to 
get the upper and lower hsv range of values\n def get_hsv_range(self, hsv, reference_point):\n\n threshold = 35\n\n hsvPoint = hsv[reference_point]\n\n lower = np.array([0, hsvPoint[1] - threshold, hsvPoint[2] - threshold])\n upper = np.array([255, hsvPoint[1] + threshold, hsvPoint[2] + threshold])\n\n return lower, upper\n\n \n def websocket_connect(self, event):\n print(\"connected\", event)\n self.send({\n \"type\": \"websocket.accept\"\n })\n\n \n \n \n # encodedBytes = base64.b64encode(frame.encode(\"utf-8\"))\n # encodedStr = str(encodedBytes, \"utf-8\")\n\n # b64_src = 'data:image/jpg;base64,'\n # stringData = b64_src + encodedStr\n # print(\"hahaha\",stringData)\n \n # await self.send({\n # \"type\": \"websocket.send\",\n # \"text\" : stringData\n # })\n\n \n\n def websocket_receive(self, event):\n print(\"haha\")\n # Poster to inject\n poster = cv2.imread(\"Posters/food panda_test.png\")\n print(poster)\n pos_h, pos_w, _ = poster.shape\n ratio = pos_h/pos_w\n\n\n\n # Capturing the video frames\n cap = cv2.VideoCapture('Videos/test3.mp4')\n #cap = cv2.VideoCapture(0)\n\n # Getting the height and width of the video frame\n fr_h, fr_w, _ = cap.read()[1].shape\n\n # Getting the center coordinates of the frame to check the placement of the poster\n fr_xc, fr_yc = fr_w//2, fr_h//2\n\n # Setting up the switch: Switch is disabled as soon as a face is detected\n switch = True\n\n # Initializing to print the frame number\n framenum = 1\n\n # Initializing Face Detector\n detector = dlib.get_frontal_face_detector()\n\n switcher = True\n\n # Looping over all the video frames\n while True:\n # Reading the frames\n ret, frame = cap.read()\n\n # Break condition at the end of the video\n if not ret:\n break\n\n # Getting the faces in the frame\n faces = detector(frame)\n\n # If there is no any face and switch is True, then do nothing, just continue the loop\n if (not faces) and switch:\n cv2.imshow('frame', frame)\n print(f\"Frame {framenum}: No Face Detected!\")\n framenum += 1\n\n key = cv2.waitKey(20)\n if key == ord('q'):\n cv2.destroyAllWindows()\n break\n cap.release()\n\n continue\n\n # If there is a face and switch is True, get the top-left and bottom-right coordinates of the face and make switch False\n # Note: We define the injecting area as per the location of the face\n if faces and switch:\n\n # Looping over the faces in the frame\n for face in faces:\n\n # Switch is False as the face is detected\n switch = False\n\n # Getting the x1,y1 and x2,y2 coordinates of the face detected\n x1, y1, x2, y2 = face.left(), face.top(), face.right(), face.bottom()\n\n # Getting the center coordinates of the face rectangle\n fc_xc, fc_yc = (x1 + x2)//2, (y1 + y2)//2\n\n #If the face-center is less than frame center (in the left side), then poster is injected on the right. 
And vice versa\n if fc_xc < fr_xc:\n print('Face on left and poster on right')\n top, right = int(fr_h * 0.05), int(fr_w * 0.92)\n\n if (fr_h >= fr_w) and (pos_h >= pos_w):\n print(\"Frame and poster both are potrait\")\n left = right - (fr_w//4)\n bottom = top + int((right-left) * ratio)\n\n elif (fr_h >= fr_w) and (pos_h < pos_w):\n print(\"Frame is potrait and poster is landscape\")\n bottom = top + (fr_w//4)\n left = right - int((bottom - top) / ratio)\n\n elif (fr_h < fr_w) and (pos_h >= pos_w):\n print(\"Frame is landscape and poster is potrait\")\n left = right - (fr_h//4) \n bottom = top + int((right - left) * ratio)\n\n else:\n print(\"Frame and poster both are landscape\")\n bottom = top + (fr_h//4)\n left = right - int((bottom - top) / ratio)\n\n reference_point = right, top\n\n else:\n print('Face on right and poster on left')\n top, left = int(fr_h * 0.05), int(fr_w * 0.08)\n\n if (fr_h >= fr_w) and (pos_h >= pos_w):\n print(\"Frame and poster both are potrait\")\n right = left + (fr_w//4)\n bottom = top + int((right - left) * ratio)\n\n elif (fr_h >= fr_w) and (pos_h < pos_w):\n print(\"Frame is potrait and poster is landscape\")\n bottom = top + (fr_w//4)\n right = left + int((bottom - top) / ratio) \n\n elif (fr_h < fr_w) and (pos_h >= pos_w):\n print(\"Frame is landscape and poster is potrait\")\n right = left + (fr_h//4) \n bottom = top + int((right - left) * ratio)\n\n else:\n print(\"Frame and poster both are landscape\")\n bottom = top + (fr_h//4)\n right = left + int((bottom - top) / ratio)\n\n reference_point = left, top\n\n # Extracting the target-area from the frame to inject the poster\n area = frame[top:bottom, left:right, :]\n\n # Resizing the poster as per the defined area above. Note: cv2.resize() takes in as (w,h)\n poster = cv2.resize(poster, (right - left, bottom - top))\n\n # Convert the whole frame into HSV COLORSPACE\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n #Getting the lower and upper values\n lower, upper = self.get_hsv_range(hsv, reference_point)\n\n # Create kernel for image dilation\n kernel = np.ones((1,1), np.uint8)\n\n # Creating the mask using for the HSV frame using the upper and lower values\n mask = cv2.inRange(hsv, lower, upper)\n\n # Perform dilation on the mask to reduce noise\n dil = cv2.dilate(mask, kernel, iterations=5)\n\n # Now extract the area from the HSV frame with individual 3 channels\n mini_dil = np.zeros_like(area)\n mini_dil[:, :, 0] = dil[top: bottom, left: right]\n mini_dil[:, :, 1] = dil[top: bottom, left: right]\n mini_dil[:, :, 2] = dil[top: bottom, left: right]\n\n # Create the copy of the poster\n poster_copy = poster.copy()\n\n # Set pixel values of the poster_coy to 1 where pixel value of the mask is 0\n poster_copy[mini_dil == 0] = 1\n\n # Now set the pixel values in the target area to 1 where the pixel values of the poster_copy is not 1\n area[poster_copy != 1] = 1\n\n # Merge the poster_copy and the target area\n area = area * poster_copy\n\n # Now insert the final poster into the main frame\n frame[top: bottom, left:right, :] = area\n\n # Showing the final frame \n # cv2.imshow('frame', frame)\n print(f\"Frame {framenum}: Done!\")\n framenum += 1\n\n\n # cam = cv2.VideoCapture(0)\n # while cam:\n # img = cam.read()[1]\n # img = cam.read()[1]\n # cv2.imshow(\"Window\",img)\n \n # img = img.encode(\"utf-8\")\n finalStr = str(base64.b64encode(frame))\n print(finalStr)\n self.send({\n \"type\": \"websocket.send\",\n 'text': finalStr\n })\n\n # Waiting for the key to be pressed by the user. 
If key is 'q' then quit everything\n            key = cv2.waitKey(20)\n            if key == ord('q'):\n                cv2.destroyAllWindows()\n                break\n            cap.release()\n\n\n\n\n\n\n\n\n    def websocket_disconnect(self, event):\n        print(\"disconnected\", event)\n\n\n    \n\n\n","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":9200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"325890985","text":"# -*- coding: utf-8 -*-\nfrom openerp import api, fields, models\nfrom openerp.tools.translate import _\n\n\nclass PaymentOrder(models.Model):\n    _name = 'payment.order'\n\n    name = fields.Char(\n        string='Order ID'\n    )\n\n    total_amount = fields.Float(\n        string='Total Amount'\n    )\n\n    payment_type = fields.Selection(\n        string='Payment Type',\n        selection='_get_payment_type',\n    )\n\n    payment_status = fields.Selection(\n        string='Payment Status',\n        selection='_get_payment_status',\n    )\n\n    order_status = fields.Selection(\n        string='Order Status',\n        selection='_get_order_status',\n    )\n\n    product_name = fields.Char(\n        string='Product Name',\n    )\n\n    create_time = fields.Datetime(\n        string=\"Create Time\",\n    )\n\n    update_time = fields.Datetime(\n        string=\"Update Time\",\n    )\n\n    user_id = fields.Char(\n        string=\"User ID\",\n    )\n\n    account_id = fields.Char(\n        string=\"Account ID\",\n    )\n\n    description = fields.Char(\n        string=\"Description\",\n    )\n\n    @api.model\n    def _get_payment_type(self):\n        return [\n            ('alipay', _('Alipay')),\n            ('weixin', _('Weixin Pay')),\n            ('wangbipay', _('Wang Bi Pay')),\n        ]\n\n    @api.model\n    def _get_payment_status(self):\n        return [\n            ('not_pay', _('Not Pay')),\n            ('paid', _('Paid')),\n            ('wait_pay', _('Wait Pay')),\n        ]\n\n    @api.model\n    def _get_order_status(self):\n        return [\n            ('draft', _('Draft')),\n            ('done', _('Done')),\n            ('cancel', _('Cancel')),\n            ('return', _('Return')),\n        ]\n","sub_path":"payment_gateway/models/payment_order.py","file_name":"payment_order.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"422430673","text":"import cv2\nimport numpy as np\nimport cvzone\n\nclass Deployment:\n\n    def __init__(self, base_directory, context):\n        print(\"Initialising My Deployment\")\n        # Loading overlay images and classifier\n        self.christmas_hat_image = cv2.imread(\"christmas_hat2.png\", cv2.IMREAD_UNCHANGED)\n        self.christmas_border = cv2.imread(\"christmas_border.png\", cv2.IMREAD_UNCHANGED)\n        self.christmas_decoration = cv2.imread(\"christmas_decoration.png\", cv2.IMREAD_UNCHANGED)\n        self.cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\n    def request(self, data):\n        print(\"Processing request for My Deployment\")\n        print(\"Reading input image.\")\n        frame = cv2.imread(data[\"input_image\"])\n\n        # Getting dimensions of the base image\n        try:\n            height, width, channels = frame.shape\n        except:\n            height, width = frame.shape\n\n        print(\"Adding Christmas frame and decoration to image.\")\n        border_resize = cv2.resize(self.christmas_border, (width, height))\n        decoration_resize = cv2.resize(self.christmas_decoration, (width, int(height/4)))\n        frame = cvzone.overlayPNG(frame, border_resize, [0,0])\n        frame = cvzone.overlayPNG(frame, decoration_resize, [0,0])\n\n        print(\"Detecting faces.\")\n        gray_scale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        faces = self.cascade.detectMultiScale(gray_scale)\n\n        print(\"Giving everyone a Christmas hat.\")\n        for (x, y, w, h) in faces:\n            if ((w > 0.045*width) and (h > 0.045*height)):\n\n
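                # scale the hat overlay to 1.6x the detected face box\n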
                overlay_resize = cv2.resize(self.christmas_hat_image, (int(w*1.6), int(h*1.6)))\n\n                try:\n                    # This is a better offset for the Christmas hat, but if a face is near the top of the screen it\n                    # will give an error. So then we use a smaller offset.\n                    frame = cvzone.overlayPNG(frame, overlay_resize, [int(x-w/3), int(y-(0.7)*h)])\n                except:\n                    frame = cvzone.overlayPNG(frame, overlay_resize, [int(x-w/3), int(y-h/2)])\n            else:\n                continue\n        cv2.imwrite(\"christmas_image.png\", frame)\n\n        return {\n            \"festive_image\": \"christmas_image.png\"\n        }","sub_path":"ready-deployments/christmas-model/christmas_package/deployment.py","file_name":"deployment.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"402982822","text":"from tkinter import *\r\nfrom functools import partial\r\n\r\nroot = Tk()\r\nroot.title(\"Le Jeu de la Vie\")\r\nroot.iconbitmap(\"logo.ico\")\r\n\r\n'''' To do: detect the size of the file being read (or use a default size, and in that case center the coordinates);\r\n'''\r\n\r\n\r\n\r\ndef change_color(coords, _) : # toggle the color of a cell\r\n    i, j = coords\r\n    if c.itemcget(grid_display[i][j], \"fill\") == \"white\" :\r\n        c.itemconfig(grid_display[i][j], fill=\"black\")\r\n    else :\r\n        c.itemconfig(grid_display[i][j], fill=\"white\")\r\n\r\n\r\ndef initialisation() : # set up the widgets that initialize the grid and the controls\r\n\r\n    global c, grid_display\r\n\r\n    height = width = 900\r\n    c = Canvas(root, height = height, width = width, bd=0, bg = \"grey\")\r\n    c.grid(row=0, column=0, rowspan = 30)\r\n\r\n    cell_side = height/size\r\n\r\n    grid_display = []\r\n\r\n\r\n    for i in range(size) :\r\n        grid_display.append([])\r\n        for j in range(size) :\r\n\r\n            grid_display[i].append(c.create_rectangle(cell_side*j, cell_side*i, cell_side*(j+1), cell_side*(i+1), width = 2, fill=\"white\", tags=f\"{i}_{j}\"))\r\n\r\n            c.tag_bind(f\"{i}_{j}\",\"<Button-1>\", partial(change_color, (i, j) )) # \"<Button-1>\" assumed: the event name was stripped from the source\r\n\r\n\r\n\r\n    global welcome_label, speed_scale, step, step_label, reset_button\r\n\r\n    # Permanent widgets:\r\n\r\n    # Welcome text:\r\n    welcome_label = Label(text=\"Bienvenue dans le Jeu de la Vie !\")\r\n    welcome_label.grid(row=3, column=2, columnspan=7)\r\n\r\n    # Speed slider:\r\n    speed_scale = Scale(root, orient=\"horizontal\", from_ = 0, to = 16, resolution=0.1, tickinterval=2, label=\"Speed (steps/sec)\", length = 330)\r\n    speed_scale.set(2)\r\n    speed_scale.grid(row=11, column=2, rowspan=4, columnspan=7)\r\n\r\n    # Display of the current step:\r\n    step = 0\r\n    step_label = Label(root, text=f\"Step {step}\")\r\n    step_label.grid(row=15, column=2, columnspan=7)\r\n\r\n    # the reset button returns to a blank grid at any time:\r\n    reset_button = Button(root, command=reset, text=\" Reset \")\r\n    reset_button.grid(row=20, column=5)\r\n\r\n\r\n    # Initialization widgets (stored in a dictionary so they can be destroyed more easily):\r\n\r\n    global dico\r\n    dico = {}\r\n\r\n    # Entry for the initialization file:\r\n    dico[\"file_entry\"] = Entry(root, width = 30)\r\n    dico[\"file_entry\"].grid(row=5, column=2, columnspan=7)\r\n\r\n    # Start button:\r\n    dico[\"launch_button\"] = Button(root, command=grid_generation, text=\" Go ! 
\")\r\n dico[\"launch_button\"].grid(row=6, column=2, columnspan=7)\r\n\r\n\r\n # Bouton pour sauvegarder l'affichage actuel :\r\n dico[\"save_button\"] = Button(root, command=sauvegarde, text=\"Save the grid\")\r\n dico[\"save_button\"].grid(row=27, column=2, columnspan=7)\r\n # Et une boîte d'entrée pour le nom du fichier (si l'entrée est vide, sauvegarde au nom 'Sauvegarde_tmp') :\r\n dico[\"save_entry\"] = Entry(root, width=30)\r\n dico[\"save_entry\"].grid(row=26, column=2, columnspan=7)\r\n\r\n\r\n # Label pour modifier la taille du quadrillage\r\n dico[\"size_label\"] = Label(root, text = f\" Changer la taille ({size}) : \")\r\n dico[\"size_label\"].grid(row=18, column=2, columnspan=7)\r\n\r\n # Entrée pour modifier manuellement la taille :\r\n dico[\"size_entry\"] = Entry(root, width=7)\r\n dico[\"size_entry\"].grid(row=19, column=5)\r\n\r\n dico[\"size_increment1\"] = Button(root, text=\"+1\", command=partial(reset, 1))\r\n dico[\"size_increment1\"].grid(row=19, column=6)\r\n\r\n dico[\"size_increment2\"] = Button(root, text=\"+2\", command=partial(reset, 2))\r\n dico[\"size_increment2\"].grid(row=19, column=7)\r\n\r\n dico[\"size_increment4\"] = Button(root, text=\"+4\", command=partial(reset, 4))\r\n dico[\"size_increment4\"].grid(row=19, column=8)\r\n\r\n\r\n dico[\"size_decrement1\"] = Button(root, text=\"-1\", command=partial(reset, -1))\r\n dico[\"size_decrement1\"].grid(row=19, column=4)\r\n\r\n dico[\"size_decrement2\"] = Button(root, text=\"-2\", command=partial(reset, -2))\r\n dico[\"size_decrement2\"].grid(row=19, column=3)\r\n\r\n dico[\"size_decrement4\"] = Button(root, text=\"-4\", command=partial(reset, -3))\r\n dico[\"size_decrement4\"].grid(row=19, column=2)\r\n\r\n if size < 9 :\r\n dico[\"size_decrement4\"].configure(state=DISABLED)\r\n if size < 7 :\r\n dico[\"size_decrement2\"].configure(state=DISABLED)\r\n if size < 6 :\r\n dico[\"size_decrement1\"].configure(state=DISABLED)\r\n\r\n\r\n\r\ndef sauvegarde() :\r\n file_name = dico[\"save_entry\"].get()\r\n if file_name == \"\" :\r\n file_name = \"Sauvegarde_tmp\"\r\n\r\n f = open(f\"{file_name}.txt\", \"w\")\r\n for i in range(len(grid_display)) :\r\n for j in range(len(grid_display)) :\r\n if c.itemcget(grid_display[i][j], \"fill\") == \"black\" :\r\n f.write(f\"{i} {j}\\n\")\r\n f.close\r\n\r\n\r\n\r\ndef grid_generation() : # création de la première grille 0_1, puis traduction en 0-15\r\n\r\n for i in range(len(grid_display)) : # désactivation des cases en enlevant les tags\r\n for j in range(len(grid_display)) :\r\n c.dtag(grid_display[i][j], f\"{i}_{j}\")\r\n\r\n taille = size\r\n if dico[\"file_entry\"].get() != \"\" :\r\n file = open(f\"{dico['file_entry'].get()}.txt\", \"r\")\r\n lines = file.readlines()\r\n file.close()\r\n\r\n grid = [[0]*taille for _ in range(taille)]\r\n for i in range(len(lines)) :\r\n coords = lines[i].split()\r\n grid[int(coords[0])][int(coords[1])] = 1\r\n\r\n else :\r\n grid = []\r\n for i in range(len(grid_display)) :\r\n grid.append([])\r\n for j in range(len(grid_display)) :\r\n if c.itemcget(grid_display[i][j], \"fill\") == \"white\" :\r\n grid[i].append(0)\r\n else :\r\n grid[i].append(1)\r\n\r\n # Sauvegarde automatique de la position de départ\r\n sauvegarde()\r\n\r\n first_grid = [[0]*len(grid) for _ in range(len(grid))]\r\n for x in range(1, len(grid)-1) :\r\n for y in range(1, len(grid) - 1) :\r\n if grid[x][y] == 1 :\r\n first_grid[x][y] += 1 #indique que la cellule était vivante au tour d'avant\r\n first_grid[x-1][y-1] += 2\r\n first_grid[x][y-1] += 2\r\n 
first_grid[x+1][y-1] += 2\r\n                first_grid[x-1][y] += 2\r\n                first_grid[x+1][y] += 2\r\n                first_grid[x-1][y+1] += 2\r\n                first_grid[x][y+1] += 2\r\n                first_grid[x+1][y+1] += 2\r\n\r\n    # Remove the initialization widgets:\r\n    for widget in dico.values() :\r\n        widget.destroy()\r\n\r\n    global pause_button, step\r\n    # Pause button\r\n    pause_button = Button(root, text=\" Pause \", command=lambda : speed_scale.set(0)) # setting the slider to 0 stops the update loop\r\n    pause_button.grid(row=6, column=2, rowspan=2, columnspan=7)\r\n\r\n\r\n    global reset_state\r\n    reset_state = False\r\n\r\n    step = -1\r\n    update(first_grid)\r\n\r\n\r\n\r\n\r\ndef update(grid) : # refreshes the displayed cells from a given grid.\r\n    if reset_state :\r\n        return\r\n\r\n    for i in range(len(grid)) :\r\n        for j in range(len(grid)) :\r\n\r\n            if grid[i][j] % 2 == 0 :\r\n                color = \"white\"\r\n            else :\r\n                color = \"black\"\r\n\r\n            c.itemconfig(grid_display[i][j], fill = color) # update the canvas object: change the color of the rectangle in grid_display\r\n\r\n    global step\r\n    step += 1\r\n    step_label.configure(text=f\"Step {step}\")\r\n    new_grid = main_evaluate(grid) # function computing the next grid\r\n\r\n    if grid == new_grid : # stop the program when there is no more evolution\r\n        pause_button.configure(state=DISABLED)\r\n        return\r\n\r\n    speed = speed_scale.get()\r\n    if speed != 0 :\r\n        root.after(int(1000/speed), update, new_grid) # wait for a while, then call update again\r\n    else :\r\n        pause(new_grid)\r\n\r\n\r\n\r\n\r\ndef main_evaluate(grid) :\r\n    stock = [[0]*len(grid) for _ in range(len(grid))]\r\n    for x in range(1, len(grid)-1) :\r\n        for y in range(1, len(grid)-1) :\r\n            if grid[x][y] in [5, 6, 7] :\r\n                stock[x][y] += 1 # marks that the cell will be alive on the next turn\r\n                stock[x-1][y-1] += 2\r\n                stock[x][y-1] += 2\r\n                stock[x+1][y-1] += 2\r\n                stock[x-1][y] += 2\r\n                stock[x+1][y] += 2\r\n                stock[x-1][y+1] += 2\r\n                stock[x][y+1] += 2\r\n                stock[x+1][y+1] += 2\r\n\r\n    return stock\r\n\r\n\r\ndef pause(grid) :\r\n\r\n    pause_button.configure(text=\"Resume\", command=partial(resume, grid))\r\n\r\n    global step_button\r\n    step_button = Button(root, text=\"Next step\", command=partial(update_step_by_step, grid))\r\n    step_button.grid(row=9, column=2, rowspan=2, columnspan=7)\r\n\r\n\r\ndef resume(grid) :\r\n    step_button.destroy()\r\n    speed_scale.set(2)\r\n\r\n    pause_button.configure(text=\"Pause\", command=lambda : speed_scale.set(0)) # the pause button now points back to the pause behaviour\r\n    update(grid)\r\n\r\n\r\n\r\ndef update_step_by_step(grid) :\r\n\r\n    global step\r\n    step += 1\r\n    step_label.configure(text=f\"Step {step}\")\r\n\r\n    for i in range(len(grid)) :\r\n        for j in range(len(grid)) :\r\n\r\n            if grid[i][j] % 2 == 0 :\r\n                color = \"white\"\r\n            else :\r\n                color = \"black\"\r\n\r\n            c.itemconfig(grid_display[i][j], fill = color)\r\n\r\n    new_grid = main_evaluate(grid) # function computing the next grid\r\n\r\n    if grid == new_grid : # stop the program when there is no more evolution\r\n        step_button.configure(state=DISABLED)\r\n        pause_button.configure(state=DISABLED)\r\n        return\r\n\r\n    pause_button.configure(command=partial(resume, new_grid))\r\n\r\n    step_button.configure(command=partial(update_step_by_step, new_grid))\r\n\r\n\r\n\r\n\r\n\r\ndef reset(x = 0) :\r\n\r\n    global size, reset_state\r\n    if reset_state == True :\r\n        if dico[\"size_entry\"].get() != \"\" :\r\n            size = int(dico[\"size_entry\"].get())\r\n        if size < 5 :\r\n            size = 5\r\n        size += x\r\n\r\n
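    # signal the running update loop to stop before the widgets are rebuilt\r\n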
    reset_state = True\r\n\r\n    for widget in root.winfo_children() :\r\n        widget.destroy()\r\n\r\n    initialisation()\r\n\r\n\r\n\r\n\r\nsize = 30\r\ninitialisation()\r\n\r\n\r\n\r\n\r\n\r\nroot.mainloop()\r\n","sub_path":"Ancienne version/fusion.py","file_name":"fusion.py","file_ext":"py","file_size_in_byte":10346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"207184630","text":"__author__ = 'niko'\nclass Node(object):\n    def __init__(self,data):\n        self.data=data\n        self.nextNode=None\n\n    def remove(self,data,previousNode):\n        'remove the node that contains data and store the reference to the rest of the list in the previous node'\n        if self.data == data:\n            previousNode.nextNode = self.nextNode\n\n            # not necessary: the garbage collector will do it anyway, as no object references it\n            del self.data\n            del self.nextNode\n        else:\n            if self.nextNode is not None:\n                self.nextNode.remove(data,self)","sub_path":"LinkedListDataStracture/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"323948185","text":"# -*- coding: utf-8 -*-\n\n\nimport hboot_image_compiler.netx90_app_iflash_image\n\nimport SCons.Script\n\n\ndef __iflash_image_action(target, source, env):\n    fVerbose = False\n    if 'IFLASHIMAGE_VERBOSE' in env:\n        fVerbose = bool(env['IFLASHIMAGE_VERBOSE'])\n\n    strAsicTyp = env['ASIC_TYP']\n    if (strAsicTyp != 'NETX90_MPW_APP') and (strAsicTyp != 'NETX90_APP'):\n        raise Exception(\n            'IFlash images are not possible for ASIC typ \"%s\".' % strAsicTyp\n        )\n\n    hboot_image_compiler.netx90_app_iflash_image.patch_image(\n        source[0].get_path(),\n        target[0].get_path(),\n        fVerbose\n    )\n\n    return 0\n\n\ndef __iflash_image_emitter(target, source, env):\n    env.Depends(target, SCons.Node.Python.Value(str(env['ASIC_TYP'])))\n\n    return target, source\n\n\ndef __iflash_image_string(target, source, env):\n    return 'IFlashImage %s' % target[0].get_path()\n\n\n# ---------------------------------------------------------------------------\n#\n# Add IFlashImage builder.\n#\ndef ApplyToEnv(env):\n    env['IFLASHIMAGE_VERBOSE'] = False\n\n    iflash_image_act = SCons.Action.Action(\n        __iflash_image_action,\n        __iflash_image_string\n    )\n    iflash_image_bld = SCons.Script.Builder(\n        action=iflash_image_act,\n        emitter=__iflash_image_emitter,\n        suffix='.bin',\n        single_source=1)\n    env['BUILDERS']['IFlashImage'] = iflash_image_bld\n","sub_path":"site_scons/iflash_image.py","file_name":"iflash_image.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"168139338","text":"\n\n\nimport grace_ts_functions\n\nstation_name='MUSB';\ngrace_dir=\"../../GPS_POS_DATA/GRACE_loading_model/\"\nfilename=grace_dir+\"scaled_\"+station_name+\"_PREM_model_ts.txt\";\nout_dir=\"stations/\";\n\n\ngrace_ts_functions.plot_grace(station_name, filename, out_dir);\n","sub_path":"specific_experiments/plot_grace/single_driver.py","file_name":"single_driver.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"504980119","text":"\"\"\"This works so long as icon.png is available.\"\"\"\nfrom collections import Callable\n\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nimport my\nfrom collections import namedtuple, Iterable\nfrom functools import singledispatch\nimport 
logging\n\nlogging.basicConfig(format='%(asctime)s:%(levelname)s:%(module)s#%(funcName)s:%(message)s')\nlog = logging.getLogger(__name__) # module logging interface\nlog.setLevel('DEBUG')\n\n\"\"\"\nDesign Notes\nThis program shall reside within the system icon tray with a menu system as a UI\nMenu \n Start/Pause\n Status\n Label or MAC or IP - STATUS\n Jeremy iPhone - Last Seen 12h30m ago\n Jeremy Desktop - CONNECTED \n Configs\n Network Range\n Sleep Seconds / Cycle\n Send Wake On Lan\n Nodes DB\n Labels DB\n Notify for new nodes\n Notify known nodes after\n Exit\n\"\"\"\n\n\"\"\"\n\"\"\"\n\nMenuOP = namedtuple('MenuOp', 'func, params')\n\nclass SysTray(QApplication):\n def __init__(self):\n super().__init__([])\n self.setQuitOnLastWindowClosed(False)\n self.init_UI()\n\n def init_UI(self):\n # Create the tray\n self.tray = QSystemTrayIcon()\n self.tray.setIcon(QIcon(\"./icons/application.png\"))\n self.tray.setVisible(True)\n\n # Create the menu\n self.menu = QMenu()\n self.make_menu(self.menu, self.menu_defs)\n\n # Add the menu to the tray\n self.tray.setContextMenu(self.menu)\n\n @property\n def menu_defs(self):\n return {'Exit': self.close\n ,'A1': MenuOP(func=QAction, params=dict(text='X', icon=QIcon('./icons/exit.png')))\n ,'A2': MenuOP(func=QMenu, params=dict(title='M', icon=QIcon('./icons/exit.png')))\n ,'B': {\n 'B.a': self.quit\n ,'B.b': print\n ,'B.c': {\n 'B.C.a': self.quit\n , 'B.C.b': print\n , 'BC..c': ''\n }\n }\n }\n\n def make_menu(self, parent, obj):\n \"\"\"Translate object into a menu attached to parent.\"\"\"\n log.debug(f'parent={parent}, obj={obj}')\n if isinstance(obj, Callable):\n otype = 'callable'\n else:\n otype = type(obj).__name__\n attr_base_name = '_menu_from_'\n func = attr_base_name + otype\n if not hasattr(self, func):\n raise TypeError(f'Unexpected type {otype} for obj {obj}')\n call = getattr(self, func)\n call(parent, obj)\n\n def _menu_from_dict(self, parent, obj):\n \"\"\"Translate a dict into a menu.\n Dict keys become new items within parent. Menu if values are iterable. 
Action if not.\n        Dict values become new sub-menu items within the newly created item from key.\n        Values can be of any type handled by make_menu.\n        \"\"\"\n        log.debug(f'parent={parent}, obj={obj}')\n        for k, v in obj.items():\n            if isinstance(v, Iterable):\n                new_menu_item = QMenu(title=k, parent=parent)\n                parent.addMenu(new_menu_item)\n                self.make_menu(parent=new_menu_item, obj=v)\n            else:\n                new_menu_item = QAction(parent=parent, text=k)\n                parent.addAction(new_menu_item)\n                self.make_menu(new_menu_item, v)\n\n    def _menu_from_str(self, parent, obj):\n        \"\"\"Translate a str into a menu item with no trigger.\"\"\"\n        log.debug(f'parent={parent}, obj={obj}')\n        parent.addAction(QAction(parent=parent, text=obj))\n\n    def _menu_from_callable(self, parent, obj):\n        \"\"\"Translate a builtin callable to a menu item that triggers it.\"\"\"\n        log.debug(f'parent={parent}, obj={obj}')\n        parent.triggered.connect(obj)\n\n    def _menu_from_MenuOp(self, parent: QMenu, obj: MenuOP):\n        \"\"\"Make menu item(s) from a MenuOp object.\"\"\"\n        log.debug(f'parent={parent}, obj={obj}')\n        if obj.func is QAction:\n            parent.addAction(obj.func(parent=parent, **obj.params))\n        elif obj.func is QMenu:\n            parent.addMenu(obj.func(parent=parent, **obj.params))\n\n    def run(self):\n        self.exec_()\n\n    def close(self):\n        \"\"\"Hide the icon before quitting to avoid filling\n        the icon tray with dummy icon instances.\"\"\"\n        self.tray.setVisible(False)\n        self.quit()\n\nif __name__ == '__main__':\n    s = SysTray()\n    s.run()\n","sub_path":"SysTray.py","file_name":"SysTray.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"486285352","text":"# -*- coding: utf-8 -*-\n\n# Check whether a year is a leap year\n\ndef leap_year(num):\n    if num%400 == 0 or (num%4 == 0 and num%100 != 0): # leap-year rule\n        return True # it is a leap year, return True\n    else:\n        return False\n\n\nwhile True:\n    # read the user input\n    try:\n        num = int(input(\"Enter a year (0000 to quit): \"))\n    except ValueError:\n        print(\"Invalid input, please try again!\")\n        continue\n    \n    # print the result\n    if num == 0:\n        break\n    if leap_year(num):\n        print(\"%d is a leap year\" % num)\n    else:\n        print(\"%d is not a leap year\" % num)","sub_path":"Python_learning/1-Basic/1-4_Function/3-闰年.py","file_name":"3-闰年.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"585556050","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('home/', views.home, name='home'),\n path('base/', views.base, name='base'),\n path('contact/', views.contact, name='contact'),\n path('signup/', views.signup,name='signup'),\n\n # path('register/', views.register, name='register')\n]\n","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"567183648","text":"\nfrom __future__ import print_function\nimport os\nimport requests\nimport time\nimport logging\nimport config\nfrom pygcj import GCJProj\n\nlogging.basicConfig(level=logging.DEBUG)\n\nimport sys\nif sys.version_info >= (3, 0):\n xrange = range\n\n\ngcjproj = GCJProj()\n\n\ndef generate_gcp_origin(interval):\n \"\"\"Generate WGS84 Controls Points\n \n Arguments:\n interval {float} -- interval decimal degree\n \n Returns:\n {[(lng,lat)]} -- points array\n \"\"\"\n\n min_x = config.china_extent['min_lng'] + 0.0\n min_y = config.china_extent['min_lat'] + 0.0\n max_x = config.china_extent['max_lng'] + 0.0\n max_y = config.china_extent['max_lat'] + 0.0\n \n interval += 0.0\n\n gcp_count = int((max_x - min_x) / interval) * int((max_y - min_y) / interval)\n\n if gcp_count > 400000:\n logging.error('too many control points. choose larger interval please.')\n exit()\n \n if gcp_count < 20:\n logging.error(\n 'too little control points. choose smaller interval please.')\n exit()\n\n logging.info('generate control points number:' + str(gcp_count))\n \n gcps = []\n gcp_x = min_x + interval\n gcp_y = min_y + interval\n while gcp_x < max_x:\n while gcp_y < max_y:\n gcps.append((gcp_x, gcp_y))\n gcp_y += interval\n\n gcp_y = min_y + interval\n gcp_x += interval\n \n return gcps\n\n\ndef query_gaode(gcps_84,amap_key):\n req_str = ''\n for i in xrange(0,len(gcps_84)):\n gcp84 = gcps_84[i]\n req_str = req_str + \\\n str(round(gcp84[0], 6)) + ',' + \\\n str(round(gcp84[1], 6)) + '|'\n\n req_str = req_str[:-1]\n reqdata = {\n 'locations': req_str,\n 'coordsys': 'gps',\n 'output': 'json',\n 'key': amap_key\n }\n\n r = requests.get(\n 'http://restapi.amap.com/v3/assistant/coordinate/convert', params=reqdata)\n\n r_json = r.json()\n gd_str = r_json['locations']\n gd_array = gd_str.split(';')\n\n query_result = []\n for i in xrange(0,len(gd_array)):\n gd_x, gd_y = map(float,gd_array[i].split(','))\n gcp84_x, gcp84_y = gcps_84[i]\n local_trans_y, local_trans_x = gcjproj.wgs_to_gcj_raw(\n round(gcp84_y, 6), round(gcp84_x, 6))\n delta_x = gd_x - local_trans_x\n delta_y = gd_y - local_trans_y\n \n values = []\n values.append(round(gcp84_x, 6))\n values.append(round(gcp84_y, 6))\n values.append(gd_x)\n values.append(gd_y)\n values.append(delta_x)\n values.append(delta_y)\n\n value_str = ','.join(str(x) for x in values) + '\\r\\n'\n query_result.append(value_str)\n \n return query_result\n\n\ndef generate_gcp_gaode(interval,amap_key):\n \"\"\"Generate Control Points File for AMap API\n https://lbs.amap.com/\n \n Arguments:\n interval {float} -- control grid interval(decimal degree)\n amap_key {string} -- amap developer key\n \"\"\"\n\n gcps_84 = generate_gcp_origin(interval)\n\n results = []\n\n step = 40\n start = 0\n while start + step < len(gcps_84):\n query_gcps = gcps_84[start:start+step]\n results += query_gaode(query_gcps,amap_key)\n start += step\n # geode api QPS limits\n time.sleep(0.1)\n\n results += query_gaode(gcps_84[start:len(gcps_84)], amap_key)\n\n with open(os.path.join(os.path.dirname(__file__), 'gcps_gd'), 'w') as 
newfile:\n        newfile.writelines(results)\n    \n    logging.info('finish.')\n\n\ndef print_help():\n    print('Usage:\\n'\n          '    generate_gcp <interval> <amapkey>\\n'\n          'interval:\\n'\n          '    control points grid interval in decimal degrees. eg:0.1\\n'\n          'amapkey:\\n'\n          '    amap developer api key. visit \"http://lbs.amap.com/\" and request your key.\\n'\n          )\n\n\ndef main(argvs):\n    if len(argvs) < 3:\n        print_help()\n        return\n\n    try:\n        interval = float(argvs[1])\n    except:\n        print_help()\n        return\n\n    try:\n        amapkey = str(argvs[2])\n    except:\n        print_help()\n        return\n\n    generate_gcp_gaode(float(interval), amapkey)\n\nif __name__ == '__main__':\n    main(sys.argv)\n\n","sub_path":"pygcj/generate_gcp.py","file_name":"generate_gcp.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"176637196","text":"import RPi.GPIO as GPIO\nfrom sensors.AbstractSensor import AbstractSensor\n\n\"\"\"\nDataSheet: https://www.futurlec.com/Datasheet/Sensor/MQ-135.pdf\nProduct page: https://www.adafruit.com/product/439\nOriginal implementation: https://github.com/seanbechhofer/raspberrypi/blob/master/python/TSL2561.py\nComponent name: MQ-135\nCommunication protocol: digital GPIO\n\"\"\"\n\n\nclass MQ135(AbstractSensor):\n\tdef __init__(self, pin=7, debug=False):\n\t\tself.pin = pin\n\t\tself.debug = debug\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\t\tGPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n\t\tif self.debug:\n\t\t\tprint(\"Construct for MQ135 on pin {} with debugging set to {}\".format(self.pin, self.debug))\n\n\tdef measurement_callback(self, channel):\n\t\t# RPi.GPIO passes the channel number to event callbacks\n\t\tif self.debug:\n\t\t\tprint(\"Motherfucking activity!\")\n\n\tdef read(self):\n\t\tif self.debug:\n\t\t\tprint(\"Request for data. Adding event callback to GPIO.\")\n\n\t\tGPIO.add_event_detect(self.pin, GPIO.RISING)\n\t\tGPIO.add_event_callback(self.pin, self.measurement_callback)\n\n","sub_path":"sensors/MQ135.py","file_name":"MQ135.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"359444821","text":"'''\nglobal scope: code that runs outside of any function runs in the global scope\nglobal variable: a variable defined in the global scope\n\nfunction local scope: every time a function is called, the program enters that function's local scope\nlocal variable: a variable defined in the local scope\n\ndefault behaviour 1: when the program runs in a local scope (inside a function), it first searches the local scope for a variable;\nif the variable is not found in the local scope, the program searches the global scope\nrule: a variable cannot be both local and global inside of a function\n'''\n\n# def foo():\n#     print(s) # print local s, error because cannot find s on local scope\n#     s = \"Me too.\" # because of this variable definition, python thinks we want to use local s all the time\n#     print(s) # print local s\n#\n#\n# s = \"I hate spam.\"\n# foo()\n# print(s)\n\n# def foo():\n#     global s # use global s inside function\n#     print(s) # print global s\n#     s = \"That's clear.\" # set global s = “That’s clear”\n#     print(s) # print global s\n#\n#\n# s = \"Python is great!\"\n# foo()\n# print(s) # ?\n\ndef calculation():\n    global place\n    place = \"Cape Town\"\n    global name # name should be defined on global scope\n    name = \"John\" # global variable \"name\"\n\n    print(locals())\n    print(globals())\n\n\nplace = \"Berlin\"\nprint(place)\ncalculation()\nprint(name)\n\n'''\nwrite a python function that takes a string and returns a dictionary\nthe dictionary key is each character 
in the string, the value is the frequency of the character\n\ns = \"abbc\"\nd = {\"a\": 1, \"b\": 2, \"c\": 1}\n\ns = \"hello\"\nd = {\"h\": 1, \"e\": 1, \"l\": 2, \"o\": 1}\n'''\n\n'''\nabout global keyword\nglobal keyword allows you to modify the global variable inside a function\n'''\n\ndef keys_number(string):\n dic = {}\n for char in string:\n dic[char] = count_char(char, string)\n return dic\n\n# count_char counts how many times the target character is in the source string\ndef count_char(target, source):\n counter = 0\n for j in source:\n if target == j:\n counter += 1\n return counter\n\n\nprint(keys_number('12215'))\n\n'''\nWrite a Python program to print a dictionary where the keys are numbers between 1 and 10 (both included) \nand the values are square of keys. Solve this problem using dictionary comprehension\nsample dictionary {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81, 10: 100}\n'''\ndef strDictConv(string):\n return {x: string.count(x) for x in string}\n\n\ns = \"hello\"\nprint(s.count(\"he\"))\n\n'''\nGiven a list and dictionary, map each element of list with each item of dictionary\ninput: dic = {\"four\": 4, \"nine\": 9} list = [1, 2]\noutput: result = {1: {\"four\": 4}, 2: {\"nine\": 9}}\n'''\ndic = {\"four\": 4, \"nine\": 9}\nlist = [1, 2]\n\nresult = {}\nindex = 0\nfor key, value in dic.items():\n result[list[index]] = {key: value}\n index += 1\n\n\n'''\nGiven an integer list, \nfind the contiguous sublist (containing at least one number) which has the largest sum and return its sum.\n\ninput = [-2,1,-3,4,-1,2,1,-5,4]\nsublist [4,-1,2,1] gives the largest sum 6, return 6\n\nInput: nums = [5,4,-1,7,8]\noutput: 23\n'''\n\ndic = {num : num**2 for num in range(1, 11)}\n\nd=dict()\nfor lol in range(1,11):\n d[lol]=lol**2\nprint(d)","sub_path":"python_demo_programs/2021/example_20210420.py","file_name":"example_20210420.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123823021","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport roslib\nroslib.load_manifest('barc')\nimport sys\nimport rospy\nimport cv2\nfrom std_msgs.msg import String, Int32, Float32\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport numpy as np\nfrom scipy import signal\nfrom math import sqrt, atan, pi\nimport kernel\n\npoint_num = 0\n\n# Parameter Default Values\ndisplay_image = False\npublish_image = False\ncalibrate_transform = False\nimage_calibrated = True\ncalibration_pts = None\n\ndef print_point_callback(event, x, y, flags, param):\n \"\"\"\n This callback function calibrates the image perspective transform\n \"\"\"\n global point_num\n global image_calibrated\n global calibration_pts\n global transformation\n global image_processor_global\n if event == cv2.EVENT_LBUTTONDOWN:\n point_num = point_num + 1\n if (point_num == 1):\n print('Start at upper left corner. 
Select points clockwise.')\n calibration_pts = np.float32([[x + 0.0,y + 0.0]])\n elif (point_num <= 4):\n calibration_pts = np.append(calibration_pts, np.float32([[x + 0.0,y + 0.0]]),axis = 0)\n \n if (point_num == 4):\n # Apply Projection Transform\n # ref points [TOPLEFT, TOPRIGHT, BOTTOMRIGHT, BOTTOMLEFT]\n ref_pts = np.float32([[image_processor_global.x_offset,0], \\\n [image_processor_global.x_width + image_processor_global.x_offset,0], \\\n [image_processor_global.x_width + image_processor_global.x_offset, image_processor_global.y_height], \\\n [image_processor_global.x_offset, image_processor_global.y_height]])\n\n image_processor_global.projection_dim = (image_processor_global.x_width + image_processor_global.x_offset * 2, image_processor_global.y_height)\n image_processor_global.projection_transform = cv2.getPerspectiveTransform(calibration_pts, ref_pts) \n image_calibrated = True\n cv2.destroyWindow(\"Calibrate Image Transform\")\n display_val ='Pt{}: x:{} y:{}'.format(point_num, x, y)\n print(display_val)\n\ndef find_offset_in_lane(img,x,y,width):\n \"\"\"\n Returns the difference in x and y positions\n operates on pixels. Return value is pixel offset from nominal\n \"\"\"\n x_left = x\n x_right = x\n while(x_left > 0 and not img[y, x_left]):\n x_left = x_left - 1\n while(x_right < width and not img[y, x_right]):\n x_right = x_right + 1\n return (x_left, x_right)\n\nclass image_processor:\n \"\"\"\n This class takes image messages from the USB Camera and converts them to a cv2 format\n subsequently it converts the image to grayscale and performs a perspective and hanning transform.\n Finally, it outputs a delta value indicating the offset of the vehicle from the center of the lane\n \"\"\"\n\n def __init__(self):\n global display_image, publish_image, calibrate_transform\n global calibration_pts\n \n #Create ROS Interfaces\n self.offset_lane_pub = rospy.Publisher(\"lane_offset\", Float32,queue_size=10)\n\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(\"/image_raw\", Image,self.callback_image)\n \n #Get Launch File Parameters and configure node\n calibrate_transform = rospy.get_param(\"/image_processing/calibrate_transform\")\n display_image = rospy.get_param(\"/image_processing/display_image\")\n publish_image = rospy.get_param(\"/image_processing/publish_image\")\n global image_calibrated\n \n if publish_image:\n self.image_pub = rospy.Publisher(\"cv_image\", Image, queue_size = 10)\n \n # Projection Transform Parameters\n self.x_offset = 50\n self.x_width = 75\n self.y_height = 50\n if calibrate_transform:\n image_calibrated = False\n cv2.namedWindow(\"Calibrate Image Transform\")\n cv2.setMouseCallback(\"Calibrate Image Transform\", print_point_callback)\n else:\n image_calibrated = True\n calibration_pts = np.float32( ([rospy.get_param(\"/image_processing/upperLeftX\"), rospy.get_param(\"/image_processing/upperLeftY\")], \\\n [rospy.get_param(\"/image_processing/upperRightX\"), rospy.get_param(\"/image_processing/upperRightY\")], \\\n [rospy.get_param(\"/image_processing/lowerRightX\"), rospy.get_param(\"/image_processing/lowerRightY\")], \\\n [rospy.get_param(\"/image_processing/lowerLeftX\"), rospy.get_param(\"/image_processing/lowerLeftY\")]))\n \n # Apply Projection Transform\n # ref points [TOPLEFT, TOPRIGHT, BOTTOMRIGHT, BOTTOMLEFT]\n ref_pts = np.float32([[self.x_offset,0], \\\n [self.x_width + self.x_offset,0], \\\n [self.x_width + self.x_offset, self.y_height], \\\n [self.x_offset, self.y_height]])\n\n self.projection_dim = (self.x_width + 
self.x_offset * 2, self.y_height)\n        self.projection_transform = cv2.getPerspectiveTransform(calibration_pts, ref_pts) \n\n        self.black_lane = rospy.get_param(\"/image_processing/black_lane\")\n        self.kernel = kernel.kernel_get(5,1,11,5,self.black_lane)\n        self.quartile = 95.0\n        self.prev_offset = 0\n\n    def callback_image(self,data):\n        \"\"\"\n        Callback for incoming image message\n        \"\"\"\n        # Global Variables\n        global display_image, publish_image, image_calibrated\n        \n        # Convert ROS Image Message to CV2 image\n        try:\n            cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n        except CvBridgeError as e:\n            print(e)\n\n        (rows,cols,channels) = cv_image.shape\n        # Convert Color Image to Grayscale\n        gray_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)\n        if display_image:\n            cv2.imshow(\"Raw Image\", gray_image)\n            cv2.waitKey(3) \n        \n        if image_calibrated:\n            #IPM\n            ipm = cv2.warpPerspective(gray_image, self.projection_transform, self.projection_dim) \n            if display_image:\n                cv2.imshow(\"IPM\", ipm)\n                cv2.waitKey(3) \n            # filter with kernel\n            filtered = signal.fftconvolve(self.kernel,ipm) \n            threshold = np.percentile(filtered, self.quartile)\n            idx = filtered < threshold \n            filtered[idx] = 0\n            idx = filtered >= threshold\n            filtered[idx] = 255\n            filtered = np.array(filtered, dtype=np.uint8)\n            kernel = np.ones((1,2))\n            filtered = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)\n            # Convert to RGB to visualize lane detection points\n            if display_image:\n                cv2.imshow(\"End Product\", filtered)\n                cv2.waitKey(3) \n            if display_image or publish_image:\n                backtorgb = cv2.cvtColor(filtered, cv2.COLOR_GRAY2RGB)\n            \n            ## Lane Detection\n            height, width = filtered.shape\n            \n            # Change nominal reference point based on previous reference point\n            index_x = width//2 + self.prev_offset \n            index_y = 10\n            center_lane = []\n            yvalid = []\n            while index_y < height:\n                x_left, x_right = find_offset_in_lane(filtered, index_x, height - index_y, width) \n                # Nonvalid signal\n                if x_left == 0 or x_right == width - 1:\n                    index_y += 1\n                    continue\n                centerlanex = ( x_left + x_right ) // 2\n                center_lane += [centerlanex]\n                yvalid += [index_y]\n                index_y += 1\n                index_x += centerlanex - index_x\n            \n            # NOTE: 'offset' was never assigned in the original code (NameError at runtime);\n            # assume the last detected lane centre, falling back to the image midline.\n            offset = center_lane[-1] if center_lane else width//2\n            offset_lane = offset - width//2\n            self.prev_offset = offset_lane\n\n            if display_image or publish_image:\n                cv2.circle(backtorgb, (x_left, height - index_y), 3, (0,255,255), -1)\n                cv2.circle(backtorgb, (x_right, height - index_y), 3, (0,255,255), -1)\n                cv2.circle(backtorgb, (offset, height - index_y), 3, (255,0,255), -1)\n\n            self.offset_lane_pub.publish(Float32(offset_lane))\n            \n            if display_image:\n                cv2.imshow(\"Image window\", backtorgb)\n            if publish_image:\n                try:\n                    self.image_pub.publish(self.bridge.cv2_to_imgmsg(backtorgb, \"bgr8\"))\n                except CvBridgeError as e:\n                    print(e)\n        else:\n            if display_image:\n                cv2.imshow(\"Calibrate Image Transform\", gray_image)\n            if publish_image:\n                try:\n                    self.image_pub.publish(self.bridge.cv2_to_imgmsg(gray_image, \"bgr8\"))\n                except CvBridgeError as e:\n                    print(e)\n        if display_image:\n            cv2.waitKey(3)\n\ndef shutdown_func():\n    cv2.destroyAllWindows() \n\nimage_processor_global = None\n\ndef main(args):\n    rospy.on_shutdown(shutdown_func)\n    global image_processor_global\n    image_processor_global = image_processor()\n    rospy.init_node('image_processing', anonymous=True)\n    try:\n        rospy.spin()\n    except KeyboardInterrupt:\n        print(\"Shutting down\")\n        cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    
main(sys.argv)\n","sub_path":"src/image_processing_experimental.py","file_name":"image_processing_experimental.py","file_ext":"py","file_size_in_byte":9391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"570636855","text":"#!/usr/bin/env python3\n\"\"\"\nFrom 'YFV_Ct_Callithrix_main_rev1.ipynb'\n\"\"\"\n\n\nfrom Bio.Seq import Seq\nfrom Bio import SeqIO\nfrom Bio.Alphabet import IUPAC\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set_style(\"whitegrid\")\n\nfrom xgboost import XGBClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\n\nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV, GridSearchCV, StratifiedKFold\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc\n\n\nimport shap\n\n\"\"\"\n#######################################################################\nImport data\nImport `.pkl` file that was created on \"data_preprocessing_YFV.ipynb\"\n#######################################################################\n\"\"\"\ndef get_data(pickle_seqdf, pickle_ohe):\n    ohe_df = pd.read_pickle(pickle_ohe)\n    seq_df = pd.read_pickle(pickle_seqdf)\n\n    # Select only callithrix samples\n    ohe_df_calli = ohe_df[ohe_df['Host'] == 'Callithrix']\n    ohe_df_calli = ohe_df_calli.sort_values(by='Ct_Group')\n\n    # Select alouatta samples for comparison later\n    ohe_df_alou = ohe_df[ohe_df['Host'] == 'Alouatta']\n    ohe_df_alou = ohe_df_alou.sort_values(by='Ct_Group')\n\n    return (seq_df, ohe_df, ohe_df_calli, ohe_df_alou)\n\n\"\"\"\n#######################################################################\nTrain and test splits\n\nSeparate data into train and test sets.\nSince the dataset is small and imbalanced, I will separate only 10% for testing.\n#######################################################################\n\"\"\"\ndef get_train_test_split(ohe_df_calli, test_size=0.1):\n    # Get only the ohe nucleotide info in X\n    X = ohe_df_calli.drop([\"ID\",\"Host\",\"Ct\",\"Date\",\"Season\",\"Ct_Group\"], axis=1)\n    # The target class is Ct_Group (high or low)\n    y = ohe_df_calli[\"Ct_Group\"]\n\n    X_train, X_test, y_train, y_test = train_test_split(X, y,\n                                                        test_size=test_size,\n                                                        random_state=0,\n                                                        shuffle=True,\n                                                        stratify=y)\n\n    ## Dealing with imbalanced data\n    scale_pos_weight = len(y_train)/y_train.sum()\n\n    return (X, y, X_train, X_test, y_train, y_test, scale_pos_weight)\n\n\"\"\"\n#######################################################################\nClassifiers\n#######################################################################\n\"\"\"\n\n\"\"\"\n#######################################################################\nXGBoost parameter tuning\n#######################################################################\n\"\"\"\ndef initial_xgb_model(X_train, y_train, X_test, y_test, scale_pos_weight):\n    # Initial XGB model\n    initial_xgb = XGBClassifier(learning_rate=0.001,\n                                colsample_bytree = 0.3,\n                                subsample = 1,\n                                objective='binary:logistic',\n                                n_estimators=10000,\n                                max_depth=3,\n                                njobs=4,\n                                random_state=0,\n                                scale_pos_weight=scale_pos_weight\n                                )\n\n    eval_set = [(X_train, y_train), (X_test, y_test)]\n    eval_metric = [\"error\", \"auc\"]\n\n    # fit and inspect the model we just built (the original mistakenly called 'xgb' here)\n    initial_xgb.fit(X_train, y_train, eval_metric=eval_metric, eval_set=eval_set)\n\n    results = initial_xgb.evals_result()\n\n    fig1, axes1 = plt.subplots(figsize=(10, 8), nrows=1, ncols=2)\n    axes1[0].plot(results['validation_0']['error'], label='Train Error')\n    
axes1[0].plot(results['validation_1']['error'], label='Validation Error')\n axes1[0].set_title(\"Initial XGBoost Error\")\n axes1[0].set_xlabel(\"Iteration\")\n axes1[0].set_ylabel(\"Error\")\n axes1[0].legend()\n\n axes1[1].plot(results['validation_0']['auc'], label='Train AUC-ROC')\n axes1[1].plot(results['validation_1']['auc'], label='Validation AUC-ROC')\n axes1[1].set_title(\"Initial XGBoost AUC-ROC\")\n axes1[1].set_xlabel(\"Iteration\")\n axes1[1].set_ylabel(\"AUC\")\n axes1[1].legend()\n\n fig1.tight_layout();\n\n fig1.savefig('./figures/initial_xgb_model.png', format='png', dpi=300, transparent=False)\n\n return initial_xgb\n\"\"\"\n#######################################################################\nGrid Search XGBoost\n#######################################################################\n\"\"\"\ndef grid_cv_xgb(X_train, y_train, scale_pos_weight, params, folds = 5):\n xgb = XGBClassifier(objective='binary:logistic', njobs=4, random_state=0, scale_pos_weight=scale_pos_weight)\n\n skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001)\n\n grid = GridSearchCV(estimator=xgb,\n param_grid=params,\n scoring='roc_auc',\n n_jobs=4,\n cv=skf.split(X_train,y_train),\n verbose=3 )\n\n grid.fit(X_train, y_train)\n\n best_params = grid.best_params_\n results = pd.DataFrame(grid.cv_results_)\n results.to_csv('xgb-grid-search-results-01.csv', index=False)\n\n return (best_params, results)\n\"\"\"\n#######################################################################\nFinal XGBoost model\nFitting the final XGBoost with parameters found on grid_cv.\nUse all training data.\nTest on test data.\n#######################################################################\n\"\"\"\ndef final_xgb(X_train, y_train, X_test, y_test, scale_pos_weight, best_params):\n\n xgb = XGBClassifier(**best_params)\n xgb.set_params(njobs=4,\n random_state=0,\n objective='binary:logistic',\n scale_pos_weight=scale_pos_weight)\n\n eval_set = [(X_train, y_train), (X_test, y_test)]\n eval_metric = [\"error\", \"auc\"]\n\n xgb.fit(X_train, y_train,\n eval_metric=eval_metric,\n eval_set=eval_set)\n\n results = xgb.evals_result()\n\n fig1, axes1 = plt.subplots(figsize=(10, 8), nrows=1, ncols=2)\n axes1[0].plot(results['validation_0']['error'], label='Train Error')\n axes1[0].plot(results['validation_1']['error'], label='Validation Error')\n axes1[0].set_title(\"Final XGBoost Error\")\n axes1[0].set_xlabel(\"Iteration\")\n axes1[0].set_ylabel(\"Error\")\n axes1[0].legend()\n\n axes1[1].plot(results['validation_0']['auc'], label='Train AUC-ROC')\n axes1[1].plot(results['validation_1']['auc'], label='Validation AUC-ROC')\n axes1[1].set_title(\"Final XGBoost AUC-ROC\")\n axes1[1].set_xlabel(\"Iteration\")\n axes1[1].set_ylabel(\"AUC\")\n axes1[1].legend()\n\n fig1.tight_layout();\n\n fig1.savefig('./figures/final_xgb_model.png', format='png', dpi=300, transparent=False)\n\n return xgb\n\n\"\"\"\n#######################################################################\nRandom Forest\nBased on experience, we will use a random forest with 100 trees\n(we compared `oob_score_` values for different numbers of trees).\n\nSet `random state = 0` and `oob_score = True` to allow reproducibility\nand to use \"out of bag\" samples to compute accuracy.\n#######################################################################\n\"\"\"\ndef random_forest(X_train, y_train, X_test, y_test, cw, n=100):\n\n rf = RandomForestClassifier(n_estimators=n,\n random_state=0,\n oob_score=True,\n class_weight={0:1, 1:cw})\n\n 
rf.fit(X_train, y_train)\n\n y_pred = rf.predict(X_test)\n\n false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)\n\n roc_auc = auc(false_positive_rate, true_positive_rate)\n return (rf, roc_auc, rf.oob_score_)\n\n\n\"\"\"\n#######################################################################\nModified Logistic Regression\n#######################################################################\n\"\"\"\ndef logistic_regression(X_train, y_train, X, k=10):\n import Logistic_regression_modified as lr\n\n (e, ave_MSE, all_alphas, ave_alphas) = lr.LR_k_fold_CV(X_train, y_train, k)\n\n normalized_alphas = lr.normalize_alphas(ave_alphas, X)\n alphas = ohe_inverse_LR(normalized_alphas)\n\n return (alphas, e, ave_MSE)\n\"\"\"\n#######################################################################\nFeature Importances\n#######################################################################\n\"\"\"\n# load JS visualization code to notebook\nshap.initjs()\n\ndef get_explainer(xgb, rf, X_train):\n # Creates the explainer based on the model.\n rf_explainer = shap.TreeExplainer(rf, data=X_train)\n rf_shap_values = rf_explainer.shap_values(X_train)\n rf_shap_values = rf_shap_values[0] # For the random forest model, the shap TreeExplainer returns 2 sets of values, one for class 1 and one for class 0. They are symmetric, so you can use either.\n\n # Creates the explainer based on the model.\n xgb_explainer = shap.TreeExplainer(xgb, data=X_train)\n xgb_shap_values = xgb_explainer.shap_values(X_train)\n\n\n\n return (xgb_explainer, rf_explainer, xgb_shap_values, rf_shap_values)\n\n\n\"\"\"\n#######################################################################\nFrom one-hot encoding to original features' names.\n\nSince \"ohe\" creates additional columns to accomodate the attributes'\ncategories, we have to go back to the original attribute's names\nin order to clearly analyse the results, especially where it concerns\nfeature importances.\n\nProcedure\nTo do so, we will sum the shap values of all categories of each attribute,\nfor each sample, as suggested by SHAP's developer\n([see here](https://github.com/slundberg/shap/issues/397))\n#######################################################################\n\"\"\"\n\n\n\"\"\"\n#######################################################################\nInverse OHE function specifically for SHAP values\n#######################################################################\n\"\"\"\ndef ohe_inverse(df_shap_values):\n \"\"\"Converts a dataframe containing shap values in ohe format\n back to original genomic positions\"\"\"\n\n # Auxiliary list to recreate original shap_values dataframe\n list_shap_original = []\n\n # Regular expression to pick attributes names.\n # Since in our case attributes names are the genomic positions (i.e. 
an integer number), we use the regex below\n import re\n pattern = \"^\\d+\"\n\n # Auxiliary dictionary to create one pd.DataFrame for each sample, summing the shap values for each attribute.\n # Later, these dataframes will be appended together, resulting in the final df.\n dic={}\n\n # for each sample.\n for i, sample in df_shap_values.iterrows():\n # initialize an empty dictionary, that will contain \"attribute : summed shap values\" for\n # all attributes in this sample.\n dic = {}\n # The code below sums the importances for each category in each attribute in this sample.\n for pos in sample.index:\n attr = re.match(pattern, pos).group()\n if attr not in dic.keys():\n dic[attr] = sample[pos]\n else:\n dic[attr] += sample[pos]\n # Create a df containing only the current sample\n df_sample = pd.DataFrame(dic, index=[i])\n # Append it to a list that will become the full dataframe later\n list_shap_original.append(df_sample)\n\n # Create a DataFrame containing the shap values for the \"original\" attributes.\n shap_original = pd.concat(list_shap_original, axis=0)\n return shap_original\n\"\"\"\n#######################################################################\n#######################################################################\n\"\"\"\ndef ohe_inverse_LR(normalized_alphas):\n \"\"\"Converts a dataframe containing shap values in ohe format\n back to original genomic positions\"\"\"\n\n normalized_alphas = np.abs(normalized_alphas)\n\n # Regular expression to pick attributes names.\n # Since in our case attributes names are the genomic positions (i.e. an integer number), we use the regex below\n import re\n pattern = \"^\\d+\"\n\n # Auxiliary dictionary to create one pd.DataFrame for each sample, summing the shap values for each attribute.\n # Later, these dataframes will be appended together, resulting in the final df.\n dic={}\n\n for index, alpha in normalized_alphas.iteritems():\n # print(index)\n attr = re.match(pattern, index).group()\n if attr not in dic.keys():\n dic[attr] = (0.5 * alpha)\n else:\n dic[attr] += (0.5 * alpha)\n\n shap_original = pd.Series(dic)\n\n return shap_original\n\"\"\"\n#######################################################################\n#######################################################################\n\"\"\"\ndef get_sorted_importances(shap_values_df):\n abs = np.abs(shap_values_df)\n abs_sum = np.sum(abs, axis=0)\n abs_sum_sort = abs_sum.sort_values(ascending=False)\n\n return abs_sum_sort\n\"\"\"\n#######################################################################\nPlot importances on genome\n#######################################################################\n\"\"\"\n\ndef plot_importances_genome(xgb_shap_values_df, rf_shap_values_df, alphas):\n\n xgb_shap_values_df = np.abs(xgb_shap_values_df)\n xgb_shap_values_df = np.sum(xgb_shap_values_df, axis=0)\n\n rf_shap_values_df = np.abs(rf_shap_values_df)\n rf_shap_values_df = np.sum(rf_shap_values_df, axis=0)\n\n fig, axes = plt.subplots(figsize=(10, 8), nrows=3, ncols=1, sharex=True)\n\n axes[0].scatter(xgb_shap_values_df.index, xgb_shap_values_df, c='blue')\n axes[0].set_title(\"\")\n axes[0].set_xlabel(\"\")\n axes[0].set_ylabel(\"XGBoost importances\")\n axes[0].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n axes[0].set_yticklabels([])\n\n axes[1].scatter(rf_shap_values_df.index, rf_shap_values_df, c='blue')\n axes[1].set_title(\"\")\n axes[1].set_xlabel(\"\")\n axes[1].set_ylabel(\"Random Forest importances\")\n axes[1].tick_params(axis='x', 
which='both', bottom=False, top=False, labelbottom=False)\n axes[1].set_yticklabels([])\n\n axes[2].scatter(alphas.index, alphas, c='blue')\n axes[2].set_title(\"\")\n axes[2].set_xlabel(\"Feature position (nucleotide position)\")\n axes[2].set_ylabel(\"Logistic Regression importances\")\n axes[2].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n axes[2].set_yticklabels([])\n\n\n fig.tight_layout();\n\n fig.savefig('./figures/importances_1.png', format='png', dpi=300, transparent=False)\n\n\"\"\"\n#######################################################################\nSummary Plots\n#######################################################################\n\"\"\"\ndef importance_summary(xgb_shap_values_df, rf_shap_values_df, alphas):\n xgb_summary = get_sorted_importances(xgb_shap_values_df)\n rf_summary = get_sorted_importances(rf_shap_values_df)\n sorted_alphas = alphas.sort_values(ascending=False)\n\n fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(10, 8))\n\n ax[0].barh(xgb_summary.index[0:10], xgb_summary[0:10])\n ax[0].set_yticks(xgb_summary.index[0:10])\n ax[0].set_yticklabels(xgb_summary.index[0:10])\n ax[0].invert_yaxis() # labels read top-to-bottom\n ax[0].set_xlabel('Feature Importance')\n ax[0].set_title('XGBoosting')\n ax[0].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n\n ax[1].barh(rf_summary.index[0:10], rf_summary[0:10])\n ax[1].set_yticks(rf_summary.index[0:10])\n ax[1].set_yticklabels(rf_summary.index[0:10])\n ax[1].invert_yaxis() # labels read top-to-bottom\n ax[1].set_xlabel('Feature Importance')\n ax[1].set_title('Random Forest')\n ax[1].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n\n ax[2].barh(sorted_alphas.index[0:10], sorted_alphas[0:10])\n ax[2].set_yticks(sorted_alphas.index[0:10])\n ax[2].set_yticklabels(sorted_alphas.index[0:10])\n ax[2].invert_yaxis() # labels read top-to-bottom\n ax[2].set_xlabel('Feature Importance')\n ax[2].set_title('Logistic Regression')\n ax[2].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n\n fig.tight_layout();\n\n fig.savefig(\"./figures/fig_summary.png\", format='png', dpi=300, transparent=False)\n\n return (xgb_summary, rf_summary, sorted_alphas)\n\n\"\"\"\n#######################################################################\nMerge all results in one series\n\nGets the top 10 most important nucleotide positions from each one of the 3 ML algorithms.\nNormalizes their importance values (in each array, divides each value by the total sum of that array).\nMerges into one array.\nNormalizes again.\n#######################################################################\n\"\"\"\ndef get_merged_results(xgb_summary, rf_summary, sorted_alphas, top=10):\n xgb_summary_top = xgb_summary[:top]\n rf_summary_top = rf_summary[:top]\n sorted_alphas_top = sorted_alphas[:top]\n\n xgb_sum = xgb_summary_top.sum()\n rf_sum = rf_summary_top.sum()\n sorted_alphas_sum = sorted_alphas_top.sum()\n\n xgb_summary_top = xgb_summary_top/xgb_sum\n rf_summary_top = rf_summary_top/rf_sum\n sorted_alphas_top = sorted_alphas_top/sorted_alphas_sum\n\n results_top = pd.concat([xgb_summary_top, rf_summary_top, sorted_alphas_top], axis=0)\n results_dic = {}\n\n for pos, imp in results_top.iteritems():\n if pos not in results_dic:\n results_dic[pos] = imp\n else:\n results_dic[pos] += imp\n\n results_all = pd.Series(results_dic)\n r_sum = results_all.sum()\n results_all = results_all/r_sum\n results_all = 
results_all.sort_values(ascending=False)\n return results_all\n\n\"\"\"\n#######################################################################\nCompare SNVs found by machine learning\nCompare class 1 (high Ct) with class 0 (low Ct) and Alouatta samples.\nReturns a table with the comparisons for the most important nucleotide positions found by the 3 ML algorithms\n#######################################################################\n\"\"\"\ndef validate_SNV(seq_df, imp_merged):\n\n # Callithrix samples with high Ct\n cal_1 = seq_df[seq_df[\"Host\"]==\"Callithrix\"]\n cal_1 = cal_1[cal_1[\"Ct_Group\"]==1]\n # Columns corresponding to top feature importances\n cal_1 = cal_1.loc[:, np.array(list(imp_merged.index)).astype(\"int\")]\n\n # Callithrix samples with low Ct\n cal_0 = seq_df[seq_df[\"Host\"]==\"Callithrix\"]\n cal_0 = cal_0[cal_0[\"Ct_Group\"]==0]\n # Columns corresponding to top feature importances\n cal_0 = cal_0.loc[:, np.array(list(imp_merged.index)).astype(\"int\")]\n\n # Alouatta samples\n aloua = seq_df[seq_df[\"Host\"]==\"Alouatta\"]\n # Columns corresponding to top feature importances\n aloua = aloua.loc[:, np.array(list(imp_merged.index)).astype(\"int\")]\n\n # Arrays to keep the hallmark nucleotide values\n # The most frequent nucleotide for class 0 and Alouatta\n # The differing nucleotide for class 1\n seq_c1 = cal_0.iloc[0,:].copy()\n seq_c0 = cal_0.iloc[0,:].copy()\n seq_alo = cal_0.iloc[0,:].copy()\n\n seq_c1[:]='-'\n seq_c0[:]='-'\n seq_alo[:]='-'\n\n seq_c1.name='Callithrix High Ct'\n seq_c0.name='Callithrix Low Ct'\n seq_alo.name='Alouatta'\n\n # For each nucleotide, gets the most frequent in class 0 and in Alouatta.\n # For class 1, gets the nucleotide that is different from the most frequent in class 0.\n for col, value in seq_c1.iteritems():\n nn_count_0 = cal_0.loc[:,col].value_counts()\n index = pd.Index(nn_count_0).get_loc(nn_count_0.max())\n nn_0 = nn_count_0.index[index]\n seq_c0[col] = nn_0\n\n nn_count_alou = aloua.loc[:,col].value_counts()\n index = pd.Index(nn_count_alou).get_loc(nn_count_alou.max())\n nn_alou = nn_count_alou.index[index]\n seq_alo[col] = nn_alou\n\n nn_count_1 = cal_1.loc[:,col].value_counts()\n for nn, count in nn_count_1.iteritems():\n if nn != nn_0:\n seq_c1[col] = nn\n\n # Creates a dataframe comparing the results\n df1 = pd.DataFrame(seq_c0).T\n df2 = pd.DataFrame(seq_c1).T\n df3 = pd.DataFrame(seq_alo).T\n\n table = pd.concat([df1, df2, df3], axis=0)\n\n table = table.iloc[:, :10]\n\n table_latex = table.to_latex()\n\n return (table, table_latex)\n\n\"\"\"\n#######################################################################\nMAIN\n#######################################################################\n\"\"\"\n\n# Data inmport\n# %%\npickle_ohe = '../Callithrix_Analysis/DATA/!CLEAN/YFV_seq_ohe_df.pkl'\npickle_seqdf = '../Callithrix_Analysis/DATA/!CLEAN/YFV_seq_df.pkl'\n\n(seq_df, ohe_df, ohe_df_calli, ohe_df_alou) = get_data(pickle_seqdf, pickle_ohe)\n\n# Prepare data for training and testing\n(X, y, X_train, X_test, y_train, y_test, scale_pos_weight) = get_train_test_split(ohe_df_calli, test_size=0.1)\n\n# Cell containing XGBoost Grid Search\n# %%\n# A parameter grid for XGBoost\n# params = {\n# 'subsample': [1.0],\n# 'colsample_bytree': [0.3, 0.8],\n# 'max_depth': [3, 5],\n# 'learning_rate': [0.001, 1],\n# 'n_estimators': [250, 10000]\n# }\n#\n# (best_params, results) = grid_cv_xgb(X_train, y_train, scale_pos_weight, params, folds = 5)\n#\n# params_series = results.loc[results['mean_test_score'] == 
np.max(results['mean_test_score']), 'params']\n# for p in params_series:\n# print(p)\n#\n# best_params = params_series.iloc[2]\n\n\n# Train models\n# %%\nbest_params = {'colsample_bytree': 0.3,\n 'learning_rate': 1,\n 'max_depth': 3,\n 'n_estimators': 250,\n 'subsample': 1.0}\n\nxgb = final_xgb(X_train, y_train, X_test, y_test, scale_pos_weight, best_params)\n\nxgb.score(X, y)\n\n(rf, roc_auc, rf.oob_score_) = random_forest(X_train, y_train, X_test, y_test, scale_pos_weight, n=100)\n\n(alphas, e, ave_MSE) = logistic_regression(X_train, y_train, X, k=10)\n\n# Use SHAP to explain models\n# %%\n(xgb_explainer, rf_explainer, xgb_shap_values, rf_shap_values) = get_explainer(xgb, rf, X_train)\n\nxgb_shap_values.shape\nX_train.columns\nrf_shap_values_df = pd.DataFrame(rf_shap_values,\n index=X_train.index,\n columns=X_train.columns)\n\nxgb_shap_values_df = pd.DataFrame(xgb_shap_values,\n index=X_train.index,\n columns=X_train.columns)\n\nrf_shap_values_df = ohe_inverse(rf_shap_values_df)\nxgb_shap_values_df = ohe_inverse(xgb_shap_values_df)\n\n\"\"\"\n#######################################################################\nPlot results\n#######################################################################\n\"\"\"\n# Plot resulting feature importances\n# %%\nplot_importances_genome(xgb_shap_values_df, rf_shap_values_df, alphas)\n\n# Get importances values and genomic locations\n(xgb_summary, rf_summary, sorted_alphas) = importance_summary(xgb_shap_values_df, rf_shap_values_df, alphas)\n\n\"\"\"\n#######################################################################\nAnalyze results\n#######################################################################\n\"\"\"\n\nimp_merged = get_merged_results(xgb_summary, rf_summary, sorted_alphas, 30)\n\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 8))\n\nax.barh(imp_merged.index[0:30], imp_merged[0:30])\nax.set_yticks(imp_merged.index[0:30])\nax.set_yticklabels(imp_merged.index[0:30])\nax.invert_yaxis() # labels read top-to-bottom\nax.set_xlabel('Feature Importance')\nax.set_title('Most important genomic positions found by XGBoost, Random Forest and Modified Logistic Regression together.')\nax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n\nfig.tight_layout();\n\nfig.savefig(\"./figures/combined_30_summary.png\", format='png', dpi=300, transparent=False)\n\n# The analysis below and the results shown in \"table\" demonstrate the power of XGBoost. It only picked 3 features, and there was a total of 5 that really had any informative value. 
All the rest, that both RF and LR gave some importance (albeit small), have no information at all, given that they do not contain a nucleotide that is different from the most frequent one in the other class and in the Alouatta samples.\n(table, table_latex) = validate_SNV(seq_df, imp_merged)\n\n# Requires usepackage{booktabs} on LaTex document\nwith open('./tables/table1_latex.txt', 'w') as f:\n    f.write(table_latex)\n\n\ntable_clean = table.copy()\n\nfor col in table_clean.columns:\n    if '-' in table_clean.loc['Callithrix High Ct', col]:\n        table_clean.drop(col, axis=1, inplace=True)\n\ntable2_latex = table_clean.to_latex()\nwith open('./tables/table2_latex.txt', 'w') as f:\n    f.write(table2_latex)\n\n\nsnv_to_analyze = table_clean.loc['Callithrix High Ct', :]\n\n\n\nimport ref_genome_polyprot_toolbox as g_tool\n\ndataset_file = '../Callithrix_Analysis/DATA/!CLEAN/ALL_YFV.aln'\nref_genome_file = '../Callithrix_Analysis/DATA/!CLEAN/YFV_BeH655417_JF912190.gb'\nref_polyprot_file = '../Callithrix_Analysis/DATA/!CLEAN/YFV_polyprotein_AFH35044.gp'\n\n(ref_genome, ref_polyprot, seq) = g_tool.read_data(ref_genome_file, ref_polyprot_file, dataset_file)\nseq_rel_start = g_tool.find_align_start(seq, ref_genome, 20)\ndic_prot = g_tool.read_polyprotein(ref_polyprot)\n\nreport_dic = {}\nfor nn_pos, nn in snv_to_analyze.iteritems():\n\n    dic = {}\n    # nn_pos = snv_to_analyze.index[1]\n    # nn = snv_to_analyze[nn_pos]\n    nn_pos = int(nn_pos)\n\n    (aa_pos, aa, codon, codon_pos) = g_tool.pos_aminoacid(nn_pos, seq_rel_start, ref_genome, ref_polyprot)\n\n    prot = g_tool.which_protein(aa_pos, dic_prot)\n\n    (codon_seq, aa_seq, ref_pos, codon_ref, aa_ref, codon_pos) = g_tool.seq_snv_info(nn_pos, seq, ref_genome, ref_polyprot)\n\n    codon_seq = list(codon_ref)\n    codon_seq[codon_pos] = nn\n    codon_seq = Seq(\"\".join(codon_seq))\n    aa_seq = codon_seq.translate()\n\n    dic[\"protein\"] = str(prot)\n    dic[\"Reference nn pos\"] = str(ref_pos)\n    dic[\"Reference codon\"] = str(codon_ref)\n    dic[\"Reference aa\"] = str(aa_ref)\n    dic[\"Sequence codon\"] = str(codon_seq)\n    dic[\"Sequence aa\"] = str(aa_seq)\n    dic[\"Codon position (0, 1, 2)\"] = str(codon_pos)\n\n    df = pd.DataFrame(dic, index=[nn_pos])\n    report_dic[nn_pos] = df\n\ntable3 = pd.concat(list(report_dic.values()))\ntable3_latex = table3.to_latex()\nwith open('./tables/table3_latex.txt', 'w') as f:\n    f.write(table3_latex)\n\n# Select only callithrix samples\ndf_calli = seq_df[seq_df['Host'] == 'Callithrix']\ndf_calli = df_calli.sort_values(by='Ct_Group')\n# df_calli.loc[:, [\"Ct_Group\", 2990]]\ndf_calli[df_calli[\"Ct_Group\"]==1][1199]\n","sub_path":"archive/YFV_Ct_Callithrix_main.py","file_name":"YFV_Ct_Callithrix_main.py","file_ext":"py","file_size_in_byte":26430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"44065798","text":"import copy\r\nclass selfDefineMatrix:\r\n    def __init__(self,r=0,c=0):\r\n        self.dimR=r\r\n        self.dimC=c\r\n    def printDim(self):\r\n        return print(\"rows:\", self.dimR, \"cols:\", self.dimC)\r\n    def buildMatrix(self):\r\n        matrixOperator= []\r\n        for i in range(0, self.dimR):\r\n            new = []\r\n            for j in range(0, self.dimC):\r\n                new.append(float(input(\">>\"))) # cast the input so the arithmetic below works on numbers\r\n            matrixOperator.append(new)\r\n        return matrixOperator\r\n\r\n#write a row operation r2 = r1*c + r2\r\ndef rowOperation(rMatrix,r1,r2,c):\r\n    for index in range(0,len(rMatrix[0])):\r\n        rMatrix[r2][index] = rMatrix[r2][index] + c*rMatrix[r1][index]\r\n    return rMatrix\r\n\r\n#write a col operation\r\ndef colOperation(cMatrix,c1,c2,c):\r\n    
for index in range(0,len(cMatrix)):\r\n cMatrix[index][c2] = cMatrix[index][c2] + c*cMatrix[index][c1]\r\n return cMatrix\r\n\r\n# def productMatrix(m1,m2):\r\ndef multiMatrix(matrix1,matrix2):\r\n multimatrix = []\r\n for indexI in range(0, len(matrix1)):\r\n new=[]\r\n for indexJ in range(0, len(matrix2)):\r\n tmp=0\r\n for indexK in range(0, len(matrix1[0])):\r\n tmp = tmp + matrix1[indexI][indexK]*matrix2[indexK][indexJ]\r\n new.append(tmp)\r\n multimatrix.append(new)\r\n return multimatrix\r\n\r\n#define Gaussian Inverse Matrix\r\n# def gaussianInverse(rMatrix):\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n identyMatrix=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]\r\n # print(id(identyMatrix)) #address\r\n\r\n testMatrix = list()\r\n for i in range(len(identyMatrix)):\r\n testMatrix.append(copy.copy(identyMatrix[i]))\r\n # print(id(testMatrix)) #address\r\n\r\n print(rowOperation(rMatrix=testMatrix,r1=0,r2=1,c=1))\r\n print(multiMatrix(testMatrix,testMatrix))\r\n\r\n print(identyMatrix)\r\n","sub_path":"BasicLangLearning/package_tutorial/math_array_operation/OperatingMatrix.py","file_name":"OperatingMatrix.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"483950343","text":"# coding=utf-8\n# Copyright 2019 StrTrek Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# System Required\nimport logging\nimport sched\nimport time\nfrom multiprocessing import Process\n# Outer Required\n# Inner Required\nfrom Babelor.Application import TEMPLE\nfrom Babelor.Presentation import MSG, URL, CASE\nfrom Babelor.Session import MQ\n# Global Parameters\n# logging.basicConfig(level=logging.INFO,\n# format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(levelname)s: %(message)s')\n\n\ndef func_sender(msg: MSG):\n # -—————————————------------------------ INIT ---------\n arguments = {}\n for i in range(0, msg.dt_count, 1):\n argument = msg.read_datum(i)\n if argument[\"path\"] not in arguments.keys():\n arguments[argument[\"path\"]] = argument[\"stream\"]\n # -—————————————------------------------ PROCESS ------\n msg_out = msg\n # -—————————————------------------------ END ----------\n return msg_out\n\n\ndef func_treater(msg: MSG):\n # -—————————————------------------------ INIT ---------\n data = {}\n for i in range(0, msg.dt_count, 1):\n datum = msg.read_datum(i)\n if datum[\"path\"] not in data.keys():\n data[datum[\"path\"]] = datum[\"stream\"]\n # -—————————————------------------------ PROCESS ------\n msg_out = msg\n # -—————————————------------------------ END ----------\n return msg_out\n\n\ndef func_encrypter(msg: MSG):\n # -————————————------------------------ INIT ---------\n data = {}\n for i in range(0, msg.dt_count, 1):\n datum = msg.read_datum(i)\n if datum[\"path\"] not in data.keys():\n data[datum[\"path\"]] = datum[\"stream\"]\n # -————————————------------------------ PROCESS ------\n 
msg_out = msg\n # -————————————------------------------ END ----------\n return msg_out\n\n\ndef sender(url):\n myself = TEMPLE(url)\n myself.open(role=\"sender\")\n\n\ndef treater(url):\n myself = TEMPLE(url)\n myself.open(role=\"treater\", func=func_treater)\n\n\ndef encrypter(url):\n myself = TEMPLE(url)\n myself.open(role=\"encrypter\", func=func_encrypter)\n\n\ndef receiver(url):\n myself = TEMPLE(url)\n myself.open(role=\"receiver\")\n\n\ndef receiver_init():\n # -————————————------------------------ MESSAGE -----\n case = CASE(\"{0}#{1}\".format(origination_url, destination_url))\n receiver_msg = MSG()\n receiver_msg.case = case\n # -————————————------------------------ RECEIVER ----\n receiver_msg.origination = edge_node_url[\"inner\"]\n receiver_msg.destination = destination_url\n # logging.warning(\"RECEIVER::INIT::{0} send:{1}\".format(receiver_url[\"inner\"], receiver_msg))\n recv_init = MQ(receiver_url[\"outer\"])\n recv_init.push(receiver_msg)\n\n\ndef sender_init():\n # -————————————------------------------ MESSAGE -----\n case = CASE(\"{0}#{1}\".format(origination_url, destination_url))\n sender_msg = MSG()\n sender_msg.case = case\n # -————————————------------------------ SENDER ------\n sender_msg.origination = origination_url\n sender_msg.destination = edge_node_url[\"outer\"]\n sender_msg.add_datum(None, \"20190505.xlsx\")\n sender_msg.add_datum(None, \"20190506.xlsx\")\n sender_msg.add_datum(None, \"20190507.xlsx\")\n send_init = MQ(sender_url[\"outer\"])\n send_init.push(sender_msg)\n\n\ndef main():\n # -————————————------------------------ PROCESS ------\n temple = {\n # \"treater\": Process(target=treater, args=(treater_url[\"inner\"],)),\n # \"encrypter\": Process(target=encrypter, args=(encrypter_url[\"inner\"],)),\n \"receiver\": Process(target=receiver, args=(receiver_url[\"inner\"],)),\n \"receiver_init\": Process(target=receiver_init),\n \"sender\": Process(target=sender, args=(sender_url[\"inner\"],)),\n \"sender_init\": Process(target=sender_init),\n }\n # for obj in temple.items():\n # key, value = obj\n # value.start()\n temple[\"receiver\"].start()\n temple[\"sender\"].start()\n temple[\"receiver_init\"].start()\n temple[\"sender_init\"].start()\n\n\nsender_url = {\n \"inner\": URL(\"tcp://*:20001\"),\n \"outer\": URL(\"tcp://127.0.0.1:20001\"),\n}\ntreater_url = {\n \"inner\": URL(\"tcp://*:20002\"),\n \"outer\": URL(\"tcp://127.0.0.1:20002\"),\n}\nencrypter_url = {\n \"inner\": URL(\"tcp://*:20003\"),\n \"outer\": URL(\"tcp://127.0.0.1:20003\"),\n}\nreceiver_url = {\n \"inner\": URL(\"tcp://*:20004\"),\n \"outer\": URL(\"tcp://127.0.0.1:20004\"),\n}\nedge_node_url = {\n \"inner\": URL(\"tcp://*:20005\"),\n \"outer\": URL(\"tcp://127.0.0.1:20005\"),\n}\norigination_url = URL(\"file:///C:/Users/geyua/PycharmProjects/Babelor/data/dir1/\")\ndestination_url = URL(\"file:///C:/Users/geyua/PycharmProjects/Babelor/data/dir2/\")\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"Babelor/Application/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"418629350","text":"import actionlib\nimport rospy\nimport tf\n\nfrom agent.abstract_action import AbstractAction\nfrom agent.util.enuns import LogColor\n\nfrom hera.msg import gotoposeFeedback, gotoposeResult, gotoposeAction, gotoposeGoal\n\nfrom move_base_msgs.msg import MoveBaseGoal\nfrom geometry_msgs.msg import PoseStamped\n\nclass GotoPose(AbstractAction):\n \"\"\"docstring for 
GotoPose.\"\"\"\n def __init__(self, robot):\n super(GotoPose, self).__init__(robot)\n self.robot_ns = rospy.get_namespace()\n\n self._as = actionlib.SimpleActionServer(\"gotopose\", gotoposeAction, self.goal_callback, False)\n self._as.start()\n\n self.tf_listener = tf.TransformListener()\n\n def goal_callback(self, goal):\n result = self.execute(goal.location, goal.reference)\n self._as.set_succeeded(gotoposeResult(result=result))\n\n def execute(self, location, reference):\n self.robot.add_log('gotopose', '', color=LogColor.YELLOW)\n\n # get target position\n (target_trans, target_rot) = self.tf_listener.lookupTransform('/map', reference, rospy.Time(0))\n # (target_trans, target_rot) = get_tf(location)\n target = PoseStamped()\n target.header.frame_id = 'map'\n target.pose.position.x = target_trans[0] + location.position.x\n target.pose.position.y = target_trans[1] + location.position.y\n # target.pose.position.z = target_trans[2]\n # target.pose.orientation.x = target_rot[0]\n # target.pose.orientation.y = target_rot[1]\n target.pose.orientation.z = target_rot[2]\n target.pose.orientation.w = target_rot[3]\n \n # create goal\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.header.stamp = rospy.Time.now()\n goal.target_pose.pose = target.pose\n #\n return self.robot.get_actuators().goto(goal)","sub_path":"action_files/gotopose.py","file_name":"gotopose.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"77002858","text":"import sys\nsys.stdin = open('input_5203.txt', 'r')\n\ndef baby_gin(lst, chk_num):\n counting_list = [0] * 10\n for num in lst:\n counting_list[num] += 1\n for idx in range(10):\n if counting_list[idx] == 3:\n return 1 if not chk_num else 2\n if idx <= 7:\n if counting_list[idx] >= 1 and counting_list[idx + 1] >= 1 and counting_list[idx + 2] >= 1:\n return 1 if not chk_num else 2\n\n\nfor test_case in range(int(input())):\n player_1, player_2 = [], []\n numbers = list(map(int, input().split()))\n for idx in range(len(numbers)):\n if not idx % 2:\n player_1.append(numbers[idx])\n else:\n player_2.append(numbers[idx])\n if idx >= 4:\n if not idx % 2 and len(player_1) >= 3:\n baby_gin_value = baby_gin(sorted(player_1), idx % 2)\n if baby_gin_value is not None:\n print('#{} {}'.format(test_case + 1, baby_gin_value))\n break\n elif idx % 2 and len(player_2) >= 3:\n baby_gin_value = baby_gin(sorted(player_2), idx % 2)\n if baby_gin_value is not None:\n print('#{} {}'.format(test_case + 1, baby_gin_value))\n break\n else:\n print('#{} 0'.format(test_case + 1))","sub_path":"02_algorithm/sw_expert_academy/code_problem/all_problem/5203.py","file_name":"5203.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"311629948","text":"# -*- encoding: utf-8 -*-\n#################################################################################\n# #\n# Copyright (C) 2009 Renato Lima - Akretion, Gabriel C. Stabel #\n# #\n#This program is free software: you can redistribute it and/or modify #\n#it under the terms of the GNU Affero General Public License as published by #\n#the Free Software Foundation, either version 3 of the License, or #\n#(at your option) any later version. 
#\n# #\n#This program is distributed in the hope that it will be useful, #\n#but WITHOUT ANY WARRANTY; without even the implied warranty of #\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n#GNU Affero General Public License for more details. #\n# #\n#You should have received a copy of the GNU Affero General Public License #\n#along with this program. If not, see . #\n#################################################################################\n\nfrom osv import osv, fields\n\n\nclass res_partner(osv.osv):\n _inherit = 'res.partner'\n\n _columns = {\n 'partner_fiscal_type_id': fields.many2one('l10n_br_account.partner.fiscal.type',\n 'Tipo Fiscal do Parceiro', domain=\"[('tipo_pessoa','=',tipo_pessoa)]\"),\n }\n\nres_partner()\n\n\nclass account_fiscal_position_template(osv.osv):\n _inherit = 'account.fiscal.position.template'\n \n _columns = {\n 'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal'),\n }\n \naccount_fiscal_position_template()\n\n\nclass account_fiscal_position(osv.osv):\n _inherit = 'account.fiscal.position'\n\n _columns = {\n 'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal'),\n }\n \naccount_fiscal_position()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"l10n_br_account/partner.py","file_name":"partner.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"139953720","text":"from tabla_errores import *\ntabla_errores = TablaDeErrores()\n\n##-------------------------GRAMATICA ASCENDENTE-------------------------------\nreservadas = {\n 'create' : 'CREATE',\n 'databases' : 'DATABASES', \n 'database' : 'DATABASE', \n 'current_user': 'CURRENT_USER',\n 'session_user': 'SESSION_USER', \n 'table' : 'TABLE',\n 'insert': 'INSERT',\n 'inherits' : 'INHERITS',\n 'smallint': 'SMALLINT',\n 'integer': 'INTEGER',\n 'bigint': 'BIGINT',\n 'decimal': 'R_DECIMAL',\n 'numeric': 'NUMERIC',\n 'real': 'REAL',\n 'double': 'DOUBLE',\n 'precision': 'PRECISION',\n 'money': 'MONEY',\n 'character': 'CHARACTER',\n 'varying': 'VARYING',\n 'varchar' : 'VARCHAR',\n 'bytea' : 'BYTEA',\n 'char' : 'CHAR',\n 'text' : 'TEXT',\n 'now' : 'NOW',\n 'date_part' : 'date_part',\n 'current_date' : 'CURRENT_DATE',\n 'current_time' : 'CURRENT_TIME',\n 'extract' : 'EXTRACT',\n 'timestamp' : 'TIMESTAMP',\n 'without' : 'WITHOUT',\n 'time' : 'TIME',\n 'zone' : 'ZONE',\n 'date' : 'DATE',\n 'interval' : 'INTERVAL',\n 'month' : 'MONTH',\n 'day' : 'DAY',\n 'hour' : 'HOUR',\n 'minute' : 'MINUTE',\n 'second' : 'SECOND',\n 'boolean' : 'BOOLEAN',\n 'year' : 'YEAR',\n 'datetime' : 'DATETIME',\n 'drop' : 'DROP',\n 'alter' : 'ALTER',\n 'delete' : 'DELETE',\n 'not' : 'NOT',\n 'null' : 'NULL',\n 'foreign' : 'FOREIGN',\n 'key' : 'KEY',\n 'primary' : 'PRIMARY',\n 'references' : 'REFERENCES',\n 'use' : 'USE',\n 'select' : 'SELECT',\n 'distinct' : 'DISTINCT',\n 'as' : 'AS',\n 'enum' : 'ENUM',\n 'type' : 'TYPE',\n 'from' : 'FROM',\n 'left' : 'LEFT',\n 'join' : 'JOIN',\n 'right' : 'RIGHT',\n 'on' : 'ON',\n 'any' : 'ANY',\n 'count' : 'COUNT',\n 'sum' : 'SUM',\n 'like' : 'LIKE',\n 'avg' : 'AVG',\n 'abs' : 'ABS',\n 'cbrt' : 'CBRT',\n 'ceil' : 'CEIL',\n 'ceiling' : 'CEILING',\n 'degrees' : 'DEGREES',\n 'div' : 'DIV',\n 'exp' : 'REXP',\n 'factorial' : 'FACTORIAL',\n 'floor' : 'FLOOR',\n 'gcd' : 'GCD',\n 'ln' : 'LN',\n 'log' : 'LOG',\n 'mod' : 'MOD',\n 'pi' : 'PI',\n 'power' : 'POWER',\n 'radians' : 'RADIANS',\n 
'round' : 'ROUND',\n 'acos' : 'ACOS',\n 'asin' : 'ASIN',\n 'atan' : 'ATAN',\n 'atan2' : 'ATAN2',\n 'cos' : 'COS',\n 'cot' : 'COT',\n 'sin' : 'SIN',\n 'tan' : 'TAN',\n 'acosd' : 'ACOSD',\n 'asind' : 'ASIND',\n 'atand' : 'ATAND',\n 'atan2d' : 'ATAN2D',\n 'cosd' : 'COSD',\n 'cotd' : 'COTD',\n 'sind' : 'SIND',\n 'tand' : 'TAND',\n 'sinh' : 'SINH',\n 'cosh' : 'COSH',\n 'tanh' : 'TANH',\n 'asinh' : 'ASINH',\n 'acosh' : 'ACOSH',\n 'atanh' : 'ATANH',\n 'max' : 'MAX',\n 'min' : 'MIN',\n 'order' : 'ORDER',\n 'where' : 'WHERE',\n 'if' : 'IF',\n 'owner' : 'OWNER',\n 'mode' : 'MODE',\n 'and' : 'AND',\n 'or' : 'OR',\n 'between' : 'BETWEEN',\n 'in' : 'IN',\n 'inner' : 'INNER',\n 'full' : 'FULL',\n 'self' : 'SELF',\n 'case' : 'CASE',\n 'union' : 'UNION',\n 'group' : 'GROUP',\n 'having' : 'HAVING',\n 'exists' : 'EXISTS',\n 'intersect' : 'INTERSECT',\n 'except' : 'EXCEPT',\n 'offset' : 'OFFSET',\n 'limit' : 'LIMIT',\n 'all' : 'ALL',\n 'into' : 'INTO',\n 'some' : 'SOME',\n 'backup' : 'backup',\n 'to' : 'TO',\n 'disk' : 'DISK',\n 'constraint' : 'CONSTRAINT',\n 'rename' : 'RENAME',\n 'add' : 'ADD',\n 'check' : 'CHECK',\n 'default' : 'DEFAULT',\n 'modify' : 'MODIFY',\n 'column' : 'COLUMN',\n 'set' : 'SET',\n 'unique' : 'UNIQUE',\n 'index' : 'INDEX',\n 'auto_increment' : 'AUTO_INCREMENT',\n 'values' : 'VALUES',\n 'identity' : 'IDENTITY',\n 'by' : 'BY',\n 'with' : 'WITH',\n 'replace' : 'REPLACE', \n 'desc' : 'DESC',\n 'outer' : 'OUTER',\n 'is' : 'IS',\n 'top' : 'TOP',\n 'truncate' : 'TRUNCATE',\n 'update' : 'UPDATE',\n 'asc' : 'ASC',\n 'show': 'SHOW',\n 'when' : 'WHEN',\n 'then' : 'THEN',\n 'greatest' : 'GREATEST',\n 'least' : 'LEAST',\n 'end' : 'END',\n 'else' : 'ELSE',\n 'true' : 'TRUE',\n 'false' : 'FALSE',\n 'unknown' : 'UNKNOWN',\n 'isnull' : 'ISNULL',\n 'notnull' : 'NOTNULL',\n 'length' : 'LENGTH',\n 'substring' : 'SUBSTRING',\n 'trim' : 'TRIM',\n 'md5' : 'MD5',\n 'sha256' : 'SHA256',\n 'substr' : 'SUBSTR',\n 'get_byte' : 'GET_BYTE',\n 'set_byte' : 'SET_BYTE',\n 'convert' : 'CONVERT',\n 'encode' : 'ENCODE',\n 'decode' : 'DECODE',\n 'sign' : 'SIGN',\n 'sqrt' : 'SQRT',\n 'width_bucket' : 'WIDTH_BUCKET',\n 'trunc' : 'TRUNC',\n 'random' : 'RANDOM',\n 'exp' : 'EXP' # duplicate key: overrides the earlier 'exp' : 'REXP', so 'REXP' never reaches the token list\n}\n\ntokens = [\n 'PTCOMA',\n 'PARIZQ',\n 'PARDER',\n 'COMA',\n 'PUNTO',\n 'MAS',\n 'MENOS',\n 'POR',\n 'DIVISION',\n 'MODULO',\n 'CONCAT',\n 'PIPE',\n 'IGUAL',\n 'MAYORIGUAL',\n 'MAYOR',\n 'DIFERENTE',\n 'NO_IGUAL',\n 'MENORIGUAL',\n 'MENOR',\n 'ASIGNACION_SUMA',\n 'ASIGNACION_RESTA',\n 'ASIGNACION_MULT',\n 'ASIGNACION_DIVID',\n 'ASIGNACION_MODULO',\n 'DOS_PUNTOS',\n 'DIAG_INVERSA',\n 'DECIMAL',\n 'ENTERO',\n 'CADENA',\n 'ID',\n 'COMILLA_SIMPLE'\n] + list(reservadas.values())\n\n# Tokens\nt_PTCOMA = r';'\nt_PARIZQ = r'\\('\nt_PARDER = r'\\)'\nt_COMA = r'\\,'\nt_PUNTO = r'\\.'\nt_MAS = r'\\+'\nt_MENOS = r'-'\nt_POR = r'\\*'\nt_DIVISION = r'/'\nt_MODULO = r'\\%'\nt_PIPE = r'\\|'\nt_EXP = r'\\^'\nt_IGUAL = r'\\='\nt_MAYOR = r'>'\nt_MENOR = r'<'\nt_MENORIGUAL = r'<='\nt_MAYORIGUAL = r'>='\nt_DIFERENTE = r'<>'\nt_NO_IGUAL = r'!='\nt_ASIGNACION_SUMA = r'\\+='\nt_ASIGNACION_RESTA = r'\\-='\nt_ASIGNACION_MULT = r'\\*='\nt_ASIGNACION_DIVID = r'\\/='\nt_ASIGNACION_MODULO = r'\\%='\nt_DOS_PUNTOS = r'\\:'\nt_DIAG_INVERSA = r'\\\\'\nt_COMILLA_SIMPLE = r'\\''\n\ndef t_DECIMAL(t):\n r'\\d+\\.\\d+'\n try:\n t.value = float(t.value)\n except ValueError:\n print(\"Float value too large %s\" % t.value)\n t.value = 0\n return t\n\ndef t_ENTERO(t):\n r'\\d+'\n try:\n t.value = int(t.value)\n except ValueError:\n print(\"Integer value too large %s\" % 
t.value)\n t.value = 0\n return t\n\ndef t_ID(t):\n r'[a-zA-Z_@#][a-zA-Z_0-9@$#]*'\n t.type = reservadas.get(t.value.lower(),'ID') # Check for reserved words\n return t\n\ndef t_CADENA(t):\n r'(\\'.*?\\')|(\\\".*?\\\")'\n t.value = t.value[1:-1] # remove the quotes\n return t \n\n# Multi-line comment /* .. */\ndef t_COMENTARIO_MULTILINEA(t):\n r'/\\*(.|\\n)*?\\*/'\n t.lexer.lineno += t.value.count('\\n')\n\n# Single-line comment -- ...\ndef t_COMENTARIO_SIMPLE(t):\n r'--.*\\n'\n t.lexer.lineno += 1\n\n# Ignored characters\nt_ignore = \" \\t\"\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n \ndef t_error(t):\n error = Error('Léxico', \"Caracter desconocido '%s'\" % t.value[0], t.lexer.lineno)\n tabla_errores.agregar(error)\n print(error.imprimir())\n t.lexer.skip(1)","sub_path":"parser/team22/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":7032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"109441260","text":"a=input()\r\nnum=int(a.split(\" \")[0])\r\nline=int(a.split(\" \")[1])\r\ndata=[]\r\n\r\ndata_raw=input()\r\n\r\nfor i in range(num):\r\n data.append(int(data_raw.split(\" \")[i]))\r\ndata.sort()\r\n\r\nfor i in range(1, num+1):\r\n print(data[i-1], end=\" \")\r\n if i%line==0:\r\n print(\"\")","sub_path":"1400/1425.py","file_name":"1425.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"371196615","text":"import logging\nimport datetime\n\nfrom basehandler import BaseHandler\nfrom Models import Game\nfrom storybooklib import getRecentScoreInfo, markAFKS, changeToDisplayPhase\nfrom django.utils import simplejson as json\n\nclass VoteCompleteVerification(BaseHandler):\n def post(self):\n if not self.user:\n logging.critical(\"Invalid vote completion verification detected!\")\n else:\n game_id = self.request.get('game_id')\n game = Game.get_by_key_name(game_id)\n #game = retrieveCache(game_id, Game)\n self.response.headers['Content-type'] = 'application/json'\n if not game.can_vote and game.display_phase:\n self.response.headers.add_header('completed', \"v\")\n recent_winner_string = \"\\\"\" + game.winning_sentences[len(game.winning_sentences)-1] + \"\\\" By: \" + game.winning_users_names[len(game.winning_users_names) - 1]\n self.response.headers.add_header('recent_winner', recent_winner_string)\n elif datetime.datetime.now() > game.end_vote_time:\n markAFKS(game, game.users_voted)\n changeToDisplayPhase(game, self)\n else:\n self.response.headers.add_header('completed', \"i\")\n response = {}\n response['winning_data'] = getRecentScoreInfo(game)\n self.response.out.write(json.dumps(response))\n return\n","sub_path":"votecompleteverification.py","file_name":"votecompleteverification.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"88741414","text":"import sys\nimport time\nimport MovementFilter\n\nt = time.clock()\nsrc = \"\"\ndest = \"\"\nhelp = \"-i input video\\n\" \\\n \"-o target folder\\n\" \\\n \"-w Webcam mode\\nPress \\'q\\' to quit program\"\n\nif len(sys.argv) > 1:\n for i in range(len(sys.argv)):\n if sys.argv[i] == \"-i\":\n i += 1\n assert sys.argv[i] != None\n src = sys.argv[i]\n continue\n elif sys.argv[i] == \"-o\":\n i += 1\n assert sys.argv[i] != None\n dest = sys.argv[i]\n continue\n elif sys.argv[i] == \"-w\":\n i += 1\n assert sys.argv[i] != None\n src = 0\n 
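# webcam mode: source 0 conventionally selects the default camera device\n 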
break\n filter = MovementFilter.MovementFilter(src, dest)\n filter.processVideo()\nelse:\n print(\"No parameters given.\")\n print(help)\n\ntt = time.clock()\nprint(str(tt - t) + \" s\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"192555201","text":"import variables\n\n# Home location\nhome = (variables.dim_x / 2.2, variables.dim_y / 2.2,0)\n\n# Size of home, measured from the home location along the +x, +y, and +z axes\nhome_size = (40, 40, variables.epsilon)\n\n# The exact center of the defined home square\nhome_center = tuple([h_coord + size_coord / 2 for h_coord, size_coord in \n zip(home,home_size)])\n\n# Location where construction is to begin\nconstruction_location = (variables.dim_x / 2, variables.dim_y / 2,0)\n\n# Measured in the same fashion as home_size\nconstruction_size = (40,40,variables.epsilon)\n\n# Exact center of defined construction square\nconstruction_location_center = tuple([h_coord + size_coord / 2 for h_coord, size_coord in \n zip(construction_location,construction_size)])\n\n# Angle Constraint : When wiggling, if no beam is found within this\n# angle from the vertical, then the beam is laid at vertical_angle_set (\n# which is just an angle, so no direction is actually specified)\n# All angles are in degrees.\nbeam = {\n # The length of the beam\n 'length' : variables.beam_length,\n\n # The minimum angle from vertical at which a beam is allowed to be constructed when\n # intersecting with another\n 'min_angle_constraint' : 5,\n\n # The maximum angle from vertical at which a beam is allowed to intersect with another\n 'max_angle_constraint' : 60,\n\n # The default vertical direction\n 'vertical_dir_set' : (0,0,1),\n\n # The limit at a joint at which a beam is considered unsuitable for further travel\n 'joint_limit' : variables.joint_limit,\n\n # The limit along a beam which, if exceeded, indicates the beam should be repaired\n # This is always the limit (if the class does not inherit from SmartRepairer),\n # or the 'vertical_beam_limit' (if the robot class does inherit)\n 'beam_limit' : 0.55,\n\n # This limit is only used by classes which inherit from SmartRepairer.\n # It is the limit at which a horizontal beam would be considered in need of \n # repair.\n 'horizontal_beam_limit' : 8.3,\n\n # Only used for classes which inherit from IntelligentRepairer\n # If the moment changes at a greater rate than moment_change_limit, then the\n # beam is considered to need repair. 
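\n # (Hypothetical example: with the 0.2 limit below, a moment that jumps from\n # 1.0 to 1.3 in one timestep exceeds the limit and flags the beam.)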
\n 'moment_change_limit' : 0.2,\n\n # If a beam exceeds this limit at any point, it is considered to have failed.\n # The simulation halts immediately.\n # The structural elements are checked at every timestep\n 'structure_check' : variables.structure_check,\n\n # The angle from vertical at which a support beam is constructed\n 'support_angle' : 60,\n\n # The minimum angle at which a support beam is allowed to be constructed if \n # it intersects with another beam in the structure\n 'support_angle_min' : 0,\n\n # The maximum angle (from vertical)\n 'support_angle_max' : 60,\n\n # This is only used for classes which inherit from LeanRepairer\n # This is the angle from the vertical at which a beam is initially constructed\n 'construction_angle' : 30,\n\n # This is the angle between the beam we want to repair and the beam we are\n # currently on.\n # If the actual angle is greater, then we add a support beam\n # If it is less, then we repair directly\n 'direct_repair_limit' : 90,\n\n # This is how far a support beam construction from our current beam must\n # occur in order for the beam to be considered acceptable\n 'support_angle_difference' : 10,\n\n # If a beam is within this angle from vertical, it is considered vertical for \n # the purpose of determining the direction we wish to travel in\n 'verticality_angle' : 5,\n\n # If a direction is within this angle of the direction of the beam we want to\n # repair, then we consider it a preferred location.\n 'direction_tolerance_angle': 30,\n\n # This is the maximum angle change that can occur due to the moment vector\n 'moment_angle_max' : 45,\n\n # If there is a joint within this distance of the tip, then the beam is \n # considered to be support and no longer requires repair\n 'joint_distance' : 48, # inches\n\n # If there is a joint within this distance of where we want to build, then the\n # robot goes ahead and uses this nearby joint.\n 'joint_error' : 2 # inches\n}\n","sub_path":"construction.py","file_name":"construction.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636451942","text":"import os\nfrom SCons.Script import *\n\ndef print_size(env, source, alias='size'):\n\taction = Action(\"$SIZE %s\" % source[0].path, cmdstr=\"Used section sizes:\")\n\treturn env.AlwaysBuild(env.Alias(alias, source, action))\n\n\ndef setup_gnu_tools(env, prefix):\n\tgnu_tools = ['gcc', 'g++', 'gnulink', 'ar', 'gas']\n\tfor tool in gnu_tools:\n\t\tenv.Tool(tool)\n\tenv['CC'] = prefix+'gcc'\n\tenv['CXX'] = prefix+'g++'\n\tenv['AR'] = prefix+'ar'\n\tenv['AS'] = prefix+'as'\n\tenv['OBJCOPY'] = prefix+'objcopy'\n\tenv['OBJDUMP'] = prefix+'objdump'\n\tenv['NM'] = prefix+\"nm\"\n\tenv['RANLIB'] = prefix+\"ranlib\"\n\tenv['SIZE'] = prefix+\"size\"\n\tenv['PROGSUFFIX'] = '.elf'\n\t\n\tdevice = env['DEVICE']\n\tenv.Append(CPPDEFINES = {})\n\tif 'defines' in device:\n\t\tenv.Append(CPPDEFINES = device['defines'])\n\tif 'clock' in device:\n\t\tenv.Append(CPPDEFINES = {'F_CPU' : device['clock'] })\n\t\n\tenv['CFLAGS'] = [\"-std=gnu99\", \"-Wredundant-decls\",\"-Wnested-externs\"]\n\t\n\tenv['CCFLAGS'] = [\n\t\t\"-mcpu=\" + device['cpu'],\n\t\t\"-gdwarf-2\",\n\t\t\"-funsigned-char\",\n\t\t\"-funsigned-bitfields\",\n\t\t\"-fshort-enums\",\n\t\t\"-fno-split-wide-types\",\n\t\t\"-ffunction-sections\",\n\t\t\"-fdata-sections\",\n\t\t\"-Wall\",\n\t\t\"-Wformat\",\n\t\t\"-Wextra\",\n\t\t\"-Wpointer-arith\",\n\t\t\"-Wunused\",\n\t\t#\"-pedantic\"\n\t]\n\tif 'archOptions' in 
device:\n\t\tfor option in device['archOptions']:\n\t\t\tenv.Append(CCFLAGS = \"-%s\" % option)\n\t\n\tenv['CXXFLAGS'] = [\n\t\t\"-fno-exceptions\",\n\t\t\"-nostdlib\",\n\t\t\"-fno-threadsafe-statics\",\n\t\t\"-fno-rtti\",\n\t\t\"-fuse-cxa-atexit\",\n\t\t\"-Woverloaded-virtual\",\n\t\t\"-std=c++03\"\n\t]\n\t\n\tenv['ASFLAGS'] = [\n\t\t\"-mcpu=\" + device['cpu'],\n\t\t\"-gdwarf-2\",\n\t\t\"-xassembler-with-cpp\",\n\t]\n\t\n\tlinkerscript = \"\"\n\tif 'linkerscript' in device:\n\t\tlinkerscript = os.path.join(env.Dir('.').srcnode().abspath, device['linkerscript'])\n\t\tlinkerscript = '\"-T%s\"' % linkerscript\n\t\n\tenv['LINKFLAGS'] = [\n\t\t\"-mcpu=\" + device['cpu'],\n\t\t\"-Wl,--gc-sections\",\n\t\t\"-nostartfiles\",\n\t\tlinkerscript\n\t]\n\t\n\thexBuilder = Builder(\n\t\taction = '$OBJCOPY -O ihex --only-section .text --only-section .rodata --only-section .ctors --only-section .dtors --only-section .data $SOURCE $TARGET', \n\t\tsrc_suffix = \".elf\",\n\t\tsuffix = \".hex\")\n\t\n\tdisasmBuilder = Builder(\n\t\taction = '$OBJDUMP -h -S $SOURCE > $TARGET', \n\t\tsrc_suffix = \".elf\",\n\t\tsuffix = \".lss\")\n\t\n\tenv.Append(BUILDERS = {\n\t\t'Hex': hexBuilder,\n\t\t'Disassembly': disasmBuilder})\n\t\n\tenv.AddMethod(print_size, 'Size')\n\t\n\tenv['CCCOMSTR'] = \"ARM Compiling C: $TARGET\"\n\tenv['CXXCOMSTR'] = \"ARM Compiling C++: $TARGET\"\n\tenv['ASCOMSTR'] = \"ARM Assembling: $TARGET\"\n\tenv['ASPPCOMSTR'] = \"ARM Assembling: $TARGET\"\n\tenv['LINKCOMSTR'] = \"ARM Linking: $TARGET\"\n\t\ndef generate(env, **kw):\n\tsetup_gnu_tools(env, 'arm-none-eabi-')\n\ndef exists(env):\n\treturn env.Detect('arm-none-eabi-gcc')","sub_path":"scons/arm.py","file_name":"arm.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"521291039","text":"from tkinter import *\nfrom typing import TYPE_CHECKING\nimport numpy as np\nimport random\nimport math\nfrom queue import Empty, PriorityQueue\nimport heapq\nfrom collections import deque\n\n\n# 0: blocked\n#1: avaliable block\n# -1: start\n# -2: end\n# -3: maze fire\n# 5: move back\n# 2 double move back\n# 6: on fire\n# 7: path\n\ndef main():\n \n # Declaring global variables \n global SIZE # Total size of the maze (Length of the array used to represent the maze)\n global DIM # Dimension of the maze to make a n x n maze.\n global GRID # Array object used to represent the maze.\n global PROB # Probability of a space in the maze being an obstacle\n global start # Starting point the agent travels from\n global end # End goal that the agent must reach\n global q # flammability rate\n\n # Initializing variables with user input through command line\n DIM = int(input('Enter the size of the array: \\n'))\n PROB = float(input('Enter the probability of an element between 1 or 0 (Not including 0): \\n'))\n\n # Ensures that the probaility must be at least a 0 and less than 1\n while (PROB <0 or PROB >=1):\n print ('PROB must be between 1 and 0 (Not including 0 or 1)')\n PROB = float(input('Enter the probability of an element between 1 or 0 (Not including 0 or 1): \\n'))\n\n # Initializing the flammability rate \n q = 0\n # Initializing the size based on user input for dimensions.\n SIZE = DIM**2\n\n # Asks users if they want random start and end points and generates start and end points\n startEnd = input('Random start and end locations? 
(Y or N): \\n').casefold().strip()\n # Checks for unacceptable answers\n while (startEnd != 'y' and startEnd != 'n'):\n print('Please enter Y or N')\n startEnd = input('Random start and end locations? (Y or N): \\n')\n if (startEnd == 'y'):\n start = random.randint(0,SIZE-1)\n end = random.randint(0,SIZE-1)\n while (end == start):\n end = random.randint(0,SIZE-1) # check so that start and end is not the same \n else:\n start = 0\n end = SIZE - 1\n\n # Print statement to let the user know what the start and end points are.\n print(\"Going from {} -> {}\\n\".format(start,end) ) \n\n # Starts a while loop to get user input on what type of search they would like to execute\n exit = False\n while not exit:\n searchOrStrat = (input('Would you like to see a search type or strategy? (enter 1 for search type and 2 for strategy) \\n')).strip()\n if searchOrStrat == '1':\n\n #makes the Grid and also make sure there is a solution\n makeGrid()\n solution = AStar(start,end)\n while (not solution):\n makeGrid()\n solution = AStar(start,end)\n searchType = input('Enter which search type you would like to use (enter 1 = BFS, 2 = DFS, 3 = Astar): \\n').strip()\n # Executes the right method based on the user input and exits the while loop\n if searchType == \"1\":\n path = BFS(start,end)\n paintGRID(path)\n exit = True\n \n elif searchType == \"2\":\n path = DFS(start,end)\n paintGRID(path)\n exit = True\n\n elif searchType == \"3\":\n path = AStar(start,end)\n paintGRID(path)\n exit = True\n # Case for invalid user input\n else: print(\"Please enter 1, 2, or 3.\\n\")\n\n elif searchOrStrat == '2':\n # Asks for a flammability rate\n q = float(input('Enter the flammability rate: \\n'))\n # Ensures the rate is at least a 0 and at most a 1\n while (q <0 or q >1):\n print ('Flammability rate must be between 1 and 0')\n q = float(input('Enter the flammability rate: \\n'))\n\n strat = input(\"Which strategy would you like to execute? 
(Enter 1, 2, or 3)\\n\").strip()\n\n # creates the maze and makes sure that there is a path from fire to maze and there is a solution\n checkGrid()\n\n if strat == \"1\":\n path = strategy1(start,end)\n exit = True\n\n elif strat == \"2\":\n path = strategy2(start,end)\n exit = True\n\n elif strat == \"3\":\n path = strategy3(start,end)\n exit = True\n else:\n print(\"Please enter 1, 2, or 3.\\n\")\n \n else: print(\"Please enter 1, 2, or 3.\\n\")\n\n print()\n\n######################[problems]######################\n\n# Method that determines if a path between two points is possible by using DFS \ndef problem2():\n global DIM\n global PROB\n i = 0\n k = 0\n iterations = 100\n result = [0] * 20\n while i < 1:\n PROB = float(str(round(i, 2)))\n print(PROB)\n result[k] = 0\n for j in range(iterations):\n makeGrid()\n if (DFS(start, end)):\n result[k] = result[k] + 1\n k = k + 1\n i = i + 0.05\n print(\"Dimension size is\", DIM, \"and there were\", iterations, \"iterations done\")\n print(result)\n\n# Method that executes all three strategies on the same maze and compares the results\ndef problem6():\n global GRID\n global DIM\n global PROB\n global q\n i = 0\n k = 0\n PROB = 0.3\n iterations = 30\n result1 = [0] * 20\n result2 = [0] * 20\n result3 = [0] * 20\n while i <= 1:\n q = float(str(round(i, 2)))\n print(q)\n result1[k] = 0\n result2[k] = 0\n result3[k] = 0\n for j in range(iterations):\n checkGrid()\n copyMaze = np.copy(GRID)\n\n if (strategy1(start, end)):\n result1[k] = result1[k] + 1\n GRID = np.copy(copyMaze)\n if (strategy2(start, end)):\n result2[k] = result2[k] + 1\n GRID = np.copy(copyMaze)\n if (strategy3(start, end)):\n result3[k] = result3[k] + 1\n GRID = np.copy(copyMaze)\n k = k + 1\n i = i + 0.05\n print(\"Dimension size is\", DIM, \"and there were\", iterations, \"iterations done\")\n print(\"Strategy 1:\", result1)\n print(\"Strategy 2:\", result2)\n print(\"Strategy 3:\", result3)\n\n######################[functions]######################\n# Method to determine the origin of the fire\ndef startFire():\n global GRID\n #Determines a random point to start the fire\n fireSeed = random.randint(0,SIZE-1) \n\n # Checks for fire starting at start or end point\n while (GRID[fireSeed] == 0 or fireSeed == SIZE-1):\n fireSeed = random.randint(0,SIZE-1)\n # Changes array element to represent fire\n GRID[fireSeed] = -3\n\n return fireSeed\n\n# Method that displays the colored maze\ndef paintGRID(path):\n global GRID\n\n showTempMaze()\n \n length = len(path)\n # fills in the colors for the maze \n for i in range(length):\n if (i > 1) and (path[i] not in getNeighbors(path[i-1])) and (not i == (length-1) ) :\n backtrack = [path[i-1]] + AStar(path[i-1],path[i])\n for j in backtrack:\n if j == path[i-1]:\n temp = GRID[j]\n GRID[j] = 6\n showTempMaze()\n GRID[j] = temp\n\n GRID[j]= pickColor(j)\n showTempMaze()\n\n else:\n GRID[path[i]] = pickColor(path[i])\n showTempMaze()\n\n GRID[path[i]] = -2\n showMaze()\n return\n\n# Method to execute strategy 3\ndef strategy3(start,end):\n global GRID\n # Checks if there is a path\n path = AStarMod(start,end) \n # If there is no path, prints an error message\n if not path:\n print(\"strategy 3: No Solution\")\n showMaze()\n return False\n\n current = start\n\n while not path[0] == end:\n path = AStarMod(current,end)\n\n if (not path):\n print(\"strategy 3: SORRY NO SAFE PATH!\")\n showMaze()\n return False\n\n GRID[path[0]] = pickColor(path[0])\n current = path[0]\n showTempMaze()\n\n #Case for the agent reaching the goal state\n if current == 
end:\n print(\"strategy 3: MADE IT!\")\n showTempMaze()\n GRID[end] = -2\n showMaze()\n return True\n\n advFireOneStep()\n showTempMaze()\n\n # Case for the agent catching on fire\n if (GRID[current] == -3):\n print(\"strategy 3: YOU'RE ON FIRE!\")\n GRID[current] = 6\n showMaze()\n return False\n # Safeguard incase code exits while loop without returning a value\n print(\"strategy 3: MADE IT!\")\n showTempMaze()\n GRID[end] = -2\n showMaze()\n return True\n\n# Creates a new heuristic that takes into account the fire and it's neighbors\ndef heu2(current,end):\n\n global SIZE\n global DIM\n\n #euclidean distance\n dis = diagDis(current,end)\n\n # distance from fire \n fire = diagDis(current,findFire(current))\n\n if fire <= 2:\n dis = dis + ((DIM*2)-fire) + len(getFireNeighbors(current,GRID))\n\n return dis\n\n# Method that INSERT DEF\ndef findFire(strt):\n\n global GRID\n\n\n # creates the stack that you have already visited\n closedSet = []\n\n # creates the fringe stack and adds start to fringe\n fringe = deque([(strt,[])])\n\n while fringe: # checks if the fringe is empty\n \n # current is the current node\n # path keeps track of the path taken to reach current from start\n # everything at the top of the queue will always be the shortest path\n current, path = fringe.popleft()\n closedSet.append(current)\n\n # found the path\n if (GRID[current] == -3):\n return current\n\n # calculate the valid neighbors\n vldneigh = getNeighbors(current) + getFireNeighbors(current,GRID)\n\n for child in vldneigh:\n if child not in closedSet:\n # new path is path + current for this child\n fringe.append((child,path + [current]))\n closedSet.append(child)\n\n return None\n\n# Method that uses Astar search but also uses the diagonal distance to end goal and distance to fire as a weight in its decision\ndef AStarMod(start,end):\n \n dist = {}\n processed = {}\n prev = {}\n #Initializes the lists used\n for v in range(SIZE):\n dist[v] = math.inf\n processed[v] = False\n prev[v] = NONE\n\n dist[start] =0\n fringe = []\n heapq.heappush(fringe,(dist[start],start,[]))\n prev[start] = start\n \n while fringe: # checks if the fringe is empty\n (d,v,path) = heapq.heappop(fringe)\n # found the path\n if (v == end):\n return path\n # goes through the nodes that haven't been processed yet.\n if not processed[v]:\n vldneigh = getNeighbors(v)\n for u in vldneigh:\n if ((d + heu2(u,end)) < dist[u]):\n dist[u] = d + heu2(u,end)\n heapq.heappush(fringe,(dist[u],u,path + [u]))\n prev[u] = v\n processed[v] = True\n return None\n\n# Executes the AStar search algo to find fire\ndef AStarFire(start,end):\n \n dist = {}\n processed = {}\n prev = {}\n\n for v in range(SIZE):\n dist[v] = math.inf\n processed[v] = False\n prev[v] = NONE\n\n dist[start] =0\n fringe = []\n heapq.heappush(fringe,(dist[start],start,[]))\n prev[start] = start\n \n while fringe: # checks if the fringe is empty\n \n (d,v,path) = heapq.heappop(fringe)\n #print(fringe)\n\n # found the path\n if (v == end):\n return path\n if not processed[v]:\n vldneigh = getNeighbors(v) + getFireNeighbors(v,GRID)\n for u in vldneigh:\n if ((d + diagDis(u,end)) < dist[u]):\n dist[u] = d + diagDis(u,end)\n #print('going to {} from {} is {}'.format(v,u,dist[u]))\n heapq.heappush(fringe,(dist[u],u,path + [u]))\n prev[u] = v\n processed[v] = True\n return None\n\n# Keeps track of current fire and creates a new path but never expects the future\ndef strategy2(start,end):\n\n global GRID\n\n path = AStar(start,end)\n\n if not path:\n print(\"strategy 2: No Solution\")\n 
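# no start-to-goal route exists even before the fire advances, so give up early\n 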
showMaze()\n return False\n\n current = start\n\n while not path[0] == end:\n \n path = AStar(current,end)\n\n\n if (not path):\n print(\"strategy 2: SORRY NO SAFE PATH!\")\n showMaze()\n return False\n\n\n # guy moves\n GRID[path[0]] = pickColor(path[0])\n\n showTempMaze()\n\n current = path[0]\n\n if current == end:\n print(\"strategy 2: MADE IT!\")\n showTempMaze()\n GRID[end] = -2\n showMaze()\n return True\n\n advFireOneStep()\n\n showTempMaze()\n\n if (GRID[current] == -3):\n print(\"strategy 2: YOU'RE ON FIRE!\")\n GRID[current] = 6\n showMaze()\n return False\n\n print(\"strategy 2: MADE IT!\")\n showTempMaze()\n GRID[end] = -2\n showMaze()\n return True\n\n# Executes strategy1\ndef strategy1(start,end):\n\n global GRID\n path = AStar(start,end)\n\n if not path:\n print(\"strategy 1: No solution\")\n showMaze()\n return False\n\n pathLength = len(path)\n #print(path)\n\n for current in range(pathLength):\n\n if (GRID[path[current]] == -3):\n print(\"strategy 1: YOU'RE ON FIRE!\")\n GRID[path[current]] = 6\n showMaze()\n return False\n\n if path[current] == end:\n print(\"strategy 1: Made it!\")\n GRID[end] = 7\n showTempMaze()\n GRID[end] = -2\n showMaze()\n return True\n\n #moves the guy forward\n GRID[path[current]] = pickColor(path[current])\n showTempMaze()\n\n # moves fire forward\n advFireOneStep()\n showTempMaze()\n\n # kills guy if he is currently on fire (== -3) or if his next move is on fire\n if (GRID[path[current]] == -3):\n print(\"strategy 1: YOU'RE ON FIRE!\")\n GRID[path[current]] = 6\n showMaze()\n return False\n\n print(\"strategy 1: Made it!\")\n GRID[end] = 7\n showTempMaze()\n GRID[end] = -2\n showMaze()\n return True\n\n# Advances the fire by one step\ndef advFireOneStep():\n\n global q\n global GRID\n\n mazeCopy = np.copy(GRID)\n\n for current in range(SIZE):\n\n if not ((mazeCopy[current] == -3) or (mazeCopy[current] ==0)):\n neighbors = getFireNeighbors(current, mazeCopy)\n k = len(neighbors)\n prob = 1-((1-q)**k)\n if random.random() <= prob :\n #print(current)\n GRID[current] = -3\n \n\n return \n\n# Executes the AStar search algo\ndef AStar(start,end):\n global GRID\n dist = {}\n processed = {}\n prev = {}\n\n for v in range(SIZE):\n dist[v] = math.inf\n processed[v] = False\n prev[v] = NONE\n\n dist[start] =0\n fringe = []\n heapq.heappush(fringe,(dist[start],start,[]))\n prev[start] = start\n \n while fringe: # checks if the fringe is empty\n \n (d,v,path) = heapq.heappop(fringe)\n\n # found the path\n if (v == end):\n return path\n\n if not processed[v]:\n vldneigh = getNeighbors(v)\n for u in vldneigh:\n if ((d + diagDis(u,end)) < dist[u]):\n dist[u] = d + diagDis(u,end)\n heapq.heappush(fringe,(dist[u],u,path + [u]))\n prev[u] = v\n processed[v] = True\n return None\n\n# Eucledian heuristic for AStar\ndef diagDis(current,end):\n\n global DIM\n\n # heuristic is based on eucledian heuristic\n x = current % DIM\n y = current // DIM\n\n #print(x,y)\n\n\n # goal location\n endX = (end % DIM)\n endY = end // DIM\n\n #print(endX,endY)\n\n # distance formula on (x,y) -> (endX, endY)\n dis = math.sqrt(((x-endX)**2) + (y-endY)**2)\n\n return dis\n\n# Executes the BFS algorithm\ndef BFS(start,end):\n global GRID\n\n # checks if the start and the goal is not empty \n if (GRID[start] == 0 or GRID[end] == 0):\n print(\"Invalid start or end\")\n return\n\n # creats the stack that you have already visited\n closedSet = []\n\n # creates the fringe stack and adds start to fringe\n fringe = deque([(start,[])])\n\n while fringe: # checks if the fringe is empty\n \n # 
current is the current node\n # path keeps track of the path taken to reach current from start\n #everything at the top of the queue will always be the shortest path\n current, path = fringe.popleft()\n closedSet.append(current)\n\n # found the path\n if (current == end):\n #print (\"Success!\")\n path.remove(start)\n return path + [current]\n\n # calculate the valid neighbors\n vldneigh = getNeighbors(current)\n #print('Valid Neighbors for {} is {}'.format(current,vldneigh))\n\n for child in vldneigh:\n if child not in closedSet:\n # new path is path + current for this child\n fringe.append((child,path + [current]))\n closedSet.append(child)\n\n #print(\"No Solution \")\n return None\n\n# Executes the DFS algorithms\ndef DFS(start, end):\n global GRID\n # checks if the start and the goal is not empty \n if (GRID[start] == 0 or GRID[end] == 0):\n print(\"Invalid start or end\")\n return\n\n # creates the fringe stack\n fringe = deque() \n # adds the start node to the fringe\n fringe.append(start)\n\n # creats the stack that you have already visited\n closedSet = deque() #\n\n # path to the solution\n path = []\n\n while fringe: # checks if the fringe is empty\n\n # pops the top off the stack\n current = fringe.pop()\n path.append(current)\n #print(\"current {}\".format(current))\n\n # found the path\n if (current == end):\n #print (\"Success!\")\n path.remove(start)\n return path\n # calculate the valid neighbors\n vldneigh = getNeighbors(current)\n #print('Valid Neighbors for {} is {}'.format(current,vldneigh))\n\n for child in vldneigh:\n # checks in child is already in the closed set\n if child not in closedSet:\n fringe.append(child)\n #path.append(current)\n closedSet.append(current)\n #print(\"No Solution \")\n return None\n\n# Method to determine the color of the GUI based on the elements value\ndef pickColor(current):\n\n global GRID\n\n if GRID[current] == 1:\n color = 7\n elif GRID[current] == 7:\n color = 5\n elif GRID[current] ==5:\n color = 2\n elif GRID[current] == 2:\n color = 3\n else: \n color =7\n\n return color\n\n# Makes the grid using the probability and the size\ndef makeGrid():\n global SIZE\n global PROB\n global GRID\n global start\n global end\n\n GRID = np.ones(DIM**2)\n\n GRID[start] = -1\n GRID[end] = -2\n\n for i in range(SIZE):\n if ( not i == start ) and (not i == end):\n if (random.random() <= PROB):\n GRID[i] =0\n\n# Method that checks the maze and determines that there is a soloutio\ndef checkGrid():\n\n makeGrid()\n fireSeed = startFire()\n FirePath = AStarFire(start,fireSeed)\n solution = AStar(start,end)\n #print(solution)\n #print(FirePath)\n while (not FirePath) or (not solution):\n makeGrid()\n fireSeed = startFire()\n FirePath = AStarFire(start,fireSeed)\n solution = AStar(start,end)\n\n\n return\n\n# Makes the visual canvas for the grid\nclass showMaze():\n def __init__(self):\n global DIM\n # makes the window for the maze\n window = Tk()\n window.title(\"Fire Maze\")\n\n # makes the grid all white\n for i in range(SIZE):\n if(GRID[i] == 1):\n color = \"white\" # not empty\n elif(GRID[i] == 0):\n color = \"black\" # empty\n elif(GRID[i] == -1):\n color = \"blue\" # start is blue\n elif(GRID[i] == -2):\n color = \"purple\" # end is purple\n elif(GRID[i] == -3):\n color = \"orangered\" # on fire\n elif(GRID[i] == 5):\n color = \"yellow\" # backtrack\n elif(GRID[i] ==6):\n color = \"firebrick\" #you're on fire\n elif(GRID[i] == 2):\n color = \"hotpink\" # double backtrack\n elif(GRID[i] == 3):\n color = \"grey\" # triple backtrack\n else:\n color = 
\"green\" # path taken\n Canvas(window, width=30, height = 30, bg = color).grid(row = i // DIM, column = i % DIM)\n\n width = 30*DIM*1.20\n height = 30*DIM*1.20\n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n\n # calculate position x and y coordinates\n x = (screen_width/2) - (width/2)\n y = (screen_height/2) - (height/2)\n window.geometry('%dx%d+%d+%d' % (width, height, x, y))\n window.mainloop()\n\n# Psuedo animating the maze move\nclass showTempMaze():\n\n def __init__(self):\n global DIM\n # makes the window for the maze\n window = Tk()\n window.title(\"Fire Maze\")\n\n # makes the grid all white\n for i in range(SIZE):\n if(GRID[i] == 1):\n color = \"white\" # not empty\n elif(GRID[i] == 0):\n color = \"black\" # empty\n elif(GRID[i] == -1):\n color = \"blue\" # start is blue\n elif(GRID[i] == -2):\n color = \"purple\" # end is purple\n elif(GRID[i] == -3):\n color = \"orangered\" # on fire\n elif(GRID[i] == 5):\n color = \"yellow\" #backtrack\n elif(GRID[i] ==6):\n color = \"firebrick\" #you're on fire\n elif(GRID[i] == 2):\n color = \"hotpink\" # double backtrack\n elif(GRID[i] == 3):\n color = \"grey\" # triple backtrack\n else:\n color = \"green\" # path taken\n Canvas(window, width=30, height = 30, bg = color).grid(row = i // DIM, column = i % DIM)\n\n width = 30*DIM*1.20\n height = 30*DIM*1.20\n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n\n # calculate position x and y coordinates\n x = (screen_width/2) - (width/2)\n y = (screen_height/2) - (height/2)\n window.geometry('%dx%d+%d+%d' % (width, height, x, y))\n time = DIM*50\n window.after(time,window.destroy)\n window.mainloop()\n\n# Gets the left, right, up, and down neighbors in that order\ndef getNeighbors(current):\n\n global SIZE\n global GRID\n global DIM\n\n left = current -1\n right = current+1\n up = current - DIM\n down = current + DIM\n\n\n tempNeighbors = [ left, right, up, down ] # all possible neighbors\n neighbors = [] # valid neighbors\n\n #checks if the current is on the left edges and gets rid of left neighbor\n if (current % DIM == 0):\n tempNeighbors.remove(left)\n #checks if the current is on the right edges and gets rid of right neighbor\n elif (current % DIM == (DIM -1)):\n tempNeighbors.remove(right)\n #checks if the current is on the down edge and gets rid of down neighbor\n if (current // DIM == (DIM -1)):\n tempNeighbors.remove(down)\n #checks if the current is on the top edge and gets rid of top neighbor\n elif(current //DIM == 0):\n tempNeighbors.remove(up)\n # gets rid of all the nieghbors that are empty\n\n for i in tempNeighbors:\n if not ((GRID[i] == 0) or (GRID[i] == -3)):\n neighbors.append(i)\n\n return neighbors\n\n# Gets the Neighbors that are on fire\ndef getFireNeighbors(current, mazeCopy):\n \n global SIZE\n global GRID\n global DIM\n\n left = current -1\n right = current+1\n up = current - DIM\n down = current + DIM\n\n\n tempNeighbors = [ up, down, right, left] # all possible neighbors\n FireNeighbors = [] # valid neighbors\n\n #checks if the current is on the left edges and gets rid of left neighbor\n if (current % DIM == 0):\n tempNeighbors.remove(left)\n #checks if the current is on the right edges and gets rid of right neighbor\n elif (current % DIM == (DIM -1)):\n tempNeighbors.remove(right)\n #checks if the current is on the down edge and gets rid of down neighbor\n if (current // DIM == (DIM -1)):\n tempNeighbors.remove(down)\n #checks if the current is on the top edge and gets rid of top 
neighbor\n elif(current //DIM == 0):\n tempNeighbors.remove(up)\n # gets rid of all the nieghbors that are empty\n for i in tempNeighbors:\n if (mazeCopy[i] ==-3):\n FireNeighbors.append(i)\n\n return FireNeighbors\n\n#___________________________________________________________________\n# RUN THE MAIN: DO NOT DELETE!\nif __name__ == '__main__': main()","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":24550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"248384004","text":"#! /usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n#==============================================================================#\r\n# HISTORY #\r\n# ------- #\r\n# Developed by D. Hennessy, January 2017. #\r\n# Last modified: 31 March 2017 by D. Hennessy. #\r\n#==============================================================================#\r\n\"\"\"USAGE\r\nModule imported and used as the \"1b_Apply Chlorophyll_a Values\" script\r\ntool in the \"GEM2_Oil_Seep_Detection_Analysis\" Python Toolbox.\r\n\r\nSUMMARY\r\nApplies chlorophyll_a values from the MODIS chlorophyll_a data, downloaded with\r\nthe \"1a_Download Chlorophyll_a NetCDF Files\" script tool, to the dark targets\r\nfeature classes in the Yearly Data geodatabases. The value is a measure of the\r\nconcentration of chlorophyll_a in mg/m-3.\r\n\r\nINPUT\r\n- Folder Location of Dark Targets GDBs (user input): Folder containing the geodatabases\r\nproduced by \"1_Condition Yearly Dark Targets Data\". The script will iterate through\r\nthe geodatabases and apply the chlorophyll_a values for that acquisition day to\r\nthe dark targets.\r\n\r\n- Neighbourhood Cell Size (default user input): Integer parameter indicating the\r\nsize of the neighbourhood window to be used in the focal statistics calculation\r\nto determine the mean pixel value within the neighbourhood window.\r\n\r\nOUTPUT\r\n- \"chlor_a\" and \"chlor_5x5\" Attribute Fields (automated output): Attribute fields\r\nthat are joined to the acquisition day feature classes found within the various\r\ndark targets yearly geodatabases. The \"chlor_a\" attribute field contains the\r\nextracted raster value of the chlorophyll at the location of the dark target's\r\ncentroid. The \"chlor_5x5_\" attribute field contains the extracted raster value of\r\nthe mean of the pixel value within the specified neighbourhood window. 
('5x5' in\r\nthis case)\r\n\r\nADDITIONAL FUNCTIONS (explained in script below)\r\n- yearDay\r\n- adapted from ESRI's Join_Field.py functions\r\n - joindataGen\r\n - percentile\r\n - join_field\r\n- cleanWorkspace\"\"\"\r\n\r\n# Libraries\r\n# =========\r\nimport arcpy\r\nimport os\r\nimport sys\r\nimport datetime\r\nimport logging\r\n\r\n\r\nclass applyChloro(object):\r\n def __init__(self):\r\n \"\"\"Define the tool (tool name is the name of the class).\"\"\"\r\n self.label = \"1b_Apply Chlorophyll_a Values\"\r\n self.description = \"Creates and applies chlorophyll_a attribute value\\\r\n to each dark targets feature class located in the yearly GDBs.\"\r\n self.canRunInBackground = False\r\n\r\n def getParameterInfo(self):\r\n \"\"\"Define parameter definitions\"\"\"\r\n params0 = arcpy.Parameter(\r\n displayName=\"Input: Folder Location of Dark Targets GDBs\",\r\n name=\"working_folder\",\r\n datatype=\"DEFolder\",\r\n parameterType=\"Required\",\r\n direction=\"Input\")\r\n\r\n params1 = arcpy.Parameter(\r\n displayName=\"Input: Neighbourhood Cell Size (pixels)\",\r\n name=\"cell_size\",\r\n datatype=\"GPLong\",\r\n parameterType=\"Required\",\r\n direction=\"Input\")\r\n\r\n params1.value = 5\r\n\r\n params = [params0, params1]\r\n\r\n return params\r\n\r\n def isLicensed(self):\r\n \"\"\"Set whether tool is licensed to execute. This script requires Spatial Analyst extension to function.\"\"\"\r\n try:\r\n if arcpy.CheckExtension(\"Spatial\") != \"Available\":\r\n raise Exception\r\n except Exception:\r\n return False\r\n\r\n return True\r\n\r\n def updateParameters(self, parameters):\r\n \"\"\"Modify the values and properties of parameters before internal\r\n validation is performed. This method is called whenever a parameter\r\n has been changed.\"\"\"\r\n return\r\n\r\n def updateMessages(self, parameters):\r\n \"\"\"Modify the messages created by internal validation for each tool\r\n parameter. 
This method is called after internal validation.\"\"\"\r\n return\r\n\r\n def execute(self, parameters, messages):\r\n \"\"\"The source code of the tool.\"\"\"\r\n # Set log configuration\r\n logPath = os.path.join(parameters[0].valueAsText, \"logs\")\r\n if not os.path.exists(logPath):\r\n os.makedirs(logPath)\r\n logFile = os.path.join(logPath, \"chloro.log\")\r\n logging.basicConfig(filename=logFile, format='%(asctime)s -- %(message)s', datefmt='%d/%m/%Y %H:%M:%S', level=logging.INFO)\r\n arcpy.AddMessage(\"\\nApplying available chlorophyll_a values to dark targets...\")\r\n logging.info(\"Starting applyChloro.py script...\")\r\n\r\n arcpy.CheckOutExtension(\"Spatial\")\r\n logging.info(\"Check Out Extension: Spatial Analyst extension checked out\\n\")\r\n\r\n # Define variables from parameters\r\n working_folder = parameters[0].valueAsText\r\n chloro_folder = os.path.join(os.path.dirname(working_folder), \"Auxiliary\", \"Chlorophyll\")\r\n if not os.path.exists(chloro_folder):\r\n os.makedirs(chloro_folder)\r\n cell_size = parameters[1].value\r\n focal_field = \"chlor_a_\" + str(cell_size) + \"x\" + str(cell_size)\r\n\r\n # Determine list of yearly GDBs in workspace\r\n arcpy.env.workspace = working_folder\r\n gdbList = arcpy.ListWorkspaces(\"*\", \"FileGDB\")\r\n arcpy.AddMessage(\"Workspace contains the following \" + str(len(gdbList)) + \" GDBs: \" + str(gdbList))\r\n\r\n # Iterate through yearly GDBs\r\n for gdb in gdbList:\r\n arcpy.AddMessage(\"\\nProcessing \" + str(gdb))\r\n logging.info(\"Processing '%s' geodatabase\\n\", gdb)\r\n gdbDesc = arcpy.Describe(gdb)\r\n gdbYear = gdbDesc.baseName\r\n\r\n # Determine list of .nc files in corresponding yearly chlorophyll folder\r\n chloro_year = os.path.join(chloro_folder, gdbYear)\r\n arcpy.env.workspace = chloro_year\r\n ncList = arcpy.ListFiles('*.nc')\r\n\r\n # Determine list of feature classes in current GDB\r\n arcpy.env.workspace = gdb\r\n fcList = arcpy.ListFeatureClasses()\r\n arcpy.AddMessage(\"\\nGDB contains the following \" + str(len(fcList)) + \" feature classes: \" + str(fcList))\r\n\r\n # Iterate through feature classes in GDB\r\n for fc in fcList:\r\n\r\n # Check if chlorophyll_a has already been added to current feature class\r\n arcpy.AddMessage(\"\\nVerifying \" + fc + \"...\")\r\n logging.info(\"Processing '%s' feature class\", fc)\r\n fldList = arcpy.ListFields(fc)\r\n fldNames = []\r\n for fld in fldList:\r\n fldNames.append(fld.name)\r\n\r\n # If no chlorophyll_a data already in feature class, proceed with applying values\r\n if not focal_field in fldNames:\r\n\r\n # Create points feature class from current feature class to extract chlorophyll raster values\r\n arcpy.AddMessage(\"Creating points feature class...\")\r\n targetLyr = \"targetLyr\"\r\n arcpy.MakeFeatureLayer_management(fc, targetLyr)\r\n logging.info(\"Make Feature Layer: '%s' layer created from '%s' feature class\", targetLyr, fc)\r\n pointFC = fc + \"_point\"\r\n arcpy.FeatureToPoint_management(targetLyr, pointFC, \"CENTROID\")\r\n logging.info(\"Feature To Point: '%s' points feature class created from centroid of features in '%s' layer\", pointFC, targetLyr)\r\n\r\n # Determine year and day of year to load appropriate .nc file as raster\r\n yDay = self.yearDay(fc.split(\"_\")[1])\r\n chloro_file = \"A\" + gdbYear + yDay\r\n\r\n # Iterate through list of year's .nc files to find corresponding file to current feature class\r\n for ncFile in ncList:\r\n\r\n # Check for .nc file and feature class match\r\n if ncFile.startswith(chloro_file):\r\n\r\n 
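# Example from yearDay's docstring: 25 September 2010 is day 268, so a feature\r\n # class dated 20100925 yields chloro_file \"A2010268\", matched here by prefix.\r\n 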
# Make NetCDF raster layer from .nc file\r\n arcpy.AddMessage(\"Preparing chlorophyll_a raster layer...\")\r\n ncFilePath = os.path.join(chloro_year, ncFile)\r\n arcpy.MakeNetCDFRasterLayer_md(ncFilePath, \"chlor_a\", \"lon\", \"lat\", chloro_file)\r\n logging.info(\"Make NetCDF Raster Layer: '%s' raster layer created from '%s'\", chloro_file, ncFilePath)\r\n\r\n # Apply extent to raster layer (to limit processing to pertinent region)\r\n chloro_extent = arcpy.Extent(-160.0, 40.0, -40.0, 89.989002)\r\n chloro_rectExtract = arcpy.sa.ExtractByRectangle(chloro_file, chloro_extent, \"INSIDE\")\r\n logging.info(\"Extract By Rectangle: Extent (-160 (W), 40 (S), -40 (E), 89.989002 (N)) applied to '%s'\", chloro_file)\r\n\r\n # Calculate focal statistics (mean value of focal window)\r\n arcpy.AddMessage(\"Calculating focal statistics...\")\r\n neighborhood = arcpy.sa.NbrRectangle(cell_size, cell_size, \"CELL\")\r\n chloro_focal = arcpy.sa.FocalStatistics(chloro_rectExtract, neighborhood, \"MEAN\", \"DATA\")\r\n logging.info(\"Focal Statistics: '%s' raster created by calculating mean value of '%s'x'%s' neighbourhood calculated for cells from '%s'\", chloro_focal, str(cell_size), str(cell_size), chloro_file)\r\n\r\n if not \"chlor_a\" in fldNames:\r\n # Extract point values from raster\r\n arcpy.AddMessage(\"Extracting raster chlorophyll_a values to points...\")\r\n extractFC = fc + \"_extract\"\r\n arcpy.sa.ExtractValuesToPoints(pointFC, chloro_rectExtract, extractFC)\r\n arcpy.AlterField_management(extractFC, \"RASTERVALU\", \"chlor_a\")\r\n logging.info(\"Extract Values to Points: '%s' feature class created with point values calculated from '%s' raster layer with '%s' feature class\", extractFC, chloro_file, pointFC)\r\n\r\n # Extract focal values from raster\r\n arcpy.AddMessage(\"Extracting raster chlorophyll_a mean values to points...\")\r\n finalExtractFC = fc + \"_final_extract\"\r\n arcpy.sa.ExtractValuesToPoints(extractFC, chloro_focal, finalExtractFC)\r\n## focal_field = \"chlor_a_\" + str(cell_size) + \"x\" + str(cell_size)\r\n arcpy.AlterField_management(finalExtractFC, \"RASTERVALU\", focal_field)\r\n logging.info(\"Extract Values to Points: '%s' feature class created with point values calculated from '%s' raster layer with '%s' feature class\", finalExtractFC, chloro_focal, extractFC)\r\n\r\n # Join point and focal values to feature class\r\n arcpy.AddMessage(\"Joining values to feature class...\")\r\n self.join_field(fc, \"OBJECTID\", finalExtractFC, \"ORIG_FID\", \"chlor_a;\" + focal_field)\r\n logging.info(\"Join Field: chlor_a and chlor_a focal values joined to '%s' feature class from '%s' table\", fc, finalExtractFC)\r\n\r\n # Break iteration through .nc files once processing with corresponding .nc file and feature class is complete\r\n break\r\n\r\n # If chlorophyll_a values found in feature class, no further processing required for current feature class\r\n else:\r\n arcpy.AddMessage(\"Chlorophyll_a values already applied to feature class. 
Continuing...\")\r\n logging.info(\"Values already applied\")\r\n\r\n\r\n logging.info(\"Processing for '%s' feature class complete\\n\", fc)\r\n\r\n # Delete extra feature classes used during geoprocessing\r\n self.cleanWorkspace(gdb)\r\n\r\n arcpy.CheckInExtension(\"Spatial\")\r\n logging.info(\"Check In Extension: Spatial Analyst extension checked back in\")\r\n logging.info(\"applyChloro.py script finished\\n\\n\")\r\n\r\n return\r\n\r\n def yearDay(self, fc_dateString):\r\n \"\"\"Calculates the day of the year for each RADARSAT-2 acquisition (to conform to MODIS naming convention)\r\n\r\n Parameter:\r\n fc_dateString = Date string acquired from the feature class name in the input geodatabase,\r\n conforming to the following format: YYYYmmDD (e.g. 20100925 for 25 September 2010)\r\n\r\n Return:\r\n Returns sub-directory string to be concatenated with ftp file directory in order to point to the required year and day of the year to access the desired MODIS imagery,\r\n conforming to the following format: YYYY/DDD (e.g. 2010/268 for 25 September 2010)\"\"\"\r\n year = fc_dateString[:4]\r\n month = fc_dateString[4:6].lstrip(\"0\")\r\n day = fc_dateString[-2:].lstrip(\"0\")\r\n fc_date = datetime.date(int(year), int(month), int(day))\r\n yDay = fc_date.timetuple().tm_yday\r\n return str(yDay).rjust(3, '0')\r\n\r\n def joindataGen(self,joinTable,fieldList,sortField):\r\n \"\"\"Code snippet from Esri's Join_Field.py (as a replacement to the Join Field geoprocessing tool, which suffers from exceedingly lengthy processing times.)\r\n\r\n Define generator for join data\"\"\"\r\n with arcpy.da.SearchCursor(joinTable,fieldList,sql_clause=['DISTINCT',\r\n 'ORDER BY '+sortField]) as cursor:\r\n for row in cursor:\r\n yield row\r\n\r\n def percentile(self,n,pct):\r\n \"\"\"Code snippet from Esri's Join_Field.py (as a replacement to the Join Field geoprocessing tool, which suffers from exceedingly lengthy processing times.)\r\n\r\n Function for progress reporting\"\"\"\r\n return int(float(n)*float(pct)/100.0)\r\n\r\n def join_field(self, inTable, inJoinField, joinTable, outJoinField, joinFields):\r\n \"\"\"Code snippet from Esri's Join_Field.py (as a replacement to the Join Field geoprocessing tool, which suffers from exceedingly lengthy processing times.)\r\n\r\n Add join fields\"\"\"\r\n arcpy.AddMessage('\\nAdding join fields...')\r\n fList = [f for f in arcpy.ListFields(joinTable) if f.name in joinFields.split(';')]\r\n for i in range(len(fList)):\r\n name = fList[i].name\r\n type = fList[i].type\r\n if type in ['Integer','OID']:\r\n arcpy.AddField_management(inTable,name,field_type='LONG')\r\n elif type == 'String':\r\n arcpy.AddField_management(inTable,name,field_type='TEXT',field_length=fList[i].length)\r\n elif type == 'Double':\r\n arcpy.AddField_management(inTable,name,field_type='DOUBLE')\r\n elif type == 'Date':\r\n arcpy.AddField_management(inTable,name,field_type='DATE')\r\n else:\r\n arcpy.AddError('\\nUnknown field type: {0} for field: {1}'.format(type,name))\r\n\r\n # Write values to join fields\r\n arcpy.AddMessage('\\nJoining data...')\r\n # Create generator for values\r\n fieldList = [outJoinField] + joinFields.split(';')\r\n joinDataGen = self.joindataGen(joinTable,fieldList,outJoinField)\r\n version = sys.version_info[0]\r\n if version == 2:\r\n joinTuple = joinDataGen.next()\r\n else:\r\n joinTuple = next(joinDataGen)\r\n #\r\n fieldList = [inJoinField] + joinFields.split(';')\r\n count = int(arcpy.GetCount_management(inTable).getOutput(0))\r\n breaks = 
[self.percentile(count,b) for b in range(10,100,10)]\r\n j = 0\r\n with arcpy.da.UpdateCursor(inTable,fieldList,sql_clause=(None,'ORDER BY '+inJoinField)) as cursor:\r\n for row in cursor:\r\n j+=1\r\n if j in breaks:\r\n arcpy.AddMessage(str(int(round(j*100.0/count))) + ' percent complete...')\r\n row = list(row)\r\n key = row[0]\r\n try:\r\n while joinTuple[0] < key:\r\n if version == 2:\r\n joinTuple = joinDataGen.next()\r\n else:\r\n joinTuple = next(joinDataGen)\r\n if key == joinTuple[0]:\r\n for i in range(len(joinTuple))[1:]:\r\n row[i] = joinTuple[i]\r\n row = tuple(row)\r\n cursor.updateRow(row)\r\n except StopIteration:\r\n arcpy.AddWarning('\\nEnd of join table.')\r\n break\r\n\r\n def cleanWorkspace(self, workspace):\r\n \"\"\"Clears geodatabase workspace of interim feature classes used during geoprocessing executed in this script.\r\n\r\n Parameter:\r\n workspace = Points to the workspace in which the deletion of feature classes will occur.\r\n\r\n Return:\r\n No return\"\"\"\r\n arcpy.env.workspace = workspace\r\n arcpy.AddMessage(\"\\nCleaning workspace...\")\r\n pointList = arcpy.ListFeatureClasses(\"*_point\")\r\n arcpy.AddMessage(\"Deleting point feature classes...\")\r\n for fc in pointList:\r\n arcpy.Delete_management(fc)\r\n logging.info(\"Delete: '%s' feature class deleted\", fc)\r\n extractList = arcpy.ListFeatureClasses(\"*_extract\")\r\n arcpy.AddMessage(\"Deleting raster value extraction feature classes...\")\r\n for fc in extractList:\r\n arcpy.Delete_management(fc)\r\n logging.info(\"Delete: '%s' feature class deleted\", fc)","sub_path":"applyChloro.py","file_name":"applyChloro.py","file_ext":"py","file_size_in_byte":17971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"619470231","text":"from flask import render_template, session, redirect, url_for, current_app\nfrom .. import db\nfrom ..models import User\nfrom ..email import send_email\nfrom . 
import main\nfrom .forms import NameForm\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n form = NameForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.name.data).first()\n if user is None:\n user = User(username=form.name.data, age=form.age.data)\n db.session.add(user)\n db.session.commit()\n session['known'] = False\n session['old'] = form.age.data >= 40\n # 163老是把我发的邮件当垃圾邮件,mmp以后再来搞它\n # if current_app.config['FLASKY_ADMIN']:\n # send_email(to=current_app.config['FLASKY_ADMIN'], subject='Fuck Flask, fuckkkk!!',\n # template='mail/new_user', user=user)\n else:\n session['known'] = True\n session['old'] = user.age >= 40\n\n session['name'] = form.name.data\n return redirect(url_for('main.index'))\n return render_template('index.html',\n form=form, name=session.get('name'),\n known=session.get('known', False), old=session.get('old', False))\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"184470888","text":"'''\nPin 23 to LED to Pin 25 GND\n'''\n\nimport thread\nimport re\nimport unicodedata\nimport socket\nimport time\nimport sys\nimport RPi.GPIO as GPIO\n\nTCP_IP = 'localhost'\nTCP_PORT = 200\nBUFFER_SIZE = 1024\n\nLED_PIN = 23\n\nshuttingDown = False\nisServerRunning = False\nisLEDControllerRunning = False\n\nLEDStateFlag = False\nLEDState = False\n\ndef main():\n\t\n\tglobal shuttingDown\n\tglobal isServerRunning\n\tglobal isLEDControllerRunning\n\t\n\tGPIO.setmode(GPIO.BOARD)\n\tGPIO.setup(LED_PIN, GPIO.OUT)\n\t\n\ttry:\n\t\n\t\twhile(shuttingDown == False):\n\t\t\tif isServerRunning == False:\n\t\t\t\tthread.start_new_thread(server, ())\n\t\t\tif isLEDControllerRunning == False:\n\t\t\t\tthread.start_new_thread(led_controller, ())\n\t\t\ttime.sleep(1)\n\t\tGPIO.cleanup()\n\texcept KeyboardInterrupt:\n\t\tGPIO.cleanup()\n\texcept:\n\t\tGPIO.cleanup()\n\t\n\tprint(\"Cleaned\")\n\t\n\t\n\t\ndef led_controller():\n\n\tglobal shuttingDown\n\tglobal isLEDControllerRunning\n\tglobal LEDStateFlag\n\t\n\tisLEDControllerRunning = True\n\t\n\ttry:\n\t\tprint(\"Starting LED Controller\")\n\t\twhile(shuttingDown == False):\n\t\t\ttime.sleep(0.1)\n\t\t\tif LEDStateFlag == True:\n\t\t\t\tprint(\"Changing LED State\")\n\t\t\t\tGPIO.output(LED_PIN, LEDState)\n\t\t\t\tLEDStateFlag = False\n\t\tprint(\"LED Controller Closed\")\n\t\tisLEDControllerRunning = False\n\texcept:\n\t\tisLEDControllerRunning = False\n\t\tprint(sys.exc_info()[0])\n\n\ndef server():\n\t\n\tglobal shuttingDown\n\tglobal isServerRunning\n\tglobal LEDStateFlag\n\tglobal LEDState\n\t\n\tisServerRunning = True\n\t\n\ttry:\n\t\t#Start the Server\n\t\tprint(\"Starting Server\")\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\ts.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, TCP_PORT)\n\t\ts.bind(('', TCP_PORT))\n\t\ts.listen(1)\n\t\t\n\t\t#Pause and Wait for Connection\n\t\tprint(\"Waiting for Connection...\")\n\t\tconn, addr = s.accept()\n\t\tprint(\"Connected to: \" + str(addr))\n\t\t\n\t\twhile(shuttingDown == False):\n\t\t\tprint(\"Waiting for Data...\")\n\t\t\tdata = conn.recv(BUFFER_SIZE)\n\t\t\t\n\t\t\tif not data:\n\t\t\t\tbreak\n\t\t\t\n\t\t\tprint(\"Recieved Data: \" + str(data))\n\t\t\t\n\t\t\tcommand = unicodedata.normalize(\"NFD\", unicode(re.sub(r'[^a-zA-Z0-9]', \"\", unicode(data))))\n\n\t\t\tif command != \"\":\n\t\t\t\t\n\t\t\t\t#Set Flags\n\t\t\t\tprint(\"Command: \" + 
str(command))\n\t\t\t\t\n\t\t\t\tresponse = \"\"\n\t\t\t\t\n\t\t\t\tif command == \"echo\":\n\t\t\t\t\tresponse = command\n\t\t\t\telif command == \"on\":\n\t\t\t\t\tLEDStateFlag = True\n\t\t\t\t\tLEDState = True\n\t\t\t\telif command == \"off\":\n\t\t\t\t\tLEDStateFlag = True\n\t\t\t\t\tLEDState = False\n\t\t\t\telif command == \"quit\":\n\t\t\t\t\tshuttingDown = True\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t#Respond\n\t\t\t\tif response != \"\":\n\t\t\t\t\tprint(\"Sending: \" + response)\n\t\t\t\t\tresponse = (response + \"\\n\").encode(\"utf-8\")\n\t\t\t\t\tconn.send(response)\n\t\t\t\t\t\n\t\t#Leaving the While Loop\t\t\t\n\t\tprint(\"Server Closed\")\n\t\tisServerRunning = False\n\t\t\n\texcept:\n\t\tisServerRunning = False\n\t\tprint(sys.exc_info()[0])\n\t\t\nmain()\n","sub_path":"Sandbox/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450684762","text":"# Python RPG\n# Alex Galhardo Vieira\n# https://github.com/AlexGalhardo/Python-RPG\n# aleexgvieira@gmail.com\n# https://alexgalhardo.com\n\n# !/usr/bin/python3\n# coding: utf-8 \n\n# ./Python/Monsters/PitsOfInferno_Monsters/Dragon.py\n\nfrom SuperClass.MagicMonster import MagicMonster\n\nfrom Global.GLOBAL_PITS_OF_INFERNO_VARIABLES import GLOBAL_DRAGON_LIFE, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t GLOBAL_DRAGON_NAME, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t GLOBAL_DRAGON_MAGIC_ATTACK, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t GLOBAL_DRAGON_EXPERIENCE\n\n\nclass Dragon(MagicMonster):\n\n\t'''\n\t-- Herance LivingBeing\n\n\tself.livingBeingtotalLife\n\tself.livingBeingCurrentlyLife\n\tdef setLiveBeingTotalLife( $setLiveBeingTotalLife )\n\tdef getLiveBeingTotalLife():int\n\t'''\n\n\t'''\n\t-- Herance MagicMonster\n\n\tself.magicMonsterSpellDamage = magicMonsterSpellDamage\n\tself.magicMonsterName = magicMonsterName\n\tself.magicMonsterExperienceForKill = magicMonsterExperienceForKill\n\tself.lootGoldCoins = randint(100, 500)\n\t'''\n\n\tdef __init__(self):\n\n\t\t# construct MagicMonster\n\t\tsuper().__init__( GLOBAL_DRAGON_LIFE,\n\t\t\t\t\t\t\tGLOBAL_DRAGON_NAME,\n\t\t\t\t\t\t\tGLOBAL_DRAGON_MAGIC_ATTACK,\n\t\t\t\t\t\t\tGLOBAL_DRAGON_EXPERIENCE )\n","sub_path":"Monsters/Dragon.py","file_name":"Dragon.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"184909457","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport re\nfrom datetime import datetime\nimport uuid\nimport string\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.core.urlresolvers import reverse\n\nfrom roles.models import Role\nfrom guestbook.models import Guestbook, File\nfrom hack import models\n\n\nclass DefenceForm(forms.Form):\n floats = forms.CharField(label='ПО', required=True, widget=forms.TextInput(attrs={'style': 'width: 600px;'}))\n target = forms.IntegerField(label='Защита', required=True)\n\n def __init__(self, *args, **kwargs):\n if 'role' in kwargs:\n self.role = kwargs.pop('role')\n super(DefenceForm, self).__init__(*args, **kwargs)\n\n self.fields['target'].widget.choices = models.Target.TARGETS\n\n def clean_floats(self):\n hashes = map(string.strip, re.sub('[^\\w]', ' ', self.cleaned_data['floats']).split())\n floats = []\n for hash in hashes:\n try:\n floats.append(models.Float.objects.get(owner=self.role, target__isnull=True, hash=hash, is_active=True))\n except 
(models.Float.DoesNotExist, models.Float.MultipleObjectsReturned):\n raise forms.ValidationError('ПО с кодом \"%s\" не найдено' % hash)\n return floats\n\n def clean_target(self):\n try:\n return models.Target.objects.exclude(target='role.defence')\\\n .get(pk=self.cleaned_data['target'], role=self.role)\n except models.Target.DoesNotExist:\n raise forms.ValidationError('Неизвестная цель защиты')\n\n def clean(self):\n if self.errors:\n return\n\n target = self.cleaned_data['target']\n levels = target.get_levels()\n required_level = self.get_level_hole(levels)\n\n if len(self.cleaned_data['floats']) < required_level:\n raise forms.ValidationError(\n 'Недостаточно ПО для постановки защиты, требуется %s штук' % required_level\n )\n\n self.cleaned_data['floats'] = self.cleaned_data['floats'][:required_level]\n self.cleaned_data['level'] = required_level\n return self.cleaned_data\n\n def get_level_hole(self, levels):\n \"\"\"\n Ищет дырку в ряду уровней - минимальное число, которого нет в списке\n :param levels: список натуральных чисел\n :return: уровень\n \"\"\"\n levels = set(levels)\n for i in xrange(1, 100):\n if i not in levels:\n return i\n\n raise forms.ValidationError('Слишком много уровней защиты, хватит уже')\n\n def save(self):\n for float in self.cleaned_data['floats']:\n float.target = self.cleaned_data['target']\n float.target_level = self.cleaned_data['level']\n float.is_active = True\n float.save()\n return self.cleaned_data['level']\n\n\nclass GBDefenceForm(DefenceForm):\n floats = forms.CharField(label='ПО', required=True, widget=forms.TextInput(attrs={'style': 'width: 600px;'}))\n target = forms.IntegerField(label='Защита', required=True)\n\n def __init__(self, *args, **kwargs):\n self.guestbook = kwargs.pop('guestbook')\n super(GBDefenceForm, self).__init__(*args, **kwargs)\n\n self.fields['target'].widget.choices = models.Target.TARGETS\n\n def clean_target(self):\n try:\n return models.Target.objects.exclude(target='role.defence')\\\n .get(pk=self.cleaned_data['target'], additional=self.guestbook.title)\n except models.Target.DoesNotExist:\n raise forms.ValidationError('Неизвестная цель защиты')\n\n\nclass NewDuelForm(forms.Form):\n number = forms.CharField(label='Загадайте число')\n email_1 = forms.EmailField(label='Ваш email')\n email_2 = forms.EmailField(label='Email оппонента')\n\n def clean_number(self):\n number = self.cleaned_data['number']\n\n if not number.isdigit():\n raise forms.ValidationError('Введите число')\n\n if len(set(number)) != len(number):\n raise forms.ValidationError('Все цифры числа должны быть разными')\n\n return self.cleaned_data['number']\n\n def save(self, role):\n duel = models.Duel.objects.create(\n owner=role,\n role_1=uuid.uuid4().hex,\n role_2=uuid.uuid4().hex,\n email_1=self.cleaned_data['email_1'],\n email_2=self.cleaned_data['email_2'],\n number_1=self.cleaned_data['number'],\n dt=datetime.now(),\n )\n\n send_mail(\n 'Дуэль',\n 'Вы отправили приглашение на дуэль. Ваша ссылка - %s%s' %\n (settings.DOMAIN, reverse('hack:duel', args=[duel.role_1])),\n None,\n [self.cleaned_data['email_1']],\n )\n\n send_mail(\n 'Дуэль',\n 'Вы вызваны на дуэль с %s. 
Ваша ссылка - %s%s' %\n (self.cleaned_data['email_1'], settings.DOMAIN, reverse('hack:duel', args=[duel.role_2])),\n None,\n [self.cleaned_data['email_2']],\n )\n\n return duel\n\n\ndef check_number(number, number_len=None):\n try:\n int(number)\n if number_len:\n if len(number) != number_len:\n raise forms.ValidationError('Загаданное число должно содержать %s цифр' % number_len)\n else:\n if len(number) > 10:\n raise forms.ValidationError('Загаданное число должно содержать не более 10 цифры')\n\n if len(set(number)) != len(number):\n raise forms.ValidationError('Все цифры числа должны быть разными')\n\n return number\n\n except ValueError:\n raise forms.ValidationError('Введите четырехзначное число')\n\n\nclass NewHackForm(forms.Form):\n role = forms.IntegerField(label='Кого ломаем', widget=forms.Select, required=True)\n target = forms.CharField(label='Цель атаки', widget=forms.Select, required=True)\n additional = forms.CharField(label='Уточнение цели', required=False)\n floats = forms.CharField(label='ПО', required=False, widget=forms.TextInput(attrs={'style': 'width: 600px;'}))\n hacker_number = forms.CharField(label='Число хакера', required=False)\n\n def __init__(self, hacker, *args, **kwargs):\n self.hacker = hacker\n super(NewHackForm, self).__init__(*args, **kwargs)\n\n self.fields['target'].widget.choices = models.Target.TARGETS\n self.fields['role'].widget.choices = [(0, 'Гостевая')] + \\\n [(role.id, role.name) for role in Role.objects.current()]\n\n def clean_role(self):\n if self.cleaned_data['role'] == 0:\n return None\n\n try:\n return Role.objects.get(pk=self.cleaned_data['role'])\n except Role.DoesNotExist:\n raise forms.ValidationError('Неизвестный персонаж')\n\n def clean_floats(self):\n hashes = map(string.strip, re.sub('[^\\w]', ' ', self.cleaned_data['floats']).split())\n floats = []\n for hash in hashes:\n try:\n floats.append(models.Float.objects.get(owner=self.hacker, target__isnull=True,\n hash=hash, is_active=True))\n except (models.Float.DoesNotExist, models.Float.MultipleObjectsReturned):\n raise forms.ValidationError('ПО с кодом \"%s\" не найдено' % hash)\n return floats\n\n def clean_target(self):\n if self.cleaned_data['target'] in [target[0] for target in models.Target.TARGETS]:\n return self.cleaned_data['target']\n else:\n raise forms.ValidationError('Неизвестная цель атаки')\n\n def clean_additional(self):\n return (self.cleaned_data['additional'] or '').strip()\n\n def clean_hacker_number(self):\n min_len = 2\n number = self.cleaned_data['hacker_number']\n\n try:\n int(number)\n if min_len:\n if len(number) < min_len:\n raise forms.ValidationError('Загаданное число должно содержать не менее %s цифр' % min_len)\n else:\n if len(number) > 10:\n raise forms.ValidationError('Загаданное число должно содержать не более 10 цифры')\n\n if len(set(number)) != len(number):\n raise forms.ValidationError('Все цифры числа должны быть разными')\n\n return number\n\n except ValueError:\n raise forms.ValidationError('Введите число хакера')\n\n def clean(self):\n if self.errors:\n return\n\n if self.cleaned_data['target'] == 'role.steal_ship' and len(self.cleaned_data['floats']) < 3:\n raise forms.ValidationError('Для этого взлома требуются 3 дополнительных ПО')\n\n if self.cleaned_data['target'].startswith('role') and not self.cleaned_data['role']:\n raise forms.ValidationError('Для этого взлома требуется выбрать атакуемого человека')\n\n if self.cleaned_data['target'].startswith('guestbook'):\n if not self.cleaned_data['additional']:\n raise 
forms.ValidationError('Для этого взлома требуется ввести название атакуемой гостевой книги')\n\n if self.cleaned_data['target'] == 'guestbook.doc':\n try:\n File.objects.get(title=self.cleaned_data['additional'])\n except File.DoesNotExist:\n raise forms.ValidationError('Файла с таким названием не существует')\n else:\n\n try:\n Guestbook.objects.get(title=self.cleaned_data['additional'])\n except Guestbook.DoesNotExist:\n raise forms.ValidationError('Гостевой книга с таким названием не существует')\n\n if self.cleaned_data['target'].startswith('role'):\n target, _ = models.Target.objects.get_or_create(\n role=self.cleaned_data['role'],\n target=self.cleaned_data['target'],\n )\n else:\n target, _ = models.Target.objects.get_or_create(\n additional=self.cleaned_data['additional'],\n target=self.cleaned_data['target'],\n )\n\n self.cleaned_data['target'] = target\n\n if len(self.cleaned_data['hacker_number']) != target.complexity:\n raise forms.ValidationError('Номер хакера должен состоять из %s цифр' % target.complexity)\n\n return self.cleaned_data\n\n def save(self):\n if self.cleaned_data['target'].target.startswith('role'):\n defenders = [defender.security for defender in self.cleaned_data['role'].defenders.all()]\n elif self.cleaned_data['target'].target == 'guestbook.doc':\n doc = File.objects.get(title=self.cleaned_data['additional'])\n defenders = [member.member for member in doc.guestbook.member_set.all() if member.member.get_field('Хакер')]\n else:\n guestbook = Guestbook.objects.get(title=self.cleaned_data['additional'])\n defenders = [member.member for member in guestbook.member_set.all() if member.member.get_field('Хакер')]\n\n if defenders and self.cleaned_data['target'].target != 'role.defence':\n # Взлом с защитником\n hack = models.HackFight.objects.create(\n hacker=self.hacker,\n security=None, # кто первый придет, тот и защитник\n target=self.cleaned_data['target'],\n hacker_number=self.cleaned_data['hacker_number'],\n security_number=models.Hack.make_number(len(self.cleaned_data['hacker_number'])),\n additional=self.cleaned_data['additional'],\n )\n\n for defender in defenders:\n defender.send_mail(\n 'Цетаганда: защита',\n 'на вашего подопечного напали. Встать на защиту можно по ссылке %s%s?defence=1' %\n (settings.DOMAIN, hack.get_absolute_url())\n )\n defender.records.create(\n category='Защита',\n message='На вашего подопечного напали. Встать на защиту.' 
%\n (settings.DOMAIN, hack.get_absolute_url())\n )\n return hack\n\n else:\n # Взлом без защитника\n hack = models.Hack.objects.create(\n hacker=self.hacker,\n security_number=models.Hack.make_number(self.cleaned_data['target'].complexity),\n target=self.cleaned_data['target'],\n additional=self.cleaned_data['additional'],\n )\n return hack\n\n\nclass DefenderForm(forms.Form):\n defender = forms.IntegerField(label='Новый защитник', widget=forms.Select)\n\n def __init__(self, *args, **kwargs):\n super(DefenderForm, self).__init__(*args, **kwargs)\n\n self.hackers = [(role.id, role.name) for role in Role.objects.hackers()]\n self.fields['defender'].widget.choices = self.hackers\n\n def clean_defender(self):\n try:\n return Role.objects.hackers().get(pk=self.cleaned_data['defender'])\n except Role.DoesNotExist:\n raise forms.ValidationError('Машинист не найден')\n\n def save(self, client):\n models.Defender.objects.filter(client=client).delete()\n models.Defender.objects.create(client=client, security=self.cleaned_data['defender'])\n\n client.records.create(\n category='Защита',\n message='У вас новый защитник - %s' % self.cleaned_data['defender'],\n )\n\n self.cleaned_data['defender'].records.create(\n category='Защита',\n message='%s, у вас новый клиент на защиту - %s' % (self.cleaned_data['defender'], client),\n )\n","sub_path":"src/hack/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":14985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"259254551","text":"'''\n걍 lstm 쓰레기임\n100개로 돌려도 쓰레기임..!!!\n\nmse 는 115.99845886230469\n[[82.097115]\n [82.36568 ]\n [82.61987 ]\n [82.86034 ]\n [83.08766 ]\n [83.30248 ]]\nRMSE 는 14.834567084606148\nR2는 -74.45064477291889\n\n데이터 차원을 3개로 늘린게 잘 안맞는거 같아 \n뭔가 이런 숫자 데이터에 잘 안맞는거 아닐까...??\n'''\n\nimport numpy as np\n\na = np.array(range(1,101))\nsize = 5\ndef split_x(seq, size) :\n aaa = []\n for i in range(len(seq) - size + 1) :\n subset = seq[i : (i+size)]\n aaa.append([item for item in subset])\n print(type(aaa)) \n return np.array(aaa)\n\ndataset = split_x(a, size)\nx = np.array(dataset[:,:4])\ny = np.array(dataset[:,4])\nx_predict = x[-6:,:]\nx_predict_y = y[-6:]\n\nx = x.reshape(x.shape[0],x.shape[1],1)\nx_predict = x_predict.reshape(x_predict.shape[0],x_predict.shape[1],1)\n\nfrom sklearn.model_selection import train_test_split\n\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, random_state=66, test_size=0.2, shuffle=False\n)\n\n#2. 모델구성\nfrom keras.models import Sequential \nfrom keras.layers import LSTM, Dense \n\nmodel = Sequential()\nmodel.add(LSTM(320, input_shape=(4,1))) # input을 넣는거야 무조권 ^_^ \nmodel.add(Dense(32))\nmodel.add(Dense(640))\nmodel.add(Dense(320))\nmodel.add(Dense(16)) \nmodel.add(Dense(1)) \n\n#3. 설명한 후 훈련\nfrom keras.callbacks import EarlyStopping\nearly_stopping = EarlyStopping(monitor='loss', patience=500, mode='auto') # 어느정도 사이즈부터 성능향상에 도움을 주는지 아직 머르겠씁니다\nmodel.compile(loss='mse', optimizer='adam', metrics=['mse'])\nmodel.fit(x_train,y_train, epochs=500, batch_size=5, callbacks=[early_stopping], validation_split=0.2) \n\n#4. 
Evaluate and predict\nloss,mse = model.evaluate(x_test,y_test) \nprint('mse is',mse)\n\nx_predict = model.predict(x_predict)\nprint(x_predict)\n\nfrom sklearn.metrics import mean_squared_error \ndef RMSE(y_test, y_predict) :\n    return np.sqrt(mean_squared_error(y_test, y_predict))\nprint('RMSE is', RMSE(x_predict_y, x_predict) )\n\n# Compute R2\nfrom sklearn.metrics import r2_score \nr2 = r2_score(x_predict_y, x_predict)\nprint('R2 is ', r2)\n","sub_path":"keras/keras42_lstm_split2.py","file_name":"keras42_lstm_split2.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"335462008","text":"import WeekSchedule as weekSchedule\nimport FrameController as frameController\nimport RequestController as req\nimport ConfigFileParser\nimport tkinter as tk\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\n#import grovepi\n\nclass scheduleDay(tk.Frame):\n    def __init__(self, master):\n        # Init frame\n        tk.Frame.__init__(self,master)\n        #[temp,hum] = grovepi.dht(4,0)\n        self.temp = 21\n        self.room = ConfigFileParser.ConfigFileParser()\n        self.startList = [\"Start\",\" 8:30\",\" 9:20\", \"10:30\",\"11:20\",\"12:10\",\"13:00\",\"13:50\",\"15:00\",\"15:50\", \"17:00\", \"17:50\", \"18:40\", \"19:30\", \"20:20\",\"21:10\"]\n        # Sets selected item to none\n        self.selected_item = None\n\n        # Creates variable for the master class FrameController\n        self.master = master\n\n        # Inits the buttons and pictures\n        self.init_buttons()\n\n        # Builds foundation of the treeview\n        self.Build_Treeview(self.master)\n\n    def Build_Treeview(self,master): \n        # Init treeview\n        self.tv = ttk.Treeview(self, height=15)\n        self.tv['columns'] = ('lesuur', 'start', 'docent', 'klas', 'vak')\n        self.tv.heading(\"#0\", text='')\n        self.tv.column(\"#0\", stretch=\"NO\", width=1)\n        self.tv.heading('lesuur', text='Les uur')\n        self.tv.column('lesuur', anchor='center', width=100)\n        self.tv.heading('start', text='Tijd stip')\n        self.tv.column('start', anchor='center', width=150)\n        self.tv.heading('docent', text='Leraar')\n        self.tv.column('docent', anchor='center', width=150)\n        self.tv.heading('klas', text='Klas')\n        self.tv.column('klas', anchor='center', width=150)\n        self.tv.heading('vak', text='Vak')\n        self.tv.column('vak', anchor='center', width=200)\n        self.tv.bind('<<TreeviewSelect>>', self.select_item) \n        self.tv.grid(row=1, column=0, columnspan=20, padx=0, pady=20)\n\n        ttk.Style().configure(\"Treeview\", font= ('Verdana', 12), background=\"#383838\", \n            foreground=\"white\", fieldbackground=\"grey\")\n\n        self.Fill_Treeview()\n\n    def Fill_Treeview(self):\n        # Requests for data : May take time\n        jsonData = req.RetrieveRooms.RetrieveData(True,False,\"Schedule/Classroom/\")\n        bookingData = req.RetrieveRooms.RetrieveData(False,True,\"Booking/Bookings/\")\n        # Fill treeview\n        n = 1\n        while(n != 16):\n            if (jsonData == [\"Lost\"] and bookingData == [\"Lost\"]):\n                ttk.Label(self,text=\"Lost connection to server\",font=\"Verdana 12 bold\").grid(row= 3, column=0)\n                break\n            b = 0\n            if(jsonData != []):\n                for data in jsonData:\n                    if ( data.StartBlock == n):\n                        while (data.StartBlock + b < data.EndBlock + 1):\n                            self.tv.insert(\"\",\"end\",text = \"\",values = (n + b,self.startList[n + b], data.Teacher, data.Classes[0]['Name'], data.CourseCode))\n                            b = b + 1\n            if(bookingData != [] and bookingData != ['Lost'] ):\n                for data in bookingData:\n                    if (data.StartBlock == n):\n                        while (data.StartBlock + b < data.EndBlock + 1):\n                            self.tv.insert(\"\",\"end\",text = \"\",values = (n + b,self.startList[n + b], \"Geboekt\", \" 
Student\",\"\"))\n b = b + 1\n if (b == 0 or jsonData == []):\n self.tv.insert(\"\",\"end\",text = \"\",values = (n,self.startList[n],\"\",\"\",\"\"))\n n = n + 1\n else:\n n = n + b\n\n def select_item(self,a):\n\n try:\n item = self.tv.selection()[0]\n self.selected_item = item\n print(self.selected_item)\n except:\n print(\"Nothing selected :/\")\n \n\n def reserve_room(self,selected):\n try:\n item = self.tv.selection()[0]\n except:\n print(\"Nothing selected :/\")\n if selected == None or self.tv.item(selected)['values'][2] != \"\":\n print(\"Too bad\")\n elif (self.tv.item(selected)['values'][2] == \"\"): \n value= self.tv.item(selected)['values']\n self.tv.item(selected,values=(value[0],\n value[1],\n \"Student gereserveerd\",\n \"Student gereserveerd\",\n \"Zelf studie\"))\n\n\n def delete_reservation(self):\n try:\n item = self.tv.selection()[0]\n except:\n print(\"Nothing selected :/\")\n if (self.tv.item(item)['values'][2] == \"Student gereserveerd\"):\n value= self.tv.item(item)['values']\n self.tv.item(item,values=(value[0],\n value[1],\n \"\",\n \"\",\n \"\"))\n else:\n print(\"Too bad\")\n\n\n def init_buttons(self):\n\n # Load images \n logo = ImageTk.PhotoImage(Image.open(\"./Images/HRO.png\"))\n HROpicture = ttk.Label(self,image=logo)\n HROpicture.image = logo\n HROpicture.grid(row= 0, column=0)\n\n # Pictures and everything\n ttk.Button(self, text=\"Week rooster\", width=20,padding= 5, command=lambda:self.master.switch_frame(weekSchedule.scheduleWeek)).grid(row=0,column=1)\n \n ttk.Button(self, text=\"Refresh\", width=20, padding= 5, command=lambda:self.master.switch_frame(scheduleDay)).grid(row=0,column=2)\n \n ttk.Label(self,text=\"De temperatuur in lokaal \"+ str(self.room) +\" is: \\n\" + str(self.temp)+ \" graden.\",font=\"Verdana 9 bold\").grid(row= 0, column=3)\n\n #Options for reserving - NOT USED BUT MAYBE FOR IN THE FUTURE OR EXTRA'S\n #ttk.Button(self, text = \"Reserveer kamer\", width=20, command=lambda:self.reserve_room(self.selected_item)).grid(row=0, column=5, sticky=\"W\")\n #ttk.Button(self, text = \"Verwijder reservering\", width=25, command=lambda:self.delete_reservation()).grid(row=0, column=7, sticky=\"W\")\n","sub_path":"Tkinter/Tkinter/DaySchedule.py","file_name":"DaySchedule.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"648498289","text":"#!/usr/bin/env python2\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import print_function\r\n\r\nimport requests, os, shutil, sys, zipfile\r\n\r\n'''\r\nExperimental script. 
Not much checking, so use only with these parameters:\r\n OS: Windows 64-bit\r\n Python: 2.7 above (built on 2.7.15)\r\n\r\nNot much descriptive error handling (TODO, implement error handling)\r\nUse with fun!\r\n'''\r\n\r\n\r\nMB_NUM = 1024 ** 2\r\nBUFSIZ = 4096\r\nSCRIPT_DL_DIR = '.get_ffmpeg_py_tmp'\r\nFFMPEG_DL_URL = 'https://ffmpeg.zeranoe.com/builds/win64/static/ffmpeg-latest-win64-static.zip'\r\n_ZIP_PREFIX = ('ffmpeg-latest-win64-static', 'bin')\r\nZIP_EXE_PREFIX = '/'.join(_ZIP_PREFIX) + '/'\r\nOS_ZIP_EXE_PREFIX = os.path.join(*_ZIP_PREFIX)\r\nEXE_NAMES = ('ffmpeg.exe', 'ffprobe.exe', 'ffplay.exe')\r\nEXE_TO_EXTRACT = [(ZIP_EXE_PREFIX + x, x) for x in EXE_NAMES]\r\nZIP_FNAME = os.path.join('.', SCRIPT_DL_DIR, 'ffmpeg_latest.zip')\r\nSCRIPT_CWD = os.getcwd()\r\nHELP_MSG = u'''\r\nPython helper to download latest ffmpeg for Windows.\r\n\r\nUsage: get_latest_ffmpeg [OPTIONS]\r\n\r\nOptions: --no-check-zipfile, Do not check if an ffmpeg zip is already\r\n --force-download, -f in the temp directory, download instead.\r\n\r\n -y, --yes Do not ask for permission to download zip.\r\n\r\n -h, --help Show this help message.\r\n\r\nPublished under the MIT License.\r\n'''\r\ndef remove_file_no_error(fname, *args):\r\n for f in (fname, ) + args:\r\n try:\r\n os.remove(f)\r\n except OSError:\r\n pass\r\n''' First check for help messages '''\r\nif any(x in set(['--help', '-h', '--h']) for x in sys.argv[1:]):\r\n print(HELP_MSG)\r\n raise SystemExit\r\n\r\ntry:\r\n ''' Check if zipfile already exists '''\r\n if any(x in set(['--no-check-zipfile', '--force-download', '-f']) for x in sys.argv[1:]):\r\n raise IOError #Shortcut to go to except clause\r\n with zipfile.ZipFile(ZIP_FNAME, 'r') as outzip:\r\n for fullname, shortname in EXE_TO_EXTRACT:\r\n outzip.extract(fullname)\r\n shutil.move(os.path.join(OS_ZIP_EXE_PREFIX, shortname),\r\n os.path.join(SCRIPT_CWD, shortname))\r\nexcept IOError:\r\n #Remove exisiting temp dir\r\n try:\r\n os.listdir(SCRIPT_DL_DIR)\r\n except OSError:\r\n pass\r\n else:\r\n shutil.rmtree(SCRIPT_DL_DIR)\r\n finally:\r\n os.makedirs(SCRIPT_DL_DIR)\r\n ''' Start downloading '''\r\n req = requests.get(FFMPEG_DL_URL, stream = True)\r\n req_fs = int(req.headers['content-length'])\r\n req_fsmb = '%.2f' % (float(req_fs) / (MB_NUM))\r\n ''' Check for force yes commands '''\r\n if any(x.lower() in set(['-y', '--yes', '--y']) for x in sys.argv[1:]):\r\n pass\r\n else:\r\n continue_inp = raw_input('File Size: {0} MiB. Continue? 
[Y/N]: '.format(req_fsmb))\r\n if continue_inp not in set(['y', 'Y']):\r\n print('Exiting...')\r\n raise SystemExit\r\n ''' Download zip file '''\r\n with open(ZIP_FNAME, 'wb') as outf:\r\n #Some code credits: https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py\r\n cur_bytes = 0\r\n for chunk in req.iter_content(chunk_size = BUFSIZ):\r\n if chunk:\r\n cur_bytes += BUFSIZ\r\n print('%.2f/%.2f MiB: %s%%' % (float(cur_bytes) / MB_NUM, float(req_fs) / MB_NUM,\r\n int(float(cur_bytes) / req_fs * 100)), end = u' \\r')\r\n outf.write(chunk)\r\n ''' Extract files out of zip '''\r\n with zipfile.ZipFile(ZIP_FNAME, 'r') as outzip:\r\n new_prefix = os.path.join(*(ZIP_EXE_PREFIX.split('/')))\r\n for fullname, shortname in EXE_TO_EXTRACT:\r\n outzip.extract(fullname)\r\n shutil.move(os.path.join(OS_ZIP_EXE_PREFIX, shortname),\r\n os.path.join(SCRIPT_CWD, shortname))\r\n\r\n''' CLEANUP FILES '''\r\nshutil.rmtree(_ZIP_PREFIX[0])\r\nshutil.rmtree(SCRIPT_DL_DIR)\r\n","sub_path":"LatestWindowsFFMPEG/py_get_ffmpeg.py","file_name":"py_get_ffmpeg.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"132321987","text":"import os, json, sys\nimport logging\n\nfin = \"/run/cloud-init/result.json\"\nif os.path.exists(fin):\n ret = json.load(open(fin, \"r\"))\n if len(ret['v1']['errors']):\n sys.exit('Cloud-init finished with errors:' + \"\\n\".join(ret['v1']['errors']))\n else:\n sys.exit('Cloud-init finished with no errors.')\nelse:\n sys.exit('1')","sub_path":"lib/check_if_cloud_init_run_finished.py","file_name":"check_if_cloud_init_run_finished.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"399685862","text":"import cv2\nimg=cv2.imread(\"img.png\")\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nimg_invert = cv2.bitwise_not(img_gray)\nimg_smoothing = cv2.GaussianBlur(img_invert, (21, 21),sigmaX=0, sigmaY=0)\ndef dodgeV2(x, y):\n return cv2.divide(x, 255 - y, scale=256)\nfinal_img = dodgeV2(img_gray, img_smoothing)\ncv2.imshow('result',final_img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"Awesome-face-operations/Pencil Sketch/pencil_sketch_code.py","file_name":"pencil_sketch_code.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"395067069","text":"#!/usr/bin/python\n\n\"\"\"\nrun.py\n\nFlask app wrapper to run as the main web server\n\"\"\"\n\nimport json\n\n# Imports...\nimport steam\nimport xbox\n\n# Flask...\nimport flask\nFLASK_APP = flask.Flask(__name__)\n\n# ------------------------------------------------------------\n# PAGES\n# ------------------------------------------------------------\n@FLASK_APP.route('/')\ndef home():\n \"\"\"\n home\n\n Return the html for the home page from the template\n \"\"\"\n return flask.render_template('home.html')\n\n# ------------------------------------------------------------\n# ACHIEVES : page displaying achievement info\n# ------------------------------------------------------------\n@FLASK_APP.route('/steam.json')\ndef steam_json():\n return json.dumps(steam.load_json())\n\n@FLASK_APP.route('/xbox.json')\ndef xbox_json():\n return json.dumps(xbox.load_json())\n\n@FLASK_APP.route('/achieves')\ndef achieves_page():\n flask.session.steam = True\n flask.session.xbox = True\n return 
flask.render_template('achieves.html')\n\n@FLASK_APP.route('/achieves/steam')\ndef steam_achieves_page():\n    flask.session.steam = True\n    flask.session.xbox = False\n    flask.session.appid = None\n    return flask.render_template('achieves.html')\n\n@FLASK_APP.route('/achieves/xbox')\ndef xbox_achieves_page():\n    flask.session.steam = False\n    flask.session.xbox = True\n    flask.session.appid = None\n    return flask.render_template('achieves.html')\n\n@FLASK_APP.route('/achieves/steam/<appid>')\ndef steam_game_achieves_page(appid):\n    flask.session.steam = True\n    flask.session.xbox = False\n    flask.session.appid = appid\n    return flask.render_template('achieves.html')\n\n@FLASK_APP.route('/achieves/xbox/<appid>')\ndef xbox_game_achieves_page(appid):\n    flask.session.steam = False\n    flask.session.xbox = True\n    flask.session.appid = appid\n    return flask.render_template('achieves.html')\n\n# Run the app...\nFLASK_APP.run(debug=True, host=\"0.0.0.0\", port=80)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"122248950","text":"domain_size = 2\n\nmax_time = 1.00000e+01\nsafety_factor = 8.00000e-01\nmax_time_step = 1.00000e-03\noutput_dt = 1.00000e+00\ndensity = 1.00000e+03\nviscosity = 1.00000e-06\nbody_force_x = 0.00000e+00\nbody_force_y = -9.81000e+00\nbody_force_z = 0.00000e+00\nwall_law_y = 0.00000e+00\nprint_layers = True \nuse_mass_correction = True \nredistance_frequency = 5.00000e+00\nextrapolation_layers = 5.00000e+00\nnumber_of_inital_steps = 1.00000e+01\ninitial_time_step = 1.00000e-05\nreduction_on_failure = 3.00000e-01\nstabdt_pressure_factor = 1.00000e+00\nstabdt_convection_factor = 1.00000e-02\ntau2_factor = 1.00000e+00\nedge_detection_angle = 4.50000e+01\nassume_constant_pressure = False \ncompute_porous_resistance_law = 1.00000e+00\n# Declare Python Variables\n\nproblem_name = 'StillWater_Edgebased_halfPorous'\nproblem_path = '/media/data/Documents_work/EXAMPLES/BENCHMARKING/StillWater_Edgebased_halfPorous.gid'\nkratos_path = '/home/antonia/kratos'\n\n","sub_path":"kratos_3_0_0/applications/incompressible_fluid_application/test_examples/StillWater_Edgebased_halfPorous.gid/edgebased_levelset_var.py","file_name":"edgebased_levelset_var.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"642177026","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Test wiki page traverser\n\n$Id$\n\"\"\"\nimport unittest, sys\n\nfrom zope.component.tests.request import Request\nfrom zope.interface import Interface, classImplements\nfrom zope.publisher.interfaces import NotFound\nfrom zope.proxy import removeAllProxies\n\nfrom zope.app import zapi\nfrom zope.app.annotation.attribute import AttributeAnnotations\nfrom zope.app.annotation.interfaces import IAnnotations, IAttributeAnnotatable\nfrom zope.app.location.interfaces import ILocation\nfrom zope.app.location.traversing import LocationPhysicallyLocatable\nfrom zope.app.testing import ztapi\nfrom zope.app.testing.placelesssetup import PlacelessSetup\nfrom zope.app.traversing.interfaces import IPhysicallyLocatable\n\nfrom zwiki.interfaces import IWikiPage, IWikiPageHierarchy\nfrom zwiki.wiki import Wiki\nfrom zwiki.wikipage import WikiPage, WikiPageHierarchyAdapter\nfrom zwiki.traversal import WikiPageTraverser\n\nclass I(Interface):\n pass\n\nclass Request(Request):\n def getEffectiveURL(self):\n return ''\n\nclass View:\n def __init__(self, comp, request):\n self._comp = comp\n\nclass TestTraverser(PlacelessSetup, unittest.TestCase):\n\n def setUp(self):\n super(TestTraverser, self).setUp()\n classImplements(WikiPage, IAttributeAnnotatable)\n ztapi.provideAdapter(IWikiPage, IWikiPageHierarchy,\n WikiPageHierarchyAdapter)\n ztapi.provideAdapter(IAttributeAnnotatable, IAnnotations,\n AttributeAnnotations)\n ztapi.provideAdapter(ILocation, IPhysicallyLocatable,\n LocationPhysicallyLocatable)\n\n def testAttr(self):\n wiki = Wiki()\n page1 = WikiPage()\n wiki['FrontPage'] = page1\n page2 = WikiPage()\n wiki['FooBar'] = page2\n IWikiPageHierarchy(page2).parents = ('FrontPage',)\n request = Request(I)\n\n T = WikiPageTraverser(page1, request)\n self.failUnless(\n removeAllProxies(T.publishTraverse(request, 'FooBar')) is page2)\n\n self.assertRaises(NotFound, T.publishTraverse, request,'morebar')\n\n def testView(self):\n wiki = Wiki()\n page1 = WikiPage()\n wiki['FrontPage'] = page1\n page2 = WikiPage()\n wiki['FooBar'] = page2\n IWikiPageHierarchy(page2).parents = ('FrontPage',)\n request = Request(I)\n\n T = WikiPageTraverser(page1, request)\n ztapi.provideView(IWikiPage, I, Interface, 'viewfoo', View)\n\n self.failUnless(\n T.publishTraverse(request, 'viewfoo').__class__ is View )\n self.failUnless(\n removeAllProxies(T.publishTraverse(request, 'FooBar')) is page2)\n\n self.assertRaises(NotFound, T.publishTraverse, request, 'morebar')\n self.assertRaises(NotFound, T.publishTraverse, request,\n '@@morebar')\n\n\ndef test_suite():\n return unittest.TestSuite((\n unittest.makeSuite(TestTraverser),\n ))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Zope3/branches/alienoid-adapter_lookup_coptimizations/src/zwiki/tests/test_traverser.py","file_name":"test_traverser.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"130796501","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n *\n * Copyright (c) 2019 Baidu.com, 
Inc. All Rights Reserved\n * @file dist_collective_new_group2.py\n * @author liujie44@baidu.com\n * @date 2021-11-09 11:00\n * @brief\n *\n **************************************************************************/\n\"\"\"\nimport sys\nimport numpy as np\nimport paddle\nfrom paddle.distributed import init_parallel_env, ReduceOp\n\n\npaddle.distributed.init_parallel_env()\nd1 = np.array([1, 2, 3])\nd2 = np.array([2, 3, 4])\ntensor1 = paddle.to_tensor(d1)\ntensor2 = paddle.to_tensor(d2)\ngp = paddle.distributed.new_group([0, 1])\nprint(\"test_new_group ...ok\")\ntmp = np.array([0, 0, 0])\nresult = paddle.to_tensor(tmp)\n\n\ndef test_new_group_scatter():\n \"\"\"test_new_group_scatter\"\"\"\n paddle.distributed.scatter(result, [tensor2, tensor1], src=0, group=gp, sync_op=True)\n if gp.rank == 0:\n assert np.array_equal(result, tensor2)\n elif gp.rank == 1:\n assert np.array_equal(result, tensor1)\n print(\"test_new_group_scatter... ok\")\n\n\ndef test_new_group_reduce_sum():\n \"\"\"test_new_group_reduce_sum\"\"\"\n paddle.distributed.reduce(result, dst=0, group=gp, sync_op=True)\n if gp.rank == 0:\n assert np.array_equal(result.numpy(), [6, 10, 14])\n elif gp.rank == 1:\n assert np.array_equal(result.numpy(), [3, 5, 7])\n print(\"test_new_group_reduce... ok\")\n\n\ndef test_new_group_all_reduce_sum():\n \"\"\"test_new_group_all_reduce_sum\"\"\"\n paddle.distributed.all_reduce(result, sync_op=True)\n assert np.array_equal(result.numpy(), [3, 5, 7])\n print(\"test_new_group_all_reduce api ok\")\n\n\ndef test_new_group_all_gather():\n \"\"\"test_new_group_all_gather\"\"\"\n result = []\n # paddle.distributed.all_gather(\n # result, [self.tensor1, self.tensor1], group=gp, sync_op=True)\n paddle.distributed.all_gather(result, tensor1, group=gp, sync_op=True)\n assert np.array_equal(result[0], tensor1)\n assert np.array_equal(result[1], tensor1)\n print(\"test_new_group_all_gather... ok\")\n\n\ndef test_new_group_broadcast():\n \"\"\"test_new_group_broadcast\"\"\"\n tmp = np.array([0, 0, 0])\n result = paddle.to_tensor(tmp)\n paddle.distributed.broadcast(result, src=1, group=gp, sync_op=True)\n if gp.rank == 0:\n assert np.array_equal(result.numpy(), [0, 0, 0])\n print(\"test_new_group_broadcast_rank0... ok\")\n elif gp.rank == 1:\n assert np.array_equal(result.numpy(), [0, 0, 0])\n print(\"test_new_group_broadcast_rank1... ok\")\n\n\ndef test_new_group_barrier():\n \"\"\"test_new_group_barrier\"\"\"\n paddle.distributed.barrier(group=gp)\n assert 1 == 1\n print(\"test_new_group_barrier... ok\")\n\n\ndef test_new_group_wait():\n \"\"\"test_new_group_broadcast\"\"\"\n paddle.distributed.wait(result, gp, use_calc_stream=True)\n assert 1 == 1\n print(\"test_new_group_wait... ok\")\n\n\ndef test_get_group():\n \"\"\"test_get_group\"\"\"\n gid = paddle.distributed.new_group([4, 6])\n paddle.distributed.get_group(gid.id)\n print(\"test_get_group... 
ok\")\n\n\nif __name__ == \"__main__\":\n # test_new_group_scatter()\n # test_new_group_all_reduce_sum()\n # test_new_group_all_gather()\n # test_new_group_reduce_sum()\n test_new_group_broadcast()\n test_new_group_barrier()\n test_new_group_wait()\n test_get_group()\n","sub_path":"distributed/CE_API/case/dist_collective_new_group2.py","file_name":"dist_collective_new_group2.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"307061052","text":"import numpy as np\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom ensemble import *\nfrom sklearn import tree\nfrom PIL import Image\nfrom feature import *\nimport os\n\noriginal_data_directory = \"./datasets/original/\"\n\nface = 'face'\nnonface = 'nonface'\n\ndef extract(type):\n original_data_path = original_data_directory + type\n count = 0\n for filename in os.listdir(original_data_path):\n #images are converted into a size of 24 * 24 grayscale\n img = Image.open(original_data_path + \"/\" + filename).convert('L')\n img = img.resize((24, 24))\n img = np.array(img)\n\n #extract NPD features\n features = []\n feature = NPDFeature(img).extract()\n features.append(feature)\n\n count = count + 1\n print(count)\n np.save(\"./datasets/\"+type+\"_features.npy\", features)\n\nif __name__ == \"__main__\":\n # extract feature\n extract(face)\n extract(nonface)\n\n face_features = np.load(\"./datasets/face_features.npy\")\n nonface_features = np.load(\"./datasets/nonface_features.npy\")\n\n num_face_sample, num_face_feature = face_features.shape\n num_nonface_sample, num_nonface_feature = nonface_features.shape\n\n positive_label = [np.ones(1) for i in range(num_face_sample)]\n negative_label = [-np.ones(1) for i in range(num_nonface_sample)]\n\n positive_samples = np.concatenate((face_features, positive_label), axis=1)\n negative_samples = np.concatenate((nonface_features, negative_label), axis=1)\n\n training_size = 800\n rate = 0.5\n data = np.concatenate((positive_samples[:int(training_size*rate), :],\n negative_samples[:int(training_size*(1-rate)), :]),\n axis=0)\n X = data[:training_size, :num_face_feature]\n y = data[:training_size, -1]\n X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.33, random_state=42)\n y_train = y_train.reshape((len(y_train), 1))\n y_validation = y_validation.reshape((len(y_validation), 1))\n print(X_train.shape, y_train.shape, X_validation.shape, y_validation.shape)\n\n weak_classifier = tree.DecisionTreeClassifier(criterion='entropy', max_depth=4)\n classifier = AdaBoostClassifier(weak_classifier, 2)\n classifier.fit(X_train, y_train)\n\n Y_pred = classifier.predict(X_validation)\n target_names = ['face', 'non face']\n report = classification_report(y_validation, Y_pred, target_names=target_names, digits=4)\n with open(\"./report.txt\", 'w') as f:\n f.write(report)\n print(report)\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587839799","text":"from flask import jsonify, request\nfrom lib import Action\n\n\nclass ActionRoutes:\n def __init__(self):\n pass\n\n @staticmethod\n def register_routes(app):\n @app.route(\"/things//actions\", methods=[\"GET\"])\n def get_thing_actions(thing_id):\n page = request.args.get(\"page\", default=0, type=int)\n size = request.args.get(\"size\", 
default=20, type=int)\n            return jsonify(Action.get_action_requests(thing_id, page, size))\n\n        @app.route(\"/things/<thing_id>/actions\", methods=[\"POST\"])\n        def enqueue_action_request(thing_id):\n            request_json = request.get_json(force=True, silent=True)\n\n            if not request_json:\n                return jsonify({})\n\n            try:\n                action = next(iter(request_json))\n            except:\n                return jsonify({})\n\n            action_json = request_json[action]\n\n            if not isinstance(action_json, dict):\n                return jsonify({})\n\n            if \"input\" not in action_json:\n                return jsonify({})\n\n            return jsonify(Action.enqueue_action_request(thing_id, action, action_json[\"input\"]))\n\n        @app.route(\"/things/<thing_id>/actions/<action>/<request_id>\", methods=[\"GET\"])\n        def get_action_request_status(thing_id, action, request_id):\n            return jsonify(Action.get_action_request_status(thing_id, action, request_id))\n","sub_path":"routes/action_routes.py","file_name":"action_routes.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"326179240","text":"class Solution:\n    \n    def isMatch(self, s: str, p: str) -> bool:\n        length_s = len(s)\n        length_p = len(p)\n        df = [[None for j in range(length_p+1)] for i in range(length_s+1)]\n        \n        df[0][0] = True\n        for i in range(1, length_s + 1):\n            df[i][0] = False\n        for j in range(1, length_p + 1):\n            if j % 2 == 0 and p[j-1] == '*':\n                df[0][j] = df[0][j-2]\n            else:\n                df[0][j] = False\n        for i in range(1, length_s + 1):\n            for j in range(1, length_p + 1):\n                if s[i-1] == p[j-1] or p[j-1] == '.': \n                    df[i][j] = df[i-1][j-1]\n                elif p[j-1] == '*':\n                    if p[j-2] == s[i-1] or p[j-2] == '.':\n                        df[i][j] = df[i-1][j] or df[i][j-2] or df[i][j-1]\n                    else:\n                        df[i][j] = df[i][j-2] #or df[i-1][j] \n                else:\n                    df[i][j] = False\n        return df\n\n\n\nif __name__ == '__main__':\n    \n    #s = 'aa'\n    #p = 'a*'\n    #s = 'mississippi'\n    #p = 'mis*is*p*.'\n    s = 'ab'\n    p = '.*'\n    solution = Solution()\n    \n    def output(df, s):\n        for i in range(len(s)+1):\n            print(s[:i])\n            print(df[i])\n        #print(\"final output:\")\n        #print(solution[len(s)][-1])\n\n    print('=====')\n\n    s1 = 'aa'\n    p1 = 'a*'\n    df = solution.isMatch(s1,p1)\n    output(df,s1)\n\n    print('=====')\n\n    s2 = 'mississippi'\n    p2 = 'mis*is*p*.'\n    df = solution.isMatch(s2,p2)\n    output(df,s2)\n\n    print('=====')\n\n    s3 = 'aa'\n    p3 = 'a'\n    df = solution.isMatch(s3,p3)\n    output(df,s3)\n\n    print('=====')\n\n    s4 = 'aab'\n    p4 = 'c*a*b'\n    df = solution.isMatch(s4,p4)\n    output(df,s4)\n\n    s5 = 'aaa'\n    p5 = 'ab*a*c*a'\n    df = solution.isMatch(s5,p5)\n    output(df,s5)\n\n    \n    \n    \n","sub_path":"regex_matching-DP.py","file_name":"regex_matching-DP.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"574198211","text":"# -*- encoding: utf-8 -*-\n#! 
/usr/bin/env python\n'''\n文件说明: Web版的极简交互式笔记程序\n作者信息: penguinjing\n版本自述: 0.1.0\n程序参考: None\n'''\n# 全局引用\nfrom bottle import request, route, run, template\nfrom time import localtime, strftime\n\n# 全局变量\n# PATH = \"/path/2/work dir\"\n# 函式撰写区\ndef read_diary():\n f = open('diary log.txt','r')\n return f.read()\n\ndef write_diary(newdiary):\n f = open('diary log.txt','a+')\n edit_time = strftime(\"%Y %b %d %H:%M:%S\", localtime())\n f.write('%s %s\\n' % (edit_time, newdiary))\n f.close()\n\n@route('/')\ndef start():\n log = read_diary()\n return template(\"diaryweb\", diarylog=log)\n\n@route('/', method='POST')\ndef input_new():\n newdiary = request.forms.get('newdiary')\n write_diary(newdiary)\n log = read_diary()\n return template(\"diaryweb\", diarylog=log)\n\n\nif __name__ == '__main__':\n run(host='localhost', port=8255, debug=True, reloader=True)","sub_path":"_src/om2py4w/4wex0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"151733867","text":"import json\nimport requests\nimport urllib.parse as up\nimport datetime\n\nimport fromis9\n\nurl = \"https://www.googleapis.com/youtube/v3/search?\"\n\nvideoId = input(\"paste video id : \")\nm_res = 3\n\nparms = {\n 'part':'snippet',\n 'maxResults':m_res,\n 'q':videoId,\n 'key':'API key'\n }\n\nurl = url+up.urlencode(parms)\n\nreq = requests.get(url)\nres = req.text\n\ndatas = json.loads(res)\n\n\nprint('\\n프로미스나인(fromis_9) 의 Youtube 게시글을 검색합니다.')\nprint(datetime.datetime.now())\n\n\nprint('===========================================\\n')\n\nfor data in datas['items']:\n v_title = data['snippet']['title'] #i번째 글의 영상 제목\n v_desc = data['snippet']['description'] #i번째 글의 영상 설명\n\n print(f'제목 : {v_title}')\n print(f'설명 : {v_desc}')\n print(f'주소 : https://www.youtube.com/watch?v={videoId}')\n print('\\n===========================================\\n\\n')\n\n\n","sub_path":"auto-post/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"330457445","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport os\nimport time\nimport platform\nimport time\nimport sys\n\n\noutfile = input(\"Please enter an account name: \")\nhome = os.path.expanduser('~')\noutdir = home + '/commodity_game'\noutpath = outdir + '/' + outfile+'.txt'\nfile_test = os.path.exists(outpath)\nif file_test == True:\n print(\"This accounts exists\")\n f = open(outpath,'r')\n line = f.readline()\n contract = line.split(';')[0]\n balance = line.split(';')[5]\n number = line.split(';')[6]\n action = line.split(';')[7]\n f.close()\nelif file_test == False:\n print(\"Account does not exist please create an account\")\n print(\"Please fill out the below prompts\")\n action = input(\"Do you wish to buy or sell? 
(buy/sell): \")\n contract = input(\"Please enter a Corn or Soy contract: \")\n if contract[0] == 'C':\n com = 'corn'\n cost = 1500\n print(\"The margin for corn is $1500\")\n elif contract[0] == 'S':\n com = 'soy'\n cost = 3000\n print(\"The margin for soy is $3000\")\n number = input(\"Please enter the number of contracts {0} you wish to purchase: \".format(com))\n balance = int(number)*int(cost)\n print(\"The cost of this transaction is ${0}\".format(balance))\n\ndef price_datetime_func(contract):\n #yahoo url to find the commodity price\n url = \"http://finance.yahoo.com/q?s={0}.CBT\".format(contract)\n\n #downloads the html page to memory\n htmlcontent = urllib.request.urlopen(url).read()\n\n #converts to beautiful soup content\n soup = BeautifulSoup(htmlcontent)\n\n #parses out the line with price\n soup_price = soup.find_all(id='yfs_l10_{0}.cbt'.format(contract.lower()))\n\n #parses out the price from line\n price = str(soup_price[0]).split('<')[1].split('>')[-1]\n\n #parses out the line with the time\n soup_time = soup.find_all(id='yfs_market_time')\n\n #parses out the time from the line\n tmp_time = str(soup_time).split('>')[1].split('-')[0].split(',')[1:4]\n\n #pulls out all the time variables\n mon = tmp_time[0].strip().split()[0]\n day = tmp_time[0].strip().split()[1]\n year = tmp_time[1].strip()\n hour = tmp_time[2].strip().split(':')[0]\n minute = tmp_time[2].strip().split(':')[1][0:2]\n per = tmp_time[2].strip().split(':')[1][2:4]\n #turns hour into 24 hour notation\n if per == 'PM' and hour != 12:\n hour = (str(int(hour) + 12))\n elif per == 'AM' and hour == 12:\n hour = (str(int(hour) - 12))\n #converts to python time format\n datetime = time.strptime(\"{0} {1} {2} {3} {4}\".format(day, mon, year, hour, minute), \"%d %b %Y %H %M\")\n\n #pulls out time into different variables\n d_mon = str(datetime.tm_mon).zfill(2)\n d_day = str(datetime.tm_mday).zfill(2)\n d_year = datetime.tm_year\n d_hour = str(datetime.tm_hour).zfill(2)\n d_min = str(datetime.tm_min).zfill(2)\n\n #creates the time output\n out_time = \"{0}-{1}-{2} {3}:{4}\".format(d_year, d_mon, d_day, d_hour, d_min)\n\n #prints price and time\n return price,out_time\n\ndef create_price(contract,price,out_time,number):\n comm = contract[0]\n margin_dict = {'S' : 3000, 'C' : 15000}\n #figure out margin requirment for contracts\n if comm == 'S':\n margin = margin_dict[comm] * number\n elif comm == 'C':\n margin = margin_dict[comm] * number\n\n #finds the total contract price\n contract_price = (float(price)/100) * 5000\n total_contract_price = float(contract_price * float(number))\n\n return contract_price, total_contract_price\n\ndef file_write_func(outfile,contract,per_price,contract_price,total_contract_price,datetime,balance,number,action):\n print(\"Creating \" + outfile)\n outloc = open(outfile,'w')\n if action == 'buy':\n print(\"You are buying {0} contracts of {1} at the per bushel price of ${2}, per contract price of ${3}, and total price of ${4}. Your account balance is ${5}\".format(number,contract, round(per_price,2), round(contract_price,2), round(total_contract_price,2), round(balance,2)))\n elif action == 'sell':\n print(\"You are selling {0} contracts of {1} at the per bushel price of ${2}, per contract price of ${3}, and total price of ${4}. 
Your account balance is ${5}\".format(number,contract,per_price, contract_price,total_contract_price, balance))\n\n    outloc.write(\"{0};{1};{2};{3};{4};{5};{6};{7}\\n\".format(contract,per_price,contract_price,total_contract_price,datetime,balance,number,action))\n\ndef update_price_func(contract,per_price,contract_price,out_time,number,outpath,act):\n    f = open(outpath,'r')\n    line = f.readline()\n    f.close()\n    f = open(outpath,'r')\n    old_per_price = line.split(';')[1]\n    old_contract_price = line.split(';')[2]\n    old_total_contract_price = line.split(';')[3]\n    old_datetime = line.split(';')[4]\n    old_balance = line.split(';')[5]\n    comm = contract[0]\n    margin_dict = {'S' : 3000, 'C' : 15000}\n    #figure out margin requirement for contracts\n    if comm == 'S':\n        margin = margin_dict[comm] * number\n    elif comm == 'C':\n        margin = margin_dict[comm] * number\n    contract_price = (float(per_price)/100) * 5000\n    total_contract_price = contract_price * float(number)\n    print(\"{0} buy\".format(act))\n    if act.strip() == 'buy':\n        #buy\n        per_price_diff = float(per_price )- float(old_per_price)\n        contract_price_diff = float(contract_price) - float(old_contract_price)\n        total_contract_price_diff = float(total_contract_price) - float(old_total_contract_price)\n    elif act.strip() == 'sell':\n        #sell\n        per_price_diff = float(old_per_price) - float(per_price )\n        contract_price_diff = float(old_contract_price) - float(contract_price)\n        total_contract_price_diff = float(old_total_contract_price) - float(total_contract_price)\n    balance = float(old_balance) + float(total_contract_price_diff)\n    print(\"entry: datetime: {0}, price: {1}, contract price: {2}, total price of contracts: {3} starting balance {4}\".format(old_datetime, round(float(old_per_price),2), round(float(old_contract_price),2), round(float(old_total_contract_price),2), round(float(old_balance),2)))\n    print(\"current: datetime: {0}, price: {1}, contract price: {2}, total price of contracts: {3}, current balance: {4}\".format(datetime,per_price, round(float(contract_price),2), round(float(total_contract_price),2), round(float(balance),2)))\n    return balance\n\ndef update_file_func(outfile,contract,per_price,contract_price,total_contract_price,datetime,balance,\nnumber,action):\n    print(\"Updating \" + outfile)\n    outloc = open(outfile, 'a')\n    outloc.write(\"{0};{1};{2};{3};{4};{5};{6};{7}\\n\".format(contract,per_price,contract_price,total_contract_price,datetime,balance,number,action))\n    outloc.close()\n\ndef liquidate_func():\n    #Liquidates and closes your position or reviews the contract history\n    liq = input(\"Do you wish to liquidate your position? (yes/no): \")\n    f = open(outpath,'r')\n    hist_list = []\n    for i in f.readlines():\n        if len(i) > 1:\n            hist_list.append(i)\n    f.close()\n    f_line = hist_list[0]\n    f_contract,f_per_price,f_contract_price,f_total_price,f_datetime,f_balance,f_number = f_line.split(';')[0],f_line.split(';')[1],f_line.split(';')[2],f_line.split(';')[3],f_line.split(';')[4],f_line.split(';')[5],f_line.split(';')[6]\n    l_line = hist_list[-1]\n    l_contract,l_per_price,l_contract_price,l_total_price,l_datetime,l_balance,l_number = l_line.split(';')[0],l_line.split(';')[1],l_line.split(';')[2],l_line.split(';')[3],l_line.split(';')[4],l_line.split(';')[5],l_line.split(';')[6]\n    act = f_line.split(';')[7].strip()\n    if liq == 'yes':\n        if act == 'buy':\n            profit = float(l_balance) - float(f_balance)\n            print(\"You bought {0} {1} contracts at {2}\".format(f_number,f_contract,f_per_price))\n            print(\"You are liquidating your position by selling {0} {1} contracts at {2}\".format(l_number,l_contract,l_per_price))\n            if profit > 0:\n                print(\"Congratulations, you made ${0}\".format(profit))\n            elif profit < 0:\n                print(\"You lost ${0}\".format(profit))\n        elif act == 'sell':\n            profit = float(f_balance) - float(l_balance)\n            print(\"You sold {0} {1} contracts at {2}\".format(f_number,f_contract,f_per_price))\n            print(\"You are liquidating your position by buying {0} {1} contracts at {2}\".format(l_number,l_contract,l_per_price))\n            if profit > 0:\n                print(\"Congratulations, you made ${0}\".format(profit))\n            elif profit < 0:\n                print(\"You lost ${0}\".format(profit))\n        f = open(outpath,'a')\n        f.write(\"CLOSED balance: {0}\".format(profit))\n        f.close()\n    elif liq == 'no':\n        print(\"not liquidating\")\n        for i in hist_list:\n            h_con = i.split(';')[0]\n            h_date = i.split(';')[4]\n            h_price = i.split(';')[1]\n            h_total = i.split(';')[3]\n            h_balance = float(i.split(';')[5])\n            if act == 'buy':\n                h_profit = float(h_total) - float(f_total_price)\n            elif act == 'sell':\n                h_profit = float(f_total_price) - float(h_total)\n            print(h_date,h_con,h_price,round(h_balance,2), \"Profit:\", round(h_profit,2))\n\nper_price, datetime = price_datetime_func(contract)\n\nprice_per_contract, total_price = create_price(contract, per_price,datetime,number)\n\ndir_test = os.path.isdir(outdir)\nif dir_test == False:\n    os.makedirs(outdir)\n\nfile_test = os.path.exists(outpath)\nif file_test == False:\n    file_write_func(outpath,contract,per_price,price_per_contract,total_price,datetime,balance,number,action)\nelif file_test == True:\n    balance = update_price_func(contract,per_price,price_per_contract,datetime,number,outpath,action)\n    update_file_func(outpath, contract, per_price, price_per_contract, total_price, datetime, balance, number, action)\n    liquidate_func()\n","sub_path":"commodity_price_game.py","file_name":"commodity_price_game.py","file_ext":"py","file_size_in_byte":9929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"246076417","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\nimport jsonfield.fields\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Category',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name', models.CharField(max_length=24)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Tile',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, 
primary_key=True)),\n ('title', models.CharField(max_length=24)),\n ('content_template', models.CharField(max_length=60)),\n ('category', models.ForeignKey(on_delete=django.db.models.deletion.SET_DEFAULT, default=1, to='bigData.Category')),\n ],\n ),\n migrations.CreateModel(\n name='Tile_Data',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(default=b'Tile', max_length=24)),\n ('time', models.DateTimeField(verbose_name=b'refresh time')),\n ('data', jsonfield.fields.JSONField(default={})),\n ('url', models.CharField(default=b'gov.data.au', max_length=260)),\n ],\n ),\n migrations.CreateModel(\n name='Tile_tileData',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('dataId', models.ForeignKey(to='bigData.Tile_Data')),\n ('tileId', models.ForeignKey(to='bigData.Tile')),\n ],\n ),\n ]\n","sub_path":"bigData/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"29216543","text":"# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\nimport random\nfrom argparse import ArgumentParser\nfrom os.path import join\n\nimport torch\nfrom torch.backends import cudnn\nfrom torch.nn import BCELoss\nfrom torch.nn import MSELoss\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom srgan_pytorch.dataset import BaseDataset\nfrom srgan_pytorch.loss import ContentLoss\nfrom srgan_pytorch.model import discriminator\nfrom srgan_pytorch.model import generator\nfrom srgan_pytorch.utils import create_folder\nfrom test import iqa\nfrom test import sr\n\n# It is a convenient method for simple scripts to configure the log package at one time.\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(format=\"[ %(levelname)s ] %(message)s\", level=logging.INFO)\n\nparser = ArgumentParser()\nparser.add_argument(\"--dataroot\", default=\"data/DIV2K/train\",\n help=\"Path to dataset.\")\nparser.add_argument(\"--p-epochs\", default=512, type=int,\n help=\"Number of total p-oral epochs to run. (Default: 512)\")\nparser.add_argument(\"--g-epochs\", default=128, type=int,\n help=\"Number of total g-oral epochs to run. (Default: 128)\")\nparser.add_argument(\"--batch-size\", default=16, type=int,\n help=\"The batch size of the dataset. (Default: 16)\")\nparser.add_argument(\"--p-lr\", default=0.0001, type=float,\n help=\"Learning rate for psnr-oral. (Default: 0.0001)\")\nparser.add_argument(\"--g-lr\", default=0.0001, type=float,\n help=\"Learning rate for gan-oral. 
(Default: 0.0001)\")\nparser.add_argument(\"--image-size\", default=96, type=int,\n help=\"Image size of high resolution image. (Default: 96)\")\nparser.add_argument(\"--scale\", default=4, type=int, choices=[4],\n help=\"Low to high resolution scaling factor. \"\n \"Optional: [4]. (Default: 4)\")\nparser.add_argument(\"--netD\", default=\"\", type=str,\n help=\"Path to Discriminator checkpoint.\")\nparser.add_argument(\"--netG\", default=\"\", type=str,\n help=\"Path to Generator checkpoint.\")\nparser.add_argument(\"--seed\", default=None, type=int,\n help=\"Seed for initializing training.\")\nparser.add_argument(\"--pretrained\", dest=\"pretrained\", action=\"store_true\",\n help=\"Use pre-trained model.\")\nparser.add_argument(\"--cuda\", dest=\"cuda\", action=\"store_true\",\n help=\"Enables cuda.\")\nargs = parser.parse_args()\n\n# Random seed can ensure that the results of each training are inconsistent.\nif args.seed is None:\n args.seed = random.randint(1, 10000)\nlogger.info(f\"Random Seed: {args.seed}\")\nrandom.seed(args.seed)\ntorch.manual_seed(args.seed)\n\n# Because the resolution of each input image is fixed, setting it to `True`\n# will make CUDNN automatically find the optimal convolution method.\n# If the input image resolution is not fixed, it needs to be set to `False`.\ncudnn.benchmark = True\n\n# Set whether to use CUDA.\nif torch.cuda.is_available() and not args.cuda:\n logger.warning(\"You have a CUDA device, so you should probably \"\n \"run with --cuda\")\ndevice = torch.device(\"cuda:0\" if args.cuda else \"cpu\")\n\n# Load dataset.\ndataset = BaseDataset(dataroot=args.dataroot,\n image_size=args.image_size,\n scale=args.scale)\ndataloader = DataLoader(dataset=dataset,\n batch_size=args.batch_size,\n shuffle=True,\n pin_memory=True)\n\n# Load model.\nnetD = discriminator().to(device)\nnetG = generator(args.pretrained).to(device)\n\n# Optional: Resume training.\nstart_p_epoch = 0\nstart_g_epoch = 0\nif args.netD != \"\" and args.netG != \"\":\n netD.load_state_dict(torch.load(args.netD))\n start_g_epoch = \"\".join(list(filter(str.isdigit, args.netD)))\n logger.info(f\"You loaded {args.netD} for discriminator.\"\n f\"G-Oral resume epoch from {start_g_epoch}.\")\nif args.netG != \"\" and args.netD == \"\":\n netG.load_state_dict(torch.load(args.netG))\n start_p_epoch = \"\".join(list(filter(str.isdigit, args.netG)))\n logger.info(f\"You loaded {args.netG} for generator.\"\n f\"P-Oral resume epoch from {start_p_epoch}.\")\n\n# Define loss function.\npixel_criterion = MSELoss().to(device)\ncontent_criterion = ContentLoss().to(device)\nadv_criterion = BCELoss().to(device)\n\n# Define optimizer function.\np_optim = Adam(netG.parameters(), args.p_lr, (0.9, 0.999))\nd_optim = Adam(netD.parameters(), args.g_lr, (0.9, 0.999))\ng_optim = Adam(netG.parameters(), args.g_lr, (0.9, 0.999))\n\n# Define scheduler function.\nd_scheduler = StepLR(d_optim, args.g_epochs // 2, 0.1)\ng_scheduler = StepLR(g_optim, args.g_epochs // 2, 0.1)\n\n# Visualization. 
Use Tensorboard to record the Loss curve during training.\np_writer = SummaryWriter(\"samples/psnr_logs\")\ng_writer = SummaryWriter(\"samples/gan_logs\")\n\n\ndef main():\n    # Use PSNR value as the image evaluation index in the process of training PSNR.\n    # Use SSIM value as the image evaluation index in the process of training GAN.\n    # If an Epoch is higher than the current index, save the model weight under\n    # the current Epoch as `XXX-best.pth` and save it to the `weights` folder.\n    best_psnr = 0.0\n    best_ssim = 0.0\n\n    # Train the PSNR stage of the generative model, and save the model weight\n    # after reaching a certain index.\n    for epoch in range(int(start_p_epoch), args.p_epochs):\n        # Training.\n        train_psnr(epoch)\n        # Test.\n        sr(netG, join(\"assets\", \"lr.png\"), join(\"assets\", \"sr.png\"))\n        psnr, ssim = iqa(join(\"assets\", \"sr.png\"), join(\"assets\", \"hr.png\"))\n        logger.info(f\"P-Oral epoch {epoch} PSNR: {psnr:.2f}dB SSIM: {ssim:.4f}.\")\n        # Write result to TensorBoard.\n        p_writer.add_scalar(\"P_Test/PSNR\", psnr, epoch)\n        p_writer.add_scalar(\"P_Test/SSIM\", ssim, epoch)\n\n        # Check whether the PSNR value of the current epoch is the highest value\n        # ever in the training PSNR phase.\n        is_best = psnr > best_psnr\n        best_psnr = max(psnr, best_psnr)\n        # Save the model once after each epoch. If the current PSNR value is the\n        # highest, save another model ending with `best`.\n        torch.save(netG.state_dict(), join(\"weights\", f\"P_epoch{epoch}.pth\"))\n        if is_best:\n            torch.save(netG.state_dict(), join(\"weights\", \"P-best.pth\"))\n\n    # Save the model weights of the last iteration of the PSNR stage.\n    torch.save(netG.state_dict(), join(\"weights\", \"P-last.pth\"))\n    \n    # Load the model weights with the best results from the previous training.\n    netG.load_state_dict(torch.load(join(\"weights\", \"P-best.pth\")))\n\n    # Train the generative model in the GAN stage and save the model weight after\n    # reaching a certain index.\n    for epoch in range(int(start_g_epoch), args.g_epochs):\n        # Training.\n        train_gan(epoch)\n        # Test.\n        sr(netG, join(\"assets\", \"lr.png\"), join(\"assets\", \"sr.png\"))\n        psnr, ssim = iqa(join(\"assets\", \"sr.png\"), join(\"assets\", \"hr.png\"))\n        logger.info(f\"G-Oral epoch {epoch} PSNR: {psnr:.2f}dB SSIM: {ssim:.4f}.\")\n        # Write result to TensorBoard.\n        p_writer.add_scalar(\"G_Test/PSNR\", psnr, epoch)\n        p_writer.add_scalar(\"G_Test/SSIM\", ssim, epoch)\n\n        # Check whether the SSIM value of the current epoch is the highest value\n        # in the history of the training GAN stage.\n        is_best = ssim > best_ssim\n        best_ssim = max(ssim, best_ssim)\n        # Save the model once after each epoch. If the current SSIM value is the\n        # highest, save another model ending with `best`.\n        torch.save(netD.state_dict(), join(\"weights\", f\"D_epoch{epoch}.pth\"))\n        torch.save(netG.state_dict(), join(\"weights\", f\"G_epoch{epoch}.pth\"))\n        if is_best:\n            torch.save(netD.state_dict(), join(\"weights\", \"D-best.pth\"))\n            torch.save(netG.state_dict(), join(\"weights\", \"G-best.pth\"))\n\n        # Call the scheduler function to adjust the learning rate of the\n        # generator model and the discriminator model.\n        d_scheduler.step()\n        g_scheduler.step()\n\n    # Save the model weights of the last iteration of the GAN stage.\n    torch.save(netG.state_dict(), join(\"weights\", \"G-last.pth\"))\n\n\ndef train_psnr(epoch):\n    num_batches = len(dataloader)\n    for index, data in enumerate(dataloader, 1):\n        # Copy the data to the designated device.\n        inputs, target = data[0].to(device), data[1].to(device)\n\n        
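# The PSNR pre-training step below is plain supervised regression: the\n        # generator output is compared against the high-resolution target with a\n        # pixel-wise MSE loss, and only the generator parameters are updated.\n        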
##############################################\n # (0) Update G network: min MSE(output, target)\n ##############################################\n netG.zero_grad()\n output = netG(inputs)\n loss = pixel_criterion(output, target)\n loss.backward()\n p_optim.step()\n\n logger.info(f\"Epoch[{epoch}/{args.p_epochs}]\"\n f\"({index}/{num_batches}) P Loss: {loss.item():.4f}.\")\n\n # Write the loss value during PSNR training into Tensorboard.\n batches = index + epoch * num_batches + 1\n p_writer.add_scalar(\"Train/P_Loss\", loss.item(), batches)\n\n\ndef train_gan(epoch):\n num_batches = len(dataloader)\n for index, data in enumerate(dataloader, 1):\n # Copy the data to the designated device.\n inputs, target = data[0].to(device), data[1].to(device)\n batch_size = inputs.size(0)\n\n # Set the real sample label to 1, and the false sample label to 0.\n real_label = torch.full((batch_size, 1), 1, dtype=inputs.dtype).to(\n device)\n fake_label = torch.full((batch_size, 1), 0, dtype=inputs.dtype).to(\n device)\n\n ##############################################\n # (1) Update D network: E(real)[log(D(real))] + E(fake)[log(1 - D(G(fake))]\n ##############################################\n netD.zero_grad()\n fake = netG(inputs)\n d_loss_real = adv_criterion(netD(target), real_label)\n d_loss_fake = adv_criterion(netD(fake.detach()), fake_label)\n d_loss = d_loss_real + d_loss_fake\n d_loss.backward()\n d_optim.step()\n\n ##############################################\n # (2) Update G network: E(fake)[log(1 - D(G(fake))]\n ##############################################\n netG.zero_grad()\n fake = netG(inputs)\n pixel_loss = 1e+1 * pixel_criterion(fake, target.detach())\n content_loss = 2e-6 * content_criterion(fake, target.detach())\n adv_loss = 1e-3 * adv_criterion(netD(fake), real_label)\n g_loss = pixel_loss + content_loss + adv_loss\n g_loss.backward()\n g_optim.step()\n\n logger.info(f\"Epoch[{epoch}/{args.g_epochs}]\"\n f\"({index}/{num_batches}) \"\n f\"D Loss: {d_loss.item():.4f} \"\n f\"G Loss: {g_loss.item():.4f}.\")\n\n # Write the loss value during GAN training into Tensorboard.\n batches = index + epoch * num_batches + 1\n g_writer.add_scalar(\"Train/D_Loss\", d_loss.item(), batches)\n g_writer.add_scalar(\"Train/G_Loss\", g_loss.item(), batches)\n\n\nif __name__ == \"__main__\":\n create_folder(\"weights\")\n create_folder(\"samples\")\n\n logger.info(\"TrainEngine:\")\n logger.info(\"\\tAPI version .......... 0.4.0\")\n logger.info(\"\\tBuild ................ 
2021.07.09\")\n\n main()\n\n logger.info(\"All training has been completed successfully.\\n\")\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"630247056","text":"__author__ = 'markdaniel'\nclass Solution(object):\n def get_as_set(self, nums, result_set):\n assert type(nums) is list\n assert type(result_set) is set\n # Rather than re-adding dups, return immediately\n if tuple(nums) in result_set:\n return\n tup = tuple(nums)\n result_set.add(tup)\n # Recursively check each for subsets by splitting up list\n for numIndex in range(len(nums)):\n newArr = nums[0:numIndex] + nums[numIndex + 1:]\n self.get_as_set(newArr, result_set)\n\n def subsets(self, nums):\n res = []\n result_set = set()\n self.get_as_set(nums, result_set)\n for tup in result_set:\n as_list = list(tup)\n as_list.sort()\n res.append(as_list)\n\n res.sort()\n return res\n\nif __name__ == '__main__':\n nums = [1,2,3,4,5,6,7,8,10,0]\n res = Solution().subsets(nums)\n assert hasattr(res, \"__iter__\")\n for r in res:\n print(r)","sub_path":"Subsets.py","file_name":"Subsets.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"331791943","text":"import abc\nimport warnings\nimport numpy as np\nimport random\nimport tensorflow as tf\nfrom keras.models import load_model\n\nfrom .BaseModel import BaseModel\n\n\nclass BaseKerasModel(BaseModel):\n \"\"\"\n Dockex ```BaseModel``` base class for [Keras](https://github.com/keras-team/keras)\n models.\n\n Subclasses must provide an ```instantiate_model``` method that sets a\n ```self.model``` with a Keras model.\n\n ```BaseKerasModel``` adds tensorflow 2.0 and keras as requirements.\n \"\"\"\n\n def __init__(self, input_args):\n super().__init__(input_args)\n\n self.batch_size = self.params[\"batch_size\"]\n self.epochs = self.params[\"epochs\"]\n\n self.callbacks = []\n\n def set_random_seeds(self):\n if self.random_seed is not None:\n print(\"Setting random seeds\")\n np.random.seed(self.random_seed)\n random.seed(self.random_seed)\n tf.random.set_seed(self.random_seed)\n\n @abc.abstractmethod\n def instantiate_model(self):\n pass\n\n def fit(self):\n self.instantiate_model()\n\n if self.X_valid is not None and self.y_valid is not None:\n validation_data = (self.X_valid, self.y_valid)\n else:\n validation_data = None\n\n print(\"Fitting model\")\n self.model.fit(\n self.X_train,\n self.y_train,\n batch_size=self.batch_size,\n epochs=self.epochs,\n validation_data=validation_data,\n callbacks=self.callbacks,\n verbose=2,\n )\n\n def load(self):\n if self.input_pathnames[\"model_keras\"] is not None:\n print(\"Loading model\")\n self.model = load_model(self.input_pathnames[\"model_keras\"])\n\n else:\n raise ValueError(\n 'Input pathname \"model_keras\" must point to a saved model.'\n )\n\n def save(self):\n if self.method == \"predict\":\n warnings.warn(\n \"User requested save model when model was already loaded from file. 
Skipping model save.\"\n )\n else:\n print(\"Saving model\")\n self.model.save(self.output_pathnames[\"model_keras\"])\n","sub_path":"BaseKerasModel.py","file_name":"BaseKerasModel.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"241332935","text":"# Your last recorded submission was :(Working)\nn=int(input());\na=[list(map(int,input().split())) for i in range(n)];\nfor i in range(n):\n for j in range(n):\n end = '' if j == n-1 else ' ';\n if(i>=j): #the assignment was to print the lower triangular matrix .'. i>=j\n print(a[i][j], end=end);\n else:\n print(0, end=end);\n end = '' if i == n-1 else '\\n';\n print('',end=end); \n\n\n\n# Sample solutions (Provided by instructor)\na = int(input())\n\n\nm = []\nfor i in range(1,a+1): \n l = list(map(int, input ().split ()))\n m.append(l)\n\nfor i in range(a):\n for j in range(a):\n if(i>=j):\n if(j==a-1):\n print(m[i][j], end=\"\")\n else:\n print(m[i][j], end=\" \")\n else:\n if(j==a-1):\n print(0, end=\"\")\n else:\n print(0, end=\" \")\n if(i!=a-1):\n print()\n","sub_path":"NPTEL/The Joy of Using Python/Assignment/Week 7/1/Week 7-1.py","file_name":"Week 7-1.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"131664987","text":"#!/usr/bin/env python\n# Copyright 2015 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"A simple host test module.\n\nThis module runs on the host machine and is responsible for creating 2\nclient machines, waiting for them, and running RPC calls on them.\n\"\"\"\n\n# Map the legion directory so we can import the host controller.\nimport sys\nsys.path.append('../../')\n\nimport logging\nimport time\n\nimport host_controller\n\n\nclass ExampleController(host_controller.HostController):\n \"\"\"A simple example controller for a test.\"\"\"\n\n def __init__(self):\n super(ExampleController, self).__init__()\n self.client1 = None\n self.client2 = None\n\n def CreateClient(self):\n \"\"\"Create a client object and set the proper values.\"\"\"\n client = self.NewClient(\n isolate_file='client_test.isolate',\n config_vars={'multi_machine': '1'},\n dimensions={'os': 'Linux', 'pool': 'legion'}, priority=200,\n idle_timeout_secs=90, connection_timeout_secs=90,\n verbosity=logging.INFO)\n client.Create()\n return client\n\n def SetUp(self):\n \"\"\"Create the client machines and wait until they connect.\n\n In this call the actual creation of the client machines is done in parallel\n by the system. 
The WaitForConnect calls are performed in series but will\n return as soon as the clients connect.\n \"\"\"\n self.client1 = self.CreateClient()\n self.client2 = self.CreateClient()\n self.client1.WaitForConnection()\n self.client2.WaitForConnection()\n\n def Task(self):\n \"\"\"Main method to run the task code.\"\"\"\n self.CallEcho(self.client1)\n self.CallEcho(self.client2)\n self.CallClientTest(self.client1)\n self.CallClientTest(self.client2)\n\n def CallEcho(self, client):\n \"\"\"Call rpc.Echo on a client.\"\"\"\n logging.info('Calling Echo on %s', client.name)\n logging.info(self.client1.rpc.Echo(client.name))\n\n def CallClientTest(self, client):\n \"\"\"Call client_test.py name on a client.\"\"\"\n logging.info('Calling Subprocess to run \"./client_test.py %s\"', client.name)\n retcode, stdout, stderr = client.rpc.Subprocess(\n ['./client_test.py', client.name])\n logging.info('retcode: %s, stdout: %s, stderr: %s', retcode, stdout, stderr)\n\n\nif __name__ == '__main__':\n ExampleController().RunController()\n","sub_path":"testing/legion/examples/hello_world/host_test.py","file_name":"host_test.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"116813661","text":"from tqdm import tqdm\nfrom loguru import logger\nimport numpy as np\nimport pandas as pd\nimport data.data_utils as du\nimport misc.graph_utils as gu\nfrom functools import reduce\nimport misc.utils as utils\nfrom p_tqdm import p_map\nfrom functools import partial\nfrom pathos.multiprocessing import ProcessingPool as Pool\nimport multiprocessing\n\n\n\"\"\"\nThis method is used to build a feature matrix to be used as a part of GNN layer\n\"\"\"\ndef build_features(graph):\n \n if graph == None:\n graph = gu.read_graph()\n\n proteins = graph.nodes()\n\n # load pro embeddings\n logger.info(\"Loading pro embeddings\")\n pro_embeddings_df = pd.read_csv(\"data/output/pro_embeddings.csv\")\n\n # load go embeddings\n logger.info(\"Loading go embeddings\")\n go_embeddings_df = pd.read_csv(\"data/output/go_embeddings.csv\")\n\n # load sequence embeddings\n logger.info(\"Loading sequence embedddings\")\n seq_embeddings_df = pd.read_csv(\"data/output/seq_embeddings.csv\")\n\n # create empty array to hold the data\n pro_vector = pro_embeddings_df.shape[1] - 1\n go_vector = go_embeddings_df.shape[1] - 1\n seq_vector = seq_embeddings_df.shape[1] - 1\n num_rows = len(proteins)\n num_columns = pro_vector + go_vector + seq_vector\n features_arr = np.empty(shape=(num_rows, num_columns))\n\n for index, protein_name in tqdm(enumerate(proteins), total=len(proteins)):\n \n # pro \n pro_vector = get_pro_vector(protein_name,pro_embeddings_df)\n\n # seq\n seq_vector = get_seq_vector(protein_name, seq_embeddings_df)\n \n # go vector\n go_vector = build_go_vector(protein_name,go_embeddings_df)\n\n # feature vector\n feature_vector = np.concatenate([pro_vector, go_vector, seq_vector])\n\n # append to features array\n features_arr[index] = feature_vector\n \n features_pd = pd.DataFrame(data=features_arr)\n features_pd.insert(0,\"protein\",proteins)\n\n features_pd.to_csv(\"data/output/features.csv\",index=False)\n\n return features_pd\n\ndef build_features_only_go(graph=None,go_embeddings_df=None,save=True):\n \n if graph == None:\n graph = gu.read_graph()\n\n proteins = graph.nodes()\n\n # load go embeddings\n if go_embeddings_df is None:\n logger.info(\"Loading go embeddings\")\n go_embeddings_df = 
pd.read_csv(\"data/output/go_embeddings_copy.csv\")\n\n # create empty array to hold the data\n go_vector = go_embeddings_df.shape[1] - 1\n num_rows = len(proteins)\n num_columns = go_vector\n features_arr = np.empty(shape=(num_rows, num_columns))\n\n for index, protein_name in tqdm(enumerate(proteins), total=len(proteins)):\n \n # go vector\n go_vector = build_go_vector(protein_name,go_embeddings_df)\n\n # append to features array\n features_arr[index] = go_vector\n \n features_pd = pd.DataFrame(data=features_arr)\n features_pd.insert(0,\"protein\",proteins)\n \n if save == True:\n features_pd.to_csv(\"data/output/features_only_go.csv\",index=False)\n\n return features_pd\n\ndef build_features_only_go_term_type(term_type,\n graph=None,\n go_embeddings_df=None,\n parent_mapping_file=\"data/output/go_parent_mapping.csv\",\n binary_operator=\"hadamard\",\n save=True\n ):\n \n if graph == None:\n graph = gu.read_graph()\n\n proteins = graph.nodes()\n\n # load go embeddings\n if go_embeddings_df is None:\n logger.info(\"Loading go embeddings\")\n go_embeddings_df = pd.read_csv(\"data/output/go_embeddings_copy.csv\")\n\n # load parent mapping\n logger.info(f\"Loading go parent mappings - {parent_mapping_file}\")\n parent_mapping_df = pd.read_csv(parent_mapping_file)\n\n # create empty array to hold the data\n go_vector = go_embeddings_df.shape[1] - 1\n num_rows = len(proteins)\n num_columns = go_vector\n features_arr = np.empty(shape=(num_rows, num_columns))\n\n # define work\n def do_work(protein_name):\n go_vector = build_go_vector_for_term_type(protein_name,go_embeddings_df,parent_mapping_df,term_type,binary_operator=binary_operator)\n go_vector = np.reshape(go_vector,(-1,num_columns))\n return go_vector\n\n # define pool\n cpu_count = multiprocessing.cpu_count()\n pool = Pool(processes=cpu_count-1)\n \n # do work\n logger.info(\"Getting go vector for term\")\n features_np_list = pool.map(do_work, proteins)\n\n features_arr = np.concatenate(features_np_list,axis=0)\n\n features_pd = pd.DataFrame(data=features_arr)\n features_pd.insert(0,\"protein\",proteins)\n\n if save == True:\n features_pd.to_csv(f\"data/output/features_only_go_{term_type}.csv\",index=False)\n\n return features_pd\n\ndef build_ec_category():\n proteins = du.read_protein_names()\n\n ec_mapping = []\n for protein in tqdm(proteins):\n ec_code = du.get_ec_category(protein)\n ec_mapping.append((protein,ec_code))\n \n ec_df = pd.DataFrame.from_records(ec_mapping, columns=[\"protein\",\"ec_category\"])\n ec_df.to_csv(\"data/output/ec_category.csv\",index=False)\n\n\ndef get_pro_vector(protein_name, embedding_df):\n rows_df = embedding_df[embedding_df[\"protein\"] == f\"PR:{protein_name}\"]\n \n # if protein found\n if len(rows_df.index) > 0:\n rows_df = rows_df.drop(columns=[\"protein\"])\n row_vector = rows_df.iloc[0].to_numpy().reshape(-1)\n return row_vector\n\n # if protein not found\n else:\n # create an empty vector\n num_dimensions = embedding_df.shape[1]-1\n empty_vector = np.zeros(shape=(num_dimensions,))\n return empty_vector\n\ndef get_seq_vector(protein_name, embedding_df):\n rows_df = embedding_df[embedding_df[\"protein\"] == protein_name]\n \n # if protein found\n if len(rows_df.index) > 0:\n rows_df = rows_df.drop(columns=[\"protein\"])\n row_vector = rows_df.iloc[0].to_numpy().reshape(-1)\n return row_vector\n\n # if protein not found\n else:\n # create an empty vector\n num_dimensions = embedding_df.shape[1]-1\n empty_vector = np.zeros(shape=(num_dimensions,))\n return empty_vector\n\n\ndef 
build_go_vector(protein_name, go_embeddings_df):\n    # get go terms\n    go_terms = du.get_go_terms(protein_name)\n\n    # get embeddings for these go terms\n    num_rows = len(go_terms)\n    num_columns = go_embeddings_df.shape[1] - 1\n    go_vectors = np.zeros(shape=(num_rows,num_columns))\n    for index, go_term in enumerate(go_terms):\n        go_vector = _get_go_vector(go_term,go_embeddings_df)\n        go_vectors[index] = go_vector\n\n    # check if all the elements in go_vectors are not zero\n    if np.all(go_vectors == 0) == False:\n        # calculate a sum of all the vectors\n        go_vectors_mean = np.sum(go_vectors,axis=0)\n        return go_vectors_mean\n    else:\n        return np.zeros(shape=(num_columns,))\n\ndef build_go_vector_for_term_type(protein_name, go_embeddings_df,parent_mapping_df,term_type,binary_operator=\"hadamard\"):\n\n    # get go terms\n    go_terms = du.get_go_terms(protein_name)\n\n    if term_type == \"bp\":\n        go_terms = filter_children_for_parent(go_terms,\"GO:0008150\",parent_mapping_df)\n    elif term_type == \"cc\":\n        go_terms = filter_children_for_parent(go_terms,\"GO:0005575\",parent_mapping_df)\n    elif term_type == \"mp\":\n        go_terms = filter_children_for_parent(go_terms,\"GO:0003674\",parent_mapping_df)\n    elif term_type == \"all\":\n        go_terms = go_terms\n    else:\n        raise Exception(f\"Unrecognized term type : {term_type}\")\n\n    # get embeddings for these go terms\n    num_rows = len(go_terms)\n    num_columns = go_embeddings_df.shape[1] - 1\n    go_vectors = np.zeros(shape=(num_rows,num_columns))\n    for index, go_term in enumerate(go_terms):\n        go_vector = _get_go_vector(go_term,go_embeddings_df)\n        go_vectors[index] = go_vector\n\n    # check if all the elements in go_vectors are not zero\n    if np.all(go_vectors == 0) == False:\n        if binary_operator == \"l1\":\n            return reduce(utils.operator_l1,go_vectors)\n        elif binary_operator == \"l2\":\n            return reduce(utils.operator_l2,go_vectors)\n        elif binary_operator == \"hadamard\":\n            return reduce(utils.operator_hadamard,go_vectors)\n        elif binary_operator == \"avg\":\n            return reduce(utils.operator_avg,go_vectors)\n        elif binary_operator == \"cat\":\n            return reduce(utils.operator_cat,go_vectors)\n    else:\n        return np.zeros(shape=(num_columns,))\n    \n\ndef filter_children_for_parent(go_terms, parent_go_term, mappings_df):\n    child_go_terms = []\n    for term in go_terms:\n        mappings_df_filtered = mappings_df[mappings_df[\"go_node\"] == term]\n        if mappings_df_filtered.shape[0] > 0:\n            parent = mappings_df_filtered.iloc[0][\"root_parent\"]\n            if parent == parent_go_term:\n                child_go_terms.append(term)\n\n    return child_go_terms\n\ndef _get_go_vector(go_term, go_embeddings_df):\n    rows_df = go_embeddings_df[go_embeddings_df[\"protein\"] == go_term]\n    \n    # if term found\n    if len(rows_df.index) > 0:\n        rows_df = rows_df.drop(columns=[\"protein\"])\n        row_vector = rows_df.iloc[0].to_numpy().reshape(-1)\n        return row_vector\n\n    # if term not found\n    else:\n        # create an empty vector\n        num_dimensions = go_embeddings_df.shape[1]-1\n        empty_vector = np.zeros(shape=(num_dimensions,))\n        return empty_vector\n\nif __name__ == \"__main__\":\n    features_df = build_features(None)\n    ","sub_path":"src/data/feature_builder.py","file_name":"feature_builder.py","file_ext":"py","file_size_in_byte":9291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"467967371","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\n\nimport requests\nimport json\n\napp = Flask(__name__)\nCHARTS_DIR = \"static\"\n\n\ndef 
get_chart_URL(filename=\"myChart.jpeg\"):\n    \"\"\"Download chart and save the image.\n    ref: https://core.telegram.org/blackberry/chat-media-send\n    \"\"\"\n    r = requests.get('https://getcharts.herokuapp.com/updateChart')\n    result = r.json()\n\n    return result['url']\n\n\ndef get_ticker(currency):\n    \"\"\"Get the currency exchange rate of bitcoin.\n    Params:\n        currency (str): the currency selected by the user\n    Returns:\n        (str) The response string\n    \"\"\"\n    CURRENCY_TYPE = {\n        'dollar': \"USD\",\n        'euro': \"EUR\",\n        'hkd': \"HKD\"\n    }\n    result = requests.get('https://blockchain.info/ticker')\n    result = result.json()\n    return \"The bitcoin exchange rate in {} is : {}\".format(\n        CURRENCY_TYPE[currency],\n        result[CURRENCY_TYPE[currency]]['last']\n    )\ndef get_stats():\n    \"\"\"Get the general stats about blockchains.\n\n    Returns:\n        (str) The response string\n    \"\"\"\n    \n    result = requests.get('https://api.blockchain.info/stats')\n    result = result.json()\n    return \"The market price in USD is {}. The hash rate is {}. The number of blocks mined is {}. The number of total blocks is {}. The estimated transaction volume is {}. The bitcoin trade volume is {}. The USD trade volumes is {} \".format(\n        result['market_price_usd'], \n        result['hash_rate'], \n        result['n_blocks_mined'],\n        result['n_blocks_total'],\n        result['estimated_transaction_volume_usd'],\n        result['trade_volume_btc'],\n        result['trade_volume_usd']\n    )\n\n@app.route(\"/chainBot\", methods=['POST'])\ndef chainBot():\n    \"\"\"The Chain Bot service.\n    Doc: https://api.ai/docs/fulfillment\n    Doc on error responses: https://api.ai/docs/fulfillment#errors\n    \"\"\"\n    ##\n    # Convert the request data string into JSON obj\n    req = json.loads(request.data)\n    print(json.dumps(req, indent=2))\n    ##\n    # Check if the action is completed\n    if not req['result']['actionIncomplete']:\n        ##\n        # INFO CRYPTOCURRENCY EXCHANGE\n        if req['result']['contexts'][0]['name'] == \"info-cryptocurrency-exchange\":\n            ##\n            # Call the response for the \"info-bitcoin-exchange\" context\n            if req['result']['contexts'][0]['parameters']['cryptocurrency'] == \"bitcoin\":\n                response = get_ticker(req['result']['contexts'][\n                    0]['parameters']['currency'])\n                ##\n                # Return the response\n                return jsonify({\n                    \"speech\": response,\n                    \"displayText\": response,\n                    \"data\": {},\n                    \"contextOut\": [],\n                    \"source\": \"\"\n                }), 200, {'Content-Type': 'application/json; charset=utf-8'}\n            else:\n                return jsonify({\n                    \"speech\": \"I don't know the exchange rate for that cryptocurrency...\",\n                    \"displayText\": \"I don't know the exchange rate for that cryptocurrency...\",\n                    \"data\": {},\n                    \"contextOut\": [],\n                    \"source\": \"\"\n                }), 200, {'Content-Type': 'application/json; charset=utf-8'}\n        ##\n        # INFO MARKET\n        elif req['result']['contexts'][0]['name'] == \"info-market\":\n            chart_url = get_chart_URL()\n            print(chart_url)\n            return jsonify({\n                \"speech\": chart_url,\n                \"messages\": [\n                    {\n                        \"type\": 3,\n                        \"platform\": \"telegram\",\n                        \"imageUrl\": chart_url\n                    },\n                    {\n                        \"type\": 0,\n                        \"speech\": chart_url\n                    }\n                ]\n            }), 200, {'Content-Type': 'application/json; charset=utf-8'}\n        ##\n        #INFO STATS\n        elif req['result']['contexts'][0]['name'] == \"info-stats\":\n            response = get_stats()\n            ##\n            # Return the response\n            return jsonify({\n                \"speech\": response,\n                \"displayText\": response,\n                \"data\": {},\n                \"contextOut\": [],\n                \"source\": \"\"\n            }), 200, {'Content-Type': 'application/json; charset=utf-8'}\n        else:\n            return jsonify({\n                \"speech\": \"Sorry, can't understand your request...\",\n                \"displayText\": \"Sorry, 
can't understand your request...\",\n \"data\": {},\n \"contextOut\": [],\n \"source\": \"\"\n }), 404, {'Content-Type': 'application/json; charset=utf-8'}\n else:\n return jsonify({\n \"speech\": \"Sorry, your request is not complete...\",\n \"displayText\": \"Sorry, your request is not complete...\",\n \"data\": {},\n \"contextOut\": [],\n \"source\": \"\"\n }), 400, {'Content-Type': 'application/json; charset=utf-8'}\n\n\nif __name__ == \"__main__\":\n app.run(\"0.0.0.0\", 80, debug=True\n ##\n # Only for HTTPS\n # , ssl_context=(\n # '/etc/letsencrypt/live/chain.vector3d.xyz/fullchain.pem',\n # '/etc/letsencrypt/live/chain.vector3d.xyz/privkey.pem'\n # )\n)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"36678218","text":"import os\nimport numpy as np\nimport cv2\nimport argparse\nimport glob2\nimport matplotlib.pyplot as plt\nimport pdb\nimport json\nfrom helper import *\n\ndef parse_args():\n# Function to parse arguments\n parser = argparse.ArgumentParser(description='Process input arguments.')\n parser.add_argument('input_path', default='./examples/inputs', help = 'Path to input files')\n parser.add_argument('output_path', default='./output', help = 'Path to output directory to store output files.')\n\n args = parser.parse_args()\n\n return args\n\ndef get_canny(img):\n\n# Function to detect edges using canny edge detector.\n# Unused\n img = smoothen(img, 'gaussian', (7,7))\n img = threshold(img, 'adaptive')\n img = smoothen(img, 'median', 3)\n\n edges = cv2.Canny(img, 10, 250)\n edges1 = smoothen(edges, 'bilateral', 7)\n edges1 = smoothen(edges1, 'median', 3)\n edges1 = get_mask(edges1)\n\n edges = edges - edges1\n\n return edges\n\n\ndef morphological_process(img, ktype, kh, kv):\n# Perform morphological ops using horizontal and vertical kernels\n horizontal_img = img.copy()\n vertical_img = img.copy()\n\n kernel = get_kernel(ktype, (kh, 1))\n horizontal_img = morphological_ops(horizontal_img, kernel, 1, 1)\n kernel = get_kernel(ktype, (1, kv))\n vertical_img = morphological_ops(vertical_img, kernel, 1, 1)\n\n mask = horizontal_img + vertical_img\n\n return mask\n\n\ndef get_mask(img):\n\n# Image preprocessing\n# Gaussian filtering and thresholding to detect sharp edges.\n# Invert image to remove noise.\n img = smoothen(img, 'gaussian', (5,5))\n img = cv2.addWeighted(img, 1.5, img, -0.5, 0) # sharpen edges\n thresh_img = threshold(img, 'adaptive_gaussian')\n inv = 255 - thresh_img\n\n mask_img = morphological_process(inv, 'rect', 15,15)\n\n return mask_img\n\n\ndef windowing(img, win_size, step_size):\n# This function divides the image into windows, performs morphological ops\n# on each window and stores it in a mask. 
The mask image is returned.\n mask = np.zeros_like(img)\n for (x,y,window) in get_window(img, step_size, win_size):\n if window.shape[0] <= 0.01*(win_size[0]) or window.shape[1] <= 0.01*(win_size[1]):\n continue\n win_mask = get_mask(window)\n mask[y:y+win_size[1],x:x+win_size[0]] = win_mask\n\n# Extra morphological ops to detect edges better.\n mask = morphological_process(mask, 'rect', 35,35)\n mask= smoothen(mask,'median',5)\n\n return mask\n\ndef detect_box(image, output_path):\n\n fname = image.split('\\\\')[-1].split('.')[0] #\n gray_img, c_img = read_img(image) # read image\n\n# Define constants\n small_win_size = [50,50]\n small_step_size = 10\n big_win_size = [int(c_img.shape[1]/2),int(c_img.shape[0]/2)]\n big_step_size = 100\n\n# Get mask using 2 windows\n small_win_mask = windowing(gray_img, small_win_size,small_step_size)\n big_win_mask = windowing(gray_img, big_win_size, big_step_size)\n mask = small_win_mask+big_win_mask\n\n# Get contours and find rectangle boxes\n im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n json_dict = {}\n rects=[]\n boxes = []\n for c in contours:\n peri = cv2.arcLength(c,True)\n approx = cv2.approxPolyDP(c,0.015*peri, True)\n\n if len(approx)>=3 and len(approx)<=6:\n rects.append(c) # append all rectangle boxes\n\n # Add points of each box to the dictonary\n box = []\n middle = {}\n for i in range(len(c)):\n box.append(c[i][0].tolist())\n middle['points'] = box\n boxes.append(middle)\n json_dict['boxes'] = boxes\n cv2.drawContours(c_img, rects, -1, (0, 0, 255), 3)\n\n # Save the image with contours drawn on it\n cv2.imwrite(output_path+'/'+fname + '_output.jpg', c_img)\n # cv2.imwrite(output_path+'/'+fname+'_mask.jpg', mask)\n\n # Write the dictionary to json file\n with open(output_path+'/'+fname + '_points.json', 'w+') as f:\n json.dump(json_dict,f)\n\n\ndef read_data(input_path):\n\n all_files = glob2.glob(input_path+ '/*.jpg')\n\n return all_files\n\ndef main():\n\n args = parse_args()\n all_files = read_data(args.input_path)\n if not os.path.isdir(args.output_path):\n os.makedirs(args.output_path)\n for filename in all_files:\n detect_box(filename,args.output_path)\n\n\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"537244673","text":"import asyncio\nfrom operator import attrgetter\nimport pytest\nfrom pathlib import Path\n\nfrom aionetworking.utils import alist\nfrom aionetworking.actions.echo import InvalidRequestError\n\n###Required for skipif in fixture params###\nfrom aionetworking.compatibility import datagram_supported, supports_pipe_or_unix_connections, \\\n supports_pipe_or_unix_connections_in_other_process\n\n\nclass TestOneWayServer:\n @pytest.mark.asyncio\n async def test_00_send_and_send_recording(self, one_way_server_started, one_way_client, tmp_path, json_objects,\n json_decoded_multi, json_recording_data, one_way_server_context,\n one_way_client_context, connections_manager, json_codec):\n async with one_way_client as conn:\n conn.encode_and_send_msgs(json_decoded_multi)\n await asyncio.wait_for(one_way_server_started.wait_num_has_connected(1), 3)\n assert conn.context.keys() == one_way_client_context.keys()\n await asyncio.sleep(0.1) # Workaround for bpo-38471\n one_way_server_started.close_all_connections()\n await one_way_server_started.wait_num_connections(0)\n await 
asyncio.wait_for(one_way_server_started.wait_all_tasks_done(), 3)\n recording_file_path = next(tmp_path.glob('recordings/*.recording'))\n assert recording_file_path.exists()\n expected_file = next(Path(tmp_path / 'data/Encoded').glob('*.JSON'))\n assert expected_file.exists()\n objs = await alist(json_codec.from_file(expected_file))\n objs.sort(key=attrgetter('request_id'), reverse=False)\n assert objs == json_objects\n expected_file.unlink()\n new_recording_path = tmp_path / 'recordings/new.recording'\n recording_file_path.rename(new_recording_path)\n async with one_way_client as conn:\n await conn.play_recording(new_recording_path)\n await asyncio.sleep(0.1) # Workaround for bpo-38471\n await asyncio.wait_for(one_way_server_started.wait_num_has_connected(2), 3)\n await asyncio.wait_for(one_way_server_started.wait_all_tasks_done(), 3)\n expected_file = next(Path(tmp_path / 'data/Encoded').glob('*.JSON'))\n assert expected_file.exists()\n objs = await alist(json_codec.from_file(expected_file))\n objs.sort(key=attrgetter('request_id'), reverse=False)\n assert objs == json_objects\n\n\nclass TestTwoWayServer:\n @pytest.mark.asyncio\n async def test_00_send_and_send_recording(self, two_way_server_started, two_way_client, tmp_path, echo_response_object,\n echo_exception_response_object, echo_notification_object):\n async with two_way_client as conn:\n echo_response = await conn.echo()\n conn.subscribe()\n notification = await conn.wait_notification()\n await asyncio.sleep(0.1) # Workaround for bpo-38471\n assert echo_response == echo_response_object\n assert notification == echo_notification_object\n recording_file_path = next(tmp_path.glob('recordings/*.recording'))\n assert recording_file_path.exists()\n async with two_way_client as conn2:\n await conn2.play_recording(recording_file_path)\n echo_response = await asyncio.wait_for(conn2.wait_notification(), timeout=1)\n notification = await asyncio.wait_for(conn2.wait_notification(), timeout=1)\n await asyncio.sleep(0.1) # Workaround for bpo-38471\n assert echo_response == echo_response_object\n assert notification == echo_notification_object\n\n @pytest.mark.asyncio\n async def test_01_send_multiple_senders(self, reset_endpoint_names, tcp_server_two_way_started, tcp_client_two_way,\n tcp_client_two_way_two, tmp_path, echo_response_object,\n echo_exception_response_object, echo_notification_object, server_sock_str):\n assert tcp_server_two_way_started.protocol_factory.full_name == f\"TCP Server {server_sock_str}\"\n assert tcp_client_two_way.protocol_factory.full_name == f\"TCP Client {server_sock_str}\"\n assert tcp_client_two_way_two.protocol_factory.full_name == f\"TCP Client {server_sock_str}_2\"\n async with tcp_client_two_way as conn1, tcp_client_two_way_two as conn2:\n echo_response1 = await asyncio.wait_for(conn1.echo(), timeout=2)\n echo_response2 = await asyncio.wait_for(conn2.echo(), timeout=2)\n conn1.subscribe()\n conn2.subscribe()\n notification1 = await asyncio.wait_for(conn1.wait_notification(), timeout=2)\n notification2 = await asyncio.wait_for(conn2.wait_notification(), timeout=2)\n assert echo_response1 == echo_response2 == echo_response_object\n assert notification1 == notification2 == echo_notification_object\n\n","sub_path":"tests/test_end_to_end/test_single_loop.py","file_name":"test_single_loop.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"206524607","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom 
__future__ import print_function;\nimport sys;\n\n\n_twords={};\n\ndef read_twords(ftwords):\n print('reading twords file {}'.format(ftwords), file=sys.stderr);\n with open(ftwords) as fh:\n i = 0;\n for line in fh:\n i = i+1;\n if i % 10 == 0:\n print('.', file=sys.stderr, end='');\n if i % 100 == 0 :\n print('', file=sys.stderr);\n if not line.startswith('\\t'):\n continue;\n line = line.strip();\n word, sig = line.split(' ', 2);\n _twords[word] = [];\n print('\\nread {} lines and {} top words.'.format(i, len(_twords)), file=sys.stderr);\n\ndef read_word2feat(fin):\n print('reading word2feat file {}'.format(fin), file=sys.stderr);\n with (open(fin) if fin else sys.stdin) as fh:\n i = 0;\n for line in fh:\n i = i+1;\n if i % 1e4 == 0:\n print('.', file=sys.stderr, end='');\n if i % 1e6 == 0 :\n print('', file=sys.stderr); \n line = line.strip();\n word, feat, sig = line.split('\\t',2);\n yield word, feat, sig;\n print('\\nread {} lines.'.format(i), file=sys.stderr);\n\ndef get_feat_for_twords(tuples):\n current_word = None;\n current_features = None;\n for word, feat, sig in tuples:\n if word != current_word:\n if current_word in _twords:\n _twords[current_word] = sorted(current_features, key=lambda x: float(x[1]),reverse=True)[:100]; # take just the top 100 features of that top word\n current_word = word;\n current_features = [];\n if word in _twords:\n current_features.append((feat, sig));\n \ndef print_new_twords(ftwords):\n print('reading twords file {}'.format(ftwords), file=sys.stderr);\n with open(ftwords) as fh:\n i = 0;\n for line in fh:\n i = i+1;\n if i % 10 == 0:\n print('.', file=sys.stderr, end='');\n if i % 100 == 0 :\n print('', file=sys.stderr);\n line = line.rstrip();\n if not line.startswith('\\t'):\n print(line);\n continue;\n word, _ = line.lstrip().split(' ', 1);\n print('{}\\t{}'.format(line, _twords[word]));\n print('\\nprinted {} lines.'.format(i, len(_twords)), file=sys.stderr);\n\ndef bims_for_top_jos(ftwords, fword2feat):\n read_twords(ftwords);\n tuples = read_word2feat(fword2feat);\n get_feat_for_twords(tuples);\n print_new_twords(ftwords);\n\nbims_for_top_jos(sys.argv[1], None);\n","sub_path":"lt.n2n/src/main/python/bims_for_top_jos.py","file_name":"bims_for_top_jos.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"420856587","text":"import json\nimport traceback\n\nimport requests as requests\nfrom bs4 import BeautifulSoup\n\n# Initialized variables\nregistry_url = 'https://registry.npmjs.com/'\nreplicate_url = 'https://replicate.npmjs.com/'\nnpmjs_url = 'https://www.npmjs.com/package/'\ntop_ten_packages_list = [\"lodash\", \"chalk\", \"request\", \"commander\", \"react\", \"express\", \"debug\", \"async\", \"fs-extra\",\n \"moment\"]\nfile = open(\"../TopTen_DependencyCollaborators/top_ten_packages.json\", \"w\")\n\n\n# Crawler for npmjs\ndef crawler(package_name):\n # print(\"Going to crawl \" + package_name)\n collaborators_list = []\n url = npmjs_url + package_name\n\n try:\n npmjs_request = requests.get(url, headers={'User-Agent': 'Chrome/35.0.1916.47'}).text\n npm_bs = BeautifulSoup(npmjs_request, 'html.parser')\n body = npm_bs.find('body')\n # collaborators_div = body.find_all(\"div\", {\"class\": \"w-100\"})\n collaborators_div = body.find(\"ul\", {\"class\": \"list pl0 cf\"})\n\n for li in collaborators_div:\n a = li.find('a')\n username = str(a['href']).replace('/~', '')\n collaborators_list.append(username)\n except:\n print('[Error] while crawling 
npmjs for package: ' + package_name + ' and url: ' + url)\n        traceback.print_exc()\n        return collaborators_list\n\n    return collaborators_list\n\n\ndef metadata_extraction(package_name):\n    error = \"{\\\"error\\\":\\\"not_found\\\",\\\"reason\\\": \\\"missing\\\"}\"\n\n    url = replicate_url + package_name\n    replicate_request = requests.get(url)\n    # Converts data to a json object\n    data = replicate_request.json()\n\n    if \"error\" in data:\n        print(\"There's no data in the replicate endpoint for \" + package_name + \". Checking registry endpoint.\")\n\n        url = registry_url + package_name\n        registry_request = requests.get(registry_url + package_name)\n        # Converts data to a json object\n        data = registry_request.json()\n\n    try:\n        # dist_tags contains the latest version this package is on\n        dist_tags = data[\"dist-tags\"]\n        latest_version = dist_tags[\"latest\"]\n\n        # versions is a map of all the previous versions along with the data for each version\n        versions = data[\"versions\"]\n        version_data = versions[latest_version]\n    except:\n        print(\"URL: \" + url)\n        print('[Error] at metadata_extraction_dependencies while retrieving data from ' + package_name)\n        traceback.print_exc()\n\n    # Retrieve the dependencies, dev dependencies, contributors\n    # (should we get the maintainers? what's the difference between contributors and maintainers?)\n\n    # ----- Contributors -----\n    contributors = data[\"contributors\"] if (\"contributors\" in data) else None\n\n    # ----- Maintainers -----\n    maintainers = data[\"maintainers\"] if (\"maintainers\" in data) else None\n\n    # ----- Dependencies -----\n    dependencies = version_data[\"dependencies\"] if (\"dependencies\" in version_data) else None\n\n    # ----- Dev Dependencies -----\n    dev_dependencies = version_data[\"devDependencies\"] if (\"devDependencies\" in version_data) else None\n\n    # Crawl npmjs URL\n    collaborators_list = crawler(package_name)\n\n    # ----- Create Map of all relevant information for the top 10 packages -----\n    package_map = {\"package_name\": package_name, \"version\": latest_version,\n                   \"contributors\": len(collaborators_list), \"dependencies\": dependencies,\n                   \"dev_dependencies\": dev_dependencies}\n\n    return package_map\n\n\ndef create_dependency_map(transitive_dependencies):\n    dependencies_map = {}\n\n    if transitive_dependencies is None:\n        dependencies_map = {}\n    else:\n        for item in transitive_dependencies.items():\n            dependency_name = item[0]\n\n            # Crawl npmjs URL\n            collaborators_list = crawler(dependency_name)\n\n            # ----- Create Map of all relevant information for the top 10 packages -----\n            package_map = {\"package_name\": dependency_name, \"contributors\": len(collaborators_list)}\n            dependencies_map[dependency_name] = package_map\n\n    return dependencies_map\n\n\n# Main method\ndef main():\n    count = 1\n    for package_name in top_ten_packages_list:\n        package_map = metadata_extraction(package_name)\n\n        # Get all the dependencies from the top 10 packages, each transitive dependency contains dependency name and\n        # version\n        transitive_dependencies = package_map.get(\"dependencies\")\n        dev_transitive_dependencies = package_map.get(\"dev_dependencies\")\n\n        # Creates a new map, each transitive dependency contains the dependency name and number of contributors\n        transitive_dependencies_map = create_dependency_map(transitive_dependencies)\n        dev_transitive_dependencies_map = create_dependency_map(dev_transitive_dependencies)\n\n        # ----- Create Map of all relevant information for the top 10 packages -----\n        full_package_map = {\"package_name\": 
package_map[\"package_name\"], \"version\": package_map[\"version\"],\n \"contributors\": package_map[\"contributors\"], \"dependencies\": transitive_dependencies_map,\n \"dev_dependencies\": dev_transitive_dependencies_map}\n\n file.write(json.dumps(full_package_map) + \"\\n\")\n\n # count += 1\n # if count > 3:\n # break\n\n # print(package_map)\n # file.write(json.dumps(package_map) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n print('[Info] Program has completed.')\n\n# ----- extra code -----\n# with open(\"all_packages.json\", \"w\") as write_file:\n# json.dump(data, write_file)\n# print(r.status_code)\n# print(data)\n","sub_path":"TopTen_DependencyCollaborators/download_top_packages.py","file_name":"download_top_packages.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534930095","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef crawl_volume_links_from_journal(journal_url, year):\n # return link lists of journal in 'journal_url' in recent 'year' years\n soup = BeautifulSoup(requests.get(url=journal_url).text, 'html.parser')\n a_objects = soup.find_all('ul', class_=False)[6].find_all('a')\n links = []\n for a_object in a_objects[:year]:\n links.append(a_object.get('href'))\n return links\n\n\ndef crawl_titles(url, keywords):\n # return paper title list in 'url' which contains 'keywords'\n titles = []\n html_soup = BeautifulSoup(requests.get(url=url).text, 'html.parser')\n for title in html_soup.find_all('span', class_='title'):\n text = title.get_text()\n flag = False\n for word in keywords:\n if text.upper().find(word.upper()) is not -1:\n flag = True\n if flag:\n titles.append(text)\n return titles\n\n\ndef crawl_contents_links_from_conference(conference_url, year):\n # return link lists of journal in 'journal_url' in recent 'year' years\n soup = BeautifulSoup(requests.get(url=conference_url).text, 'html.parser')\n publ_lists = soup.find_all('ul', class_='publ-list')\n links = []\n for i in range(year):\n a_objects = (publ_lists[i].find_all('a', class_='toc-link'))\n for a_object in a_objects:\n links.append(a_object.get('href'))\n # print(links)\n return links\n\n\nif __name__ == \"__main__\":\n # links = crawl_contents_links_from_conference('https://dblp.uni-trier.de/db/conf/sigmod/', 1)\n links = crawl_volume_links_from_journal('https://dblp.uni-trier.de/db/journals/tods/', year=5)\n print(links)\n for link in links:\n titles = crawl_titles(link, ['graph'])\n print(titles)\n","sub_path":"dblp_crawler.py","file_name":"dblp_crawler.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"381748816","text":"from .base import Result\n\n\ndef spread(df):\n df['owner.tz.hour'] = df['owner.tz'] / 60.0\n counts_by_hour = df.groupby('owner.tz.hour')\n\n return Result(\n counts_by_hour['owner._account_id'].count(),\n kind='bar'\n )\n","sub_path":"review_analysis/reports/timezones.py","file_name":"timezones.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"196739742","text":"import os,re,time\nfrom prometheus_client import start_http_server, Gauge\ng=Gauge('pvc_usage','fetching usge matched by k8s csi',['volumename'])\nstart_http_server(8848)\nwhile 1:\n get_pvc=os.popen(\"df -h|grep -E 'kubernetes.io/flexvolume|kubernetes.io~csi|kubernetes.io/gce-pd/mounts'\")\n 
pvcs=get_pvc.readlines()\n    for i in pvcs:\n        il=i.split(' ')\n        volume=il[-1].split('/')[-1]\n        for v in il[-1].split('/'):\n            if re.match(\"^pvc\",v):\n                volume=v\n            elif re.match(\"^gke-data\",v):\n                volume='pvc'+v.split('pvc')[-1]\n        for u in il:\n            if re.match(\"^[0-9]*\\%\",u):\n                usage=float(u.strip('%'))/100.0\n        #pvc_usage[volume]=usage\n        print(volume,usage)\n        g.labels(volume).set(usage)\n\n    time.sleep(15)\n","sub_path":"block-pvc-scanner/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"343466898","text":"import json\nfrom collections import defaultdict\n\nfrom tqdm import tqdm\n\nfrom flight.models import Flight, Airport\nfrom meta_config import SPIDER_DATA_DIRNAME, BULK_CREATE_BATCH_SIZE\n\n\ndef flight_daily_import():\n    Flight.objects.all().delete()\n    with open(f'{SPIDER_DATA_DIRNAME}/flights_data.json', 'r+', encoding='utf-8') as f:\n        data = json.loads(f.read())\n\n    imported = defaultdict(bool)\n    objs = []\n    bar = tqdm(list(enumerate(data)), dynamic_ncols=True)\n    for line, item in bar:\n        bar.set_description(f'[line{line}]')\n        bar.set_postfix_str(f'{item[\"dept_city\"]} => {item[\"arri_city\"]}')\n        \n        dept_airport = item['dept_city']\n        if Airport.objects.filter(airport_code=dept_airport).count() == 0:\n            dept_airport = None\n        else:\n            dept_airport = Airport.objects.filter(airport_code=dept_airport).get()\n        arri_airport = item['arri_city']\n        if Airport.objects.filter(airport_code=arri_airport).count() == 0:\n            arri_airport = None\n        else:\n            arri_airport = Airport.objects.filter(airport_code=arri_airport).get()\n        if arri_airport is None or dept_airport is None:\n            continue\n        if not imported[item['code']]:\n            imported[item['code']] = True\n            kwargs = {'code': item['code'], 'dept_time': item['dept_time'], 'dept_airport': dept_airport, 'arri_time': item['arri_time'], 'arri_airport': arri_airport, 'condition': item['condition']}\n            objs.append(Flight(**kwargs))\n    bar.close()\n    \n    Flight.objects.bulk_create(objs, BULK_CREATE_BATCH_SIZE)\n","sub_path":"spiders/flight_daily_importer.py","file_name":"flight_daily_importer.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"420856587","text":"from math import *\nfrom Loading import *\nfrom matplotlib import pyplot as plt\nimport moi_changing_configuration as mcc\ndef inertiaxx(c,As,Nsb,Nst,t):\n\n    hfsloc = 0.2 # location of the frontspar (times the chordlength) []\n    hrsloc = 0.75 # location of the rearspar (times the chordlength) []\n    hfsratio = 2 * 0.0633\n    hrsratio = 0.0194 + 0.0473\n    widthratio = hrsloc - hfsloc\n    Ilst = [] # define lst\n    ycentroidlst = []\n    alfa = atan((hfsratio - hrsratio) / widthratio)\n\n    #centroid calculation\n    #calc the numerator taking different altitudes of stringers into account assuming they have equal intervals\n    for i in range(len(c)):\n\n        hfs = c[i]*hfsratio\n        hrs = c[i]*hrsratio\n        w = c[i]*widthratio\n        x = sqrt(w ** 2 + (hfs - hrs) ** 2)\n        intervalNsb = (w / (Nsb + 1))\n\n        numerator = hfs / 2 * hfs * t + hfs * w * t + (hfs - hrs / 2) * hrs * t + ((hfs - hrs) / 2) * x * t + hfs * Nst * As\n        for i in range(Nsb):\n            numerator = numerator+ tan(alfa)*intervalNsb * (Nsb+1) * As\n        denominator = hfs * t + w * t + hrs * t + x * t + (Nsb + Nst) * As\n\n        ycentroid = numerator / denominator\n\n        # Moment of Inertia calculations\n        I1 = 1 / 12 * t * hfs ** 3 + hfs * t * (ycentroid - hfs / 2) ** 2\n        I2 = 1 / 12 * w 
* t ** 3 + w * t * (ycentroid - hfs) ** 2\n        I3 = 1 / 12 * t * hrs ** 3 + hrs * t * (ycentroid + hrs / 2 - hfs) ** 2\n        I4 = 1 / 12 * t * x * (hfs - hrs) ** 2 + x * t * (ycentroid - (hfs - hrs) / 2) ** 2\n\n        Istringertop = As * (ycentroid - hfs) ** 2 * Nst\n\n        #account for different height of bottom stringers\n        Istringerbottom = 0\n        for i in range(Nsb):\n            Istringerbottom = Istringerbottom + As * (ycentroid - tan(alfa)*(Nsb+1)*intervalNsb) ** 2\n\n        Itotal = I1 + I2 + I3 + I4 + Istringerbottom + Istringertop\n        Ilst.append(Itotal)\n        ycentroidlst.append(ycentroid)\n    return Ilst,ycentroidlst\n\ndef inertiazz(c,As,Nsb,Nst,t):\n    hfsloc = 0.2 # location of the frontspar (times the chordlength) []\n    hrsloc = 0.75 # location of the rearspar (times the chordlength) []\n    hfsratio = 2 * 0.0633\n    hrsratio = 0.0194 + 0.0473\n    widthratio = hrsloc - hfsloc\n    alfa = atan((hfsratio - hrsratio) / widthratio) #angle of inclined plate\n    Ilst = [] #define lst\n    zcentroidlst = []\n    #calculate list of moment of inertias and append them to list\n    for i in range(len(c)):\n        hfs = hfsratio*c[i]\n        hrs = hrsratio*c[i]\n        w = widthratio*c[i]\n\n        #first calc centroid\n        numerator = hrs*(t**2)/2+t*(w**2)/2+sqrt((hfs-hrs)**2+(w)**2)*t*w/2+hfs*t*(w-t/2)+(Nsb+Nst)*As*w/2\n        denominator = (hfs+hrs+w+sqrt((hfs-hrs)**2+(w)**2))*t+(Nst+Nsb)*As\n        zcentroid = numerator/denominator\n\n        #calculate MOI's (terms with t^2 or higher are left out because of thin walled)\n        I1 = hrs*t*(zcentroid-t/2)**2 #moi of hrs\n        I2 = 1/12*w**3*t+w*t*(zcentroid-w)**2 #MOI of w plate\n        I3 = hfs*t*(w-t/2-zcentroid)**2 #MOI of hfs\n        I4 = 1/12*sqrt((hfs-hrs)**2+(w)**2)**3*t*cos(alfa)**2\n\n        Istringertop = 0\n        intervalNst = (w / (Nst + 1))\n        Istringerbottom = 0\n        intervalNsb = (w / (Nsb + 1))\n        for i in range(Nsb):\n            Istringerbottom = Istringerbottom + As*(intervalNsb*i-zcentroid)**2\n        for i in range(Nst):\n            Istringertop = Istringertop + As * (intervalNst * i - zcentroid) ** 2\n\n\n        Itotal = I1 + I2 + I3 + I4 + Istringertop + Istringerbottom\n        Ilst.append(Itotal)\n        zcentroidlst.append(zcentroid)\n    return Ilst,zcentroidlst\n\n#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n\n#import moment and pos matrices\ng0 = 9.80665\n\n#define some parameters\nxflrfilename = 'MainWingData_a10.csv'\nengine_mass = 3930 #[kg]\nengine_max_thrust = 137.14 *1000 #[N]\nwing_span = 31.97 #[m]\nfuselage_mass = 34602.4-3930-30088*0.453592 #[kg]\nwing_mass = 30088*0.453592 #[kg]\ny_frac_from_f_engine = 0.35 #[-]\n\nwing1 = wing(xflrfilenamea0,xflrfilenamea10)\nwing1.force_analysis_clean(aoa=-1.31, velocity=289.22, density=1.225) # load factor of 5.4 accounted for\nwing1.add_engine(engine_mass, 0.35, max_thrust=engine_max_thrust, takeoff=True)\n# wing1.add_fuel_weight(fuel_mass = 38600, fuel_frac = 0.8)\nwing1.add_wing_weight(wing_mass)\nwing1.get_internal_forces(beam=False)\n\n\n\n#to get the second load case\nwing2 = wing(xflrfilenamea0,xflrfilenamea10)\nwing2.force_analysis_clean(aoa=-9.5, velocity=289.22, density=0.4135) # load factor of 5.4 accounted for\nwing2.add_engine(engine_mass, 0.35, max_thrust=engine_max_thrust, takeoff=True)\nwing2.add_fuel_weight(fuel_mass = 38600, fuel_frac = 0.8)\nwing2.add_wing_weight(wing_mass)\nwing2.get_internal_forces(beam=False)\n\n\nwing1.convert_to_beam()\nwing2.convert_to_beam()\n\n\n#defining some values\ntcratio = 0.14 #thickness to chord ratio []\nhfsloc = 0.2 #location of the frontspar (times the chordlength) 
[]\nhrsloc = 0.75 #location of the rearspar (times the chordlength) []\nwidthratio = hrsloc-hfsloc #Ratio of the width of the wing box and chordlength (to get actual width multiply with chord) []\nhfsratio = 2 * 0.0633\nhrsratio = 0.0194 + 0.0473\nt = [0.0025,0.01,0.003] #thickness of the plates of the wingbox [m]\nAs = 0.0015 #area of the stringers [m^2]\nNst = [32,18,29] #number of stringers top plate\nNsb = [30,15,27] #number of stringer bottom plate\nEmodulus = 72*10**9 # [Pa]\nGmodulus = 28.3*10**9 # [Pa]\nhalfspan = 31.97/2 # halfspan width in [m]\n\n\ny = wing1.pos_matrix[:,1] #position array along the wing [m]\nc = wing1.chord_array #create array of the chord with corresponding pos along wing\n\nMx = wing1.internal_moment[:,0] #corresponding values to y\nTy = wing1.internal_moment[:,1] #=My torsion forces inside the wingbox along the span [Nm]\nMz = wing1.internal_moment[:,2]\n\n\nMx2 = wing2.internal_moment[:,0] #corresponding values to y\nMz2 = wing2.internal_moment[:,2]\nTy2 = wing2.internal_moment[:,1] #=My torsion forces inside the wingbox along the span [Nm]\n\n\nKl = 28 #fracture toughness\na = 0.0025 #half-size of the crack [m]\nmaxsigma = Kl/sqrt(pi*a)*10**6\nprint(maxsigma)\n#define lists\nsigmalst1 = []\nsigmalst4 = []\n#create list of stresses at the two most extreme points\nfor i in range(3):\n    Ixxlst = inertiaxx(c,As,Nsb[i],Nst[i],t[i])[0]\n    Izzlst = inertiazz(c,As,Nsb[i],Nst[i],t[i])[0]\n    xcentroid = inertiaxx(c, As, Nsb[i], Nst[i], t[i])[1]\n    zcentroid = inertiazz(c, As, Nsb[i], Nst[i], t[i])[1]\n    sigma1 = []\n    sigma4 = []\n    for j in range(len(c)):\n\n        sigma1.append(Mx2[j]/Ixxlst[j]*(c[j]*hfsratio-xcentroid[j])+Mz2[j]/Izzlst[j]*(widthratio*c[j]-zcentroid[j]))\n        #sigma 2 not considered because of counteracting stress which results in lower tensile stress anyways\n        #sigma3 same as 2+ closer to centroid\n        sigma4.append(abs(Mx[j]/Ixxlst[j]*(xcentroid[j]))+Mz[j]/Izzlst[j]*(widthratio*c[j]-zcentroid[j]))\n    sigmalst1.append(list(sigma1))\n    sigmalst4.append(list(sigma4))\n\n\nplt.subplot(221)\nplt.title('stresses')\nplt.plot(y,sigmalst1[0],label='tension in point 1')\nplt.plot(y,sigmalst4[0], label='tension in point 4')\nplt.legend()\n\nplt.subplot(222)\nplt.title('just chord to span')\nplt.plot(y,c)\n\n\nplt.subplot(223)\nplt.title('MOIs')\nplt.plot(y,Ixxlst, label='moixx')\nplt.plot(y,Izzlst, label='moizz')\nplt.legend()\n\nplt.subplot(224)\nplt.plot(y,Mx, label='mx')\nplt.plot(y,Mz, label='mz')\nplt.plot(y,Mx2, label='mx2')\nplt.plot(y,Mz2, label='mz2')\nplt.grid(True)\nplt.legend()\n\nplt.show()\n\n#plots of safetymargin to moments of 3 different configurations\n\nsafetymargin0 = []\nsafetymargin1 = []\nsafetymargin2 = []\nfor i in range(len(y)-2):\n\n    safetymargin0.append(maxsigma/sigmalst4[0][i])\n    safetymargin1.append(maxsigma/sigmalst4[1][i])\n    safetymargin2.append(maxsigma/sigmalst4[2][i])\nsafetymargin0.append(1)\nsafetymargin0.append(1)\nsafetymargin1.append(1)\nsafetymargin1.append(1)\nsafetymargin2.append(1)\nsafetymargin2.append(1)\n\nplt.subplot(221)\nplt.title('safetymargin to longitudinal place')\nplt.plot(y,safetymargin0, label='configuration 1')\nplt.grid(True)\nplt.legend()\n\nplt.subplot(222)\nplt.plot(y,safetymargin1, label='configuration 2')\nplt.grid(True)\nplt.legend()\n\nplt.subplot(223)\nplt.plot(y,safetymargin2, label='configuration 3')\nplt.grid(True)\nplt.legend()\n\n\nplt.show()\n\nprint(sigmalst4[0])\nprint(max(sigmalst4[0]))\nindex = sigmalst4[0].index(max(sigmalst4[0]))\nprint(y[index],c[index], widthratio*c[index])\n\n#\n# Istr = 
4.0271565755208345**(-6)\n# As = 0.0015\n# viable = True\n# configlst = []\n# print(maxsigma)\n# for Nst1 in range(25,33,3):\n# for Nst2 in range(20,33,3):\n# for Nst3 in range(15,33,3):\n# for t in range(2,10):\n# t = t*0.001\n# Nsb1 = Nst1\n# Nsb2 = Nst2\n# Nsb3 = Nst3\n# viable = True\n# Izzlst = mcc.inertiazz(c,t,Istr,As,Nst1,Nst2,Nst3,Nsb1,Nsb2,Nsb3)[0]\n# Ixxlst = mcc.inertiaxx(c,t,Istr,As,Nst1,Nst2,Nst3,Nsb1,Nsb2,Nsb3)[0]\n# xcentroid = mcc.inertiaxx(c,t,Istr,As,Nst1,Nst2,Nst3,Nsb1,Nsb2,Nsb3)[1]\n# zcentroid = mcc.inertiazz(c, t, Istr, As, Nst1, Nst2, Nst3, Nsb1, Nsb2, Nsb3)[1]\n#\n# for j in range(len(c)):\n# sigma1 = (Mx2[j]/Ixxlst[j]*(c[j]*hfsratio-xcentroid[j])+Mz2[j]/Izzlst[j]*(widthratio*c[j]-zcentroid[j]))\n# # sigma 2 not considered because of counteracting stress which results in lower tensile stress anyway\n# # sigma3 same as 2+ closer to centroid\n# sigma4 = abs(Mx[j] / Ixxlst[j] * (xcentroid[j])) + Mz[j] / Izzlst[j] * (hfsratio*c[j]-zcentroid[j])\n# if sigma1>maxsigma or sigma4>maxsigma :\n# #print('check',sigma4*10**(-6),sigma1*10**(-6))\n# viable = False\n# break\n# if viable:\n# configlst.append([t,Istr,As,Nst1,Nst2,Nst3,Nsb1,Nsb2,Nsb3])\n# #print(t,Istr,As,Nst1,Nst2,Nst3,Nsb1,Nsb2,Nsb3)\n#\n# mass = []\n# for i in range(len(configlst)):\n# mass.append(mcc.weight(configlst[i][0],configlst[i][1],configlst[i][2],configlst[i][3],configlst[i][4],configlst[i][5],configlst[i][6],configlst[i][7],configlst[i][8],))\n# mass.index(min(mass))\n# print(len(mass))\n# print(min(mass))\n# print(configlst[mass.index(min(mass))])","sub_path":"Tensilestresses.py","file_name":"Tensilestresses.py","file_ext":"py","file_size_in_byte":10726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"332539145","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('posts', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('comment_text', models.TextField()),\n ('created_timestamp', models.DateTimeField(auto_now_add=True)),\n ('updated_timestamp', models.DateTimeField(auto_now=True)),\n ('comment_poster', models.ForeignKey(related_name='related_comments', to=settings.AUTH_USER_MODEL)),\n ('parent_post', models.ForeignKey(related_name='comments', to='posts.post')),\n ],\n ),\n ]\n","sub_path":"portal/posts/migrations/0002_comment.py","file_name":"0002_comment.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"350737849","text":"#!/usr/bin/env python3\nimport os\nimport hashlib\nfrom scripts.psql_firmware import psql\nimport boto\n\n\ndef getFileMd5(fileName):\n with open(fileName, mode='rb') as fin:\n return hashlib.md5(fin.read()).hexdigest()\n\n\ndef s3_download(furl, fname):\n from urllib.parse import urlsplit\n try:\n sr = urlsplit(furl)\n s3 = boto.connect_s3()\n buck = s3.get_bucket(sr.netloc)\n obj = buck.get_key(sr.path)\n obj.get_contents_to_filename(fname)\n return fname\n finally:\n s3.close()\n\n\ndef s3_search(fname, iid, brand, md5):\n try:\n s3 = boto.connect_s3()\n buck = s3.get_bucket('grid-iot-firmware-harvest')\n def find(b):\n for obj in buck.list('fw_files/%s/' % 
b):\n if obj.key.endswith(fname):\n obj.get_contents_to_filename(fname)\n if getFileMd5(fname) != md5:\n print('md5 not match ', obj.key)\n continue\n furl = \"s3://%s/%s\" % (buck.name, obj.key)\n psql(\"UPDATE image SET file_url=%(furl)s \"\n \"WHERE id = %(iid)s\", locals())\n return fname\n if find(brand):\n return fname\n elif find(brand.lower()):\n return fname\n elif find(brand.lower().replace('-', '')):\n return fname\n return None\n finally:\n s3.close()\n\n\ndef s3_check_md5(furl, fname, md5, iid, brand):\n fname = s3_download(furl, fname)\n if md5 != getFileMd5(fname):\n print(\"erase iid=%(iid)s\" % locals())\n psql(\"UPDATE image SET file_url=NULL \"\n \"WHERE id = %(iid)s\", locals())\n return s3_search(fname, iid, brand, md5)\n else:\n return fname\n\n\ndef main():\n rows = psql(\"SELECT \"\n \"id, open_ports, file_url, brand, filename, hash \"\n \"FROM image \"\n # \"WHERE file_url ILIKE 's3://%' \"\n # \"WHERE open_ports @> array['(23,tcp,telnet)'] AND mirai_botnet_positive IS NULL \"\n \" WHERE id>=83539\"\n \" ORDER BY id\")\n # rows = [_ for _ in rows]\n for row in rows:\n statvfs = os.statvfs('/home')\n free_gb = statvfs.f_frsize * statvfs.f_bavail/1000/1000/1000\n if free_gb < 2.0:\n print(\"free space not enough\")\n break\n iid, open_ports, furl, brand, filename, md5 = row\n md5 = md5.replace(\"-\", \"\")\n print(\"%(iid)s %(open_ports)s \" % locals())\n print('scripts/process_firmware_file.py '\n ' \"%(brand)s\" \"%(furl)s\"' % locals())\n if not furl:\n furl = s3_search(filename, iid, brand, md5)\n if not furl:\n print(\"Failed to search %s in S3\" % (filename))\n continue\n elif furl.startswith('s3://'):\n furl = s3_check_md5(furl, filename, md5, iid, brand)\n os.system('python3 -u scripts/process_firmware_file.py '\n '\"%(brand)s\" \"%(furl)s\"' % locals())\n try:\n os.remove(furl)\n except:\n pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"process_01.py","file_name":"process_01.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"363918926","text":"import numpy as np\n\ndef Gauss(A,B) :\n n = A.shape[0] # axis 0 is rows, axis 1 is columns\n \n l = np.zeros(n,dtype=np.int) # index vector\n s = np.zeros(n) # scale vector\n X = np.matrix(np.zeros(n))\n X = X.T # transpose\n \n # initialize the index vector with the natural row order\n \n for i in range(n) :\n l[i] = i\n \n smax = 0\n \n for j in range(n):\n smax = max(smax,abs(A[i,j]))\n \n s[i]= smax\n \n for k in range(n) :\n rmax=0\n \n for i in range(k,n):\n r = abs(A[l[i],k]/s[l[i]])\n \n if (r>rmax):\n rmax =r\n j = i\n \n tmp = l[j]\n l[j] = l[k]\n l[k] = tmp\n \n for i in range(k+1,n) :\n alpha = A[l[i],k]/A[l[k],k]\n \n for j in range(k,n):\n A[l[i],j]=A[l[i],j]-alpha*A[l[k],j]\n \n B[l[i]]=B[l[i]] - alpha*B[l[k]]\n \n \n #Back substitution\n \n X[n-1] = B[l[n-1]]/ A[l[n-1],n-1]\n \n for i in range(n-2,-1,-1):\n sum = B[l[i]]\n \n for j in range(i+1,n) :\n sum = sum - A[l[i],j]*X[j]\n \n X[i] = sum/A[l[i],i]\n \n print(A)\n \n return X\n\nA = np.matrix([[3.,-13.,9.,3.],[-6.,4.,1.,-18.],[6.,-2.,2.,4.],[12.,-8.,6.,10.]])\nB = np.matrix([-19.,-34.,16.,26.])\nB = B.T\n\nGauss(A,B)\n","sub_path":"GaussianElimination.py","file_name":"GaussianElimination.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"48691398","text":"\n\nfrom xai.brain.wordbase.nouns._antecedent import _ANTECEDENT\n\n#class header\nclass _ANTECEDENTS(_ANTECEDENT, ):\n\tdef __init__(self,): 
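# thin wrapper class: __init__ only fills in the plural noun's metadata fields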
\n\t\t_ANTECEDENT.__init__(self)\n\t\tself.name = \"ANTECEDENTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"antecedent\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_antecedents.py","file_name":"_antecedents.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"445946533","text":"\n# coding: utf-8\n\n# In[656]:\n\n\nfrom pypinyin import pinyin, lazy_pinyin, Style\nimport sys\nimport math\nimport itertools\nimport pickle\nimport os\n\n\n# In[657]:\n\n\nwork_dir = os.path.abspath(__file__)\nindexOfLastSlash = work_dir.rfind(\"/\")\nwork_dir = work_dir[0:indexOfLastSlash]+\"/\"\n\n\n# In[658]:\n\n\nsfile = open(work_dir+'pinyin_to_simplified.pickle', 'rb')\n\n# dump information to that file\npinyin_to_simplified = pickle.load(sfile)\nsfile.close()\n\ntfile = open(work_dir+'pinyin_to_traditional.pickle', 'rb')\npinyin_to_traditional = pickle.load(tfile)\ntfile.close()\n\n\n# In[659]:\n\n\ndef to_pinyin(utterance):\n length = len(utterance)\n translated = []\n pinyin_encodings = pinyin(utterance, style=Style.TONE2)\n for i in range(length):\n currPinyin = pinyin_encodings[i][0]\n# print(\"{} translates to {}\".format(currPinyin, putToneToEnd(currPinyin)))\n translated.append(putToneToEnd(currPinyin))\n return translated\n\ndef putToneToEnd(input_pinyin):\n if len(input_pinyin) == 1:\n return input_pinyin + '1'\n tone_index = 0\n tone = '1'\n for index, character in enumerate(input_pinyin):\n if character in (\"1\",\"2\",\"3\",\"4\"):\n tone_index = index\n tone = input_pinyin[index]\n break;\n if tone_index == 0:\n return input_pinyin + \"5\"\n return input_pinyin[0:index] + input_pinyin[index+1:] + tone\n\ndef get_distance(utterance1, utterance2):\n if(len(utterance1) != len(utterance2)):\n print(\"the two inputs do not have the same length\")\n return sys.float_info.max\n else:\n u1 = to_pinyin(utterance1)\n u2 = to_pinyin(utterance2)\n \n la = []\n lb = []\n for py in u1:\n la.append(Pinyin(py))\n for py in u2:\n lb.append(Pinyin(py))\n \n\n res = 0.0\n numDiff = 0 \n tot = len(utterance1)*2.1\n for i in range (len(utterance1)):\n apy = la[i]\n bpy = lb[i]\n\n if (apy is None) or (bpy is None):\n print(\"!Error {},{}\".format(la, lb))\n \n res += getEditDistanceClose_TwoDCode(apy, bpy)\n \n if apy.consonant != bpy.consonant:\n numDiff+=1\n \n if not(str(apy.vowel) == str(bpy.vowel)):\n numDiff+=1\n \n if apy.tone != bpy.tone:\n numDiff+=0.01;\n \n diffRatio = (numDiff)/tot;\n a = 0\n if diffRatio == 0:\n a=1\n return res*diffRatio;\n \ndef getEditDistanceClose_TwoDCode(a, b):\n res = 0\n try:\n if (a is None) or (b is None):\n print(\"Error:pinyin({},{})\".format(a.toString(),b.toString()))\n return res\n \n twoDcode_consonant_a = consonantMap_TwoDCode[a.consonant]\n twoDcode_consonant_b = consonantMap_TwoDCode[b.consonant]\n \n cDis = abs(getDistance_TwoDCode(twoDcode_consonant_a, twoDcode_consonant_b))\n \n twoDcode_vowel_a = vowelMap_TwoDCode[a.vowel]\n twoDcode_vowel_b = vowelMap_TwoDCode[b.vowel]\n \n vDis = abs(getDistance_TwoDCode(twoDcode_vowel_a, twoDcode_vowel_b))\n\n hcDis = getSimDisFromHardcodMap(a,b)\n \n res = min((cDis+vDis),hcDis) + 1.0*abs(a.tone-b.tone)/10\n \n except:\n print(\"Error pinyin {}{}\".format(a.toString(), b.toString()))\n raise\n return res\n\ndef getSimDisFromHardcodMap(a, b):\n try:\n simPy = hardcodeMap[a.toStringNoTone()]\n if simPy is not None:\n if simPy == b.toStringNoTone():\n return 2.0\n else:\n 
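# the forward a->b lookup missed, so retry the hard-coded map with b as the key and compare against a\n 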
simPy=hardcodeMap[b.toStringNoTone()]\n if simPy is not None and simPy is a.toStringNoTone():\n return 2.0\n return sys.float_info.max\n except:\n return sys.float_info.max\n \n \ndef getDistance_TwoDCode(X, Y):\n x1, x2 = X\n y1, y2 = Y\n\n x1d = abs(x1-y1)\n x2d = abs(x2-y2)\n \n return math.sqrt( x1d**2 + x2d**2)\n\n\nconsonantMap_TwoDCode ={\n \"b\":(1.0,0.5),\n \"p\":(1.0,1.5), \n\n \"g\":(7.0,0.5), \n \"k\":(7.0,1.5), \n \"h\":(7.0,3.0), \n \"f\":(7.0,4.0), \n\n \"d\":(12.0,0.5), \n \"t\":(12.0,1.5), \n\n \"n\":(22.5,0.5), \n \"l\":(22.5,1.5), \n \"r\":( 22.5,2.5), \n\n \n \"zh\":(30,1.7), \n \"z\":(30,1.5), \n \"j\":(30.0,0.5), \n\n \"ch\":(31,1.7), \n \"c\":(31,1.5), \n \"q\":(31.0,0.5), \n\n \"sh\":(33,3.7),\n \"s\":(33,3.5),\n \"x\":(33,2.5),\n\n \n \"m\":(50.0,3.5), \n\n \"y\":(40.0,0.0), \n \"w\":(40,5.0),\n \n \"\":(99999.0,99999.0)\n}\n\n\n# In[662]:\n\n\nvowelMap_TwoDCode = {\n \"a\":(1.0,0.0),\n \"an\":(1.0,1.0),\n \"ang\":(1.0,1.5),\n\n \n \"ia\":(0.0,0.0),\n \"ian\":(0.0,1.0),\n \"iang\":(0.0,1.5),\n\n \"ua\":(2.0,0.0),\n \"uan\":(2.0,1.0),\n \"uang\":(2.0,1.5),\n \"u:an\":(2.0,1.0),\n\n \n \"ao\":(5.0,0.0),\n \"iao\":(5.0,1.5),\n\n \"ai\":(8.0,0.0),\n \"uai\":(8.0,1.5),\n\n \n\n \"o\":(20,0.0),\n \"io\":(20,2.5),\n \"iou\":(20,4),\n \"iu\":(20,4),\n \"ou\":(20,5.5),\n \"uo\":(20,6.0),\n\n \"ong\":(20,8.0),\n \"iong\":(20,9.5),\n\n \n \"er\":(41,1),\n \"e\":(41,0.0),\n\n \"u:e\":(40,5.0),\n \"ve\":(40,5.0),\n \"ue\":(40,5.0),\n \"ie\":(40,4.5),\n \"ei\":(40,4.0),\n \"uei\":(40,3.0),\n \"ui\":(40,3.0),\n\n \"en\":(42,0.5),\n \"eng\":(42,1.0),\n\n \"uen\":(43,0.5),\n \"un\":(43,0.5),\n \"ueng\":(43,1.0),\n\n \n \"i\":(60,1.0),\n \"in\":(60,2.5),\n \"ing\":(60,3.0),\n\n \"u:\":(61,1.0),\n \"v\":(61,1.0),\n \"u:n\":(61,2.5),\n \"vn\":(61,2.5),\n\n \"u\":(80,0.0),\n\n \"\":(99999.0,99999.0)\n}\n\n\n# In[663]:\n\n\nconsonantList = [\"b\", \"p\", \"m\", \"f\", \"d\", \"t\", \"n\", \"l\", \"g\", \"k\",\"h\", \"j\", \"q\", \"x\", \"zh\", \"ch\", \"sh\", \"r\", \"z\", \"c\", \"s\",\"y\", \"w\"]\n\n\n# In[664]:\n\n\nvowelList = [\"a\", \"o\", \"e\", \"i\", \"u\", \"v\",\"u:\",\"er\", \"ao\",\"ai\", \"ou\",\"ei\", \"ia\", \"iao\", \"iu\", \"iou\",\"ie\", \"ui\",\"uei\",\"ua\",\"uo\",\"uai\", \"u:e\",\"ve\", \"an\", \"en\", \"in\", \"un\",\"uen\", \"vn\",\"u:n\",\"ian\",\"uan\", \"u:an\",\"van\", \"ang\", \"eng\", \"ing\", \"ong\",\"iang\",\"iong\",\"uang\",\"ueng\"]\n\n\n# In[665]:\n\n\nclass Pinyin:\n consonantList = [\"b\", \"p\", \"m\", \"f\", \"d\", \"t\", \"n\", \"l\", \"g\", \"k\", \"h\", \"j\", \"q\", \"x\", \"zh\", \"ch\", \"sh\", \"r\", \"z\", \"c\", \"s\",\"y\", \"w\"]\n vowelList = [\"a\", \"o\", \"e\", \"i\", \"u\", \"v\",\"u:\",\"er\", \"ao\",\"ai\", \"ou\",\"ei\", \"ia\", \"iao\", \"iu\", \"iou\",\"ie\", \"ui\",\"uei\",\"ua\",\"uo\",\"uai\", \"u:e\",\"ve\", \"an\", \"en\", \"in\", \"un\",\"uen\", \"vn\",\"u:n\",\"ian\",\"uan\", \"u:an\",\"van\", \"ang\", \"eng\", \"ing\", \"ong\",\"iang\",\"iong\",\"uang\",\"ueng\"]\n \n def __init__(self, pinyinstr):\n self.tone = int(pinyinstr[-1])\n self.locp = pinyinstr[0:-1].lower()\n self.consonant, self.vowel = self.parseConsonant(self.locp)\n# print(\"before rewriting consonant={}, vowel={}, locp={}, tone={}\".format(self.consonant,\n# self.vowel,\n# self.locp,\n# self.tone))\n self.pinyinRewrite()\n# print(\"after rewriting consonant={}, vowel={}, locp={}, tone={}\".format(self.consonant,\n# self.vowel,\n# self.locp,\n# self.tone))\n \n def parseConsonant(self, pinyin):\n for consonant in consonantList:\n if 
pinyin.startswith(consonant):\n return (consonant, pinyin[len(consonant):])\n # it's a vowel without consonant\n if pinyin in vowelList:\n return None, pinyin.lower()\n \n print(\"Invalid Pinyin, please check!\")\n return None, None\n \n def toStringNoTone(self):\n return \"{}{}\".format(self.consonant, self.vowel)\n \n def toStringWithTone(self):\n return \"{}{}{}\".format(self.consonant, self.vowel, self.tone)\n \n def toString(self):\n return \"{}{}{}\".format(self.consonant, self.vowel, self.tone)\n \n def pinyinRewrite(self):\n import re\n yVowels = {\"u\",\"ue\",\"uan\",\"un\",\"u:\",\"u:e\",\"u:an\",\"u:n\"}\n tconsonant = {\"j\",\"g\",\"x\"}\n if self.vowel is not None and 'v' in self.vowel:\n self.vowel = self.vowel.replace(\"v\", \"u:\")\n \n if self.consonant is None or self.consonant == \"\":\n self.consonant = \"\"\n return\n if self.consonant == \"y\":\n if self.vowel in yVowels:\n if \"u:\" not in self.vowel:\n self.vowel = self.vowel.replace(\"u\",\"u:\")\n else:\n self.vowel=\"i\"+self.vowel\n regex = re.compile(\"i+\")\n self.vowel = self.vowel.replace(\"iii\",\"i\")\n self.vowel = self.vowel.replace(\"ii\",\"i\")\n self.consonant=\"\"\n \n if self.consonant == \"w\":\n self.vowel=\"u\"+self.vowel;\n self.vowel=self.vowel.replace(\"uuu\",\"u\")\n self.vowel=self.vowel.replace(\"uu\",\"u\")\n self.consonant = \"\"\n \n if (self.consonant in tconsonant) and (self.vowel == \"u\" or self.vowel == \"v\"):\n self.vowel=\"u:\"\n \n if self.vowel == \"iou\":\n self.vowel = \"iu\"\n \n if self.vowel == \"uei\":\n self.vowel = \"ui\"\n \n if self.vowel == \"uen\":\n self.vowel = \"un\"\n \n \n \n \n\n\n# In[666]:\n\n\nhardcodeMap = {\n \"hua\":\"fa\",\n \"fa\":\"hua\",\n \"huan\":\"fan\",\n \"fan\":\"huan\",\n \"hui\":\"fei\",\n \"jie\":\"zhe\",\n \"kou\":\"ke\",\n \"gou\":\"ge\",\n \"zhong\":\"zen\",\n \"san\":\"shang\"\n}\n\n\n# In[667]:\n\n\nconsonantMap = {\n \"b\":1.0,\n \"p\":2.0,\n \n \"m\":11.0,\n \"f\":12.0,\n \n \"d\":21.0,\n \"t\":22.0,\n \n \"n\":31.0,\n \"l\":31.0,\n \"r\":32.0,\n \n \"g\":41.0,\n \"k\":42.0,\n \"h\":43.0,\n \n \"j\":46.0,\n \"q\":47.0,\n \"x\":48.0,\n \n \"z\":61.0,\n \"c\":62.0,\n \n \"zh\":71.0,\n \"ch\":72.0,\n \n \"sh\":81.0,\n \"s\":82.0,\n \n \"y\":90.0,\n \"w\":100.0,\n \n \"\":99999.0,\n \"__v\":99999.0\n}\n\n\n# In[668]:\n\n\nvowelMap = {\n \"ia\":0.0,\n \"a\":2.0,\n \"ai\":3.0,\n \"uai\":4.0,\n \"iao\":6.0,\n \"ao\":7.0,\n \n \"uan\":10.0,\n \"an\":11.0,\n \"ang\":12.0,\n \"ian\":14.0,\n \"iang\":15.0,\n \"uang\":17.0,\n \"ua\":18.0,\n \n \"o\":21.0,\n \"io\":22.0,\n \"ou\":23.0,\n \"uo\":24.0,\n \"ong\":26.0,\n \"iong\":27.0,\n \n \"e\":31.0,\n \"ei\":33.0,\n \"ie\":34.0,\n \"er\":37.0,\n \n \"ve\":40.0,\n \"ue\":40.0,\n \"u:e\":40.0,\n \n \"en\":43.0,\n \"eng\":44.0,\n \n \"uen\":45.0,\n \"ueng\":45.0,\n \n \"u:en\":42.0,\n \"ven\":42.0,\n \n \"i\":50.0,\n \"u:\":51.0,\n \"v\":51.0,\n \"u:n\":53.0,\n \"vn\":53.0,\n \"u:an\":55.0,\n \"v:an\":55.0,\n \n \"in\":53.0,\n \"ing\":55.0,\n \n \"u\":60.0,\n \"ui\":63.0,\n \"uei\":63.0,\n \"iu\":64.0,\n \"iou\":64.0,\n \"un\":66.0,\n \n \"\":99999.0,\n \"__v\":99999.0\n}\n\n\n# ### Get Pinyin Candidates that are close to an input pinyin\n\n# In[644]:\n\n\ndoubleConsonantsMap = {}\ndoubleVowelsMap = {}\n\ndef getClosePinyinCandids(word, theta=2):\n res = []\n word_pinyin = to_pinyin(word)\n word_py = Pinyin(word_pinyin[0])\n \n cCandids = getConsonantCandids(theta, word_py)\n for i in range(len(cCandids)):\n if cCandids[i] == word_py.consonant:\n continue\n for j in range(1,5,1):\n newPy = 
cCandids[i]+word_py.vowel+str(j)\n res.append(Pinyin(newPy))\n \n vCandids = getVowelCandids(theta, word_py)\n for i in range(len(vCandids)):\n for j in range(1,5,1):\n if word_py.consonant is None:\n# print(word_py.toStringWithTone(),\"has none consonant\")\n newPy = vCandids[i]+str(j)\n else:\n newPy = word_py.consonant+vCandids[i]+str(j)\n res.append(Pinyin(newPy))\n return res\n \n \n \ndef getConsonantCandids(theta, word_py):\n populateDoubleConsonantsMap()\n res = []\n curCode = 0 \n if word_py.consonant is None:\n orgCode = consonantMap[\"__v\"]\n else:\n orgCode = consonantMap[word_py.consonant]\n for i in range(int(orgCode-theta), int(orgCode+theta), 1):\n if float(i) in doubleConsonantsMap:\n cand = doubleConsonantsMap[float(i)]\n if cand is not None:\n res += cand\n return res\n \n\ndef getVowelCandids(theta, word_py):\n populateDoubleVowelsMap()\n res = []\n curCode = 0 \n orgCode = vowelMap[word_py.vowel]\n for i in range(int(orgCode-theta), int(orgCode+theta), 1):\n if float(i) in doubleVowelsMap:\n cand = doubleVowelsMap[float(i)]\n if cand is not None:\n res += cand\n return res\n\ndef populateDoubleConsonantsMap():\n if len(doubleConsonantsMap) is not 0:\n return\n hmCdouble = consonantMap\n for consonant in hmCdouble:\n if hmCdouble[consonant] not in doubleConsonantsMap:\n doubleConsonantsMap[hmCdouble[consonant]] = []\n \n doubleConsonantsMap[hmCdouble[consonant]].append(consonant)\n \ndef populateDoubleVowelsMap():\n if len(doubleVowelsMap) is not 0:\n return\n hmVdouble = vowelMap\n for vowel in hmVdouble:\n if hmVdouble[vowel] not in doubleVowelsMap:\n doubleVowelsMap[hmVdouble[vowel]] = []\n \n doubleVowelsMap[hmVdouble[vowel]].append(vowel) \n \n\ndef getCandidates(sentence, mode=\"simplified\", theta=1):\n candidates = []\n words_candidates = []\n for word in sentence:\n candid = getClosePinyinCandids(word, theta)\n words_candidates.append(candid)\n all_combinations = itertools.product(*words_candidates)\n counter = 0\n for combination in all_combinations:\n counter+=1\n searchKey = \"\"\n for i in combination:\n searchKey = searchKey + i.toStringWithTone().replace(\"None\",\"\") + \" \"\n if mode is \"simplified\":\n if searchKey.strip() in pinyin_to_simplified:\n candidates+=pinyin_to_simplified[searchKey.strip()]\n else:\n if searchKey.strip() in pinyin_to_traditional:\n candidates+=pinyin_to_traditional[searchKey.strip()]\n return candidates\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/dimsim/dimsim/dimsim.py","file_name":"dimsim.py","file_ext":"py","file_size_in_byte":14318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"421091108","text":"import http.client\nimport json\n\nclass Split_API_Request:\n def __init__(self, token, mode=\"sandbox\"):\n self.token = token\n\n if mode == \"sandbox\":\n self.base_url = \"api.sandbox.split.cash\"\n elif mode == \"production\":\n self.base_url = \"api.split.cash\"\n else:\n raise Exception(\"Not a valid mode.\")\n\n self.conn = http.client.HTTPSConnection(self.base_url)\n self.headers = {\n 'content-type': \"application/json\",\n 'accept': \"application/json\",\n 'authorization': \"Bearer {}\".format(self.token)\n }\n self.data = None\n\n def execute(self, endpoint, payload=None, method=\"GET\"):\n if payload != None:\n self.conn.request(method, endpoint, json.dumps(payload), headers=self.headers)\n else:\n self.conn.request(method, endpoint, headers=self.headers)\n res = self.conn.getresponse()\n self.data = res.read()\n\n def show_response(self, decode=False):\n if 
self.data != None:\n if decode:\n print(json.loads(self.data))\n self.data = None\n else:\n print(self.data)\n self.data = None\n else:\n print(\"No data. Could be due to not running execute command.\")\n","sub_path":"SplitTester.py","file_name":"SplitTester.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"99764595","text":"# This script is in the public domain and has no copyright\n#\n# CallAt/CallIn are simple utility classes that call a specified\n# function at a given point in the future.\n#\n\nimport simcore\nimport net.tinyos.sim.event\n\nclass CallAt:\n def __init__(self, when, callback, args = None):\n interruptID = simcore.interp.getInterruptID()\n\n def mycallback(interruptEvent):\n if (interruptEvent.get_id() != interruptID):\n return\n simcore.interp.removeEventHandler(self.eventID);\n if args != None:\n callback(args);\n else:\n callback();\n\n evclass = net.tinyos.sim.event.InterruptEvent\n self.eventID = simcore.interp.addEventHandler(mycallback, evclass);\n simcore.interp.interruptInFuture(when, interruptID)\n\n def cancel(self):\n simcore.interp.removeEventHandler(self.eventID)\n\nclass CallIn(CallAt):\n def __init__(self, delay, callback, args = None):\n when = simcore.sim.getTossimTime() + delay;\n CallAt.__init__(self, when, callback, args);\n\n\n\n","sub_path":"tinyos-1.x/tools/java/net/tinyos/sim/pyscripts/callat.py","file_name":"callat.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"265290755","text":"from decimal import *\nimport copy\n\nclass staggered_Pivoting:\n def __init__(self):\n self.original= []\n self.new = []\n self.total= []\n self.result=[]\n self.rows = []\n self.s = []\n getcontext().prec = 25\n\n def partial_staggered_algorithm(self,matrix,matrixb):\n if matrixb is None or len(matrixb) <= 0:\n self.result.append(\"No matrix b\")\n elif matrix is None or len(matrix) <= 0:\n self.result.append(\"No matrix a\")\n elif not self.check_diagonal(self.merge(matrix,matrixb)):\n self.result.append(\"No valid matrix\")\n else:\n #matrix=self.merge(matrix,matrixb)\n self.imprimirMatriz(matrix)\n self.original=copy.deepcopy(matrix)\n no_error=True\n mayor = []\n pMayor = 0\n vMayor = 0.0\n temp = []\n for i in range(len(matrix)):\n self.s.append(0)\n self.busquedaDelMayorDeCadaFila(matrix)\n for k in range(len(matrix)):\n for c in range(k, len(matrix), 1):\n mayor.append(abs(matrix[c][k] / self.s[c]))\n if vMayor <= abs(matrix[c][k] / self.s[c]): \n vMayor = abs(matrix[c][k] / self.s[c])\n pMayor = c\n if vMayor != 0:\n temp = matrix[pMayor]\n matrix[pMayor] = matrix[k]\n matrix[k] = temp\n for i in range(k+1, len(matrix), 1):\n M = matrix[i][k]/matrix[k][k]\n for j in range(len(matrix)+1):\n matrix[i][j] = matrix[i][j]- M*matrix[k][j] \n self.imprimirMatriz(matrix)\n vMayor = 0.0\n pMayor = 0\n else:\n self.result.append(\"No valid matrix or division by 0\")\n break\n self.imprimirMatriz(matrix)\n self.new = matrix\n if(self.check_diagonal(self.new)):\n self.variable_resolution()\n self.row_definition()\n else:\n self.result.append(\"No solutions or infinite solutions or Div 0\")\n\n self.imprimirMatriz(matrix)\n \n def check_diagonal(self,matrix):\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if(j==i):\n self.result.append(0)\n if(matrix[i][j]==0):\n print(\"false at value: \"+str(matrix[i][j]))\n return False\n return True\n\n def 
variable_resolution(self):\n i=len(self.new)-1\n while(i>=0):\n j=len(self.new[i])-1\n aux=0\n while(j>=0):\n if(j!=i):\n if(len(self.new[i])-1==j):\n self.result[i]=self.new[i][j]\n else:\n self.result[i]-=self.new[i][j]*self.result[j]\n else:\n aux=self.new[i][j]\n if(j==0):\n self.result[i]/=aux\n \n j-=1\n \n i-=1 \n def merge(self,matrix,matrixb):\n for i in range(0,len(matrix),1):\n print(i)\n print(\"matrix\" + str(matrix[i]))\n print(\"append\" + str(matrixb[0][i]))\n matrix[i].append(matrixb[0][i])\n return matrix\n\n def row_definition(self):\n self.rows = []\n for i in range(1,(len(self.new)+1)):\n self.rows.append(f\"x{i}\")\n self.rows.append(\"b\")\n\n def imprimirMatriz(self, A):\n for i in range(len(A)):\n print(A[i])\n\n def busquedaDelMayorDeCadaFila(self, A):\n for i in range(len(A)):\n for j in range(len(A)):\n if abs(A[i][j])> self.s[i]:\n self.s[i] = abs(A[i][j])\n\n def value_table(self):\n self.total = []\n print(\"*** NEW ***\"+str(len(self.new[0])))\n for i in range(len(self.new)):\n self.total.append([])\n for j in range(len(self.new[i])):\n self.total[i].append(Decimal(self.new[i][j]))\n print(\"*** TOTAL ***\"+str(len(self.total)))\n return self.total\n\n def get_results(self):\n results=\"\"\n aux=1\n for i in range(len(self.new)):\n results+=f\"X{aux}: \"+(str)(self.result[i])+\"\\n\"\n aux+=1\n return results\n\n#[2,1,1]\n#[4,-6,0]\n#[-2,7,2]\n#[5,-2,9]\n","sub_path":"Proyecto_Final/Systems_of_equations/staggered_Pivoting.py","file_name":"staggered_Pivoting.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"400642666","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-i686/egg/pocoo/pkg/webadmin/pages.py\n# Compiled at: 2006-12-26 17:18:01\n__doc__ = '\\n pocoo.pkg.webadmin.pages\\n ~~~~~~~~~~~~~~~~~~~~~~~~\\n\\n :copyright: 2006 by Armin Ronacher.\\n :license: GNU GPL, see LICENSE for more details.\\n'\nfrom pocoo.db import meta\nfrom pocoo.http import PageNotFound\nfrom pocoo.pkg.webadmin.base import AdminPage\nfrom pocoo.pkg.webadmin.forms import GeneralSettingsForm, CacheSettingsForm, SecuritySettingsForm, EmailSettingsForm, AvatarSettingsForm, SignatureSettingsForm, BoardSettingsForm, ForumEditForm, EditUserForm, AddUserForm\nfrom pocoo.pkg.core.textfmt import get_markup_formatters\nfrom pocoo.pkg.core.auth import get_auth_provider_mapping\nfrom pocoo.pkg.core.db import forums\nfrom pocoo.pkg.core.user import User\n\nclass GeneralSettings(AdminPage):\n __module__ = __name__\n category = 'settings'\n identifier = 'general'\n\n def get_title(self, req):\n _ = req.gettext\n return _('General')\n\n def get_description(self, req):\n _ = req.gettext\n return _('Manage general settings (serverpath, packages, database etc)')\n\n def get_admin_page(self, req):\n _ = req.gettext\n msg = None\n cfg = req.ctx.cfg\n auth_modules = [ (x, x) for x in get_auth_provider_mapping(self.ctx) ]\n auth_modules.sort(key=lambda x: x[0].lower())\n form = GeneralSettingsForm(req, self, 'POST', {'serverpath': cfg.get('general', 'serverpath', ''), 'packages': ('\\n').join(cfg.get('general', 'packages', [])), 'auth_module': cfg.get('general', 'auth_module', 'SessionAuth'), 'dburi': cfg.get('database', 'uri', '')})\n if req.method == 'POST':\n form.update(req.form, prefix='f_')\n if not form.has_errors:\n d = form.to_dict()\n 
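# 'packages' is stored as a list and 'dburi' lives under [database]; the remaining fields map onto [general]\n 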
cfg.set('general', 'packages', d.pop('packages').splitlines())\n cfg.set('database', 'uri', d.pop('dburi'))\n for (key, value) in d.iteritems():\n cfg.set('general', key, value)\n\n cfg.save()\n msg = _('Changes saved')\n return ('webadmin/settings/general.html', {'msg': msg, 'form': form.generate(prefix='f_')})\n\n\nclass CacheSettings(AdminPage):\n __module__ = __name__\n category = 'settings'\n identifier = 'caching'\n\n def get_title(self, req):\n _ = req.gettext\n return _('Caching')\n\n def get_description(self, req):\n _ = req.gettext\n return _('Manage the caching system')\n\n def get_admin_page(self, req):\n _ = req.gettext\n msg = None\n cfg = req.ctx.cfg\n form = CacheSettingsForm(req, self, 'POST', {'enabled': cfg.get_bool('cache', 'enabled', True), 'template_memcache': cfg.get_bool('cache', 'template_memcache', True), 'template_diskcache': cfg.get_bool('cache', 'template_diskcache', False), 'static_cache': cfg.get_bool('cache', 'static_cache', True)})\n if req.method == 'POST':\n form.update(req.form, prefix='f_')\n if not form.has_errors:\n d = form.to_dict()\n for (key, value) in d.iteritems():\n cfg.set('cache', key, value)\n\n cfg.save()\n msg = _('Changes saved')\n return ('webadmin/settings/cache.html', {'msg': msg, 'form': form.generate(prefix='f_')})\n\n\nclass SecuritySettings(AdminPage):\n __module__ = __name__\n category = 'settings'\n identifier = 'security'\n\n def get_title(self, req):\n _ = req.gettext\n return _('Security')\n\n def get_description(self, req):\n _ = req.gettext\n return _('Manage the security settings (user activation, password strength etc)')\n\n def get_admin_page(self, req):\n _ = req.gettext\n msg = None\n cfg = req.ctx.cfg\n form = SecuritySettingsForm(req, self, 'POST', {'password_strength': cfg.get('security', 'password_strength', '3'), 'activation_level': cfg.get('security', 'activation_level', '1'), 'username_change': cfg.get_bool('security', 'username_change', False)})\n if req.method == 'POST':\n form.update(req.form, prefix='f_')\n if not form.has_errors:\n d = form.to_dict()\n for (key, value) in d.iteritems():\n cfg.set('security', key, value)\n\n cfg.save()\n msg = _('Changes saved')\n return ('webadmin/settings/security.html', {'msg': msg, 'form': form.generate(prefix='f_')})\n\n\nclass EmailSettings(AdminPage):\n __module__ = __name__\n category = 'settings'\n identifier = 'email'\n\n def get_title(self, req):\n _ = req.gettext\n return _('Email')\n\n def get_description(self, req):\n _ = req.gettext\n return _('Manage the email server settings')\n\n def get_admin_page(self, req):\n _ = req.gettext\n msg = None\n cfg = req.ctx.cfg\n form = EmailSettingsForm(req, self, 'POST', {'admin_mails': ('\\n').join(cfg.get('general', 'admin_mails')), 'send_error_mails': cfg.get_bool('general', 'send_error_mails'), 'mail_host': cfg.get('email', 'host', 'localhost'), 'mail_user': cfg.get('email', 'user', ''), 'mail_pass': cfg.get('email', 'pass', ''), 'mail_prefix': cfg.get('email', 'prefix', '[pocoo] '), 'mail_suffix': cfg.get('email', 'suffix', ''), 'mail_signature': cfg.get('email', 'signature', 'Your pocoo team')})\n if req.method == 'POST':\n form.update(req.form, prefix='f_')\n if not form.has_errors:\n d = form.to_dict()\n cfg.set('general', 'admin_mails', d.pop('admin_mails'))\n cfg.set('general', 'send_error_mails', d.pop('send_error_mails'))\n for (key, value) in d.iteritems():\n cfg.set('email', key[5:], value)\n\n cfg.save()\n msg = _('Changes saved')\n return ('webadmin/settings/email.html', {'msg': msg, 'form': 
form.generate(prefix='f_')})\n\n\nclass AvatarSettings(AdminPage):\n __module__ = __name__\n category = 'settings'\n identifier = 'avatars'\n\n def get_title(self, req):\n _ = req.gettext\n return _('Avatars')\n\n def get_description(self, req):\n _ = req.gettext\n return _('Manage avatar related settings')\n\n def get_admin_page(self, req):\n _ = req.gettext\n msg = None\n cfg = req.ctx.cfg\n form = AvatarSettingsForm(req, self, 'POST', {'allow_avatars': cfg.get_bool('board', 'allow_avatars', True), 'avatar_dimension': cfg.get_int('board', 'avatar_dimension', 80)})\n if req.method == 'POST':\n form.update(req.form, prefix='f_')\n if not form.has_errors:\n d = form.to_dict()\n for (key, value) in d.iteritems():\n cfg.set('board', key, value)\n\n cfg.save()\n msg = _('Changes saved')\n return ('webadmin/settings/avatar.html', {'msg': msg, 'form': form.generate(prefix='f_')})\n\n\nclass SignatureSettings(AdminPage):\n __module__ = __name__\n category = 'settings'\n identifier = 'signature'\n\n def get_title(self, req):\n _ = req.gettext\n return _('Signature')\n\n def get_description(self, req):\n _ = req.gettext\n return _('Manage signature filters or disable signatures entirely')\n\n def get_admin_page(self, req):\n _ = req.gettext\n msg = None\n cfg = req.ctx.cfg\n form = SignatureSettingsForm(req, self, 'POST', {'signature_length': cfg.get_int('board', 'signature_length', 255), 'signature_lines': cfg.get_int('board', 'signature_lines', 3)})\n if req.method == 'POST':\n form.update(req.form, prefix='f_')\n if not form.has_errors:\n d = form.to_dict()\n for (key, value) in d.iteritems():\n cfg.set('board', key, value)\n\n cfg.save()\n msg = _('Changes saved')\n return ('webadmin/settings/signature.html', {'msg': msg, 'form': form.generate(prefix='f_')})\n\n\nclass BoardSettings(AdminPage):\n __module__ = __name__\n category = 'settings'\n identifier = 'board'\n\n def get_title(self, req):\n _ = req.gettext\n return _('Board')\n\n def get_description(self, req):\n _ = req.gettext\n return _('Manage forum settings like the board title, description, favicon, forum defaults etc.')\n\n def get_admin_page(self, req):\n _ = req.gettext\n msg = None\n cfg = req.ctx.cfg\n form = BoardSettingsForm(req, self, 'POST', {'title': cfg.get('board', 'title'), 'description': cfg.get('board', 'description'), 'logo': cfg.get('board', 'logo'), 'favicon': cfg.get('board', 'favicon'), 'redirecttime': cfg.get_int('board', 'redirecttime'), 'autologin': cfg.get_bool('board', 'autologin'), 'enable_coppa': cfg.get_bool('board', 'enable_coppa'), 'cookieexpire': cfg.get_int('board', 'cookieexpire'), 'cookiename': cfg.get('board', 'cookiename'), 'default_view': cfg.get('board', 'default_view'), 'posts_per_page': cfg.get_int('board', 'posts_per_page'), 'threads_per_page': cfg.get_int('board', 'threads_per_page'), 'syntax_parser': cfg.get('board', 'syntax_parser')})\n if req.method == 'POST':\n form.update(req.form, prefix='f_')\n if not form.has_errors:\n d = form.to_dict()\n for (key, value) in d.iteritems():\n cfg.set('board', key, value)\n\n cfg.save()\n msg = _('Changes saved')\n return ('webadmin/settings/board.html', {'msg': msg, 'form': form.generate(prefix='f_')})\n return\n\n\nclass ForumsSettings(AdminPage):\n __module__ = __name__\n category = 'forum'\n identifier = 'forums'\n\n def get_title(self, req):\n _ = req.gettext\n return _('Forums')\n\n def get_description(self, req):\n _ = req.gettext\n return _('Manage forums and categories')\n\n def get_admin_page(self, req):\n _ = req.gettext\n msg = None\n 
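# build one ForumEditForm per child forum of the currently selected parent\n 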
forum_list = []\n f = forums.c\n engine = self.ctx.engine\n forum_id = req.args.get('forum') or None\n this_forum = None\n this_url = self.url\n if forum_id:\n this_url += '?forum=%s' % forum_id\n if forum_id is not None:\n row = engine.execute(meta.select([f.name, f.parent_id], f.forum_id == forum_id)).fetchone()\n if row is None:\n return PageNotFound()\n parent_url = self.url\n if row['parent_id'] is not None:\n parent_url += '?forum=%s' % row['parent_id']\n this_forum = {'name': row['name'], 'forum_id': forum_id, 'url': self.ctx.make_url(self.url + '?forum=%s' % forum_id), 'parent_url': parent_url}\n for forum in engine.execute(meta.select([f.forum_id, f.name, f.description, f.position, f.link], f.parent_id == forum_id)):\n form = ForumEditForm(req, this_url, 'POST', dict(forum))\n if req.method == 'POST' and req.form.get('f_forum_id') == unicode(forum['forum_id']):\n form.update(req.form, prefix='f_')\n if not form.has_errors:\n engine.execute(forums.update(f.forum_id == forum['forum_id']), **form.to_dict())\n msg = _('Saved changes')\n else:\n msg = _('Could not save changes, see the error messages below')\n d = form.generate(prefix='f_')\n d['delete_url'] = self.ctx.make_url(self.url + '?delete=%s' % forum['forum_id'])\n d['switch_url'] = self.ctx.make_url(self.url + '?forum=%s' % forum['forum_id'])\n try:\n d['form_position'] = int(d['position']['value'])\n except ValueError:\n d['form_position'] = forum['position']\n\n forum_list.append(d)\n\n forum_list.sort(key=lambda x: x['form_position'])\n add_form = ForumEditForm(req, self, 'POST', {'forum_id': 0})\n return (\n 'webadmin/settings/forums.html', {'msg': msg, 'forums': forum_list, 'forum': this_forum, 'add_form': add_form.generate(prefix='f_')})\n\n\nclass EditUser(AdminPage):\n __module__ = __name__\n category = 'user'\n identifier = 'edituser'\n\n def get_title(self, req):\n _ = req.gettext\n return _('Edit User')\n\n def get_description(self, req):\n _ = req.gettext\n return _(\"Alter a user's settings\")\n\n def get_admin_page(self, req):\n _ = req.gettext\n user = new_username = msg = None\n if req.method == 'POST':\n if req.form.get('f_user_id'):\n user = User(req.ctx, int(req.form['f_user_id']))\n else:\n try:\n user = User.by_name(req.ctx, req.form['f_search_username'])\n except User.NotFound:\n form = EditUserForm(req, self, 'POST', {})\n form.update(req.form, prefix='f_')\n return (\n 'webadmin/settings/edituser.html', {'user': None, 'form': form.generate(prefix='f_')})\n\n get_setting = lambda x: user.profile.get(x, '')\n form = EditUserForm(req, self, 'POST', {'user_id': user.user_id, 'username': user.username, 'email': user.email, 'new_password': '', 'show_email': user.settings.get('show_email') or False, 'aol': get_setting('aol'), 'icq': get_setting('icq'), 'jabber': get_setting('jabber'), 'msn': get_setting('msn'), 'yahoo': get_setting('yahoo'), 'website': get_setting('website'), 'interests': get_setting('interests')})\n form.update(req.form, prefix='f_')\n if not form.has_errors and req.form.get('f_user_id'):\n msg = _('Saved changes')\n d = form.to_dict()\n d.pop('user_id')\n new_username = d.pop('username')\n if new_username != d.pop('search_username'):\n user.username = new_username\n user.email = d.pop('email')\n new_password = d.pop('new_password')\n if new_password:\n user.set_password(new_password)\n user.settings.update({'show_email': d.pop('show_email')})\n user.profile.update(d)\n user.save()\n else:\n form = EditUserForm(req, self, 'POST', {})\n f = form.generate(prefix='f_')\n 
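# if the admin just renamed the user, mirror the new name into the search field\n 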
f['search_username']['value'] = new_username or f['search_username']['value']\n return (\n 'webadmin/settings/edituser.html', {'user': user and {'user_id': user.user_id} or False, 'form': f, 'msg': msg})\n\n\nclass AddUser(AdminPage):\n __module__ = __name__\n category = 'user'\n identifier = 'adduser'\n\n def get_title(self, req):\n _ = req.gettext\n return _('New User')\n\n def get_description(self, req):\n _ = req.gettext\n return _('Create a new user')\n\n def get_admin_page(self, req):\n _ = req.gettext\n msg = None\n form = AddUserForm(req, self, 'POST', {})\n if req.method == 'POST':\n form.update(req.form, prefix='f_')\n if not form.has_errors:\n d = form.to_dict()\n user = User.create(req.ctx, d.pop('username'), d.pop('new_password'), d.pop('email'), False)\n user.settings.update({'show_email': d.pop('show_email')})\n user.profile.update(d)\n user.save()\n form = AddUserForm(req, self, 'POST', {})\n msg = _('The user was successfully created.')\n return ('webadmin/settings/adduser.html', {'form': form.generate(prefix='f_'), 'msg': msg})","sub_path":"pycfiles/pocoui-1.0.79.tar/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":15786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"617286447","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.template import loader\n# Create your views here.\nfrom .models import Posts\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\n\ndef index(request, page='1'):\n if request.method == 'POST':\n if len(request.POST['comment']) > 140:\n return HttpResponseBadRequest(\"400 Error: BAD INPUT\")\n Posts.objects.create(text=request.POST['comment'])\n return redirect('/')\n latest_posts = Posts.objects.order_by('-date_time')[:]\n hasNextPage = 0\n if len(latest_posts) - (int(page) * 20) > 0:\n hasNextPage = 1\n template = loader.get_template('django_squawker/index.html')\n context = {\n 'greeting_list': latest_posts[(int(page) - 1) * 20:int(page) * 20],\n 'next_num': int(page) + 1,\n 'hasNextPage': hasNextPage,\n }\n return HttpResponse(template.render(context, request))\n","sub_path":"django_squawker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"596207522","text":"import gdal, ogr, osr\nimport os\nimport sys\n'''\nCreated on Apr 23, 2017\n\n@author: simulant\n'''\n\n#@profile\ndef rrl(file_name, data_type=\"f4\", raster_band=1, return_srs=False):\n #read raster layer and return array\n return read_raster_layer(file_name, data_type, raster_band\n ,return_srs)\n \n \n#@profile\ndef read_raster_layer(file_name, data_type=\"f4\", raster_band=1\n , return_srs=False):\n #read raster layer and return array\n print(\"Reading: %s\" % file_name)\n ds = gdal.Open(file_name)\n band = ds.GetRasterBand(raster_band)\n print (\" Got RasterBand\")\n arr = band.ReadAsArray().astype(data_type)\n \n geotransform_obj = ds.GetGeoTransform()\n epsg = None\n try:\n srs = osr.SpatialReference(wkt=ds.GetProjection())\n if srs.IsProjected:\n geogcs = srs.GetAttrValue(\"GEOGCS\") \n epsg = int(srs.GetAttrValue(\"GEOGCS|AUTHORITY\",1))\n except Exception as e:\n print(e)\n \n print (\" Done!\")\n \n if return_srs == True:\n return (arr, geotransform_obj, epsg)\n else: \n return (arr, geotransform_obj)\n 
\n","sub_path":"cm/app/api_v1/my_calculation_module_directory/CM/__delete_if_tested__/CEDM/modules/Subfunctions.py","file_name":"Subfunctions.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"340256533","text":"import speech_recognition as sr\nimport datetime\nimport webbrowser\nimport time\nfrom gtts import gTTS\nimport playsound\nimport os\nimport random\nimport pyowm\n\nr = sr.Recognizer()\nnow = datetime.datetime.now()\ncity = 'Mykolaiv, Ukraine' # need to create func that will detect location\nowm_api_key = open(\"owm_api.txt\", 'r')\nowm = pyowm.OWM(owm_api_key.readline())\nmgr = owm.weather_manager()\nobservation = mgr.weather_at_place(city)\nw = observation.weather\ntemp = w.temperature('celsius')['temp']\n\n\ndef words_exists(options):\n for opt in options:\n if opt in voice_data:\n return True\n\n\ndef record_audio(ask=False):\n with sr.Microphone(device_index=1) as source:\n r.adjust_for_ambient_noise(source)\n if ask:\n fox_speak(ask)\n audio = r.listen(source)\n voice_data = ''\n try:\n voice_data = r.recognize_google(audio)\n except sr.UnknownValueError:\n fox_speak(\"Sorry, didn't get that\")\n except sr.RequestError:\n fox_speak(\"Check your Internet connection\")\n print(f\">> {voice_data.lower()}\")\n return voice_data\n\n\ndef fox_speak(audio_string):\n tts = gTTS(text=audio_string, lang='en')\n ran = random.randint(1, 10000000)\n audio_file = 'audio-' + str(ran) + '.mp3'\n tts.save(audio_file)\n playsound.playsound(audio_file)\n print(audio_string)\n os.remove(audio_file)\n\n\ndef respond(voice_data):\n\n if words_exists(['what is your name', 'please remember your name', 'your name is', 'remember your name']):\n fox_speak(\"My name is Fox\")\n\n if words_exists(['what time is now', 'current time', 'what time', 'whats the time now']):\n fox_speak(\"Now \" + str(now.hour) + \" hours \" + str(now.minute) + \" minutes\")\n\n if words_exists(['what date is today', 'current date']):\n fox_speak(\"Today is \" + str(now.day) + \" of \" + str(now.strftime(\"%B\")))\n\n if words_exists(['search', 'search in google']):\n search = record_audio(\"What do you want to search\")\n url = \"https://google.com/search?q=\" + search\n webbrowser.get().open(url)\n fox_speak(\"Here is what i found for \" + search)\n\n if words_exists(['play music', 'turn music']):\n fox_speak('I know some cool mixes on youtube')\n urls = [\n 'https://www.youtube.com/watch?v=5qap5aO4i9A',\n 'https://www.youtube.com/watch?v=DWcJFNfaw9c',\n 'https://www.youtube.com/watch?v=5yx6BWlEVcY',\n 'https://www.youtube.com/watch?v=7NOSDKb0HlU'\n ]\n music_choice = urls[random.randint(0, len(urls) - 1)]\n webbrowser.get().open(music_choice)\n fox_speak('Hope you will enjoy')\n\n if words_exists(['find location', 'find the location', 'i need location']):\n location = record_audio(\"What's the location\")\n url = \"https://google.nl/maps/place/\" + location + '/&'\n webbrowser.get().open(url)\n fox_speak(\"Here is the location \" + location)\n\n if words_exists(['how are you', 'what\\'s up', 'whats up', 'how you doing', 'what\\'s new', 'whats new']):\n answers = [\"I am fine and always ready to work. Let\\'s start\",\n \"I'm just wonderful. I missed you very much\",\n \"Excellent, I am rested and ready to work\",\n \"Cool. 
Today is a great day\"\n ]\n answer = answers[random.randint(0, len(answers)-1)]\n fox_speak(answer)\n\n if words_exists(['hi fox', 'hello fox']):\n answers = [\"I am here master\",\n \"I am still here and ready to work\",\n \"I already thought you wouldn't call me anymore\",\n \"Have an idea?. I'am in\"\n ]\n answer = answers[random.randint(0, len(answers)-1)]\n fox_speak(answer)\n\n if words_exists([\"what's the weather\", \"weather now is\"]):\n fox_speak('In ' + city + ' now is ' + str(round(temp, 1)) + ' degrees celsius')\n owm_api_key.close()\n\n if words_exists([\"f***\", \"b****\", \"w****\"]):\n fox_speak(\"I do not react to this\")\n\n if words_exists(['exit', 'finish', 'end of the work', 'goodbye']):\n fox_speak(\"See you soon, master\")\n exit()\n\n\ndef launch_fox(): # this func is for introduction with user\n fox_speak(\"Hello sir. What we gonna do?\")\n\n\ntime.sleep(1)\nlaunch_fox()\nwhile True:\n voice_data = record_audio()\n respond(voice_data)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"122146815","text":"import sys\nimport time\nimport networkx as nx\nfrom pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql import functions\nfrom graphframes import *\nfrom copy import deepcopy\n\nsc=SparkContext(\"local\", \"degree.py\")\nsqlContext = SQLContext(sc)\n\ndef articulations(g, usegraphframe=False):\n\t# Get the starting count of connected components\n\t# YOUR CODE HERE\n\tconnected_component_count = g.connectedComponents().select('component').distinct().count()\n\t# Default version sparkifies the connected components process \n\t# and serializes node iteration.\n\tif usegraphframe:\n\t\t# Get vertex list for serial iteration\n\t\t# YOUR CODE HERE\n\t\tvertices = g.vertices.map(lambda row: row.id).collect()\n\t\t# For each vertex, generate a new graphframe missing that vertex\n\t\t# and calculate connected component count. 
Then append count to\n\t\t\t# the output\n\t\t# YOUR CODE HERE\n\t\tcomponents = []\n\t\tfor vertex in vertices:\n\t\t\tv_list = g.vertices.filter('id !=\"'+vertex+'\"')\n\t\t\tedge_list = g.edges.filter('src!=\"'+vertex+'\"').filter('dst!=\"'+vertex+'\"')\n\t\t\tnew_g = GraphFrame(v_list, edge_list)\n\t\t\tnew_g_count = new_g.connectedComponents().select('component').distinct().count()\n\t\t\tcomponents.append([vertex, 1] if new_g_count>connected_component_count else [vertex, 0])\n\n\t\tarticulations_df = sqlContext.createDataFrame(components, ['id', 'articulation'])\n\t\treturn articulations_df\t\n\t# Non-default version sparkifies node iteration and uses networkx \n\t# for connected components count.\n\telse:\n\t\t# YOUR CODE HERE\n\t\tnxg = nx.Graph()\n\t\tvertices = g.vertices.map(lambda row: row.id).collect()\n\t\tedges = g.edges.map(lambda row: (row.src, row.dst)).collect()\n\t\tnxg.add_nodes_from(vertices)\n\t\tnxg.add_edges_from(edges)\n\t\tcomponents = []\n\t\tfor vertex in vertices:\n\t\t\tcopy_nxg = deepcopy(nxg)\n\t\t\tcopy_nxg.remove_node(vertex)\n\t\t\tc_count = nx.number_connected_components(copy_nxg)\n\t\t\tcomponents.append([vertex, 1] if c_count>connected_component_count else [vertex, 0])\n\t\t\n\t\tarticulations_df = sqlContext.createDataFrame(components, ['id', 'articulation'])\n\t\treturn articulations_df\t\n\n\t\t\n\nfilename = sys.argv[1]\nlines = sc.textFile(filename)\n\npairs = lines.map(lambda s: s.split(\",\"))\ne = sqlContext.createDataFrame(pairs,['src','dst'])\ne = e.unionAll(e.selectExpr('src as dst','dst as src')).distinct() # Ensure undirectedness \t\n\n# Extract all endpoints from input file and make a single column frame.\nv = e.selectExpr('src as id').unionAll(e.selectExpr('dst as id')).distinct()\t\n\n# Create graphframe from the vertices and edges.\ng = GraphFrame(v,e)\n\n#Runtime approximately 5 minutes\nprint(\"---------------------------\")\nprint(\"Processing graph using Spark iteration over nodes and serial (networkx) connectedness calculations\")\ninit = time.time()\ndf = articulations(g, False)\nprint(\"Execution time: %s seconds\" % (time.time() - init))\nprint(\"Articulation points:\")\ndf.filter('articulation = 1').show(truncate=False)\ndf.filter('articulation = 1').toPandas().to_csv('articulation_out.csv')\nprint(\"---------------------------\")\n\n#Runtime for below is more than 2 hours\nprint(\"Processing graph using serial iteration over nodes and GraphFrame connectedness calculations\")\ninit = time.time()\ndf = articulations(g, True)\nprint(\"Execution time: %s seconds\" % (time.time() - init))\nprint(\"Articulation points:\")\ndf.filter('articulation = 1').show(truncate=False)\ndf.filter('articulation = 1').toPandas().to_csv('articulation_out2.csv')\n","sub_path":"Network-Properties-with-Spark/articulation.py","file_name":"articulation.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"531698044","text":"import time\n\nclass Reminder:\n\n def set_reminder(self):\n reminder = str(input(\"What shall i remind you about??\"))\n set = float(input(\"Enter the time interval (in mins): \"))\n set = set*60\n while (True):\n time.sleep(set)\n print(reminder)\n stop = int(input(\"Do you wish to stop the reminder?? 
\\n 1: Yes \\n 0: No \\n\"))\n if stop == 1:\n print(\"Thank you\")\n break\n\n\n\n\n\n\n\n\n\nreminder = Reminder()\nreminder.set_reminder()\n\n","sub_path":"remind.py","file_name":"remind.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"379345852","text":"from lhereader import readLHEF\nfrom ROOT import TCanvas, TH1F, TH2F\nimport math\ndata01 =readLHEF('FFopt1/unweighted_events.lhe')\ndata03 =readLHEF('FFopt2/unweighted_events.lhe')\n\nelectrons01=data01.getParticlesByIDs([11,-11])\nelectrons02=data03.getParticlesByIDs([11,-11])\n\nc=TCanvas()\nc.Divide(2,2)\nhist_4mom_diff_opt1=TH1F(\"#Delta q^{2} in opt1\", \"Outgoing - Incoming Electron q^{2} opt1\", 100,0,4.5)\nhist_4mom_diff_opt2=TH1F(\"#Delta q^{2} in opt2\", \"Outgoing - Incoming Electron q^{2} opt2\", 100,0,4.5)\nhist_4momRatio=TH1F(\"q^{2} out opt1/opt2\", \"Ratio q^{2} \", 100,0,4.5)\n\nfor e in electrons01:\n squared_4mom_in = 0\n squared_4mom_out = 0\n if e.status is -1 :\n print('incoming', e.eventid)\n squared_4mom_in = (e.px*e.px + e.py*e.py+e.pz*e.pz+e.energy*e.energy)\n if e.status is 1:\n print('outgoing', e.eventid)\n squared_4mom_out = (e.px*e.px + e.py*e.py+e.pz*e.pz+e.energy*e.energy)\n hist_4mom_diff_opt1.Fill(squared_4mom_out-squared_4mom_in)\n\nfor e in electrons02:\n squared_4mom_in = 0\n squared_4mom_out = 0\n if e.status is -1 :\n print('incoming', e.eventid)\n squared_4mom_in = (e.px*e.px + e.py*e.py+e.pz*e.pz+e.energy*e.energy)\n if e.status is 1:\n print('outgoing', e.eventid)\n squared_4mom_out = (e.px*e.px + e.py*e.py+e.pz*e.pz+e.energy*e.energy)\n hist_4mom_diff_opt2.Fill(squared_4mom_out-squared_4mom_in)\n\n\nc.cd(1)\nhist_4mom_diff_opt1.GetXaxis().SetTitle(\"#Delta q^{2}\")\nhist_4mom_diff_opt1.Draw()\nc.cd(2)\nhist_4mom_diff_opt2.GetXaxis().SetTitle(\"#Delta q^{2}\")\nhist_4mom_diff_opt2.Draw()\nfor i in range(0,hist_4mom_diff_opt2.GetNbinsX()):\n if hist_4mom_diff_opt2.GetBinContent(i) !=0 :\n ratio = hist_4mom_diff_opt1.GetBinContent(i)/hist_4mom_diff_opt2.GetBinContent(i)\n hist_4momRatio.SetBinContent(i,ratio)\nc.cd(3)\nhist_4momRatio.Draw()\n#hist_4momRatio.Draw()\nc.SaveAs(\"compare4mom.png\")\n","sub_path":"compare4mom.py","file_name":"compare4mom.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"139797652","text":"from torchvision.utils import make_grid, save_image\nseed = 2019\n# import\n# system libraries\nimport os, sys, glob\nimport os.path as osp\nimport time\nimport shutil\nimport numpy as np\nnp.random.seed(seed)\nfrom PIL import Image\nimport gc\nfrom collections import OrderedDict\n# import GPUtil\nimport pickle\nimport tqdm\n\nimport torch\nimport torchvision.transforms as transforms\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed_all(seed)\n\n# libraries within this package\nfrom cmd_args import parse_args\nfrom utils.tools import *\nfrom utils.visualizer import Visualizer\nfrom utils.util import print_param_info, save_model\nimport datasets\nimport models\n\n# FID\nfrom scipy import linalg\nfrom scipy.misc import imread\nfrom torch.nn.functional import adaptive_avg_pool2d\nfrom inception import InceptionV3\nfrom fid_score import calculate_frechet_distance\n\n# LPIPS\nfrom PerceptualSimilarity import dist_model as dm\n\n# SSIM\nfrom skimage.measure import compare_ssim\n\n# VGGFACE 2\nfrom models.resnet import resnet50\nfrom verification import KFold, eval_acc, 
find_best_threshold\nfrom sklearn import metrics\n\nFID_DIMS = 2048\nFID_DIMS_VGGFACE = 2048\n\n# FUNCTIONS, ignore\n\ndef alignment(src_pts):\n # TODO: different processing in Jason's code, here, and the paper (paper concatenate multi-scale cropping)\n\n ref_pts = [[30.2946, 51.6963], [65.5318, 51.5014],\n [48.0252, 71.7366], [33.5493, 92.3655], [62.7299, 92.2041]]\n # crop_size = (96, 112)\n\n s = np.array(src_pts).astype(np.float32)\n r = np.array(ref_pts).astype(np.float32)\n\n s = s / 125. - 1.\n r[:, 0] = r[:, 0] / 48. - 1\n r[:, 1] = r[:, 1] / 56. - 1\n\n all_tfms = np.empty((s.shape[0], 2, 3), dtype=np.float32)\n for idx in range(s.shape[0]):\n all_tfms[idx, :, :] = models.get_similarity_transform_for_cv2(r, s[idx, ...])\n all_tfms = torch.from_numpy(all_tfms).to(torch.device('cuda:0'))\n return all_tfms\n\ndef unsigned_long_to_binary_repr(unsigned_long, passwd_length):\n batch_size = unsigned_long.shape[0]\n target_size = passwd_length // 4\n\n binary = np.empty((batch_size, passwd_length), dtype=np.float32)\n for idx in range(batch_size):\n binary[idx, :] = np.array([int(item) for item in bin(unsigned_long[idx])[2:].zfill(passwd_length)])\n\n dis_target = np.empty((batch_size, target_size), dtype=np.long)\n for idx in range(batch_size):\n tmp = unsigned_long[idx]\n for byte_idx in range(target_size):\n dis_target[idx, target_size - 1 - byte_idx] = tmp % 16\n tmp //= 16\n return binary, dis_target\n\ndef generate_code(passwd_length, batch_size, device, inv):\n unsigned_long = np.random.randint(0, 2 ** passwd_length, size=(batch_size,), dtype=np.uint64)\n binary, dis_target = unsigned_long_to_binary_repr(unsigned_long, args.passwd_length)\n z = torch.from_numpy(binary).to(device)\n dis_target = torch.from_numpy(dis_target).to(device)\n\n repeated = True\n while repeated:\n rand_unsigned_long = np.random.randint(0, 2 ** passwd_length, size=(batch_size,), dtype=np.uint64)\n repeated = np.any(unsigned_long - rand_unsigned_long == 0)\n rand_binary, rand_dis_target = unsigned_long_to_binary_repr(rand_unsigned_long, args.passwd_length)\n rand_z = torch.from_numpy(rand_binary).to(device)\n rand_dis_target = torch.from_numpy(rand_dis_target).to(device)\n\n if not inv:\n if args.use_minus_one:\n z = (z - 0.5) * 2\n rand_z = (rand_z - 0.5) * 2\n\n return z, dis_target, rand_z, rand_dis_target\n else:\n inv_unsigned_long = 2 ** args.passwd_length - 1 - unsigned_long\n inv_binary, inv_dis_target = unsigned_long_to_binary_repr(inv_unsigned_long, args.passwd_length)\n\n inv_z = torch.from_numpy(inv_binary).to(device)\n inv_dis_target = torch.from_numpy(inv_dis_target).to(device)\n\n repeated = True\n while repeated:\n another_rand_unsigned_long = np.random.randint(0, 2 ** passwd_length, size=(batch_size,), dtype=np.uint64)\n repeated = np.any(inv_unsigned_long - another_rand_unsigned_long == 0)\n another_rand_binary, another_rand_dis_target = unsigned_long_to_binary_repr(another_rand_unsigned_long,\n args.passwd_length)\n another_rand_z = torch.from_numpy(another_rand_binary).to(device)\n another_rand_dis_target = torch.from_numpy(another_rand_dis_target).to(device)\n\n if args.use_minus_one:\n z = (z - 0.5) * 2\n rand_z = (rand_z - 0.5) * 2\n inv_z = z * -1.\n another_rand_z = (another_rand_z - 0.5) * 2\n\n return z, dis_target, rand_z, rand_dis_target, \\\n inv_z, inv_dis_target, another_rand_z, another_rand_dis_target\n\n\n\n\nTRAIN_MODE = True\n\n# main\nargs = parse_args(sys.argv[1])\nargs.old_ckpt_dir = osp.dirname(sys.argv[1])\nprint('args.old_ckpt_dir', args.old_ckpt_dir)\nargs.evaluate 
= True\n\nargs.resume = osp.join(args.old_ckpt_dir, args.ckpt_name)\n\nif not '_' in args.ckpt_name:\n list_of_files = glob.glob(osp.join(args.old_ckpt_dir, 'checkpoint_*'))\n if len(list_of_files) == 0:\n args.ckpt_epoch = 'last'\n args.ckpt_iter = 'iter_last'\n else:\n args.ckpt_name = osp.basename(max(list_of_files, key=os.path.getctime))\n print('found args.ckpt_name', args.ckpt_name)\n args.ckpt_epoch, args.ckpt_iter = args.ckpt_name.split('.')[0].split('_')[1:]\nelse:\n args.ckpt_epoch, args.ckpt_iter = args.ckpt_name.split('.')[0].split('_')[1:]\n\nargs.name = 'test_epoch_' + args.ckpt_epoch + '_' + args.ckpt_iter + '_' + args.name\n# CHANGE ckpt_dir to new one!\nargs.ckpt_dir = osp.join(args.old_ckpt_dir, args.name)\nargs.display_id = -1\n\nargs.gpu_ids = list(range(len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))))\nargs.device = torch.device('cuda:0') if args.gpu_ids else torch.device('cpu')\nargs.batch_size = args.batch_size // 4 * len(args.gpu_ids)\n\nos.makedirs(args.ckpt_dir, mode=0o777, exist_ok=True)\nvisualizer = Visualizer(args)\n# visualizer.logger.log('sys.argv:\\n' + ' '.join(sys.argv))\nfor arg in sorted(vars(args)):\n visualizer.logger.log('{:20s} {}'.format(arg, getattr(args, arg)))\nvisualizer.logger.log('')\n\n# -------------------- code copy --------------------\n# copy config yaml\nshutil.copyfile(sys.argv[1], osp.join(args.ckpt_dir, osp.basename(sys.argv[1])))\n\n# repo_basename = osp.basename(osp.dirname(osp.abspath('.')))\n# repo_path = osp.join(args.ckpt_dir, repo_basename)\n# os.makedirs(repo_path, mode=0o777, exist_ok=True)\n#\n# walk_res = os.walk('.')\n# useful_paths = [path for path in walk_res if\n# '.git' not in path[0] and\n# 'checkpoints' not in path[0] and\n# 'configs' not in path[0] and\n# '__pycache__' not in path[0] and\n# 'tee_dir' not in path[0] and\n# 'tmp' not in path[0]]\n# # print('useful_paths', useful_paths)\n# for p in useful_paths:\n# for item in p[-1]:\n# if not (item.endswith('.py') or item.endswith('.c') or item.endswith('.h') or item.endswith('.md')):\n# continue\n# old_path = osp.join(p[0], item)\n# new_path = osp.join(repo_path, p[0][2:], item)\n# basedir = osp.dirname(new_path)\n# os.makedirs(basedir, mode=0o777, exist_ok=True)\n# shutil.copyfile(old_path, new_path)\nshutil.copyfile(args.resume, osp.join(args.ckpt_dir, 'model_used.pth.tar'))\n\n# -------------------- dataset & loader --------------------\ntest_dataset = datasets.__dict__[args.dataset](\n train=False,\n transform=transforms.Compose([\n transforms.Resize(args.imageSize, Image.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))\n ]),\n args=args\n)\n\nvisualizer.logger.log('test_dataset: ' + str(test_dataset))\n\n# TODO: modify here for the visualization\nif len(test_dataset) < 100:\n visualizer.logger.log('test img paths:')\n for anno in test_dataset.raw_annotations:\n visualizer.logger.log('%s %d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f' % (\n anno[0], anno[1], anno[2], anno[3], anno[4], anno[5], anno[6], anno[7], anno[8], anno[9], anno[10], anno[11]))\n visualizer.logger.log('')\n\ntest_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.workers,\n pin_memory=True,\n worker_init_fn=lambda x: np.random.seed((torch.initial_seed()) % (2 ** 32))\n)\n\nif not args.evaluate:\n args.evaluate = True\n _test_dataset = datasets.__dict__[args.dataset](\n train=False,\n transform=transforms.Compose([\n transforms.Resize(args.imageSize, Image.BICUBIC),\n 
transforms.ToTensor(),\n            transforms.Normalize((0.5, 0.5, 0.5),\n                                 (0.5, 0.5, 0.5))\n        ]),\n        args=args\n    )\n    args.evaluate = False\n    _test_loader = torch.utils.data.DataLoader(\n        _test_dataset,\n        batch_size=args.batch_size,\n        shuffle=False,\n        num_workers=args.workers,\n        pin_memory=True,\n        worker_init_fn=lambda x: np.random.seed((torch.initial_seed()) % (2 ** 32))\n    )\nelse:\n    _test_dataset = test_dataset\n    _test_loader = test_loader\n\n\nmodel_dict = {}\n\nif 'with_noise' in args.which_model_netG or args.lambda_dis == 0.:\n    G_input_nc = args.input_nc\nelse:\n    G_input_nc = args.input_nc + args.passwd_length\nmodel_dict['G'] = models.define_G(G_input_nc, args.output_nc,\n                                  args.ngf, args.which_model_netG, args.n_downsample_G,\n                                  args.norm, not args.no_dropout,\n                                  args.init_type, args.init_gain,\n                                  args.gpu_ids,\n                                  args.passwd_length,\n                                  use_leaky=args.use_leakyG,\n                                  use_resize_conv=args.use_resize_conv)\nmodel_dict['G_nets'] = [model_dict['G']]\n\nif args.resume:\n    if osp.isfile(args.resume):\n        print(\"=> loading checkpoint '{}'\".format(args.resume))\n        checkpoint = torch.load(args.resume, map_location='cpu')\n        args.start_epoch = checkpoint['epoch'] + 1\n\n        name = 'G'\n        net = model_dict[name]\n        if isinstance(net, torch.nn.DataParallel):\n            net = net.module\n        net.load_state_dict(checkpoint['state_dict_' + name])\n\n        print(\"=> loaded checkpoint '{}' (epoch {})\"\n              .format(args.resume, checkpoint['epoch']))\n    else:\n        print(\"=> no checkpoint found at '{}'\".format(args.resume))\n    gc.collect()\n    torch.cuda.empty_cache()\n\ntorch.backends.cudnn.benchmark = True\n\nif TRAIN_MODE:\n    model_dict['G'].train()\nelse:\n    model_dict['G'].eval()\n\nvggface2_model = resnet50(num_classes=8631, include_top=False)\nwith open('pretrained_models/resnet50_ft_weight.pkl', 'rb') as f:\n    obj = f.read()\nweights = {key: torch.from_numpy(arr) for key, arr in pickle.loads(obj, encoding='latin1').items()}\nvggface2_model.load_state_dict(weights)\nvggface2_model.to(args.gpu_ids[0])\nvggface2_model = torch.nn.DataParallel(vggface2_model, args.gpu_ids)\nvggface2_model.eval()\n\n\nblock_idx = InceptionV3.BLOCK_INDEX_BY_DIM[FID_DIMS]\nfid_model = InceptionV3([block_idx], normalize_input=False)\nfid_model.to(args.gpu_ids[0])\nfid_model = torch.nn.DataParallel(fid_model, args.gpu_ids)\nfid_model.eval()\n\n\nlpips_model = dm.DistModel()\nlpips_model.initialize(model='net-lin',net='alex',use_gpu=True)\n\n\n# # one hot code, not used yet\n# a = np.array([1, 2, 4, 8, 16, 32, 64, 128]).astype(np.uint64)\n# z_list = []\n# for i in range(8):\n#     rolled = np.roll(a, i)\n#\n#     one_hot_passwords = np.tile(rolled, 2)\n#     binary_passwords, dis_target = unsigned_long_to_binary_repr(one_hot_passwords, args.passwd_length)\n#     z = torch.from_numpy(binary_passwords).to(args.device)\n#     z_list.append(z)\n\n\nnum_test_imgs = len(test_dataset) // args.batch_size * args.batch_size\n_num_test_imgs = len(_test_dataset) // args.batch_size * args.batch_size\nvisualizer.logger.log('# test imgs ' + str(num_test_imgs))\n\nreal_pred_arr = np.empty((num_test_imgs, FID_DIMS))\nfake_pred_arr = np.empty((num_test_imgs, FID_DIMS))\nrecon_pred_arr = np.empty((num_test_imgs, FID_DIMS))\nwrong_pred_arr = np.empty((num_test_imgs, FID_DIMS))\n\nreal_pred_arr_aligned = np.empty((num_test_imgs, FID_DIMS))\nfake_pred_arr_aligned = np.empty((num_test_imgs, FID_DIMS))\nrecon_pred_arr_aligned = np.empty((num_test_imgs, FID_DIMS))\nwrong_pred_arr_aligned = np.empty((num_test_imgs, FID_DIMS))\n\nreal_pred_arr_vggface = np.empty((num_test_imgs, FID_DIMS_VGGFACE))\nfake_pred_arr_vggface = 
np.empty((num_test_imgs, FID_DIMS_VGGFACE))\nrecon_pred_arr_vggface = np.empty((num_test_imgs, FID_DIMS_VGGFACE))\nwrong_pred_arr_vggface = np.empty((num_test_imgs, FID_DIMS_VGGFACE))\n\nreal_positive_arr = np.empty((_num_test_imgs))\nreal_negative_arr = np.empty((_num_test_imgs))\nreal_fake_arr = np.empty((num_test_imgs))\nreal_recon_arr = np.empty((num_test_imgs))\nreal_wrong_recon_arr = np.empty((num_test_imgs))\n\ncriterion_L1 = nn.L1Loss().cuda()\ncriterion_L2 = nn.MSELoss().cuda()\n\nLPIPS = AverageMeter()\nL1 = AverageMeter()\nL2 = AverageMeter()\nDSSIM = AverageMeter()\n\n\ndef tensor2im(image_tensor, cent=1., factor=255./2.):\n# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.):\n image_numpy = image_tensor.cpu().float().numpy()\n image_numpy = (np.transpose(image_numpy, (0, 2, 3, 1)) + cent) * factor\n return image_numpy\n\nwith torch.no_grad():\n for i, (img, label, landmarks, img_path, a_img, p_img, n_img) in tqdm.tqdm(enumerate(_test_loader)):\n if img.size(0) != args.batch_size:\n continue\n\n img_cuda = img.cuda()\n a_img_cuda = a_img.cuda()\n p_img_cuda = p_img.cuda()\n n_img_cuda = n_img.cuda()\n\n start = i * args.batch_size\n end = start + args.batch_size\n\n # face verification\n real_feature = vggface2_model(a_img_cuda).squeeze()\n positive_feature = vggface2_model(p_img_cuda).squeeze()\n negative_feature = vggface2_model(n_img_cuda).squeeze()\n\n cosdistance_real_positive = (real_feature * positive_feature).sum(dim=1) / (real_feature.norm(dim=1) * positive_feature.norm(dim=1) + 1e-5)\n cosdistance_real_negative = (real_feature * negative_feature).sum(dim=1) / (real_feature.norm(dim=1) * negative_feature.norm(dim=1) + 1e-5)\n\n real_positive_arr[start:end] = cosdistance_real_positive.cpu().numpy()\n real_negative_arr[start:end] = cosdistance_real_negative.cpu().numpy()\n\nwith torch.no_grad():\n for i, (img, label, landmarks, img_path, a_img, p_img, n_img) in tqdm.tqdm(enumerate(test_loader)):\n if img.size(0) != args.batch_size:\n continue\n\n theta = alignment(landmarks)\n grid = torch.nn.functional.affine_grid(theta, torch.Size((args.batch_size, 3, 112, 96)))\n\n img_cuda = img.cuda()\n a_img_cuda = a_img.cuda()\n p_img_cuda = p_img.cuda()\n n_img_cuda = n_img.cuda()\n\n z, dis_target, rand_z, rand_dis_target, \\\n inv_z, inv_dis_target, another_rand_z, another_rand_dis_target = generate_code(args.passwd_length,\n args.batch_size,\n torch.device('cuda:0'),\n inv=True)\n\n fake = model_dict['G'](img, z.cpu())\n recon = model_dict['G'](fake, inv_z)\n wrong_recon = model_dict['G'](fake, rand_z)\n\n real_aligned = torch.nn.functional.grid_sample(img_cuda, grid)\n fake_aligned = torch.nn.functional.grid_sample(fake, grid)\n recon_aligned = torch.nn.functional.grid_sample(recon, grid)\n wrong_recon_aligned = torch.nn.functional.grid_sample(wrong_recon, grid)\n\n # face verification\n real_feature = vggface2_model(a_img_cuda).squeeze()\n fake_feature = vggface2_model(test_dataset.vggface_transform_GPU(fake).cuda()).squeeze()\n recon_feature = vggface2_model(test_dataset.vggface_transform_GPU(recon).cuda()).squeeze()\n wrong_recon_feature = vggface2_model(test_dataset.vggface_transform_GPU(wrong_recon).cuda()).squeeze()\n\n cosdistance_real_fake = (real_feature * fake_feature).sum(dim=1) / (\n real_feature.norm(dim=1) * fake_feature.norm(dim=1) + 1e-5)\n cosdistance_real_recon = (real_feature * recon_feature).sum(dim=1) / (\n real_feature.norm(dim=1) * recon_feature.norm(dim=1) + 1e-5)\n cosdistance_real_wrong_recon = (real_feature * 
wrong_recon_feature).sum(dim=1) / (\n                real_feature.norm(dim=1) * wrong_recon_feature.norm(dim=1) + 1e-5)\n\n        # index range of this batch in the preallocated result arrays\n        start = i * args.batch_size\n        end = start + args.batch_size\n\n        real_fake_arr[start:end] = cosdistance_real_fake.cpu().numpy()\n        real_recon_arr[start:end] = cosdistance_real_recon.cpu().numpy()\n        real_wrong_recon_arr[start:end] = cosdistance_real_wrong_recon.cpu().numpy()\n\n        # FID\n        real_pred = fid_model(img_cuda)[0]\n        fake_pred = fid_model(fake)[0]\n        recon_pred = fid_model(recon)[0]\n        wrong_recon_pred = fid_model(wrong_recon)[0]\n\n        real_pred_aligned = fid_model(real_aligned)[0]\n        fake_pred_aligned = fid_model(fake_aligned)[0]\n        recon_pred_aligned = fid_model(recon_aligned)[0]\n        wrong_recon_pred_aligned = fid_model(wrong_recon_aligned)[0]\n\n        real_pred_arr[start:end] = real_pred.cpu().numpy().reshape(args.batch_size, -1)\n        fake_pred_arr[start:end] = fake_pred.cpu().numpy().reshape(args.batch_size, -1)\n        recon_pred_arr[start:end] = recon_pred.cpu().numpy().reshape(args.batch_size, -1)\n        wrong_pred_arr[start:end] = wrong_recon_pred.cpu().numpy().reshape(args.batch_size, -1)\n\n        real_pred_arr_aligned[start:end] = real_pred_aligned.cpu().numpy().reshape(args.batch_size, -1)\n        fake_pred_arr_aligned[start:end] = fake_pred_aligned.cpu().numpy().reshape(args.batch_size, -1)\n        recon_pred_arr_aligned[start:end] = recon_pred_aligned.cpu().numpy().reshape(args.batch_size, -1)\n        wrong_pred_arr_aligned[start:end] = wrong_recon_pred_aligned.cpu().numpy().reshape(args.batch_size, -1)\n\n        real_pred_arr_vggface[start:end] = real_feature.cpu().numpy().reshape(args.batch_size, -1)\n        fake_pred_arr_vggface[start:end] = fake_feature.cpu().numpy().reshape(args.batch_size, -1)\n        recon_pred_arr_vggface[start:end] = recon_feature.cpu().numpy().reshape(args.batch_size, -1)\n        wrong_pred_arr_vggface[start:end] = wrong_recon_feature.cpu().numpy().reshape(args.batch_size, -1)\n\n        # Recon metrics: LPIPS, SSIM, L2, L1\n        dist_lpips = lpips_model.forward(img_cuda, recon)\n        LPIPS.update(np.mean(dist_lpips), n=img.size(0))\n\n        L1.update(criterion_L1(img_cuda, recon).item(), n=args.batch_size)\n        L2.update(criterion_L2(img_cuda, recon).item(), n=args.batch_size)\n\n        # compare_ssim(p0, p1, data_range=range, multichannel=True)\n        img_np = tensor2im(img)\n        recon_np = tensor2im(recon)\n        for img_idx in range(args.batch_size):\n            DSSIM.update((1. 
- compare_ssim(img_np[img_idx, ...], recon_np[img_idx, ...], data_range=255., multichannel=True)) / 2.)\n\n\n # print('Distance: ', dist01)\n # sys.exit(1)\n\n\ndef _compute_statistics_from_pred_arr(pred_arr):\n mu = np.mean(pred_arr, axis=0)\n sigma = np.cov(pred_arr, rowvar=False)\n return mu, sigma\n\n\nmu_real, sigma_real = _compute_statistics_from_pred_arr(real_pred_arr)\nmu_fake, sigma_fake = _compute_statistics_from_pred_arr(fake_pred_arr)\nmu_recon, sigma_recon = _compute_statistics_from_pred_arr(recon_pred_arr)\nmu_wrong, sigma_wrong = _compute_statistics_from_pred_arr(wrong_pred_arr)\n\nmu_real_aligned, sigma_real_aligned = _compute_statistics_from_pred_arr(real_pred_arr_aligned)\nmu_fake_aligned, sigma_fake_aligned = _compute_statistics_from_pred_arr(fake_pred_arr_aligned)\nmu_recon_aligned, sigma_recon_aligned = _compute_statistics_from_pred_arr(recon_pred_arr_aligned)\nmu_wrong_aligned, sigma_wrong_aligned = _compute_statistics_from_pred_arr(wrong_pred_arr_aligned)\n\nmu_real_vggface, sigma_real_vggface = _compute_statistics_from_pred_arr(real_pred_arr_vggface)\nmu_fake_vggface, sigma_fake_vggface = _compute_statistics_from_pred_arr(fake_pred_arr_vggface)\nmu_recon_vggface, sigma_recon_vggface = _compute_statistics_from_pred_arr(recon_pred_arr_vggface)\nmu_wrong_vggface, sigma_wrong_vggface = _compute_statistics_from_pred_arr(wrong_pred_arr_vggface)\n\n# here we use training stat for FID\nwith open('training_stat_FID.pickle', 'rb') as f:\n training_stat = pickle.load(f)\nmu_real, mu_real_aligned, mu_real_vggface, \\\nsigma_real, sigma_real_aligned, sigma_real_vggface = \\\n training_stat['mu_real'], training_stat['mu_real_aligned'], training_stat['mu_real_vggface'], \\\n training_stat['sigma_real'], training_stat['sigma_real_aligned'], training_stat['sigma_real_vggface']\n\nfid_real_fake = calculate_frechet_distance(mu_real, sigma_real, mu_fake, sigma_fake)\nfid_real_recon = calculate_frechet_distance(mu_real, sigma_real, mu_recon, sigma_recon)\nfid_real_wrong = calculate_frechet_distance(mu_real, sigma_real, mu_wrong, sigma_wrong)\n\nfid_real_fake_aligned = calculate_frechet_distance(mu_real_aligned, sigma_real_aligned, mu_fake_aligned, sigma_fake_aligned)\nfid_real_recon_aligned = calculate_frechet_distance(mu_real_aligned, sigma_real_aligned, mu_recon_aligned, sigma_recon_aligned)\nfid_real_wrong_aligned = calculate_frechet_distance(mu_real_aligned, sigma_real_aligned, mu_wrong_aligned, sigma_wrong_aligned)\n\nfid_real_fake_vggface = calculate_frechet_distance(mu_real_vggface, sigma_real_vggface, mu_fake_vggface, sigma_fake_vggface)\nfid_real_recon_vggface = calculate_frechet_distance(mu_real_vggface, sigma_real_vggface, mu_recon_vggface, sigma_recon_vggface)\nfid_real_wrong_vggface = calculate_frechet_distance(mu_real_vggface, sigma_real_vggface, mu_wrong_vggface, sigma_wrong_vggface)\n\nfolds = KFold(n=num_test_imgs, n_folds=10)\nthresholds = np.arange(-1.0, 1.0, 0.005)\npredicts_fake = []\npredicts_recon = []\npredicts_wrong_recon = []\npredicts_positive = []\npredicts_negative = []\nfor i in range(num_test_imgs):\n predicts_fake.append([real_fake_arr[i], 1])\n predicts_recon.append([real_recon_arr[i], 1])\n predicts_wrong_recon.append([real_wrong_recon_arr[i], 1])\nfor i in range(_num_test_imgs):\n predicts_positive.append([real_positive_arr[i], 1])\n predicts_negative.append([real_negative_arr[i], 0])\n\npredicts_fake = np.asarray(predicts_fake)\npredicts_recon = np.asarray(predicts_recon)\npredicts_wrong_recon = np.asarray(predicts_wrong_recon)\npredicts_positive = 
np.asarray(predicts_positive)\npredicts_negative = np.asarray(predicts_negative)\npredicts_positive_negative = np.concatenate([predicts_positive, predicts_negative], axis=0)\n\nbest_thresh = find_best_threshold(thresholds, predicts_positive_negative)\naccuracy_fake = eval_acc(best_thresh, predicts_fake)\naccuracy_recon = eval_acc(best_thresh, predicts_recon)\naccuracy_wrong_recon = eval_acc(best_thresh, predicts_wrong_recon)\n\nfpr_fake, tpr_fake, _ = metrics.roc_curve(np.concatenate([predicts_fake[:, 1], predicts_negative[:, 1]], axis=0),\n np.concatenate([predicts_fake[:, 0], predicts_negative[:, 0]], axis=0), pos_label=1)\nfpr_recon, tpr_recon, _ = metrics.roc_curve(np.concatenate([predicts_recon[:, 1], predicts_negative[:, 1]], axis=0),\n np.concatenate([predicts_recon[:, 0], predicts_negative[:, 0]], axis=0), pos_label=1)\nfpr_wrong_recon, tpr_wrong_recon, _ = metrics.roc_curve(np.concatenate([predicts_wrong_recon[:, 1], predicts_negative[:, 1]], axis=0),\n np.concatenate([predicts_wrong_recon[:, 0], predicts_negative[:, 0]], axis=0), pos_label=1)\nauc_fake = metrics.auc(fpr_fake, tpr_fake)\nauc_recon = metrics.auc(fpr_recon, tpr_recon)\nauc_wrong_recon = metrics.auc(fpr_wrong_recon, tpr_wrong_recon)\n\nvisualizer.logger.log('FID Inception: real vs fake {}; real vs recon {}; real vs wrong {}'.format(fid_real_fake, fid_real_recon, fid_real_wrong))\nvisualizer.logger.log('FID Inception aligned: real vs fake {}; real vs recon {}; real vs wrong {}'.format(fid_real_fake_aligned, fid_real_recon_aligned, fid_real_wrong_aligned))\nvisualizer.logger.log('FID VGGFACE: real vs fake {}; real vs recon {}; real vs wrong {}'.format(fid_real_fake_vggface, fid_real_recon_vggface, fid_real_wrong_vggface))\nvisualizer.logger.log('Recon metrics: LPIPS {}; SSIM {}; L2 {}; L1 {}'.format(LPIPS.avg, DSSIM.avg, L2.avg, L1.avg))\nvisualizer.logger.log('Face verification accuracy: fake {}; recon {}; wrong {}'.format(accuracy_fake, accuracy_recon, accuracy_wrong_recon))\nvisualizer.logger.log('Face verification auc: fake {}; recon {}; wrong {}'.format(auc_fake, auc_recon, auc_wrong_recon))\n\n\n\n# dpi = 80.0\n# xpixels, ypixels = 128 * BATCH_SIZE, 128 * 8\n# def show(img):\n# npimg = img.numpy()\n# plt.figure(figsize=(ypixels / dpi, xpixels / dpi), dpi=dpi)\n# plt.imshow(np.transpose(npimg, (1, 2, 0)), interpolation='nearest')\n\n\n\n\n# show(make_grid((torch.cat(new_fake_list, dim=0) + 1.) / 2., nrow=BATCH_SIZE))\n# save_image((torch.cat(new_fake_list, dim=0) + 1.) 
/ 2., filename='qualitative/' + SAVE_NAME + '_fake.png',\n# nrow=BATCH_SIZE)\n\n\n\n","sub_path":"generate_csv/eval_funcs_add_face_vr.py","file_name":"eval_funcs_add_face_vr.py","file_ext":"py","file_size_in_byte":25454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"339506699","text":"# 2d scatter plot\nimport numpy as np\nfrom matplotlib import cm\nfrom sklearn.decomposition import PCA\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\n\n\ndef add_legend(ax,legend_title,legend_str,legend_handles=None,shrink = True,fontsize=15):\n box = ax.get_position()\n if shrink:\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n if legend_handles is not None:\n ax.legend(legend_handles,legend_str,\n title = legend_title,\n title_fontsize=fontsize,\n fontsize=fontsize,\n loc='center left',\n bbox_to_anchor=(1, 0.5)) \n else:\n ax.legend(legend_str,\n title = legend_title,\n title_fontsize=fontsize,\n fontsize=fontsize,\n loc='center left',\n bbox_to_anchor=(1, 0.5)) \n \n return ax\n\n\ndef proj2d(features,labels,ax=None,method='TSNE'):\n if method == 'TSNE':\n xfer = TSNE(n_components=2)\n elif method == 'PCA':\n xfer = PCA(n_components=2) \n else:\n raise ValueError(f\"Uknown dim reduction method {method}\")\n ftrs_2d = xfer.fit_transform(features)\n _labels = np.unique(labels)\n df = pd.DataFrame({\n 'x_1': ftrs_2d[:,0],\n 'x_2': ftrs_2d[:,1],\n 'labels': labels,\n }) \n fg = sns.FacetGrid(data=df, hue='labels', hue_order=_labels, aspect=1.61)\n if ax is None:\n sns_ax = fg.map(plt.scatter, 'x_1', 'x_2').add_legend()\n else:\n sns_ax = fg.map(ax.scatter, 'x_1', 'x_2').add_legend()\n # add_legend(ax,\"Classes\",_labels)\n plt.close(fg.fig)\n return sns_ax\n","sub_path":"base_plot.py","file_name":"base_plot.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649564267","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Deep Learning imports\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\" #gpu select\n\nfrom keras import backend as K\nimport tensorflow as tf\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nsess = tf.Session(config=config)\nK.set_session(sess)\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom sklearn.utils import shuffle\n\n\ndef donuts_data_generator(N,R_inner,R_outer,split):\n R1 = np.random.randn(N//2) + R_inner\n theta = 2*np.pi*np.random.random(N//2)\n X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T\n \n R2 = np.random.randn(N//2) + R_outer\n theta = 2*np.pi*np.random.random(N//2)\n X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T\n \n data = np.concatenate([ X_inner, X_outer ])\n data = data/max(abs(data.min()),data.max())\n targets = np.array([0]*(N//2) + [1]*(N//2))\n obj,targets = shuffle(data,targets)\n x,y = int(len(obj)*split),int(len(obj)*((1-split)/2+split))\n X_train = obj[:x]\n X_valid = obj[x:y]\n X_test = obj[y:]\n Y_train = targets[:x]\n Y_valid = targets[x:y]\n Y_test = targets[y:]\n return X_train, X_valid, X_test, Y_train, Y_valid, Y_test,obj,targets\n\nX_train, X_valid, X_test, Y_train, Y_valid, Y_test,data,targets = donuts_data_generator(10000, 5, 15, 0.5)\n\nplt.scatter(data[:,0],data[:,1],label='All Data',alpha=0.3, edgecolors='none')\nplt.scatter(X_train[:,0],X_train[:,1],label='Training 
Data', alpha=0.3, edgecolors='none')\nplt.scatter(X_valid[:,0],X_valid[:,1],label='Validation Data', alpha=0.3, edgecolors='none')\nplt.scatter(X_test[:,0],X_test[:,1],label='Testing Data', alpha=0.3, edgecolors='none')\nplt.title('All Data')\nplt.legend()\nplt.grid()\nplt.show()\n\nplt.scatter(X_train[:,0],X_train[:,1])\nplt.title('Train Data')\nplt.show()\n\n\nplt.scatter(X_valid[:,0],X_valid[:,1])\nplt.title('Valid Data')\nplt.show()\n\nplt.scatter(X_test[:,0],X_test[:,1])\nplt.title('Test Data')\nplt.show()\n\n\ndef donuts_m():\n    model = Sequential()\n    model.add(Dense(units=4 , input_shape =(2,), activation='relu')) #input layer\n    # model.add(Dense(units=4 , activation='relu')) #Hidden layer\n    model.add(Dense(units=len(set(targets)) , activation='softmax')) # output layer\n    model.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n    return model \n\nmodel = donuts_m()\n\nhy = model.fit(X_train,\n          Y_train,\n          epochs=30,\n          # batch_size=32,\n          validation_data=(X_valid, Y_valid)\n          )\n# model.test_on_batch(X_test, Y_test)\n\nscore = model.evaluate(X_test, Y_test, \n                       batch_size=32,verbose =1)\n\n# print(hy.history.keys())\n# print(hy.history['acc'])\n# acc\nplt.plot(hy.history['acc'])\nplt.plot(hy.history['val_acc'])\nplt.plot(hy.history['loss'])\nplt.plot(hy.history['val_loss'])\nplt.title('Model info')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['Acc train', 'Acc validation', 'Loss train', 'Loss validation'], loc='lower left')\nplt.grid()\nplt.show()\n\n\n\n\n\n\n\n\n","sub_path":"Random/Donuts_Keras.py","file_name":"Donuts_Keras.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"265580696","text":"#!/usr/bin/env python\n# coding:utf-8\nimport logging\n\nfrom tools_lib.transwarp import db\nfrom tools_lib.transwarp.tz import utc_now, utc_8_now\nfrom logics import DeliverymanFSMLogLogic\n\n\nclass Dict(dict):\n    \"\"\"\n    Simple dict but support access as x.y style AND hash.\n    \"\"\"\n\n    def __init__(self, names=(), values=(), **kw):\n        super(Dict, self).__init__(**kw)\n        for k, v in zip(names, values):\n            self[k] = v\n\n    def __getattr__(self, key):\n        try:\n            return self[key]\n        except KeyError:\n            raise AttributeError(r\"'Dict' object has no attribute '%s'\" % key)\n\n    def __setattr__(self, key, value):\n        self[key] = value\n\n    def __hash__(self):\n        return hash(self.int) if self.get(\"int\", None) else None\n\n    def __eq__(self, other):\n        return self.int == other.int if self.get(\"int\", None) else -1\n\n\n# Define the callbacks\n# === handles the case where the current operation is a resignation (quit)\n@db.with_transaction\ndef _quitting(**kw):\n    # read the arguments; missing/invalid arguments raise immediately\n    deliveryman_id = kw['deliveryman_id']\n    # deliveryman_num = DeliverymanOrgLogic.get('deliveryman_id', deliveryman_id)['deliveryman_num']\n    # TODO legacy-data maintenance (delete me once the migration is done and the backend/APP no longer queries this data)\n\n    # unbind from the org structure\n    # DeliverymanOrgLogic.bind_unbind(deliveryman_num, 0)\n    # mark as quit\n    db.update('update user_center.real_info set quit_time=? where user=?', utc_now(), deliveryman_id)\n    # db.update('update user_center.deliver_org set user_id=? 
where user_id=?', None, deliveryman_id)\n    return DeliverymanFSM.CHECK_STATUS_QUITED\n\n\n# === handles blacklisting; besides the status change it must also update the quit_time and banned fields\n@db.with_transaction\ndef _ban(**kw):\n    # read the arguments; missing/invalid arguments raise immediately\n    deliveryman_id = kw['deliveryman_id']\n    # deliveryman_num = DeliverymanOrgLogic.get('deliveryman_id', deliveryman_id)['deliveryman_num']\n    # TODO legacy-data maintenance (delete me once the migration is done and the backend/APP no longer queries this data)\n\n    # unbind from the org structure\n    # DeliverymanOrgLogic.bind_unbind(deliveryman_num, 0)\n    db.update('update user_center.real_info set banned=?, quit_time=? where user=?', True, utc_now(), deliveryman_id)\n    # db.update('update user_center.deliver_org set user_id=? where user_id=?', None, deliveryman_id)\n    return DeliverymanFSM.CHECK_STATUS_BANNED\n\n\n# === scheduled job: first order adopted; update the first-order time in the old database.\n@db.with_transaction\ndef _adopt_first_order(**kw):\n    first_order_time = kw['first_order_time']\n    deliveryman_id = kw['deliveryman_id']\n\n    db.update('update user_center.real_info set first_order_time=? where user=?', first_order_time, deliveryman_id)\n    return DeliverymanFSM.CHECK_STATUS_WORKING\n\n\n# === transition to BINDING_TEAM; update qualified_work_time in the old database.\n@db.with_transaction\ndef _binding_team(**kw):\n    deliveryman_id = kw['deliveryman_id']\n\n    db.update('update user_center.real_info set qualified_work_time=? where user=?', utc_now(), deliveryman_id)\n    return DeliverymanFSM.CHECK_STATUS_BINDING_TEAM\n\n\n# === transition to RESIGN; update resign_apply_time in the old database.\ndef _apply_resign(**kw):\n    deliveryman_id = kw['deliveryman_id']\n\n    db.update('update user_center.apply_deliver set resign_apply_time=? where user=?', utc_now(), deliveryman_id)\n    return DeliverymanFSM.CHECK_STATUS_RESIGN\n\n\nclass DeliverymanFSM(object):\n    \"\"\"\n    Finite state machine for deliverymen\n    \"\"\"\n    ################################################\n    # Deliveryman statuses\n    ################################################\n    # initial status\n    CHECK_STATUS_INIT = 'CHECK_INIT'\n    # score check failed\n    CHECK_STATUS_REGISTERED_DENY = 'CHECK_REGISTERED_DENY'\n    # registered via the APP\n    CHECK_STATUS_APP_REGISTERED = 'CHECK_APP_REGISTERED'\n    # part-time application approved\n    CHECK_STATUS_PART_TIME_WORKING = 'CHECK_PART_TIME_WORKING'\n    # applied for full-time, pending review\n    CHECK_STATUS_APPLY_FULL_TIME = 'CHECK_APPLY_FULL_TIME'\n    # applied for full-time, pending interview\n    CHECK_STATUS_WAIT_INTERVIEW = 'CHECK_WAIT_INTERVIEW'\n    # interviewed\n    CHECK_STATUS_HRBP_INTERVIEWED = 'CHECK_HRBP_INTERVIEWED'\n    # profile info to be completed\n    CHECK_STATUS_WAIT_INFO_COMPLETED = 'CHECK_WAIT_INFO_COMPLETED'\n    # compliance review passed\n    CHECK_STATUS_UNALLOCATED = 'CHECK_UNALLOCATED'\n    # compliance review rejected\n    CHECK_STATUS_UNALLOCATED_DENY = 'CHECK_UNALLOCATED_DENY'\n    # bound to a team\n    CHECK_STATUS_BINDING_TEAM = 'CHECK_BINDING_TEAM'\n    # on duty\n    CHECK_STATUS_WORKING = 'CHECK_WORKING'\n    # resignation requested\n    CHECK_STATUS_RESIGN = 'CHECK_RESIGN'\n    # quit\n    CHECK_STATUS_QUITED = 'CHECK_QUITED'\n    # recommended to quit: absent three days in a row\n    CHECK_STATUS_RECOMMEND_QUIT = 'CHECK_RECOMMEND_QUIT'\n    # blacklisted\n    CHECK_STATUS_BANNED = 'CHECK_STATUS_BANNED'\n    # suspended\n    CHECK_STATUS_RETAIN = 'CHECK_STATUS_RETAIN'\n\n    ################################################\n    # Status groups used by different business features\n    ################################################\n    # can take orders (full-time)\n    FULL_TIME_CAN_ADOPT_STATUS_LIST = [\n        CHECK_STATUS_WORKING,\n        CHECK_STATUS_RESIGN,\n        CHECK_STATUS_RECOMMEND_QUIT,\n        CHECK_STATUS_BINDING_TEAM\n    ]\n    # everyone who can take orders (full-time + part-time)\n    CAN_ADOPT_STATUS_LIST = FULL_TIME_CAN_ADOPT_STATUS_LIST + [CHECK_STATUS_PART_TIME_WORKING]\n    # statuses that may be bound to the org tree = full-time can-take-orders statuses + compliance-approved status\n    CAN_BIND_TEAM_STATUS_LIST = FULL_TIME_CAN_ADOPT_STATUS_LIST + [CHECK_STATUS_UNALLOCATED]\n    # statuses of people who have left (terminal - no event can change this deliveryman's status any more)\n    QUIT_STATUS_LIST = [\n        CHECK_STATUS_QUITED, CHECK_STATUS_BANNED\n    ]\n    # statuses waiting to go on duty\n    WAIT_WORKING_STATUS_LIST = [\n        
CHECK_STATUS_APP_REGISTERED,\n        CHECK_STATUS_WAIT_INFO_COMPLETED, CHECK_STATUS_WAIT_INTERVIEW,\n        CHECK_STATUS_UNALLOCATED, CHECK_STATUS_APPLY_FULL_TIME\n    ]\n\n    ####################################################\n    # Deliveryman status-transition events\n    ####################################################\n    # city open + score passed + registered via WEB\n    EVENT_CITY_SCORE_WEB_YES = Dict(int=1, str='EVENT_CITY_SCORE_WEB_YES')\n    # city not open yet\n    EVENT_CITY_NO = Dict(int=2, str='EVENT_CITY_NO')\n    # city open + score passed + registered via APP\n    EVENT_CITY_SCORE_APP_YES = Dict(int=3, str='EVENT_CITY_SCORE_APP_YES')\n    # city open but score failed\n    EVENT_CITY_SCORE_NO = Dict(int=4, str='EVENT_CITY_SCORE_NO')\n    # HR manager interview passed\n    EVENT_HM_INTERVIEW_YES = Dict(int=5, str='EVENT_HM_INTERVIEW_YES')\n    # HR manager interview rejected\n    EVENT_HM_INTERVIEW_NO = Dict(int=6, str='EVENT_HM_INTERVIEW_NO')\n    # profile completed in the APP\n    EVENT_COMPLETE_INFO = Dict(int=7, str='EVENT_COMPLETE_INFO')\n    # backend HR review passed\n    EVENT_HR_CHECK_YES = Dict(int=8, str='EVENT_HR_CHECK_YES')\n    # backend HR review rejected\n    EVENT_HR_CHECK_NO = Dict(int=9, str='EVENT_HR_CHECK_NO')\n    # backend HR marked the profile as incomplete\n    EVENT_HR_DECIDE_INFO_INCOMPLETE = Dict(int=10, str='EVENT_HR_DECIDE_INFO_INCOMPLETE')\n    # bound to the delivery org tree (backend)\n    EVENT_BIND_TREE = Dict(int=13, str='EVENT_BIND_TREE')\n    # unbound from the delivery org tree (backend)\n    EVENT_UNBIND_TREE = Dict(int=21, str='EVENT_UNBIND_TREE')\n    # first order adopted\n    EVENT_ADOPT_FIRST_ORDER = Dict(int=14, str='EVENT_ADOPT_FIRST_ORDER')\n    # resignation requested\n    EVENT_APPLY_RESIGN = Dict(int=15, str='EVENT_APPLY_RESIGN')\n    # HR manager decided to dismiss\n    EVENT_HM_DECIDE_QUIT = Dict(int=16, str='EVENT_HM_DECIDE_QUIT')\n    # HR manager decided to retain TODO RESIGN/RECOMMEND_QUIT\n    EVENT_HM_DECIDE_RETAIN = Dict(int=17, str='EVENT_HM_DECIDE_RETAIN')\n    # absent 3 days in a row\n    EVENT_CONTINUE_ABSENT = Dict(int=18, str='EVENT_CONTINUE_ABSENT')\n    # 5 elimination warnings accumulated this month\n    EVENT_WARNING = Dict(int=19, str='EVENT_WARNING')\n    # no first order within 7 days\n    EVENT_CONTINUE_NO_FIRST_ORDER = Dict(int=20, str='EVENT_CONTINUE_NO_FIRST_ORDER')\n    # part-time deliveryman applied for full-time\n    EVENT_APPLY_FULL_TIME = Dict(int=23, str='EVENT_APPLY_FULL_TIME')\n    # bound to a regional HR manager\n    EVENT_BIND_HRBP = Dict(int=24, str='EVENT_BIND_HRBP')\n    # blacklisted from the backend\n    EVENT_BANNED = Dict(int=25, str='EVENT_BANNED')\n    # TODO: stop generating this event ourselves; let FE/APP call the API instead.\n    # the system marks the registration as a full-time application\n    EVENT_SYSTEM_JUMP_FULL_TIME = Dict(int=27, str='EVENT_SYSTEM_JUMP_FULL_TIME')\n\n    # TODO: deliveryman applied to resign and then withdrew it himself\n    EVENT_CANCEL_RESIGN = Dict(int=35, str='EVENT_CANCEL_RESIGN')\n    # HRBP reported the interview result\n    EVENT_HRBP_INTERVIEW = Dict(int=34, str='EVENT_HRBP_INTERVIEW')\n    # god-mode operation: reset the status to CHECK_INIT (so registration can be repeated).\n    EVENT_RESET = Dict(int=-1, str='EVENT_RESET')\n\n    EMAP = {\n        # city open + score passed + registered via WEB\n        EVENT_CITY_SCORE_WEB_YES.int: EVENT_CITY_SCORE_WEB_YES,\n        EVENT_CITY_SCORE_WEB_YES.str: EVENT_CITY_SCORE_WEB_YES,\n        # city not open yet\n        EVENT_CITY_NO.int: EVENT_CITY_NO,\n        EVENT_CITY_NO.str: EVENT_CITY_NO,\n        # city open + score passed + registered via APP\n        EVENT_CITY_SCORE_APP_YES.int: EVENT_CITY_SCORE_APP_YES,\n        EVENT_CITY_SCORE_APP_YES.str: EVENT_CITY_SCORE_APP_YES,\n        # city open but score failed\n        EVENT_CITY_SCORE_NO.int: EVENT_CITY_SCORE_NO,\n        EVENT_CITY_SCORE_NO.str: EVENT_CITY_SCORE_NO,\n        # HR manager interview passed\n        EVENT_HM_INTERVIEW_YES.int: EVENT_HM_INTERVIEW_YES,\n        EVENT_HM_INTERVIEW_YES.str: EVENT_HM_INTERVIEW_YES,\n        # HR manager interview rejected\n        EVENT_HM_INTERVIEW_NO.int: EVENT_HM_INTERVIEW_NO,\n        EVENT_HM_INTERVIEW_NO.str: EVENT_HM_INTERVIEW_NO,\n        # profile completed in the APP\n        EVENT_COMPLETE_INFO.int: EVENT_COMPLETE_INFO,\n        EVENT_COMPLETE_INFO.str: EVENT_COMPLETE_INFO,\n        # backend HR review passed\n        EVENT_HR_CHECK_YES.int: EVENT_HR_CHECK_YES,\n        EVENT_HR_CHECK_YES.str: EVENT_HR_CHECK_YES,\n        # backend HR review rejected\n        EVENT_HR_CHECK_NO.int: EVENT_HR_CHECK_NO,\n        EVENT_HR_CHECK_NO.str: EVENT_HR_CHECK_NO,\n        # bound to the delivery org tree (backend)\n        EVENT_BIND_TREE.int: EVENT_BIND_TREE,\n        EVENT_BIND_TREE.str: EVENT_BIND_TREE,\n        # 
unbound from the delivery org tree (backend)\n        EVENT_UNBIND_TREE.int: EVENT_UNBIND_TREE,\n        EVENT_UNBIND_TREE.str: EVENT_UNBIND_TREE,\n        # first order adopted\n        EVENT_ADOPT_FIRST_ORDER.int: EVENT_ADOPT_FIRST_ORDER,\n        EVENT_ADOPT_FIRST_ORDER.str: EVENT_ADOPT_FIRST_ORDER,\n        # resignation requested\n        EVENT_APPLY_RESIGN.int: EVENT_APPLY_RESIGN,\n        EVENT_APPLY_RESIGN.str: EVENT_APPLY_RESIGN,\n        # HR manager decided to dismiss\n        EVENT_HM_DECIDE_QUIT.int: EVENT_HM_DECIDE_QUIT,\n        EVENT_HM_DECIDE_QUIT.str: EVENT_HM_DECIDE_QUIT,\n        # HR manager decided to retain\n        EVENT_HM_DECIDE_RETAIN.int: EVENT_HM_DECIDE_RETAIN,\n        EVENT_HM_DECIDE_RETAIN.str: EVENT_HM_DECIDE_RETAIN,\n        # absent 3 days in a row\n        EVENT_CONTINUE_ABSENT.int: EVENT_CONTINUE_ABSENT,\n        EVENT_CONTINUE_ABSENT.str: EVENT_CONTINUE_ABSENT,\n        # 5 elimination warnings accumulated this month\n        EVENT_WARNING.int: EVENT_WARNING,\n        EVENT_WARNING.str: EVENT_WARNING,\n        # no first order within 7 days\n        EVENT_CONTINUE_NO_FIRST_ORDER.int: EVENT_CONTINUE_NO_FIRST_ORDER,\n        EVENT_CONTINUE_NO_FIRST_ORDER.str: EVENT_CONTINUE_NO_FIRST_ORDER,\n        # part-time deliveryman applied for full-time\n        EVENT_APPLY_FULL_TIME.int: EVENT_APPLY_FULL_TIME,\n        EVENT_APPLY_FULL_TIME.str: EVENT_APPLY_FULL_TIME,\n        # bound to a regional HR manager\n        EVENT_BIND_HRBP.int: EVENT_BIND_HRBP,\n        EVENT_BIND_HRBP.str: EVENT_BIND_HRBP,\n        # blacklisted from the backend\n        EVENT_BANNED.int: EVENT_BANNED,\n        EVENT_BANNED.str: EVENT_BANNED,\n        # TODO: stop generating this event ourselves; let FE/APP call the API instead.\n        # the system marks the registration as a full-time application\n        EVENT_SYSTEM_JUMP_FULL_TIME.int: EVENT_SYSTEM_JUMP_FULL_TIME,\n        EVENT_SYSTEM_JUMP_FULL_TIME.str: EVENT_SYSTEM_JUMP_FULL_TIME,\n        # deliveryman applied to resign and then withdrew it himself\n        EVENT_CANCEL_RESIGN.int: EVENT_CANCEL_RESIGN,\n        EVENT_CANCEL_RESIGN.str: EVENT_CANCEL_RESIGN,\n        # HRBP reported the interview result\n        EVENT_HRBP_INTERVIEW.int: EVENT_HRBP_INTERVIEW,\n        EVENT_HRBP_INTERVIEW.str: EVENT_HRBP_INTERVIEW,\n        # god-mode operation\n        EVENT_RESET.int: EVENT_RESET,\n        EVENT_RESET.str: EVENT_RESET,\n    }\n\n    # transition table; entry structure: (current status, event): next status\n    FSM = {\n        (CHECK_STATUS_INIT, EVENT_CITY_SCORE_NO): CHECK_STATUS_REGISTERED_DENY,\n        (CHECK_STATUS_INIT, EVENT_CITY_SCORE_APP_YES): CHECK_STATUS_APP_REGISTERED,\n\n        (CHECK_STATUS_APP_REGISTERED, EVENT_BANNED): _ban,\n        (CHECK_STATUS_APP_REGISTERED, EVENT_HR_CHECK_NO): CHECK_STATUS_INIT,\n\n        # choosing full-time at registration automatically enters the full-time review status\n        # TODO: stop generating this event ourselves; let FE/APP call the API instead.\n        (CHECK_STATUS_APP_REGISTERED, EVENT_SYSTEM_JUMP_FULL_TIME): CHECK_STATUS_APPLY_FULL_TIME,\n\n        (CHECK_STATUS_APPLY_FULL_TIME, EVENT_HR_CHECK_NO): CHECK_STATUS_INIT,\n        (CHECK_STATUS_APPLY_FULL_TIME, EVENT_BIND_HRBP): CHECK_STATUS_WAIT_INTERVIEW,\n\n        # chose full-time at registration, then rejected by HR / compliance respectively\n        (CHECK_STATUS_WAIT_INTERVIEW, EVENT_HRBP_INTERVIEW): CHECK_STATUS_HRBP_INTERVIEWED,\n\n        (CHECK_STATUS_WAIT_INFO_COMPLETED, EVENT_HM_INTERVIEW_YES): CHECK_STATUS_HRBP_INTERVIEWED,\n\n        # chose full-time at registration, then rejected by HR / compliance respectively\n        (CHECK_STATUS_HRBP_INTERVIEWED, EVENT_COMPLETE_INFO): CHECK_STATUS_WAIT_INFO_COMPLETED,\n        (CHECK_STATUS_HRBP_INTERVIEWED, EVENT_BANNED): _ban,\n        (CHECK_STATUS_HRBP_INTERVIEWED, EVENT_HM_INTERVIEW_NO): CHECK_STATUS_INIT,\n        # TODO: 'job_type', 'qualified_work_time', 'quit_time', 'first_order_time' are not maintained here\n        (CHECK_STATUS_HRBP_INTERVIEWED, EVENT_HM_INTERVIEW_YES): CHECK_STATUS_UNALLOCATED,\n\n        (CHECK_STATUS_UNALLOCATED, EVENT_BANNED): _ban,\n        (CHECK_STATUS_UNALLOCATED, EVENT_BIND_TREE): _binding_team,  # update qualified_work_time, status becomes BINDING_TEAM\n\n        (CHECK_STATUS_BINDING_TEAM, EVENT_BANNED): _ban,\n        (CHECK_STATUS_BINDING_TEAM, EVENT_UNBIND_TREE): CHECK_STATUS_UNALLOCATED,\n        (CHECK_STATUS_BINDING_TEAM, EVENT_APPLY_RESIGN): _apply_resign,\n        (CHECK_STATUS_BINDING_TEAM, EVENT_ADOPT_FIRST_ORDER): _adopt_first_order,  # update first_order_time, status becomes WORKING\n\n        (CHECK_STATUS_WORKING, EVENT_BANNED): _ban,\n        (CHECK_STATUS_WORKING, EVENT_APPLY_RESIGN): _apply_resign,\n        (CHECK_STATUS_WORKING, EVENT_CONTINUE_ABSENT): 
CHECK_STATUS_RECOMMEND_QUIT,  # scheduled job: absent 3 days in a row\n        (CHECK_STATUS_WORKING, EVENT_ADOPT_FIRST_ORDER): _adopt_first_order,  # update first_order_time, status stays WORKING\n\n        (CHECK_STATUS_RESIGN, EVENT_BANNED): _ban,\n        (CHECK_STATUS_RESIGN, EVENT_HM_DECIDE_QUIT): _quitting,\n        (CHECK_STATUS_RESIGN, EVENT_HM_DECIDE_RETAIN): CHECK_STATUS_WORKING,\n        (CHECK_STATUS_RESIGN, EVENT_CANCEL_RESIGN): CHECK_STATUS_WORKING,  # resignation request withdrawn\n\n        # system-recommended resignation, kept for now\n        (CHECK_STATUS_RECOMMEND_QUIT, EVENT_HM_DECIDE_QUIT): _quitting,\n        (CHECK_STATUS_RECOMMEND_QUIT, EVENT_HM_DECIDE_RETAIN): CHECK_STATUS_WORKING,\n\n    }\n\n    @classmethod\n    def get_next_status(cls, current_status, event, **kwargs):\n        \"\"\"\n        Get the next status\n        :param current_status: the current status\n        :param event: the triggering event\n        :return: None means the status or event was invalid\n        \"\"\"\n        status = cls.FSM.get((current_status, cls.EMAP[event]), None)\n        return status if (not status) or isinstance(status, str) else status(**kwargs)\n\n    @classmethod\n    @db.with_transaction\n    def update_status(cls, man, event, current_status=None, **kwargs):\n        \"\"\"\n        Update the object's status\n        :param man: a Deliveryman object\n        :param current_status: the current status; if None, the obj's status field is used\n        :param event: the deliveryman event type\n        :param kwargs: currently supported:\n            first_order_time: first-order time passed in by the BL\n            executor_id: id of the operator\n            remark: remark for the operation\n        :return: obj or None; None means something went wrong\n        \"\"\"\n        if current_status is None:\n            current_status = man.status if man.status else cls.CHECK_STATUS_INIT\n        # build the kwargs passed into the state machine callbacks themselves\n        kw = {\n            'deliveryman_id': man.id,\n            'first_order_time': kwargs.get('first_order_time')\n        }\n        next_status = cls.get_next_status(current_status, event, **kw)\n\n        # ====> god-mode operation!!! <====\n        if event == cls.EVENT_RESET.str or event == cls.EVENT_RESET.int:\n            next_status = cls.CHECK_STATUS_INIT\n\n        # logging\n        debug_str = \"deliveryman_id[%s][%s]: from [%s] to [%s], event[%s].\" % (\n            man.id, man.real_name, current_status, next_status, cls.EMAP[event].str)\n        logging.info(debug_str) if next_status else logging.warning(debug_str)\n\n        if next_status:\n            # update the status\n            from_status = current_status\n            man.status = next_status\n            man.update_time = utc_8_now()\n            man.update()\n\n            # event log (new + old)\n            fsm_log = dict(deliveryman_id=man.id,\n                           executor_id=kwargs.get('executor_id', 0),\n                           from_status=from_status,\n                           to_status=next_status,\n                           event=cls.EMAP[event].str,\n                           remark=kwargs.get('remark', None),\n                           create_time=utc_8_now())\n            DeliverymanFSMLogLogic.create(**fsm_log)\n            # TODO legacy-data maintenance (delete me once the migration is done and the backend/APP no longer queries this data)\n            db.update('update user_center.real_info set check_status=?, update_time=? where user=?',\n                      next_status, utc_now(), man.id)\n            db.update('update user_center.apply_deliver set check_status=?, update_time=? 
where user=?',\n                      next_status, utc_now(), man.id)\n            db.insert('F_DB_EVENT_PS.deliver_fsm_log', **dict(\n                user_id=man.id,\n                executor_id=kwargs.get('executor_id', 0),\n                from_status=from_status,\n                to_status=next_status,\n                event=cls.EMAP[event].int,\n                remark=kwargs.get('remark', ''),\n                create_time=utc_now()\n            ))\n            return man.status\n        else:\n            return None\n","sub_path":"data_and_service/man/model_logics/fsm_old.py","file_name":"fsm_old.py","file_ext":"py","file_size_in_byte":18824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"606612811","text":"import os\nimport __init__\nfrom config import CONST\nimport unittest\nimport tempfile\n\n\nclass FlaskrTestCase(unittest.TestCase):\n\n    def setUp(self):\n        self.db_fd, CONST.DATABASE = tempfile.mkstemp()\n        __init__.app.testing = True\n        self.app = __init__.app.test_client()\n        with __init__.app.app_context():\n            __init__.home_controller()\n\n    def tearDown(self):\n        os.close(self.db_fd)\n        os.unlink(CONST.DATABASE)\n\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"SiteManager_tests.py","file_name":"SiteManager_tests.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"269970187","text":"##\n## Programming in Python\n## ===========================================================================\n##\n## Generate a list of tuples where each tuple contains, in its first \n## position, the value from the second column; the second part of the \n## tuple is a list of the letters (sorted and without repeated letters) \n## from the first column that appear associated with that value of the \n## second column. That is:\n##\n## Answer:\n## ('0', ['C'])\n## ('1', ['A', 'B', 'D', 'E'])\n## ('2', ['A', 'D', 'E'])\n## ('3', ['A', 'B', 'D', 'E'])\n## ('4', ['B', 'E'])\n## ('5', ['B', 'C', 'D', 'E'])\n## ('6', ['A', 'B', 'C', 'E'])\n## ('7', ['A', 'C', 'D', 'E'])\n## ('8', ['A', 'B', 'E'])\n## ('9', ['A', 'B', 'C', 'E'])\n##\n## >>> Write your code from this point on <<<\n##\nimport itertools\nfile_csv = open(\"data.csv\",\"r\").readlines()\ntuplas = [(linea.split(\"\\t\")[1],linea.split(\"\\t\")[0]) for linea in file_csv]\nfor key, group in itertools.groupby(sorted(tuplas), lambda x: x[0]): \n    grupo = [i[1] for i in sorted(set(list(group)))]\n    print((key,grupo))\n\n","sub_path":"03-python=1/q08=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"57182715","text":"import gym\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\n\nfrom network import Network\nfrom agent import Agent\nfrom replay import GameBuffer, ReplayBuffer\n\n\nclass Trainer:\n    def __init__(self):\n        pass\n\n    def train(\n        self,\n        env: gym.Env,\n        agent: Agent,\n        network: Network,\n        optimizer,\n        window_size: int,\n        nb_self_play: int,\n        num_unroll_steps: int,\n        td_steps: int,\n        discount: float,\n        batch_size: int,\n        nb_train_update: int,\n        nb_train_epochs: int,\n        max_grad_norm: float,\n        filename: str,\n        ent_c: float,\n    ):\n        replay_buffer = ReplayBuffer(window_size, batch_size)\n\n        for epoch in range(nb_train_epochs):\n            network.eval()\n            rewards = []\n            for _ in range(nb_self_play):\n                game_buffer = self._play_one_game(env, agent)\n                # game_buffer.print_buffer()\n                replay_buffer.append(game_buffer)\n                rewards.append(np.sum(game_buffer.rewards))\n\n            network.train()\n            
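# each update draws a fresh batch of unrolled trajectories from the replay buffer and takes one optimizer step\n            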
losses = []\n            for _ in range(nb_train_update):\n                batch = replay_buffer.sample_batch(num_unroll_steps, td_steps, discount)\n                losses.append(self._update_weights(network, optimizer, batch, max_grad_norm, ent_c))\n            v_loss, r_loss, p_loss, entropy = np.mean(losses, axis=0)\n            print(\n                f\"Epoch[{epoch+1}]: Reward[{np.mean(rewards)}], Loss: V[{v_loss:.6f}]/R[{r_loss:.6f}]/P[{p_loss:.6f}]/E[{entropy:.6f}]\"\n            )\n\n            if (epoch + 1) % 10 == 0:\n                agent.save_model(filename)\n\n    def validate(self, env: gym.Env, agent: Agent, network: Network):\n        network.eval()\n        rewards = []\n        for _ in range(1):\n            game_buffer = self._play_one_game(env, agent)\n            game_buffer.print_buffer()\n            rewards.append(np.sum(game_buffer.rewards))\n        print(f\"Episode reward[{np.mean(rewards)}]\")\n\n    def _play_one_game(self, env: gym.Env, agent: Agent) -> GameBuffer:\n        buffer = GameBuffer()\n        obs = env.reset()\n        done = False\n        while not done:\n            obs = np.array([obs])\n            action, root = agent.get_action(obs)\n            next_obs, reward, done, info = env.step(action)\n\n            visit_sum = np.sum([child.visit_count for child in root.children])\n            child_visits = [child.visit_count / visit_sum for child in root.children]\n            buffer.append(obs, action, reward, root.value, child_visits)\n\n            obs = next_obs\n        return buffer\n\n    def _update_weights(self, network, optimizer, batch, max_grad_norm, ent_c):\n        v_loss = 0.0\n        r_loss = 0.0\n        p_loss = 0.0\n        entropy = 0.0\n        batch_size = len(batch)\n        for obs, actions, targets in batch:\n            target_values, target_rewards, target_policies = targets\n            target_values = torch.Tensor(target_values)\n            target_rewards = torch.Tensor(target_rewards)\n            target_policies = torch.Tensor(target_policies)\n\n            state, policy, value = network.initial_inference(obs)\n            c = Categorical(policy)\n\n            v_loss += F.mse_loss(value, target_values[0].unsqueeze(0))\n            p_loss += -(target_policies[0] * policy.log()).mean()\n            entropy += -ent_c * c.entropy().mean()\n\n            gradient_scale = 1 / len(actions)\n            for i, action in enumerate(actions):\n                state, reward, policy, value = network.recurrent_inference(state, np.array([action]))\n                v_loss += gradient_scale * F.mse_loss(value, target_values[i + 1].unsqueeze(0))\n                r_loss += gradient_scale * F.mse_loss(reward, target_rewards[i + 1].unsqueeze(0))\n                p_loss += gradient_scale * (-(target_policies[i + 1] * policy.log()).mean())\n                # entropy regularizer uses the policy produced at this unrolled step\n                entropy += gradient_scale * (-ent_c * Categorical(policy).entropy().mean())\n\n        v_loss = v_loss / batch_size\n        r_loss = r_loss / batch_size\n        p_loss = p_loss / batch_size\n        entropy = entropy / batch_size\n\n        optimizer.zero_grad()\n        total_loss = v_loss + r_loss + p_loss + entropy\n        total_loss.backward()\n        torch.nn.utils.clip_grad_norm_(network.parameters(), max_grad_norm)\n        optimizer.step()\n\n        return v_loss.item(), r_loss.item(), p_loss.item(), entropy.item()\n","sub_path":"RL/muzero/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"346088392","text":"import requests\nimport sys\n\nfrom cmd import Cmd\nfrom time import sleep\n\nPIECE_NAME = {\n    3: 'bishop',\n    5: 'king',\n    7: 'knight',\n    9: 'pawn',\n    11: 'queen',\n    13: 'rook',\n}\nPROMPT = '> '\nBRIGHT_GREEN = '\\u001b[42;1m'\nRESET = '\\u001b[0m'\nSELECTED_PIECE = f'{ BRIGHT_GREEN }{{}}{ RESET }'\nTOP_BOARD_OUTPUT_SHELL = '''\n  A B C D E F G H\n +---------------'''\nBOARD_OUTPUT_SHELL = ('8|', '7|', '6|', '5|', '4|', '3|', '2|', '1|')\n\n\ndef get_info(api_url, game_id):\n    response = requests.get(f'{ api_url }/v1.0/games/{ game_id }/info')\n
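    # the '/info' endpoint exposes a pre-rendered board string under the 'print' key\n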
return response.json()['print']\n\n\ndef format_board(board):\n return map(' '.join, board.splitlines())\n\n\ndef print_board(board):\n \"\"\"\n Print board in shell.\n \"\"\"\n print(TOP_BOARD_OUTPUT_SHELL)\n for shell, line in zip(\n BOARD_OUTPUT_SHELL, tuple(board)):\n print(f'{ shell }{ \"\".join(line) }')\n\n\nclass CLIAgent(Cmd):\n prompt = PROMPT\n\n def __init__(self, api_url):\n \"\"\"\n Init player board.\n \"\"\"\n super().__init__()\n self.api_url = api_url\n self.do_reset()\n\n def do_reset(self, *args):\n self.piece = None\n game = requests.post(f'{ self.api_url }/v1.0/games').json()\n game['user'] = 1\n self.game_id = game['id']\n self.user = requests.post(\n f'{ self.api_url }/issue-agent',\n json=game,\n headers={\n 'content-type': 'application/json'\n },\n ).json()['agent_id']\n requests.post(\n f'{ self.api_url }/issue-agent-lookahead',\n json={'id': self.game_id, 'player': 2, 'lookahead': 4})\n print('> piece # select piece')\n print('> move # move selected piece to')\n print('> reset # start a new game')\n print_board(format_board(get_info(self.api_url, self.game_id)))\n\n def do_piece(self, arg_str):\n \"\"\"\n Select piece for move.\n \"\"\"\n args = self.parse(arg_str)\n if len(args) != 2:\n return self.print_invalid('piece ' + arg_str)\n self.piece = args\n response = requests.get(f'{ self.api_url }/v1.0/games/{ self.game_id }')\n state = response.json()['state']\n if state == {'end': True}:\n return print('game over')\n board = tuple(map(bytes.fromhex, state))\n try:\n piece = board[args[1]][args[0]]\n except IndexError:\n return self.print_invalid('piece ' + arg_str)\n if not (piece and (piece & 1)):\n return self.print_invalid('piece ' + arg_str)\n board = list(map(list, get_info(self.api_url, self.game_id).splitlines()))\n board[args[1]][args[0]] = SELECTED_PIECE.format(\n board[args[1]][args[0]])\n print_board(map(' '.join, board))\n print(f'Selected: { PIECE_NAME[piece & 0xf] }')\n\n def do_move(self, arg_str):\n \"\"\"\n Make move.\n \"\"\"\n if not self.piece:\n return self.print_invalid('move ' + arg_str)\n\n args = self.parse(arg_str)\n if len(args) != 2:\n return self.print_invalid('move ' + arg_str)\n\n move = {'move': (tuple(reversed(self.piece)), tuple(reversed(args)))}\n self.piece = None\n\n response = requests.put(\n f'{ self.api_url }/agent/{ self.user }',\n json=move,\n headers={\n 'content-type': 'application/json',\n }\n )\n if response.status_code != 200 or response.json().get('invalid', False):\n print_board(format_board(get_info(self.api_url, self.game_id)))\n return print('Invalid move.')\n if response.json().get('state', {}).get('end', False):\n print_board(format_board(get_info(self.api_url, self.game_id)))\n return print('you won')\n response = requests.get(f'{ self.api_url }/v1.0/games/{ self.game_id }')\n in_board = response.json()['state']\n print_board(format_board(get_info(self.api_url, self.game_id)))\n if in_board == {'end': True}:\n return print('you won')\n print('making move ...')\n board = in_board\n while in_board == board:\n sleep(2)\n response = requests.get(f'{ self.api_url }/v1.0/games/{ self.game_id }')\n state = response.json()['state']\n if state == {'end': True}:\n return print('game over')\n response = requests.get(\n f'{ self.api_url }/agent/{ self.user }',\n headers={\n 'content-type': 'application/json',\n }\n )\n if response.status_code != 200:\n return self.do_reset()\n try:\n if response.json()['state'] == {'end': True}:\n return self.do_reset()\n except Exception:\n return self.do_reset()\n board = state\n 
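# the polling loop above exits once the opponent's move changes the board; show the new position\n        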
print_board(format_board(get_info(self.api_url, self.game_id)))\n\n    def print_invalid(self, args):\n        print_board(format_board(get_info(self.api_url, self.game_id)))\n        print('invalid command:', args)\n        print('> piece   # select piece')\n        print('> move    # move selected piece to')\n        print('> reset   # start a new game')\n\n    @staticmethod\n    def parse(args):\n        \"\"\"\n        Split arguments.\n        \"\"\"\n        args = args.split()\n        if len(args) != 2:\n            return args\n        try:\n            args[1] = 8 - int(args[1])\n            if not (0 <= args[1] < 8):\n                print('out of range row')\n                raise ValueError\n        except ValueError:\n            print('not int', args[1])\n            return ()\n        try:\n            args[0] = ord(args[0]) - ord('a')\n            if not (0 <= args[0] < 8):\n                print('out of range column')\n                raise ValueError\n        except ValueError:\n            print('not char', args[0])\n            return ()\n        return args\n\n    def emptyline(self):\n        \"\"\"\n        Do nothing on empty command.\n        \"\"\"\n\n    def precmd(self, line):\n        \"\"\"\n        Sanitize data.\n        \"\"\"\n        return line.strip().lower()\n\n\ndef main(argv=sys.argv):\n    try:\n        port = 8080\n        api_url = f'http://localhost:{ port }'\n        if len(argv) > 1:\n            api_url = argv[1]\n        while True:\n            try:\n                CLIAgent(api_url).cmdloop()\n            except Exception:\n                pass\n    except KeyboardInterrupt:\n        print()\n","sub_path":"neuralknight/scripts/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":6577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"53324880","text":"import numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\ntrain_IDG = ImageDataGenerator(rescale=1./255, \n                               horizontal_flip=True, \n                               vertical_flip=True, \n                               width_shift_range=0.1, \n                               height_shift_range=0.1,\n                               rotation_range=5, \n                               zoom_range=1.2, \n                               shear_range=0.7, \n                               fill_mode='nearest')\ntest_IDG = ImageDataGenerator(rescale=1./255) # rescale is the same for train and test\n\nxy_train = train_IDG.flow_from_directory('../_data/brain/train', \n                                         target_size=(150, 150),\n                                         batch_size=5,\n                                         class_mode='binary')\nxy_test = test_IDG.flow_from_directory('../_data/brain/test', \n                                       target_size=(150, 150),\n                                       batch_size=5,\n                                       class_mode='binary')\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Dense, Conv2D, Flatten\n\ninput = Input((150, 150, 3))\nc = Conv2D(32, (2,2))(input)\nf = Flatten()(c)\noutput = Dense(1, activation='sigmoid')(f)\n\nmodel = Model(inputs=input, outputs=output)\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\nhist = model.fit_generator(xy_train, epochs=50, steps_per_epoch=32, # 160 / 5 = 32\n                    validation_data=xy_test, validation_steps=4)\nacc = hist.history['acc']\nval_acc = hist.history['val_acc']\nloss = hist.history['loss']\nval_loss = hist.history['val_loss']\n\nprint('acc =', acc[-1])\nprint('val_acc = ', val_acc[-1])\n\n'''\nacc = 0.643750011920929\nval_acc = 0.75\n'''\n","sub_path":"keras01/keras59_2_fit_generator.py","file_name":"keras59_2_fit_generator.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"54416254","text":"import pandas\nimport os\nimport glob\nimport time\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\nfrom email.mime.text import MIMEText\nimport smtplib\n\n# importing environment variables to connect with email\n\ntry:\n    login = os.getenv('EMAIL_LOGIN')\n    password = os.getenv('EMAIL_PASS')\nexcept Exception as e:\n    print(f'You need to add environment variables EMAIL_LOGIN and EMAIL_PASS. 
Error: {e}')\n\n# checking connection with your email server\n\ntry:\n    server = smtplib.SMTP('smtp.gmail.com', 587)\n    server.ehlo()\n    server.starttls()\n    server.login(login, password)\nexcept Exception as e:\n    print(f'Something went wrong: {e}')\n\n# importing the CSV file\n\ntry:\n    df = pandas.read_csv('Untitled 1.csv')\nexcept Exception as e:\n    print(f'Incorrect data download from the file: {e}')\n\n# renaming the .png file in the script location to name_surname\n\ndef change_attachment_name(name, surname):\n    try:\n        filename = glob.glob('*.png')[0]\n        os.rename(filename, f'{name}_{surname}.png')\n    except Exception as e:\n        print(f'File does not exist: {e}')\n\n# sending content\n\ndef sending_email(html):\n    for i in range(0, len(df['imie_nazwisko'])):\n        imie_nazwisko = df['imie_nazwisko'][i]\n        first, last = imie_nazwisko.split()\n        email = df['email'][i]\n        message = f\"Hi {first}! It's a file generated for you\"\n        change_attachment_name(first, last)\n        msg = MIMEMultipart()\n        img_data = MIMEImage(open(glob.glob('*.png')[0], 'rb').read())\n        img_data.add_header('Content-Disposition', \"attachment; filename= %s\" % glob.glob('*.png')[0])\n        msg.attach(img_data)\n        messege1 = MIMEText(message, 'plain')\n        html1 = MIMEText(html, 'html')\n        msg.attach(messege1)\n        msg.attach(html1)\n        msg['Subject'] = 'Your image'\n        server.sendmail(login, email, msg.as_string())\n        time.sleep(2)\n    server.quit()\n\n\nhtml = \"\"\"
    Hi!
    How are you?
    Here is the link you wanted.
    \n \n\n\"\"\"\n\n\nsending_email(html)\n\n\n\n","sub_path":"day_3_email_send/email_send.py","file_name":"email_send.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"222140318","text":"from lib.factory.Loader import Loader as LoaderFactory\nfrom lib.parser.map.google.GMapFactory import GMapFactory as MapFactory\nfrom lib.config.Yaml import Yaml as Config\n\n\nconfig = Config('../../../config/config.yml')\n\nloader = LoaderFactory.loader_gmaps_with_cache(config.get('googlemaps'), config.get('mongodb'))\n\naddress = 'France, Paris'\n\naddress_content = loader.by_address(address=address)\n\nprint('.' if len(address_content) else 'E', end='')\n\nobjects = MapFactory.france(address_content)\n\nprint('.' if len(objects) else 'E', end='')\n","sub_path":"tests/gmaps/france/parsing_by_address.py","file_name":"parsing_by_address.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"77967141","text":"# -*- coding: utf-8 -*-\nimport pygame, sys, os\nfrom pygame.locals import *\nfrom game import *\nfrom about import *\n\nif not pygame.display.get_init():\n\tpygame.display.init()\n\nif not pygame.font.get_init():\n\tpygame.font.init()\n\n\nclass Menu(object):\n\n\tlegacy_list = []\n\tfields = []\n\tfont_size = 32\n\tfont_path = 'data/font/coders_crux.ttf'\n\tfont = pygame.font.Font\n\tdest_surface = pygame.Surface\n\tfields_quantity = 0\n\tbackground_color = (51, 51, 51)\n\ttext_color = (255, 255, 153)\n\tselection_color = (153, 102, 255)\n\tselection_position = 0\n\tpaste_position = (0, 0)\n\tmenu_width = 0\n\tmenu_height = 0\n\n\tclass Pole(object):\n\t\ttext = ''\n\t\tfield = pygame.Surface\n\t\tfield_rect = pygame.Rect\n\t\tselection_rect = pygame.Rect\n\t\tpygame.display.set_caption('Arkanoid')\n\n\tdef move_menu(self, top, left):\n\t\tself.paste_position = (top, left)\n\n\tdef set_colors(self, text, selection, background):\n\t\tself.background_color = background\n\t\tself.text_color = text\n\t\tself.selection_color = selection\n\n\tdef set_fontsize(self, font_size):\n\t\tself.font_size = font_size\n\n\tdef set_font(self, path):\n\t\tself.font_path = path\n\n\tdef get_position(self):\n\t\treturn self.selection_position\n\n\tdef init(self, legacy_list, dest_surface):\n\t\tself.legacy_list = legacy_list\n\t\tself.dest_surface = dest_surface\n\t\tself.fields_quantity = len(self.legacy_list)\n\t\tself.create_structure()\n\n\tdef draw(self, move=0):\n\t\tif move:\n\t\t\tself.selection_position += move\n\t\t\tif self.selection_position == -1:\n\t\t\t\tself.selection_position = self.fields_quantity - 1\n\t\t\tself.selection_position %= self.fields_quantity\n\t\tmenu = pygame.Surface((self.menu_width, self.menu_height))\n\t\tmenu.fill(self.background_color)\n\t\tselection_rect = self.fields[self.selection_position].zaznaczenie_rect\n\t\tpygame.draw.rect(menu, self.selection_color, selection_rect)\n\n\t\tfor i in xrange(self.fields_quantity):\n\t\t\tmenu.blit(self.fields[i].pole, self.fields[i].pole_rect)\n\t\tself.dest_surface.blit(menu, self.paste_position)\n\t\treturn self.selection_position\n\n\tdef create_structure(self):\n\t\tself.menu_height = 0\n\t\tself.font = pygame.font.Font(self.font_path, self.font_size)\n\t\tfor i in xrange(self.fields_quantity):\n\t\t\tself.fields.append(self.Pole())\n\t\t\tself.fields[i].tekst = self.legacy_list[i]\n\t\t\tself.fields[i].pole = 
self.font.render(\n\t\t\t\tself.fields[i].tekst,\n\t\t\t\t1,\n\t\t\t\tself.text_color\n\t\t\t)\n\n\t\t\tself.fields[i].pole_rect = self.fields[i].pole.get_rect()\n\t\t\tmove = int(self.font_size * 0.2)\n\n\t\t\theight = self.fields[i].pole_rect.height\n\t\t\tself.fields[i].pole_rect.left = move\n\t\t\tself.fields[i].pole_rect.top = move + (move * 2 + height) * i\n\n\t\t\twidth = self.fields[i].pole_rect.width + move * 2\n\t\t\theight = self.fields[i].pole_rect.height + move * 2\n\t\t\tleft = self.fields[i].pole_rect.left - move\n\t\t\ttop = self.fields[i].pole_rect.top - move\n\n\t\t\tself.fields[i].zaznaczenie_rect = (left, top, width, height)\n\t\t\tif width > self.menu_width:\n\t\t\t\t\tself.menu_width = width\n\t\t\tself.menu_height += height\n\t\tx = self.dest_surface.get_rect().centerx - self.menu_width / 2\n\t\ty = self.dest_surface.get_rect().centery - self.menu_height / 2\n\t\tmx, my = self.paste_position\n\t\tself.paste_position = (x+mx, y+my)\n\ndef letsgo():\n\t# 0,6671875 and 0,(6) of HD resolution\n\tsurface = pygame.display.set_mode((854, 480))\n\tsurface.fill((51, 51, 51))\n\tmenu = Menu() # necessary\n\t#menu.set_colors((255,255,255), (0,0,255), (0,0,0)) # optional\n\tmenu.set_fontsize(64) # optional\n\t#menu.set_font('data/couree.fon') # optional\n\t#menu.move_menu(100, 99) # optional\n\tmenu.init(['Start', 'About', 'Exit'], surface) # necessary\n\t#menu.move_menu(0, 0) # optional\n\tmenu.draw() # necessary\n\n\tpygame.key.set_repeat(199, 69) # (delay,interval)\n\tpygame.display.update()\n\twhile 1:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == KEYDOWN:\n\t\t\t\tif event.key == K_UP:\n\t\t\t\t\t# here is the Menu class method\n\t\t\t\t\tmenu.draw(-1)\n\t\t\t\tif event.key == K_DOWN:\n\t\t\t\t\t# here is the Menu class method\n\t\t\t\t\tmenu.draw(1)\n\t\t\t\tif event.key == K_RETURN:\n\t\t\t\t\tif menu.get_position() == 0:\n\t\t\t\t\t\tgame()\n\t\t\t\t\tif menu.get_position() == 1:\n\t\t\t\t\t\tabout()\n\t\t\t\t\tif menu.get_position() == 2:\n\t\t\t\t\t\tpygame.display.quit()\n\t\t\t\t\t\tsys.exit()\n\t\t\t\tif event.key == K_ESCAPE:\n\t\t\t\t\tpygame.display.quit()\n\t\t\t\t\tsys.exit()\n\t\t\t\tpygame.display.update()\n\t\t\telif event.type == QUIT:\n\t\t\t\tpygame.display.quit()\n\t\t\t\tsys.exit()\n\t\tpygame.time.wait(8)\n\nif __name__ == \"__main__\":\n\tletsgo()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"101136077","text":"from __future__ import (absolute_import, division, print_function)\n\nimport os\nimport subprocess\n\nfrom ranger.api.commands import Command\n\n\nclass fzf_select(Command):\n \"\"\"\n :fzf_select\n\n Find a file using fzf.\n\n With a prefix argument select only directories.\n\n See:\n - https://github.com/junegunn/fzf\n - https://www.youtube.com/watch?v=C64LKCZFzME\n - https://github.com/gotbletu/shownotes/blob/master/ranger_file_locate_fzf.md\n \"\"\"\n\n def execute(self):\n command = r\"find . 
-path '*/**' -print 2> /dev/null | cut -b3- | fzf +m\"\n fzf = self.fm.execute_command(command, stdout=subprocess.PIPE)\n stdout, _ = fzf.communicate()\n if fzf.returncode == 0:\n fzf_file = os.path.abspath(stdout.decode('utf-8').rstrip('\\n'))\n if os.path.isdir(fzf_file):\n self.fm.cd(fzf_file)\n else:\n self.fm.select_file(fzf_file)\n","sub_path":"templates/dotfiles/shared/.config/ranger/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"402988080","text":"\"\"\"メイン画面\"\"\"\nimport os # パスを操作するモジュール\nimport sys # パスを読み込むモジュール\nsys.path.append(os.path.abspath(os.path.join('..'))) # 自作モジュールのパス指定\n# ライブラリ\nimport tkinter\nfrom tkinter import ttk\nimport time #価格取得を繰り返す為\nimport sqlite3 #DBへの追加時のエラーを取得する為\n\nfrom app.module.exchangess import bitbank\n#from app.module.exchangess import binance\nfrom app.module.sns import line\n\n\n# startボタンを押したときの処理\ndef changePage(page):\n # MainPageを上位層にする\n page.tkraise()\n\ndef main() -> None:\n # インスタンス生成\n window = tkinter.Tk()\n\n # ウィンドウタイトルを決定\n window.title(\"メインメニュー\")\n\n # ウィンドウの大きさを決定\n window.geometry(\"1500x1000\")\n\n # ウィンドウのグリッドを 1x1 にする\n window.grid_rowconfigure(0, weight=1)\n window.grid_columnconfigure(0, weight=1)\n\n #-----------------------------------StartPage---------------------------------\n ### StartPage用のFrameを生成\n startPage = ttk.Frame(window)\n startTest = tkinter.Frame(startPage, bg='black', width=300, height=200, bd=30, relief=\"groove\")\n backPage = tkinter.Frame(startPage, bg='black', height=400, width=330, bd=10, relief=\"ridge\")\n bidPage = tkinter.Frame(startPage, bg='black', height=175, width=330, bd=10, relief=\"ridge\")\n askPage = tkinter.Frame(startPage, bg='black', height=175, width=330, bd=10, relief=\"ridge\")\n tablePage = tkinter.Frame(startPage, bg='black', width=1010, height=400, bd=15, relief=\"sunken\")\n\n PointFont = (\"Helevetice\", 14)\n PointFont2 = (\"\", 11)\n\n ### ボタン表示\n # APIキー登録ボタン生成\n startButton =\\\n tkinter.Button(startTest, width=31, height=3, text=\"取引所APIキー登録画面\", font=PointFont, command=lambda : changePage(mainPage))\n\n #expand 親に合わせて変化 fill frameの空きスペースを埋めるか\n startButton.pack(side=\"left\", expand=1, fill=\"both\")\n\n # SNSボタン生成\n lineButton =\\\n tkinter.Button(startTest, width=31, height=3, text=\"SNS登録\", font=PointFont, command=lambda : changePage(snsPage))\n\n lineButton.pack(side=\"left\", expand=1, fill=\"both\")\n\n # 設定ボタン生成\n ConfigButton =\\\n tkinter.Button(startTest, width=31, height=3, text=\"設定\", font=PointFont, command=lambda : changePage(configPage))\n\n ConfigButton.pack(side=\"left\", expand=1, fill=\"both\")\n\n\n exchange = ttk.Label(backPage, text=u\"取引所\", foreground='white', background='black', font=PointFont)\n exchange.place(relx=0.1, rely=0.1)\n bid = ttk.Label(backPage, text=u'買値', foreground='white', background='black', font=PointFont)\n bid.place(relx=0.4, rely=0.1)\n ask = ttk.Label(backPage, text=u'売値', foreground='white', background='black', font=PointFont)\n ask.place(relx=0.7, rely=0.1)\n exchangeA = ttk.Label(backPage, text=u\"bitbank\", foreground='white', background='black', font=PointFont)\n exchangeA.place(relx=0.1, rely=0.2)\n exchangeB = ttk.Label(backPage, text=u\"binance\", foreground='white', background='black', font=PointFont)\n exchangeB.place(relx=0.1, rely=0.3)\n\n\n \"\"\"\n # bitbankから価格の取得\n exhanges, ask, bid = bitbank.BITBANK.currencyinformation('XRP')\n print(exchange, ask, bid)\n bitbank_ask 
= ttk.Label(backPage, text=ask, foreground='white', background='black', font=PointFont)\n bitbank_ask.place(relx=0.4, rely=0.2)\n bitbank_bid = ttk.Label(backPage, text=bid, foreground='white', background='black', font=PointFont)\n bitbank_bid.place(relx=0.7, rely=0.2)\n\n\n # binanceから価格の取得\n exchange2, ask2, bid2 = binance.BINANCE.xrp(0)\n print(exchange2, ask2, bid2)\n binance_ask = ttk.Label(backPage, text=ask2, foreground='white', background='black', font=PointFont)\n binance_ask.place(relx=0.4, rely=0.3)\n binance_bid = ttk.Label(backPage, text=bid2, foreground='white', background='black', font=PointFont)\n binance_bid.place(relx=0.7, rely=0.3)\n \"\"\"\n\n buy_order = ttk.Label(bidPage, text=u\"買い注文\", foreground='white', background='black', font=PointFont)\n buy_order.place(relx=0.4, rely=0.1)\n sell_order = ttk.Label(askPage, text=u\"売り注文\", foreground='white', background='black', font=PointFont)\n sell_order.place(relx=0.4, rely=0.1)\n\n\n #フレームを配置\n startPage.grid(row=0, column=0, sticky=\"nsew\")\n startTest.place(relx=0.01, rely=0.01)\n backPage.place(relx=0.75, rely=0.01)\n bidPage.place(relx=0.75, rely=0.5)\n askPage.place(relx=0.75, rely=0.75)\n tablePage.place(relx=0.01, rely=0.19)\n\n #-----------------------------------MainPage---------------------------------\n ### MainPage用のFrameを生成\n mainPage = tkinter.Frame(window)\n\n #別ファイルから読み込み実行\n #exec(open(\"./entry.py\",'r',encoding=\"utf-8\").read())\n side = (\"\", 32)\n\n TitlePage = tkinter.Frame(mainPage, bg='gray', width=1340, height=250, bd=10)\n MainPage = tkinter.Frame(mainPage, bg='gray', width=1340, height=450, bd=10)\n #Page = tkinter.Frame(startPage, bg='black', width=1010, height=400, bd=15, relief=\"sunken\")\n\n\n #以下、entry_screenを流用\n HEAD = tkinter.Label(TitlePage, text=u'API KEYS', foreground='white', background='gray', font=(\"\", 40))\n HEAD.place(relx=0.01, rely=0.01)\n CONTENT = tkinter.Label(TitlePage, text=u'各取引所からAPIキーを取得してください。', foreground='white', background='gray', font=(\"\", 25))\n CONTENT.place(relx=0.15, rely=0.3)\n\n EXCHANGES = tkinter.Label(MainPage, text=u'取引所', foreground='white', background='gray', font=side)\n EXCHANGES.place(relx=0.015, rely=0.01)\n\n APIK = tkinter.Label(MainPage, text=u'APIキー', foreground='white', background='gray', font=side)\n APIK.place(relx=0.3, rely=0.01)\n\n TOKEN = tkinter.Label(MainPage, text=u'トークンキー', foreground='white', background='gray', font=side)\n TOKEN.place(relx=0.6, rely=0.01)\n\n BANK = tkinter.Label(MainPage, text=u' bitbank ', foreground='white', background='gray', font=side, bd=25, relief=\"ridge\")\n BANK.place(relx=0.01, rely=0.2)\n BINA = tkinter.Label(MainPage, text=u' binance ', foreground='white', background='gray', font=side, bd=25, relief=\"ridge\")\n BINA.place(relx=0.01, rely=0.4)\n\n # エントリー (APIキーの値を入れる)\n BITBANK_API = tkinter.Entry(MainPage, width=29, bd=25, font=(\"\",20), relief=\"flat\")\n BITBANK_API.place(relx=0.2, rely=0.2)\n\n BINACE_API = tkinter.Entry(MainPage, width=29, bd=25, font=(\"\",20), relief=\"flat\")\n BINACE_API.place(relx=0.2, rely=0.4)\n\n # エントリー2 (トークンの値を入れる)\n BITBANK_TOKEN = tkinter.Entry(MainPage, width=29, bd=25, font=(\"\",20), relief=\"flat\")\n BITBANK_TOKEN.place(relx=0.55, rely=0.2)\n\n BINACE_TOKEN = tkinter.Entry(MainPage, width=29, bd=25, font=(\"\",20), relief=\"flat\")\n BINACE_TOKEN.place(relx=0.55, rely=0.4)\n\n def bitbank_entry(self):\n API_value = BITBANK_API.get()\n TOKEN_value = BITBANK_TOKEN.get()\n bitbank.BITBANK.registration(\"BITBANK\", API_value, TOKEN_value)\n\n\n BUTTON_BITBANK 
= tkinter.Button(MainPage, text=u'登録', foreground='white', background='gray', font=side)\n    BUTTON_BITBANK.place(relx=0.9, rely=0.2)\n    BUTTON_BITBANK.bind(\"<Button-1>\", bitbank_entry)  # event sequence assumed: <Button-1> (left click), here and in the binds below\n\n    def binance_entry(self):\n        API_value = BINACE_API.get()\n        TOKEN_value = BINACE_TOKEN.get()\n        #binance.BINANCE.registration(\"BINANCE\", API_value, TOKEN_value)\n\n    BUTTON_BINANCE = tkinter.Button(MainPage, text=u'登録', foreground='white', background='gray', font=side)\n    BUTTON_BINANCE.place(relx=0.9, rely=0.4)\n    BUTTON_BINANCE.bind(\"<Button-1>\", binance_entry)\n\n\n    MAIN_MENU = tkinter.Button(MainPage, width=30, height=1, text=\" 戻る \", command=lambda : changePage(startPage), font=(\"\",24))\n    MAIN_MENU.place(relx=0.3, rely=0.7)\n\n    # MainPageを配置\n    mainPage.grid(row=0, column=0, sticky=\"nsew\")\n    TitlePage.place(relx=0.01, rely=0.01)\n    MainPage.place(relx=0.01, rely=0.3)\n\n\n\n    # StartPageを上位層にする\n    startPage.tkraise()\n\n\n#----snsPage------------------------------------\n\n    # SNS登録のフレーム\n    snsPage = ttk.Frame(window)\n\n    titlePage = tkinter.Frame(snsPage, bg='gray', width=1340, height=250, bd=10)\n    SnsPage = tkinter.Frame(snsPage, bg='gray', width=1340, height=450, bd=10)\n\n    side = (\"\", 32)\n\n    #以下、line_screenから流用\n    HEAD = tkinter.Label(titlePage, text=u'API KEYS', foreground='white', background='gray', font=(\"\", 40))\n    HEAD.place(relx=0.01, rely=0.01)\n    CONTENT = tkinter.Label(titlePage, text=u'各SNSからAPIキーを取得してください。', foreground='white', background='gray', font=(\"\", 25))\n    CONTENT.place(relx=0.15, rely=0.3)\n\n    EXCHANGES = tkinter.Label(SnsPage, text=u'SNS', foreground='white', background='gray', font=side)\n    EXCHANGES.place(relx=0.015, rely=0.01)\n\n    APIK = tkinter.Label(SnsPage, text=u'APIキー', foreground='white', background='gray', font=side)\n    APIK.place(relx=0.3, rely=0.01)\n\n    SNS = tkinter.Label(SnsPage, text=u'LINE', foreground='white', background='gray', font=side, bd=25, relief=\"ridge\")\n    SNS.place(relx=0.01, rely=0.2)\n    PLANS = tkinter.Label(SnsPage, text=u'予定', foreground='white', background='gray', font=side, bd=25, relief=\"ridge\")\n    PLANS.place(relx=0.01, rely=0.4)\n\n    # エントリー2 (トークンの値を入れる)\n    LINE_TOKEN = tkinter.Entry(SnsPage, width=29, bd=25, font=(\"\",20), relief=\"flat\")\n    LINE_TOKEN.place(relx=0.3, rely=0.2)\n\n    PLANS_TOKEN = tkinter.Entry(SnsPage, width=29, bd=25, font=(\"\",20), relief=\"flat\")\n    PLANS_TOKEN.place(relx=0.3, rely=0.4)\n\n    def line_entry(self):\n        line_value = LINE_TOKEN.get()\n        line.LINE.registration(\"LINE\", line_value)\n\n    BUTTON_LINE = tkinter.Button(SnsPage, text=u'登録', foreground='white', background='gray', font=side)\n    BUTTON_LINE.place(relx=0.9, rely=0.2)\n    BUTTON_LINE.bind(\"<Button-1>\", line_entry)\n\n    BUTTON_PLANS = tkinter.Button(SnsPage, text=u'登録', foreground='white', background='gray', font=side)\n    BUTTON_PLANS.place(relx=0.9, rely=0.4)\n\n\n    LINE_Button = tkinter.Button(SnsPage, width=30, height=1, text=\" 戻る \", command=lambda: changePage(startPage), font=(\"\", 24))\n    LINE_Button.place(relx=0.3, rely=0.7)\n\n    # MainPageを配置\n    snsPage.grid(row=0, column=0, sticky=\"nsew\")\n    titlePage.place(relx=0.01, rely=0.01)\n    SnsPage.place(relx=0.01, rely=0.3)\n\n    # StartPageを上位層にする\n    startPage.tkraise()\n\n\n    # プログラムを始める\n    window.mainloop()\n\n\n# メイン\nif __name__ == '__main__':\n    main()","sub_path":"app/main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":10808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"198479650","text":"\"\"\"\nClass for managing application settings\n\n    Nordschleife 
(Tourist)\n Lap started at position: 0,9525604248046875\n Lap finished at position: 0,8690996170043945\n\"\"\"\nimport ac\nfrom lib import Tracks\nfrom lib import Constants\nfrom lib import Globals\n\n# Inside My Documents directory.\nDIR_PARTS = ('Assetto Corsa', 'plugins', 'Sectors')\n\n\ndef log(msg):\n ac.console(Constants.APP_NAME + \" - %s\" % msg)\n ac.log(Constants.APP_NAME + \" - %s\" % msg)\n\n\ndef logEx(ex):\n log(\"ERROR -> {}\".format(type(ex)))\n log(\"ERROR -> {}\".format(ex.args))\n log(\"ERROR -> {}\".format(ex))\n\n\ntry:\n import os\n import json\nexcept ImportError as ex:\n os = None\n json = None\n raise Exception(\"Sectors - Error importing 'os' module: {}\".format(ex))\n\n\ndef GetMyDocumentsPath():\n try:\n import winreg\n\n folder_redirection = winreg.OpenKey(winreg.HKEY_CURRENT_USER,\n r'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders')\n return winreg.QueryValueEx(folder_redirection, 'Personal')[0]\n except Exception as ex:\n logEx(ex)\n return '/tmp/'\n\n\nclass Settings(object):\n def __init__(self):\n \"\"\"\n :rtype: Settings\n\n \"\"\"\n log(\"Settings -> Initializing\")\n self.supportedTracks = [Tracks.NordschleifeTourist.NAME]\n self.currentTrack = None\n self.checkValidTrack()\n\n self.CarName = ac.getCarName(0)\n self.configFileName = \"record-{}-{}.json\".format(self.currentTrack, self.CarName)\n log(\"Config file name set to: {}\".format(self.configFileName))\n\n def checkValidTrack(self):\n track = ac.getTrackName(0)\n log(\" Checking track: %s\" % track)\n\n for count in range(len(self.supportedTracks)):\n supportedTrack = self.supportedTracks[count]\n\n if track == supportedTrack:\n Globals.IS_TRACK_SUPPORTED = True\n self.currentTrack = supportedTrack\n log(\" Activating app for track: {}\".format(self.currentTrack))\n break\n\n def getConfigFolderPath(self):\n log(\"Returning config folder path:\")\n path = os.path.join(GetMyDocumentsPath(), *DIR_PARTS)\n os.makedirs(path, exist_ok=True)\n return path\n\n def getConfigFileName(self):\n log(\"Returning config file name:\")\n path = self.getConfigFolderPath()\n return os.path.join(path, self.configFileName)\n\n def SaveRecord(self, lap):\n log(\"Saving record lap to disk\")\n file_name = self.getConfigFileName()\n\n try:\n with open(file_name, 'w') as fp:\n log(\" Writing json: {}\".format(file_name))\n lap_dict = lap.ToDict(self.CarName)\n log(\" Dumping lapDict to json\")\n dump = json.dumps(lap_dict, indent=2, separators=(',', ': '))\n log(\" Dumptype: {}\".format(type(dump)))\n log(\" Value: {}\".format(dump))\n\n fp.write(dump)\n except Exception as ex_inner:\n logEx(ex_inner)\n\n def load_record(self):\n \"\"\"\n\n :return: Dictionary with sector times\n :rtype: dict\n \"\"\"\n log(\"Loading record lap from disk\")\n file_name = self.getConfigFileName()\n\n lap_dict = {}\n\n if not os.path.isfile(file_name):\n log(\" No record json found: {}\".format(file_name))\n return lap_dict\n\n log(\" Assuming that file exists\")\n try:\n log(\"Trying to open file\")\n with open(file_name, 'r') as f:\n log(\" Parsing json: {}\".format(file_name))\n lap_dict = json.loads(f.read())\n except Exception as ex_inner:\n logEx(ex_inner)\n\n return lap_dict\n","sub_path":"lib/Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"536969452","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\nfrom scipy import stats\nimport 
pickle\nimport json\nfrom numpy.random import RandomState\nimport argparse\nimport multiprocessing as mp\n\n\nnp.set_printoptions(suppress=True)\nnp.set_printoptions(precision=3)\n\nnp.random.seed(0)\n\nparser = argparse.ArgumentParser(description='Random Games of Skill form DPP')\nparser.add_argument('--dim', type=int, default=1000)\nparser.add_argument('--nb_iters', type=int, default=200)\n\nargs = parser.parse_args()\n\nLR = 0.5\nTH = 0.03\n\nexpected_card = []\nsizes = []\n\ntime_string = time.strftime(\"%Y%m%d-%H%M%S\")\nPATH_RESULTS = os.path.join('results', time_string + '_' + str(args.dim))\nif not os.path.exists(PATH_RESULTS):\n os.makedirs(PATH_RESULTS)\n\n\n# Search over the pure strategies to find the BR to a strategy\ndef get_br_to_strat(strat, payoffs=None, verbose=False):\n row_weighted_payouts = strat @ payoffs\n br = np.zeros_like(row_weighted_payouts)\n br[np.argmin(row_weighted_payouts)] = 1\n if verbose:\n print(row_weighted_payouts[np.argmin(row_weighted_payouts)], \"exploitability\")\n return br\n\n\n# Fictituous play as a nash equilibrium solver\ndef fictitious_play(iters=2000, payoffs=None, verbose=False):\n dim = payoffs.shape[0]\n pop = np.random.uniform(0, 1, (1, dim))\n pop = pop / pop.sum(axis=1)[:, None]\n averages = pop\n exps = []\n for i in range(iters):\n average = np.average(pop, axis=0)\n br = get_br_to_strat(average, payoffs=payoffs)\n exp1 = average @ payoffs @ br.T\n exp2 = br @ payoffs @ average.T\n exps.append(exp2 - exp1)\n # if verbose:\n # print(exp, \"exploitability\")\n averages = np.vstack((averages, average))\n pop = np.vstack((pop, br))\n return averages, exps\n\n\n# Solve exploitability of a nash equilibrium over a fixed population\ndef get_exploitability(pop, payoffs, iters=1000):\n emp_game_matrix = pop @ payoffs @ pop.T\n averages, _ = fictitious_play(payoffs=emp_game_matrix, iters=iters)\n strat = averages[-1] @ pop # Aggregate\n test_br = get_br_to_strat(strat, payoffs=payoffs)\n exp1 = strat @ payoffs @ test_br.T\n exp2 = test_br @ payoffs @ strat\n return exp2 - exp1\n\n\ndef joint_loss(pop, payoffs, meta_nash, k, lambda_weight, lr):\n dim = payoffs.shape[0]\n\n br = np.zeros((dim,))\n values = []\n cards = []\n for i in range(dim):\n br_tmp = np.zeros((dim, ))\n br_tmp[i] = 1.\n\n aggregated_enemy = meta_nash @ pop[:k]\n value = br_tmp @ payoffs @ aggregated_enemy.T\n\n pop_k = lr * br_tmp + (1 - lr) * pop[k]\n pop_tmp = np.vstack((pop[:k], pop_k))\n M = pop_tmp @ payoffs @ pop_tmp.T\n metanash_tmp, _ = fictitious_play(payoffs=M, iters=1000)\n # L = np.diag(metanash_tmp[-1]) @ M @ M.T @ np.diag(metanash_tmp[-1])\n L = M @ M.T\n l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))\n\n cards.append(l_card)\n values.append(value)\n\n if np.random.randn() < lambda_weight:\n br[np.argmax(values)] = 1\n else:\n br[np.argmax(cards)] = 1\n\n return br\n\n\ndef psro_steps(iters=5, payoffs=None, verbose=False, seed=0,\n num_learners=4, improvement_pct_threshold=.03, lr=.2, loss_func='dpp', full=False):\n dim = payoffs.shape[0]\n\n r = np.random.RandomState(seed)\n pop = r.uniform(0, 1, (1 + num_learners, dim))\n pop = pop / pop.sum(axis=1)[:, None]\n exp = get_exploitability(pop, payoffs, iters=1000)\n exps = [exp]\n\n M = pop @ payoffs @ pop.T\n L = M @ M.T\n l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))\n l_cards = [l_card]\n\n learner_performances = [[.1] for i in range(num_learners + 1)]\n for i in range(iters):\n # Define the weighting towards diversity as a function of the fixed 
population size, this is currently a hyperparameter\n lambda_weight = 0.85\n if i % 5 == 0:\n print('iteration: ', i, ' exp full: ', exps[-1])\n print('size of pop: ', pop.shape[0])\n\n for j in range(num_learners):\n # first learner (when j=num_learners-1) plays against normal meta Nash\n # second learner plays against meta Nash with first learner included, etc.\n k = pop.shape[0] - j - 1\n emp_game_matrix = pop[:k] @ payoffs @ pop[:k].T\n meta_nash, _ = fictitious_play(payoffs=emp_game_matrix, iters=1000)\n population_strategy = meta_nash[-1] @ pop[:k] # aggregated enemy according to nash\n\n if loss_func == 'br':\n # standard PSRO\n br = get_br_to_strat(population_strategy, payoffs=payoffs)\n else:\n # Diverse PSRO\n br = joint_loss(pop, payoffs, meta_nash[-1], k, lambda_weight, lr)\n br_orig = get_br_to_strat(population_strategy, payoffs=payoffs)\n\n # Update the mixed strategy towards the pure strategy which is returned as the best response to the\n # nash equilibrium that is being trained against.\n pop[k] = lr * br + (1 - lr) * pop[k]\n performance = pop[k] @ payoffs @ population_strategy.T + 1 # make it positive for pct calculation\n learner_performances[k].append(performance)\n\n # if the first learner plateaus, add a new policy to the population\n if j == num_learners - 1 and performance / learner_performances[k][-2] - 1 < improvement_pct_threshold:\n learner = np.random.uniform(0, 1, (1, dim))\n learner = learner / learner.sum(axis=1)[:, None]\n pop = np.vstack((pop, learner))\n learner_performances.append([0.1])\n\n # calculate exploitability for meta Nash of whole population\n exp = get_exploitability(pop, payoffs, iters=1000)\n exps.append(exp)\n\n M = pop @ payoffs @ pop.T\n L = M @ M.T\n l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))\n l_cards.append(l_card)\n\n return pop, exps, l_cards\n\n\n# Define the self-play algorithm\ndef self_play_steps(iters=10, payoffs=None, verbose=False, improvement_pct_threshold=.03, lr=.2, seed=0):\n dim = payoffs.shape[0]\n r = np.random.RandomState(seed)\n pop = r.uniform(0, 1, (2, dim))\n pop = pop / pop.sum(axis=1)[:, None]\n exp = get_exploitability(pop, payoffs, iters=1000)\n exps = [exp]\n performances = [.01]\n\n M = pop @ payoffs @ pop.T\n L = M@M.T\n l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))\n l_cards = [l_card]\n\n for i in range(iters):\n if i % 10 == 0:\n print('iteration: ', i, 'exploitability: ', exps[-1])\n br = get_br_to_strat(pop[-2], payoffs=payoffs)\n pop[-1] = lr * br + (1 - lr) * pop[-1]\n performance = pop[-1] @ payoffs @ pop[-2].T + 1\n performances.append(performance)\n if performance / performances[-2] - 1 < improvement_pct_threshold:\n learner = np.random.uniform(0, 1, (1, dim))\n learner = learner / learner.sum(axis=1)[:, None]\n pop = np.vstack((pop, learner))\n exp = get_exploitability(pop, payoffs, iters=1000)\n exps.append(exp)\n\n M = pop @ payoffs @ pop.T\n L = M @ M.T\n l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))\n l_cards.append(l_card)\n\n return pop, exps, l_cards\n\n\n# Define the PSRO rectified nash algorithm\ndef psro_rectified_steps(iters=10, payoffs=None, verbose=False, eps=1e-2, seed=0,\n num_start_strats=1, num_pseudo_learners=4, lr=0.3, threshold=0.001):\n dim = payoffs.shape[0]\n r = np.random.RandomState(seed)\n pop = r.uniform(0, 1, (num_start_strats, dim))\n pop = pop / pop.sum(axis=1)[:, None]\n exp = get_exploitability(pop, payoffs, iters=1000)\n exps = [exp]\n counter = 0\n\n M = pop @ 
payoffs @ pop.T\n L = M @ M.T\n l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))\n l_cards = [l_card]\n\n while counter < iters * num_pseudo_learners:\n if counter % (5 * num_pseudo_learners) == 0:\n print('iteration: ', int(counter / num_pseudo_learners), ' exp: ', exps[-1])\n print('size of population: ', pop.shape[0])\n\n new_pop = np.copy(pop)\n emp_game_matrix = pop @ payoffs @ pop.T\n averages, _ = fictitious_play(payoffs=emp_game_matrix, iters=iters)\n\n # go through all policies. If the policy has positive meta Nash mass,\n # find policies it wins against, and play against meta Nash weighted mixture of those policies\n for j in range(pop.shape[0]):\n if counter > iters * num_pseudo_learners:\n return pop, exps, l_cards\n # if positive mass, add a new learner to pop and update it with steps, submit if over thresh\n # keep track of counter\n if averages[-1][j] > eps:\n # create learner\n learner = np.random.uniform(0, 1, (1, dim))\n learner = learner / learner.sum(axis=1)[:, None]\n new_pop = np.vstack((new_pop, learner))\n idx = new_pop.shape[0] - 1\n\n current_performance = 0.02\n last_performance = 0.01\n while current_performance / last_performance - 1 > threshold:\n counter += 1\n mask = emp_game_matrix[j, :]\n mask[mask >= 0] = 1\n mask[mask < 0] = 0\n weights = np.multiply(mask, averages[-1])\n weights /= weights.sum()\n strat = weights @ pop\n br = get_br_to_strat(strat, payoffs=payoffs)\n new_pop[idx] = lr * br + (1 - lr) * new_pop[idx]\n last_performance = current_performance\n current_performance = new_pop[idx] @ payoffs @ strat + 1\n\n if counter % num_pseudo_learners == 0:\n # count this as an 'iteration'\n\n # exploitability\n exp = get_exploitability(new_pop, payoffs, iters=1000)\n exps.append(exp)\n\n M = pop @ payoffs @ pop.T\n L = M @ M.T\n l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))\n l_cards.append(l_card)\n\n pop = np.copy(new_pop)\n\n return pop, exps, l_cards\n\n\ndef run_experiment(param_seed):\n params, seed = param_seed\n iters = params['iters']\n num_threads = params['num_threads']\n dim = params['dim']\n lr = params['lr']\n thresh = params['thresh']\n\n psro = params['psro']\n pipeline_psro = params['pipeline_psro']\n dpp_psro = params['dpp_psro']\n rectified = params['rectified']\n self_play = params['self_play']\n\n psro_exps = []\n psro_cardinality = []\n pipeline_psro_exps = []\n pipeline_psro_cardinality = []\n dpp_psro_exps = []\n dpp_psro_cardinality = []\n rectified_exps = []\n rectified_cardinality = []\n self_play_exps = []\n self_play_cardinality = []\n\n print('Experiment: ', seed + 1)\n np.random.seed(seed)\n W = np.random.randn(dim, dim)\n S = np.random.randn(dim, 1)\n payoffs = 0.5 * (W - W.T) + S - S.T\n payoffs /= np.abs(payoffs).max() \n\n if psro:\n print('PSRO')\n pop, exps, cards = psro_steps(iters=iters, num_learners=1, seed=seed+1,\n improvement_pct_threshold=thresh, lr=lr,\n payoffs=payoffs, loss_func='br')\n psro_exps = exps\n psro_cardinality = cards\n if pipeline_psro:\n print('Pipeline PSRO')\n pop, exps, cards = psro_steps(iters=iters, num_learners=num_threads, seed=seed+1,\n improvement_pct_threshold=thresh, lr=lr,\n payoffs=payoffs, loss_func='br')\n pipeline_psro_exps = exps\n pipeline_psro_cardinality = cards\n if dpp_psro:\n print('DPP')\n pop, exps, cards = psro_steps(iters=iters, num_learners=num_threads, seed=seed+1,\n improvement_pct_threshold=thresh, lr=lr,\n payoffs=payoffs, loss_func='dpp')\n dpp_psro_exps = exps\n dpp_psro_cardinality = cards\n if 
rectified:\n print('Rectified')\n pop, exps, cards = psro_rectified_steps(iters=iters, num_pseudo_learners=num_threads, payoffs=payoffs, seed=seed+1,\n lr=lr, threshold=thresh)\n rectified_exps = exps\n rectified_cardinality = cards\n if self_play:\n print('Self-play')\n pop, exps, cards = self_play_steps(iters=iters, payoffs=payoffs, improvement_pct_threshold=thresh, lr=lr, seed=seed+1)\n self_play_exps = exps\n self_play_cardinality = cards\n\n\n return {\n 'psro_exps': psro_exps,\n 'psro_cardinality': psro_cardinality,\n 'pipeline_psro_exps': pipeline_psro_exps,\n 'pipeline_psro_cardinality': pipeline_psro_cardinality,\n 'dpp_psro_exps': dpp_psro_exps,\n 'dpp_psro_cardinality': dpp_psro_cardinality,\n 'rectified_exps': rectified_exps,\n 'rectified_cardinality': rectified_cardinality,\n 'self_play_exps': self_play_exps,\n 'self_play_cardinality': self_play_cardinality,\n }\n\n\ndef run_experiments(num_experiments=1, iters=40, num_threads=20, dim=60, lr=0.6, thresh=0.001, logscale=True,\n psro=False,\n pipeline_psro=False,\n rectified=False,\n self_play=False,\n dpp_psro=False,\n ):\n\n params = {\n 'num_experiments': num_experiments,\n 'iters': iters,\n 'num_threads': num_threads,\n 'dim': dim,\n 'lr': lr,\n 'thresh': thresh,\n 'psro': psro,\n 'pipeline_psro': pipeline_psro,\n 'dpp_psro': dpp_psro,\n 'rectified': rectified,\n 'self_play': self_play,\n }\n\n psro_exps = []\n psro_cardinality = []\n pipeline_psro_exps = []\n pipeline_psro_cardinality = []\n dpp_psro_exps = []\n dpp_psro_cardinality = []\n rectified_exps = []\n rectified_cardinality = []\n self_play_exps = []\n self_play_cardinality = []\n\n with open(os.path.join(PATH_RESULTS, 'params.json'), 'w', encoding='utf-8') as json_file:\n json.dump(params, json_file, indent=4)\n\n pool = mp.Pool()\n result = pool.map(run_experiment, [(params, i) for i in range(num_experiments)])\n\n for r in result:\n psro_exps.append(r['psro_exps'])\n psro_cardinality.append(r['psro_cardinality'])\n pipeline_psro_exps.append(r['pipeline_psro_exps'])\n pipeline_psro_cardinality.append(r['pipeline_psro_cardinality'])\n dpp_psro_exps.append(r['dpp_psro_exps'])\n dpp_psro_cardinality.append(r['dpp_psro_cardinality'])\n rectified_exps.append(r['rectified_exps'])\n rectified_cardinality.append(r['rectified_cardinality'])\n self_play_exps.append(r['self_play_exps'])\n self_play_cardinality.append(r['self_play_cardinality'])\n\n d = {\n 'psro_exps': psro_exps,\n 'psro_cardinality': psro_cardinality,\n 'pipeline_psro_exps': pipeline_psro_exps,\n 'pipeline_psro_cardinality': pipeline_psro_cardinality,\n 'dpp_psro_exps': dpp_psro_exps,\n 'dpp_psro_cardinality': dpp_psro_cardinality,\n 'rectified_exps': rectified_exps,\n 'rectified_cardinality': rectified_cardinality,\n 'self_play_exps': self_play_exps,\n 'self_play_cardinality': self_play_cardinality,\n }\n pickle.dump(d, open(os.path.join(PATH_RESULTS, 'data.p'), 'wb'))\n\n def plot_error(data, label=''):\n data_mean = np.mean(np.array(data), axis=0)\n error_bars = stats.sem(np.array(data))\n plt.plot(data_mean, label=label)\n plt.fill_between([i for i in range(data_mean.size)],\n np.squeeze(data_mean - error_bars),\n np.squeeze(data_mean + error_bars), alpha=alpha)\n\n alpha = .4\n for j in range(2):\n fig_handle = plt.figure()\n\n if psro:\n if j == 0:\n plot_error(psro_exps, label='PSRO')\n elif j == 1:\n plot_error(psro_cardinality, label='PSRO')\n if pipeline_psro:\n if j == 0:\n plot_error(pipeline_psro_exps, label='P-PSRO')\n elif j == 1:\n plot_error(pipeline_psro_cardinality, label='P-PSRO')\n if 
rectified:\n if j == 0:\n length = min([len(l) for l in rectified_exps])\n for i, l in enumerate(rectified_exps):\n rectified_exps[i] = rectified_exps[i][:length]\n plot_error(rectified_exps, label='PSRO-rN')\n elif j == 1:\n length = min([len(l) for l in rectified_cardinality])\n for i, l in enumerate(rectified_cardinality):\n rectified_cardinality[i] = rectified_cardinality[i][:length]\n plot_error(rectified_cardinality, label='PSRO-rN')\n if self_play:\n if j == 0:\n plot_error(self_play_exps, label='Self-play')\n elif j == 1:\n plot_error(self_play_cardinality, label='Self-play')\n if dpp_psro:\n if j == 0:\n plot_error(dpp_psro_exps, label='Ours')\n elif j == 1:\n plot_error(dpp_psro_cardinality, label='Ours')\n\n\n plt.legend(loc=\"upper left\")\n plt.title('Dim {:d}'.format(args.dim))\n\n if logscale and (j==0):\n plt.yscale('log')\n\n plt.savefig(os.path.join(PATH_RESULTS, 'figure_'+ str(j) + '.pdf'))\n\n\nif __name__ == \"__main__\":\n run_experiments(num_experiments=10, num_threads=2, iters=args.nb_iters, dim=args.dim, lr=.5, thresh=TH,\n psro=True,\n pipeline_psro=True,\n rectified=True,\n self_play=True,\n dpp_psro=True,\n )\n\n\n","sub_path":"random_games_skill.py","file_name":"random_games_skill.py","file_ext":"py","file_size_in_byte":18355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"139749789","text":"# -*- coding: utf-8 -*-\n#################################################################################\n# Copyright (c) 2018-Present Webkul Software Pvt. Ltd. ()\n# You should have received a copy of the License along with this program.\n# If not, see \n#################################################################################\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass StockPicking(models.Model):\n _inherit = 'stock.picking'\n\n @api.model\n def get_picking_price(self, package_id):\n move_line_ids = self.env['stock.move.line'].search(\n [('result_package_id', '=', package_id.id)])\n return sum([x.qty_done * x.product_id.list_price for x in move_line_ids])\n\n @api.model\n def wk_update_package(self, package_id=None):\n if self.carrier_id.delivery_type not in ['base_on_rule', 'fixed']:\n packaging_id = package_id.packaging_id\n if package_id and (not packaging_id):\n packaging_id = self.carrier_id.packaging_id\n package_id.packaging_id = packaging_id.id\n amount = self.get_picking_price(package_id)\n package_id.cover_amount = packaging_id.get_cover_amount(amount)\n return True\n\n @api.multi\n def put_in_pack(self):\n self.ensure_one()\n cover=0\n carrier_id = self.carrier_id\n move_line_ids = [po for po in self.move_line_ids if po.qty_done > 0 and not po.result_package_id]\n total_weight = sum([po.qty_done * po.product_id.weight for po in move_line_ids])\n\n if carrier_id.packaging_id.cover_amount_option == 'fixed':\n cover = self.carrier_id.packaging_id.cover_amount\n else:\n cover_amount = sum([po.qty_done * po.product_id.lst_price for po in move_line_ids])\n cover = cover_amount\n \n if total_weight == 0 :\n shipping_weight = self.carrier_id.default_product_weight\n else:\n shipping_weight = total_weight\n\n\n res = super(StockPicking, self).put_in_pack()\n if res and (type(res) == dict):\n context = res.get('context') and res.get(\n 'context').copy() or dict()\n delivery_type = context.get('current_package_carrier_type')\n ctx = {\n 'no_description':\n not(delivery_type in ['fedex', 'dhl', 'ups', 
'auspost', 'canada_post']),\n 'no_cover_amount':\n not(delivery_type in [\n 'fedex', 'dhl', 'ups', 'usps', 'auspost', 'canada_post']),\n 'no_edt_document':\n not(delivery_type in ['fedex', 'ups']),\n 'current_package_picking_id': self.id,\n\n }\n \n if carrier_id and carrier_id.delivery_type not in ['base_on_rule', 'fixed']:\n ctx['default_delivery_packaging_id'] = self.carrier_id.packaging_id.id\n ctx['default_height'] = self.carrier_id.packaging_id.height\n ctx['default_width'] = self.carrier_id.packaging_id.width\n ctx['default_length'] = self.carrier_id.packaging_id.length\n ctx['default_cover_amount']=cover\n ctx['default_shipping_weight'] = shipping_weight\n context.update(ctx)\n res['context'] = context\n return res\n\n @api.one\n @api.depends('package_ids')\n def _compute_cover_amount(self):\n self.cover_amount = sum(self.package_ids.mapped('cover_amount'))\n\n label_genrated = fields.Boolean(string='Label Generated', copy=False)\n shipment_uom_id = fields.Many2one(related='carrier_id.uom_id', readonly=\"1\",\n help=\"Unit of measurement for use by Delivery method\", copy=False)\n\n date_delivery = fields.Date(string='Expected Date Of Delivery',\n help='Expected Date Of Delivery :The delivery time stamp provided by Shipment Service', copy=False, readonly=1)\n weight_shipment = fields.Float(\n string='Send Weight', copy=False, readonly=1)\n cover_amount = fields.Float(\n string='Cover Amount',\n compute='_compute_cover_amount',\n copy=False, readonly=1)\n\n @api.multi\n def action_cancel(self):\n for obj in self:\n if obj.label_genrated == True:\n raise ValidationError(\n 'Please cancel the shipment before canceling picking! ')\n return super(StockPicking, self).action_cancel()\n\n @api.multi\n def do_new_transfer(self):\n for pick in self:\n carrier_id = pick.carrier_id\n if carrier_id and (carrier_id.delivery_type not in ['base_on_rule', 'fixed']):\n if not len(pick.package_ids):\n raise ValidationError(\n 'Create the package first for picking %s before sending to shipper.' % (pick.name))\n return super(StockPicking, self).do_new_transfer()\n\n @api.multi\n def send_to_shipper(self):\n self.ensure_one()\n if self.carrier_id.delivery_type and (self.carrier_id.delivery_type not in ['base_on_rule', 'fixed']):\n if not len(self.package_ids):\n raise ValidationError(\n 'Create the package first for picking %s before sending to shipper.' 
% (self.name))\n else:\n # try:\n res = self.carrier_id.send_shipping(self)[0]\n self.carrier_price = res.get('exact_price')\n self.carrier_tracking_ref = res.get(\n 'tracking_number') and res.get('tracking_number').strip(',')\n self.label_genrated = True\n self.date_delivery = res.get('date_delivery')\n self.weight_shipment = float(res.get('weight'))\n msg = _(\"Shipment sent to carrier %s for expedition with tracking number %s\") % (\n self.carrier_id.delivery_type, self.carrier_tracking_ref)\n self.message_post(\n body=msg,\n subject=\"Attachments of tracking\",\n attachments=res.get('attachments')\n )\n # except Exception as e:\n # return self.carrier_id._shipping_genrated_message(e)\n\n @api.model\n def unset_fields_prev(self):\n self.carrier_tracking_ref = False\n self.carrier_price = False\n self.label_genrated = False\n self.date_delivery = False\n self.weight_shipment = False\n self.number_of_packages = False\n return True\n\n @api.multi\n def cancel_shipment(self):\n self.ensure_one()\n # try:\n if self.carrier_id.void_shipment:\n self.carrier_id.cancel_shipment(self)\n msg = \"Shipment of %s has been canceled\" % self.carrier_tracking_ref\n self.message_post(body=msg)\n self.unset_fields_prev()\n\n else:\n msg = 'Void Shipment not allowed, please contact your Admin to enable the Void Shipment for %s.' % (\n self.carrier_id.name)\n self.message_post(\n body=msg, subject=\"Not allowed to Void the Shipment.\")\n return self.carrier_id._shipping_genrated_message(msg)\n # except Exception as e:\n # return self.carrier_id._shipping_genrated_message(e)\n","sub_path":"addons/odoo_shipping_service_apps/models/stock_picking.py","file_name":"stock_picking.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"372186817","text":"# Copyright (c) 2015 Huawei Tech. Co., Ltd. .\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport netaddr\nimport struct\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom neutron.common import config as common_config\nfrom neutron.i18n import _, _LI, _LE, _LW\n\nfrom ryu.controller.handler import CONFIG_DISPATCHER\nfrom ryu.controller.handler import MAIN_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.controller import ofp_event\nfrom ryu.lib import addrconv\nfrom ryu.lib.packet import dhcp\nfrom ryu.lib.packet import ethernet\nfrom ryu.lib.packet import ipv4\nfrom ryu.lib.packet import packet as ryu_packet\nfrom ryu.lib.packet import udp\nfrom ryu.ofproto import ether\nfrom ryu.ofproto import ofproto_v1_3\n\nfrom dragonflow.controller.common import constants as const\nfrom dragonflow.controller.df_base_app import DFlowApp\n\nDF_DHCP_OPTS = [\n cfg.ListOpt('df_dns_servers',\n default=['8.8.8.8', '8.8.8.7'],\n help=_('Comma-separated list of the DNS servers which will be used.')),\n cfg.IntOpt('df_default_network_device_mtu', default=1460,\n help=_('default MTU setting for interface.')),\n]\n\nLOG = log.getLogger(__name__)\n\nDHCP_DOMAIN_NAME_OPT = 15\nDHCP_INTERFACE_MTU_OPT = 26\nDHCP_DISCOVER = 1\nDHCP_OFFER = 2\nDHCP_REQUEST = 3\nDHCP_ACK = 5\n\n\nclass DHCPApp(DFlowApp):\n OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]\n BASE_RPC_API_VERSION = '1.0'\n\n def __init__(self, *args, **kwargs):\n super(DHCPApp, self).__init__(*args, **kwargs)\n self.dp = None\n self.idle_timeout = 30\n self.hard_timeout = 0\n self.db_store = kwargs['db_store']\n\n cfg.CONF.register_opts(DF_DHCP_OPTS)\n cfg.CONF.register_opts(common_config.core_opts)\n self.global_dns_list = cfg.CONF.df_dns_servers\n self.lease_time = cfg.CONF.dhcp_lease_duration\n self.domain_name = cfg.CONF.dns_domain\n self.advertise_mtu = cfg.CONF.advertise_mtu\n self.default_interface_mtu = cfg.CONF.df_default_network_device_mtu\n\n self.local_tunnel_to_pid_map = {}\n\n def start(self):\n super(DHCPApp, self).start()\n return 1\n\n def is_ready(self):\n return self.dp is not None\n\n @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)\n def switch_features_handler(self, ev):\n self.dp = ev.msg.datapath\n self._install_flows_on_switch_up()\n # TODO(gampel) handle network changes\n\n @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)\n def port_desc_stats_reply_handler(self, ev):\n pass\n\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def OF_packet_in_handler(self, event):\n msg = event.msg\n if msg.table_id != const.DHCP_TABLE:\n return\n\n pkt = ryu_packet.Packet(msg.data)\n is_pkt_ipv4 = pkt.get_protocol(ipv4.ipv4) is not None\n\n if is_pkt_ipv4:\n pkt_ip = pkt.get_protocol(ipv4.ipv4)\n else:\n LOG.error(_LE(\"No support for none IpV4 protocol\"))\n return\n\n if pkt_ip is None:\n LOG.error(_LE(\"Received None IP Packet\"))\n return\n\n port_tunnel_key = msg.match.get('metadata')\n if port_tunnel_key not in self.local_tunnel_to_pid_map:\n LOG.error(\n _LE(\"No lport found for tunnel_id %s for dhcp req\"),\n port_tunnel_key)\n return\n\n lport_id = self.local_tunnel_to_pid_map[port_tunnel_key]\n lport = self.db_store.get_port(lport_id)\n if lport is None:\n LOG.error(\n _LE(\"No lport found for tunnel_id %s for dhcp req\"),\n port_tunnel_key)\n return\n try:\n self._handle_dhcp_request(msg, pkt, lport)\n except Exception as exception:\n LOG.error(_LE(\n \"Unable to handle packet %(msg)s: %(e)s\")\n % {'msg': msg, 'e': exception}\n )\n\n def 
_handle_dhcp_request(self, msg, pkt, lport):\n packet = ryu_packet.Packet(data=msg.data)\n in_port = msg.match.get(\"in_port\")\n dhcp_packet = dhcp.dhcp.parser(packet[3])\n dhcp_message_type = self._get_dhcp_message_type_opt(dhcp_packet)\n send_packet = None\n if dhcp_message_type == DHCP_DISCOVER:\n #DHCP DISCOVER\n send_packet = self._create_dhcp_offer(\n pkt,\n dhcp_packet,\n lport)\n LOG.info(_LI(\"sending DHCP offer for port IP %(port_ip)s\"\n \" port id %(port_id)s\")\n % {'port_ip': lport.get_ip(), 'port_id': lport.get_id()})\n elif dhcp_message_type == DHCP_REQUEST:\n #DHCP REQUEST\n send_packet = self._create_dhcp_ack(\n pkt,\n dhcp_packet,\n lport)\n LOG.info(_LI(\"sending DHCP ACK for port IP %(port_ip)s\"\n \" port id %(tunnel_id)s\")\n % {'port_ip': lport.get_ip(),\n 'tunnel_id': lport.get_id()})\n else:\n LOG.error(_LE(\"DHCP message type %d not handled\"),\n dhcp_message_type)\n if send_packet:\n self._send_packet(self.dp, in_port, send_packet)\n\n def _create_dhcp_ack(self, pkt, dhcp_packet, lport):\n pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)\n pkt_ethernet = pkt.get_protocol(ethernet.ethernet)\n\n subnet = self._get_subnet_by_port(lport)\n if subnet is None:\n LOG.error(_LE(\"No subnet found for port <%s>\") %\n lport.get_id())\n return\n\n dns = self._get_dns_address_list_bin(subnet)\n dhcp_server_address = str(self._get_dhcp_server_address(subnet))\n gateway_address = self._get_port_gateway_address(subnet)\n netmask_bin = self._get_port_netmask(subnet).packed\n domain_name_bin = struct.pack('!256s', self.domain_name)\n lease_time_bin = struct.pack('!I', self.lease_time)\n option_list = [\n dhcp.option(dhcp.DHCP_MESSAGE_TYPE_OPT, b'\\x05', 1),\n dhcp.option(dhcp.DHCP_SUBNET_MASK_OPT, netmask_bin, 4),\n dhcp.option(dhcp.DHCP_GATEWAY_ADDR_OPT, gateway_address.packed, 4),\n dhcp.option(dhcp.DHCP_IP_ADDR_LEASE_TIME_OPT,\n lease_time_bin, 4),\n dhcp.option(dhcp.DHCP_DNS_SERVER_ADDR_OPT, dns, len(dns)),\n dhcp.option(DHCP_DOMAIN_NAME_OPT,\n domain_name_bin,\n len(self.domain_name))]\n\n if self.advertise_mtu:\n intreface_mtu = self._get_port_mtu(lport)\n mtu_bin = struct.pack('!H', intreface_mtu)\n option_list.append(dhcp.option(\n DHCP_INTERFACE_MTU_OPT,\n mtu_bin,\n len(mtu_bin)))\n options = dhcp.options(option_list=option_list)\n dhcp_offer_pkt = ryu_packet.Packet()\n dhcp_offer_pkt.add_protocol(ethernet.ethernet(\n ethertype=ether.ETH_TYPE_IP,\n dst=pkt_ethernet.src,\n src=pkt_ethernet.dst))\n dhcp_offer_pkt.add_protocol(ipv4.ipv4(dst=pkt_ipv4.src,\n src=dhcp_server_address,\n proto=pkt_ipv4.proto))\n dhcp_offer_pkt.add_protocol(udp.udp(src_port=67, dst_port=68))\n dhcp_offer_pkt.add_protocol(dhcp.dhcp(op=2, chaddr=pkt_ethernet.src,\n siaddr=dhcp_server_address,\n boot_file=dhcp_packet[0].boot_file,\n yiaddr=lport.get_ip(),\n xid=dhcp_packet[0].xid,\n options=options))\n return dhcp_offer_pkt\n\n def _create_dhcp_offer(self, pkt, dhcp_packet, lport):\n pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)\n pkt_ethernet = pkt.get_protocol(ethernet.ethernet)\n\n subnet = self._get_subnet_by_port(lport)\n if subnet is None:\n LOG.error(_LE(\"No subnet found for port <%s>\") %\n lport.get_id())\n return\n\n dns = self._get_dns_address_list_bin(subnet)\n dhcp_server_address = self._get_dhcp_server_address(subnet)\n netmask_bin = self._get_port_netmask(subnet).packed\n lease_time_bin = struct.pack('!I', self.lease_time)\n gateway_address = self._get_port_gateway_address(subnet)\n domain_name_bin = struct.pack('!256s', self.domain_name)\n\n option_list = [\n 
dhcp.option(dhcp.DHCP_MESSAGE_TYPE_OPT, b'\\x02', 1),\n dhcp.option(dhcp.DHCP_SUBNET_MASK_OPT, netmask_bin, 4),\n dhcp.option(dhcp.DHCP_DNS_SERVER_ADDR_OPT, dns, len(dns)),\n dhcp.option(dhcp.DHCP_IP_ADDR_LEASE_TIME_OPT,\n lease_time_bin, 4),\n dhcp.option(dhcp.DHCP_SERVER_IDENTIFIER_OPT,\n dhcp_server_address.packed, 4),\n dhcp.option(15, domain_name_bin, len(self.domain_name))]\n if gateway_address:\n option_list.append(dhcp.option(\n dhcp.DHCP_GATEWAY_ADDR_OPT,\n gateway_address.packed,\n 4))\n\n options = dhcp.options(option_list=option_list)\n dhcp_offer_pkt = ryu_packet.Packet()\n dhcp_offer_pkt.add_protocol(ethernet.ethernet(\n ethertype=ether.ETH_TYPE_IP,\n dst=pkt_ethernet.src,\n src=pkt_ethernet.dst))\n dhcp_offer_pkt.add_protocol(ipv4.ipv4(dst=pkt_ipv4.src,\n src=str(dhcp_server_address),\n proto=pkt_ipv4.proto))\n dhcp_offer_pkt.add_protocol(udp.udp(src_port=67, dst_port=68))\n dhcp_offer_pkt.add_protocol(dhcp.dhcp(op=2, chaddr=pkt_ethernet.src,\n siaddr=str(dhcp_server_address),\n boot_file=dhcp_packet[0].boot_file,\n yiaddr=lport.get_ip(),\n xid=dhcp_packet[0].xid,\n options=options))\n return dhcp_offer_pkt\n\n def _get_dns_address_list_bin(self, subnet):\n dns_servers = self.global_dns_list\n if len(subnet.get_dns_name_servers()) > 0:\n dns_servers = subnet.get_dns_name_servers()\n dns_bin = ''\n for address in dns_servers:\n dns_bin += addrconv.ipv4.text_to_bin(address)\n return dns_bin\n\n def _get_dhcp_message_type_opt(self, dhcp_packet):\n for opt in dhcp_packet[0].options.option_list:\n if opt.tag == dhcp.DHCP_MESSAGE_TYPE_OPT:\n return ord(opt.value)\n\n def _get_subnet_by_port(self, lport):\n l_switch_id = lport.get_lswitch_id()\n l_switch = self.db_store.get_lswitch(l_switch_id)\n subnets = l_switch.get_subnets()\n ip = netaddr.IPAddress(lport.get_ip())\n for subnet in subnets:\n if ip in netaddr.IPNetwork(subnet.get_cidr()):\n return subnet\n return None\n\n def _get_dhcp_server_address(self, subnet):\n return netaddr.IPAddress(subnet.get_dhcp_server_address())\n\n def _get_port_gateway_address(self, subnet):\n return netaddr.IPAddress(subnet.get_gateway_ip())\n\n def _get_port_netmask(self, subnet):\n return netaddr.IPNetwork(subnet.get_cidr()).netmask\n\n def _is_dhcp_enabled_on_network(self, lport, net_id):\n subnet = self._get_subnet_by_port(lport)\n if subnet:\n return subnet.enable_dhcp()\n LOG.warning(_LW(\"No subnet found for port <%s>\") %\n lport.get_id())\n return True\n\n def _get_port_mtu(self, lport):\n #TODO(gampel) Get mtu from network object onec we add support\n return self.default_interface_mtu\n\n def remove_local_port(self, lport):\n\n tunnel_key = lport.get_tunnel_key()\n if tunnel_key in self.local_tunnel_to_pid_map:\n self.local_tunnel_to_pid_map.pop(tunnel_key, None)\n # Remove ingress classifier for port\n ofport = lport.get_external_value('ofport')\n parser = self.dp.ofproto_parser\n ofproto = self.dp.ofproto\n match = parser.OFPMatch()\n match.set_in_port(ofport)\n\n msg = parser.OFPFlowMod(\n datapath=self.dp,\n cookie=0,\n cookie_mask=0,\n table_id=const.DHCP_TABLE,\n command=ofproto.OFPFC_DELETE,\n priority=const.PRIORITY_MEDIUM,\n out_port=ofproto.OFPP_ANY,\n out_group=ofproto.OFPG_ANY,\n match=match)\n self.dp.send_msg(msg)\n\n def _is_port_a_vm(self, lport):\n owner = lport.get_device_owner()\n if not owner or \"compute\" in owner:\n return True\n return False\n\n def add_local_port(self, lport):\n network_id = lport.get_external_value('local_network_id')\n if self.dp is None:\n return\n\n lport_id = lport.get_id()\n tunnel_key = 
lport.get_tunnel_key()\n self.local_tunnel_to_pid_map[tunnel_key] = lport_id\n\n if not self._is_dhcp_enabled_on_network(lport, network_id):\n return\n\n if not self._is_port_a_vm(lport):\n return\n\n LOG.info(_LI(\"Regiter VM as DHCP client::port <%s>\") % lport.get_id())\n\n ofport = lport.get_external_value('ofport')\n parser = self.dp.ofproto_parser\n ofproto = self.dp.ofproto\n match = parser.OFPMatch()\n match.set_in_port(ofport)\n actions = []\n actions.append(parser.OFPActionSetField(metadata=tunnel_key))\n actions.append(parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,\n ofproto.OFPCML_NO_BUFFER))\n inst = [self.dp.ofproto_parser.OFPInstructionActions(\n ofproto.OFPIT_APPLY_ACTIONS, actions)]\n\n self.mod_flow(\n self.dp,\n inst=inst,\n table_id=const.DHCP_TABLE,\n priority=const.PRIORITY_MEDIUM,\n match=match)\n\n def _install_dhcp_match_flow(self):\n parser = self.dp.ofproto_parser\n\n match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP,\n eth_dst='ff:ff:ff:ff:ff:ff',\n ip_proto=17,\n udp_src=68,\n udp_dst=67)\n\n self.add_flow_go_to_table(self.dp,\n const.SERVICES_CLASSIFICATION_TABLE,\n const.PRIORITY_MEDIUM,\n const.DHCP_TABLE, match=match)\n\n def _install_flows_on_switch_up(self):\n self._install_dhcp_match_flow()\n self.add_flow_go_to_table(self.dp,\n const.DHCP_TABLE,\n const.PRIORITY_DEFAULT,\n const.L2_LOOKUP_TABLE)\n\n for port in self.db_store.get_ports():\n if port.get_external_value('is_local'):\n self.add_local_port(port)\n","sub_path":"dragonflow/controller/dhcp_app.py","file_name":"dhcp_app.py","file_ext":"py","file_size_in_byte":15955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"439144745","text":"import sys\n\ndef main():\n\n emails = []\n\n for line in sys.stdin:\n line = line.strip().split()\n\n for item in line:\n if \"@\" in item:\n emails.append(item)\n\n names = []\n\n for email in emails:\n name = email.split(\"@\")\n names.append(name[0])\n\n for person in names:\n full_name = [name.capitalize() for name in person.split(\".\")]\n print(\" \".join(full_name))\n\nif __name__ == \"__main__\":\n main()","sub_path":"week-10/q2_101.py","file_name":"q2_101.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"226484070","text":"\"\"\"\nNAN.ai after sales chatbot\nThis transforms a user utterance into a machine understandable units\ni.e. 
a predefined semantic frame as a precursor to intent identification\nThis model is integrated to the NAN.ai ecosystem via web service\n\nThis script implements pre-processing\n\"\"\"\n\nimport spacy\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import word_tokenize\n# assumed import: padding_doc below needs pad_sequences (Keras assumed as its source)\nfrom keras.preprocessing.sequence import pad_sequences\n\nps = PorterStemmer()\n\nnlp = spacy.load(\"en_core_web_sm\")\n\n# Removes custom stopwords in English and Filipino\ndef remove_stopwords(stoplist_tl, line):\n    \n    # Add to default stoplist\n    nlp.Defaults.stop_words |= stoplist_tl\n\n    line = line.lower()\n    line = \" \".join(token.lemma_ for token in nlp(line) if not token.is_stop)\n    line = \" \".join(token.lemma_ for token in nlp(line) if not token.is_punct and not token.is_digit)\n\n    return line\n\ndef stemming(line):\n\n    tokenized = word_tokenize(line)\n    stemmed = []\n\n    for t in tokenized:\n        stemmed.append(ps.stem(t))\n        print(t, \" : \", ps.stem(t))\n\n    return stemmed\n\n\ndef encoding_doc(token, words):\n    return(token.texts_to_sequences(words))\n\n\ndef padding_doc(encoded_doc, max_length):\n    return(pad_sequences(encoded_doc, maxlen = max_length, padding = \"post\"))","sub_path":"nlu tasks/cleaning/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"127207946","text":"n=int(input())\na=input().split()\nl=[]\nfor i in range(0,len(a)):\n    for j in range(i+1,len(a)):\n        if(a[i]==a[j]):\n            l.append(a[j])\nif(len(l)==0):\n    print(\"unique\")\nelse:\n    print(l[0])\n","sub_path":"repeated.py","file_name":"repeated.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"274082215","text":"import numpy as np\nfrom scipy import constants as ct\n\nfrom AAKwrapper import AAKwrapper as cpu_AAK\nfrom gpuAAK import GPUAAK\nfrom pygpuAAK import pygpuAAK\n\nimport time\n\nfit_pars = {\n    0.5:{\n        'alpha': 0.133,\n        'beta': 243,\n        'kappa': 482,\n        'gamma': 917,\n        'f_k': 2.58e-3\n    },\n    1:{\n        'alpha': 0.171,\n        'beta': 292,\n        'kappa': 1020,\n        'gamma': 1680,\n        'f_k': 2.15e-3\n    },\n    2:{\n        'alpha': 0.165,\n        'beta': 299,\n        'kappa': 611,\n        'gamma': 1340,\n        'f_k': 1.73e-3\n    },\n    4:{\n        'alpha': 0.138,\n        'beta': -221,\n        'kappa': 521,\n        'gamma': 1680,\n        'f_k': 1.13e-3\n    }\n}\n\ndef P_OMS(f):\n    return (1.5e-11)**2*(1 + (2e-3/f)**4)\n\ndef P_acc(f):\n    return (3e-15)**2*(1 + (0.4e-3/f)**2)*(1+(f/8e-3)**4)\n\ndef S_c(f, dur=4):\n    if dur not in [0.5, 1, 2, 4]:\n        raise ValueError(\"dur needs to be 0.5, 1, 2, or 4 years.\")\n\n    alpha = fit_pars[dur]['alpha']\n    beta = fit_pars[dur]['beta']\n    kappa = fit_pars[dur]['kappa']\n    gamma = fit_pars[dur]['gamma']\n    f_k = fit_pars[dur]['f_k']\n    A = 9e-45\n    return A* f**(-7/3)*np.exp(-f**alpha + beta*f*np.sin(kappa*f))*(1+np.tanh(gamma*(f_k - f)))\n\n\ndef LISA_Noise(f, L=2.5e9, f_star=19.09e-3, dur=4):\n    S_n=20./(3.*L**2)*(P_OMS(f) + 4*P_acc(f)/(2*np.pi*f)**4)*(1+6/10*(f/f_star)**2)+S_c(f, dur=4)\n    return S_n;\n\n\ndef test():\n    iota = 0.2\n    s = 0.8\n    p = 8.0\n    e = 0.7\n    T_fit = 1.0\n    init_length = 315576\n    length = 3155760\n    init_dt = 100.0\n    dt = 10.0\n    M = 1e6\n    mu = 1e1\n    gamma = 0.0\n    psi = 0.0\n    alph = 0.0\n    theta_S = 0.785\n    phi_S = 0.785\n    theta_K = 1.05\n    phi_K = 1.05\n    D = 1.0\n    LISA = True\n    backint = True\n\n    pars = {'backint': True,\n        'LISA': True,\n        'length': length,\n        'dt': dt,\n        'p': p,\n        'T': 1.,\n        'f': 2.e-3,\n        'T_fit': T_fit,\n        'mu': mu,\n        'M': M,\n        's': s,\n        'e': e,\n        'iota': iota,\n        'gamma': gamma,\n        'psi': psi,\n        'theta_S': 
theta_S,\n 'phi_S': phi_S,\n 'theta_K': theta_K,\n 'phi_K': phi_K,\n 'alpha': alph,\n 'D': D}\n\n print('Running cpu waveform.')\n st = time.perf_counter()\n tc, hIc, hIIc, timing = cpu_AAK.wave(pars)\n et = time.perf_counter()\n print('CPU Waveform complete: {} seconds'.format(et - st))\n\n hI_f, hII_f = np.fft.rfft(hIc), np.fft.rfft(hIIc)\n data_stream = {'channel1': hI_f, 'channel2': hII_f}\n\n freqs = np.fft.rfftfreq(len(hIc), d=dt)\n freqs[0] = 1e-8\n deltaF = 1/dt\n ASD = np.sqrt(LISA_Noise(freqs, dur=4))\n noise_channels = {'channel1': ASD**2, 'channel2': ASD**2}\n\n kwargs = {\n 'T_fit': 1.0,\n 'LISA': True,\n 'backint': True\n }\n\n like_class = pygpuAAK.pyGPUAAK(data_stream, noise_channels, length, dt, init_dt, **kwargs)\n likelihood = like_class.NLL(iota, s, p, e, M, mu, gamma, psi, alph, theta_S,\n phi_S, theta_K, phi_K, D)\n num = 10\n st = time.perf_counter()\n for i in range(num):\n likelihood = like_class.NLL(iota, s, p, e, M, mu, gamma, psi, alph, theta_S,\n phi_S, theta_K, phi_K, D)\n et = time.perf_counter()\n print('{} likelihood calculations with {} time points.'.format(num, length))\n print('Time per likelihood calculation:', (et - st)/num)\n\n snr = like_class.NLL(iota, s, p, e, M, mu, gamma, psi, alph, theta_S,\n phi_S, theta_K, phi_K, D, return_snr=True)\n\n print('SNR: ', snr, 'Likelihood:', likelihood)\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"examples/pygpuAAKdemo.py","file_name":"pygpuAAKdemo.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"41271153","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Stoichiometric matrix\nV = np.array([[-1.0, 1.0, 0.0],[-1.0, 1.0, 1.0],[1.0, -1.0, -1.0],[0.0, 0.0, 1.0]])\n\n# Parameters and Initial Conditions\nnA = 6.023e23 # Avagadro's number\nvol = 1e-15 # volume of system\nX = np.zeros((4,))\nc = np.zeros((3,))\nd = np.zeros_like(c)\na = np.zeros_like(c)\nX[0] = round(5e-7 * nA * vol) # molecules of substrate\nX[1] = round(2e-7 * nA * vol) # molecules of enzyme \nc[0] = 1.0e6 / (nA * vol)\nc[1] = 1.0e-4\nc[2] = 0.1\n\nt = 0.0\ntfinal = 50.0\n\ncount = 1\ntvals = [0.0]\nXvals = [list(X)]\n\nwhile t < tfinal:\n a[0] = c[0] * X[0] * X[1]\n a[1] = c[1] * X[2]\n a[2] = c[2] * X[2]\n asum = np.sum(a)\n j = np.argmax(np.random.rand() < np.cumsum(a / asum))\n tau = np.log(1.0 / np.random.rand()) / asum\n X += V[:, j]\n t += tau\n count += 1\n tvals.append(t)\n Xvals.append(list(X))\n\nL = len(tvals)\ntnew = np.zeros(2*L-1)\ntnew[1:-1:2] = tvals[1:]\ntnew[2::2] = tvals[1:]\ntnew[0] = tvals[0]\n\nSvals = np.array(Xvals)[:, 0]\nynew = np.zeros(2*L-1)\nynew[::2] = Svals\nynew[1:-1:2] = Svals[:-1]\nplt.plot(tnew, ynew, 'go-', label = \"Substrate\")\n\nPvals = np.array(Xvals)[:, 3]\nynew = np.zeros(2*L-1)\nynew[::2] = Pvals\nynew[1:-1:2] = Pvals[:-1]\nplt.plot(tnew, ynew, 'r*-', label = \"Product\")\nplt.xlabel(\"Time\", size=14)\nplt.ylabel(\"Molecules\", size=14)\nplt.xlim((0.0, 55.0))\nplt.ylim((0.0, 310.0))\nplt.legend(fontsize=14)\nplt.show()\n","sub_path":"ssa_plot.py","file_name":"ssa_plot.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"237574497","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 4 19:20:50 2020\r\n\r\n@author: Vijay Khot\r\n\"\"\"\r\n# corona virus Analysis\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport matplotlib.pyplot as 
plt\r\n\r\n\r\nurl='https://www.worldometers.info/coronavirus/'\r\n\r\n\r\n#request to the website worldometers\r\nr=requests.get(url)\r\n#print(r)\r\n\r\n#parsing html file to beutifulsoup\r\nhtml=r.text\r\nsoup=BeautifulSoup(html,'html.parser')\r\n\r\n#Extract Basic Data\r\nprint(soup.title.text)\r\nprint()\r\nlive_data=soup.find_all('div',id='maincounter-wrap')\r\n#print(live_data)\r\nfor i in live_data:\r\n print(i.text)\r\n \r\n\r\nprint(\"Analysis based on individual Countries\")\r\n\r\n#Extracting table data\r\ntable_body=soup.find('tbody')\r\ntable_rows=table_body.find_all('tr')\r\n\r\ncountries=[]\r\ncases=[]\r\ntodays=[]\r\ndeaths=[]\r\nrecovered=[]\r\n\r\nfor tr in table_rows:\r\n td = tr.find_all('td')\r\n countries.append(td[0].text)\r\n cases.append(td[1].text)\r\n todays.append(td[2].text)\r\n deaths.append(td[3].text)\r\n recovered.append(td[5].text)\r\n \r\n#print(recovered) \r\nindices=[i for i in range(1,len(countries)+1)] \r\nheaders=['Countries/othr','Total Cases','Todays Cases','Deaths','Recovered'] \r\ndf=pd.DataFrame(list(zip(countries,cases,todays,deaths,recovered)),columns=headers,index=indices)\r\ndf.to_csv('corona-analysisww.csv')\r\nprint(df) \r\n\r\n#ploting the bar graph\r\ny_pos=[i for i in range(1,len(countries)+1)]\r\nplt.bar(y_pos,cases[::-1],align='center',alpha=0.1)\r\nplt.xticks(y_pos,countries,rotation=90)\r\nplt.ylabel('Total Cases')\r\nplt.title('Persons affected by corona virus')\r\nplt.savefig('corona-analysisww.png',dpi=600)\r\nplt.show() \r\n \r\n\r\n","sub_path":"corona_virus.py","file_name":"corona_virus.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"358977859","text":"#! /usr/bin/env python\n\n\nimport scapy.all as scapy\nimport optparse\n\n\ndef get_options():\n parser = optparse.OptionParser()\n parser.add_option(\"-t\", dest=\"target\", help=\"Enter target ip/range\")\n return parser.parse_args()[0]\n\n\ndef scan(ip):\n arp_request = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_request_broadcast = broadcast / arp_request\n\n answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]\n\n clients_list = []\n for element in answered_list:\n client_dict = {\"ip\": element[1].psrc, \"mac\": element[1].hwsrc}\n clients_list.append(client_dict)\n return clients_list\n\n\ndef display_result(result_list):\n print(\"ip\\t\\t\\tMAC\\n...............................................\")\n for client in result_list:\n print(client[\"ip\"] + \"\\t\\t\" + client[\"mac\"])\n\n\noptions = get_options()\nscan_results = scan(options.target)\ndisplay_result(scan_results)","sub_path":"net_scanner.py","file_name":"net_scanner.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"543697189","text":"#Autor: Maria Fernanda Torres Velazquez A01746537\n#El programa lee 2 colores primarios e imprime el color resultante\n\n#La funcion recibe 2 colores, los compara y regresa el color resultante de su mezcla\ndef mezclarColores(c1,c2):\n if (c1==\"rojo\" and c2==\"azul\") or (c1==\"azul\" and c2==\"rojo\"):\n m=(\"MORADO\")\n\n elif (c1==\"azul\" and c2==\"amarillo\") or (c1==\"amarillo\" and c2==\"azul\"):\n m=(\"VERDE\")\n\n elif (c1==\"amarillo\" and c2==\"rojo\") or (c1==\"rojo\" and c2==\"amarillo\"):\n m= (\"NARANJA\")\n\n elif (c1==c2):\n m= (\"EL COLOR SIGUE SIENDO EL MISMO\")\n\n else:\n m=\"ERROR, LOS COLORES NO SON 
PRIMARIOS\"\n\n return m\n#Funcion principal\ndef main():\n print (\"BIENVENIDO AL MEZCLADOR DE COLORES PRIMARIOS\")\n c1=(str(input(\"Inroduce el color 1:\")))\n c2=(str(input(\"Inroduce el color 2:\")))\n c1=c1.lower()\n c2=c2.lower()\n\n mezcla= mezclarColores(c1,c2)\n print (\"-------------------------------\")\n print (\"EL COLOR RESULTANTE ES: \",mezcla)\n\nmain()\n","sub_path":"Mezcla_Colores.py","file_name":"Mezcla_Colores.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"517522095","text":"import json\nimport re\nimport typing as t\nfrom enum import Enum\nfrom pathlib import Path\n\n\nclass BookAssetType(Enum):\n EPUB = 'epub'\n MOBI = 'mobi'\n TXT = 'txt'\n COVER = 'cover'\n\n\nDEFAULT_BOOK_INTRO_LENGTH = 5000\n\nBOOK_ASSETS_FILES_TYPES: t.Dict[BookAssetType, t.Pattern] = {\n BookAssetType.EPUB: re.compile('^pg\\d+-images\\.epub$'),\n BookAssetType.COVER: re.compile('^pg\\d+\\.cover\\.medium\\.jpg$'),\n BookAssetType.MOBI: re.compile('^pg\\d+-images\\.mobi$'),\n BookAssetType.TXT: re.compile('^pg\\d+\\.txt\\.utf8$'),\n}\n\n\nclass BookAsset(t.NamedTuple):\n size: int\n type: BookAssetType\n path: Path\n\n\nclass BookProcessingResult(t.NamedTuple):\n book_id: str\n rdf_file_content: str\n assets: t.Dict[BookAssetType, BookAsset]\n intro: t.Union[str, None]\n\n def assets_as_json(self, books_root_path: str) -> str:\n assets_as_simple_structures = {\n type.value: {'path': str(data.path.relative_to(books_root_path)), 'size': data.size}\n for (type, data) in self.assets.items()\n }\n return json.dumps(assets_as_simple_structures)\n\n\ndef process_book_from_pg_rdf_file(pg_book_id: str, rdf_file: Path) -> t.Optional[BookProcessingResult]:\n rdf_file_dir = rdf_file.parent\n assets = get_pg_book_assets(rdf_file_dir)\n\n if BookAssetType.EPUB not in assets:\n # Allowing people to download books in epub format is our top priority,\n # so we don't handle PG items that don't have an epub file\n return None\n\n rdf_file_content = rdf_file.read_text(encoding='utf-8')\n\n book_intro: t.Optional[str] = None if BookAssetType.TXT not in assets else get_book_intro(\n assets[BookAssetType.TXT].path)\n\n result = BookProcessingResult(\n book_id=pg_book_id, rdf_file_content=rdf_file_content,\n assets=assets, intro=book_intro\n )\n\n return result\n\n\ndef get_book_intro(book_as_txt: Path, intro_length: int = DEFAULT_BOOK_INTRO_LENGTH) -> str:\n with open(book_as_txt, 'r', encoding='utf-8') as f:\n return f.read(intro_length)\n\n\ndef get_pg_book_assets(rdf_file_dir: Path) -> t.Dict[BookAssetType, BookAsset]:\n assets = {}\n for sibling in rdf_file_dir.iterdir():\n for asset_type, pattern in BOOK_ASSETS_FILES_TYPES.items():\n if pattern.fullmatch(sibling.name):\n book_asset = BookAsset(type=asset_type, size=sibling.stat().st_size, path=sibling.resolve())\n assets[asset_type] = book_asset\n return assets\n","sub_path":"server/pg-rdfs-indexing/src/pg_import/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"63143501","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nwizard.py\n\n:copyright: (c) 2014 by Alexander Skiba \n:licence: MIT\n:bugreports: https://github.com/GhostLyrics/sms2dxf\n\nUnit tests for sms2dxf project, section WIZARD\n\n\"\"\"\nimport unittest\nimport mock\nimport sms2dxf.modules.wizard as wizard\n\n\nclass Wizard(unittest.TestCase):\n 
\"\"\"Tests for handling the input the wizard receives.\"\"\"\n\n def test_default_delimiter(self):\n with mock.patch('__builtin__.raw_input', return_value=\"\"):\n result = wizard.ask_delimiter()\n self.assertIsNone(result)\n\n def test_nondefault_delimiter(self):\n given_delimiter = \",\"\n with mock.patch('__builtin__.raw_input', return_value=given_delimiter):\n result = wizard.ask_delimiter()\n self.assertEqual(result, given_delimiter)\n","sub_path":"sms2dxf/unit tests/test_wizard.py","file_name":"test_wizard.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"508479815","text":"import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\nimport os\nfrom stable_baselines.common.policies import MlpPolicy\n\ninference = True\n# Enjoy trained agent\nnum_of_paths = 1\nmax_ep_steps = 10000\nalgorithm = \"SAC\" # PPO2, SAC\nmodel_save_name = \"sac_ekf_model_3\" #\"ppo2_ekf_0\", \"sac_ekf_model_2\"\nenv_name = 'Ex3_EKF_gyro-v0' # 'Ex3_EKF_gyro-v0', 'Pendulum-v0','Ex3_pureEKF_gyro'\n\nif algorithm == \"PPO2\":\n from stable_baselines.common import make_vec_env\n from stable_baselines import PPO2\n model = PPO2.load(model_save_name)\n env = make_vec_env(env_name)\nelif algorithm == \"SAC\":\n from stable_baselines import SAC\n model = SAC.load(model_save_name)\n env = gym.make(env_name)\nelif algorithm == \"DDPG\":\n from stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec\n from stable_baselines import DDPG\n model = DDPG.load(model_save_name)\n env = gym.make(env_name)\n\nif inference:\n save_figs = False\n LOG_PATH = \"./logs\"\n fig_file_type = \"pdf\"\n roll_out_paths = {}\n roll_out_paths = {\n \"s\": [],\n \"r\": [],\n \"s_\": [],\n \"state_of_interest\": [],\n \"reference\": [],\n \"episode_length\": [],\n \"return\": [],\n \"death_rate\": 0.0,\n }\n for i in range(num_of_paths):\n\n # Path storage buckets\n episode_path = {\n \"s\": [],\n \"r\": [],\n \"s_\": [],\n \"state_of_interest\": [],\n \"reference\": [],\n }\n # while not dones[0]:\n s = env.reset()\n for j in range(max_ep_steps):\n action, _states = model.predict(s)\n s_, rewards, dones, infos = env.step(action)\n\n # Store observations\n episode_path[\"s\"].append(s)\n episode_path[\"r\"].append(rewards)\n episode_path[\"s_\"].append(s_)\n if algorithm == \"PPO2\":\n info = infos[0]\n elif algorithm == \"SAC\":\n info = infos\n if \"state_of_interest\" in info.keys():\n episode_path[\"state_of_interest\"].append(\n np.array([info[\"state_of_interest\"]])\n )\n if \"reference\" in info.keys():\n episode_path[\"reference\"].append(np.array(info[\"reference\"]))\n\n # Terminate if max step has been reached\n if algorithm == \"PPO2\":\n done = dones[0]\n if algorithm == \"SAC\":\n done = dones\n\n if j == (max_ep_steps-1):\n done = True\n s = s_\n\n # Check if episode is done and break loop\n if done:\n break\n\n # Append paths to paths list\n roll_out_paths[\"s\"].append(episode_path[\"s\"])\n roll_out_paths[\"r\"].append(episode_path[\"r\"])\n roll_out_paths[\"s_\"].append(episode_path[\"s_\"])\n roll_out_paths[\"state_of_interest\"].append(\n episode_path[\"state_of_interest\"]\n )\n roll_out_paths[\"reference\"].append(episode_path[\"reference\"])\n roll_out_paths[\"episode_length\"].append(len(episode_path[\"s\"]))\n roll_out_paths[\"return\"].append(np.sum(episode_path[\"r\"]))\n\n # Calculate roll_out death rate\n 
roll_out_paths[\"death_rate\"] = sum(\n [\n episode <= (max_ep_steps - 1)\n for episode in roll_out_paths[\"episode_length\"]\n ]) / len(roll_out_paths[\"episode_length\"])\n\n mean_return = np.mean(roll_out_paths[\"return\"])\n print('mean_return: ',mean_return)\n mean_episode_length = np.mean(\n roll_out_paths[\"episode_length\"]\n )\n print('mean_episode_length: ',mean_episode_length)\n death_rate = roll_out_paths[\"death_rate\"]\n print('death_rate: ',death_rate)\n\n print(\"Plotting states of reference...\")\n print(\"Plotting mean path and standard deviation...\")\n\n # Calculate mean path of reference and state_of_interest\n soi_trimmed = [\n path\n for path in roll_out_paths[\"state_of_interest\"]\n if len(path) == max(roll_out_paths[\"episode_length\"])\n ] # Needed because unequal paths # FIXME: CLEANUP\n ref_trimmed = [\n path\n for path in roll_out_paths[\"reference\"]\n if len(path) == max(roll_out_paths[\"episode_length\"])\n ] # Needed because unequal paths # FIXME: CLEANUP\n soi_mean_path = np.transpose(\n np.squeeze(np.mean(np.array(soi_trimmed), axis=0))\n )\n soi_std_path = np.transpose(\n np.squeeze(np.std(np.array(soi_trimmed), axis=0))\n )\n ref_mean_path = np.transpose(\n np.squeeze(np.mean(np.array(ref_trimmed), axis=0))\n )\n ref_std_path = np.transpose(\n np.squeeze(np.std(np.array(ref_trimmed), axis=0))\n )\n\n # Make sure arrays are right dimension\n soi_mean_path = (\n np.expand_dims(soi_mean_path, axis=0)\n if len(soi_mean_path.shape) == 1\n else soi_mean_path\n )\n soi_std_path = (\n np.expand_dims(soi_std_path, axis=0)\n if len(soi_std_path.shape) == 1\n else soi_std_path\n )\n ref_mean_path = (\n np.expand_dims(ref_mean_path, axis=0)\n if len(ref_mean_path.shape) == 1\n else ref_mean_path\n )\n ref_std_path = (\n np.expand_dims(ref_std_path, axis=0)\n if len(ref_std_path.shape) == 1\n else ref_std_path\n )\n\n\n # Plot mean path of reference and state_of_interest\n fig_1 = plt.figure(\n figsize=(9, 6), num=f\"state-q-ppo2\"\n )\n ax = fig_1.add_subplot(111)\n colors = \"bgrcmk\"\n cycol = cycle(colors)\n for i in range(0, min(soi_mean_path.shape[0], ref_mean_path.shape[0])):\n color1 = next(cycol)\n color2 = color1\n t = [i / 100.0 for i in range(0, max(roll_out_paths[\"episode_length\"]))]\n if i <= (len(soi_mean_path) - 1):\n ax.plot(\n t,\n soi_mean_path[i],\n color=color1,\n linestyle=\"dashed\",\n # label=f\"state_of_interest_{i+1}_mean\",\n )\n ax.fill_between(\n t,\n soi_mean_path[i] - soi_std_path[i],\n soi_mean_path[i] + soi_std_path[i],\n color=color1,\n alpha=0.3,\n # label=f\"state_of_interest_{i+1}_std\",\n )\n path = np.concatenate(\n [np.transpose(ref_mean_path), np.transpose(soi_mean_path), np.transpose(soi_std_path)], 1)\n # np.savetxt('inferenceResult-52.csv', path, delimiter=',')\n if i <= (len(ref_mean_path) - 1):\n ax.plot(\n t,\n ref_mean_path[i],\n color=color2,\n # label=f\"reference_{i+1}\",\n )\n if i <= (len(ref_mean_path) - 1):\n # ax.plot(\n # t,\n # ref_mean_path[i+4],\n # color=color2,\n # linestyle=\"dotted\",\n # # label=f\"reference_{i+1}\",\n # )\n plt.ylabel(\"Quaternion\", fontsize=20)\n plt.xlabel(\"Time(s)\", fontsize=20)\n plt.xticks(fontsize=20)\n plt.yticks(fontsize=20)\n plt.gcf().subplots_adjust(bottom=0.15, left=0.15)\n # ax.fill_between(\n # t,\n # ref_mean_path[i] - ref_std_path[i],\n # ref_mean_path[i] + ref_std_path[i],\n # color=color2,\n # alpha=0.3,\n # label=f\"reference_{i+1}_std\",\n # ) # FIXME: remove\n ax.set_rasterized(True)\n\n # Also plot mean and std of the observations\n print(\"Plotting 
observations...\")\n print(\"Plotting mean path and standard deviation...\")\n\n\n # Create figure\n fig_2 = plt.figure(\n figsize=(9, 6), num=\"observation-ppo2\"\n )\n colors = \"bgrcmk\"\n cycol = cycle(colors)\n ax2 = fig_2.add_subplot(111)\n\n # Calculate mean observation path and std\n obs_trimmed = [\n path\n for path in roll_out_paths[\"s\"]\n if len(path) == max(roll_out_paths[\"episode_length\"])\n ]\n obs_mean_path = np.transpose(\n np.squeeze(np.mean(np.array(obs_trimmed), axis=0))\n )\n obs_std_path = np.transpose(\n np.squeeze(np.std(np.array(obs_trimmed), axis=0))\n )\n t = range(max(roll_out_paths[\"episode_length\"]))\n\n # Plot state paths and std\n for i in range(0, obs_mean_path.shape[0]):\n color = next(cycol)\n ax2.plot(\n t,\n obs_mean_path[i],\n color=color,\n linestyle=\"dashed\",\n label=(f\"s_{i + 1}\"),\n )\n ax2.fill_between(\n t,\n obs_mean_path[i] - obs_std_path[i],\n obs_mean_path[i] + obs_std_path[i],\n color=color,\n alpha=0.3,\n label=(f\"s_{i + 1}_std\"),\n )\n ax2.set_title(\"Observations\")\n handles2, labels2 = ax2.get_legend_handles_labels()\n ax2.legend(handles2, labels2, loc=2, fancybox=False, shadow=False)\n\n # Plot mean cost and std\n # Create figure\n fig_3 = plt.figure(\n figsize=(9, 6), num=\"return-ppo2\"\n )\n ax3 = fig_3.add_subplot(111)\n\n # Calculate mean observation path and std\n cost_trimmed = [\n path\n for path in roll_out_paths[\"r\"]\n if len(path) == max(roll_out_paths[\"episode_length\"])\n ]\n cost_mean_path = np.transpose(\n np.squeeze(np.mean(np.array(cost_trimmed), axis=0))\n )\n cost_std_path = np.transpose(\n np.squeeze(np.std(np.array(cost_trimmed), axis=0))\n )\n t = range(max(roll_out_paths[\"episode_length\"]))\n\n # Plot state paths and std\n ax3.plot(\n t, cost_mean_path, color=\"g\", linestyle=\"dashed\", label=(\"mean cost\"),\n )\n ax3.fill_between(\n t,\n cost_mean_path - cost_std_path,\n cost_mean_path + cost_std_path,\n color=\"g\",\n alpha=0.3,\n label=(\"mean cost std\"),\n )\n ax3.set_title(\"Mean cost\")\n handles3, labels3 = ax3.get_legend_handles_labels()\n ax3.legend(handles3, labels3, loc=2, fancybox=False, shadow=False)\n\n # Show figures\n plt.show()\n\n # Save figures to pdf if requested\n if save_figs:\n fig_1.savefig(\n os.path.join(LOG_PATH, \"Quatonian.\" + fig_file_type),\n bbox_inches=\"tight\",\n )\n fig_2.savefig(\n os.path.join(LOG_PATH, \"State.\" + fig_file_type),\n bbox_inches=\"tight\",\n )\n fig_3.savefig(\n os.path.join(LOG_PATH, \"Cost.\" + fig_file_type),\n bbox_inches=\"tight\",\n )\n\n\n# for i in range(num_of_paths):\n# episode_path = {\n# \"s\": [],\n# \"r\": [],\n# \"s_\": [],\n# \"state_of_interest\": [],\n# \"reference\": [],\n# }\n# s = env.reset()\n#\n# print(max_ep_steps)\n# for j in range(max_ep_steps):\n# action, _states = model.predict(s)\n# s_, rewards, dones, infos = env.step(action)\n# # Store observations\n# episode_path[\"s\"].append(s)\n# episode_path[\"r\"].append(rewards)\n# episode_path[\"s_\"].append(s_)\n# info = infos[0]\n#\n#\n# # Terminate if max step has been reached\n# if j == (max_ep_steps - 1):\n# dones[0] = True\n# s = s_\n#\n# # Check if episode is done and break loop\n# # if dones[0]:\n# # break\n#\n# print(j)\n# print(\"max_ep_steps: \", max_ep_steps)\n\n\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":11386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"351915505","text":"# Copyright (c) 2016, Kenneth P. J. 
Dyer\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms,\n# with or without modification, are permitted provided\n# that the following conditions are met:\n#\n# * Redistributions of source code must retain the\n# above copyright notice, this list of conditions\n# and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the\n# above copyright notice, this list of conditions\n# and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of pyspell nor the names of its\n# contributors may be used to endorse or promote\n# products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS\n# AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT\n# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\n# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# Module Imports\nimport sys\nimport subprocess\nimport cowtermcolor\n\n# Controller Class\nclass PySpell():\n \"\"\"Main Process for the PySpell Application\n\n This application provides a wrapper with some minor additional functionality to\n the Linux utility spell, which itself is a wrapper for the GNU Aspell list \n functionality. If you call spell on a file, it returns a list of mispelled \n words in the file. If you call spell on a dozen files, it returns a list of\n all mispelled words found collectively.\n\n PySpell iterates over the source filles, calling spell on each via subprocess,\n it then removes any entries matching a user-defined ignore list and formats\n the results to list each file read and the errors found in that file.\n\n It provides no functionality for correcting these errors or updating the\n ignore list. The purpose of PySpell is to report on spelling errors in\n large documentation projects. That is, to generate and update a list\n of misspellings for the technical writers to work through in making\n corrections.\n \"\"\"\n\n # Initialize Class\n def __init__(self, args):\n \"\"\" Initialize the class and begin the main process\n\n Receive the argparse object from the command-line script.\n \"\"\"\n\n # Masthead\n version = \"0.1\"\n name = \"PySpell - The Project Level Spellchecker\"\n\n if args.verbose or args.version:\n content = [ name,\n \"Kenneth P. J. 
Dyer\",\n \"Avocet Editorial Consultants\",\n \"kenneth@avoceteditors.com\",\n \"Version %s\" % version, ' ' ]\n masthead = '\\n '.join(content)\n else:\n content = [name, \"Version %s\" % version]\n masthead = ' - '.join(content)\n print(masthead)\n\n if args.version:\n sys.exit(0)\n\n # Run Spell for Base\n data = self.run_spell(args.source)\n\n # Load Ignorelist\n if args.ignore is not None:\n ignorelist = self.build_ignore(args.ignore)\n else:\n ignorelist = ['']\n data = self.clear_ignores(data, ignorelist)\n\n # Print to Stdout\n self.report(data)\n\n # Exit\n sys.exit(0)\n\n # Run Basic Check\n def run_spell(self, paths):\n \"\"\" Check the given files for spelling errors\n\n Method iterates over each file given from the command-line.\n It executes the spell application with subprocess, then checks\n the output from stdout into the data dict.\n\n Returns the data dict.\n \"\"\"\n\n # Initialize Data Dict\n data = {}\n\n # Check Input for Misspellings\n for i in paths:\n\n command = ['spell', i]\n errors = subprocess.check_output(command)\n\n # Store, Decode and Split Output\n data[i] = errors.decode().split('\\n')\n\n return data\n\n\n # Build Ignorelist\n def build_ignore(self, path):\n \"\"\" Generate the Ignorelist\n\n The ignorelist is an arbitrary list of words that PySpell should\n ignore. This is not the same as words that you want to add to the\n main dictionary. For instanc, a documentation file might include\n code sampls, which contain a number of variable names like targetBA.\n This is something you might ignore in one project rather than add to\n the main dictionary.\n \"\"\"\n\n # Open Ignorelist\n try:\n f = open(path, 'r')\n content = f.read()\n f.close()\n except:\n content = ''\n\n # Convert and Clean Ignorelist\n base = content.split('\\n')\n ignores = []\n for entry in base:\n if entry != '':\n entry = entry.strip()\n ignores.append(entry)\n\n return ignores\n\n\n # Clear Ignorelist from Data\n def clear_ignores(self, data, ignorelist):\n \"\"\" Checks the errorslist found in each file and removes the words\n that you want it to ignore.\n\n This method appears to be the principal bottleneck for the application.\n If you have any suggestions of how to improve performance, feel free to\n open an issue on GitHub or submit a pull request.\n \"\"\"\n\n # Check if Ignorelist Empty\n if ignorelist == ['']:\n return data\n\n for i in data:\n newlist = []\n\n for entry in data[i]:\n if entry not in ignorelist:\n newlist.append(entry)\n\n data[i] = newlist\n return data\n\n\n # Report Findings\n def report(self, data):\n \"\"\" Report Findings to Stdout\n\n Method iterates over the data dict, for each entry it prints the\n filename in green, then prints a list of the misspelled words in\n yellow.\n \"\"\"\n\n # Initialize Cowtermcolor\n yellow = cowtermcolor.Color(cowtermcolor.YELLOW)\n green = cowtermcolor.Color(cowtermcolor.GREEN)\n\n\n\n # Print Outputs\n for key in data:\n errors = data[key]\n\n # Print File Name\n print(green(key))\n\n # Print Errors\n for err in errors:\n if err != '':\n line = ' - ' + yellow(err)\n print(line)\n","sub_path":"pyspell/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":6900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"484160098","text":"##############################################################################\n#\n# Copyright (c) 2004 Five Contributors. 
All rights reserved.\n#\n# This software is distributed under the terms of the Zope Public\n# License (ZPL) v2.1. See COPYING.txt for more information.\n#\n##############################################################################\n\"\"\"Five ZCML directive schemas\n\n$Id$\n\"\"\"\nfrom zope.interface import Interface\nfrom zope.app.publisher.browser.metadirectives import IBasicResourceInformation\nfrom zope.configuration.fields import GlobalObject, Tokens, PythonIdentifier\nfrom zope.schema import TextLine\n\nclass IImplementsDirective(Interface):\n \"\"\"State that a class implements something.\n \"\"\"\n class_ = GlobalObject(\n title=u\"Class\",\n required=True\n )\n\n interface = Tokens(\n title=u\"One or more interfaces\",\n required=True,\n value_type=GlobalObject()\n )\n\nclass ITraversableDirective(Interface):\n \"\"\"Make instances of class traversable publically.\n\n This can be used to browse to pages, resources, etc.\n\n Traversal can be controlled by registering an ITraverser adapter.\n \"\"\"\n class_ = GlobalObject(\n title=u\"Class\",\n required=True\n )\n\nclass IDefaultViewableDirective(Interface):\n \"\"\"Make instances of class viewable publically.\n\n The default view is looked up using a IBrowserDefault adapter.\n \"\"\"\n class_ = GlobalObject(\n title=u\"Class\",\n required=True\n )\n\nclass ISendEventsDirective(Interface):\n \"\"\"Make instances of class send events.\n \"\"\"\n\n class_ = GlobalObject(\n title=u\"Class\",\n required=True\n )\n\nclass IBridgeDirective(Interface):\n \"\"\"Bridge from a Zope 2 interface to an equivalent Zope3 interface.\n \"\"\"\n zope2 = GlobalObject(\n title=u\"Zope2\",\n required=True\n )\n\n package = GlobalObject(\n title=u\"Target package\",\n required=True\n )\n\n name = PythonIdentifier(\n title=u\"Zope3 Interface name\",\n description=u\"If not supplied, the new interface will have the same \"\n u\"name as the source interface.\",\n required=False\n )\n\nclass IPagesFromDirectoryDirective(IBasicResourceInformation):\n \"\"\"Register each file in a skin directory as a page resource\n \"\"\"\n\n for_ = GlobalObject(\n title=u\"The interface this view is for.\",\n required=False\n )\n\n module = GlobalObject(\n title=u\"Module\",\n required=True\n )\n\n directory = TextLine(\n title=u\"Directory\",\n description=u\"The directory containing the resource data.\",\n required=True\n )\n","sub_path":"Products.Five/tags/0.3/fivedirectives.py","file_name":"fivedirectives.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"567715154","text":"from django import forms \r\nfrom ads_app.models import Comment, Category, Advert\r\nfrom django.forms.widgets import ClearableFileInput\r\nfrom django.contrib.auth.models import User\r\n\r\nclass MyClearableFileInput(ClearableFileInput):\r\n initial_text = 'Текущяя'\r\n input_text = 'Изменить'\r\n clear_checkbox_label = 'Удалить'\r\n\r\nclass CommentForm(forms.ModelForm):\r\n\r\n author = forms.CharField(\r\n widget=forms.TextInput(\r\n attrs={\r\n 'required': True,\r\n 'type': 'text',\r\n 'class':'input-comment',\r\n 'placeholder': 'Введите ваше имя'\r\n }\r\n )\r\n )\r\n content = forms.CharField(\r\n widget=forms.Textarea(\r\n attrs={\r\n 'required': True,\r\n 'type': 'text',\r\n 'class': 'textarea-comment',\r\n 'placeholder': 'Введите ваш комментарий'\r\n }\r\n )\r\n )\r\n class Meta:\r\n model = Comment\r\n fields = ('author', 'content')\r\n\r\nclass AdvertForm(forms.ModelForm):\r\n\r\n title 
= forms.CharField(\r\n label='Название объявления',\r\n required=True,\r\n widget=forms.TextInput(\r\n attrs={\r\n 'type': 'text',\r\n 'class': 'form-control',\r\n 'placeholder': 'Введите название объявления'\r\n }\r\n )\r\n )\r\n content = forms.CharField(\r\n label='Содержание объявления',\r\n required=True,\r\n widget=forms.Textarea(\r\n attrs={\r\n 'type': 'text',\r\n 'class': 'form-control',\r\n 'placeholder': 'Введите содержание объявления'\r\n }\r\n )\r\n )\r\n tags = forms.CharField(\r\n label='Теги',\r\n required=False,\r\n widget=forms.TextInput(\r\n attrs={\r\n 'type': 'text',\r\n 'class': 'form-control',\r\n 'placeholder': 'Введите теги через пробел'\r\n }\r\n )\r\n )\r\n category = forms.ModelChoiceField(\r\n queryset=Category.objects.all(),\r\n required=True,\r\n label='Категория',\r\n widget=forms.Select(\r\n attrs={\r\n 'class': 'form-control'\r\n }\r\n )\r\n )\r\n photo = forms.ImageField(\r\n label= 'Фото',\r\n required=False,\r\n widget= MyClearableFileInput(\r\n attrs={\r\n 'class': 'form-control',\r\n 'accept': '.jpg,.jpeg,.png,.gif'\r\n }\r\n )\r\n )\r\n class Meta:\r\n model = Advert\r\n fields = ('title', 'content', 'tags', 'category', 'photo')\r\n","sub_path":"ads_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"554542524","text":"import os\nimport cgi\nimport wsgiref.handlers\n\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\n#TMPL_ROOT = os.path.join(os.path.dirname(__file__), 'main')\n\n#def get_template(template_name):\n #return os.path.join(TMPL_ROOT, template_name)\n\ndef reg(request):\n user = users.get_current_user()\n if user:\n name = user.nickname()\n lurl_out = users.create_logout_url(request.uri)\n un_and_lo = {'un': name,\n 'lo': lurl_out}\n return un_and_lo\n else:\n lurl = users.create_login_url(request.uri)\n return lurl \n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if user:\n login = reg(self.request)\n template_values = {'username': login['un'],\n 'url_out': login['lo']}\n #path = get_template('main.html')\n path = os.path.join(os.path.dirname(__file__), 'main.html')\n self.response.out.write(template.render(path, template_values))\n else:\n login = reg(self.request)\n template_values = {'url': login}\n path = os.path.join(os.path.dirname(__file__), 'main.html')\n #path = get_template('main.html')\n self.response.out.write(template.render(path, template_values))\n\n'''class Reg(webapp.RequestHandler):\n def get(self):\n lurl = users.create_login_url(self.request.uri)\n template_values = {'url': lurl}\n\n path = os.path.join(os.path.join(os.path.dirname(__file__), 'template'), 'url.html')\n self.response.out.write(template.render(path, template_values))'''\nclass ForumPage(webapp.RequestHandler):\n def get(self):\n path = os.path.join(os.path.dirname(__file__), 'forum_templ.html')\n self.response.out.write(template.render(path, None))\n\nclass ForumMesPage(webapp.RequestHandler):\n def get(self):\n path = os.path.join(os.path.dirname(__file__), 'forum_tema.html')\n self.response.out.write(template.render(path, None))\n\n\napplication = webapp.WSGIApplication(\n [('/.*', MainPage),\n \n #('/forum', ForumPage),\n #('/forum_id', ForumMesPage)],\n #('/url', Reg)],\n ],\n debug=True)\n\ndef main():\n 
wsgiref.handlers.CGIHandler().run(application)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"web1/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"394396449","text":"import numpy as np\nimport tensorflow as tf\n\nclass DAQN:\n\n\tdef __init__(self,X,Y,n_classes,pooling_bool = True):\n\n\t\t\"\"\"\n\t\t\tInput: 83x83x? (? = 1-4)\n\n\t\t\t1st layer (CONV): activation - TANH\n\t\t\t\t16 feature maps (19x19)\n\t\t\t\t16 filters (8x8) (stride 4)\n\t\t\t\t4x4 max-pooling\n\n\t\t\t2nd layer (CONV): activation - TANH\n\t\t\t\t32 feature maps (8x8)\n\t\t\t\t32 filters (4x4)\n\t\t\t\t2x2 max-pooling\n\n\t\t\t3rd layer (FC):\n\t\t\t\tinput: 2048\n\t\t\t\toutput: 256\n\n\t\t\tOutput layer (FC):\n\t\t\t\tinput: 256\n\t\t\t\toutput: 3 (actions)\n\t\t\t\t\tsoft-max non-linearity\n\n\t\t\tcost = squared sum of the difference of q(s,a) from network (so array of 3 values) and q*(s,a) from expert (1-hot encoded array of real action chosen)\n\t\t\tLearning rate is done with AdaGrad\n\n\t\t\tTrained with: expert trajectories (s,a)\n\t\t\tHyperparameters: gamma (discount rate), nu (learning rate)\n\t\t\"\"\"\n\n\t\t\n\t\t# layer 1\n\t\t\n\t\tconv1,_ = self.conv2d(input=X,\t\t\t \n\t\t\t \t\t\t\tnum_input_channels=1, \n\t\t\t \t\t\t\tfilter_size=8,\t\t\n\t\t\t \t\t\t\tnum_filters=16,\t\t\n\t\t\t \t\t\t\tuse_pooling=pooling_bool, \n\t\t\t \t\t\t\tpooling = 4,\t\t\n\t\t\t \t\t\t\tstride=4, \n\t\t\t \t\t\t\tpool_stride = 1,\n\t\t\t \t\t\t\tpadding=\"VALID\",\n\t\t\t \t\t\t\tpool_pad = \"SAME\")\n\t\t\n\t\t\n\t\t# layer 2\t\t\n\t\tconv2,_ = self.conv2d(input=conv1,\t\t\t \n\t\t\t \t\t\t\tnum_input_channels=16, #num filters from last layer\n\t\t\t \t\t\t\tfilter_size=4,\t\t\n\t\t\t \t\t\t\tnum_filters=32,\t\t\n\t\t\t \t\t\t\tuse_pooling=pooling_bool, \n\t\t\t \t\t\t\tpooling = 2,\t\t\n\t\t\t \t\t\t\tstride=1, \n\t\t\t \t\t\t\tpool_stride = 2,\n\t\t\t \t\t\t\tpadding=\"VALID\",\n\t\t\t \t\t\t\tpool_pad = \"SAME\")\n\n\t\n\n\t\t# layer 3\n\t\tlayer_flat, num_features = self.flatten_layer(conv2)\n\t\tlayer_fc1 = self.fc_layer(input=layer_flat,\n\t\t\t\t\t num_inputs=num_features,\n\t\t\t\t\t num_outputs=256,\n\t\t\t\t\t activation=\"tanh\")\n\n\t\t# Output\n\t\toutpre = self.fc_layer(input=layer_fc1,\n\t\t\t\t\t num_inputs=256,\n\t\t\t\t\t num_outputs=n_classes,\n\t\t\t\t\t activation=\"\")\n\n\t\toutpost = tf.nn.softmax(outpre)\t\n\n\t\tself.outpre = outpre\n\t\tself.outpost = outpost\n\n\tdef conv2d(self, input,\t\t\t # The previous layer.\n\t\t\t num_input_channels, # Num. channels in prev. layer.\n\t\t\t filter_size,\t\t# Width and height of each filter.\n\t\t\t num_filters,\t\t# Number of filters.\n\t\t\t use_pooling=True, # Use max-pooling. \n\t\t\t pooling = 2,\t\t# kernel size\n\t\t\t stride=1, \n\t\t\t pool_stride = 1,\n\t\t\t padding=\"SAME\",\n\t\t\t pool_pad = \"SAME\"):\n \t# Conv2D wrapper, with bias and relu activation\n\n \t# Shape of the filter-weights for the convolution.\n\t\t# This format is determined by the TensorFlow API.\n\t\tshape = [filter_size, filter_size, num_input_channels, num_filters]\n\t\t# Create new weights aka. 
filters with the given shape.\n\t\tweights = self.new_weights(shape=shape)\n\n\t\t# Create new biases, one for each filter.\n\t\tbiases = self.new_biases(length=num_filters)\n\n\t\t# Create the TensorFlow operation for convolution.\n\t\t# Note the strides are set to 1 in all dimensions.\n\t\t# The first and last stride must always be 1,\n\t\t# because the first is for the image-number and\n\t\t# the last is for the input-channel.\n\t\t# But e.g. strides=[1, 2, 2, 1] would mean that the filter\n\t\t# is moved 2 pixels across the x- and y-axis of the image.\n\t\t# The padding is set to 'SAME' which means the input image\n\t\t# is padded with zeroes so the size of the output is the same.\n\t\tlayer = tf.nn.conv2d(input=input,\n\t\t\t\t\t\t filter=weights,\n\t\t\t\t\t\t strides=[1, stride, stride, 1],\n\t\t\t\t\t\t padding=padding)\n\n\t\t# Add the biases to the results of the convolution.\n\t\t# A bias-value is added to each filter-channel.\n\t\tlayer += biases\n\n\t\t# Use pooling to down-sample the image resolution?\n\t\tif use_pooling:\n\t\t\t# This is 2x2 max-pooling, which means that we\n\t\t\t# consider 2x2 windows and select the largest value\n\t\t\t# in each window. Then we move 2 pixels to the next window.\n\t\t\tlayer = tf.nn.max_pool(value=layer,\n\t\t\t\t\t\t\t\t ksize=[1, pooling, pooling, 1],\n\t\t\t\t\t\t\t\t strides=[1, pool_stride, pool_stride, 1],\n\t\t\t\t\t\t\t\t padding=pool_pad)\n\n\t\t# Rectified Linear Unit (ReLU).\n\t\t# It calculates max(x, 0) for each input pixel x.\n\t\t# This adds some non-linearity to the formula and allows us\n\t\t# to learn more complicated functions.\n\t\tlayer = tf.nn.relu(layer)\n\n\t\t# Note that ReLU is normally executed before the pooling,\n\t\t# but since relu(max_pool(x)) == max_pool(relu(x)) we can\n\t\t# save 75% of the relu-operations by max-pooling first.\n\n\t\t# We return both the resulting layer and the filter-weights\n\t\t# because we will plot the weights later.\n\t\treturn layer, weights\n\n\tdef maxpool2d(self, x, k=2, s=2):\n\t # MaxPool2D wrapper\n\t return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='SAME')\n\n\tdef flatten(self, x, dim):\n\t\t#Flatten wrapper\n\t\t#dims = int(np.prod(x.get_shape().as_list()[1:])) #may need to splice [1:] because first x.get_shape().as_list() is None\n\t\tx = tf.reshape(x, [-1,dim])\n\t\treturn x\n\n\tdef flatten_layer(self,layer):\n\t\t# Get the shape of the input layer.\n\t\tlayer_shape = layer.get_shape()\n\n\t\t# The shape of the input layer is assumed to be:\n\t\t# layer_shape == [num_images, img_height, img_width, num_channels]\n\n\t\t# The number of features is: img_height * img_width * num_channels\n\t\t# We can use a function from TensorFlow to calculate this.\n\t\tnum_features = layer_shape[1:4].num_elements()\n\n\t\t# Reshape the layer to [num_images, num_features].\n\t\t# Note that we just set the size of the second dimension\n\t\t# to num_features and the size of the first dimension to -1\n\t\t# which means the size in that dimension is calculated\n\t\t# so the total size of the tensor is unchanged from the reshaping.\n\t\tlayer_flat = tf.reshape(layer, [-1, num_features])\n\n\t\t# The shape of the flattened layer is now:\n\t\t# [num_images, img_height * img_width * num_channels]\n\n\t\t# Return both the flattened layer and the number of features.\n\t\treturn layer_flat, num_features\n\n\tdef fc_layer(self,input,\t\t # The previous layer.\n\t\t\t num_inputs,\t # Num. inputs from prev. layer.\n\t\t\t num_outputs,\t# Num. 
outputs.\n\t\t\t activation=\"\"): # Use Rectified Linear Unit (ReLU)?\n\n\t\t# Create new weights and biases.\n\t\tweights = self.new_weights(shape=[num_inputs, num_outputs])\n\t\tbiases = self.new_biases(length=num_outputs)\n\n\t\t# Calculate the layer as the matrix multiplication of\n\t\t# the input and weights, and then add the bias-values.\n\t\tlayer = tf.matmul(input, weights) + biases\n\n\t\t# Use ReLU?\n\t\tif activation == \"relu\":\n\t\t\tlayer = tf.nn.relu(layer)\n\t\telif activation == \"tanh\":\n\t\t\tlayer = tf.nn.tanh(layer)\n\n\t\treturn layer\n\n\tdef new_weights(self,shape):\n\t\treturn tf.Variable(tf.random_normal(shape))\n\n\tdef new_biases(self,length):\n\t\treturn tf.Variable(tf.constant(0.05, shape=[length]))\n\ndef sse(y_pred, y_true):\n\t#sum of square error\n\t#y_true = tf.Print(y_true, [y_true], message=\"y_true is: \")\n\t#y_pred = tf.Print(y_pred, [y_pred], message=\"y_pred is: \")\n\tloss = tf.square(tf.subtract(y_true,y_pred))\n\tloss.set_shape(y_true.shape)\n\treturn tf.reduce_sum(loss) #SHOULD THIS STILL BE SUMMED?\n\ndef generateNetworkStructure():\n\t\n\tn_classes = 5 #5 actions\n\tlearning_rate = 0.001\n\t\n\n\tgraph = tf.Graph()\n\twith graph.as_default():\n\n\t\t# Store layers weight & bias\n\t\tweights = {\n\t\t # 8x8 conv, 1 input, 16 outputs\n\t\t 'wc1': tf.Variable(tf.random_normal([8, 8, 1, 16])),\n\t\t # 4x4 conv, 16 inputs, 32 outputs\n\t\t 'wc2': tf.Variable(tf.random_normal([4, 4, 16, 32])),\n\t\t # fully connected, 7*7*64 inputs, 1024 outputs\n\t\t 'wd1': tf.Variable(tf.random_normal([11*11*32, 256])),\n\t\t # 1024 inputs, 10 outputs (class prediction)\n\t\t 'out': tf.Variable(tf.random_normal([256, n_classes]))\n\t\t}\n\n\t\tbiases = {\n\t\t 'bc1': tf.Variable(tf.random_normal([16])),\n\t\t 'bc2': tf.Variable(tf.random_normal([32])),\n\t\t 'bd1': tf.Variable(tf.random_normal([256])),\n\t\t 'out': tf.Variable(tf.random_normal([n_classes]))\n\t\t}\t\n\n\t\n\t\t#Place Holder\n\t\tX = tf.placeholder(shape=[1,100,100,1], dtype=\"float32\",name='s')\n\t\tY = tf.placeholder(shape=[1,5], dtype=\"float32\", name='a')\n\t\tnet = DAQN(X,Y, weights, biases)\n\t\tdaqn = net.outpost\n\t\tdaqn_presoft = net.outpre\n\t\t\n\t\t# Define Loss and optimizer\n\t\tcost = sse(daqn,Y)\n\t\toptimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(cost)\n\n\t\t# Evaluate model\n\t\tcorrect_pred = tf.equal(tf.argmax(daqn, 0), tf.argmax(Y, 0)) #1 instead of 0?\n\t\taccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\t\t#Initialize variables\n\t\tinit = tf.global_variables_initializer()\n\t#print(daqn)\n\t#print(darn)\n\tx_data = np.random.rand(1,100,100,1)\n\ty_data = np.random.rand(1,5)\n\n\twith tf.Session(graph=graph) as sess:\n\n\t\tsess.run(init)\t\n\t\twriter = tf.summary.FileWriter('DAQN_log_test',graph=sess.graph)\n\t\tsess.run(optimizer,feed_dict={X : x_data, Y : y_data})\n\n\n\twriter.flush()\n\twriter.close()\t\t\n\n#generateNetworkStructure()","sub_path":"Proposed Architecture/daqn.py","file_name":"daqn.py","file_ext":"py","file_size_in_byte":8560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"378270094","text":"'''\nThis is an re-implementation of \"Sequence to Sequence Learning with Neural\nNetworks\" A few key points:\n1. reverse input order / Bidirectional LSTM/RNN;\n2. beam search; (Auto-regressive Learning)\n3. Encoder-Decoder; (Auto-regressive / Teacher Forcing / Curriculum Learning)\n4. 
BLEU score (4 gram) / Word error rate (WER);\n\nFollows \"1 - Sequence to Sequence Learning with Neural Networks.ipynb\"\n\nENV: PyTorch 1.8, torchtext 0.9 and spaCy 3.0, using Python 3.8.\n\nconda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0\ncudatoolkit=10.2 -c pytorch\n\npip install torchtext==0.9.0\n\npython -m spacy download en_core_web_sm \npython -m spacy download de_core_news_sm\nmay need to be download manually\n\n\nAn embedding layer to map each word to its feature vector. (How exactly?)\n\nappend a start of sequence () and \nend of sequence () token \nto the start and end of sentence, respectively.\n\nWhen training/testing our model, we always know how many words are in our target\nsentence, so we stop generating words once we hit that many. During inference it\nis common to keep generating words until the model outputs an token or\nafter a certain amount of words have been generated.\n\nOnce we have our predicted target sentence, $\\hat{Y} = \\{ \\hat{y}_1, \\hat{y}_2, ..., \\hat{y}_T \\}$, \nwe compare it against our actual target sentence, $Y = \\{ y_1, y_2, ..., y_T \\}$, to calculate our loss. \nWe then use this loss to update all of the parameters in our model.\n'''\n\nfrom typing import Text\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom torchtext.legacy.datasets import Multi30k\nfrom torchtext.legacy.data import Field, BucketIterator\n\nfrom LSTM_Models import Encoder,Decoder,Seq2Seq\nfrom LSTM_Models import train, evaluate\nfrom utils import init_weights, count_parameters, epoch_time\nfrom utils import adj_tfr_lin_step, adj_tfr_lin, adj_tfr_exp\n\nimport spacy\nimport numpy as np\n\nimport random\nimport math\nimport time\n\nimport pdb\n\n# tfr_sche = adj_tfr_lin_step(step_size=10, gamma=0.1)\n# tfr_sche = adj_tfr_lin(bound_step=50)\ntfr_sche = adj_tfr_exp(bound_step=100)\nfor i in range(110):\n print('{:.4f}'.format(tfr_sche.update()))\npdb.set_trace()\n\nSEED = 1234\n\nrandom.seed(SEED)\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\n\n# A tokenizer is used to turn a string containing a sentence into a list of\n# individual tokens that make up that string, e.g. \"good morning!\" becomes\n# [\"good\", \"morning\", \"!\"]. We'll start talking about the sentences being a\n# sequence of tokens from now, instead of saying they're a sequence of words.\n\n# spaCy has model for each language (\"de_core_news_sm\" for German and\n# \"en_core_web_sm\" for English) which need to be loaded so we can access the\n# tokenizer of each model.\n# German tokenizer\nspacy_de = spacy.load('de_core_news_sm')\n# English tokenizer\nspacy_en = spacy.load('en_core_web_sm')\n\n# German is input sequence, we reverse it.\ndef tokenize_de(text):\n \"\"\"\n Tokenizes German text from a string into a list of strings (tokens) and reverses it\n \"\"\"\n return [tok.text for tok in spacy_de.tokenizer(text)][::-1]\n\ndef tokenize_en(text):\n \"\"\"\n Tokenizes English text from a string into a list of strings (tokens)\n \"\"\"\n return [tok.text for tok in spacy_en.tokenizer(text)]\n\n# # TODO: the fields in object 'tok' is needed. 
\n# def tokenize_fileds(text):\n# for tok in spacy_en.tokenizer(text):\n# # vars() 函数返回对象object的属性和属性值的字典对象。TypeError: vars()\n# # argument must have __dict__ attribute\n# print(vars(tok))\n# pdb.set_trace()\n\n# tokenize_fileds(en_text)\n\nen_text = 'I am Fool!'\nprint(tokenize_en(en_text))\n\n\nSRC = Field(tokenize=tokenize_de,\n init_token = '',\n eos_token = '',\n lower = True)\nTRG = Field(tokenize=tokenize_en,\n init_token = '',\n eos_token = '',\n lower = True)\n\n# download and load the train, validation and test data. Multi30k is used. This\n# is a dataset with ~30,000 parallel English, German and French sentences, each\n# with ~12 words per sentence.\n# extract the German and English Pair with the fields as SRC and TRG;\ntrain_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'), \n fields = (SRC, TRG))\nprint(f\"Number of training examples: {len(train_data.examples)}\")\nprint(f\"Number of validation examples: {len(valid_data.examples)}\")\nprint(f\"Number of testing examples: {len(test_data.examples)}\")\n\n# making sure the source sentence is reversed;\n# vars() 函数返回对象object的属性和属性值的字典对象。\nprint(vars(train_data.examples[0]))\n# whether should be counted. Nope\n\n# Build the vocabulary of both language from train data. \n\n# Using the min_freq argument, we only allow tokens that appear at least 2 times to appear in our\n# vocabulary. Tokens that appear only once are converted into an (unknown) token.\nSRC.build_vocab(train_data, min_freq = 2)\nTRG.build_vocab(train_data, min_freq = 2)\nprint(f\"Unique tokens in source (de) vocabulary: {len(SRC.vocab)}\")\nprint(f\"Unique tokens in target (en) vocabulary: {len(TRG.vocab)}\")\n# pdb.set_trace()\n\n\n# Final step of data prepare: create the data iterators. These can be iterated on to return a\n# batch of data which will have a 'src' attribute (the PyTorch tensors containing\n# a batch of numericalized 'source' sentences) and a 'trg' attribute (the PyTorch\n# tensors containing a batch of numericalized 'target' sentences). ##Numericalized##\n# is just a fancy way of saying they have been converted from a sequence of\n# readable tokens to a sequence of corresponding ##indexes##, using the vocabulary.\n# one-hot vector as word label?\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nBATCH_SIZE = 128\n\n# BucketIterator instead of the standard Iterator as it creates batches in such\n# a way that it minimizes the amount of padding in both the source and target\n# sentences. (NICE Property!!!!!)\ntrain_iterator, valid_iterator, test_iterator = BucketIterator.splits(\n (train_data, valid_data, test_data), \n batch_size = BATCH_SIZE, \n device = device)\n# The usage of 'torchtext' here is nice but too gigh level. Reverse the input\n# sequence order is considered in data preprocessing, as in 'tokenize_de'.\n\n\n# Building the Seq2Seq Model(LSTM Based)\n\n# Training the Seq2Seq Model. 
\n# The embedding (vocabulary) sizes and dropout rates of\n# encoder and decoder can be different.\nINPUT_DIM = len(SRC.vocab)\nOUTPUT_DIM = len(TRG.vocab)\n\n# Input feature dimension\nENC_EMB_DIM = 256\nDEC_EMB_DIM = 256\nHID_DIM = 512\nN_LAYERS = 2\nENC_DROPOUT = 0.5\nDEC_DROPOUT = 0.5\n\nenc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)\ndec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)\nmodel = Seq2Seq(enc, dec, device).to(device)\n\nmodel.apply(init_weights)\nprint(f'The model has {count_parameters(model):,} trainable parameters')\n\noptimizer = optim.Adam(model.parameters())\n\n# CrossEntropyLoss \n# Our loss function calculates the average loss per token,\n# however by passing the index of the token as the ignore_index argument\n# we ignore the loss whenever the target token is a padding token.\n# nice feature, how to build it?\nTRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]\ncriterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)\n\n# SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]\n# pdb.set_trace()\n\nN_EPOCHS = 10\nCLIP = 1\n\nbest_valid_loss = float('inf')\n\nfor epoch in range(N_EPOCHS):\n \n start_time = time.time()\n \n train_loss = train(model, train_iterator, optimizer, criterion, CLIP)\n valid_loss = evaluate(model, valid_iterator, criterion)\n \n end_time = time.time()\n \n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), './models/tut1-model.pt')\n \n print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')\n # pdb.set_trace()\n\nmodel.load_state_dict(torch.load('./models/tut1-model.pt'))\ntest_loss = evaluate(model, test_iterator, criterion)\nprint(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')\n\n# pdb.set_trace()","sub_path":"one.py","file_name":"one.py","file_ext":"py","file_size_in_byte":8520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"159441341","text":"from Tkinter import*\n\ndef college():\n if(var.get()==1):\n a=\"dean\"\n elif(var.get()==2):\n a=\"teacher\"\n else:\n a=\"student\"\n L1.config(text=a)\ntop=Tk()\nvar=IntVar()\n\nL1=Label(top,text=\"college\")\nL1.pack()\n\nrb1=Radiobutton(top,text=\"dean\",variable=var,value=1)\nrb1.pack()\n\nrb2=Radiobutton(top,text=\"teacher\",variable=var,value=2)\nrb2.pack()\n\nrb3=Radiobutton(top,text=\"student\",variable=var,value=3)\nrb3.pack()\n\nb=Button(top,text=\"click Here\",command=college)\nb.pack()\ntop.mainloop()\n","sub_path":"PYTHON/Advance/Python Controls/self/2nd check box & radio button/3ld radio button.py","file_name":"3ld radio button.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"323095192","text":"import csv\nimport re\n\n\nrf = open('story.csv', 'r')\nwf1 = open('character.csv', 'w')\nwf2 = open('feature.csv', 'w')\nwf3 = open('apparition.csv', 'w')\n\ndef get_names(string):\n filtered1 = re.sub(r'\\([^)]*\\)', '', string)\n filtered2 = re.sub(r'\\[[^)]*\\]', '', filtered1)\n filtered3 = re.sub(r'\\{[^)]*\\}', '', filtered2)\n filtered4 = re.sub(r'\\d+', '', filtered3)\n names = set()\n for name in filtered4.split(';'):\n filtered = re.sub(r'[?|.|\\[|\\]|\\{|\\}|\\(|\\)|\\\"]', '', 
name).strip().title()\n names.add(filtered)\n return filter(None, names)\n\nreader = csv.reader(rf, delimiter=',', quoting=csv.QUOTE_NONE)\nnext(reader, None)\n\nwriter1 = csv.writer(wf1)\nwriter2 = csv.writer(wf2)\nwriter3 = csv.writer(wf3)\n\nstory_field_to_writer = {\n 2:writer2,\n 11:writer3\n}\n\nnames = {}\nn = 0\n\nfor row in reader:\n for field in (2, 11):\n for name in get_names(row[field]):\n if name not in names:\n n = n+1\n names[name] = n\n writer1.writerow((n,name))\n story_field_to_writer[field].writerow((row[0], names[name]))\n","sub_path":"create_characters.py","file_name":"create_characters.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"308474767","text":"import tkinter, sys, random, pickle\r\nroot = tkinter.Tk()\r\n\r\nclass Project:\r\n def __init__(self, master):\r\n self.master = master \r\n \r\n # Make a text widget // adding a scrollbar to it \r\n self.textWidget = tkinter.Text(self.master, width=100, height=15, bg=\"#FFFFFF\", bd=10, relief=\"flat\", font=('Arial', 10))\r\n self.scroll = tkinter.Scrollbar(self.master, orient=\"vertical\")\r\n self.scroll.configure(command=self.textWidget.yview)\r\n self.textWidget.configure(yscrollcommand=self.scroll)\r\n self.textWidget.pack(side=\"left\")\r\n self.scroll.pack(side=\"left\", fill=\"y\")\r\n \r\n self.mSave_CONTEXT = tkinter.Menu(self.master)\r\n self.textWidget.bind(\"